"""testing.py: train Soft Actor-Critic (SAC) on HalfCheetah with rlkit."""
from gym.envs.mujoco import HalfCheetahEnv

import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic
from rlkit.torch.sac.sac import SACTrainer
from rlkit.torch.networks import ConcatMlp
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm

# The imports below support image-based variants (CarRacing with CNN
# policies, torchvision backbones for PretrainedCNN); they are unused in
# this state-based HalfCheetah script.
from gym.envs.box2d import CarRacing
from rlkit.torch.sac.policies import TanhCNNGaussianPolicy, GaussianCNNPolicy
from rlkit.torch.networks import PretrainedCNN, CNN
import torch
import torchvision.models as models

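# Overview: experiment() wires together rlkit's standard SAC pieces: twin
# Q-networks with slowly-updated target copies, a tanh-Gaussian policy,
# separate exploration/evaluation path collectors, and a replay buffer,
# all driven by TorchBatchRLAlgorithm.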
def experiment(variant):
    expl_env = NormalizedBoxEnv(HalfCheetahEnv())
    eval_env = NormalizedBoxEnv(HalfCheetahEnv())
    obs_dim = expl_env.observation_space.low.size
    action_dim = eval_env.action_space.low.size

    # Twin Q-networks plus target copies (SAC's clipped double-Q estimate).
    M = variant["layer_size"]
    qf1 = ConcatMlp(
        input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M],
    )
    qf2 = ConcatMlp(
        input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M],
    )
    target_qf1 = ConcatMlp(
        input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M],
    )
    target_qf2 = ConcatMlp(
        input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M],
    )

    # Stochastic tanh-Gaussian policy for exploration; evaluation uses its
    # deterministic (mean-action) wrapper.
    policy = TanhGaussianPolicy(
        obs_dim=obs_dim, action_dim=action_dim, hidden_sizes=[M, M],
    )
    eval_policy = MakeDeterministic(policy)
    eval_path_collector = MdpPathCollector(eval_env, eval_policy)
    expl_path_collector = MdpPathCollector(expl_env, policy)
    replay_buffer = EnvReplayBuffer(variant["replay_buffer_size"], expl_env)

    trainer = SACTrainer(
        env=eval_env,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        **variant["trainer_kwargs"]
    )
    algorithm = TorchBatchRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        **variant["algorithm_kwargs"]
    )
    algorithm.to(ptu.device)
    algorithm.train()
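
# Note: the variant below uses small-scale settings (batch_size=16, a
# 2e4-step replay buffer, 500 eval steps per epoch); for comparison,
# rlkit's reference SAC example uses batch_size=256 and a 1e6-step buffer,
# so these values look chosen for quick debugging runs rather than a
# tuned configuration.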
if __name__ == "__main__":
    # noinspection PyTypeChecker
    variant = dict(
        algorithm="SAC",
        version="normal",
        layer_size=256,
        replay_buffer_size=int(2e4),
        algorithm_kwargs=dict(
            num_epochs=100000,
            num_eval_steps_per_epoch=500,
            num_trains_per_train_loop=100,
            num_expl_steps_per_train_loop=100,
            min_num_steps_before_training=100,
            max_path_length=1000,
            batch_size=16,
        ),
        trainer_kwargs=dict(
            discount=0.99,
            soft_target_tau=5e-3,
            target_update_period=1,
            policy_lr=3e-4,
            qf_lr=3e-4,
            reward_scale=1,
            use_automatic_entropy_tuning=True,
        ),
    )
    setup_logger("name-of-experiment", variant=variant)
    ptu.set_gpu_mode(True)  # optionally set the GPU (default=False)
    experiment(variant)
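
# To run: `python testing.py`. Given the imports above, the script expects
# gym with MuJoCo support (HalfCheetahEnv), Box2D (for the CarRacing
# import), and rlkit, plus a CUDA device since set_gpu_mode(True) is set.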