# pong-dqn.yaml
# You can expect ~20 mean episode reward within ~1.1M timesteps (~2.1 hours on a K80 GPU).
pong-deterministic-dqn:
    env: ALE/Pong-v5
    run: DQN
    stop:
        episode_reward_mean: 20
        time_total_s: 7200
    config:
        # Works for both torch and tf.
        framework: torch
        env_config:
            nondeterministic: False  # deterministic
        num_gpus: 1
        gamma: 0.99
        lr: 0.0001
        replay_buffer_config:
            type: MultiAgentPrioritizedReplayBuffer
            capacity: 50000
        # Collect this many env steps before the first learning update.
        num_steps_sampled_before_learning_starts: 10000
        rollout_fragment_length: 4
        train_batch_size: 32
        exploration_config:
            # Anneal epsilon down to final_epsilon over the first 200k timesteps.
            epsilon_timesteps: 200000
            final_epsilon: 0.01
        model:
            # Downscale observations to 42x42 grayscale frames.
            grayscale: True
            zero_mean: False
            dim: 42
        # Compress observations, since few machines could otherwise hold the
        # 50k-step replay buffer in memory.
        compress_observations: True
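
# ---------------------------------------------------------------------------
# Usage note (editor's sketch, not part of the upstream example): with Ray
# and the Atari extras installed (e.g. `pip install "ray[rllib]"
# "gymnasium[atari,accept-rom-license]"`), a tuned-example file like this is
# typically launched via the RLlib CLI -- `rllib train -f pong-dqn.yaml` on
# older Ray releases, or `rllib train file pong-dqn.yaml` on newer ones.
# A rough programmatic equivalent is sketched below; the file path and the
# classic `tune.run` API are assumptions, and the `env` key is merged into
# the config the way the CLI does it:
#
#     import yaml
#     import ray
#     from ray import tune
#
#     with open("pong-dqn.yaml") as f:
#         experiments = yaml.safe_load(f)   # {experiment_name: spec}
#
#     name, spec = next(iter(experiments.items()))
#     ray.init()
#     tune.run(
#         spec["run"],                       # registered algorithm: "DQN"
#         name=name,
#         stop=spec["stop"],                 # reward / wall-clock stoppers
#         config=dict(spec["config"], env=spec["env"]),
#     )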