-
Notifications
You must be signed in to change notification settings - Fork 0
/
mcts_evaluator.py
75 lines (65 loc) · 2.92 KB
/
mcts_evaluator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
"""Evaluate a trained agent against different MCTS strengths."""
import argparse
import ray
from ray import tune
from ray.tune.registry import register_env
from src.callbacks import mcts_metrics_on_episode_end
from src.policies import HumanPolicy, MCTSPolicy, RandomPolicy
from src.utils import get_debug_config, get_learner_policy_configs, get_model_config
if __name__ == '__main__':
    # Evaluate a trained (restored) agent against MCTS opponents of increasing
    # strength: one Tune trial per rollout budget in `mcts_num_rollouts`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--policy', type=str, default='PPO')
    parser.add_argument('--use-cnn', action='store_true')
    parser.add_argument('--num-learners', type=int, default=2)
    # e.g. --restore="/home/dave/ray_results/main/PPO_c4_0_2019-09-23_16-17-45z9x1oc9j/checkpoint_782/checkpoint-782"
    parser.add_argument('--restore', type=str)
    # e.g. --eval-policy=learned06
    parser.add_argument('--eval-policy', type=str)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--human', action='store_true')
    args = parser.parse_args()

    # local_mode serializes all Ray tasks into one process for easier debugging.
    ray.init(local_mode=args.debug)
    tune_config = get_debug_config(args)

    model_config, env_cls = get_model_config(args.use_cnn)
    register_env('c4', lambda cfg: env_cls(cfg))
    # Instantiate one env only to read its observation/action spaces.
    env = env_cls()
    obs_space, action_space = env.observation_space, env.action_space
    trainable_policies = get_learner_policy_configs(args.num_learners, obs_space, action_space, model_config)

    def get_policy_by_num(num_rollouts):
        """Build a 'multiagent' config pitting the eval policy against an
        MCTS opponent limited to `num_rollouts` rollouts per move."""
        return {
            'policies_to_train': [*trainable_policies],
            # NOTE(review): RLlib's policy_mapping_fn conventionally returns a single
            # policy ID per agent (cf. the commented per-agent mapping below) — confirm
            # that returning a tuple is intended for the RLlib version in use.
            # 'policy_mapping_fn': lambda agent_id: [args.eval_policy, 'mcts'][agent_id % 2],
            'policy_mapping_fn': lambda _: (args.eval_policy, 'mcts'),
            'policies': {
                **trainable_policies,
                'mcts': (MCTSPolicy, obs_space, action_space, {'max_rollouts': num_rollouts}),
                'human': (HumanPolicy, obs_space, action_space, {}),
                'random': (RandomPolicy, obs_space, action_space, {}),
            },
        }

    mcts_num_rollouts = [4, 8, 16, 32, 64, 128, 256, 512]
    # mcts_num_rollouts = [128, 256, 512, 1024, 2048]

    def name_trial(trial):
        """Give trials a more readable name in terminal & Tensorboard."""
        # Policy spec is (cls, obs_space, action_space, config); index 3 is the config dict.
        num_mcts_rollouts = trial.config['multiagent']['policies']['mcts'][3]['max_rollouts']
        return f'{trial.trainable_name}_MCTS({num_mcts_rollouts})'

    tune.run(
        args.policy,
        name='mcts_evaluator',
        trial_name_creator=name_trial,
        stop={
            'episodes_total': 1000,
        },
        config=dict({
            'env': 'c4',
            'env_config': {},
            'lr': 0.001,
            'gamma': 0.995,
            'lambda': 0.95,
            'clip_param': 0.2,
            # 'kl_coeff': 1.0,
            # One grid-search trial per MCTS strength.
            'multiagent': tune.grid_search([get_policy_by_num(n) for n in mcts_num_rollouts]),
            'callbacks': {'on_episode_end': mcts_metrics_on_episode_end},
        }, **tune_config),
        restore=args.restore,
    )