from stable_baselines3 import PPO
import sumo_rl
import supersuit as ss
from stable_baselines3.common.vec_env import VecMonitor
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.callbacks import EvalCallback
import numpy as np
from array2gif import write_gif

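# Run configuration: number of evaluation rounds to spread over training, the
# agent count used in the eval-frequency calculation, the number of parallel
# environment copies, and the total number of PPO training timesteps.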
n_evaluations = 20
n_agents = 2
n_envs = 1
n_timesteps = 8000000

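# Build the multi-agent SUMO traffic-signal environment (PettingZoo parallel API)
# for the 4x4 grid scenario: 80000 simulated seconds per episode, GUI disabled,
# and metrics written as CSV files under outputs/4x4grid/.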
env = sumo_rl.parallel_env(net_file='nets/4x4-Lucas/4x4.net.xml',
                           route_file='nets/4x4-Lucas/4x4c1c2c1c2.rou.xml',
                           out_csv_name='outputs/4x4grid/test',
                           use_gui=False,
                           num_seconds=80000)

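# Adapt the PettingZoo env for Stable-Baselines3: stack the last 3 observations,
# expose each agent as a slot of a vectorized env, concatenate n_envs copies,
# and wrap with VecMonitor to record episode statistics.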
env = ss.frame_stack_v1(env, 3)
env = ss.pettingzoo_env_to_vec_env_v0(env)
env = ss.concat_vec_envs_v0(env, n_envs, num_cpus=1, base_class='stable_baselines3')
env = VecMonitor(env)

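# Optional separate evaluation environment for the EvalCallback; it is left
# commented out below, together with the callback itself.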
""" eval_env = sumo_rl.parallel_env(net_file='nets/4x4-Lucas/4x4.net.xml',
                                    route_file='nets/4x4-Lucas/4x4c1c2c1c2.rou.xml',
                                    out_csv_name='outputs/4x4grid/test',
                                    use_gui=False,
                                    num_seconds=80000)

eval_env = ss.frame_stack_v1(eval_env, 3)
eval_env = ss.pettingzoo_env_to_vec_env_v0(eval_env)
eval_env = ss.concat_vec_envs_v0(eval_env, 1, num_cpus=1, base_class='stable_baselines3')
eval_env = VecMonitor(eval_env) """

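# Aim for roughly n_evaluations evaluation rounds over the whole run: the
# vectorized training env advances n_envs * n_agents timesteps per step call,
# so the callback's eval_freq (counted in step calls) is scaled down accordingly.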
eval_freq = int(n_timesteps / n_evaluations)
eval_freq = max(eval_freq // (n_envs * n_agents), 1)

model = PPO("MlpPolicy", env, verbose=3, gamma=0.95, n_steps=256, ent_coef=0.0905168,
            learning_rate=0.00062211, vf_coef=0.042202, max_grad_norm=0.9, gae_lambda=0.99,
            n_epochs=5, clip_range=0.3, batch_size=256)
# eval_callback = EvalCallback(eval_env, best_model_save_path='./logs/', log_path='./logs/',
#                              eval_freq=eval_freq, deterministic=True, render=False)
model.learn(total_timesteps=n_timesteps)  # callback=eval_callback

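# NOTE: './logs/best_model' is only written by the EvalCallback above, which is
# currently commented out. If the callback stays disabled, a possible fallback
# (an assumption, not part of the original script) is to save the freshly
# trained model so the load below has something to read:
# model.save('./logs/best_model')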
model = PPO.load("./logs/best_model")

mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)

print(mean_reward)
print(std_reward)

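# The disabled block below was meant to render rollouts to a GIF. It appears to
# be adapted from an image-observation PettingZoo example (note the 'kaz.gif'
# filename and the color-reduction/resize wrappers) and assumes the environment
# supports rgb_array rendering; it is kept commented out here.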
""" render_env = sumo_rl.env(net_file='nets/4x4-Lucas/4x4.net.xml',
                             route_file='nets/4x4-Lucas/4x4c1c2c1c2.rou.xml',
                             out_csv_name='outputs/4x4grid/test',
                             use_gui=False,
                             num_seconds=80000)

render_env = render_env.parallel_env()
render_env = ss.color_reduction_v0(render_env, mode='B')
render_env = ss.resize_v0(render_env, x_size=84, y_size=84)
render_env = ss.frame_stack_v1(render_env, 3)

obs_list = []
i = 0
render_env.reset()


while True:
    for agent in render_env.agent_iter():
        observation, _, done, _ = render_env.last()
        action = model.predict(observation, deterministic=True)[0] if not done else None

        render_env.step(action)
        i += 1
        if i % (len(render_env.possible_agents)) == 0:
            obs_list.append(np.transpose(render_env.render(mode='rgb_array'), axes=(1, 0, 2)))
    render_env.close()
    break

print('Writing gif')
write_gif(obs_list, 'kaz.gif', fps=15) """