"""RLlib MultiAgentEnv wrapper around DeepMind OpenSpiel games."""

from typing import Optional

import gymnasium as gym
import numpy as np

from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.env.utils import try_import_pyspiel

pyspiel = try_import_pyspiel(error=True)


class OpenSpielEnv(MultiAgentEnv):
    """Exposes an OpenSpiel game object through RLlib's MultiAgentEnv API."""

    def __init__(self, env):
        super().__init__()
        self.env = env

        # Agent IDs are ints, starting from 0.
        self.num_agents = self.env.num_players()
        # Store the open-spiel game type.
        self.type = self.env.get_type()
        # Stores the current open-spiel game state.
        self.state = None

        # Each agent observes a flat float32 tensor of fixed size.
        self.observation_space = gym.spaces.Dict(
            {
                aid: gym.spaces.Box(
                    float("-inf"),
                    float("inf"),
                    (self.env.observation_tensor_size(),),
                    dtype=np.float32,
                )
                for aid in range(self.num_agents)
            }
        )
        # Each agent chooses from the game's set of distinct actions.
        self.action_space = gym.spaces.Dict(
            {
                aid: gym.spaces.Discrete(self.env.num_distinct_actions())
                for aid in range(self.num_agents)
            }
        )

    def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
        self.state = self.env.new_initial_state()
        return self._get_obs(), {}

    def step(self, action):
        # Before applying action(s), there could be chance nodes.
        # E.g. if env has to figure out, which agent's action should get
        # resolved first in a simultaneous node.
        self._solve_chance_nodes()

        penalties = {}

        # Sequential game:
        if str(self.type.dynamics) == "Dynamics.SEQUENTIAL":
            curr_player = self.state.current_player()
            assert curr_player in action
            try:
                self.state.apply_action(action[curr_player])
            # TODO: (sven) resolve this hack by publishing legal actions
            #  with each step.
            except pyspiel.SpielError:
                self.state.apply_action(np.random.choice(self.state.legal_actions()))
                penalties[curr_player] = -0.1

            # Compile rewards dict.
            rewards = {ag: r for ag, r in enumerate(self.state.returns())}
        # Simultaneous game.
        else:
            assert self.state.current_player() == -2
            # Apparently, this works, even if one or more actions are invalid.
            self.state.apply_actions([action[ag] for ag in range(self.num_agents)])

        # Now that we have applied all actions, get the next obs.
        obs = self._get_obs()

        # Compile rewards dict and add the accumulated penalties
        # (for taking invalid actions).
        rewards = {ag: r for ag, r in enumerate(self.state.returns())}
        for ag, penalty in penalties.items():
            rewards[ag] += penalty

        # Are we done?
        is_terminated = self.state.is_terminal()
        terminateds = dict(
            {ag: is_terminated for ag in range(self.num_agents)},
            **{"__all__": is_terminated},
        )
        truncateds = dict(
            {ag: False for ag in range(self.num_agents)}, **{"__all__": False}
        )

        return obs, rewards, terminateds, truncateds, {}

    def render(self, mode=None) -> None:
        if mode == "human":
            print(self.state)

    def _get_obs(self):
        # Before calculating an observation, there could be chance nodes
        # (that may have an effect on the actual observations).
        # E.g. After reset, figure out initial (random) positions of the
        # agents.
        self._solve_chance_nodes()

        if self.state.is_terminal():
            return {}

        # Sequential game:
        if str(self.type.dynamics) == "Dynamics.SEQUENTIAL":
            curr_player = self.state.current_player()
            return {
                curr_player: np.reshape(self.state.observation_tensor(), [-1]).astype(
                    np.float32
                )
            }
        # Simultaneous game.
        else:
            assert self.state.current_player() == -2
            return {
                ag: np.reshape(self.state.observation_tensor(ag), [-1]).astype(
                    np.float32
                )
                for ag in range(self.num_agents)
            }

    def _solve_chance_nodes(self):
        # Chance node(s): Sample a (non-player) action and apply.
        while self.state.is_chance_node():
            assert self.state.current_player() == -1
            actions, probs = zip(*self.state.chance_outcomes())
            action = np.random.choice(actions, p=probs)
            self.state.apply_action(action)
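

# ---------------------------------------------------------------------------
# Minimal usage sketch (an assumption added for illustration, not part of the
# upstream module): load an OpenSpiel game via pyspiel and drive the wrapper
# with randomly sampled actions until the episode terminates. The game name
# "connect_four" and the random-action loop are illustrative choices only; any
# OpenSpiel game object can be passed to OpenSpielEnv.
if __name__ == "__main__":
    game = pyspiel.load_game("connect_four")
    env = OpenSpielEnv(game)

    obs, infos = env.reset()
    terminateds = {"__all__": False}
    while not terminateds["__all__"]:
        # Sample one action per agent that received an observation this step.
        # Illegal moves are handled by the wrapper (fallback to a random legal
        # action plus a small penalty).
        actions = {aid: int(env.action_space[aid].sample()) for aid in obs}
        obs, rewards, terminateds, truncateds, infos = env.step(actions)

    env.render(mode="human")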