Merge pull request Grid2op#140 from BDonnot/bd_dev
Merging in preparation for version 1.6.1, which will be used for ICAPS 2021
BDonnot authored Jul 27, 2021
2 parents d516e1c + 91c3c8e commit 57676ef
Showing 15 changed files with 297 additions and 127 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -322,6 +322,7 @@ OpponentCalibration.ipynb
grid2op/data_test/l2rpn_neurips_2020_track1_with_alert/_statistics_do_nothing/
save/
shorten_env.py
test_hash_env.py

# profiling files
**.prof
12 changes: 12 additions & 0 deletions CHANGELOG.rst
@@ -23,6 +23,18 @@ Change Log
- [???] "asynch" multienv
- [???] properly model interconnecting powerlines

[1.6.1] - 2021-07-xx
---------------------
- [FIXED] a bug in `env.get_path_env()` in case `env` was a multimix (it returned the path of the current mix
  instead of the path of the multimix environment)
- [FIXED] a bug in `backend.get_action_to_set()` and `backend.update_from_obs()` in case of a disconnected shunt
  with backends that support shunts (values for `p` and `q` were set even if the shunt was disconnected, which
  could lead to undefined behaviour)
- [IMPROVED] now grid2op is able to check if an environment needs to be updated when calling `grid2op.update_env()`
thanks to the use of registered hash values.
- [IMPROVED] now grid2op will check if an update is available when an environment is being downloaded for the
first time.

[1.6.0] (hotfix) - 2021-06-23
------------------------------
- [FIXED] issue `Issue#235 <https://github.com/rte-france/Grid2Op/issues/235>`_ when using the "simulate"
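The two [IMPROVED] changelog items above describe the new hash-based update check. A minimal usage sketch, assuming the public `grid2op.update_env()` accepts an optional environment name the way the internal `_update_files()` helper later in this diff does:

```python
# Minimal usage sketch of the hash-based update check described in the
# changelog above. Assumption: grid2op.update_env() accepts an optional
# environment name, mirroring the internal _update_files() helper below.
import grid2op

# compare the locally computed hash of every downloaded environment with
# the hash registered on the remote "grid2op-datasets" repository, and
# update only the environments whose hash differs
grid2op.update_env()

# or restrict the check to a single environment
grid2op.update_env(env_name="l2rpn_case14_sandbox")
```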
28 changes: 20 additions & 8 deletions grid2op/Backend/Backend.py
@@ -1442,9 +1442,15 @@ def get_action_to_set(self):
}}
if self.shunts_data_available:
p_s, q_s, sh_v, bus_s = self.shunt_info()
p_s *= (self._sh_vnkv / sh_v)**2
q_s *= (self._sh_vnkv / sh_v)**2
dict_["shunt"] = {"shunt_p": p_s, "shunt_q": q_s, "shunt_bus": bus_s}
dict_["shunt"] = {"shunt_bus": bus_s}
if np.sum(bus_s >= 1):
p_s *= (self._sh_vnkv / sh_v)**2
q_s *= (self._sh_vnkv / sh_v)**2
p_s[bus_s == -1] = np.NaN
q_s[bus_s == -1] = np.NaN
dict_["shunt"]["shunt_p"] = p_s
dict_["shunt"]["shunt_q"] = q_s

set_me.update(dict_)
return set_me

@@ -1494,12 +1500,18 @@ def update_from_obs(self, obs):
if "_shunt_bus" not in type(obs).attr_list_set:
raise BackendError("Impossible to set the backend to the state given by the observation: shunts data "
"are not present in the observation.")
mults = (self._sh_vnkv / obs._shunt_v)**2
dict_["shunt"] = {"shunt_p": obs._shunt_p * mults,
"shunt_q": obs._shunt_q * mults,
"shunt_bus": obs._shunt_bus}

act.update(dict_)
dict_["shunt"] = {"shunt_bus": obs._shunt_bus}
shunt_co = obs._shunt_bus >= 1
if np.sum(shunt_co):
mults = (self._sh_vnkv / obs._shunt_v) ** 2
sh_p = obs._shunt_p * mults
sh_q = obs._shunt_q * mults
sh_p[~shunt_co] = np.NaN
sh_q[~shunt_co] = np.NaN
dict_["shunt"]["shunt_p"] = sh_p
dict_["shunt"]["shunt_q"] = sh_q
act.update(dict_)
backend_action += act
self.apply_action(backend_action)

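Both hunks above apply the same fix: shunt set-points are only emitted for connected shunts, and the entries of disconnected shunts are replaced by NaN so they cannot be mistaken for valid values. A standalone sketch of that masking pattern, on made-up numbers:

```python
# Standalone sketch of the masking pattern used in the two Backend hunks
# above: p/q values of disconnected shunts (bus == -1) are overwritten
# with NaN so they are not interpreted as valid set-points.
import numpy as np

bus_s = np.array([1, -1, 2])        # -1 marks a disconnected shunt
p_s = np.array([10.0, 5.0, -3.0])   # raw values reported by the backend (MW)
q_s = np.array([2.0, 1.0, -0.5])    # raw values reported by the backend (MVAr)

dict_ = {"shunt": {"shunt_bus": bus_s}}
if np.sum(bus_s >= 1):              # at least one shunt is connected
    p_s[bus_s == -1] = np.nan
    q_s[bus_s == -1] = np.nan
    dict_["shunt"]["shunt_p"] = p_s  # [10., nan, -3.]
    dict_["shunt"]["shunt_q"] = q_s  # [ 2., nan, -0.5]
```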
4 changes: 4 additions & 0 deletions grid2op/Download/DownloadDataset.py
@@ -116,6 +116,10 @@ def _aux_download(url, dataset_name, path_data, ds_name_dl=None):
# bug in the AWS file... named ".tar.tar.bz2" ...
os.remove(output_path)

# check for update (if any)
from grid2op.MakeEnv.UpdateEnv import _update_files
_update_files(dataset_name)

print("You may now use the environment \"{}\" with the available data by invoking:\n"
"\tenv = grid2op.make(\"{}\")"
"".format(dataset_name, dataset_name))
13 changes: 13 additions & 0 deletions grid2op/Environment/BaseEnv.py
@@ -1763,6 +1763,19 @@ def step(self, action):
self._sum_curtailment_mw = -self._sum_curtailment_mw_prev
self._sum_curtailment_mw_prev = dt_float(0.)

# case where the action modifies the injections (TODO maybe make a dedicated env for that...)
for inj_key in ["load_p", "prod_p", "load_q"]:
# the injections modified by the action override the ones scheduled by the environment
if inj_key in action._dict_inj:
if inj_key in self._env_modification._dict_inj:
this_p_load = 1. * self._env_modification._dict_inj[inj_key]
act_modif = action._dict_inj[inj_key]
this_p_load[np.isfinite(act_modif)] = act_modif[np.isfinite(act_modif)]
self._env_modification._dict_inj[inj_key][:] = this_p_load
else:
self._env_modification._dict_inj[inj_key] = 1.0 * action._dict_inj[inj_key]
self._env_modification._modif_inj = True

if self.n_storage > 0:
# TODO limit here if the ramps are too low !
# TODO in the above case, action should not be implemented !
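The block added to `step` above merges the injections requested by the action into the ones scheduled by the environment, entry by entry. A self-contained sketch of that merge rule, on made-up numbers:

```python
# Self-contained sketch of the merge rule added to step() above, on
# made-up numbers: wherever the action carries a finite value it overrides
# the scheduled one, and NaN entries leave the schedule untouched.
import numpy as np

env_load_p = np.array([10.0, 20.0, 30.0])      # scheduled by the chronics
act_load_p = np.array([np.nan, 25.0, np.nan])  # requested by the action

this_p_load = 1. * env_load_p                  # copy, as in the diff
finite = np.isfinite(act_load_p)
this_p_load[finite] = act_load_p[finite]
# this_p_load is now [10., 25., 30.]
```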
11 changes: 11 additions & 0 deletions grid2op/Environment/MultiMixEnv.py
@@ -161,6 +161,7 @@ def __init__(self,
self.current_env = None
self.env_index = None
self.mix_envs = []
self._env_dir = os.path.abspath(envs_dir)

# Special case handling for backend
# TODO: with backend.copy() instead !
@@ -211,6 +212,16 @@ def __init__(self,
self.__class__ = self.init_grid(self.current_env)
self.current_env.env_name = save_env_name

def get_path_env(self):
"""
Get the path that was used to create this environment.

It can be used, for example, in `grid2op.utils.underlying_statistics` to save the information directly inside
the environment data.
"""
return self._env_dir

@property
def current_index(self):
return self.env_index
21 changes: 11 additions & 10 deletions grid2op/MakeEnv/Make.py
@@ -48,10 +48,10 @@
_REQUEST_FAIL_EXHAUSTED_ERR = "Impossible to retrieve data at \"{}\".\n" \
"If the problem persists, please contact grid2op developers by sending an issue at " \
"https://github.com/rte-france/Grid2Op/issues"
_REQUEST_FAIL_RETRY_ERR = "Failure to get a reponse from the url \"{}\".\n" \
"Retrying.. {} attempt(s) remaining"
_REQUEST_FAIL_RETRY_ERR = "Failure to get a response from the url \"{}\".\n" \
"Retrying... {} attempt(s) remaining"
_REQUEST_EXCEPT_RETRY_ERR = "Exception in getting an answer from \"{}\".\n" \
"Retrying.. {} attempt(s) remaining"
"Retrying... {} attempt(s) remaining"

_LIST_REMOTE_URL = "https://api.github.com/repos/bdonnot/grid2op-datasets/contents/datasets.json"
_LIST_REMOTE_KEY = "download_url"
@@ -60,7 +60,8 @@
"Parsing error:\n {}"
_LIST_REMOTE_CORRUPTED_CONTENT_JSON_ERR = "Corrupted json retrieved from github api. " \
"Please wait a few minutes and try again. " \
"If the error persist, contact grid2op organizers"
"If the error persist, contact grid2op devs by making an issue at " \
"\n\thttps://github.com/rte-france/Grid2Op/issues/new/choose"
_LIST_REMOTE_INVALID_DATASETS_JSON_ERR = "Impossible to retrieve available datasets. " \
"File could not be converted to json. " \
"The error was \n\"{}\""
@@ -110,7 +111,7 @@ def _send_request_retry(url, nb_retry=10, gh_session=None):
raise
except KeyboardInterrupt:
raise
except:
except Exception as exc_:
warnings.warn(_REQUEST_EXCEPT_RETRY_ERR.format(url, nb_retry-1))
time.sleep(1)
return _send_request_retry(url, nb_retry=nb_retry-1, gh_session=gh_session)
@@ -123,7 +124,7 @@ def _retrieve_github_content(url, is_json=True):
except Exception as e:
raise Grid2OpException(_LIST_REMOTE_INVALID_CONTENT_JSON_ERR.format(e))

if not _LIST_REMOTE_KEY in answer_json:
if _LIST_REMOTE_KEY not in answer_json:
raise Grid2OpException(_LIST_REMOTE_CORRUPTED_CONTENT_JSON_ERR)
time.sleep(1)
avail_datasets = _send_request_retry(answer_json[_LIST_REMOTE_KEY])
@@ -175,13 +176,13 @@ def _extract_ds_name(dataset_path):

try:
dataset_path = str(dataset_path)
except:
raise Grid2OpException(_EXTRACT_DS_NAME_CONVERT_ERR.format(dataset_path))
except Exception as exc_:
raise Grid2OpException(_EXTRACT_DS_NAME_CONVERT_ERR.format(dataset_path)) from exc_

try:
dataset_name = os.path.split(dataset_path)[-1]
except:
raise UnknownEnv(_EXTRACT_DS_NAME_RECO_ERR.format(dataset_path))
except Exception as exc_:
raise UnknownEnv(_EXTRACT_DS_NAME_RECO_ERR.format(dataset_path)) from exc_
dataset_name = dataset_name.lower().rstrip().lstrip()
dataset_name = os.path.splitext(dataset_name)[0]
return dataset_name
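The bare `except:` clauses above were narrowed to `except Exception as exc_:` and the re-raises now chain with `from exc_`. A minimal illustration of why, not grid2op code:

```python
# Minimal illustration (not grid2op code) of the two idioms adopted above:
# catching Exception keeps KeyboardInterrupt and SystemExit propagating,
# and "raise ... from" preserves the original traceback as __cause__.
class Grid2OpException(Exception):
    pass

def to_dataset_name(dataset_path):
    try:
        return str(dataset_path)
    except Exception as exc_:
        # the caller sees both this exception and the original one
        raise Grid2OpException(f"cannot convert {dataset_path!r}") from exc_
```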
84 changes: 78 additions & 6 deletions grid2op/MakeEnv/UpdateEnv.py
@@ -15,6 +15,7 @@
from grid2op.MakeEnv.Make import _retrieve_github_content

_LIST_REMOTE_URL = "https://api.github.com/repos/bdonnot/grid2op-datasets/contents/updates.json"
_LIST_REMOTE_ENV_HASH = "https://api.github.com/repos/bdonnot/grid2op-datasets/contents/env_hashes.json"


def _write_file(path_local_env, new_config, file_name):
@@ -76,10 +77,26 @@ def _update_file(dict_, env_name, file_name):
_write_file(mix_dir, new_config, file_name=file_name)
else:
_write_file(path_local_env, new_config, file_name=file_name)
print("Successfully updated file \"{}\" for environment \"{}\"".format(file_name, env_name))
print("\t Successfully updated file \"{}\" for environment \"{}\"".format(file_name, env_name))


def _update_files(env_name=None):
def _do_env_need_update(env_name, env_hashes):
if env_name not in env_hashes:
# no hash is provided for this environment, so in doubt I update it (old behaviour)
return True
else:
# I check whether "my" hash differs from the remote hash
base_path = grid2op.get_current_local_dir()
hash_remote_hex = env_hashes[env_name]
hash_local = _hash_env(os.path.join(base_path, env_name))
hash_local_hex = hash_local.hexdigest()
res = hash_remote_hex != hash_local_hex
return res


def _update_files(env_name=None,
answer_json=None,
env_hashes=None):
"""
INTERNAL
@@ -95,14 +112,24 @@ def _update_files(env_name=None):
"""
avail_envs = list_available_local_env()

if answer_json is None:
# optimization to retrieve only once this file
answer_json = _retrieve_github_content(_LIST_REMOTE_URL)

if env_hashes is None:
# optimization to retrieve only once this file
env_hashes = _retrieve_github_content(_LIST_REMOTE_ENV_HASH)

if env_name is None:
# I update the files for all the environments
for env_name in avail_envs:
_update_files(env_name)
_update_files(env_name, answer_json=answer_json, env_hashes=env_hashes)
else:
# I update the files for a single environment
if env_name in avail_envs:
answer_json = _retrieve_github_content(_LIST_REMOTE_URL)

if env_name in answer_json:
need_update = _do_env_need_update(env_name, env_hashes)
if env_name in answer_json and need_update:
dict_main = answer_json[env_name]
for k, dict_ in dict_main.items():
_update_file(dict_, env_name, file_name=k)
@@ -112,3 +139,48 @@
else:
raise UnknownEnv("Impossible to locate the environment named \"{}\". Have you downlaoded it?"
"".format(env_name))


# TODO make that a method of the environment maybe ?
def _hash_env(path_local_env,
hash_=None,
blocksize=64, # TODO is this correct ?
):
import hashlib  # lazy import
import re  # lazy import, used below to strip whitespace
if hash_ is None:
# we use this as it is supposedly faster than md5
# we don't really care about the "secure" part of it (though it's a nice tool to have)
hash_ = hashlib.blake2b()
if os.path.exists(os.path.join(path_local_env, ".multimix")):
# this is a multimix, so I need to run through all the sub-environments
mixes = sorted(os.listdir(path_local_env))
for mix in mixes:
mix_dir = os.path.join(path_local_env, mix)
if os.path.isdir(mix_dir):
hash_ = _hash_env(mix_dir, hash_=hash_, blocksize=blocksize)
else:
# I am hashing a regular environment
# first I hash the config files
for fn_ in ["alerts_info.json",
"config.py",
"difficulty_levels.json",
"grid.json",
"grid_layout.json",
"prods_charac.csv"]: # list the file we want to hash (we don't hash everything
full_path_file = os.path.join(path_local_env, fn_)
if os.path.exists(full_path_file):
with open(full_path_file, "r", encoding="utf-8") as f:
text_ = f.read()
text_ = re.sub(r"\s", "", text_)  # strip all whitespace to ensure compatibility between platforms
# (git sometimes replaces the "\r\n" of windows with "\n" on linux / macos, which would
# otherwise mess up the hash)
hash_.update(text_.encode("utf-8"))

# now I hash the chronics
# but as I don't want to read every chronics file (for time reasons) I will only hash
# the names of all the chronics
path_chronics = os.path.join(path_local_env, "chronics")
for chron_name in sorted(os.listdir(path_chronics)):
hash_.update(chron_name.encode("utf-8"))
return hash_
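`_hash_env` above folds the whitespace-normalized content of a few config files, then the sorted chronics file names, into a single blake2b digest. A trimmed, self-contained sketch; the two file names used here are a subset of the real list shown in the diff:

```python
# Trimmed, self-contained sketch of the hashing scheme implemented above;
# the file list below is a subset of the real one shown in the diff.
import hashlib
import os
import re

def sketch_hash_env(path_local_env):
    hash_ = hashlib.blake2b()  # faster than md5; security is not the point here
    for fn_ in ("config.py", "grid.json"):
        full_path_file = os.path.join(path_local_env, fn_)
        if os.path.exists(full_path_file):
            with open(full_path_file, "r", encoding="utf-8") as f:
                # strip all whitespace so "\r\n" vs "\n" checkouts hash the same
                text_ = re.sub(r"\s", "", f.read())
            hash_.update(text_.encode("utf-8"))
    path_chronics = os.path.join(path_local_env, "chronics")
    if os.path.isdir(path_chronics):
        for chron_name in sorted(os.listdir(path_chronics)):
            hash_.update(chron_name.encode("utf-8"))  # names only, not content
    return hash_.hexdigest()
```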
8 changes: 6 additions & 2 deletions grid2op/Opponent/WeightedRandomOpponent.py
@@ -93,6 +93,8 @@ def init(self, partial_env, lines_attacked=[], rho_normalization=[], attack_period

# Opponent's attack period
self._attack_period = attack_period
if self._attack_period <= 0:
raise OpponentError("Opponent attack cooldown needs to be > 0")

def reset(self, initial_budget):
self._next_attack_time = None
@@ -157,13 +159,15 @@ def attack(self, observation, agent_action, env_action,

# If all attackable lines are disconnected, do not attack
status = observation.line_status[self._lines_ids]
if np.all(~status):
if not np.sum(status):
return None, 0

available_attacks = self._attacks[status]
rho = observation.rho[self._lines_ids][status] / self._rho_normalization[status]
rho_sum = rho.sum()
if rho_sum <= 0.:
return None
# this case can happen if a powerline has a flow of exactly 0.0 while being connected, and it is
# the only one that can be attacked... Pretty rare, hey!
return None, 0
attack = self.space_prng.choice(available_attacks, p=rho / rho_sum)
return attack, None
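In `attack` above, the opponent draws among connected attackable lines with probability proportional to their normalized loading. A numpy-only sketch of that selection rule, with made-up figures:

```python
# Numpy-only sketch of the selection rule in attack() above, with
# made-up figures: among connected attackable lines, one is drawn with
# probability proportional to its normalized loading rho.
import numpy as np

prng = np.random.default_rng(0)
status = np.array([True, False, True, True])  # is the line still connected?
rho = np.array([0.9, 0.4, 0.6, 0.3])          # normalized loading per line

if not np.sum(status):
    attack = None          # every attackable line is already disconnected
else:
    weights = rho[status]
    if weights.sum() <= 0.:
        attack = None      # connected lines all carry zero flow: rare, skip
    else:
        candidates = np.flatnonzero(status)
        attack = prng.choice(candidates, p=weights / weights.sum())
```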
13 changes: 13 additions & 0 deletions grid2op/tests/test_MakeEnv.py
@@ -643,5 +643,18 @@ def test_create_from_path(self):
assert isinstance(env, MultiMixEnvironment)


class TestHashEnv(unittest.TestCase):
def test_hash(self):
from grid2op.MakeEnv.UpdateEnv import _hash_env

with warnings.catch_warnings():
warnings.filterwarnings("ignore")
env = make("l2rpn_case14_sandbox", test=True)
path_ = env.get_path_env()
hash_this_env = _hash_env(path_)
assert hash_this_env.hexdigest() == "35791e669b84c5da16061ab6aaf3f4748d32871a16fd97ebfc7acbf83104dbc00bc7878481fe35e14236f14eb86610700734f756295675dd5d8d0d918cec3770", \
f"wrong hash digest. It's \n\t{hash_this_env.hexdigest()}"


if __name__ == "__main__":
unittest.main()
8 changes: 8 additions & 0 deletions grid2op/tests/test_MultiMix.py
@@ -29,6 +29,14 @@ def test_creation(self):
assert mme.current_obs is not None
assert mme.current_env is not None

def test_get_path_env(self):
mme = MultiMixEnvironment(PATH_DATA_MULTIMIX)
path_mme = mme.get_path_env()
for mix in mme:
path_mix = mix.get_path_env()
assert path_mme != path_mix
assert os.path.split(path_mix)[0] == path_mme

def test_create_fail(self):
with self.assertRaises(EnvError):
mme = MultiMixEnvironment("/tmp/error")