Commit: Adding example submission
maxstanden committed Dec 16, 2022
1 parent e1573b7 commit 3045f16
Showing 17 changed files with 1,071 additions and 44 deletions.
90 changes: 46 additions & 44 deletions CybORG/Evaluation/evaluation.py
@@ -1,80 +1,82 @@
 import inspect
 import subprocess
 import time
 from statistics import mean, stdev
 
 from CybORG import CybORG, CYBORG_VERSION
 from CybORG.Agents import RandomAgent
 from CybORG.Agents.Wrappers.PettingZooParallelWrapper import PettingZooParallelWrapper
 from CybORG.Simulator.Scenarios import DroneSwarmScenarioGenerator
 
-MAX_EPS = 100
 from datetime import datetime
 
+# this imports a submissions agents
+from CybORG.Evaluation.submission.submission import agents, wrap
 
-def wrap(env):
-    return PettingZooParallelWrapper(env=env)
 
+def run_evaluation(name, team, name_of_agent, max_eps, write_to_file=True):
 
-def get_git_revision_hash() -> str:
-    return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
-
-
-if __name__ == "__main__":
     cyborg_version = CYBORG_VERSION
     scenario = 'Scenario3'
     # commit_hash = get_git_revision_hash()
-    # ask for a name
-    name = input('Name: ')
-    # ask for a team
-    team = input("Team: ")
-    # ask for a name for the agent
-    name_of_agent = input("Name of technique: ")
 
-    lines = inspect.getsource(wrap)
-    wrap_line = lines.split('\n')[1].split('return ')[1]
 
     sg = DroneSwarmScenarioGenerator()
-    cyborg = CybORG(sg, 'sim')
-    wrapped_cyborg = wrap(cyborg)
 
-    # Change this line to load your agents
-    agents = {agent: RandomAgent() for agent in wrapped_cyborg.possible_agents}
-
-    print(f'Using agents {agents}, if this is incorrect please update the code to load in your agent')
-
-    file_name = str(inspect.getfile(CybORG))[:-7] + '/Evaluation/' + time.strftime("%Y%m%d_%H%M%S") + '.txt'
-    print(f'Saving evaluation results to {file_name}')
-    with open(file_name, 'a+') as data:
-        data.write(f'CybORG v{cyborg_version}, {scenario}\n')
-        data.write(f'author: {name}, team: {team}, technique: {name_of_agent}\n')
-        data.write(f"wrappers: {wrap_line}\n")
-        data.write(f"agent assignment: {agents}")
+    if write_to_file:
+        file_name = str(inspect.getfile(CybORG))[:-7] + '/Evaluation/' + time.strftime("%Y%m%d_%H%M%S")
+        print(f'Saving evaluation results to {file_name}_summary.txt and {file_name}_full.txt')
     start = datetime.now()
 
+    print(f'using CybORG v{cyborg_version}, {scenario}\n')
+
+    cyborg = CybORG(sg, 'sim')
+    wrapped_cyborg = wrap(cyborg)
 
 
     total_reward = []
     actions_log = []
-    for i in range(MAX_EPS):
+    obs_log = []
+    for i in range(max_eps):
         observations = wrapped_cyborg.reset()
         action_spaces = wrapped_cyborg.action_spaces
         r = []
         a = []
         o = []
         # cyborg.env.env.tracker.render()
         count = 0
         for j in range(500):
-            actions = {agent_name: agents[agent_name].get_action(observations[agent_name], action_spaces[agent_name]) for agent_name in wrapped_cyborg.agents}
+            actions = {agent_name: agent.get_action(observations[agent_name], action_spaces[agent_name]) for agent_name, agent in agents.items() if agent_name in wrapped_cyborg.agents}
             observations, rew, done, info = wrapped_cyborg.step(actions)
             if all(done.values()):
                 break
             r.append(mean(rew.values()))
-            a.append({agent_name: str(cyborg.get_last_action(agent_name)) for agent_name in wrapped_cyborg.agents})
+            if write_to_file:
+                #a.append({agent_name: str(cyborg.get_last_action(agent_name)) for agent_name in wrapped_cyborg.agents})
+                a.append({agent_name: wrapped_cyborg.get_action_space(agent_name)[actions[agent_name]] for agent_name in actions.keys()})
+                o.append({agent_name: observations[agent_name] for agent_name in observations.keys()})
         total_reward.append(sum(r))
-        actions_log.append(a)
+        if write_to_file:
+            actions_log.append(a)
+            obs_log.append(o)
     end = datetime.now()
     difference = end - start
     print(f'Average reward is: {mean(total_reward)} with a standard deviation of {stdev(total_reward)}')
-    with open(file_name, 'a+') as data:
-        data.write(f'mean: {mean(total_reward)}, standard deviation {stdev(total_reward)}\n')
-        for act, sum_rew in zip(actions_log, total_reward):
-            data.write(f'actions: {act}, total reward: {sum_rew}\n')
     print(f'file took {difference} amount of time to finish evaluation')
+    if write_to_file:
+        with open(file_name+'_summary.txt', 'w') as data:
+            data.write(f'CybORG v{cyborg_version}, {scenario}\n')
+            data.write(f'author: {name}, team: {team}, technique: {name_of_agent}\n')
+            data.write(f'Average reward is: {mean(total_reward)} with a standard deviation of {stdev(total_reward)}')
+            data.write(f'Using agents {agents}')
+
+        with open(file_name+'_full.txt', 'w') as data:
+            data.write(f'CybORG v{cyborg_version}, {scenario}\n')
+            data.write(f'author: {name}, team: {team}, technique: {name_of_agent}\n')
+            data.write(f'mean: {mean(total_reward)}, standard deviation {stdev(total_reward)}\n')
+            for act, obs, sum_rew in zip(actions_log, obs_log, total_reward):
+                data.write(f'actions: {act},\n observations: {obs} \n total reward: {sum_rew}\n')
+
+
+if __name__ == "__main__":
+    # ask for a name
+    name = input('Name: ')
+    # ask for a team
+    team = input("Team: ")
+    # ask for a name for the agent
+    technique = input("Name of technique: ")
+    run_evaluation(name, team, technique, 100)
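
For reference, the refactored script can also be driven directly from Python rather than via the interactive prompts; the call below is a minimal sketch (the argument values are illustrative, and the import path assumes the repository has been installed with `pip install -e .`, as in the example Dockerfile):

```python
# Minimal sketch: run a short, file-free evaluation while developing agents.
# The official evaluation uses max_eps=100 and writes the summary/full result files.
from CybORG.Evaluation.evaluation import run_evaluation

run_evaluation('Max', 'Example Submission', 'Random Agents', max_eps=10, write_to_file=False)
```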
103 changes: 103 additions & 0 deletions CybORG/Evaluation/submission/20221123_120807_full.txt


3 changes: 3 additions & 0 deletions CybORG/Evaluation/submission/20221123_120807_summary.txt
@@ -0,0 +1,3 @@
CybORG v2.0, Scenario3
author: Max, team: Example Submission, technique: Random Agents
Average reward is: -4383.83 with a standard deviation of 1603.0438159509508
Using agents {'blue_agent_0': RandomAgent, 'blue_agent_1': RandomAgent, 'blue_agent_2': RandomAgent, 'blue_agent_3': RandomAgent, 'blue_agent_4': RandomAgent, 'blue_agent_5': RandomAgent, 'blue_agent_6': RandomAgent, 'blue_agent_7': RandomAgent, 'blue_agent_8': RandomAgent, 'blue_agent_9': RandomAgent, 'blue_agent_10': RandomAgent, 'blue_agent_11': RandomAgent, 'blue_agent_12': RandomAgent, 'blue_agent_13': RandomAgent, 'blue_agent_14': RandomAgent, 'blue_agent_15': RandomAgent, 'blue_agent_16': RandomAgent, 'blue_agent_17': RandomAgent}
58 changes: 58 additions & 0 deletions CybORG/Evaluation/submission/RandomAgent.py
@@ -0,0 +1,58 @@
from inspect import signature
from typing import Union

from gym import Space

from CybORG.Agents.SimpleAgents.BaseAgent import BaseAgent

from CybORG.Simulator.Actions import Sleep


class RandomAgent(BaseAgent):
    """Takes a random action or a test action based on the epsilon value"""

    def __init__(self, test_action=None, epsilon=1.0, np_random=None):
        super().__init__(np_random)
        self.test_action = test_action
        self.epsilon = epsilon
        self.action_params = None

    def train(self, results):
        pass

    def get_action(self, observation: dict, action_space: Union[Space, dict]):
        if (self.np_random.random() < self.epsilon) or (self.test_action is None):
            # select random action
            if isinstance(action_space, Space):
                return action_space.sample()
            elif type(action_space) is dict:
                invalid_actions = []
                while True:
                    options = [i for i, v in action_space['action'].items() if v and i not in invalid_actions]
                    if len(options) > 0:
                        action_class = self.np_random.choice(options)
                    else:
                        return Sleep()
                    # select random options
                    action_params = {}
                    for param_name in self.action_params[action_class]:
                        options = [i for i, v in action_space[param_name].items() if v]
                        if len(options) > 0:
                            action_params[param_name] = self.np_random.choice(options)
                        else:
                            invalid_actions.append(action_class)
                            action_params = None
                            break
                    if action_params is not None:
                        return action_class(**action_params)
            else:
                raise ValueError("Random agent can only handle Space or dict action space")
        else:
            return self.test_action

    def end_episode(self):
        pass

    def set_initial_values(self, action_space, observation):
        if type(action_space) is dict:
            self.action_params = {action_class: signature(action_class).parameters for action_class in action_space['action'].keys()}
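
As a rough usage sketch (not part of the submitted file), the constructor arguments control whether the agent explores or replays a fixed action: with epsilon=0.0 and a test_action supplied, get_action always returns the fixed action, while the default epsilon=1.0 samples from the gym Space or dict action space instead (the dict path additionally requires set_initial_values to be called first so that self.action_params is populated):

```python
# Illustrative only; assumes the submission package is importable so that the
# RandomAgent defined above can be imported.
from CybORG.Evaluation.submission.RandomAgent import RandomAgent
from CybORG.Simulator.Actions import Sleep

fixed = RandomAgent(test_action=Sleep(), epsilon=0.0)
# With epsilon=0.0 the random branch is never taken, so the fixed action is
# returned and the (empty) action space is never inspected.
print(fixed.get_action(observation={}, action_space={}))
```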
Empty file.
65 changes: 65 additions & 0 deletions CybORG/Evaluation/submission/docker_instructions.md
@@ -0,0 +1,65 @@
# Docker Instructions

These instructions cover how to build and run a Docker container from a Dockerfile for the purpose of evaluating agents in CybORG.

These instructions assume a basic familiarity with Docker. If you are unfamiliar with Docker, please see https://docs.docker.com/get-started/ for further information.

## Dockerfile

The Dockerfile contains a list of instructions for creating the environment required to run your agent.

```dockerfile
# Set Ubuntu and Python versions from pre-built images
FROM ubuntu:22.10
FROM python:3.7.9

# Set working directory to /cage
WORKDIR /cage

# Copy local package requirements and init script into container's /cage folder
COPY . /cage

# Install packages
RUN pip install -e .

# Example of adding additional instructions
# RUN pip install stable_baselines3

# Run evaluation script
ENTRYPOINT ["python", "/cage/CybORG/Evaluation/validation.py"]
```

We have included this [example Dockerfile](../../../Dockerfile) in the base of the repo. You can use this file as a basis for your submission. Edit it as necessary to create the environment needed to run your agents, for example by adding additional pip install instructions, changing the Python version, or changing the operating system.

## Building the container

It is important that the Dockerfile is located at the base of the CybORG repository. From here you can create an image by entering the following into a terminal:

```
docker build -t {IMAGE NAME} {PATH TO THIS DIRECTORY}
```

with the arguments in braces replaced by your own values.

For example, if you want to create an image named "cage", and the CybORG repository is located at "/home/username/cyborg", you would use:

```
docker build -t cage /home/username/cyborg/
```

## Running the container
After creating an image, you can run it as a container, which will automatically run the evaluation script.

To run the container, enter the following:

```
docker run {IMAGE NAME}
```

For example, using the cage image created earlier, you would use:

```
docker run cage
```

Please check that these instructions work with your Dockerfile before submitting.
38 changes: 38 additions & 0 deletions CybORG/Evaluation/submission/main_readme_changes.md
@@ -0,0 +1,38 @@
Was:
## How to submit responses

Any queries re the challenge can be submitted via email to: [email protected]

We will update this section with further details on how to submit Blue agents in coming weeks. Please watch this repo for updates.

Submissions should include at least the following:
* A team name and contact details.
* The code implementing the agent, with a list of dependencies.
* A description of your approach in developing a Blue agent.
* The files and terminal output of the evaluation function.


Now:
## How to submit responses

Submissions can be made to [email protected]. The submission can be an email with all of the required files attached, or you can grant the cage-challenge account read access to a private repo.

Please submit the following:
- A team name and contact details
- All files required to run the agents
- submission.py with:
- agents dict
- wrap function
- submission_name string
- submission_team string
- submission_technique string
- Dockerfile
- Description of approach
- Summary of evaluation results
- Full evaluation results

For more detailed instructions please see the [submission readme](CybORG/Evaluation/submission/submission_readme.md)

We are also imposing a time limit on submissions. All submissions should complete 100 episodes within 1 hour. We will use an AWS C4 EC2 instance to run the validation.

Any queries re the challenge can be submitted via email to: [email protected]
11 changes: 11 additions & 0 deletions CybORG/Evaluation/submission/submission.py
@@ -0,0 +1,11 @@
from CybORG.Agents.Wrappers import PettingZooParallelWrapper
from .RandomAgent import RandomAgent

agents = {f"blue_agent_{agent}": RandomAgent() for agent in range(18)}

def wrap(env):
    return PettingZooParallelWrapper(env=env)

submission_name = 'example'
submission_team = 'example_team'
submission_technique = 'Random Agents'
59 changes: 59 additions & 0 deletions CybORG/Evaluation/submission/submission_readme.md
@@ -0,0 +1,59 @@
# Example submission
This folder contains an example of a valid submission and provides examples of the files that you need to include in your submission.

## Validation of results

This folder should contain all the files needed to run your agents, so that we can validate the results of your submission.
There should be a file called submission.py which contains a dictionary called _agents_.
You should define the blue agent names blue_agent_0, blue_agent_1, ..., blue_agent_17 as keys in your _agents_ dictionary and assign your blue agent objects as values for these keys. We will assign the [SleepAgent](../../Agents/SimpleAgents/ConstantAgent.py) to any undefined agent names.
Your blue agent class should inherit from the [BaseAgent class](../../Agents/SimpleAgents/BaseAgent.py) and provide an implementation for the functions in the BaseAgent class.
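
For example, a skeleton for a custom agent might look like the sketch below; the class name and the constant action it returns are placeholders, and the method names mirror those implemented by the example RandomAgent in this folder (see also the agents dictionary example that follows):

```python3
from CybORG.Agents.SimpleAgents.BaseAgent import BaseAgent


class MyBlueAgent(BaseAgent):
    """Placeholder agent that always returns action index 0."""

    def train(self, results):
        # no learning during evaluation
        pass

    def get_action(self, observation, action_space):
        # replace this with your policy
        return 0

    def end_episode(self):
        pass

    def set_initial_values(self, action_space, observation):
        pass
```

Your _agents_ dictionary would then map each blue agent name to an instance of your own class rather than RandomAgent.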

The example submission.py file in this folder has code that creates a RandomAgent object for each of the blue agents in the environment. The code snippet below illustrates how to instantiate a Python dictionary containing 18 RandomAgent objects.

```python3
from .RandomAgent import RandomAgent

agents = {f"blue_agent_{agent}": RandomAgent() for agent in range(18)}
```


The submission.py file also contains the wrap function, which wraps the CybORG environment to alter its interface. The following example illustrates a wrap function using the PettingZooParallelWrapper.
```python3
from CybORG.Agents.Wrappers import PettingZooParallelWrapper

def wrap(env):
    return PettingZooParallelWrapper(env=env)
```
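
Before submitting, it can be worth smoke-testing the agents dictionary and the wrap function together; the sketch below mirrors one step of the evaluation loop (it assumes your submission package is importable under the path shown, so adjust the import to wherever your files live):

```python3
from CybORG import CybORG
from CybORG.Simulator.Scenarios import DroneSwarmScenarioGenerator
# Hypothetical import path; adjust it to your own submission package.
from CybORG.Evaluation.submission.submission import agents, wrap

env = wrap(CybORG(DroneSwarmScenarioGenerator(), 'sim'))
observations = env.reset()

# One step with every active agent, mirroring evaluation.py.
actions = {name: agents[name].get_action(observations[name], env.action_spaces[name])
           for name in env.agents}
observations, rewards, dones, infos = env.step(actions)
print(rewards)
```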

Other important parts of the submission are the full evaluation results and the results summary. A summary of the results is printed to the terminal and written to a date- and time-stamped file with the format date_time_summary.txt. The full results are also written to a file with the format date_time_full.txt. Please include both of these files in your submission.

Finally, please include a Dockerfile that creates a container to run your agents.
This will help us ensure that your agents run as intended.
We have included an example [Dockerfile](../../../Dockerfile) in the base of this repo, together with [instructions](docker_instructions.md) on how to use Docker for the purpose of evaluating agents in CybORG.

# Description of approach

As part of your submission, we request that you share a description of the methods/techniques used in developing your agents.
We will use this information as part of our in-depth analysis and comparison of the various techniques submitted to the challenge.
In hosting the CAGE challenges, one of our main goals is to understand the techniques that lead to effective autonomous cyber defensive agents, as well as those that are not as effective.
We are planning on publishing the analysis and taxonomy of the different approaches that create autonomous cyber defensive agents.
To that end, we encourage you to also share details on any unsuccessful approaches taken. Please also feel free to share any interesting discoveries and thoughts regarding future work to help us shape the future of the CAGE Challenges.

We provide a [latex template](submission_template_example/template_readme.md) as a guide for writing your description.
An exemplar description can be found [here](https://arxiv.org/pdf/2211.15557.pdf).

# Checklist for submission

Please include all of the following in your submission:

- All files required to run the agents
- A file named submission.py containing the following:
- agents dictionary
- wrap function
- submission_name string
- submission_team string
- submission_technique string
- A Dockerfile that creates the environment required to run your agents
- Description of approach
- Summary of evaluation results
- Full evaluation results
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2020 George Kour

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.