Skip to content

Commit

Permalink
[tune] added average scope to experiment analysis (ray-project#8445)
Browse files — browse the repository at this point in the history
  • Authored by krfricke on May 14, 2020
1 parent ef20564 commit 4633d81
Show file tree
Hide file tree
Showing 3 changed files with 38 additions and 16 deletions.
20 changes: 14 additions & 6 deletions python/ray/tune/analysis/experiment_analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -220,8 +220,10 @@ def get_best_trial(self, metric, mode="max", scope="all"):
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
scope (str): One of [all, last]. If `scope=last`, only look at
scope (str): One of [all, last, avg]. If `scope=last`, only look at
each trial's final step for `metric`, and compare across
trials based on `mode=[min,max]`. If `scope=avg`, consider the
simple average over all steps for `metric` and compare across
trials based on `mode=[min,max]`. If `scope=all`, find each
trial's min/max score for `metric` based on `mode`, and
compare trials based on `mode=[min,max]`.
Expand All @@ -231,11 +233,11 @@ def get_best_trial(self, metric, mode="max", scope="all"):
"ExperimentAnalysis: attempting to get best trial for "
"metric {} for mode {} not in [\"max\", \"min\"]".format(
metric, mode))
if scope not in ["all", "last"]:
if scope not in ["all", "last", "avg"]:
raise ValueError(
"ExperimentAnalysis: attempting to get best trial for "
"metric {} for scope {} not in [\"all\", \"last\"]".format(
metric, scope))
"metric {} for scope {} not in [\"all\", \"last\", \"avg\"]".
format(metric, scope))
best_trial = None
best_metric_score = None
for trial in self.trials:
Expand All @@ -244,6 +246,8 @@ def get_best_trial(self, metric, mode="max", scope="all"):

if scope == "last":
metric_score = trial.metric_analysis[metric]["last"]
elif scope == "avg":
metric_score = trial.metric_analysis[metric]["avg"]
else:
metric_score = trial.metric_analysis[metric][mode]

Expand All @@ -269,8 +273,10 @@ def get_best_config(self, metric, mode="max", scope="all"):
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
scope (str): One of [all, last]. If `scope=last`, only look at
scope (str): One of [all, last, avg]. If `scope=last`, only look at
each trial's final step for `metric`, and compare across
trials based on `mode=[min,max]`. If `scope=avg`, consider the
simple average over all steps for `metric` and compare across
trials based on `mode=[min,max]`. If `scope=all`, find each
trial's min/max score for `metric` based on `mode`, and
compare trials based on `mode=[min,max]`.
Expand All @@ -286,8 +292,10 @@ def get_best_logdir(self, metric, mode="max", scope="all"):
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
scope (str): One of [all, last]. If `scope=last`, only look at
scope (str): One of [all, last, avg]. If `scope=last`, only look at
each trial's final step for `metric`, and compare across
trials based on `mode=[min,max]`. If `scope=avg`, consider the
simple average over all steps for `metric` and compare across
trials based on `mode=[min,max]`. If `scope=all`, find each
trial's min/max score for `metric` based on `mode`, and
compare trials based on `mode=[min,max]`.
Expand Down
27 changes: 18 additions & 9 deletions python/ray/tune/tests/test_experiment_analysis_mem.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import tempfile
import random
import pandas as pd
import numpy as np

import ray
from ray.tune import run, Trainable, sample_from, Analysis, grid_search
Expand All @@ -12,16 +13,17 @@
class ExperimentAnalysisInMemorySuite(unittest.TestCase):
def setUp(self):
class MockTrainable(Trainable):
scores_dict = {
0: [5, 4, 0],
1: [4, 3, 1],
2: [2, 1, 8],
3: [9, 7, 6],
4: [7, 5, 3]
}

def _setup(self, config):
self.id = config["id"]
self.idx = 0
self.scores_dict = {
0: [5, 0],
1: [4, 1],
2: [2, 8],
3: [9, 6],
4: [7, 3]
}

def _train(self):
val = self.scores_dict[self.id][self.idx]
Expand All @@ -43,14 +45,15 @@ def tearDown(self):

def testCompareTrials(self):
self.test_dir = tempfile.mkdtemp()
scores_all = [5, 4, 2, 9, 7, 0, 1, 8, 6, 3]
scores = np.asarray(list(self.MockTrainable.scores_dict.values()))
scores_all = scores.flatten("F")
scores_last = scores_all[5:]

ea = run(
self.MockTrainable,
name="analysis_exp",
local_dir=self.test_dir,
stop={"training_iteration": 2},
stop={"training_iteration": 3},
num_samples=1,
config={"id": grid_search(list(range(5)))})

Expand All @@ -60,9 +63,15 @@ def testCompareTrials(self):
"min").metric_analysis["score"]["min"]
max_last = ea.get_best_trial("score", "max",
"last").metric_analysis["score"]["last"]
max_avg = ea.get_best_trial("score", "max",
"avg").metric_analysis["score"]["avg"]
min_avg = ea.get_best_trial("score", "min",
"avg").metric_analysis["score"]["avg"]
self.assertEqual(max_all, max(scores_all))
self.assertEqual(min_all, min(scores_all))
self.assertEqual(max_last, max(scores_last))
self.assertAlmostEqual(max_avg, max(np.mean(scores, axis=1)))
self.assertAlmostEqual(min_avg, min(np.mean(scores, axis=1)))
self.assertNotEqual(max_last, max(scores_all))


Expand Down
7 changes: 6 additions & 1 deletion python/ray/tune/trial.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,7 +214,7 @@ def __init__(self,
self.last_result = {}
self.last_update_time = -float("inf")

# stores in memory max/min/last result for each metric by trial
# stores in memory max/min/avg/last result for each metric by trial
self.metric_analysis = {}

self.export_formats = export_formats
Expand Down Expand Up @@ -476,13 +476,18 @@ def update_last_result(self, result, terminate=False):
self.metric_analysis[metric] = {
"max": value,
"min": value,
"avg": value,
"last": value
}
else:
step = result["training_iteration"] or 1
self.metric_analysis[metric]["max"] = max(
value, self.metric_analysis[metric]["max"])
self.metric_analysis[metric]["min"] = min(
value, self.metric_analysis[metric]["min"])
self.metric_analysis[metric]["avg"] = 1 / step * (
value +
(step - 1) * self.metric_analysis[metric]["avg"])
self.metric_analysis[metric]["last"] = value

def get_trainable_cls(self):
Expand Down

0 comments on commit 4633d81

Please sign in to comment.