From 8c6399ed1a973715bd843e711db08883cf349087 Mon Sep 17 00:00:00 2001 From: Julian Date: Sat, 11 Aug 2018 09:23:38 -0400 Subject: [PATCH] VERSION 0.2.0 --- README.rst | 78 ++++---------- docs/source/_installation.rst | 10 +- docs/source/_usage.rst | 66 ++---------- pymoo/algorithms/MOEAD.py | 39 ++++--- pymoo/algorithms/ernsga2.py | 2 +- pymoo/algorithms/genetic_algorithm.py | 4 +- pymoo/algorithms/nsga2.py | 35 +++--- pymoo/algorithms/nsga3.py | 32 ++++-- pymoo/algorithms/rnsga3.py | 42 ++++---- pymoo/algorithms/so_DE.py | 17 +-- pymoo/algorithms/so_genetic_algorithm.py | 4 +- pymoo/indicators/gd.py | 4 +- pymoo/indicators/igd.py | 6 +- pymoo/model/algorithm.py | 77 +++++++------ pymoo/model/population.py | 35 ++++-- .../real_differental_evolution_crossover.py | 2 +- .../survival/reference_line_survival.py | 8 +- .../survival/rnsga_reference_line_survival.py | 21 ++-- pymoo/optimize.py | 35 +++--- pymoo/rand/random.py | 1 + pymoo/rand/random_generator.py | 5 + pymoo/run_algorithm.py | 101 +++--------------- pymoo/run_experiment.py | 6 +- pymoo/util/decomposition.py | 10 ++ pymoo/util/display.py | 2 + pymoo/util/misc.py | 6 -- pymoo/util/normalization.py | 37 +++++++ pymoo/util/plotting.py | 76 +++++++++++++ setup.py | 4 +- tests/rmetric.py | 6 +- 30 files changed, 400 insertions(+), 371 deletions(-) create mode 100644 pymoo/util/decomposition.py create mode 100644 pymoo/util/plotting.py diff --git a/README.rst b/README.rst index 2f515bf6d..67e0f7afe 100644 --- a/README.rst +++ b/README.rst @@ -10,6 +10,14 @@ The test problems are uploaded to the PyPi Repository. pip install pymoo +For the current development version: + +.. code:: bash + + git clone https://github.com/msu-coinlab/pymoo + cd pymoo + python setup.py install + Implementations ================================== @@ -32,10 +40,6 @@ many-objective problems. The survival selection uses the perpendicular distance to the reference directions. As normalization the boundary intersection method is used [5]. -**R-NSGA-III** : A reference-point based algorithm for solving -many-objective problems. An extension on the NSGA-III algorithm -R-NSGA-III is used to find solutions around a user specified region. - **MOEA/D** : The classical MOEA/D implementation using the Tchebycheff decomposition function. 
@@ -59,26 +63,22 @@ Usage import time - from matplotlib import animation - import matplotlib.pyplot as plt import numpy as np + from pymoo.util.plotting import plot, animate if __name__ == '__main__': # load the problem instance - from pymop.zdt import ZDT1 + from pymop.problems.zdt import ZDT1 problem = ZDT1(n_var=30) # create the algorithm instance by specifying the intended parameters - from pymoo.algorithms.NSGAII import NSGAII - algorithm = NSGAII("real", pop_size=100, verbose=True) + from pymoo.algorithms.nsga2 import NSGA2 + algorithm = NSGA2(pop_size=100) start_time = time.time() - # save the history in an object to observe the convergence over generations - history = [] - # number of generations to run it n_gen = 200 @@ -88,60 +88,20 @@ Usage seed=2, return_only_feasible=False, return_only_non_dominated=False, - history=history) + disp=True, + save_history=True) print("--- %s seconds ---" % (time.time() - start_time)) scatter_plot = True save_animation = True - # get the problem dimensionality - is_2d = problem.n_obj == 2 - is_3d = problem.n_obj == 3 - - if scatter_plot and is_2d: - plt.scatter(F[:, 0], F[:, 1]) - plt.show() - - if scatter_plot and is_3d: - fig = plt.figure() - from mpl_toolkits.mplot3d import Axes3D - ax = fig.add_subplot(111, projection='3d') - ax.scatter(F[:, 0], F[:, 1], F[:, 2]) - plt.show() - - # create an animation to watch the convergence over time - if is_2d and save_animation: - - fig = plt.figure() - ax = plt.gca() - - _F = history[0]['F'] - pf = problem.pareto_front() - plt.scatter(pf[:,0], pf[:,1], label='Pareto Front', s=60, facecolors='none', edgecolors='r') - scat = plt.scatter(_F[:, 0], _F[:, 1]) - - - def update(frame_number): - _F = history[frame_number]['F'] - scat.set_offsets(_F) - - # get the bounds for plotting and add padding - min = np.min(_F, axis=0) - 0.1 - max = np.max(_F, axis=0) + 0. - - # set the scatter object with padding - ax.set_xlim(min[0], max[0]) - ax.set_ylim(min[1], max[1]) - - - # create the animation - ani = animation.FuncAnimation(fig, update, frames=range(n_gen)) + if scatter_plot: + plot(F) - # write the file - Writer = animation.writers['ffmpeg'] - writer = Writer(fps=6, bitrate=1800) - ani.save('%s.mp4' % problem.name(), writer=writer) + if save_animation: + H = np.concatenate([e['pop'].F[None, :, :] for e in algorithm.history], axis=0) + animate('%s.mp4' % problem.name(), H, problem) Contact ================================== diff --git a/docs/source/_installation.rst b/docs/source/_installation.rst index e58a4ac0c..e02954bb4 100644 --- a/docs/source/_installation.rst +++ b/docs/source/_installation.rst @@ -3,4 +3,12 @@ The test problems are uploaded to the PyPi Repository. .. code:: bash - pip install pymoo \ No newline at end of file + pip install pymoo + +For the current development version: + +.. 
code:: bash + + git clone https://github.com/msu-coinlab/pymoo + cd pymoo + python setup.py install \ No newline at end of file diff --git a/docs/source/_usage.rst b/docs/source/_usage.rst index c687ffda3..8e072dc47 100644 --- a/docs/source/_usage.rst +++ b/docs/source/_usage.rst @@ -2,26 +2,22 @@ import time - from matplotlib import animation - import matplotlib.pyplot as plt import numpy as np + from pymoo.util.plotting import plot, animate if __name__ == '__main__': # load the problem instance - from pymop.zdt import ZDT1 + from pymop.problems.zdt import ZDT1 problem = ZDT1(n_var=30) # create the algorithm instance by specifying the intended parameters - from pymoo.algorithms.NSGAII import NSGAII - algorithm = NSGAII("real", pop_size=100, verbose=True) + from pymoo.algorithms.nsga2 import NSGA2 + algorithm = NSGA2(pop_size=100) start_time = time.time() - # save the history in an object to observe the convergence over generations - history = [] - # number of generations to run it n_gen = 200 @@ -31,57 +27,17 @@ seed=2, return_only_feasible=False, return_only_non_dominated=False, - history=history) + disp=True, + save_history=True) print("--- %s seconds ---" % (time.time() - start_time)) scatter_plot = True save_animation = True - # get the problem dimensionality - is_2d = problem.n_obj == 2 - is_3d = problem.n_obj == 3 - - if scatter_plot and is_2d: - plt.scatter(F[:, 0], F[:, 1]) - plt.show() - - if scatter_plot and is_3d: - fig = plt.figure() - from mpl_toolkits.mplot3d import Axes3D - ax = fig.add_subplot(111, projection='3d') - ax.scatter(F[:, 0], F[:, 1], F[:, 2]) - plt.show() - - # create an animation to watch the convergence over time - if is_2d and save_animation: - - fig = plt.figure() - ax = plt.gca() - - _F = history[0]['F'] - pf = problem.pareto_front() - plt.scatter(pf[:,0], pf[:,1], label='Pareto Front', s=60, facecolors='none', edgecolors='r') - scat = plt.scatter(_F[:, 0], _F[:, 1]) - - - def update(frame_number): - _F = history[frame_number]['F'] - scat.set_offsets(_F) - - # get the bounds for plotting and add padding - min = np.min(_F, axis=0) - 0.1 - max = np.max(_F, axis=0) + 0. 
- - # set the scatter object with padding - ax.set_xlim(min[0], max[0]) - ax.set_ylim(min[1], max[1]) - - - # create the animation - ani = animation.FuncAnimation(fig, update, frames=range(n_gen)) + if scatter_plot: + plot(F) - # write the file - Writer = animation.writers['ffmpeg'] - writer = Writer(fps=6, bitrate=1800) - ani.save('%s.mp4' % problem.name(), writer=writer) + if save_animation: + H = np.concatenate([e['pop'].F[None, :, :] for e in algorithm.history], axis=0) + animate('%s.mp4' % problem.name(), H, problem) diff --git a/pymoo/algorithms/MOEAD.py b/pymoo/algorithms/MOEAD.py index 755b0cfe7..df2576715 100644 --- a/pymoo/algorithms/MOEAD.py +++ b/pymoo/algorithms/MOEAD.py @@ -2,24 +2,36 @@ from scipy.spatial.distance import cdist from pymoo.algorithms.genetic_algorithm import GeneticAlgorithm -from pymoo.model.survival import Survival -from pymoo.operators.default_operators import set_default_if_none +from pymoo.operators.crossover.real_differental_evolution_crossover import DifferentialEvolutionCrossover +from pymoo.operators.default_operators import set_if_none +from pymoo.operators.sampling.real_random_sampling import RealRandomSampling from pymoo.rand import random +from pymoo.util.decomposition import tchebi from pymoo.util.display import disp_multi_objective from pymop.util import get_uniform_weights class MOEAD(GeneticAlgorithm): def __init__(self, - var_type="real", + pop_size=100, n_neighbors=15, + prob=0.5, + weight=0.8, + method="binomial", **kwargs): - #set_if_none(kwargs, "crossover", DifferentialEvolutionCrossover()) - set_default_if_none(var_type, kwargs) + self.n_neighbors = n_neighbors + + set_if_none(kwargs, 'pop_size', pop_size) + set_if_none(kwargs, 'sampling', RealRandomSampling()) + set_if_none(kwargs, 'crossover', DifferentialEvolutionCrossover(prob=prob, weight=weight, method=method)) + set_if_none(kwargs, 'selection', None) + set_if_none(kwargs, 'mutation', None) + set_if_none(kwargs, 'survival', None) super().__init__(**kwargs) - self.n_neighbors = n_neighbors + + self.func_display_attrs = disp_multi_objective # initialized when problem is known self.weights = None @@ -34,12 +46,8 @@ def _initialize(self): # neighbours includes the entry by itself intentionally for the survival method self.neighbours = np.argsort(cdist(self.weights, self.weights), axis=1)[:, :self.n_neighbors + 1] - # survival selection is included in the _mating method - self.survival = Survival() - - pop = super()._initialize() - # set the initial ideal point + pop = super()._initialize() self.ideal_point = np.min(pop.F, axis=0) return pop @@ -55,7 +63,6 @@ def _next(self, pop): # do recombination and create an offspring X = self.crossover.do(self.problem, pop.X[None, parents, :], X=pop.X[[i], :]) - X = self.mutation.do(self.problem, X) # evaluate the offspring F, _ = self.evaluator.eval(self.problem, X) @@ -76,11 +83,3 @@ def _next(self, pop): off_is_better = self.neighbours[i][np.where(off_FV < FV)[0]] pop.F[off_is_better, :] = F[k, :] pop.X[off_is_better, :] = X[k, :] - - def _display_attrs(self, D): - return disp_multi_objective(self.problem, self.evaluator, D) - - -def tchebi(F, weights, ideal_point): - v = np.abs((F - ideal_point) * weights) - return np.max(v, axis=1) diff --git a/pymoo/algorithms/ernsga2.py b/pymoo/algorithms/ernsga2.py index 33e260beb..16203fcf4 100644 --- a/pymoo/algorithms/ernsga2.py +++ b/pymoo/algorithms/ernsga2.py @@ -5,7 +5,7 @@ from pymoo.operators.survival.reference_point_survival import ReferencePointSurvival -class ERNSGAII(GeneticAlgorithm): +class 
ERNSGA2(GeneticAlgorithm): def __init__(self, var_type, diff --git a/pymoo/algorithms/genetic_algorithm.py b/pymoo/algorithms/genetic_algorithm.py index b124fee01..2a5951188 100644 --- a/pymoo/algorithms/genetic_algorithm.py +++ b/pymoo/algorithms/genetic_algorithm.py @@ -99,7 +99,7 @@ def _solve(self, problem, evaluator): # initialize the first population and evaluate it pop = self._initialize() - self._each_iteration({'algorithm': self, 'n_gen': n_gen, 'pop': pop, **self.D}, first=True) + self._each_iteration({'n_gen': n_gen, 'pop': pop, **self.D}, first=True) # while there are functions evaluations left while evaluator.has_remaining(): @@ -109,7 +109,7 @@ def _solve(self, problem, evaluator): self._next(pop) # execute the callback function in the end of each generation - self._each_iteration({'algorithm': self, 'n_gen': n_gen, 'pop': pop, **self.D}) + self._each_iteration({'n_gen': n_gen, 'pop': pop, **self.D}) return pop.X, pop.F, pop.G diff --git a/pymoo/algorithms/nsga2.py b/pymoo/algorithms/nsga2.py index 0dec2b746..ab3d6de46 100644 --- a/pymoo/algorithms/nsga2.py +++ b/pymoo/algorithms/nsga2.py @@ -1,8 +1,10 @@ import numpy as np from pymoo.algorithms.genetic_algorithm import GeneticAlgorithm -from pymoo.indicators.igd import IGD -from pymoo.operators.default_operators import set_default_if_none, set_if_none +from pymoo.operators.crossover.real_simulated_binary_crossover import SimulatedBinaryCrossover +from pymoo.operators.default_operators import set_if_none +from pymoo.operators.mutation.real_polynomial_mutation import PolynomialMutation +from pymoo.operators.sampling.real_random_sampling import RealRandomSampling from pymoo.operators.selection.tournament_selection import TournamentSelection from pymoo.operators.survival.rank_and_crowding import RankAndCrowdingSurvival from pymoo.rand import random @@ -11,11 +13,23 @@ class NSGA2(GeneticAlgorithm): - def __init__(self, var_type, **kwargs): + + def __init__(self, + pop_size=100, + prob_cross=0.9, + eta_cross=20, + prob_mut=None, + eta_mut=15, + **kwargs): + set_if_none(kwargs, 'pop_size', pop_size) + set_if_none(kwargs, 'sampling', RealRandomSampling()) set_if_none(kwargs, 'selection', TournamentSelection(f_comp=comp_by_dom_and_crowding)) + set_if_none(kwargs, 'crossover', SimulatedBinaryCrossover(prob_cross=prob_cross, eta_cross=eta_cross)) + set_if_none(kwargs, 'mutation', PolynomialMutation(prob_mut=prob_mut, eta_mut=eta_mut)) set_if_none(kwargs, 'survival', RankAndCrowdingSurvival()) - set_default_if_none(var_type, kwargs) + set_if_none(kwargs, 'eliminated_duplicates', False) super().__init__(**kwargs) + self.func_display_attrs = disp_multi_objective def _initialize(self): pop = super()._initialize() @@ -23,19 +37,11 @@ def _initialize(self): self.survival.do(pop, None, self.pop_size, out=self.D, **self.D) return pop - def _display_attrs(self, D): - return disp_multi_objective(self.problem, self.evaluator, D) - - - -def comp_by_rank_and_crowding(pop, P, **kwargs): +def comp_by_rank_and_crowding(pop, P, rank, crowding, **kwargs): if P.shape[1] != 2: raise ValueError("Only implemented for binary tournament!") - rank = kwargs['data']['rank'] - crowding = kwargs['data']['crowding'] - # the winner of the tournament selection S = np.zeros((P.shape[0], 1), dtype=np.int) @@ -59,7 +65,6 @@ def comp_by_rank_and_crowding(pop, P, **kwargs): def comp_by_dom_and_crowding(pop, P, crowding, **kwargs): - if P.shape[1] != 2: raise ValueError("Only implemented for binary tournament!") @@ -83,4 +88,4 @@ def comp_by_dom_and_crowding(pop, P, 
crowding, **kwargs): S[i, 0] = P[i, 1] else: S[i, 0] = P[i, random.randint(0, 2)] - return S \ No newline at end of file + return S diff --git a/pymoo/algorithms/nsga3.py b/pymoo/algorithms/nsga3.py index 0b2a9a461..fdd74bed2 100644 --- a/pymoo/algorithms/nsga3.py +++ b/pymoo/algorithms/nsga3.py @@ -1,17 +1,38 @@ from pymoo.algorithms.genetic_algorithm import GeneticAlgorithm -from pymoo.operators.default_operators import set_default_if_none, set_if_none +from pymoo.operators.crossover.real_simulated_binary_crossover import SimulatedBinaryCrossover +from pymoo.operators.default_operators import set_if_none +from pymoo.operators.mutation.real_polynomial_mutation import PolynomialMutation +from pymoo.operators.sampling.real_random_sampling import RealRandomSampling +from pymoo.operators.selection.random_selection import RandomSelection from pymoo.operators.survival.reference_line_survival import ReferenceLineSurvival from pymoo.util.display import disp_multi_objective from pymoo.util.reference_directions import get_uniform_weights class NSGA3(GeneticAlgorithm): - def __init__(self, var_type, ref_dirs=None, **kwargs): + + def __init__(self, + pop_size=100, + ref_dirs=None, + prob_cross=0.9, + eta_cross=20, + prob_mut=None, + eta_mut=15, + **kwargs): + self.ref_dirs = ref_dirs - set_default_if_none(var_type, kwargs) + + set_if_none(kwargs, 'pop_size', pop_size) + set_if_none(kwargs, 'sampling', RealRandomSampling()) + set_if_none(kwargs, 'selection', RandomSelection()) + set_if_none(kwargs, 'crossover', SimulatedBinaryCrossover(prob_cross=prob_cross, eta_cross=eta_cross)) + set_if_none(kwargs, 'mutation', PolynomialMutation(prob_mut=prob_mut, eta_mut=eta_mut)) set_if_none(kwargs, 'survival', None) + set_if_none(kwargs, 'eliminated_duplicates', False) super().__init__(**kwargs) + self.func_display_attrs = disp_multi_objective + def _initialize(self): pop = super()._initialize() @@ -26,8 +47,3 @@ def _initialize(self): self.survival = ReferenceLineSurvival(self.ref_dirs, self.problem.n_obj) return pop - - def _display_attrs(self, D): - return disp_multi_objective(self.problem, self.evaluator, D) - - diff --git a/pymoo/algorithms/rnsga3.py b/pymoo/algorithms/rnsga3.py index 5e4dc0d3c..45153daee 100644 --- a/pymoo/algorithms/rnsga3.py +++ b/pymoo/algorithms/rnsga3.py @@ -1,51 +1,49 @@ -from pymoo.algorithms.genetic_algorithm import GeneticAlgorithm -from pymoo.operators.default_operators import set_default_if_none, set_if_none +from pymoo.algorithms.nsga3 import NSGA3 from pymoo.operators.survival.rnsga_reference_line_survival import ReferenceLineSurvival -class RNSGAIII(GeneticAlgorithm): - def __init__(self, var_type, ref_points=None, mu=0.1, ref_pop_size=None, method='uniform', p=None, **kwargs): +class RNSGA3(NSGA3): + + def __init__(self, + ref_points=None, + mu=0.1, + method="uniform", + ref_pop_size=None, + p=None, + **kwargs): + """ Parameters ---------- - var_type : string - Variable type which must be real in this case ref_points : numpy.array Reference points to be focused on during the evolutionary computation. - mu : double The shrink factor - + method : str + Weight sampling method ref_pop_size : int If the structured reference lines should be based off of a different population size than the actual population size. Default value is pop size. - p : double If the structured reference directions should be based off of p gaps specify a p value, otherwise reference directions will be based on the population size. - - ref_sampling_method : string - Reference direction generation method. 
Currently only 'uniform' or 'random'. - """ + super().__init__(**kwargs) + self.ref_points = ref_points - self.ref_dirs = None - self.mu = mu self.method = method - set_default_if_none(var_type, kwargs) - set_if_none(kwargs, 'survival', None) + self.mu = mu self.ref_pop_size = ref_pop_size self.p = p - super().__init__(**kwargs) - def _initialize(self, problem): - super()._initialize(problem) + def _initialize(self): + pop = super()._initialize() if self.ref_pop_size is None: self.ref_pop_size = self.pop_size - if self.survival is None: - self.survival = ReferenceLineSurvival(self.ref_dirs, problem.n_obj) + self.survival = ReferenceLineSurvival(self.ref_dirs, self.ref_points, self.ref_pop_size, self.mu, self.method, self.p, self.problem.n_obj) + return pop diff --git a/pymoo/algorithms/so_DE.py b/pymoo/algorithms/so_DE.py index e8fe8ddcc..254af41f0 100644 --- a/pymoo/algorithms/so_DE.py +++ b/pymoo/algorithms/so_DE.py @@ -12,13 +12,21 @@ def __init__(self, **kwargs): set_default_if_none("real", kwargs) super().__init__(**kwargs) - self.crossover = DifferentialEvolutionCrossover(prob=0.5, weight=0.75, variant="DE/rand/1", method="binomial") + self.crossover = DifferentialEvolutionCrossover(prob=0.5, weight=0.75, variant="DE/best/1", method="binomial") + self.func_display_attrs = disp_single_objective def _next(self, pop): # all neighbors shuffled (excluding the individual itself) P = RandomSelection().do(pop, self.pop_size, self.crossover.n_parents - 1) - P = np.concatenate([np.arange(self.pop_size)[:, None], P], axis=1) + + origin = None + if "rand" in self.crossover.variant: + origin = np.arange(self.pop_size) + elif "best" in self.crossover.variant: + origin = np.full(self.pop_size, np.argmin(pop.F)) + + P = np.concatenate([origin[:, None], P], axis=1) # do recombination and create an offspring X = self.crossover.do(self.problem, pop.X[P, :]) @@ -27,7 +35,4 @@ def _next(self, pop): # replace whenever offspring is better than population member off_is_better = np.where(F < pop.F)[0] pop.F[off_is_better, :] = F[off_is_better, :] - pop.X[off_is_better, :] = X[off_is_better, :] - - def _display_attrs(self, D): - return disp_single_objective(self.problem, self.evaluator, D) + pop.X[off_is_better, :] = X[off_is_better, :] \ No newline at end of file diff --git a/pymoo/algorithms/so_genetic_algorithm.py b/pymoo/algorithms/so_genetic_algorithm.py index de155c632..b610a90aa 100644 --- a/pymoo/algorithms/so_genetic_algorithm.py +++ b/pymoo/algorithms/so_genetic_algorithm.py @@ -11,6 +11,4 @@ def __init__(self, var_type, **kwargs): set_if_none(kwargs, 'survival', FitnessSurvival()) set_default_if_none(var_type, kwargs) super().__init__(**kwargs) - - def _display_attrs(self, D): - return disp_single_objective(self.problem, self.evaluator, D) + self.func_display_attrs = disp_single_objective \ No newline at end of file diff --git a/pymoo/indicators/gd.py b/pymoo/indicators/gd.py index 34765360c..461ad4b02 100644 --- a/pymoo/indicators/gd.py +++ b/pymoo/indicators/gd.py @@ -7,8 +7,8 @@ class GD(Indicator): def __init__(self, true_front): Indicator.__init__(self) - self.true_front = true_front + self.pareto_front = true_front def _calc(self, F): - v = cdist(F, self.true_front) + v = cdist(F, self.pareto_front) return np.mean(np.min(v, axis=1)) diff --git a/pymoo/indicators/igd.py b/pymoo/indicators/igd.py index 58d2076a7..d19a0f150 100644 --- a/pymoo/indicators/igd.py +++ b/pymoo/indicators/igd.py @@ -5,10 +5,10 @@ class IGD(Indicator): - def __init__(self, pf): + def __init__(self, pareto_front): 
Indicator.__init__(self) - self.pf = pf + self.pareto_front = pareto_front def _calc(self, F): - v = cdist(self.pf, F) + v = cdist(self.pareto_front, F) return np.mean(np.min(v, axis=1)) diff --git a/pymoo/model/algorithm.py b/pymoo/model/algorithm.py index 12823943b..425e9e1af 100644 --- a/pymoo/model/algorithm.py +++ b/pymoo/model/algorithm.py @@ -1,3 +1,4 @@ +import copy from abc import abstractmethod import numpy as np @@ -19,34 +20,23 @@ class Algorithm: """ - def __init__(self, - disp=False, - callback=None, - ): - """ - Parameters - ---------- - disp : bool - If it is true than information during the algorithm execution are displayed - - callback : func - A callback function can be passed that is executed every generation. The parameters for the function - are the algorithm itself, the number of evaluations so far and the current population. - - def callback(algorithm, n_evals, pop): - print() - """ - self.disp = disp - self.callback = callback - + def __init__(self, **kwargs) -> None: + super().__init__() + self.disp = None + self.func_display_attrs = None + self.callback = None + self.history = [] def solve(self, problem, evaluator, seed=1, + disp=False, + callback=None, return_only_feasible=True, return_only_non_dominated=True, - history=None, + save_history=False, + **kwargs ): """ @@ -66,6 +56,16 @@ def solve, seed: int Random seed for this run. Before the algorithm starts this seed is set. + disp : bool + If true, information about the algorithm execution is displayed + + callback : func + A callback function can be passed that is executed every generation. The parameters for the function + are the algorithm itself, the number of evaluations so far and the current population. + + def callback(algorithm, n_evals, pop): + print() + return_only_feasible : bool If true, only feasible solutions are returned. @@ -73,6 +73,9 @@ def solve, If true, only the non dominated solutions are returned. Otherwise, it might be - depending on the algorithm - the final population + save_history : bool + If true, a current snapshot of each generation is saved. 
+ Returns ------- X: matrix @@ -94,16 +97,20 @@ def solve(self, random.seed(seed) np.random.seed(seed) + self.disp = disp + self.callback = callback + self.save_history = save_history + # this allows to provide only an integer instead of an evaluator object if not isinstance(evaluator, Evaluator): evaluator = Evaluator(evaluator) # call the algorithm to solve the problem - X, F, G = self._solve(problem, evaluator) + X, F, G = self._solve(problem, evaluator, **kwargs) if return_only_feasible: if G is not None and G.shape[0] == len(F) and G.shape[1] > 0: - cv = Problem.calc_constraint_violation(G)[:,0] + cv = Problem.calc_constraint_violation(G)[:, 0] X = X[cv <= 0, :] F = F[cv <= 0, :] if G is not None: @@ -122,8 +129,8 @@ def solve(self, def _each_iteration(self, D, first=False, **kwargs): # display the output if defined by the algorithm - if self.disp: - disp = self._display_attrs(D) + if self.disp and self.func_display_attrs is not None: + disp = self.func_display_attrs(self.problem, self.evaluator, D) if disp is not None: self._display(disp, header=first) @@ -131,18 +138,18 @@ def _each_iteration(self, D, first=False, **kwargs): if self.callback is not None: self.callback(D) + if self.save_history: + self.history.append(copy.deepcopy(D)) + # attributes are a list of tuples of length 3: (name, val, width) def _display(self, disp, header=False): - regex = " | ".join(["{}"] * len(disp)) - if header: - print("=" * 40) - print(regex.format(*[name.ljust(width) for name, _, width in disp])) - print("=" * 40) - print(regex.format(*[str(val).ljust(width) for _, val, width in disp])) - - def _display_attrs(self, attrs): - return None + regex = " | ".join(["{}"] * len(disp)) + if header: + print("=" * 40) + print(regex.format(*[name.ljust(width) for name, _, width in disp])) + print("=" * 40) + print(regex.format(*[str(val).ljust(width) for _, val, width in disp])) @abstractmethod def _solve(self, problem, evaluator): - pass \ No newline at end of file + pass diff --git a/pymoo/model/population.py b/pymoo/model/population.py index 1bf22bbfe..66d81aae7 100644 --- a/pymoo/model/population.py +++ b/pymoo/model/population.py @@ -1,23 +1,38 @@ +import copy + import numpy as np class Population: def __init__(self, **kwargs): - D = dict(kwargs) - self.D = D + self.D = dict(kwargs) def __getattr__(self, name): + + # if internal function method just return the one whats usually done + if str.startswith(name, '__'): + return super().__getattr__(name) + + # if we really ask for the dictionary if name == "D": - return self.D + return self.__dict__.get("D", None) + # else we ask for an entry in the dictionary else: - return self.D[name] + if "D" in self.__dict__: + return self.__dict__["D"].get(name, None) + else: + print(name) + return {} def __setattr__(self, name, value): - if name == "D": - self.__dict__[name] = value + if name == 'D': + self.__dict__['D'] = value else: - self.D[name] = value + self.__dict__['D'][name] = value + + def __deepcopy__(self, a): + return Population(**copy.deepcopy(self.D)) def merge(self, other): D = {} @@ -32,6 +47,12 @@ def merge(self, other): D[key] = np.concatenate([self.D[key], other.D[key]]) except: D[key] = self.D[key] + + # all values in other that were not present in self + for key, value in other.D.items(): + if key not in self.D: + D[key] = other.D[key] + self.D = D def size(self): diff --git a/pymoo/operators/crossover/real_differental_evolution_crossover.py b/pymoo/operators/crossover/real_differental_evolution_crossover.py index f91c210c2..b5cb0d331 100644 --- 
a/pymoo/operators/crossover/real_differental_evolution_crossover.py +++ b/pymoo/operators/crossover/real_differental_evolution_crossover.py @@ -46,7 +46,7 @@ def _do(self, p, parents, children, **kwargs): children[:, :] = parents[:, 0] if self.variant == "DE/rand/1": - trial = parents[:, 3] + self.weight * (parents[:, 1] - parents[:, 2]) + trial = parents[:, 1] + self.weight * (parents[:, 2] - parents[:, 3]) else: raise Exception("DE variant %s not known." % self.variant) diff --git a/pymoo/operators/survival/reference_line_survival.py b/pymoo/operators/survival/reference_line_survival.py index 2822966e2..269dc9659 100644 --- a/pymoo/operators/survival/reference_line_survival.py +++ b/pymoo/operators/survival/reference_line_survival.py @@ -3,7 +3,7 @@ from pymoo.model.survival import Survival from pymoo.rand import random -from pymoo.util.normalization import normalize_by_asf_interceptions +from pymoo.util.normalization import normalize_by_asf_interceptions, normalize_by_asf_interceptions_ from pymoo.util.non_dominated_rank import NonDominatedRank @@ -17,6 +17,7 @@ def __init__(self, ref_dirs, n_obj): def _do(self, pop, off, n_survive, return_only_index=False, out=None, **kwargs): + pop.merge(off) fronts = NonDominatedRank.calc_as_fronts(pop.F, pop.G) # all indices to survive @@ -32,11 +33,16 @@ def _do(self, pop, off, n_survive, return_only_index=False, out=None, **kwargs): survival = list(range(0, len(survival))) last_front = np.arange(len(survival), pop.size()) + """ N, self.asf, self.extreme, F_min, F_max = normalize_by_asf_interceptions(pop.F, len(fronts[0]), prev_asf=self.asf, prev_S=self.extreme, return_bounds=True) + """ + + N, F_min, F_max = normalize_by_asf_interceptions_(pop.F, return_bounds=True) + # if the last front needs to be splitted n_remaining = n_survive - len(survival) if n_remaining > 0: diff --git a/pymoo/operators/survival/rnsga_reference_line_survival.py b/pymoo/operators/survival/rnsga_reference_line_survival.py index 14c3eefc5..d9a6d9b3c 100644 --- a/pymoo/operators/survival/rnsga_reference_line_survival.py +++ b/pymoo/operators/survival/rnsga_reference_line_survival.py @@ -2,25 +2,30 @@ from pymoo.model.survival import Survival from pymoo.rand import random -from pymoo.util.misc import normalize_by_asf_interceptions from pymoo.util.non_dominated_rank import NonDominatedRank +from pymoo.util.normalization import normalize_by_asf_interceptions from pymoo.util.reference_directions import get_ref_dirs_from_points class ReferenceLineSurvival(Survival): - def __init__(self, ref_dirs, n_obj): + def __init__(self, ref_dirs, ref_points, ref_pop_size, mu, method, p, n_obj): super().__init__() self.ref_dirs = ref_dirs + self.ref_points = ref_points + self.ref_pop_size = ref_pop_size + self.mu = mu + self.method = method + self.p = p self.n_obj = n_obj self.extreme = None self.asf = None def _do(self, pop, off, n_survive, return_only_index=False, **kwargs): - data = kwargs['data'] - + pop.merge(off) fronts = NonDominatedRank.calc_as_fronts(pop.F, pop.G) + # all indices to survive survival = [] @@ -34,16 +39,14 @@ def _do(self, pop, off, n_survive, return_only_index=False, **kwargs): survival = list(range(0, len(survival))) last_front = np.arange(len(survival), pop.size()) - N, self.asf, self.extreme, F_min, F_max = normalize_by_asf_interceptions(np.vstack((pop.F, data.ref_points)), + N, self.asf, self.extreme, F_min, F_max = normalize_by_asf_interceptions(np.vstack((pop.F, self.ref_points)), len(fronts[0]), prev_asf=self.asf, prev_S=self.extreme, return_bounds=True) - z_ = 
(data.ref_points - F_min)/(F_max - F_min) # Normalized reference points - data.F_min, data.F_max = F_min, F_max - self.ref_dirs = get_ref_dirs_from_points(z_, self.n_obj, data.ref_pop_size, alpha=data.mu, method=data.method, p=data.p) - data.ref_dirs = self.ref_dirs + z_ = (self.ref_points - F_min)/(F_max - F_min) # Normalized reference points + self.ref_dirs = get_ref_dirs_from_points(z_, self.n_obj, self.ref_pop_size, mu=self.mu, method=self.method, p=self.p) # if the last front needs to be split n_remaining = n_survive - len(survival) diff --git a/pymoo/optimize.py b/pymoo/optimize.py index 2daaeabfe..8c83945d9 100644 --- a/pymoo/optimize.py +++ b/pymoo/optimize.py @@ -3,13 +3,15 @@ from pymoo.algorithms.moead import MOEAD from pymoo.algorithms.nsga2 import NSGA2 from pymoo.algorithms.nsga3 import NSGA3 +from pymoo.algorithms.rnsga3 import RNSGA3 from pymoo.algorithms.so_DE import DifferentialEvolution from pymoo.algorithms.so_genetic_algorithm import SingleObjectiveGeneticAlgorithm from pymoo.model.evaluator import Evaluator from pymop.problem import Problem -def minimize(fun, xl=None, xu=None, termination=('n_eval', 10000), n_var=None, fun_args={}, method='auto', method_args={}, +def minimize(fun, xl=None, xu=None, termination=('n_eval', 10000), n_var=None, fun_args={}, method='auto', + method_args={}, seed=None, callback=None, disp=True): """ @@ -27,30 +29,25 @@ def minimize(fun, xl=None, xu=None, termination=('n_eval', 10000), n_var=None, f A function that gets X which is a 2d array as input. Each row represents a solution to evaluate and each column a variable. The function also needs to return the same number of rows, where each column is one objective to optimize. In case of constraints, a second 2d array can be returned. - xl : numpy.array or number The lower boundaries of variables as a 1d array - xu : numpy.array or number The upper boundaries of variables as a 1d array - n_var : int If xl or xu is only a number, this is used to create the boundary numpy arrays. Otherwise it remains unused. - termination : tuple The termination criterion that is used to stop the algorithm when the result is satisfying. - fun_args : dict Additional arguments if necessary to evaluate the solutions (constants, random seed, ...) - - method : string Algorithm that is used to solve the problem. - + method_args : dict + Additional arguments to initialize the algorithm object + seed : int + Random seed to be used for the run callback : callable, optional Called after each iteration, as ``callback(D)``, where ``D`` is a dictionary with algorithm information, such as the current design, objective and constraint space. - disp : bool Whether to display the current result in each generation or not. 
@@ -62,8 +59,6 @@ def minimize(fun, xl=None, xu=None, termination=('n_eval', 10000), n_var=None, f """ - problem = None - if isinstance(fun, Problem): problem = fun @@ -120,7 +115,6 @@ def _evaluate(self, x, f, g=None): else: raise Exception('Unknown Termination criterium: %s' % termination_criterium) - return minimize_(problem, evaluator, method=method, method_args=method_args, seed=seed, callback=callback, disp=disp) @@ -138,18 +132,21 @@ def minimize_(problem, evaluator, method='auto', method_args={}, seed=None, # choose the algorithm implementation given the string if method == 'nsga2': - algorithm = NSGA2("real", disp=disp, **method_args) + algorithm = NSGA2(**method_args) elif method == 'nsga3': - algorithm = NSGA3("real", disp=disp, **method_args) + algorithm = NSGA3(**method_args) + elif method == 'rnsga3': + algorithm = RNSGA3(**method_args) elif method == 'moead': - algorithm = MOEAD("real", disp=disp, **method_args) + algorithm = MOEAD(**method_args) elif method == 'de': - algorithm = DifferentialEvolution(disp=disp, **method_args) + algorithm = DifferentialEvolution(**method_args) elif method == 'ga': - algorithm = SingleObjectiveGeneticAlgorithm("real", disp=disp, **method_args) + algorithm = SingleObjectiveGeneticAlgorithm("real", **method_args) else: raise Exception('Unknown method: %s' % method) - X, F, G = algorithm.solve(problem, evaluator) + + X, F, G = algorithm.solve(problem, evaluator, disp=disp, callback=callback, seed=seed) return {'problem': problem, 'X': X, 'F': F, 'G': G} diff --git a/pymoo/rand/random.py b/pymoo/rand/random.py index 5e7f30d86..57351fe62 100644 --- a/pymoo/rand/random.py +++ b/pymoo/rand/random.py @@ -3,6 +3,7 @@ from pymoo.configuration import Configuration +# use a singleton class to set the random generator globally class Singleton: __instance = None diff --git a/pymoo/rand/random_generator.py b/pymoo/rand/random_generator.py index e8128891e..e72e37557 100644 --- a/pymoo/rand/random_generator.py +++ b/pymoo/rand/random_generator.py @@ -2,6 +2,11 @@ class RandomGenerator: + """ + Implementation of a random generator used for all algorithms. This is just the base class which needs to + be inherited from. 
+ """ + @abstractmethod def seed(self, s): pass diff --git a/pymoo/run_algorithm.py b/pymoo/run_algorithm.py index 1c15c51f9..7964ab180 100644 --- a/pymoo/run_algorithm.py +++ b/pymoo/run_algorithm.py @@ -1,55 +1,21 @@ import time -import matplotlib.pyplot as plt import numpy as np -from matplotlib import animation -from pymop.problems.dtlz import DTLZ2 -from pymop.problems.rastrigin import Rastrigin -from pymop.problems.zdt import ZDT1 +from pymoo.util.plotting import plot, animate if __name__ == '__main__': - def fun(x): - return ZDT1(n_var=30).evaluate(x) - - n_samples = x.shape[0] - - f = np.full((n_samples, 2), np.inf) - f[:, 0] = np.sum(np.square(x - 0.25), axis=1) - f[:, 1] = np.sum(np.square(x - 0.75), axis=1) - - g = np.full((n_samples, 1), np.inf) - g[:, 0] = 0.1 - f[:, 0] - - return f, g - - - from pymoo.optimize import minimize - - res = minimize(Rastrigin(n_var=30), method="de", termination=('n_eval', 90000), disp=True) - - plt.scatter(res['F'][:,0], res['F'][:,1]) - plt.show() - - exit() - # load the problem instance - # from pymop.problems.zdt import ZDT4 - # problem = ZDT4() - # problem = Rastrigin(n_var=30) - problem = DTLZ2(n_var=10) + from pymop.problems.zdt import ZDT1 + problem = ZDT1(n_var=30) # create the algorithm instance by specifying the intended parameters - from pymoo.algorithms.nsga3 import NSGA3 - - algorithm = NSGA3("real", pop_size=100, verbose=True) + from pymoo.algorithms.moead import MOEAD + algorithm = MOEAD(pop_size=100) start_time = time.time() - # save the history in an object to observe the convergence over generations - history = [] - # number of generations to run it n_gen = 200 @@ -59,58 +25,17 @@ def fun(x): seed=2, return_only_feasible=False, return_only_non_dominated=False, - history=history) + disp=True, + save_history=True) print("--- %s seconds ---" % (time.time() - start_time)) - print(F) scatter_plot = True - save_animation = False - - # get the problem dimensionality - is_2d = problem.n_obj == 2 - is_3d = problem.n_obj == 3 - - if scatter_plot and is_2d: - plt.scatter(F[:, 0], F[:, 1]) - plt.show() - - if scatter_plot and is_3d: - from mpl_toolkits.mplot3d import Axes3D - - fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') - ax.scatter(F[:, 0], F[:, 1], F[:, 2]) - plt.show() - - # create an animation to watch the convergence over time - if is_2d and save_animation: - fig = plt.figure() - ax = plt.gca() - - _F = history[0]['F'] - pf = problem.pareto_front() - plt.scatter(pf[:, 0], pf[:, 1], label='Pareto Front', s=60, facecolors='none', edgecolors='r') - scat = plt.scatter(_F[:, 0], _F[:, 1]) - - - def update(frame_number): - _F = history[frame_number]['F'] - scat.set_offsets(_F) - - # get the bounds for plotting and add padding - min = np.min(_F, axis=0) - 0.1 - max = np.max(_F, axis=0) + 0. 
- - # set the scatter object with padding - ax.set_xlim(min[0], max[0]) - ax.set_ylim(min[1], max[1]) - + save_animation = True - # create the animation - ani = animation.FuncAnimation(fig, update, frames=range(n_gen)) + if scatter_plot: + plot(F) - # write the file - Writer = animation.writers['ffmpeg'] - writer = Writer(fps=6, bitrate=1800) - ani.save('%s.mp4' % problem.name(), writer=writer) + if save_animation: + H = np.concatenate([e['pop'].F[None, :, :] for e in algorithm.history], axis=0) + animate('%s.mp4' % problem.name(), H, problem) diff --git a/pymoo/run_experiment.py b/pymoo/run_experiment.py index 4326e8403..55856116b 100644 --- a/pymoo/run_experiment.py +++ b/pymoo/run_experiment.py @@ -6,7 +6,7 @@ from pymop.zdt import ZDT1, ZDT2, ZDT3, ZDT4, ZDT6 from pymop.dtlz import DTLZ2 -from pymoo.algorithms.rnsga3 import RNSGAIII +from pymoo.algorithms.rnsga3 import RNSGA3 from pymoo.model.evaluator import Evaluator from pymoo.operators.crossover.real_simulated_binary_crossover import SimulatedBinaryCrossover @@ -25,7 +25,7 @@ def ZDT_Test(): for problem, ref_points in ref_dirs.items(): for points in ref_points: - for algorithm in [RNSGAIII("real", pop_size=100, ep=0.001, crossover=crossover, ref_dirs=points, verbose=0)]: + for algorithm in [RNSGA3("real", pop_size=100, ep=0.001, crossover=crossover, ref_dirs=points, verbose=0)]: for run in range(1, n_runs + 1): yield (algorithm, problem, run) @@ -43,7 +43,7 @@ def DTLZ_test(): print(problem) prob = problem(**parameters) for points in params["ref_points"]: - for algorithm in [RNSGAIII("real", pop_size=100, ep=0.01, crossover=crossover, ref_dirs=points, verbose=0)]: + for algorithm in [RNSGA3("real", pop_size=100, ep=0.01, crossover=crossover, ref_dirs=points, verbose=0)]: for run in range(1, n_runs + 1): yield (algorithm, prob, run) diff --git a/pymoo/util/decomposition.py b/pymoo/util/decomposition.py new file mode 100644 index 000000000..ac9c24b52 --- /dev/null +++ b/pymoo/util/decomposition.py @@ -0,0 +1,10 @@ +import numpy as np + + +def tchebi(F, weights, ideal_point): + v = np.abs((F - ideal_point) * weights) + return np.max(v, axis=1) + + +def weighted_sum(F, weights): + return np.sum(F * weights, axis=1) diff --git a/pymoo/util/display.py b/pymoo/util/display.py index 0142bde70..2a642f7de 100644 --- a/pymoo/util/display.py +++ b/pymoo/util/display.py @@ -1,3 +1,4 @@ +from pymoo.indicators.gd import GD from pymoo.indicators.igd import IGD import numpy as np @@ -18,5 +19,6 @@ def disp_multi_objective(problem, evaluator, D): pf = problem.pareto_front() if pf is not None: attrs.append(('igd', "%.5f" % IGD(pf).calc(D['pop'].F), 8)) + attrs.append(('gd', "%.5f" % GD(pf).calc(D['pop'].F), 8)) return attrs diff --git a/pymoo/util/misc.py b/pymoo/util/misc.py index 9d6a2b56f..63a428814 100644 --- a/pymoo/util/misc.py +++ b/pymoo/util/misc.py @@ -3,12 +3,6 @@ from pymoo.rand import random -# returns only the unique rows from a given matrix X -def unique_rows(X): - y = np.ascontiguousarray(X).view(np.dtype((np.void, X.dtype.itemsize * X.shape[1]))) - _, idx = np.unique(y, return_index=True) - return idx - # repairs a numpy array to be in bounds def repair(X, xl, xu): diff --git a/pymoo/util/normalization.py b/pymoo/util/normalization.py index a846a6eb9..12d8736e0 100644 --- a/pymoo/util/normalization.py +++ b/pymoo/util/normalization.py @@ -22,6 +22,43 @@ def normalize(x, x_min=None, x_max=None, return_bounds=False): return res, x_min, x_max +def normalize_by_asf_interceptions_(x, return_bounds=False): + + # find the x_min point + 
n_obj = x.shape[1] + x_min = np.min(x, axis=0) + # transform the objective that 0 means the best + F = x - x_min + + # calculate the asf matrix + asf = np.eye(n_obj) + asf = asf + (asf == 0) * 1e-16 + + # result matrix with the selected points + S = np.zeros((n_obj, n_obj)) + + # find for each direction the best + for i in range(len(asf)): + val = np.max(F / asf[i, :], axis=1) + 0.0001 * np.sum(F / asf[i, :]) + S[i, :] = F[np.argmin(val), :] + + try: + b = np.ones(n_obj) + A = np.linalg.solve(S, b) + A = 1 / A + A = A.T + except LinAlgError: + A = np.max(S, axis=0) + + F = F / A + + if not return_bounds: + return F + else: + x_max = A + x_min + return F, x_min, x_max + + def normalize_by_asf_interceptions(X, first_front, prev_S=None, prev_asf=None, return_bounds=False): """ diff --git a/pymoo/util/plotting.py b/pymoo/util/plotting.py new file mode 100644 index 000000000..1e691ff84 --- /dev/null +++ b/pymoo/util/plotting.py @@ -0,0 +1,76 @@ +import matplotlib.pyplot as plt +import numpy as np +from matplotlib import animation + + +def plot(F): + + if F.ndim == 1: + print("Cannot plot a one dimensional array.") + return + + n_dim = F.shape[1] + + if n_dim == 2: + plot_2d(F) + elif n_dim == 3: + plot_3d(F) + else: + print("Cannot plot a %s dimensional array." % n_dim) + return + + +def plot_3d(F): + fig = plt.figure() + from mpl_toolkits.mplot3d import Axes3D + ax = fig.add_subplot(111, projection='3d') + ax.scatter(F[:, 0], F[:, 1], F[:, 2]) + plt.show() + +def plot_2d(F): + plt.scatter(F[:, 0], F[:, 1]) + plt.show() + +def animate(path_to_file, H, problem=None): + + if H.ndim != 3 or H.shape[2] != 2: + print("Can only animate a two dimensional set of arrays.") + return + + fig = plt.figure() + ax = plt.gca() + + # plot the pareto front if it is known for the problem + if problem is not None: + pf = problem.pareto_front() + plt.scatter(pf[:, 0], pf[:, 1], label='Pareto Front', s=60, facecolors='none', edgecolors='r') + + # plot the initial population + _F = H[0, :, :] + scat = plt.scatter(_F[:, 0], _F[:, 1]) + plt.title("0") + + # the update method + def update(n): + _F = H[n, :, :] + scat.set_offsets(_F) + + # get the bounds for plotting and add padding + min = np.min(_F, axis=0) - 0.1 + max = np.max(_F, axis=0) + 0. 
+ + # set the scatter object with padding + ax.set_xlim(min[0], max[0]) + ax.set_ylim(min[1], max[1]) + plt.title(n) + + # create the animation + ani = animation.FuncAnimation(fig, update, frames=H.shape[0]) + + # write the file + Writer = animation.writers['ffmpeg'] + writer = Writer(fps=6, bitrate=1800) + ani.save(path_to_file, writer=writer) + + + diff --git a/setup.py b/setup.py index e8508a8b9..d224cbc37 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup __author__ = "Julian Blank" -__version__ = '0.1.2' +__version__ = '0.2.0' try: import pypandoc @@ -19,7 +19,7 @@ author_email="blankjul@egr.msu.edu", description="Multi-Objective Optimization Algorithms", long_description=long_description, - url="https://github.com/julesy89/pymoo", + url="https://github.com/msu-coinlab/pymoo", license='MIT', keywords="optimization", packages=setuptools.find_packages(), diff --git a/tests/rmetric.py b/tests/rmetric.py index 3547a9851..079bfe93a 100644 --- a/tests/rmetric.py +++ b/tests/rmetric.py @@ -4,7 +4,7 @@ import numpy as np -from pymoo.algorithms.rnsga3 import RNSGAIII +from pymoo.algorithms.rnsga3 import RNSGA3 from pymoo.indicators.rmetric import RMetric from pymoo.model.evaluator import Evaluator from pymoo.operators.crossover.real_simulated_binary_crossover import SimulatedBinaryCrossover @@ -29,7 +29,7 @@ def ZDT_Test(): for i, points in enumerate(ref_points): sublist = [] - for algorithm in [RNSGAIII("real", pop_size=100, ep=0.001, crossover=crossover, ref_dirs=points, verbose=0)]: + for algorithm in [RNSGA3("real", pop_size=100, ep=0.001, crossover=crossover, ref_dirs=points, verbose=0)]: for run in range(1, n_runs + 1): name = problem.__class__.__name__ + '_' + str(i) + '_' + str(run) sublist.append((algorithm, problem, run, points, name)) @@ -52,7 +52,7 @@ def DTLZ_test(): prob = problem(**parameters) for i, points in enumerate(params["ref_points"]): sublist = [] - for algorithm in [RNSGAIII("real", pop_size=100, ep=0.01, crossover=crossover, ref_dirs=points, verbose=0)]: + for algorithm in [RNSGA3("real", pop_size=100, ep=0.01, crossover=crossover, ref_dirs=points, verbose=0)]: for run in range(1, n_runs + 1): name = problem.__class__.__name__ + '_' + str(i) + '_' + str(run) sublist.append((algorithm, prob, run, points, name))
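As a quick orientation for reviewers, the following is a minimal usage sketch of the interface introduced by this patch: the renamed RNSGA3 class, the integer evaluation budget accepted by Algorithm.solve, and the new pymoo.util.plotting helper. It is pieced together only from the code added in this commit; the reference points, the mu value and the evaluation budget are illustrative assumptions, and the snippet has not been verified against this exact revision.

.. code:: python

    import numpy as np

    from pymop.problems.zdt import ZDT1
    from pymoo.algorithms.rnsga3 import RNSGA3
    from pymoo.util.plotting import plot

    # bi-objective test problem, as in the README example above
    problem = ZDT1(n_var=30)

    # reference points in objective space to focus the search on (illustrative values)
    ref_points = np.array([[0.5, 0.2], [0.1, 0.6]])

    # the renamed RNSGA3 class from this patch; mu is the shrink factor, remaining
    # keyword arguments are forwarded to the NSGA3/GeneticAlgorithm constructors
    algorithm = RNSGA3(ref_points=ref_points, mu=0.05, pop_size=100)

    # a plain integer is wrapped into an Evaluator with that many function evaluations
    X, F, G = algorithm.solve(problem, 20000, seed=1, disp=True)

    # scatter plot of the resulting objective values via the new plotting helper
    plot(F)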