diff --git a/deap/base.py b/deap/base.py
index c88ee461..e0df3fcb 100644
--- a/deap/base.py
+++ b/deap/base.py
@@ -268,3 +268,91 @@ def __repr__(self):
         """Return the Python code to build a copy of the object."""
         return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
                               self.values if self.valid else tuple())
+
+
+def _violates_constraint(fitness):
+    return not fitness.valid \
+        and fitness.constraint_violation is not None \
+        and sum(fitness.constraint_violation) > 0
+
+
+class ConstrainedFitness(Fitness):
+    def __init__(self, values=(), constraint_violation=None):
+        super(ConstrainedFitness, self).__init__(values)
+        self.constraint_violation = constraint_violation
+
+    @Fitness.values.deleter
+    def values(self):
+        self.wvalues = ()
+        self.constraint_violation = None
+
+    def __gt__(self, other):
+        return not self.__le__(other)
+
+    def __ge__(self, other):
+        return not self.__lt__(other)
+
+    def __le__(self, other):
+        self_violates_constraints = _violates_constraint(self)
+        other_violates_constraints = _violates_constraint(other)
+
+        if self_violates_constraints and other_violates_constraints:
+            return True
+        elif self_violates_constraints:
+            return True
+        elif other_violates_constraints:
+            return False
+
+        return self.wvalues <= other.wvalues
+
+    def __lt__(self, other):
+        self_violates_constraints = _violates_constraint(self)
+        other_violates_constraints = _violates_constraint(other)
+
+        if self_violates_constraints and other_violates_constraints:
+            return False
+        elif self_violates_constraints:
+            return True
+        elif other_violates_constraints:
+            return False
+
+        return self.wvalues < other.wvalues
+
+    def __eq__(self, other):
+        self_violates_constraints = _violates_constraint(self)
+        other_violates_constraints = _violates_constraint(other)
+
+        if self_violates_constraints and other_violates_constraints:
+            return True
+        elif self_violates_constraints:
+            return False
+        elif other_violates_constraints:
+            return False
+
+        return self.wvalues == other.wvalues
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def dominates(self, other):
+        self_violates_constraints = _violates_constraint(self)
+        other_violates_constraints = _violates_constraint(other)
+
+        if self_violates_constraints and other_violates_constraints:
+            return False
+        elif self_violates_constraints:
+            return False
+        elif other_violates_constraints:
+            return True
+
+        return super(ConstrainedFitness, self).dominates(other)
+
+    def __str__(self):
+        """Return the values of the Fitness object."""
+        return str((self.values if self.valid else tuple(), self.constraint_violation))
+
+    def __repr__(self):
+        """Return the Python code to build a copy of the object."""
+        return "%s.%s(%r, %r)" % (self.__module__, self.__class__.__name__,
+                                  self.values if self.valid else tuple(),
+                                  self.constraint_violation)
\ No newline at end of file
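For reviewers, a minimal sketch of how the new ConstrainedFitness is meant to behave when driven through the usual creator workflow. The FitnessMinC name and the single constraint value are illustrative, not part of the patch.

from deap import base, creator

# Illustrative class name; any weights/objective count works the same way.
creator.create("FitnessMinC", base.ConstrainedFitness, weights=(-1.0,))

feasible = creator.FitnessMinC((1.0,))      # has values, so it is valid
infeasible = creator.FitnessMinC()          # no values: stays invalid
infeasible.constraint_violation = (2.5,)    # a positive sum marks a violation

assert feasible > infeasible                # a feasible fitness always wins
assert not infeasible.dominates(feasible)   # infeasible never dominates feasible
print(repr(infeasible))                     # deap.creator.FitnessMinC((), (2.5,))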
diff --git a/deap/cma.py b/deap/cma.py
index a886b81f..9b6a2050 100644
--- a/deap/cma.py
+++ b/deap/cma.py
@@ -22,6 +22,9 @@
 """
 import copy
 from math import sqrt, log, exp
+from itertools import cycle
+import warnings
+
 import numpy
 
 from . import tools
@@ -545,3 +548,321 @@ def update(self, population):
         self.A = [A[i] if ind._ps[0] == "o" else self.A[ind._ps[1]] for i, ind in enumerate(chosen)]
         self.pc = [pc[i] if ind._ps[0] == "o" else self.pc[ind._ps[1]] for i, ind in enumerate(chosen)]
         self.psucc = [psucc[i] if ind._ps[0] == "o" else self.psucc[ind._ps[1]] for i, ind in enumerate(chosen)]
+
+
+class StrategyActiveOnePlusLambda(object):
+    """A CMA-ES strategy that combines the :math:`(1 + \\lambda)` paradigm
+    [Igel2007]_, the mixed integer modification [Hansen2011]_, active
+    covariance update [Arnold2010]_ and constraint handling [Arnold2012]_.
+    This version of CMA-ES requires the random vector and the mutation
+    that created each individual. The vector and mutation are stored in each
+    individual as :attr:`_z` and :attr:`_y` respectively. Updating with
+    individuals not containing these attributes will result in an
+    :class:`AttributeError`.
+    Notes:
+        When using this strategy (especially when using constraints) you should
+        monitor the strategy :attr:`condition_number`. If it goes above a given
+        threshold (say :math:`10^{12}`), you should think of restarting the
+        optimization as the covariance matrix is going degenerate. See the
+        constrained active CMA-ES example for a simple example of restart.
+    :param parent: An iterable object that indicates where to start the
+        evolution. The parent requires a fitness attribute.
+    :param sigma: The initial standard deviation of the distribution.
+    :param steps: The minimal step size for each dimension. Use 0 for
+        continuous dimensions.
+    :param lambda_: Number of offspring to produce from the parent.
+        (optional, defaults to 1)
+    :param **kwargs: One or more parameters to pass to the strategy as
+        described in the following table. (optional)
+    +----------------+---------------------------+------------------------------+
+    | Parameter      | Default                   | Details                      |
+    +================+===========================+==============================+
+    | ``d``          | ``1.0 + N / (2.0 *        | Damping for step-size.       |
+    |                | lambda_)``                |                              |
+    +----------------+---------------------------+------------------------------+
+    | ``ptarg``      | ``1.0 / (5 + sqrt(lambda_)| Target success rate          |
+    |                | / 2.0)``                  | (from 1 + lambda algorithm). |
+    +----------------+---------------------------+------------------------------+
+    | ``cp``         | ``ptarg * lambda_ / (2.0 +| Step size learning rate.     |
+    |                | ptarg * lambda_)``        |                              |
+    +----------------+---------------------------+------------------------------+
+    | ``cc``         | ``2.0 / (N + 2.0)``       | Cumulation time horizon.     |
+    +----------------+---------------------------+------------------------------+
+    | ``ccovp``      | ``2.0 / (N**2 + 6.0)``    | Covariance matrix learning   |
+    |                |                           | rate.                        |
+    +----------------+---------------------------+------------------------------+
+    | ``ccovn``      | ``0.4 / (N**1.6 + 1.0)``  | Covariance matrix negative   |
+    |                |                           | learning rate.               |
+    +----------------+---------------------------+------------------------------+
+    | ``cconst``     | ``1.0 / (N + 2.0)``       | Constraint vectors learning  |
+    |                |                           | rate.                        |
+    +----------------+---------------------------+------------------------------+
+    | ``beta``       | ``0.1 / (lambda_ * (N +   | Covariance matrix learning   |
+    |                | 2.0))``                   | rate for constraints.        |
+    |                |                           |                              |
+    +----------------+---------------------------+------------------------------+
+    | ``pthresh``    | ``0.44``                  | Threshold success rate.      |
+    +----------------+---------------------------+------------------------------+
+    .. [Igel2007] Igel, Hansen and Roth. Covariance matrix adaptation for
+        multi-objective optimization. 2007
+    .. [Arnold2010] Arnold and Hansen. Active covariance matrix adaptation for
+        the (1+1)-CMA-ES. 2010.
+    .. [Hansen2011] Hansen. A CMA-ES for Mixed-Integer Nonlinear Optimization.
+        [Research Report] RR-7751, INRIA. 2011
+    .. [Arnold2012] Arnold and Hansen. A (1+1)-CMA-ES for Constrained Optimisation.
+        2012
+    """
+    def __init__(self, parent, sigma, steps, **kargs):
+        self.parent = parent
+        self.sigma = sigma
+        self.dim = len(self.parent)
+
+        self.A = numpy.identity(self.dim)
+        self.invA = numpy.identity(self.dim)
+        self.condition_number = numpy.linalg.cond(self.A)
+
+        self.pc = numpy.zeros(self.dim)
+
+        # Save parameters
+        self.params = kargs.copy()
+
+        # Covariance matrix adaptation
+        self.cc = self.params.get("cc", 2.0 / (self.dim + 2.0))
+        self.ccovp = self.params.get("ccovp", 2.0 / (self.dim ** 2 + 6.0))
+        self.ccovn = self.params.get("ccovn", 0.4 / (self.dim ** 1.6 + 1.0))
+        self.cconst = self.params.get("cconst", 1.0 / (self.dim + 2.0))
+        self.pthresh = self.params.get("pthresh", 0.44)
+
+        self.lambda_ = self.params.get("lambda_", 1)
+
+        self.psucc = self.ptarg
+        self.S_int = numpy.array(steps)
+        self.i_I_R = numpy.flatnonzero(2 * self.sigma * numpy.diag(self.A)**0.5
+                                       < self.S_int)
+
+        self.constraint_vecs = None
+        self.ancestors_fitness = list()
+
+    @property
+    def lambda_(self):
+        return self._lambda
+
+    @lambda_.setter
+    def lambda_(self, value):
+        self._lambda = value
+        self._compute_lambda_parameters()
+
+    def _compute_lambda_parameters(self):
+        """Computes the parameters depending on :math:`\lambda`. It needs to
+        be called again if :math:`\lambda` changes during evolution.
+        """
+        # Step size control:
+        self.d = self.params.get("d", 1.0 + self.dim / (2.0 * self.lambda_))
+        self.ptarg = self.params.get("ptarg", 1.0 / (5 + numpy.sqrt(self.lambda_)
+                                                     / 2.0))
+        self.cp = self.params.get("cp", (self.ptarg * self.lambda_
+                                         / (2 + self.ptarg * self.lambda_)))
+
+        self.beta = self.params.get("beta", 0.1 / (self.lambda_ * (self.dim + 2.0)))
+
+    def generate(self, ind_init):
+        """Generate a population of :math:`\lambda` individuals of type
+        *ind_init* from the current strategy.
+        :param ind_init: A function object that is able to initialize an
+                         individual from a list.
+        :returns: A list of individuals.
+        """
+        # Generate individuals
+        z = numpy.random.standard_normal((self.lambda_, self.dim))
+        y = numpy.dot(self.A, z.T).T
+        x = self.parent + self.sigma * y + self.S_int * self._integer_mutation()
+
+        if any(self.S_int > 0):
+            # Bring values to the integer steps
+            round_values = numpy.tile(self.S_int > 0, (self.lambda_, 1))
+            steps = numpy.tile(self.S_int, (self.lambda_, 1))
+            x[round_values] = steps[round_values] * numpy.around(x[round_values]
+                                                                 / steps[round_values])
+
+        # The update method needs to remember the y and z of each individual
+        population = list(map(ind_init, x))
+        for ind, yi, zi in zip(population, y, z):
+            ind._y = yi
+            ind._z = zi
+
+        return population
+
+    def _integer_mutation(self):
+        n_I_R = self.i_I_R.shape[0]
+
+        # Mixed integer CMA-ES is developed for (mu/mu, lambda). We have a
+        # (1 + lambda) setting, thus we make the integer mutation probabilistic.
+        # The integer mutation is lambda / 2 if all dimensions are integers, or
+        # min(lambda / 2 - 1, lambda / 10 + n_I_R + 1) otherwise. The minus 1
+        # accounts for the last new candidate getting its integer mutation from
+        # the last best solution. We skip this last best solution part.
+        if n_I_R == 0:
+            return numpy.zeros((self.lambda_, self.dim))
+        elif n_I_R == self.dim:
+            p = self.lambda_ / 2.0 / self.lambda_
+            # lambda_int = int(numpy.floor(self.lambda_ / 2))
+        else:
+            p = (min(self.lambda_ / 2.0, self.lambda_ / 10.0 + n_I_R / self.dim)
+                 / self.lambda_)
+            # lambda_int = int(min(numpy.floor(self.lambda_ / 10) + n_I_R + 1,
+            #                      numpy.floor(self.lambda_ / 2) - 1))
+
+        Rp = numpy.zeros((self.lambda_, self.dim))
+        Rpp = numpy.zeros((self.lambda_, self.dim))
+
+        # Ri' has exactly one of its components set to one.
+        # The Ri' are dependent in that the number of mutations for each
+        # coordinate differs by at most one.
+        for i, j in zip(range(self.lambda_), cycle(self.i_I_R)):
+            # Probabilistically choose lambda_int individuals
+            if numpy.random.rand() < p:
+                Rp[i, j] = 1
+                Rpp[i, j] = numpy.random.geometric(p=0.7**(1.0/n_I_R)) - 1
+
+        I_pm1 = (-1)**numpy.random.randint(0, 2, (self.lambda_, self.dim))
+        R_int = I_pm1 * (Rp + Rpp)
+
+        # Usually in (mu/mu, lambda) the last individual is set to the step taken;
+        # we do not use that scheme in the (1 + lambda) setting.
+        # if self.update_count > 0:
+        #     R_int[-1, :] = (numpy.floor(-self.S_int - self.last_best)
+        #                     - numpy.floor(-self.S_int - self.centroid))
+
+        return R_int
+
+    def _rank1update(self, individual, p_succ):
+        update_cov = False
+        self.psucc = (1 - self.cp) * self.psucc + self.cp * p_succ
+
+        if not hasattr(self.parent, "fitness") \
+                or self.parent.fitness <= individual.fitness:
+            self.parent = copy.deepcopy(individual)
+            self.ancestors_fitness.append(copy.deepcopy(individual.fitness))
+            if len(self.ancestors_fitness) > 5:
+                self.ancestors_fitness.pop()
+
+            # Must guard if pc is all 0 to prevent w_norm_sqrd from being 0
+            if self.psucc < self.pthresh or numpy.allclose(self.pc, 0):
+                self.pc = (1 - self.cc) * self.pc + (numpy.sqrt(self.cc * (2 - self.cc))
+                                                     * individual._y)
+
+                a = numpy.sqrt(1 - self.ccovp)
+                w = numpy.dot(self.invA, self.pc)
+                w_norm_sqrd = numpy.linalg.norm(w) ** 2
+                b = numpy.sqrt(1 - self.ccovp) / w_norm_sqrd \
+                    * (numpy.sqrt(1 + self.ccovp / (1 - self.ccovp) * w_norm_sqrd)
+                       - 1)
+
+            else:
+                self.pc = (1 - self.cc) * self.pc
+
+                d = self.ccovp * (1 + self.cc * (2 - self.cc))
+                a = numpy.sqrt(1 - d)
+                w = numpy.dot(self.invA, self.pc)
+                w_norm_sqrd = numpy.linalg.norm(w) ** 2
+                b = numpy.sqrt(1 - d) \
+                    * (numpy.sqrt(1 + self.ccovp * w_norm_sqrd / (1 - d)) - 1) \
+                    / w_norm_sqrd
+
+            update_cov = True
+
+        elif len(self.ancestors_fitness) >= 5 \
+                and individual.fitness < self.ancestors_fitness[0] \
+                and self.psucc < self.pthresh:
+            # Active covariance update requires w = z and not w = inv(A)s
+            w = individual._z
+            w_norm_sqrd = numpy.linalg.norm(w) ** 2
+            if 1 < self.ccovn * (2 * w_norm_sqrd - 1):
+                ccovn = 1 / (2 * w_norm_sqrd - 1)
+            else:
+                ccovn = self.ccovn
+
+            a = numpy.sqrt(1 + ccovn)
+            b = numpy.sqrt(1 + ccovn) / w_norm_sqrd \
+                * (numpy.sqrt(1 - ccovn / (1 + ccovn) * w_norm_sqrd) - 1)
+            update_cov = True
+
+        if update_cov:
+            self.A = self.A * a + b * numpy.outer(numpy.dot(self.A, w), w)
+            self.invA = (1 / a * self.invA
+                         - b / (a ** 2 + a * b * w_norm_sqrd)
+                         * numpy.dot(self.invA, numpy.outer(w, w)))
+
+        # TODO: Add integer mutation i_I_R component
+        self.sigma = self.sigma * numpy.exp(1.0 / self.d
+                                            * ((self.psucc - self.ptarg)
+                                               / (1.0 - self.ptarg)))
+
+    def _infeasible_update(self, individual):
+        if not hasattr(individual.fitness, "constraint_violation"):
+            return
+
+        if self.constraint_vecs is None:
+            shape = len(individual.fitness.constraint_violation), self.dim
+            self.constraint_vecs = numpy.zeros(shape)
+
+        for i in range(self.constraint_vecs.shape[0]):
+            if individual.fitness.constraint_violation[i]:
+                self.constraint_vecs[i] = (1 - self.cconst) * self.constraint_vecs[i] \
+                    + self.cconst * individual._y
+
+        W = numpy.dot(self.invA, self.constraint_vecs.T).T  # M x N
+        constraint_violation = numpy.sum(individual.fitness.constraint_violation)
+
+        A_prime = (
+            self.A - self.beta / constraint_violation
+            * numpy.sum(
+                list(
+                    numpy.outer(self.constraint_vecs[i], W[i])
+                    / numpy.dot(W[i], W[i])
+                    for i in range(self.constraint_vecs.shape[0])
+                    if individual.fitness.constraint_violation[i]
+                ),
+                axis=0
+            )
+        )
+
+        try:
+            self.invA = numpy.linalg.inv(A_prime)
+        except numpy.linalg.LinAlgError:
+            warnings.warn("Singular matrix inversion, "
+                          "invalid update in CMA-ES ignored", RuntimeWarning)
+        else:
+            self.A = A_prime
+
+    def update(self, population):
+        """Update the current covariance matrix strategy from the *population*.
+        :param population: A list of individuals from which to update the
+                           parameters.
+        """
+        valid_population = [ind for ind in population if ind.fitness.valid]
+        invalid_population = [ind for ind in population if not ind.fitness.valid]
+
+        if len(valid_population) > 0:
+            # Rank 1 update
+            valid_population.sort(key=lambda ind: ind.fitness, reverse=True)
+            if not hasattr(self.parent, "fitness"):
+                lambda_succ = len(valid_population)
+            else:
+                lambda_succ = sum(self.parent.fitness <= ind.fitness
+                                  for ind in valid_population)
+            # Use len(valid) to not account for individuals violating constraints
+            self._rank1update(valid_population[0],
+                              float(lambda_succ) / len(valid_population))
+
+        if len(invalid_population) > 0:
+            # Learn the constraints from all invalid individuals
+            for ind in invalid_population:
+                self._infeasible_update(ind)
+
+        # Used to monitor the covariance matrix conditioning
+        self.condition_number = numpy.linalg.cond(self.A)
+
+        C = numpy.dot(self.A, self.A.T)
+        self.i_I_R = numpy.flatnonzero(2 * self.sigma * numpy.diag(C)**0.5
+                                       < self.S_int)
\ No newline at end of file
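The strategy above is driven through DEAP's usual generate-update loop, and infeasible candidates are expected to report their violations on the fitness so that _infeasible_update can learn from them. The sketch below is illustrative only — the creator names, the c1 constraint, the sphere objective and the iteration count are assumptions, not part of this patch — and it mirrors the setup used in tests/test_convergence.py.

import numpy
from deap import base, benchmarks, cma, creator

# Hypothetical classes wired through the creator, as in the tests.
creator.create("ConstrFitness", base.ConstrainedFitness, weights=(-1.0,))
creator.create("ConstrIndividual", list, fitness=creator.ConstrFitness)

def c1(individual):
    # Illustrative constraint: the first coordinate must stay non-negative.
    return individual[0] < 0

toolbox = base.Toolbox()
parent = (numpy.random.rand(3) * 2) + 1
# steps: 0 for continuous dimensions, a positive step for integer ones.
strategy = cma.StrategyActiveOnePlusLambda(parent, sigma=0.5,
                                           steps=[0, 0, 0.1], lambda_=1)
toolbox.register("generate", strategy.generate, ind_init=creator.ConstrIndividual)
toolbox.register("update", strategy.update)

for _ in range(100):
    population = toolbox.generate()
    for ind in population:
        violations = (c1(ind),)
        if not any(violations):
            # Feasible: evaluate normally, the fitness becomes valid.
            ind.fitness.values = benchmarks.sphere(ind)
        # Infeasible individuals keep an invalid fitness but record which
        # constraints they violate; update() learns constraint vectors from them.
        ind.fitness.constraint_violation = violations
    toolbox.update(population)

best = strategy.parent  # incumbent (1 + lambda) parent after the updates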
diff --git a/deap/tools/mutation.py b/deap/tools/mutation.py
index 1f0fefce..e13a7e42 100644
--- a/deap/tools/mutation.py
+++ b/deap/tools/mutation.py
@@ -2,7 +2,6 @@
 import random
 from itertools import repeat
 
-from past.builtins import xrange
 
 try:
     from collections.abc import Sequence
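The test-suite changes that follow replace nose's with_setup decorators with pytest fixtures. As a reminder of how that pattern works (the fixture, class and test names here are illustrative), everything before the yield runs as setup, everything after it runs as teardown, and a test opts in simply by naming the fixture as a parameter:

import pytest
from deap import base, creator

@pytest.fixture
def tmp_fitness_class():
    creator.create("TmpFitness", base.Fitness, weights=(-1.0,))  # setup
    yield
    del creator.__dict__["TmpFitness"]                           # teardown

def test_uses_fixture(tmp_fitness_class):
    assert hasattr(creator, "TmpFitness")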
diff --git a/tests/test_algorithms.py b/tests/test_algorithms.py
index 62e17a75..75cc4f66 100644
--- a/tests/test_algorithms.py
+++ b/tests/test_algorithms.py
@@ -13,10 +13,10 @@
 # You should have received a copy of the GNU Lesser General Public
 # License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
 
-from nose import with_setup
 import random
 
 import numpy
+import pytest
 
 from deap import algorithms
 from deap import base
@@ -32,25 +32,37 @@
 HV_THRESHOLD = 116.0  # 120.777 is Optimal value
 
 
-def setup_func_single_obj():
+def teardown_():
+    # Messy way to remove a class from the creator
+    del creator.__dict__[FITCLSNAME]
+    del creator.__dict__[INDCLSNAME]
+
+
+@pytest.fixture
+def setup_teardown_single_obj():
     creator.create(FITCLSNAME, base.Fitness, weights=(-1.0,))
     creator.create(INDCLSNAME, list, fitness=creator.__dict__[FITCLSNAME])
+    yield
+    teardown_()
+
 
-def setup_func_multi_obj():
+@pytest.fixture
+def setup_teardown_multi_obj():
     creator.create(FITCLSNAME, base.Fitness, weights=(-1.0, -1.0))
     creator.create(INDCLSNAME, list, fitness=creator.__dict__[FITCLSNAME])
+    yield
+    teardown_()
 
-def setup_func_multi_obj_numpy():
+
+@pytest.fixture
+def setup_teardown_multi_obj_numpy():
     creator.create(FITCLSNAME, base.Fitness, weights=(-1.0, -1.0))
     creator.create(INDCLSNAME, numpy.ndarray, fitness=creator.__dict__[FITCLSNAME])
+    yield
+    teardown_()
 
-def teardown_func():
-    # Messy way to remove a class from the creator
-    del creator.__dict__[FITCLSNAME]
-    del creator.__dict__[INDCLSNAME]
 
-@with_setup(setup_func_single_obj, teardown_func)
-def test_cma():
+def test_cma(setup_teardown_single_obj):
     NDIM = 5
 
     strategy = cma.Strategy(centroid=[0.0]*NDIM, sigma=1.0)
@@ -65,8 +77,8 @@ def test_cma():
     assert best.fitness.values < (1e-8,), "CMA algorithm did not converged properly."
 
 
-@with_setup(setup_func_multi_obj, teardown_func)
-def test_nsga2():
+
+def test_nsga2(setup_teardown_multi_obj):
     NDIM = 5
     BOUND_LOW, BOUND_UP = 0.0, 1.0
     MU = 16
@@ -116,8 +128,7 @@ def test_nsga2():
     assert not (any(numpy.asarray(ind) < BOUND_LOW) or any(numpy.asarray(ind) > BOUND_UP))
 
 
-@with_setup(setup_func_multi_obj_numpy, teardown_func)
-def test_mo_cma_es():
+def test_mo_cma_es(setup_teardown_multi_obj_numpy):
 
     def distance(feasible_ind, original_ind):
         """A distance function to the feasibility region."""
@@ -186,8 +197,7 @@ def valid(individual):
     assert hv > HV_THRESHOLD, "Hypervolume is lower than expected %f < %f" % (hv, HV_THRESHOLD)
 
 
-@with_setup(setup_func_multi_obj, teardown_func)
-def test_nsga3():
+def test_nsga3(setup_teardown_multi_obj):
     NDIM = 5
     BOUND_LOW, BOUND_UP = 0.0, 1.0
     MU = 16
@@ -211,7 +221,7 @@ def test_nsga3():
         ind.fitness.values = fit
     pop = toolbox.select(pop, len(pop))
 
-    # Begin the generational process 
+    # Begin the generational process
     for gen in range(1, NGEN):
         offspring = algorithms.varAnd(pop, toolbox, 1.0, 1.0)
 
diff --git a/tests/test_convergence.py b/tests/test_convergence.py
index 1d1856d5..1309392d 100644
--- a/tests/test_convergence.py
+++ b/tests/test_convergence.py
@@ -42,6 +42,7 @@ def tearDown(self):
         del creator.__dict__[FITCLSNAME]
         del creator.__dict__[INDCLSNAME]
 
+
 class TestSingleObjective(TearDownCreatorTestCase):
     def setUp(self):
         creator.create(FITCLSNAME, base.Fitness, weights=(-1.0,))
@@ -51,7 +52,7 @@ def test_cma(self):
         NDIM = 5
         NGEN = 100
 
-        strategy = cma.BasicStrategy(centroid=[0.0]*NDIM, sigma=1.0)
+        strategy = cma.Strategy(centroid=[0.0]*NDIM, sigma=1.0)
 
         toolbox = base.Toolbox()
         toolbox.register("evaluate", benchmarks.sphere)
@@ -59,8 +60,9 @@ def test_cma(self):
         toolbox.register("update", strategy.update)
 
         # Consume the algorithm until NGEN
-        state = next(islice(algorithms.GenerateUpdateAlgorithm(toolbox), NGEN, None))
-        best, = tools.selBest(state.population, k=1)
+        population, _ = algorithms.eaGenerateUpdate(toolbox, NGEN)
+
+        best, = tools.selBest(population, k=1)
 
         self.assertLess(best.fitness.values[0], 1e-8)
 
@@ -73,7 +75,7 @@ def test_cma_mixed_integer_1_p_1_no_constraint(self):
 
         parent = (numpy.random.rand(N) * 2) + 1
 
-        strategy = cma.ActiveOnePlusLambdaStrategy(parent, 0.5, [0, 0, 0.1], lambda_=1)
+        strategy = cma.StrategyActiveOnePlusLambda(parent, 0.5, [0, 0, 0.1], lambda_=1)
         toolbox.register("generate", strategy.generate, ind_init=creator.__dict__[INDCLSNAME])
         toolbox.register("update", strategy.update)
 
@@ -109,7 +111,7 @@ def test_cma_mixed_integer_1_p_20_no_constraint(self):
 
         parent = (numpy.random.rand(N) * 2) + 1
 
-        strategy = cma.ActiveOnePlusLambdaStrategy(parent, 0.5, [0, 0, 0.1], lambda_=20)
+        strategy = cma.StrategyActiveOnePlusLambda(parent, 0.5, [0, 0, 0.1], lambda_=20)
         toolbox.register("generate", strategy.generate, ind_init=creator.__dict__[INDCLSNAME])
         toolbox.register("update", strategy.update)
 
@@ -166,7 +168,7 @@ def c2(individual):
         while restarts > 0:
             parent = (numpy.random.rand(N) * 2) + 1
 
-            strategy = cma.ActiveOnePlusLambdaStrategy(parent, 0.5, [0, 0, 0.1, 0, 0], lambda_=1)
+            strategy = cma.StrategyActiveOnePlusLambda(parent, 0.5, [0, 0, 0.1, 0, 0], lambda_=1)
             toolbox.register("generate", strategy.generate, ind_init=creator.__dict__[INDCLSNAME])
             toolbox.register("update", strategy.update)
 
@@ -226,7 +228,7 @@ def c2(individual):
         while restarts > 0:
             parent = (numpy.random.rand(N) * 2) + 1
 
-            strategy = cma.ActiveOnePlusLambdaStrategy(parent, 0.5, [0, 0, 0.1, 0, 0], lambda_=20)
+            strategy = cma.StrategyActiveOnePlusLambda(parent, 0.5, [0, 0, 0.1, 0, 0], lambda_=20)
             toolbox.register("generate", strategy.generate, ind_init=creator.__dict__[INDCLSNAME])
             toolbox.register("update", strategy.update)
 
@@ -345,7 +347,7 @@ def test_nsga3(self):
         # Begin the generational process
         for gen in range(1, NGEN):
             # Vary the individuals
-            offspring = list(islice(algorithms.and_variation(pop, toolbox, 1.0, 1.0), len(pop)))
+            offspring = list(islice(algorithms.varAnd(pop, toolbox, 1.0, 1.0), len(pop)))
 
             # Evaluate the individuals with an invalid fitness
             invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
@@ -407,7 +409,7 @@ def valid(individual):
         for ind in population:
             ind.fitness.values = toolbox.evaluate(ind)
 
-        strategy = cma.MultiObjectiveStrategy(population, sigma=1.0, mu=MU, lambda_=LAMBDA)
+        strategy = cma.StrategyMultiObjective(population, sigma=1.0, mu=MU, lambda_=LAMBDA)
 
         toolbox.register("generate", strategy.generate, creator.__dict__[INDCLSNAME])
         toolbox.register("update", strategy.update)
diff --git a/tests/test_mutation.py b/tests/test_mutation.py
index 2822f8a8..65c7742e 100644
--- a/tests/test_mutation.py
+++ b/tests/test_mutation.py
@@ -1,5 +1,5 @@
 import unittest
-import mock
+from unittest import mock
 
 from deap.tools.mutation import mutInversion
 
diff --git a/tests/test_operators.py b/tests/test_operators.py
index d4d0ba24..27b10eab 100644
--- a/tests/test_operators.py
+++ b/tests/test_operators.py
@@ -1,8 +1,5 @@
 import unittest
-try:
-    from unittest import mock
-except ImportError:
-    import mock
+from unittest import mock
 import random
 
 from deap.tools import crossover
diff --git a/tests/test_pickle.py b/tests/test_pickle.py
index 873db8f4..b8e54054 100644
--- a/tests/test_pickle.py
+++ b/tests/test_pickle.py
@@ -13,9 +13,11 @@
 from deap import gp
 from deap import tools
 
+
 def func():
     return "True"
 
+
 class Pickling(unittest.TestCase):
 
     def setUp(self):