Merge branch 'master' of github.com:DEAP/deap
fmder committed Dec 20, 2016
2 parents 47641fb + a90d3d5 commit 8b8c821
Showing 5 changed files with 233 additions and 16 deletions.
3 changes: 1 addition & 2 deletions README.md
@@ -120,8 +120,7 @@ Authors of scientific papers including results generated using DEAP are encourag
* François-Michel De Rainville, Félix-Antoine Fortin, Marc-André Gardner, Marc Parizeau and Christian Gagné, "DEAP: A Python Framework for Evolutionary Algorithms", in EvoSoft Workshop, Companion proc. of the Genetic and Evolutionary Computation Conference (GECCO 2012), July 07-11 2012. [Paper](http://goo.gl/pXXug)

## Projects using DEAP
* Van Geit, W., M. Gevaert, G. Chindemi, C. Rössert, J.-D. Courcol, E. Muller, F. Schürmann, I. Segev, and H. Markram (2016, March). BluePyOpt: Leveraging open source software and cloud infrastructure to optimise model parameters in neuroscience. ArXiv e-prints.
http://arxiv.org/abs/1603.00500
* Van Geit W, Gevaert M, Chindemi G, Rössert C, Courcol J, Muller EB, Schürmann F, Segev I and Markram H (2016). BluePyOpt: Leveraging open source software and cloud infrastructure to optimise model parameters in neuroscience. Front. Neuroinform. 10:17. doi: 10.3389/fninf.2016.00017 https://github.com/BlueBrain/BluePyOpt
* Lara-Cabrera, R., Cotta, C. and Fernández-Leiva, A.J. (2014). Geometrical vs topological measures for the evolution of aesthetic maps in a rts game, Entertainment Computing,
* Macret, M. and Pasquier, P. (2013). Automatic Tuning of the OP-1 Synthesizer Using a Multi-objective Genetic Algorithm. In Proceedings of the 10th Sound and Music Computing Conference (SMC). (pp 614-621).
* Fortin, F. A., Grenier, S., & Parizeau, M. (2013, July). Generalizing the improved run-time complexity algorithm for non-dominated sorting. In Proceeding of the fifteenth annual conference on Genetic and evolutionary computation conference (pp. 615-622). ACM.
30 changes: 19 additions & 11 deletions deap/tools/constraint.py
@@ -14,15 +14,17 @@ class DeltaPenalty(object):
individual.
:param delta: Constant or array of constants returned for an invalid individual.
:param distance: A function returning the distance between the individual
and a given valid point (optional, defaults to 0).
and a given valid point. The distance function can also return a sequence
of length equal to the number of objectives to affect multi-objective
fitnesses differently (optional, defaults to 0).
:returns: A decorator for evaluation function.
This function relies on the fitness weights to add correctly the distance.
The fitness value of the ith objective is defined as
.. math::
f^\mathrm{penalty}_i(\mathbf{x}) = \Delta_i - w_i d(\mathbf{x})
f^\mathrm{penalty}_i(\mathbf{x}) = \Delta_i - w_i d_i(\mathbf{x})
where :math:`\mathbf{x}` is the individual, :math:`\Delta_i` is a user defined
constant and :math:`w_i` is the weight of the ith objective. :math:`\Delta`
@@ -48,10 +50,12 @@ def wrapper(individual, *args, **kwargs):

weights = tuple(1 if w >= 0 else -1 for w in individual.fitness.weights)

dist = 0
dists = tuple(0 for w in individual.fitness.weights)
if self.dist_fct is not None:
dist = self.dist_fct(individual)
return tuple(d - w * dist for d, w in zip(self.delta, weights))
dists = self.dist_fct(individual)
if not isinstance(dists, Sequence):
dists = repeat(dists)
return tuple(d - w * dist for d, w, dist in zip(self.delta, weights, dists))

return wrapper

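Not part of this diff: a minimal usage sketch of the updated DeltaPenalty, assuming a two-objective minimisation problem. The feasibility test, the delta constants (1000.0) and the per-objective distance function below are invented for illustration only.

from deap import base, creator, tools

# Hypothetical two-objective minimisation setup.
creator.create("PenaltyFitness", base.Fitness, weights=(-1.0, -1.0))
creator.create("PenaltyIndividual", list, fitness=creator.PenaltyFitness)

def evaluate(individual):
    # Placeholder objectives.
    return sum(individual), max(individual)

def feasible(individual):
    # Assumed feasibility test: every gene must lie in [0, 1].
    return all(0.0 <= x <= 1.0 for x in individual)

def distance(individual):
    # Returns one distance per objective, so each fitness component is
    # penalised differently (the behaviour documented in this commit).
    d = sum(max(0.0, x - 1.0) + max(0.0, -x) for x in individual)
    return (d, 2.0 * d)

toolbox = base.Toolbox()
toolbox.register("evaluate", evaluate)
toolbox.decorate("evaluate", tools.DeltaPenalty(feasible, (1000.0, 1000.0), distance))

ind = creator.PenaltyIndividual([1.5, -0.2])
ind.fitness.values = toolbox.evaluate(ind)  # penalised, since ind is infeasible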
@@ -71,22 +75,24 @@ class ClosestValidPenalty(object):
:param alpha: Multiplication factor on the distance between the valid and
invalid individual.
:param distance: A function returning the distance between the individual
and a given valid point (optional, defaults to 0).
and a given valid point. The distance function can also return a sequence
of length equal to the number of objectives to affect multi-objective
fitnesses differently (optional, defaults to 0).
:returns: A decorator for evaluation function.
This function relies on the fitness weights to add correctly the distance.
The fitness value of the ith objective is defined as
.. math::
f^\mathrm{penalty}_i(\mathbf{x}) = f_i(\operatorname{valid}(\mathbf{x})) - \\alpha w_i d(\operatorname{valid}(\mathbf{x}), \mathbf{x})
f^\mathrm{penalty}_i(\mathbf{x}) = f_i(\operatorname{valid}(\mathbf{x})) - \\alpha w_i d_i(\operatorname{valid}(\mathbf{x}), \mathbf{x})
where :math:`\mathbf{x}` is the individual,
:math:`\operatorname{valid}(\mathbf{x})` is a function returning the closest
valid individual to :math:`\mathbf{x}`, :math:`\\alpha` is the distance
multiplicative factor and :math:`w_i` is the weight of the ith objective.
"""

def __init__(self, feasibility, feasible, alpha, distance=None):
self.fbty_fct = feasibility
self.fbl_fct = feasible
@@ -109,12 +115,14 @@ def wrapper(individual, *args, **kwargs):
if len(weights) != len(f_fbl):
raise IndexError("Fitness weights and computed fitness are of different size.")

dist = 0
dists = tuple(0 for w in individual.fitness.weights)
if self.dist_fct is not None:
dist = self.dist_fct(f_ind, individual)
dists = self.dist_fct(f_ind, individual)

if not isinstance(dists, Sequence):
dists = repeat(dists)

# print("returned", tuple(f - w * self.alpha * dist for f, w in zip(f_fbl, weights)))
return tuple(f - w * self.alpha * dist for f, w in zip(f_fbl, weights))
return tuple(f - w * self.alpha * d for f, w, d in zip(f_fbl, weights, dists))

return wrapper

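Similarly, and again only as an illustration continuing the hypothetical setup from the previous sketch (reusing `feasible`, `evaluate` and `creator.PenaltyIndividual`), ClosestValidPenalty takes a projection onto the feasible region plus a per-objective distance; `closest_feasible`, the alpha value and the distances are assumptions, not part of the diff.

def closest_feasible(individual):
    # Assumed projection of an invalid individual onto the valid box [0, 1]^n.
    return creator.PenaltyIndividual(min(max(x, 0.0), 1.0) for x in individual)

def valid_distance(feasible_ind, original_ind):
    # Per-objective distances between the original individual and its projection.
    d = sum((f - o) ** 2 for f, o in zip(feasible_ind, original_ind))
    return (d, 2.0 * d)

penalized_eval = tools.ClosestValidPenalty(feasible, closest_feasible, 1.0e2, valid_distance)(evaluate)
values = penalized_eval(creator.PenaltyIndividual([1.5, -0.2]))  # fitness of the projection, shifted by alpha * distance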
113 changes: 112 additions & 1 deletion deap/tools/selection.py
@@ -1,5 +1,6 @@
from __future__ import division
import random
import numpy as np

from functools import partial
from operator import attrgetter
@@ -204,6 +205,116 @@ def selStochasticUniversalSampling(individuals, k):

return chosen

def selLexicase(individuals, k):
"""Returns an individual that does the best on the fitness cases when
considered one at a time in random order.
http://faculty.hampshire.edu/lspector/pubs/lexicase-IEEE-TEC.pdf
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:returns: A list of selected individuals.
"""
selected_individuals = []

for i in range(k):
fit_weights = individuals[0].fitness.weights

candidates = individuals
cases = list(range(len(individuals[0].fitness.values)))
random.shuffle(cases)

while len(cases) > 0 and len(candidates) > 1:
f = min
if fit_weights[cases[0]] > 0:
f = max

best_val_for_case = f(map(lambda x: x.fitness.values[cases[0]], candidates))

candidates = list(filter(lambda x: x.fitness.values[cases[0]] == best_val_for_case, candidates))
cases.pop(0)

selected_individuals.append(random.choice(candidates))

return selected_individuals


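A brief usage sketch, not part of this commit: lexicase selection treats each fitness value as one case, so the evaluation function below returns the per-case errors rather than a single aggregate. The number of cases, the targets and the dummy individuals are invented placeholders.

import random
from deap import base, creator, tools

NUM_CASES = 4  # assumed number of fitness cases

# Each "objective" is the error on one case, all minimised.
creator.create("CaseErrors", base.Fitness, weights=(-1.0,) * NUM_CASES)
creator.create("LexicaseIndividual", list, fitness=creator.CaseErrors)

def evaluate(individual, targets):
    # One error per case instead of a single aggregated error.
    return tuple(abs(sum(individual) - t) for t in targets)

targets = [1.0, 2.0, 3.0, 4.0]
population = [creator.LexicaseIndividual([random.random() for _ in range(3)])
              for _ in range(20)]
for ind in population:
    ind.fitness.values = evaluate(ind, targets)

chosen = tools.selLexicase(population, k=10)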
def selEpsilonLexicase(individuals, k, epsilon):
"""
Returns an individual that does the best on the fitness cases when
considered one at a time in random order. Requires an epsilon parameter.
https://push-language.hampshire.edu/uploads/default/original/1X/35c30e47ef6323a0a949402914453f277fb1b5b0.pdf
Implements the epsilon_y variant.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:param epsilon: Absolute tolerance; on each case, candidates within epsilon of the best value survive.
:returns: A list of selected individuals.
"""
selected_individuals = []

for i in range(k):
fit_weights = individuals[0].fitness.weights

candidates = individuals
cases = list(range(len(individuals[0].fitness.values)))
random.shuffle(cases)

while len(cases) > 0 and len(candidates) > 1:
if fit_weights[cases[0]] > 0:
best_val_for_case = max(map(lambda x: x.fitness.values[cases[0]], candidates))
min_val_to_survive_case = best_val_for_case - epsilon
candidates = list(filter(lambda x: x.fitness.values[cases[0]] >= min_val_to_survive_case, candidates))
else :
best_val_for_case = min(map(lambda x: x.fitness.values[cases[0]], candidates))
max_val_to_survive_case = best_val_for_case + epsilon
candidates = list(filter(lambda x: x.fitness.values[cases[0]] <= max_val_to_survive_case, candidates))

cases.pop(0)

selected_individuals.append(random.choice(candidates))

return selected_individuals

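Continuing the sketch above (illustrative only), the epsilon variant differs at the toolbox level just by the extra tolerance argument; the value 0.05 is an arbitrary placeholder.

toolbox = base.Toolbox()
toolbox.register("select", tools.selEpsilonLexicase, epsilon=0.05)
chosen = toolbox.select(population, k=10)  # survivors lie within 0.05 of the best on each case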
def selAutomaticEpsilonLexicase(individuals, k):
"""
Returns an individual that does the best on the fitness cases when considered one at a
time in random order.
https://push-language.hampshire.edu/uploads/default/original/1X/35c30e47ef6323a0a949402914453f277fb1b5b0.pdf
Implements the lambda_epsilon_y variant.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:returns: A list of selected individuals.
"""
selected_individuals = []

for i in range(k):
fit_weights = individuals[0].fitness.weights

candidates = individuals
cases = list(range(len(individuals[0].fitness.values)))
random.shuffle(cases)

while len(cases) > 0 and len(candidates) > 1:
errors_for_this_case = [x.fitness.values[cases[0]] for x in candidates]
median_val = np.median(errors_for_this_case)
median_absolute_deviation = np.median([abs(x - median_val) for x in errors_for_this_case])
if fit_weights[cases[0]] > 0:
best_val_for_case = max(errors_for_this_case)
min_val_to_survive = best_val_for_case - median_absolute_deviation
candidates = list(filter(lambda x: x.fitness.values[cases[0]] >= min_val_to_survive, candidates))
else :
best_val_for_case = min(errors_for_this_case)
max_val_to_survive = best_val_for_case + median_absolute_deviation
candidates = list(filter(lambda x: x.fitness.values[cases[0]] <= max_val_to_survive, candidates))

cases.pop(0)

selected_individuals.append(random.choice(candidates))

return selected_individuals

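As a worked illustration of the automatically derived tolerance (numbers invented): if the surviving candidates' errors on the current case are 3, 5, 7 and 100, the median is 6 and the median absolute deviation is the median of 3, 1, 1 and 94, i.e. 2; for a minimising objective the best value is 3, so every candidate whose error is at most 3 + 2 = 5 survives this case.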

__all__ = ['selRandom', 'selBest', 'selWorst', 'selRoulette',
'selTournament', 'selDoubleTournament', 'selStochasticUniversalSampling']
'selTournament', 'selDoubleTournament', 'selStochasticUniversalSampling',
'selLexicase', 'selEpsilonLexicase', 'selAutomaticEpsilonLexicase']

11 changes: 9 additions & 2 deletions doc/api/tools.rst
@@ -30,8 +30,9 @@ Here is a list of the implemented operators in DEAP,
.. :func:`cxESBlend` .. :func:`selTournamentDCD` ..
.. :func:`cxESTwoPoint` .. :func:`selDoubleTournament` ..
.. :func:`cxSimulatedBinary` .. :func:`selStochasticUniversalSampling` ..
.. :func:`cxSimulatedBinaryBounded` .. .. ..
.. :func:`cxMessyOnePoint` .. .. ..
.. :func:`cxSimulatedBinaryBounded` .. :func:`selLexicase` ..
.. :func:`cxMessyOnePoint` .. :func:`selEpsilonLexicase` ..
.. .. .. :func:`selAutomaticEpsilonLexicase` ..
============================ =========================================== ========================================= ========================================= ================

and genetic programming specific operators.
@@ -147,6 +148,12 @@ Selection

.. autofunction:: deap.tools.selTournamentDCD

.. autofunction:: deap.tools.selLexicase

.. autofunction:: deap.tools.selEpsilonLexicase

.. autofunction:: deap.tools.selAutomaticEpsilonLexicase

.. autofunction:: deap.tools.sortNondominated

.. autofunction:: deap.tools.sortLogNondominated
92 changes: 92 additions & 0 deletions examples/gp/symbreg_epsilon_lexicase.py
@@ -0,0 +1,92 @@
# This file is part of EAP.
#
# EAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# EAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with EAP. If not, see <http://www.gnu.org/licenses/>.

import operator
import math
import random

import numpy

from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import gp

# Define new functions
def protectedDiv(left, right):
try:
return left / right
except ZeroDivisionError:
return 1

pset = gp.PrimitiveSet("MAIN", 1)
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.sub, 2)
pset.addPrimitive(operator.mul, 2)
pset.addPrimitive(protectedDiv, 2)
pset.addPrimitive(operator.neg, 1)
pset.addPrimitive(math.cos, 1)
pset.addPrimitive(math.sin, 1)
pset.addEphemeralConstant("rand101", lambda: random.randint(-1,1))
pset.renameArguments(ARG0='x')

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=2)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("compile", gp.compile, pset=pset)

def evalSymbReg(individual, points):
# Transform the tree expression in a callable function
func = toolbox.compile(expr=individual)
# Evaluate the mean squared error between the expression
# and the real function : x**4 + x**3 + x**2 + x
sqerrors = ((func(x) - x**4 - x**3 - x**2 - x)**2 for x in points)
return math.fsum(sqerrors) / len(points),

toolbox.register("evaluate", evalSymbReg, points=[x/10. for x in range(-10,10)])
toolbox.register("select", tools.selAutomaticEpsilonLexicase)
toolbox.register("mate", gp.cxOnePoint)
toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)

toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))

def main():
#random.seed(318)

pop = toolbox.population(n=300)
hof = tools.HallOfFame(1)

stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
stats_size = tools.Statistics(len)
mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
mstats.register("avg", numpy.mean)
mstats.register("std", numpy.std)
mstats.register("min", numpy.min)
mstats.register("max", numpy.max)

pop, log = algorithms.eaSimple(pop, toolbox, 0.5, 0.1, 40, stats=mstats,
halloffame=hof, verbose=True)
# print log
return pop, log, hof

if __name__ == "__main__":
main()
