Support of NSGA-II
ahmedfgad committed Sep 4, 2023
1 parent 3163d7f commit 6aeb685
Showing 5 changed files with 137 additions and 45 deletions.
7 changes: 4 additions & 3 deletions pygad/helper/nsga2.py
@@ -36,8 +36,8 @@ def get_non_dominated_set(curr_solutions):
        # Check if any solution dominates the current solution by applying the 2 conditions.
        # le_eq (less than or equal): All elements must be True.
        # le (less than): Only 1 element must be True.
-       le_eq = two_solutions[:, 1] <= two_solutions[:, 0]
-       le = two_solutions[:, 1] < two_solutions[:, 0]
+       le_eq = two_solutions[:, 1] >= two_solutions[:, 0]
+       le = two_solutions[:, 1] > two_solutions[:, 0]

        # If the 2 conditions hold, then a solution dominates the current solution.
        # The current solution is not considered a member of the dominated set.
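For context, the dominance test these two conditions implement can be written standalone. A minimal sketch, assuming maximized fitness values; the function name is illustrative, not part of PyGAD's API:

import numpy

def dominates(other, current):
    # 'other' dominates 'current' when it is at least as good in every
    # objective (all >= True) and strictly better in at least one (any > True).
    other = numpy.asarray(other)
    current = numpy.asarray(current)
    return bool(numpy.all(other >= current) and numpy.any(other > current))

print(dominates([3, 5], [2, 5]))  # True: better in objective 0, equal in objective 1
print(dominates([1, 9], [9, 1]))  # False: (1, 9) and (9, 1) are mutually non-dominated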
@@ -175,8 +175,9 @@ def crowding_distance(pareto_front, fitness):
            # If there are only 2 solutions in the current pareto front, then do not proceed.
            # The crowding distance for such 2 solutions is infinity.
            if len(obj_sorted) <= 2:
+               obj_crowding_dist_list.append(obj_sorted)
                break

            for idx in range(1, len(obj_sorted)-1):
                # Calculate the crowding distance.
                crowding_dist = obj_sorted[idx+1][1] - obj_sorted[idx-1][1]
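The appended line above keeps small fronts in the distance list, since fronts of 1 or 2 solutions get infinite crowding distance. As a rough illustration of the crowding-distance computation on one front (a sketch, not PyGAD's exact implementation; this variant also normalizes each objective's span):

import numpy

def crowding_distance(front_fitness):
    # front_fitness: shape (n_solutions, n_objectives) for a single pareto front.
    n, m = front_fitness.shape
    if n <= 2:
        # Boundary-only fronts: every solution gets infinite distance.
        return numpy.full(n, numpy.inf)
    dist = numpy.zeros(n)
    for obj in range(m):
        order = numpy.argsort(front_fitness[:, obj])
        values = front_fitness[order, obj]
        dist[order[0]] = dist[order[-1]] = numpy.inf  # boundary solutions
        span = values[-1] - values[0]
        if span == 0:
            continue
        # Interior solutions: gap between the two sorted neighbors, normalized.
        dist[order[1:-1]] += (values[2:] - values[:-2]) / span
    return dist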
2 changes: 1 addition & 1 deletion pygad/pygad.py
@@ -2099,7 +2099,7 @@ def run(self):
                                :] = self.last_generation_offspring_mutation
            else:
                self.last_generation_elitism, self.last_generation_elitism_indices = self.steady_state_selection(self.last_generation_fitness,
-                                                                                                                num_parents=self.keep_elitism)
+                                                                                                                 num_parents=self.keep_elitism)
                self.population[0:self.last_generation_elitism.shape[0],
                                :] = self.last_generation_elitism
                self.population[self.last_generation_elitism.shape[0]:, :] = self.last_generation_offspring_mutation
37 changes: 32 additions & 5 deletions pygad/utils/mutation.py
@@ -474,7 +474,8 @@ def adaptive_mutation_population_fitness(self, offspring):
        if len(fitness.shape) > 1:
            # TODO This is a multi-objective optimization problem.
            # fitness[first_idx:last_idx] = [0]*(last_idx - first_idx)
-           raise ValueError('Edit adaptive mutation to work with multi-objective optimization problems.')
+           fitness[first_idx:last_idx] = numpy.zeros(shape=(last_idx - first_idx, fitness.shape[1]))
+           # raise ValueError('Edit adaptive mutation to work with multi-objective optimization problems.')
        else:
            # This is a single-objective optimization problem.
            fitness[first_idx:last_idx] = [0]*(last_idx - first_idx)
@@ -514,7 +515,13 @@ def adaptive_mutation_population_fitness(self, offspring):
            for idx in range(batch_first_index, batch_last_index):
                fitness[idx] = fitness_temp[idx - batch_first_index]

-       average_fitness = numpy.mean(fitness)
+       if len(fitness.shape) > 1:
+           # TODO This is a multi-objective optimization problem.
+           # Calculate the average of each objective's fitness across all solutions in the population.
+           average_fitness = numpy.mean(fitness, axis=0)
+       else:
+           # This is a single-objective optimization problem.
+           average_fitness = numpy.mean(fitness)

        return average_fitness, fitness[len(parents_to_keep):]
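The two branches can be checked with toy arrays (illustrative values only):

import numpy

single = numpy.array([1.0, 4.0, 7.0])             # one fitness value per solution
multi = numpy.array([[1.0, 10.0],
                     [4.0, 20.0],
                     [7.0, 30.0]])                # one row per solution, one column per objective

print(numpy.mean(single))           # 4.0 -> a single scalar average
print(numpy.mean(multi, axis=0))    # [ 4. 20.] -> one average per objective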

@@ -690,10 +697,30 @@ def adaptive_mutation_randomly(self, offspring):
        # Adaptive random mutation changes one or more genes in each offspring randomly.
        # The number of genes to mutate depends on the solution's fitness value.
        for offspring_idx in range(offspring.shape[0]):
-           if offspring_fitness[offspring_idx] < average_fitness:
-               adaptive_mutation_num_genes = self.mutation_num_genes[0]
+           ## TODO Make edits to work with multi-objective optimization.
+           # Compare the fitness of each offspring to the average fitness of each objective function.
+           fitness_comparison = offspring_fitness[offspring_idx] < average_fitness
+           # Check if the problem is single or multi-objective optimization.
+           if type(fitness_comparison) is bool:
+               # Single-objective optimization problem.
+               if fitness_comparison:
+                   adaptive_mutation_num_genes = self.mutation_num_genes[0]
+               else:
+                   adaptive_mutation_num_genes = self.mutation_num_genes[1]
            else:
-               adaptive_mutation_num_genes = self.mutation_num_genes[1]
+               # Multi-objective optimization problem.
+
+               # Get the sum of the comparison result array.
+               # True is considered 1 and False is 0.
+               fitness_comparison_sum = sum(fitness_comparison)
+               # Check if more than or equal to 50% of the objectives have fitness less than the average.
+               # If True, then use the first value in 'mutation_num_genes'.
+               # If False, use the second value.
+               if fitness_comparison_sum >= len(fitness_comparison)/2:
+                   adaptive_mutation_num_genes = self.mutation_num_genes[0]
+               else:
+                   adaptive_mutation_num_genes = self.mutation_num_genes[1]

            mutation_indices = numpy.array(random.sample(range(0, self.num_genes), adaptive_mutation_num_genes))
            for gene_idx in mutation_indices:
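The multi-objective branch above amounts to a majority vote over objectives. A minimal standalone sketch (names are illustrative, not PyGAD's API; lower-than-average fitness selects the first, larger gene count):

import numpy

def pick_num_genes(offspring_fitness, average_fitness, mutation_num_genes):
    comparison = offspring_fitness < average_fitness
    if isinstance(comparison, (bool, numpy.bool_)):
        below_average = comparison                      # single objective
    else:
        # Multi-objective: below average in at least 50% of the objectives?
        below_average = sum(comparison) >= len(comparison) / 2
    # Low-quality solutions get the larger number of mutated genes.
    return mutation_num_genes[0] if below_average else mutation_num_genes[1]

# One of two objectives is below average (1 >= 2/2), so the first value is used.
print(pick_num_genes(numpy.array([1.0, 5.0]), numpy.array([3.0, 4.0]), (4, 1)))  # 4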
134 changes: 99 additions & 35 deletions pygad/utils/parent_selection.py
@@ -7,17 +7,23 @@

class ParentSelection:
    def steady_state_selection(self, fitness, num_parents):

        """
-       Selects the parents using the steady-state selection technique. Later, these parents will mate to produce the offspring.
+       Selects the parents using the steady-state selection technique.
+       This is done by sorting the solutions based on their fitness and selecting the best ones as parents.
+       Later, these parents will mate to produce the offspring.
        It accepts 2 parameters:
            -fitness: The fitness values of the solutions in the current population.
            -num_parents: The number of parents to be selected.
-       It returns an array of the selected parents.
+       It returns:
+           -An array of the selected parents.
+           -The indices of the selected solutions.
        """

-       fitness_sorted = sorted(range(len(fitness)), key=lambda k: fitness[k])
-       fitness_sorted.reverse()
+       # Return the indices of the sorted solutions (all solutions in the population).
+       # This function works with both single- and multi-objective optimization problems.
+       fitness_sorted = nsga2.sort_solutions_nsga2(fitness=fitness)

        # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.
        if self.gene_type_single == True:
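Given a best-first list of indices (which is how the sorting helper's result is used here), steady-state selection reduces to slicing off the top rows of the population. A toy illustration:

import numpy

fitness_sorted = [2, 0, 3, 1]               # best solution first (illustrative indices)
num_parents = 2
population = numpy.arange(8.0).reshape(4, 2)

parents_indices = fitness_sorted[:num_parents]
parents = population[parents_indices, :]    # rows 2 and 0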
@@ -38,11 +44,14 @@ def rank_selection(self, fitness, num_parents):
        It accepts 2 parameters:
            -fitness: The fitness values of the solutions in the current population.
            -num_parents: The number of parents to be selected.
-       It returns an array of the selected parents.
+       It returns:
+           -An array of the selected parents.
+           -The indices of the selected solutions.
        """

-       # This has the index of each solution in the population.
-       fitness_sorted = sorted(range(len(fitness)), key=lambda k: fitness[k])
+       # Return the indices of the sorted solutions (all solutions in the population).
+       # This function works with both single- and multi-objective optimization problems.
+       fitness_sorted = nsga2.sort_solutions_nsga2(fitness=fitness)

        # Rank the solutions based on their fitness. The worst is given rank 1. The best has rank N.
        rank = numpy.arange(1, self.sol_per_pop+1)
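For reference, one common way such ranks then turn into selection probabilities (a sketch of rank selection in general, with probability proportional to rank; rank N is best):

import numpy

sol_per_pop = 5
rank = numpy.arange(1, sol_per_pop + 1)      # worst solution gets rank 1, best gets rank N
probs = rank / rank.sum()                    # [0.067 0.133 0.2   0.267 0.333]
# Draw one parent position according to the rank-based probabilities.
parent_pos = numpy.random.choice(sol_per_pop, p=probs)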
@@ -74,7 +83,9 @@ def random_selection(self, fitness, num_parents):
        It accepts 2 parameters:
            -fitness: The fitness values of the solutions in the current population.
            -num_parents: The number of parents to be selected.
-       It returns an array of the selected parents.
+       It returns:
+           -An array of the selected parents.
+           -The indices of the selected solutions.
"""

if self.gene_type_single == True:
@@ -96,35 +107,68 @@ def tournament_selection(self, fitness, num_parents):
        It accepts 2 parameters:
            -fitness: The fitness values of the solutions in the current population.
            -num_parents: The number of parents to be selected.
-       It returns an array of the selected parents.
+       It returns:
+           -An array of the selected parents.
+           -The indices of the selected solutions.
        """

+       # Return the indices of the sorted solutions (all solutions in the population).
+       # This function works with both single- and multi-objective optimization problems.
+       fitness_sorted = nsga2.sort_solutions_nsga2(fitness=fitness)

        if self.gene_type_single == True:
            parents = numpy.empty((num_parents, self.population.shape[1]), dtype=self.gene_type[0])
        else:
            parents = numpy.empty((num_parents, self.population.shape[1]), dtype=object)

        parents_indices = []

        for parent_num in range(num_parents):
            # Generate random indices for the candidate solutions.
            rand_indices = numpy.random.randint(low=0.0, high=len(fitness), size=self.K_tournament)
-           K_fitnesses = fitness[rand_indices]
-           selected_parent_idx = numpy.where(K_fitnesses == numpy.max(K_fitnesses))[0][0]
+           # K_fitnesses = fitness[rand_indices]
+           # selected_parent_idx = numpy.where(K_fitnesses == numpy.max(K_fitnesses))[0][0]

+           # Find the rank of the candidate solutions. The lower the rank, the better the solution.
+           rand_indices_rank = [fitness_sorted.index(rand_idx) for rand_idx in rand_indices]
+           # Select the solution with the lowest rank as a parent.
+           selected_parent_idx = rand_indices_rank.index(min(rand_indices_rank))

+           # Append the index of the selected parent.
            parents_indices.append(rand_indices[selected_parent_idx])
+           # Insert the selected parent.
            parents[parent_num, :] = self.population[rand_indices[selected_parent_idx], :].copy()

        return parents, numpy.array(parents_indices)
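The reworked tournament can be pictured in isolation. A sketch assuming, as in the code above, that the sorted result is a best-first list of solution indices:

import numpy

def tournament_pick(fitness_sorted, k_tournament):
    # Draw k random candidate indices, then keep the one that appears
    # earliest in the best-first order (i.e. the lowest rank).
    candidates = numpy.random.randint(0, len(fitness_sorted), size=k_tournament)
    ranks = [fitness_sorted.index(int(idx)) for idx in candidates]
    return candidates[ranks.index(min(ranks))]

winner = tournament_pick([3, 0, 2, 1], k_tournament=2)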

def roulette_wheel_selection(self, fitness, num_parents):

"""
Selects the parents using the roulette wheel selection technique. Later, these parents will mate to produce the offspring.
It accepts 2 parameters:
-fitness: The fitness values of the solutions in the current population.
-num_parents: The number of parents to be selected.
-       It returns an array of the selected parents.
+       It returns:
+           -An array of the selected parents.
+           -The indices of the selected solutions.
"""


+       ## Make edits to work with multi-objective optimization.
+       ## The objective is to reduce the fitness from an M-D array to a 1D array.
+       ## There are 2 ways:
+       #    1) By summing the fitness values of each solution.
+       #    2) By using only 1 objective to create the roulette wheel and excluding the others.

+       # Take the sum of the fitness values of each solution.
+       if len(fitness.shape) > 1:
+           # Multi-objective optimization problem.
+           # Sum the fitness values of each solution to reduce the fitness from an M-D array to a 1D array.
+           fitness = numpy.sum(fitness, axis=1)
+       else:
+           # Single-objective optimization problem.
+           pass

+       # Reaching this step means that fitness is a 1D array.
        fitness_sum = numpy.sum(fitness)
        if fitness_sum == 0:
            self.logger.error("Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.")
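The reduction step can be traced with toy numbers (illustrative only):

import numpy

fitness = numpy.array([[1.0, 3.0],
                       [2.0, 2.0],
                       [5.0, 1.0]])
if fitness.ndim > 1:
    fitness = numpy.sum(fitness, axis=1)    # [4. 4. 6.] -> one scalar per solution
probs = fitness / numpy.sum(fitness)        # [0.286 0.286 0.429] -> wheel slice sizes

Note the design trade-off: summing weights all objectives equally, so objectives on larger scales dominate the wheel unless the fitness values are comparable.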
@@ -170,7 +214,9 @@ def wheel_cumulative_probs(self, probs, num_parents):
            probs_start[min_probs_idx] = curr
            curr = curr + probs[min_probs_idx]
            probs_end[min_probs_idx] = curr
-           probs[min_probs_idx] = 99999999999
+           # Replace 99999999999 by float('inf')
+           # probs[min_probs_idx] = 99999999999
+           probs[min_probs_idx] = float('inf')

        # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.
        if self.gene_type_single == True:
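The surrounding loop builds a [start, end) slice per solution, proportional to its probability; float('inf') marks entries already consumed by numpy.argmin. A standalone sketch of the same idea:

import numpy

probs = numpy.array([0.2, 0.5, 0.3])
probs_start = numpy.zeros_like(probs)
probs_end = numpy.zeros_like(probs)
work = probs.copy()

curr = 0.0
for _ in range(len(probs)):
    idx = numpy.argmin(work)       # smallest remaining probability
    probs_start[idx] = curr
    curr += probs[idx]
    probs_end[idx] = curr
    work[idx] = float('inf')       # exclude from later argmin calls
# A uniform draw in [0, 1) now falls into exactly one [start, end) slice.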
@@ -187,14 +233,34 @@ def stochastic_universal_selection(self, fitness, num_parents):
        It accepts 2 parameters:
            -fitness: The fitness values of the solutions in the current population.
            -num_parents: The number of parents to be selected.
-       It returns an array of the selected parents.
+       It returns:
+           -An array of the selected parents.
+           -The indices of the selected solutions.
        """

+       ## Make edits to work with multi-objective optimization.
+       ## The objective is to reduce the fitness from an M-D array to a 1D array.
+       ## There are 2 ways:
+       #    1) By summing the fitness values of each solution.
+       #    2) By using only 1 objective to create the roulette wheel and excluding the others.

+       # Take the sum of the fitness values of each solution.
+       if len(fitness.shape) > 1:
+           # Multi-objective optimization problem.
+           # Sum the fitness values of each solution to reduce the fitness from an M-D array to a 1D array.
+           fitness = numpy.sum(fitness, axis=1)
+       else:
+           # Single-objective optimization problem.
+           pass

+       # Reaching this step means that fitness is a 1D array.
        fitness_sum = numpy.sum(fitness)
        if fitness_sum == 0:
            self.logger.error("Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.")
            raise ZeroDivisionError("Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.")

        probs = fitness / fitness_sum

        probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.
        probs_end = numpy.zeros(probs.shape, dtype=float)   # An array holding the end values of the ranges of probabilities.

@@ -206,7 +272,9 @@ def stochastic_universal_selection(self, fitness, num_parents):
            probs_start[min_probs_idx] = curr
            curr = curr + probs[min_probs_idx]
            probs_end[min_probs_idx] = curr
-           probs[min_probs_idx] = 99999999999
+           # Replace 99999999999 by float('inf')
+           # probs[min_probs_idx] = 99999999999
+           probs[min_probs_idx] = float('inf')

        pointers_distance = 1.0 / self.num_parents_mating # Distance between different pointers.
        first_pointer = numpy.random.uniform(low=0.0,
@@ -234,8 +302,6 @@ def stochastic_universal_selection(self, fitness, num_parents):
    def tournament_selection_nsga2(self,
                                   fitness,
                                   num_parents
-                                  # pareto_fronts,
-                                  # solutions_fronts_indices,
                                   ):

"""
@@ -253,7 +319,9 @@ def tournament_selection_nsga2(self,
            -pareto_fronts: A nested array of all the pareto fronts. Each front has its solutions.
            -solutions_fronts_indices: A list of the pareto front index of each solution in the current population.
-       It returns an array of the selected parents alongside their indices in the population.
+       It returns:
+           -An array of the selected parents.
+           -The indices of the selected solutions.
"""

if self.gene_type_single == True:
Expand All @@ -263,19 +331,15 @@ def tournament_selection_nsga2(self,

# The indices of the selected parents.
parents_indices = []

        # TODO If there is only a single objective, each pareto front is expected to have only 1 solution.
-       # TODO Make a test to check for that behaviour.
+       # TODO Make a test to check for that behaviour and add it to the GitHub actions tests.
        pareto_fronts, solutions_fronts_indices = nsga2.non_dominated_sorting(fitness)

        # Randomly generate pairs of indices to apply for NSGA-II tournament selection for selecting the parent solutions.
        rand_indices = numpy.random.randint(low=0.0,
                                            high=len(solutions_fronts_indices),
                                            size=(num_parents, self.K_tournament))
-       # rand_indices[0, 0] = 5
-       # rand_indices[0, 1] = 3
-       # rand_indices[1, 0] = 1
-       # rand_indices[1, 1] = 6

for parent_num in range(num_parents):
# Return the indices of the current 2 solutions.
@@ -346,7 +410,7 @@ def tournament_selection_nsga2(self,
                else:
                    # If the random number is >= 0.5, then select the second solution.
                    selected_parent_idx = current_indices[1]

+           # Insert the selected parent index.
            parents_indices.append(selected_parent_idx)
            # Insert the selected parent.
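Pieced together from the fragments above, the pairwise NSGA-II comparison is roughly the following. A sketch under the usual NSGA-II assumptions: a lower front index wins; within the same front, the larger crowding distance wins; remaining ties are settled by a coin flip:

import numpy

def nsga2_tournament_pick(idx_a, idx_b, front_of, crowding):
    # front_of: pareto front index per solution (lower is better).
    # crowding: crowding distance per solution (higher is better).
    if front_of[idx_a] != front_of[idx_b]:
        return idx_a if front_of[idx_a] < front_of[idx_b] else idx_b
    if crowding[idx_a] != crowding[idx_b]:
        return idx_a if crowding[idx_a] > crowding[idx_b] else idx_b
    return idx_a if numpy.random.uniform() < 0.5 else idx_b

front_of = numpy.array([0, 0, 1])
crowding = numpy.array([numpy.inf, 0.4, numpy.inf])
print(nsga2_tournament_pick(0, 2, front_of, crowding))  # 0 -> better (lower) front wins
print(nsga2_tournament_pick(0, 1, front_of, crowding))  # 0 -> same front, larger crowding distance wins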
@@ -358,8 +422,6 @@ def stochastic_universal_selection(self, fitness, num_parents):
    def nsga2_selection(self,
                        fitness,
                        num_parents
-                       # pareto_fronts,
-                       # solutions_fronts_indices
                        ):

"""
@@ -378,7 +440,9 @@ def nsga2_selection(self,
            -pareto_fronts: A nested array of all the pareto fronts. Each front has its solutions.
            -solutions_fronts_indices: A list of the pareto front index of each solution in the current population.
-       It returns an array of the selected parents alongside their indices in the population.
+       It returns:
+           -An array of the selected parents.
+           -The indices of the selected solutions.
"""

if self.gene_type_single == True:
2 changes: 1 addition & 1 deletion pygad/visualize/__init__.py
@@ -1,3 +1,3 @@
from pygad.visualize import plot

__version__ = "1.0.0"
__version__ = "1.1.0"
