Skip to content

Commit b58d524

Browse files
committed
Enhancements
1 parent 2f7914c commit b58d524

File tree

6 files changed

+75
-138
lines changed

6 files changed

+75
-138
lines changed

pygad/cnn/cnn.py

Lines changed: 3 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import numpy
22
import functools
33
import logging
4+
from ..helper.activations import sigmoid, relu, softmax
45

56
"""
67
Convolutional neural network implementation using NumPy
@@ -14,56 +15,8 @@
1415
# Supported activation functions by the cnn.py module.
1516
supported_activation_functions = ("sigmoid", "relu", "softmax")
1617

17-
def sigmoid(sop):
18-
19-
"""
20-
Applies the sigmoid function.
21-
22-
sop: The input to which the sigmoid function is applied.
23-
24-
Returns the result of the sigmoid function.
25-
"""
26-
27-
if type(sop) in [list, tuple]:
28-
sop = numpy.array(sop)
29-
30-
return 1.0 / (1 + numpy.exp(-1 * sop))
31-
32-
def relu(sop):
33-
34-
"""
35-
Applies the rectified linear unit (ReLU) function.
36-
37-
sop: The input to which the relu function is applied.
38-
39-
Returns the result of the ReLU function.
40-
"""
41-
42-
if not (type(sop) in [list, tuple, numpy.ndarray]):
43-
if sop < 0:
44-
return 0
45-
else:
46-
return sop
47-
elif type(sop) in [list, tuple]:
48-
sop = numpy.array(sop)
49-
50-
result = sop
51-
result[sop < 0] = 0
52-
53-
return result
54-
55-
def softmax(layer_outputs):
56-
57-
"""
58-
Applies the softmax function.
59-
60-
sop: The input to which the softmax function is applied.
61-
62-
Returns the result of the softmax function.
63-
"""
64-
return layer_outputs / (numpy.sum(layer_outputs) + 0.000001)
65-
66-
def layers_weights(model, initial=True):
18+
def layers_weights(model,
19+
initial=True):
6720

6821
"""
6922
Creates a list holding the weights of all layers in the CNN.

pygad/helper/activations.py

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
import numpy
2+
3+
def sigmoid(sop):
4+
"""
5+
Applies the sigmoid function.
6+
7+
sop: The input to which the sigmoid function is applied.
8+
9+
Returns the result of the sigmoid function.
10+
"""
11+
12+
if type(sop) in [list, tuple]:
13+
sop = numpy.array(sop)
14+
15+
return 1.0 / (1 + numpy.exp(-1 * sop))
16+
17+
def relu(sop):
18+
"""
19+
Applies the ReLU function.
20+
21+
sop: The input to which the relu function is applied.
22+
23+
Returns the result of the ReLU function.
24+
"""
25+
26+
if not (type(sop) in [list, tuple, numpy.ndarray]):
27+
if sop < 0:
28+
return 0
29+
else:
30+
return sop
31+
elif type(sop) in [list, tuple]:
32+
sop = numpy.array(sop)
33+
34+
result = sop
35+
result[sop < 0] = 0
36+
37+
return result
38+
39+
def softmax(layer_outputs):
40+
"""
41+
Applies the softmax function.
42+
43+
layer_outputs: The input to which the softmax function is applied.
44+
45+
Returns the result of the softmax function.
46+
"""
47+
return layer_outputs / (numpy.sum(layer_outputs) + 0.000001)

pygad/nn/nn.py

Lines changed: 2 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import numpy
22
import functools
3+
from ..helper.activations import sigmoid, relu, softmax
34

45
"""
56
This project creates a neural network where the architecture has input and dense layers only. More layers will be added in the future.
@@ -140,57 +141,7 @@ def layers_activations(last_layer):
140141
activations.reverse()
141142
return activations
142143

143-
def sigmoid(sop):
144-
145-
"""
146-
Applies the sigmoid function.
147-
148-
sop: The input to which the sigmoid function is applied.
149-
150-
Returns the result of the sigmoid function.
151-
"""
152-
153-
if type(sop) in [list, tuple]:
154-
sop = numpy.array(sop)
155-
156-
return 1.0 / (1 + numpy.exp(-1 * sop))
157-
158-
def relu(sop):
159-
160-
"""
161-
Applies the rectified linear unit (ReLU) function.
162-
163-
sop: The input to which the relu function is applied.
164-
165-
Returns the result of the ReLU function.
166-
"""
167-
168-
if not (type(sop) in [list, tuple, numpy.ndarray]):
169-
if sop < 0:
170-
return 0
171-
else:
172-
return sop
173-
elif type(sop) in [list, tuple]:
174-
sop = numpy.array(sop)
175-
176-
result = sop
177-
result[sop < 0] = 0
178-
179-
return result
180-
181-
def softmax(layer_outputs):
182-
183-
"""
184-
Applies the softmax function.
185-
186-
sop: The input to which the softmax function is applied.
187-
188-
Returns the result of the softmax function.
189-
"""
190-
return layer_outputs / (numpy.sum(layer_outputs) + 0.000001)
191-
192-
def train(num_epochs,
193-
last_layer,
144+
def train(num_epochs, last_layer,
194145
data_inputs,
195146
data_outputs,
196147
problem_type="classification",

pygad/utils/crossover.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -198,13 +198,11 @@ def uniform_crossover(self, parents, offspring_size):
198198
# Index of the second parent to mate.
199199
parent2_idx = (k+1) % parents.shape[0]
200200

201-
for gene_idx in range(offspring_size[1]):
202-
if (genes_sources[k, gene_idx] == 0):
203-
# The gene will be copied from the first parent if the current gene index is 0.
204-
offspring[k, gene_idx] = parents[parent1_idx, gene_idx]
205-
elif (genes_sources[k, gene_idx] == 1):
206-
# The gene will be copied from the second parent if the current gene index is 1.
207-
offspring[k, gene_idx] = parents[parent2_idx, gene_idx]
201+
# The gene will be copied from the first parent if the current gene index is 0.
202+
# The gene will be copied from the second parent if the current gene index is 1.
203+
offspring[k, :] = numpy.where(genes_sources[k] == 0,
204+
parents[parent1_idx, :],
205+
parents[parent2_idx, :])
208206

209207
if self.allow_duplicate_genes == False:
210208
if self.gene_space is None:
@@ -268,6 +266,8 @@ def scattered_crossover(self, parents, offspring_size):
268266
# Index of the second parent to mate.
269267
parent2_idx = (k+1) % parents.shape[0]
270268

269+
# The gene will be copied from the first parent if the current gene index is 0.
270+
# The gene will be copied from the second parent if the current gene index is 1.
271271
offspring[k, :] = numpy.where(genes_sources[k] == 0,
272272
parents[parent1_idx, :],
273273
parents[parent2_idx, :])

pygad/utils/engine.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -6,12 +6,12 @@
66
class GAEngine:
77

88
def round_genes(self, solutions):
9-
for gene_idx in range(self.num_genes):
10-
if self.gene_type_single:
11-
if not self.gene_type[1] is None:
12-
solutions[:, gene_idx] = numpy.round(solutions[:, gene_idx],
13-
self.gene_type[1])
14-
else:
9+
if self.gene_type_single:
10+
if not self.gene_type[1] is None:
11+
solutions = numpy.round(numpy.asarray(solutions, dtype=self.gene_type[0]),
12+
self.gene_type[1])
13+
else:
14+
for gene_idx in range(self.num_genes):
1515
if not self.gene_type[gene_idx][1] is None:
1616
solutions[:, gene_idx] = numpy.round(numpy.asarray(solutions[:, gene_idx],
1717
dtype=self.gene_type[gene_idx][0]),
@@ -77,6 +77,7 @@ def initialize_population(self,
7777
sample_size=1)
7878

7979
# 2) Change the data type and round all genes within the initial population.
80+
# This step is necessary before applying the gene constraints since the right gene value must be used for accuracy.
8081
self.population = self.change_population_dtype_and_round(self.population)
8182

8283
# Note that gene_constraint is not validated yet.

pygad/utils/parent_selection.py

Lines changed: 9 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -112,12 +112,14 @@ def tournament_selection(self, fitness, num_parents):
112112
parents = self.initialize_parents_array((num_parents, self.population.shape[1]))
113113
parents_indices = []
114114

115+
rank_lookup = {sol_idx: rank for rank, sol_idx in enumerate(fitness_sorted)}
116+
115117
for parent_num in range(num_parents):
116118
# Generate random indices for the candidate solutions.
117119
rand_indices = numpy.random.randint(low=0, high=len(fitness), size=self.K_tournament)
118120

119121
# Find the rank of the candidate solutions. The lower the rank, the better the solution.
120-
rand_indices_rank = [fitness_sorted.index(rand_idx) for rand_idx in rand_indices]
122+
rand_indices_rank = [rank_lookup[rand_idx] for rand_idx in rand_indices]
121123
# Select the solution with the lowest rank as a parent.
122124
selected_parent_idx = rand_indices_rank.index(min(rand_indices_rank))
123125

@@ -196,17 +198,10 @@ def wheel_cumulative_probs(self, probs, num_parents):
196198
probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.
197199
probs_end = numpy.zeros(probs.shape, dtype=float) # An array holding the end values of the ranges of probabilities.
198200

199-
curr = 0.0
200-
201-
# Calculating the probabilities of the solutions to form a roulette wheel.
202-
for _ in range(probs.shape[0]):
203-
min_probs_idx = numpy.where(probs == numpy.min(probs))[0][0]
204-
probs_start[min_probs_idx] = curr
205-
curr = curr + probs[min_probs_idx]
206-
probs_end[min_probs_idx] = curr
207-
# Replace 99999999999 by float('inf')
208-
# probs[min_probs_idx] = 99999999999
209-
probs[min_probs_idx] = float('inf')
201+
sorted_indices = numpy.argsort(probs)
202+
cumulative = numpy.cumsum(probs[sorted_indices])
203+
probs_start[sorted_indices] = numpy.concatenate([[0.0], cumulative[:-1]])
204+
probs_end[sorted_indices] = cumulative
210205

211206
# Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.
212207
parents = self.initialize_parents_array((num_parents, self.population.shape[1]))
@@ -248,18 +243,8 @@ def stochastic_universal_selection(self, fitness, num_parents):
248243

249244
probs = fitness / fitness_sum
250245

251-
probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.
252-
probs_end = numpy.zeros(probs.shape, dtype=float) # An array holding the end values of the ranges of probabilities.
253-
254-
curr = 0.0
255-
256-
# Calculating the probabilities of the solutions to form a roulette wheel.
257-
for _ in range(probs.shape[0]):
258-
min_probs_idx = numpy.where(probs == numpy.min(probs))[0][0]
259-
probs_start[min_probs_idx] = curr
260-
curr = curr + probs[min_probs_idx]
261-
probs_end[min_probs_idx] = curr
262-
probs[min_probs_idx] = float('inf')
246+
probs_start, probs_end, parents = self.wheel_cumulative_probs(probs=probs.copy(),
247+
num_parents=num_parents)
263248

264249
pointers_distance = 1.0 / self.num_parents_mating # Distance between different pointers.
265250
first_pointer = numpy.random.uniform(low=0.0,

0 commit comments

Comments
 (0)