
Commit

Merge pull request #79 from ArtLinkov/master
Fixed for Crystal 0.25.0
ArtLinkov authored Jun 17, 2018
2 parents 7adac95 + a079996 commit 0fc1b5f
Showing 6 changed files with 48 additions and 44 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -4,7 +4,7 @@
/.shards/
/spec/test_data/*.csv
/spec/*.csv
my_net.nn
*.nn
# Libraries don't need dependency lock
# Dependencies will be locked in application that uses them
/shard.lock
8 changes: 4 additions & 4 deletions spec/network_spec.cr
@@ -111,11 +111,11 @@ describe SHAInet::Network do
xor = SHAInet::Network.new

xor.add_layer(:input, 2, "memory", SHAInet.sigmoid)
1.times { |x| xor.add_layer(:hidden, 2, "memory", SHAInet.sigmoid) }
1.times { |x| xor.add_layer(:hidden, 3, "memory", SHAInet.sigmoid) }
xor.add_layer(:output, 1, "memory", SHAInet.sigmoid)
xor.fully_connect

xor.learning_rate = 0.1
xor.learning_rate = 0.7
xor.momentum = 0.3

xor.train(
@@ -557,5 +557,5 @@ end

# Remove train data
system("cd #{__DIR__}/test_data && rm *.csv")
File.delete("my_net.nn")
File.delete("xor.nn")
File.delete("my_net.nn") rescue nil
File.delete("xor.nn") rescue nil
20 changes: 12 additions & 8 deletions src/shainet/basic/network.cr
@@ -251,6 +251,9 @@ module SHAInet

@total_error = @error_signal.reduce(0.0) { |acc, i| acc + i } # Sum up all the errors from output layer

# puts "@error_signal: #{@error_signal}"
# puts "@total_error: #{@total_error}"


rescue e : Exception
raise NeuralNetRunError.new("Error in evaluate: #{e}")
@@ -338,6 +341,7 @@
proc = get_cost_proc(cost_function.to_s)
cost_function = proc
end

counter = 0_i64
loop do
# Show training progress of epochs
@@ -363,8 +367,8 @@

# batch_mean = [] of Float64
# all_errors = [] of Float64
batch_mean = 0.0
all_errors = 0.0
batch_mean = 0.0_f64
all_errors = 0.0_f64

# Save gradients from entire batch before updating weights & biases
@w_gradient = Array(Float64).new(@all_synapses.size) { 0.0 }
@@ -395,13 +399,13 @@

# Calculate MSE per data point
if @error_signal.size == 1
error_avg = 0.0
error_avg = 0.0_f64
else
error_avg = @total_error/@output_layers.last.neurons.size
end
# sqrd_dists = [] of Float64
# @error_signal.each { |e| sqrd_dists << (e - error_avg)**2 }
sqrd_dists = 0.0
sqrd_dists = 0.0_f64
@error_signal.each { |e| sqrd_dists += (e - error_avg)**2 }

# @mse = (sqrd_dists.reduce { |acc, i| acc + i })/@output_layers.last.neurons.size
@@ -433,8 +437,8 @@
@logger.info("Slice: (#{i} / #{slices}), MSE: #{@mse}") if show_slice
# @logger.info("@error_signal: #{@error_signal}")
end
counter += 1
end
counter += 1
end
end

@@ -586,7 +590,7 @@

# Counters for disply
i = 0
slices = data.size / mini_batch_size
slices = (data.size.to_f64 / mini_batch_size).ceil.to_i

raw_data.each_slice(batch_size, reuse = false) do |data_slice|
verify_data(data_slice)
@@ -603,7 +607,7 @@

# Go over each data points and collect errors
# based on each specific example in the batch
batch_mse_sum = 0.0
batch_mse_sum = 0.0_f64
batch_errors_sum = Array(Float64).new(@output_layers.last.neurons.size) { 0.0 }

data_slice.each do |data_point|
@@ -627,8 +631,8 @@
@logger.info("Slice: (#{i} / #{slices}), MSE: #{@mse}") if show_slice
# @logger.info("@error_signal: #{@error_signal}")
end
counter += 1
end
counter += 1
end
end

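The slice-count change is the one place in this file where arithmetic, not just literal style, is at stake: integer division truncates, so a trailing partial mini-batch would not be counted and the "Slice: (i / slices)" progress line could run past its denominator. A small sketch of the arithmetic with made-up sizes, not values taken from the library:

data_size       = 10
mini_batch_size = 4

# each_slice(4) over 10 points yields 3 slices of 4, 4 and 2 elements.
# Plain integer division would report only 2, because 10 / 4 truncates.

slices = (data_size.to_f64 / mini_batch_size).ceil.to_i
puts slices # => 3
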
34 changes: 17 additions & 17 deletions src/shainet/basic/neuron.cr
@@ -15,26 +15,26 @@ module SHAInet
raise NeuralNetInitalizationError.new("Must choose currect neuron types, if you're not sure choose 'memory' as a standard neuron") unless NEURON_TYPES.includes?(@n_type)
@synapses_in = [] of Synapse
@synapses_out = [] of Synapse
@activation = Float64.new(0) # Activation of neuron after squashing function (a)
@gradient = Float64.new(0) # Error of the neuron, sometimes refered to as delta
@bias = rand(-1..1).to_f64 # Activation threshhold (b)
@prev_bias = rand(-1..1).to_f64 # Needed for delta rule improvement using momentum
@activation = 0_f64 # Activation of neuron after squashing function (a)
@gradient = 0_f64 # Error of the neuron, sometimes refered to as delta
@bias = rand(-1_f64..1_f64) # Activation threshhold (b)
@prev_bias = rand(-1_f64..1_f64) # Needed for delta rule improvement using momentum

@input_sum = Float64.new(0) # Sum of activations*weights from input neurons (z)
@sigma_prime = Float64.new(1) # derivative of input_sum based on activation function used (s')
@gradient_sum = Float64.new(0) # Needed for back propagation of convolution layers
@gradient_batch = Float64.new(0) # Needed for batch-train
@input_sum = 0_f64 # Sum of activations*weights from input neurons (z)
@sigma_prime = 1_f64 # derivative of input_sum based on activation function used (s')
@gradient_sum = 0_f64 # Needed for back propagation of convolution layers
@gradient_batch = 0_f64 # Needed for batch-train

# Parameters needed for Rprop
@prev_gradient = rand(-0.1..0.1).to_f64
@prev_delta = rand(0.0..0.1).to_f64
@prev_delta_b = rand(-0.1..0.1).to_f64
@prev_gradient = rand(-0.1_f64..0.1_f64)
@prev_delta = rand(0.0_f64..0.1_f64)
@prev_delta_b = rand(-0.1_f64..0.1_f64)

# Parameters needed for Adam
@m_current = Float64.new(0) # Current moment value
@v_current = Float64.new(0) # Current moment**2 value
@m_prev = Float64.new(0) # Previous moment value
@v_prev = Float64.new(0) # Previous moment**2 value
@m_current = 0_f64 # Current moment value
@v_current = 0_f64 # Current moment**2 value
@m_prev = 0_f64 # Previous moment value
@v_prev = 0_f64 # Previous moment**2 value
end

# This is the forward propogation
@@ -53,7 +53,7 @@
# Allows the neuron to absorb the error from its' own target neurons through the synapses
# Then, it sums the information and a derivative of the activation function is applied to normalize the data
def hidden_error_prop : Float64
weighted_error_sum = Float64.new(0)
weighted_error_sum = 0_f64
@synapses_out.each do |synapse| # Calculate weighted error from each target neuron, returns Array(Float64)
weighted_error_sum += synapse.propagate_backward
end
@@ -97,7 +97,7 @@
end

def randomize_bias
@bias = rand(-1.0..1.0).to_f64
@bias = rand(-1_f64..1_f64)
end

def update_bias(value : Float64)
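Beyond swapping Float64.new(0) for the terser 0_f64 literal, the rand calls now take Float64 ranges. A plausible reason, stated here as an assumption rather than something the commit spells out: rand over an integer range returns integers, so rand(-1..1).to_f64 can only ever produce -1.0, 0.0 or 1.0, while a Float64 range draws a uniform value across the whole interval. A short sketch of the difference:

# Int range: one of -1, 0 or 1, converted to Float64 afterwards.
coarse = rand(-1..1).to_f64

# Float64 range: a uniform value anywhere in [-1.0, 1.0].
fine = rand(-1_f64..1_f64)

puts coarse
puts fine
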
26 changes: 13 additions & 13 deletions src/shainet/basic/synapse.cr
@@ -6,22 +6,22 @@ module SHAInet
property m_current : Float64, v_current : Float64, m_prev : Float64, v_prev : Float64

def initialize(@source_neuron : Neuron, @dest_neuron : Neuron)
@weight = rand(-0.1..0.1).to_f64 # Weight of the synapse
@gradient = rand(-0.1..0.1).to_f64 # Error of the synapse with respect to cost function (dC/dW)
@gradient_sum = Float64.new(0) # Needed for batch train
@gradient_batch = Float64.new(0) # Needed for batch train
@prev_weight = Float64.new(0) # Needed for delta rule improvement (with momentum)
@weight = rand(-0.1_f64..0.1_f64) # Weight of the synapse
@gradient = rand(-0.1_f64..0.1_f64) # Error of the synapse with respect to cost function (dC/dW)
@gradient_sum = 0_f64 # Needed for batch train
@gradient_batch = 0_f64 # Needed for batch train
@prev_weight = 0_f64 # Needed for delta rule improvement (with momentum)

# Parameters needed for Rprop
@prev_gradient = 0.0
@prev_delta = 0.1
@prev_delta_w = 0.1
@prev_gradient = 0.0_f64
@prev_delta = 0.1_f64
@prev_delta_w = 0.1_f64

# Parameters needed for Adam
@m_current = Float64.new(0) # Current moment value
@v_current = Float64.new(0) # Current moment**2 value
@m_prev = Float64.new(0) # Previous moment value
@v_prev = Float64.new(0) # Previous moment**2 value
@m_current = 0_f64 # Current moment value
@v_current = 0_f64 # Current moment**2 value
@m_prev = 0_f64 # Previous moment value
@v_prev = 0_f64 # Previous moment**2 value
end

# Transfer memory from source_neuron to dest_neuron while applying weight
@@ -45,7 +45,7 @@
end

def randomize_weight
@weight = rand(-0.1..0.1).to_f64
@weight = rand(-0.1_f64..0.1_f64)
end

def clone
2 changes: 1 addition & 1 deletion src/shainet/math/functions.cr
@@ -186,7 +186,7 @@ module SHAInet
new_vector
end

# vector elment-by-element multiplication
# vector elment-by-element addition
def self.vector_sum(array1 : Array(Float64), array2 : Array(Float64))
raise MathError.new("Vectors must be the same size to sum!") if array1.size != array2.size

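The comment fix brings the description in line with what the method does: vector_sum adds two equal-length Float64 arrays element by element and raises a MathError otherwise. A small usage sketch; the require path and the expected result are assumptions based on the source layout, not output captured from the library:

require "shainet"

a = [1.0, 2.0, 3.0]
b = [0.5, 0.5, 0.5]

# Element-wise sum; expected to return [1.5, 2.5, 3.5].
puts SHAInet.vector_sum(a, b)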
