From a0799967b822e355249fbd63bfd935bc023cafd5 Mon Sep 17 00:00:00 2001
From: art
Date: Sun, 17 Jun 2018 12:57:28 +0300
Subject: [PATCH] Fixed for Crystal 0.25.0

---
 .gitignore                    |  2 +-
 spec/network_spec.cr          |  8 ++++----
 src/shainet/basic/network.cr  | 20 ++++++++++++--------
 src/shainet/basic/neuron.cr   | 34 +++++++++++++++++-----------------
 src/shainet/basic/synapse.cr  | 26 +++++++++++++-------------
 src/shainet/math/functions.cr |  2 +-
 6 files changed, 48 insertions(+), 44 deletions(-)

diff --git a/.gitignore b/.gitignore
index 70ac01f..c7322b6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,7 @@
 /.shards/
 /spec/test_data/*.csv
 /spec/*.csv
-my_net.nn
+*.nn
 # Libraries don't need dependency lock
 # Dependencies will be locked in application that uses them
 /shard.lock
diff --git a/spec/network_spec.cr b/spec/network_spec.cr
index 56429cb..6a0961d 100644
--- a/spec/network_spec.cr
+++ b/spec/network_spec.cr
@@ -111,11 +111,11 @@ describe SHAInet::Network do
     xor = SHAInet::Network.new
     xor.add_layer(:input, 2, "memory", SHAInet.sigmoid)
-    1.times { |x| xor.add_layer(:hidden, 2, "memory", SHAInet.sigmoid) }
+    1.times { |x| xor.add_layer(:hidden, 3, "memory", SHAInet.sigmoid) }
     xor.add_layer(:output, 1, "memory", SHAInet.sigmoid)
     xor.fully_connect

-    xor.learning_rate = 0.1
+    xor.learning_rate = 0.7
     xor.momentum = 0.3

     xor.train(
@@ -557,5 +557,5 @@ end

 # Remove train data
 system("cd #{__DIR__}/test_data && rm *.csv")
-File.delete("my_net.nn")
-File.delete("xor.nn")
+File.delete("my_net.nn") rescue nil
+File.delete("xor.nn") rescue nil
diff --git a/src/shainet/basic/network.cr b/src/shainet/basic/network.cr
index 1956bd9..376e9d9 100644
--- a/src/shainet/basic/network.cr
+++ b/src/shainet/basic/network.cr
@@ -251,6 +251,9 @@ module SHAInet
       @total_error = @error_signal.reduce(0.0) { |acc, i| acc + i } # Sum up all the errors from output layer

+      # puts "@error_signal: #{@error_signal}"
+      # puts "@total_error: #{@total_error}"
+
     rescue e : Exception
       raise NeuralNetRunError.new("Error in evaluate: #{e}")
@@ -338,6 +341,7 @@ module SHAInet
       proc = get_cost_proc(cost_function.to_s)
       cost_function = proc
     end
+    counter = 0_i64

     loop do
       # Show training progress of epochs
@@ -363,8 +367,8 @@ module SHAInet
        # batch_mean = [] of Float64
        # all_errors = [] of Float64
-        batch_mean = 0.0
-        all_errors = 0.0
+        batch_mean = 0.0_f64
+        all_errors = 0.0_f64

        # Save gradients from entire batch before updating weights & biases
        @w_gradient = Array(Float64).new(@all_synapses.size) { 0.0 }
@@ -395,13 +399,13 @@ module SHAInet
        # Calculate MSE per data point
        if @error_signal.size == 1
-          error_avg = 0.0
+          error_avg = 0.0_f64
        else
          error_avg = @total_error/@output_layers.last.neurons.size
        end

        # sqrd_dists = [] of Float64
        # @error_signal.each { |e| sqrd_dists << (e - error_avg)**2 }
-        sqrd_dists = 0.0
+        sqrd_dists = 0.0_f64
        @error_signal.each { |e| sqrd_dists += (e - error_avg)**2 }

        # @mse = (sqrd_dists.reduce { |acc, i| acc + i })/@output_layers.last.neurons.size
@@ -433,8 +437,8 @@ module SHAInet
          @logger.info("Slice: (#{i} / #{slices}), MSE: #{@mse}") if show_slice
          # @logger.info("@error_signal: #{@error_signal}")
        end
-        counter += 1
      end
+      counter += 1
    end
  end
@@ -586,7 +590,7 @@ module SHAInet
    # Counters for disply
    i = 0
-    slices = data.size / mini_batch_size
+    slices = (data.size.to_f64 / mini_batch_size).ceil.to_i

    raw_data.each_slice(batch_size, reuse = false) do |data_slice|
      verify_data(data_slice)
@@ -603,7 +607,7 @@ module SHAInet
      # Go over each data points and collect errors
      # based on each specific example in the batch
-      batch_mse_sum = 0.0
+      batch_mse_sum = 0.0_f64
      batch_errors_sum = Array(Float64).new(@output_layers.last.neurons.size) { 0.0 }

      data_slice.each do |data_point|
@@ -627,8 +631,8 @@ module SHAInet
        @logger.info("Slice: (#{i} / #{slices}), MSE: #{@mse}") if show_slice
        # @logger.info("@error_signal: #{@error_signal}")
      end
-      counter += 1
    end
+    counter += 1
  end
 end
diff --git a/src/shainet/basic/neuron.cr b/src/shainet/basic/neuron.cr
index 84b4d03..53b9825 100644
--- a/src/shainet/basic/neuron.cr
+++ b/src/shainet/basic/neuron.cr
@@ -15,26 +15,26 @@ module SHAInet
      raise NeuralNetInitalizationError.new("Must choose currect neuron types, if you're not sure choose 'memory' as a standard neuron") unless NEURON_TYPES.includes?(@n_type)
      @synapses_in = [] of Synapse
      @synapses_out = [] of Synapse
-      @activation = Float64.new(0)    # Activation of neuron after squashing function (a)
-      @gradient = Float64.new(0)      # Error of the neuron, sometimes refered to as delta
-      @bias = rand(-1..1).to_f64      # Activation threshhold (b)
-      @prev_bias = rand(-1..1).to_f64 # Needed for delta rule improvement using momentum
+      @activation = 0_f64              # Activation of neuron after squashing function (a)
+      @gradient = 0_f64                # Error of the neuron, sometimes refered to as delta
+      @bias = rand(-1_f64..1_f64)      # Activation threshhold (b)
+      @prev_bias = rand(-1_f64..1_f64) # Needed for delta rule improvement using momentum

-      @input_sum = Float64.new(0)      # Sum of activations*weights from input neurons (z)
-      @sigma_prime = Float64.new(1)    # derivative of input_sum based on activation function used (s')
-      @gradient_sum = Float64.new(0)   # Needed for back propagation of convolution layers
-      @gradient_batch = Float64.new(0) # Needed for batch-train
+      @input_sum = 0_f64      # Sum of activations*weights from input neurons (z)
+      @sigma_prime = 1_f64    # derivative of input_sum based on activation function used (s')
+      @gradient_sum = 0_f64   # Needed for back propagation of convolution layers
+      @gradient_batch = 0_f64 # Needed for batch-train

      # Parameters needed for Rprop
-      @prev_gradient = rand(-0.1..0.1).to_f64
-      @prev_delta = rand(0.0..0.1).to_f64
-      @prev_delta_b = rand(-0.1..0.1).to_f64
+      @prev_gradient = rand(-0.1_f64..0.1_f64)
+      @prev_delta = rand(0.0_f64..0.1_f64)
+      @prev_delta_b = rand(-0.1_f64..0.1_f64)

      # Parameters needed for Adam
-      @m_current = Float64.new(0) # Current moment value
-      @v_current = Float64.new(0) # Current moment**2 value
-      @m_prev = Float64.new(0)    # Previous moment value
-      @v_prev = Float64.new(0)    # Previous moment**2 value
+      @m_current = 0_f64 # Current moment value
+      @v_current = 0_f64 # Current moment**2 value
+      @m_prev = 0_f64    # Previous moment value
+      @v_prev = 0_f64    # Previous moment**2 value
    end

    # This is the forward propogation
@@ -53,7 +53,7 @@ module SHAInet
    # Allows the neuron to absorb the error from its' own target neurons through the synapses
    # Then, it sums the information and a derivative of the activation function is applied to normalize the data
    def hidden_error_prop : Float64
-      weighted_error_sum = Float64.new(0)
+      weighted_error_sum = 0_f64
      @synapses_out.each do |synapse| # Calculate weighted error from each target neuron, returns Array(Float64)
        weighted_error_sum += synapse.propagate_backward
      end
@@ -97,7 +97,7 @@ module SHAInet
    end

    def randomize_bias
-      @bias = rand(-1.0..1.0).to_f64
+      @bias = rand(-1_f64..1_f64)
    end

    def update_bias(value : Float64)
diff --git a/src/shainet/basic/synapse.cr b/src/shainet/basic/synapse.cr
index 2d400e8..42c719a 100644
--- a/src/shainet/basic/synapse.cr
+++ b/src/shainet/basic/synapse.cr
@@ -6,22 +6,22 @@ module SHAInet
    property m_current : Float64, v_current : Float64, m_prev : Float64, v_prev : Float64

    def initialize(@source_neuron : Neuron, @dest_neuron : Neuron)
-      @weight = rand(-0.1..0.1).to_f64   # Weight of the synapse
-      @gradient = rand(-0.1..0.1).to_f64 # Error of the synapse with respect to cost function (dC/dW)
-      @gradient_sum = Float64.new(0)     # Needed for batch train
-      @gradient_batch = Float64.new(0)   # Needed for batch train
-      @prev_weight = Float64.new(0)      # Needed for delta rule improvement (with momentum)
+      @weight = rand(-0.1_f64..0.1_f64)   # Weight of the synapse
+      @gradient = rand(-0.1_f64..0.1_f64) # Error of the synapse with respect to cost function (dC/dW)
+      @gradient_sum = 0_f64               # Needed for batch train
+      @gradient_batch = 0_f64             # Needed for batch train
+      @prev_weight = 0_f64                # Needed for delta rule improvement (with momentum)

      # Parameters needed for Rprop
-      @prev_gradient = 0.0
-      @prev_delta = 0.1
-      @prev_delta_w = 0.1
+      @prev_gradient = 0.0_f64
+      @prev_delta = 0.1_f64
+      @prev_delta_w = 0.1_f64

      # Parameters needed for Adam
-      @m_current = Float64.new(0) # Current moment value
-      @v_current = Float64.new(0) # Current moment**2 value
-      @m_prev = Float64.new(0)    # Previous moment value
-      @v_prev = Float64.new(0)    # Previous moment**2 value
+      @m_current = 0_f64 # Current moment value
+      @v_current = 0_f64 # Current moment**2 value
+      @m_prev = 0_f64    # Previous moment value
+      @v_prev = 0_f64    # Previous moment**2 value
    end

    # Transfer memory from source_neuron to dest_neuron while applying weight
@@ -45,7 +45,7 @@
    end

    def randomize_weight
-      @weight = rand(-0.1..0.1).to_f64
+      @weight = rand(-0.1_f64..0.1_f64)
    end

    def clone
diff --git a/src/shainet/math/functions.cr b/src/shainet/math/functions.cr
index 552acbe..79ee58e 100644
--- a/src/shainet/math/functions.cr
+++ b/src/shainet/math/functions.cr
@@ -186,7 +186,7 @@ module SHAInet
    new_vector
  end

-  # vector elment-by-element multiplication
+  # vector elment-by-element addition
  def self.vector_sum(array1 : Array(Float64), array2 : Array(Float64))
    raise MathError.new("Vectors must be the same size to sum!") if array1.size != array2.size