diff --git a/examples/arch/generic/layer/main.c b/examples/arch/generic/layer/main.c
index 93f4022..c464655 100644
--- a/examples/arch/generic/layer/main.c
+++ b/examples/arch/generic/layer/main.c
@@ -34,7 +34,7 @@ int main() {
     }

     // Set the dot product function of the layer
-    if (!nn_layer_set_dot_product_func(&layer, nn_dot_prod, &error)) {
+    if (!nn_layer_set_dot_prod_func(&layer, nn_dot_prod, &error)) {
         fprintf(stderr, "error: %s\n", error.message);
         return 1;
     }
diff --git a/include/nn_layer.h b/include/nn_layer.h
index b14777b..b6e538e 100644
--- a/include/nn_layer.h
+++ b/include/nn_layer.h
@@ -39,7 +39,7 @@ typedef struct {
     size_t output_size;
     float weights[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
     float biases[NN_LAYER_MAX_BIASES];
-    NNDotProdFunc dot_product_func;
+    NNDotProdFunc dot_prod_func;
 } NNLayer;

 // nn_layer_init initializes a layer with the given arguments.
@@ -57,8 +57,8 @@ bool nn_layer_set_weights(NNLayer *layer, const float weights[NN_LAYER_MAX_OUTPU
 // nn_layer_set_biases sets the biases of the given layer.
 bool nn_layer_set_biases(NNLayer *layer, const float biases[NN_LAYER_MAX_BIASES], NNError *error);

-// nn_layer_set_dot_product_func sets the dot product function of the given layer.
-bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProdFunc dot_product_func, NNError *error);
+// nn_layer_set_dot_prod_func sets the dot product function of the given layer.
+bool nn_layer_set_dot_prod_func(NNLayer *layer, NNDotProdFunc dot_prod_func, NNError *error);

 // nn_layer_forward computes the given layer with the given inputs and stores the result in outputs.
 bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE], float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE], size_t batch_size, NNError *error);
diff --git a/include/nn_neuron.h b/include/nn_neuron.h
index 093f652..bb50669 100644
--- a/include/nn_neuron.h
+++ b/include/nn_neuron.h
@@ -19,7 +19,7 @@ typedef struct {
     float weights[NN_NEURON_MAX_WEIGHTS];
     size_t input_size;
     float bias;
-    NNDotProdFunc dot_product_func;
+    NNDotProdFunc dot_prod_func;
     NNActFuncScalar act_func;
 } NNNeuron;

@@ -33,7 +33,7 @@ bool nn_neuron_set_weights(NNNeuron *neuron, const float weights[NN_NEURON_MAX_W
 bool nn_neuron_set_bias(NNNeuron *neuron, float bias, NNError *error);

 // nn_neuron_set_dot_prod_func sets the dot product function of the given neuron.
-bool nn_neuron_set_dot_prod_func(NNNeuron *neuron, NNDotProdFunc dot_product_func, NNError *error);
+bool nn_neuron_set_dot_prod_func(NNNeuron *neuron, NNDotProdFunc dot_prod_func, NNError *error);

 // nn_neuron_set_act_func sets the activation function of the given neuron.
 bool nn_neuron_set_act_func(NNNeuron *neuron, NNActFuncScalar act_func, NNError *error);
diff --git a/src/nn_layer.c b/src/nn_layer.c
index 0eec6db..20a1735 100644
--- a/src/nn_layer.c
+++ b/src/nn_layer.c
@@ -93,14 +93,14 @@ bool nn_layer_set_biases(NNLayer *layer, const float biases[NN_LAYER_MAX_BIASES]
     return true;
 }

-// nn_layer_set_dot_product_func sets the dot product function of the given layer.
-bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProdFunc dot_product_func, NNError *error) {
+// nn_layer_set_dot_prod_func sets the dot product function of the given layer.
+bool nn_layer_set_dot_prod_func(NNLayer *layer, NNDotProdFunc dot_prod_func, NNError *error) {
     nn_error_set(error, NN_ERROR_NONE, NULL);
     if (layer == NULL) {
         nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "layer is NULL");
         return false;
     }
-    layer->dot_product_func = dot_product_func;
+    layer->dot_prod_func = dot_prod_func;

     return true;
 }
@@ -114,7 +114,7 @@ bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATC
     } else if (batch_size == 0) {
         nn_error_set(error, NN_ERROR_INVALID_SIZE, "invalid batch size");
         return false;
-    } else if (layer->dot_product_func == NULL) {
+    } else if (layer->dot_prod_func == NULL) {
         nn_error_set(error, NN_ERROR_INVALID_FUNCTION, "dot product function is NULL");
         return false;
     }
@@ -123,7 +123,7 @@ bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATC
     for (size_t i = 0; i < batch_size; ++i) {
         // Iterate over output neurons
         for (size_t j = 0; j < layer->output_size; ++j) {
-            outputs[i][j] = layer->dot_product_func(inputs[i], layer->weights[j], layer->input_size) + layer->biases[j];
+            outputs[i][j] = layer->dot_prod_func(inputs[i], layer->weights[j], layer->input_size) + layer->biases[j];
         }
     }

diff --git a/src/nn_neuron.c b/src/nn_neuron.c
index 09ea86b..f9e7755 100644
--- a/src/nn_neuron.c
+++ b/src/nn_neuron.c
@@ -49,13 +49,13 @@ bool nn_neuron_set_bias(NNNeuron *neuron, float bias, NNError *error) {
 }

 // nn_neuron_set_dot_prod_func sets the dot product function of the given neuron.
-bool nn_neuron_set_dot_prod_func(NNNeuron *neuron, NNDotProdFunc dot_product_func, NNError *error) {
+bool nn_neuron_set_dot_prod_func(NNNeuron *neuron, NNDotProdFunc dot_prod_func, NNError *error) {
     nn_error_set(error, NN_ERROR_NONE, NULL);
     if (neuron == NULL) {
         nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "neuron is NULL");
         return false;
     }
-    neuron->dot_product_func = dot_product_func;
+    neuron->dot_prod_func = dot_prod_func;

     return true;
 }
@@ -87,9 +87,9 @@ float nn_neuron_compute(const NNNeuron *neuron, const float inputs[NN_NEURON_MAX
     // 3. Apply the activation function

     // Compute the dot product
-    if (neuron->dot_product_func != NULL) {
+    if (neuron->dot_prod_func != NULL) {
         // Sum the weighted inputs
-        result = neuron->dot_product_func(neuron->weights, inputs, neuron->input_size);
+        result = neuron->dot_prod_func(neuron->weights, inputs, neuron->input_size);
     }
     // Add the bias
     result += neuron->bias;
diff --git a/tests/arch/arm/cmsis-dsp/neuron/main.c b/tests/arch/arm/cmsis-dsp/neuron/main.c
index 130b576..7a32b67 100644
--- a/tests/arch/arm/cmsis-dsp/neuron/main.c
+++ b/tests/arch/arm/cmsis-dsp/neuron/main.c
@@ -17,13 +17,13 @@ typedef struct {
     float weights[NN_NEURON_MAX_WEIGHTS];
     size_t input_size;
     float bias;
-    NNDotProdFunc dot_product_func;
+    NNDotProdFunc dot_prod_func;
     float output_tolerance;
     float expected_output;
 } TestCase;

 // run_test_cases runs the test cases.
-void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_product_func) {
+void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_prod_func) {
     for (int i = 0; i < n_cases; ++i) {
         TestCase tc = test_cases[i];
         NNNeuron neuron;
@@ -31,7 +31,7 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc

         nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, &error);
         assert(error.code == NN_ERROR_NONE);
-        nn_neuron_set_dot_prod_func(&neuron, dot_product_func, &error);
+        nn_neuron_set_dot_prod_func(&neuron, dot_prod_func, &error);
         assert(error.code == NN_ERROR_NONE);
         nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error);
         assert(error.code == NN_ERROR_NONE);
diff --git a/tests/arch/arm/neon/neuron/main.c b/tests/arch/arm/neon/neuron/main.c
index 6dcb378..ac4b880 100644
--- a/tests/arch/arm/neon/neuron/main.c
+++ b/tests/arch/arm/neon/neuron/main.c
@@ -17,13 +17,13 @@ typedef struct {
     float weights[NN_NEURON_MAX_WEIGHTS];
     size_t input_size;
     float bias;
-    NNDotProdFunc dot_product_func;
+    NNDotProdFunc dot_prod_func;
     float output_tolerance;
     float expected_output;
 } TestCase;

 // run_test_cases runs the test cases.
-void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_product_func) {
+void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_prod_func) {
     for (int i = 0; i < n_cases; ++i) {
         TestCase tc = test_cases[i];
         NNNeuron neuron;
@@ -31,7 +31,7 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc

         nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, &error);
         assert(error.code == NN_ERROR_NONE);
-        nn_neuron_set_dot_prod_func(&neuron, dot_product_func, &error);
+        nn_neuron_set_dot_prod_func(&neuron, dot_prod_func, &error);
         assert(error.code == NN_ERROR_NONE);
         nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error);
         assert(error.code == NN_ERROR_NONE);
diff --git a/tests/arch/generic/dot_product/main.c b/tests/arch/generic/dot_product/main.c
index 68a5c12..632a4ec 100644
--- a/tests/arch/generic/dot_product/main.c
+++ b/tests/arch/generic/dot_product/main.c
@@ -16,17 +16,17 @@ typedef struct {
     float b[4];
     size_t vector_size;
     float bias;
-    NNDotProdFunc dot_product_func;
+    NNDotProdFunc dot_prod_func;
     float output_tolerance;
     float expected_output;
 } TestCase;

 // run_test_cases runs the test cases.
-void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_product_func) {
+void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_prod_func) {
     for (int i = 0; i < n_cases; ++i) {
         TestCase tc = test_cases[i];

-        const float output = dot_product_func(tc.a, tc.b, tc.vector_size);
+        const float output = dot_prod_func(tc.a, tc.b, tc.vector_size);
         assert(isnan(output) == false);
         assert(fabs(output - tc.expected_output) < tc.output_tolerance);
         printf("passed: %s case=%d info=%s\n", __func__, i + 1, info);
diff --git a/tests/arch/generic/layer/main.c b/tests/arch/generic/layer/main.c
index 8a9fe5c..8464af7 100644
--- a/tests/arch/generic/layer/main.c
+++ b/tests/arch/generic/layer/main.c
@@ -18,7 +18,7 @@ typedef struct {
     size_t output_size;
     float weights[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
     float biases[NN_LAYER_MAX_BIASES];
-    NNDotProdFunc dot_product_func;
+    NNDotProdFunc dot_prod_func;
     NNActFuncScalar act_func_scalar;
     NNActFuncVector act_func_vector;
     size_t batch_size;
@@ -36,7 +36,7 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info) {

         nn_layer_init(&layer, tc.input_size, tc.output_size, &error);
         assert(error.code == NN_ERROR_NONE);
-        nn_layer_set_dot_product_func(&layer, tc.dot_product_func, &error);
+        nn_layer_set_dot_prod_func(&layer, tc.dot_prod_func, &error);
         assert(error.code == NN_ERROR_NONE);
         nn_layer_set_weights(&layer, tc.weights, &error);
         assert(error.code == NN_ERROR_NONE);
@@ -77,7 +77,7 @@ int main() {
                 {-0.3f, 0.4f, 0.2f, -0.5f},
             },
             .biases = {0.5f, -0.1f, 0.2f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_scalar = nn_act_func_identity,
             .batch_size = 2,
             .inputs = {
@@ -100,7 +100,7 @@ int main() {
                 {-0.3f, 0.4f, 0.2f, -0.5f},
             },
             .biases = {0.5f, -0.1f, 0.2f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_scalar = nn_act_func_relu,
             .batch_size = 2,
             .inputs = {
@@ -123,7 +123,7 @@ int main() {
                 {-0.3f, 0.4f, 0.2f, -0.5f},
             },
             .biases = {0.5f, -0.1f, 0.2f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_vector = nn_act_func_softmax,
             .batch_size = 2,
             .inputs = {
@@ -146,7 +146,7 @@ int main() {
                 {0.4f, 0.1f, -0.4f, 0.6f},
             },
             .biases = {1.0f, 0.5f, -0.2f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_scalar = nn_act_func_identity,
             .batch_size = 2,
             .inputs = {
@@ -169,7 +169,7 @@ int main() {
                 {0.4f, 0.1f, -0.4f, 0.6f},
             },
             .biases = {1.0f, 0.5f, -0.2f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_scalar = nn_act_func_relu,
             .batch_size = 2,
             .inputs = {
@@ -192,7 +192,7 @@ int main() {
                 {0.4f, 0.1f, -0.4f, 0.6f},
             },
             .biases = {1.0f, 0.5f, -0.2f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_vector = nn_act_func_softmax,
             .batch_size = 2,
             .inputs = {
@@ -215,7 +215,7 @@ int main() {
                 {0.1f, 0.4f, 0.2f, -0.2f},
             },
             .biases = {0.2f, -0.3f, 0.4f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_scalar = nn_act_func_identity,
             .batch_size = 3,
             .inputs = {
@@ -240,7 +240,7 @@ int main() {
                 {0.1f, 0.4f, 0.2f, -0.2f},
             },
             .biases = {0.2f, -0.3f, 0.4f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_vector = nn_act_func_softmax,
             .batch_size = 3,
             .inputs = {
@@ -265,7 +265,7 @@ int main() {
                 {0.1f, 0.4f, 0.2f, -0.2f},
             },
             .biases = {0.2f, -0.3f, 0.4f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_scalar = nn_act_func_relu,
             .batch_size = 3,
             .inputs = {
diff --git a/tests/arch/generic/layer_multi/main.c b/tests/arch/generic/layer_multi/main.c
index 82c7fd3..bf03085 100644
--- a/tests/arch/generic/layer_multi/main.c
+++ b/tests/arch/generic/layer_multi/main.c
@@ -20,7 +20,7 @@ typedef struct {
     float biases[NN_LAYER_MAX_BIASES];
     float weights2[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
     float biases2[NN_LAYER_MAX_BIASES];
-    NNDotProdFunc dot_product_func;
+    NNDotProdFunc dot_prod_func;
     NNActFuncScalar act_func_scalar;
     size_t batch_size;
     float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE];
@@ -37,7 +37,7 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info) {

         nn_layer_init(&layer, tc.input_size, tc.output_size, &error);
         assert(error.code == NN_ERROR_NONE);
-        nn_layer_set_dot_product_func(&layer, tc.dot_product_func, &error);
+        nn_layer_set_dot_prod_func(&layer, tc.dot_prod_func, &error);
         assert(error.code == NN_ERROR_NONE);
         nn_layer_set_weights(&layer, tc.weights, &error);
         assert(error.code == NN_ERROR_NONE);
@@ -86,7 +86,7 @@ int main() {
                 {0.5f, -0.9f, 0.1f},
             },
             .biases2 = {0.5f, 1.5f, -0.2f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_scalar = nn_act_func_identity,
             .batch_size = 3,
             .inputs = {
@@ -116,7 +116,7 @@ int main() {
                 {0.13f, -0.31f, 0.11f},
             },
             .biases2 = {-0.1f, 1.0f, 0.2f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_scalar = nn_act_func_identity,
             .batch_size = 3,
             .inputs = {
@@ -146,7 +146,7 @@ int main() {
                 {-0.35f, 0.62f, -0.2f},
             },
             .biases2 = {0.7f, -1.1f, 0.3f},
-            .dot_product_func = nn_dot_prod,
+            .dot_prod_func = nn_dot_prod,
             .act_func_scalar = nn_act_func_identity,
             .batch_size = 3,
             .inputs = {
diff --git a/tests/arch/generic/neuron/main.c b/tests/arch/generic/neuron/main.c
index b4846aa..eba1575 100644
--- a/tests/arch/generic/neuron/main.c
+++ b/tests/arch/generic/neuron/main.c
@@ -16,13 +16,13 @@ typedef struct {
     float weights[NN_NEURON_MAX_WEIGHTS];
     size_t input_size;
     float bias;
-    NNDotProdFunc dot_product_func;
+    NNDotProdFunc dot_prod_func;
     float output_tolerance;
     float expected_output;
 } TestCase;

 // run_test_cases runs the test cases.
-void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_product_func) {
+void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_prod_func) {
     for (int i = 0; i < n_cases; ++i) {
         TestCase tc = test_cases[i];
         NNNeuron neuron;
@@ -30,7 +30,7 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc

         nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, &error);
         assert(error.code == NN_ERROR_NONE);
-        nn_neuron_set_dot_prod_func(&neuron, dot_product_func, &error);
+        nn_neuron_set_dot_prod_func(&neuron, dot_prod_func, &error);
         assert(error.code == NN_ERROR_NONE);
         nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error);
         const float output = nn_neuron_compute(&neuron, tc.inputs, &error);