diff --git a/examples/arch/generic/layer/main.c b/examples/arch/generic/layer/main.c
index 2ab31fb..b3adce7 100644
--- a/examples/arch/generic/layer/main.c
+++ b/examples/arch/generic/layer/main.c
@@ -16,7 +16,7 @@ int main() {
     const int batch_size = 2;
 
     // Initialize a layer with the given input and output sizes, ReLU activation function, and dot product function
-    if (!nn_layer_init(&layer, input_size, output_size, nn_activation_func_relu, nn_dot_product, &error)) {
+    if (!nn_layer_init(&layer, input_size, output_size, &error)) {
         fprintf(stderr, "error: %s\n", error.message);
         return 1;
     }
@@ -33,6 +33,19 @@ int main() {
         return 1;
     }
 
+    // Set the dot product function of the layer
+    if (!nn_layer_set_dot_product_func(&layer, nn_dot_product, &error)) {
+        fprintf(stderr, "error: %s\n", error.message);
+        return 1;
+    }
+
+    // Set the activation function of the layer
+    NNActivationFunction act_func = {.scalar = nn_activation_func_relu};
+    if (!nn_layer_set_activation_func(&layer, act_func, &error)) {
+        fprintf(stderr, "error: %s\n", error.message);
+        return 1;
+    }
+
     // Generate random inputs
     float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE];
     for (size_t i = 0; i < batch_size; ++i) {
diff --git a/include/nn_activation.h b/include/nn_activation.h
index d5ec84b..593beeb 100644
--- a/include/nn_activation.h
+++ b/include/nn_activation.h
@@ -1,8 +1,21 @@
-#ifndef NN_ACTIVATION_FUNCTIONS_H
-#define NN_ACTIVATION_FUNCTIONS_H
+#ifndef NN_ACTIVATION_FUNCTION_H
+#define NN_ACTIVATION_FUNCTION_H
+
+#include "nn_error.h"
+#include <stdbool.h>
+#include <stddef.h>
+
+#ifndef NN_SOFTMAX_MAX_SIZE
+#define NN_SOFTMAX_MAX_SIZE 64
+#endif
 
 // NNActivationFunction represents an activation function.
-typedef float (*NNActivationFunction)(float);
+typedef float (*NNActivationFunctionScalar)(float);
+typedef bool (*NNActivationFunctionVector)(const float input[NN_SOFTMAX_MAX_SIZE], float output[NN_SOFTMAX_MAX_SIZE], size_t input_size, NNError *error);
+typedef union {
+    NNActivationFunctionScalar scalar;
+    NNActivationFunctionVector vector;
+} NNActivationFunction;
 
 // nn_activation_func_identity returns x.
 float nn_activation_func_identity(float x);
@@ -13,4 +26,7 @@ float nn_activation_func_sigmoid(float x);
 // nn_activation_func_relu returns the ReLU of x.
 float nn_activation_func_relu(float x);
 
-#endif // NN_ACTIVATION_FUNCTIONS_H
+// nn_activation_func_softmax calculates the softmax of the input and stores the result in the output.
+bool nn_activation_func_softmax(const float input[NN_SOFTMAX_MAX_SIZE], float output[NN_SOFTMAX_MAX_SIZE], size_t input_size, NNError *error);
+
+#endif // NN_ACTIVATION_FUNCTION_H
diff --git a/include/nn_error.h b/include/nn_error.h
index ffdfbf6..abad814 100644
--- a/include/nn_error.h
+++ b/include/nn_error.h
@@ -6,8 +6,11 @@
 // NNErrorCode defines the error codes.
 typedef enum {
     NN_ERROR_NONE = 0,                // no error
+    NN_ERROR_NOT_IMPLEMENTED,         // not implemented
     NN_ERROR_INVALID_INSTANCE,        // invalid instance
     NN_ERROR_INVALID_SIZE,            // invalid size
+    NN_ERROR_INVALID_VALUE,           // invalid value
+    NN_ERROR_INVALID_TYPE,            // invalid type
     NN_ERROR_NEON_NOT_AVAILABLE,      // NEON instructions not available
     NN_ERROR_CMSIS_DSP_NOT_AVAILABLE, // CMSIS-DSP functions not available
 } NNErrorCode;
diff --git a/include/nn_layer.h b/include/nn_layer.h
index 0571668..ca57c26 100644
--- a/include/nn_layer.h
+++ b/include/nn_layer.h
@@ -33,12 +33,12 @@ typedef struct {
     size_t output_size;
     float weights[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
     float biases[NN_LAYER_MAX_BIASES];
-    NNActivationFunction act_func;
     NNDotProductFunction dot_product_func;
+    NNActivationFunction act_func;
 } NNLayer;
 
 // nn_layer_init initializes a layer with the given arguments.
-bool nn_layer_init(NNLayer *layer, size_t input_size, size_t output_size, NNActivationFunction act_func, NNDotProductFunction dot_product_func, NNError *error);
+bool nn_layer_init(NNLayer *layer, size_t input_size, size_t output_size, NNError *error);
 
 // nn_layer_init_weights_gaussian initializes the weights of the layer with a Gaussian distribution.
 bool nn_layer_init_weights_gaussian(NNLayer *layer, float scale, NNError *error);
@@ -52,6 +52,12 @@ bool nn_layer_set_weights(NNLayer *layer, const float weights[NN_LAYER_MAX_OUTPU
 // nn_layer_set_biases sets the biases of the given layer.
 bool nn_layer_set_biases(NNLayer *layer, const float biases[NN_LAYER_MAX_BIASES], NNError *error);
 
+// nn_layer_set_dot_product_func sets the dot product function of the given layer.
+bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProductFunction dot_product_func, NNError *error);
+
+// nn_layer_set_activation_func sets the activation function of the given layer.
+bool nn_layer_set_activation_func(NNLayer *layer, NNActivationFunction act_func, NNError *error);
+
 // nn_layer_forward computes the given layer with the given inputs and stores the result in outputs.
 bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE], float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE], size_t batch_size, NNError *error);
 
diff --git a/include/nn_neuron.h b/include/nn_neuron.h
index 53171f4..de3e05e 100644
--- a/include/nn_neuron.h
+++ b/include/nn_neuron.h
@@ -19,12 +19,12 @@ typedef struct {
     float weights[NN_NEURON_MAX_WEIGHTS];
     size_t input_size;
     float bias;
-    NNActivationFunction act_func;
     NNDotProductFunction dot_product_func;
+    NNActivationFunctionScalar act_func;
 } NNNeuron;
 
 // nn_neuron_init initializes a neuron with the given arguments.
-bool nn_neuron_init(NNNeuron *neuron, const float weights[NN_NEURON_MAX_WEIGHTS], size_t input_size, float bias, NNActivationFunction act_func, NNDotProductFunction dot_product_func, NNError *error);
+bool nn_neuron_init(NNNeuron *neuron, const float weights[NN_NEURON_MAX_WEIGHTS], size_t input_size, float bias, NNError *error);
 
 // nn_neuron_set_weights sets the weights of the given neuron.
 bool nn_neuron_set_weights(NNNeuron *neuron, const float weights[NN_NEURON_MAX_WEIGHTS], NNError *error);
@@ -32,6 +32,12 @@ bool nn_neuron_set_weights(NNNeuron *neuron, const float weights[NN_NEURON_MAX_W
 // nn_neuron_set_bias sets the bias of the given neuron.
 bool nn_neuron_set_bias(NNNeuron *neuron, float bias, NNError *error);
 
+// nn_neuron_set_dot_product_func sets the dot product function of the given neuron.
+bool nn_neuron_set_dot_product_func(NNNeuron *neuron, NNDotProductFunction dot_product_func, NNError *error);
+
+// nn_neuron_set_activation_func sets the activation function of the given neuron.
+bool nn_neuron_set_activation_func(NNNeuron *neuron, NNActivationFunctionScalar act_func, NNError *error);
+
 // nn_neuron_compute computes the given neuron and returns the output.
 float nn_neuron_compute(const NNNeuron *neuron, const float inputs[NN_NEURON_MAX_WEIGHTS], NNError *error);
 
diff --git a/src/nn_activation.c b/src/nn_activation.c
index 6bf5e5b..08fae7c 100644
--- a/src/nn_activation.c
+++ b/src/nn_activation.c
@@ -1,8 +1,9 @@
 #include "nn_activation.h"
+#include "nn_error.h"
 #include <math.h>
+#include <stdbool.h>
 
 // TODO: Add tests
-// TODO: Add softmax activation function.
 
 // nn_activation_func_identity returns x.
 float nn_activation_func_identity(float x) {
@@ -18,3 +19,45 @@ float nn_activation_func_sigmoid(float x) {
 float nn_activation_func_relu(float x) {
     return fmaxf(0, x);
 }
+
+// nn_activation_func_softmax calculates the softmax of the input and stores the result in the output.
+bool nn_activation_func_softmax(const float input[NN_SOFTMAX_MAX_SIZE], float output[NN_SOFTMAX_MAX_SIZE], size_t input_size, NNError *error) {
+    nn_error_set(error, NN_ERROR_NONE, NULL);
+    if (input == NULL) {
+        nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "input is NULL");
+        return false;
+    } else if (output == NULL) {
+        nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "output is NULL");
+        return false;
+    } else if (input_size == 0 || input_size > NN_SOFTMAX_MAX_SIZE) {
+        nn_error_set(error, NN_ERROR_INVALID_SIZE, "invalid input size");
+        return false;
+    }
+
+    // Find the maximum input value
+    float max_input = input[0];
+    for (size_t i = 1; i < input_size; ++i) {
+        if (input[i] > max_input) {
+            max_input = input[i];
+        }
+    }
+
+    // Compute exp(input[i] - max_input) to prevent overflow
+    float sum = 0.0f;
+    for (size_t i = 0; i < input_size; ++i) {
+        output[i] = expf(input[i] - max_input);
+        sum += output[i];
+    }
+
+    if (sum == 0.0f) {
+        nn_error_set(error, NN_ERROR_INVALID_VALUE, "sum is zero");
+        return false;
+    }
+
+    // Normalize to form a probability distribution
+    for (size_t i = 0; i < input_size; ++i) {
+        output[i] /= sum;
+    }
+
+    return true;
+}
diff --git a/src/nn_layer.c b/src/nn_layer.c
index 5003d2b..215e05d 100644
--- a/src/nn_layer.c
+++ b/src/nn_layer.c
@@ -11,7 +11,7 @@
 #endif
 
 // nn_layer_init initializes a layer with the given arguments.
-bool nn_layer_init(NNLayer *layer, size_t input_size, size_t output_size, NNActivationFunction act_func, NNDotProductFunction dot_product_func, NNError *error) {
+bool nn_layer_init(NNLayer *layer, size_t input_size, size_t output_size, NNError *error) {
     nn_error_set(error, NN_ERROR_NONE, NULL);
     if (layer == NULL) {
         nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "layer is NULL");
@@ -27,12 +27,6 @@ bool nn_layer_init(NNLayer *layer, size_t input_size, size_t output_size, NNActi
     }
     layer->input_size = input_size;
     layer->output_size = output_size;
-    if (act_func) {
-        layer->act_func = act_func;
-    }
-    if (dot_product_func) {
-        layer->dot_product_func = dot_product_func;
-    }
 
     return true;
 }
@@ -104,6 +98,30 @@ bool nn_layer_set_biases(NNLayer *layer, const float biases[NN_LAYER_MAX_BIASES]
     return true;
 }
 
+// nn_layer_set_dot_product_func sets the dot product function of the given layer.
+bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProductFunction dot_product_func, NNError *error) {
+    nn_error_set(error, NN_ERROR_NONE, NULL);
+    if (layer == NULL) {
+        nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "layer is NULL");
+        return false;
+    }
+    layer->dot_product_func = dot_product_func;
+
+    return true;
+}
+
+// nn_layer_set_activation_func sets the activation function of the given layer.
+bool nn_layer_set_activation_func(NNLayer *layer, NNActivationFunction act_func, NNError *error) {
+    nn_error_set(error, NN_ERROR_NONE, NULL);
+    if (layer == NULL) {
+        nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "layer is NULL");
+        return false;
+    }
+    layer->act_func = act_func;
+
+    return true;
+}
+
 // nn_layer_forward computes the given layer with the given inputs and stores the result in outputs.
 bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE], float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE], size_t batch_size, NNError *error) {
     nn_error_set(error, NN_ERROR_NONE, NULL);
@@ -123,8 +141,8 @@ bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATC
             if (layer->dot_product_func != NULL) {
                 outputs[i][j] += layer->dot_product_func(inputs[i], layer->weights[j], layer->input_size);
             }
-            if (layer->act_func != NULL) {
-                outputs[i][j] = layer->act_func(outputs[i][j]);
+            if (layer->act_func.scalar != NULL) {
+                outputs[i][j] = layer->act_func.scalar(outputs[i][j]);
             }
         }
     }
diff --git a/src/nn_neuron.c b/src/nn_neuron.c
index de7c7b3..d1f359a 100644
--- a/src/nn_neuron.c
+++ b/src/nn_neuron.c
@@ -7,7 +7,7 @@
 #include
 
 // nn_neuron_init initializes a neuron with the given arguments.
-bool nn_neuron_init(NNNeuron *neuron, const float weights[NN_NEURON_MAX_WEIGHTS], size_t input_size, float bias, NNActivationFunction act_func, NNDotProductFunction dot_product_func, NNError *error) {
+bool nn_neuron_init(NNNeuron *neuron, const float weights[NN_NEURON_MAX_WEIGHTS], size_t input_size, float bias, NNError *error) {
     nn_error_set(error, NN_ERROR_NONE, NULL);
     if (neuron == NULL) {
         nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "neuron is NULL");
@@ -21,12 +21,6 @@ bool nn_neuron_init(NNNeuron *neuron, const float weights[NN_NEURON_MAX_WEIGHTS]
         neuron->weights[i] = weights[i];
     }
     neuron->bias = bias;
-    if (act_func != NULL) {
-        neuron->act_func = act_func;
-    }
-    if (dot_product_func != NULL) {
-        neuron->dot_product_func = dot_product_func;
-    }
 
     return true;
 }
@@ -54,6 +48,28 @@ bool nn_neuron_set_bias(NNNeuron *neuron, float bias, NNError *error) {
     return true;
 }
 
+// nn_neuron_set_dot_product_func sets the dot product function of the given neuron.
+bool nn_neuron_set_dot_product_func(NNNeuron *neuron, NNDotProductFunction dot_product_func, NNError *error) {
+    nn_error_set(error, NN_ERROR_NONE, NULL);
+    if (neuron == NULL) {
+        nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "neuron is NULL");
+        return false;
+    }
+    neuron->dot_product_func = dot_product_func;
+    return true;
+}
+
+// nn_neuron_set_activation_func sets the activation function of the given neuron.
+bool nn_neuron_set_activation_func(NNNeuron *neuron, NNActivationFunctionScalar act_func, NNError *error) {
+    nn_error_set(error, NN_ERROR_NONE, NULL);
+    if (neuron == NULL) {
+        nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "neuron is NULL");
+        return false;
+    }
+    neuron->act_func = act_func;
+    return true;
+}
+
 // nn_neuron_compute computes the given neuron and returns the output.
 float nn_neuron_compute(const NNNeuron *neuron, const float inputs[NN_NEURON_MAX_WEIGHTS], NNError *error) {
     nn_error_set(error, NN_ERROR_NONE, NULL);
diff --git a/tests/arch/arm/cmsis-dsp/neuron/main.c b/tests/arch/arm/cmsis-dsp/neuron/main.c
index 85c7bb0..c8aca33 100644
--- a/tests/arch/arm/cmsis-dsp/neuron/main.c
+++ b/tests/arch/arm/cmsis-dsp/neuron/main.c
@@ -29,7 +29,11 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProductF
         NNNeuron neuron;
         NNError error;
 
-        nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, nn_activation_func_identity, dot_product_func, &error);
+        nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, &error);
+        assert(error.code == NN_ERROR_NONE);
+        nn_neuron_set_dot_product_func(&neuron, dot_product_func, &error);
+        assert(error.code == NN_ERROR_NONE);
+        nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error);
         assert(error.code == NN_ERROR_NONE);
         const float output = nn_neuron_compute(&neuron, tc.inputs, &error);
         assert(error.code == NN_ERROR_NONE);
diff --git a/tests/arch/arm/cmsis-dsp/neuron_perf/main.c b/tests/arch/arm/cmsis-dsp/neuron_perf/main.c
index c2f9590..2a35dfb 100644
--- a/tests/arch/arm/cmsis-dsp/neuron_perf/main.c
+++ b/tests/arch/arm/cmsis-dsp/neuron_perf/main.c
@@ -32,7 +32,13 @@ int main(int argc, char *argv[]) {
         inputs[i] = (float)rand() / (float)RAND_MAX;
     }
 
-    if (!nn_neuron_init(&neuron, weights, input_size, bias, nn_activation_func_identity, nn_dot_product_cmsis, &error)) {
+    if (!nn_neuron_init(&neuron, weights, input_size, bias, &error)) {
+        printf("error: %s\n", error.message);
+        return 1;
+    } else if (!nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error)) {
+        printf("error: %s\n", error.message);
+        return 1;
+    } else if (!nn_neuron_set_dot_product_func(&neuron, nn_dot_product_cmsis, &error)) {
         printf("error: %s\n", error.message);
         return 1;
     }
diff --git a/tests/arch/arm/neon/neuron/main.c b/tests/arch/arm/neon/neuron/main.c
index baaa369..b67b265 100644
--- a/tests/arch/arm/neon/neuron/main.c
+++ b/tests/arch/arm/neon/neuron/main.c
@@ -29,7 +29,11 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProductF
         NNNeuron neuron;
         NNError error;
 
-        nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, nn_activation_func_identity, dot_product_func, &error);
+        nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, &error);
+        assert(error.code == NN_ERROR_NONE);
+        nn_neuron_set_dot_product_func(&neuron, dot_product_func, &error);
+        assert(error.code == NN_ERROR_NONE);
+        nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error);
         assert(error.code == NN_ERROR_NONE);
         const float output = nn_neuron_compute(&neuron, tc.inputs, &error);
         assert(error.code == NN_ERROR_NONE);
diff --git a/tests/arch/arm/neon/neuron_perf/main.c b/tests/arch/arm/neon/neuron_perf/main.c
index 0d099b9..2b4fcf3 100644
--- a/tests/arch/arm/neon/neuron_perf/main.c
+++ b/tests/arch/arm/neon/neuron_perf/main.c
@@ -33,7 +33,13 @@ int main(int argc, char *argv[]) {
         inputs[i] = (float)rand() / (float)RAND_MAX;
     }
 
-    if (!nn_neuron_init(&neuron, weights, input_size, bias, nn_activation_func_identity, nn_dot_product_neon, &error)) {
+    if (!nn_neuron_init(&neuron, weights, input_size, bias, &error)) {
+        printf("error: %s\n", error.message);
+        return 1;
+    } else if (!nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error)) {
+        printf("error: %s\n", error.message);
+        return 1;
+    } else if (!nn_neuron_set_dot_product_func(&neuron, nn_dot_product_neon, &error)) {
         printf("error: %s\n", error.message);
         return 1;
     }
diff --git a/tests/arch/generic/layer/main.c b/tests/arch/generic/layer/main.c
index 4c2d75f..0425dd5 100644
--- a/tests/arch/generic/layer/main.c
+++ b/tests/arch/generic/layer/main.c
@@ -18,8 +18,8 @@ typedef struct {
     size_t output_size;
     float weights[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
     float biases[NN_LAYER_MAX_BIASES];
-    NNActivationFunction act_func;
     NNDotProductFunction dot_product_func;
+    NNActivationFunction act_func;
     size_t batch_size;
     float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE];
     float output_tolerance;
@@ -33,7 +33,11 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info) {
         NNLayer layer;
         NNError error;
 
-        nn_layer_init(&layer, tc.input_size, tc.output_size, tc.act_func, tc.dot_product_func, &error);
+        nn_layer_init(&layer, tc.input_size, tc.output_size, &error);
+        assert(error.code == NN_ERROR_NONE);
+        nn_layer_set_dot_product_func(&layer, tc.dot_product_func, &error);
+        assert(error.code == NN_ERROR_NONE);
+        nn_layer_set_activation_func(&layer, tc.act_func, &error);
         assert(error.code == NN_ERROR_NONE);
         nn_layer_set_weights(&layer, tc.weights, &error);
         assert(error.code == NN_ERROR_NONE);
@@ -63,8 +67,8 @@ int main() {
                 {-0.3f, 0.4f, 0.2f, -0.5f},
             },
             .biases = {0.5f, -0.1f, 0.2f},
-            .act_func = nn_activation_func_identity,
             .dot_product_func = nn_dot_product,
+            .act_func = {.scalar = nn_activation_func_identity},
             .batch_size = 2,
             .inputs = {
                 {1.5f, -2.0f, 1.0f, -1.5f},
@@ -86,8 +90,8 @@ int main() {
                 {0.4f, 0.1f, -0.4f, 0.6f},
            },
            .biases = {1.0f, 0.5f, -0.2f},
-            .act_func = nn_activation_func_identity,
             .dot_product_func = nn_dot_product,
+            .act_func = {.scalar = nn_activation_func_identity},
             .batch_size = 2,
             .inputs = {
                 {0.5f, 0.1f, -0.2f, 0.4f},
@@ -109,8 +113,8 @@ int main() {
                 {0.1f, 0.4f, 0.2f, -0.2f},
             },
             .biases = {0.2f, -0.3f, 0.4f},
-            .act_func = nn_activation_func_identity,
             .dot_product_func = nn_dot_product,
+            .act_func = {.scalar = nn_activation_func_identity},
             .batch_size = 3,
             .inputs = {
                 {2.0f, -1.5f, 0.5f, 0.6f},
diff --git a/tests/arch/generic/layer_multi/main.c b/tests/arch/generic/layer_multi/main.c
index 7954312..eb54250 100644
--- a/tests/arch/generic/layer_multi/main.c
+++ b/tests/arch/generic/layer_multi/main.c
@@ -20,8 +20,8 @@ typedef struct {
     float biases[NN_LAYER_MAX_BIASES];
     float weights2[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
     float biases2[NN_LAYER_MAX_BIASES];
-    NNActivationFunction act_func;
     NNDotProductFunction dot_product_func;
+    NNActivationFunction act_func;
     size_t batch_size;
     float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE];
     float output_tolerance;
@@ -35,7 +35,11 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info) {
         NNLayer layer;
         NNError error;
 
-        nn_layer_init(&layer, tc.input_size, tc.output_size, tc.act_func, tc.dot_product_func, &error);
+        nn_layer_init(&layer, tc.input_size, tc.output_size, &error);
+        assert(error.code == NN_ERROR_NONE);
+        nn_layer_set_dot_product_func(&layer, tc.dot_product_func, &error);
+        assert(error.code == NN_ERROR_NONE);
+        nn_layer_set_activation_func(&layer, tc.act_func, &error);
         assert(error.code == NN_ERROR_NONE);
         nn_layer_set_weights(&layer, tc.weights, &error);
         assert(error.code == NN_ERROR_NONE);
@@ -79,8 +83,8 @@ int main() {
                 {0.5f, -0.9f, 0.1f},
             },
             .biases2 = {0.5f, 1.5f, -0.2f},
-            .act_func = nn_activation_func_identity,
             .dot_product_func = nn_dot_product,
+            .act_func = {.scalar = nn_activation_func_identity},
             .batch_size = 3,
             .inputs = {
                 {0.9f, -0.3f, 2.2f, 1.9f},
@@ -109,8 +113,8 @@ int main() {
                 {0.13f, -0.31f, 0.11f},
             },
             .biases2 = {-0.1f, 1.0f, 0.2f},
-            .act_func = nn_activation_func_identity,
             .dot_product_func = nn_dot_product,
+            .act_func = {.scalar = nn_activation_func_identity},
             .batch_size = 3,
             .inputs = {
                 {-0.5f, 2.1f, 1.9f, -1.3f},
@@ -139,8 +143,8 @@ int main() {
                 {-0.35f, 0.62f, -0.2f},
             },
             .biases2 = {0.7f, -1.1f, 0.3f},
-            .act_func = nn_activation_func_identity,
             .dot_product_func = nn_dot_product,
+            .act_func = {.scalar = nn_activation_func_identity},
             .batch_size = 3,
             .inputs = {
                 {0.2f, 2.8f, -1.5f, 1.6f},
diff --git a/tests/arch/generic/neuron/main.c b/tests/arch/generic/neuron/main.c
index c45ffee..99db943 100644
--- a/tests/arch/generic/neuron/main.c
+++ b/tests/arch/generic/neuron/main.c
@@ -28,8 +28,12 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProductF
         NNNeuron neuron;
         NNError error;
 
-        nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, nn_activation_func_identity, dot_product_func, &error);
+        nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, &error);
         assert(error.code == NN_ERROR_NONE);
+        nn_neuron_set_dot_product_func(&neuron, dot_product_func, &error);
+        assert(error.code == NN_ERROR_NONE);
+        nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error);
+        assert(error.code == NN_ERROR_NONE);
         const float output = nn_neuron_compute(&neuron, tc.inputs, &error);
         assert(error.code == NN_ERROR_NONE);
         assert(isnan(output) == false);
diff --git a/tests/arch/generic/neuron_perf/main.c b/tests/arch/generic/neuron_perf/main.c
index 709060a..f1ff2bb 100644
--- a/tests/arch/generic/neuron_perf/main.c
+++ b/tests/arch/generic/neuron_perf/main.c
@@ -27,7 +27,13 @@ int main(int argc, char *argv[]) {
         inputs[i] = (float)rand() / (float)RAND_MAX;
     }
 
-    if (!nn_neuron_init(&neuron, weights, input_size, bias, nn_activation_func_identity, nn_dot_product, &error)) {
+    if (!nn_neuron_init(&neuron, weights, input_size, bias, &error)) {
+        printf("error: %s\n", error.message);
+        return 1;
+    } else if (!nn_neuron_set_dot_product_func(&neuron, nn_dot_product, &error)) {
+        printf("error: %s\n", error.message);
+        return 1;
+    } else if (!nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error)) {
         printf("error: %s\n", error.message);
         return 1;
     }
diff --git a/tests/arch/generic/softmax/main.c b/tests/arch/generic/softmax/main.c
new file mode 100644
index 0000000..ebea912
--- /dev/null
+++ b/tests/arch/generic/softmax/main.c
@@ -0,0 +1,76 @@
+#include "nn_activation.h"
+#include "nn_config.h"
+#include <assert.h>
+#include <math.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+// N_TEST_CASES defines the number of test cases.
+#define N_TEST_CASES 4
+// DEFAULT_OUTPUT_TOLERANCE defines the default tolerance for comparing output values.
+#define DEFAULT_OUTPUT_TOLERANCE 0.000001f
+
+// TestCase defines a single test case.
+typedef struct {
+    float input[NN_SOFTMAX_MAX_SIZE];
+    size_t input_size;
+    NNActivationFunctionVector activation_func;
+    float output_tolerance;
+    float expected_output[NN_SOFTMAX_MAX_SIZE];
+} TestCase;
+
+// run_test_cases runs the test cases.
+void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNActivationFunctionVector activation_func) {
+    for (int i = 0; i < n_cases; ++i) {
+        TestCase tc = test_cases[i];
+        NNError error;
+
+        float output[NN_SOFTMAX_MAX_SIZE];
+        const bool result = activation_func(tc.input, output, tc.input_size, &error);
+        assert(result == true);
+        assert(error.code == NN_ERROR_NONE);
+        float sum = 0;
+        for (size_t j = 0; j < tc.input_size; ++j) {
+            assert(fabs(output[j] - tc.expected_output[j]) < tc.output_tolerance);
+            sum += output[j];
+        }
+        assert(fabs(sum - 1.0f) < tc.output_tolerance);
+        printf("passed: %s case=%d info=%s\n", __func__, i + 1, info);
+    }
+}
+
+int main() {
+    TestCase test_cases[N_TEST_CASES] = {
+        {
+            .input = {1.0, 2.0, 3.0},
+            .input_size = 3,
+            .output_tolerance = DEFAULT_OUTPUT_TOLERANCE,
+            .expected_output = {0.09003057317038046, 0.24472847105479767, 0.6652409557748219},
+        },
+
+        {
+            .input = {-1.0, -2.0, -3.0},
+            .input_size = 3,
+            .output_tolerance = DEFAULT_OUTPUT_TOLERANCE,
+            .expected_output = {0.6652409557748219, 0.24472847105479764, 0.09003057317038046},
+        },
+
+        {
+            .input = {3.12, 0.845, -0.917},
+            .input_size = 3,
+            .output_tolerance = DEFAULT_OUTPUT_TOLERANCE,
+            .expected_output = {0.89250074, 0.09174632, 0.01575295},
+        },
+
+        {
+            .input = {1.8, -3.21, 2.44},
+            .input_size = 3,
+            .output_tolerance = DEFAULT_OUTPUT_TOLERANCE,
+            .expected_output = {0.34445323, 0.00229781, 0.65324896},
+        },
+
+    };
+    run_test_cases(test_cases, N_TEST_CASES, "nn_activation_func_softmax", nn_activation_func_softmax);
+
+    return 0;
+}
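
For quick reference, below is a minimal usage sketch of the split initialization flow introduced by this change, mirroring examples/arch/generic/layer/main.c but for a single neuron: nn_neuron_init now only sets the weights, input size, and bias, and the dot product and scalar activation functions are attached through the new setters. Only the nn_neuron_* calls and types come from the headers changed above; the include list (in particular the "nn_dot_product.h" header name) and the main() scaffolding are assumptions for illustration, not part of this patch.

#include "nn_activation.h"
#include "nn_dot_product.h"
#include "nn_error.h"
#include "nn_neuron.h"
#include <stdio.h>

int main(void) {
    NNNeuron neuron;
    NNError error;
    const float weights[NN_NEURON_MAX_WEIGHTS] = {0.2f, 0.8f, -0.5f, 1.0f};
    const float inputs[NN_NEURON_MAX_WEIGHTS] = {1.0f, -2.0f, 3.0f, -4.0f};

    // Initialize the neuron, then attach the dot product and activation functions separately
    if (!nn_neuron_init(&neuron, weights, 4, 2.0f, &error) ||
        !nn_neuron_set_dot_product_func(&neuron, nn_dot_product, &error) ||
        !nn_neuron_set_activation_func(&neuron, nn_activation_func_relu, &error)) {
        fprintf(stderr, "error: %s\n", error.message);
        return 1;
    }

    // Compute the neuron output for the given inputs
    const float output = nn_neuron_compute(&neuron, inputs, &error);
    if (error.code != NN_ERROR_NONE) {
        fprintf(stderr, "error: %s\n", error.message);
        return 1;
    }
    printf("output: %f\n", output);

    return 0;
}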
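Similarly, a small sketch of calling the new vector activation directly: nn_activation_func_softmax reports failure through its bool return value and the NNError out-parameter, so both are checked. Everything here comes from nn_activation.h as changed above, except the main() wrapper, which is invented for illustration.

#include "nn_activation.h"
#include "nn_error.h"
#include <stddef.h>
#include <stdio.h>

int main(void) {
    const float logits[NN_SOFTMAX_MAX_SIZE] = {1.0f, 2.0f, 3.0f};
    float probs[NN_SOFTMAX_MAX_SIZE];
    NNError error;

    // Convert the first 3 logits into a probability distribution
    if (!nn_activation_func_softmax(logits, probs, 3, &error)) {
        fprintf(stderr, "error: %s\n", error.message);
        return 1;
    }
    for (size_t i = 0; i < 3; ++i) {
        printf("probs[%zu] = %f\n", i, probs[i]);
    }

    return 0;
}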
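One design note on the NNActivationFunction union: a layer now stores either a scalar or a vector activation in the same field, but nn_layer_forward above only invokes the .scalar member, so a vector function such as softmax can be stored in a layer yet is not applied during the forward pass. Hypothetical assignments would look like this:

// Scalar activation: applied per output element by nn_layer_forward
NNActivationFunction relu_act = {.scalar = nn_activation_func_relu};
// Vector activation: representable in the union, but not yet called by nn_layer_forward
NNActivationFunction softmax_act = {.vector = nn_activation_func_softmax};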