diff --git a/Makefile b/Makefile
index 2a5f7ab..eabf46e 100644
--- a/Makefile
+++ b/Makefile
@@ -37,6 +37,16 @@ test:
 		$(MAKE) run-test ARCH=$(ARCH) TECH=$(TECH) TEST=$$test || exit 1; \
 	done
 
+## test-all Build and run all tests (e.g., make test-all)
+test-all:
+	@$(MAKE) build-tests ARCH=generic
+	@$(MAKE) build-tests ARCH=arm TECH=neon
+	@$(MAKE) build-tests ARCH=arm TECH=cmsis-dsp
+	@echo " "
+	@$(MAKE) run-tests ARCH=generic
+	@$(MAKE) run-tests ARCH=arm TECH=neon
+	@$(MAKE) run-tests ARCH=arm TECH=cmsis-dsp
+
 ## build-test Build a test (e.g., make build-test ARCH=generic TEST=arch/generic/neuron)
 build-test:
 	@echo building $(TEST)
@@ -56,6 +66,14 @@ run-test:
 	@ARCH=$(ARCH) TECH=$(TECH) ARTIFACT=tests/$(TEST) ARGS="$(ARGS)" scripts/shell/run_artifact.sh
 	@echo " "
 
+## run-tests Run tests (e.g., make run-tests ARCH=generic)
+run-tests:
+	@$(eval TECH_FILTER := $(if $(TECH),$(shell echo $(TECH) | tr ',' '|'),.*))
+	@$(eval TESTS := $(shell find tests/arch/$(ARCH) -type f -name 'main.c' | grep -E "$(TECH_FILTER)" | sed 's|/main.c||' | sed 's|tests/||'))
+	@for test in $(TESTS); do \
+		$(MAKE) run-test ARCH=$(ARCH) TECH=$(TECH) TEST=$$test || exit 1; \
+	done
+
 ## build-example Build an example (e.g., make build-example ARCH=generic EXAMPLE=arch/generic/layer)
 build-example:
 	@echo building $(EXAMPLE)
@@ -75,7 +93,7 @@ run-example:
 	@ARCH=$(ARCH) ARTIFACT=examples/$(EXAMPLE) ARGS="$(ARGS)" scripts/shell/run_artifact.sh
 	@echo " "
 
-# run-examples Run examples (e.g., make run-examples ARCH=generic)
+## run-examples Run examples (e.g., make run-examples ARCH=generic)
 run-examples:
 	@$(eval TECH_FILTER := $(if $(TECH),$(shell echo $(TECH) | tr ',' '|'),.*))
 	@$(eval EXAMPLES := $(shell find examples/arch/$(ARCH) -type f -name 'main.c' | grep -E "$(TECH_FILTER)" | sed 's|/main.c||' | sed 's|examples/||'))
diff --git a/README.md b/README.md
index 3f14786..43e1e7a 100644
--- a/README.md
+++ b/README.md
@@ -31,9 +31,6 @@ git submodule update --init
 ```shell
 make build-examples ARCH=generic
 make run-examples ARCH=generic
-
-make build-examples ARCH=arm TECH=neon,cmsis-dsp
-make run-examples ARCH=arm TECH=neon,cmsis-dsp
 ```
 
 ## Test
@@ -42,6 +39,7 @@ make run-examples ARCH=arm TECH=neon,cmsis-dsp
 make test
 make test ARCH=generic
 make test ARCH=arm TECH=neon,cmsis-dsp
+make test-all
 ```
 
 ## Contributing
diff --git a/examples/arch/generic/layer/main.c b/examples/arch/generic/layer/main.c
index b3adce7..50abb42 100644
--- a/examples/arch/generic/layer/main.c
+++ b/examples/arch/generic/layer/main.c
@@ -39,13 +39,6 @@ int main() {
         return 1;
     }
 
-    // Set the activation function of the layer
-    NNActivationFunction act_func = {.scalar = nn_activation_func_relu};
-    if (!nn_layer_set_activation_func(&layer, act_func, &error)) {
-        fprintf(stderr, "error: %s\n", error.message);
-        return 1;
-    }
-
     // Generate random inputs
     float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE];
     for (size_t i = 0; i < batch_size; ++i) {
@@ -56,11 +49,19 @@ int main() {
 
     // Compute the layer with the given inputs
     float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE];
-    if (!nn_layer_forward(&layer, inputs, outputs, 2, &error)) {
+    if (!nn_layer_forward(&layer, inputs, outputs, batch_size, &error)) {
         fprintf(stderr, "error: %s\n", error.message);
         return 1;
    }
 
+    // Compute the ReLU activation function on the outputs
+    for (size_t i = 0; i < batch_size; ++i) {
+        if (!nn_act_func_forward_scalar(nn_act_func_relu, outputs[i], outputs[i], output_size, &error)) {
+            fprintf(stderr, "error: %s\n", error.message);
+            return 1;
+        }
+    }
+
     // 
Print the inputs for (size_t i = 0; i < batch_size; ++i) { for (size_t j = 0; j < input_size; ++j) { diff --git a/include/nn_activation.h b/include/nn_activation.h index 593beeb..86278ea 100644 --- a/include/nn_activation.h +++ b/include/nn_activation.h @@ -5,28 +5,36 @@ #include #include -#ifndef NN_SOFTMAX_MAX_SIZE -#define NN_SOFTMAX_MAX_SIZE 64 +#ifndef NN_AF_FORWARD_MAX_SIZE +#define NN_AF_FORWARD_MAX_SIZE 64 #endif -// NNActivationFunction represents an activation function. -typedef float (*NNActivationFunctionScalar)(float); -typedef bool (*NNActivationFunctionVector)(const float input[NN_SOFTMAX_MAX_SIZE], float output[NN_SOFTMAX_MAX_SIZE], size_t input_size, NNError *error); -typedef union { - NNActivationFunctionScalar scalar; - NNActivationFunctionVector vector; -} NNActivationFunction; +#ifndef NN_AF_VECTOR_MAX_SIZE +#define NN_AF_VECTOR_MAX_SIZE 64 +#endif + +// NNActFuncScalar represents a scalar activation function. +typedef float (*NNActFuncScalar)(float); + +// NNActFuncVector represents a vector activation function. +typedef bool (*NNActFuncVector)(const float input[NN_AF_VECTOR_MAX_SIZE], float output[NN_AF_VECTOR_MAX_SIZE], size_t input_size, NNError *error); + +// nn_act_func_forward_scalar computes the given activation function with the given input and stores the result in output. +bool nn_act_func_forward_scalar(NNActFuncScalar act_func, const float input[NN_AF_FORWARD_MAX_SIZE], float output[NN_AF_FORWARD_MAX_SIZE], size_t input_size, NNError *error); + +// nn_act_func_forward_vector computes the given activation function with the given input and stores the result in output. +bool nn_act_func_forward_vector(NNActFuncVector act_func, const float input[NN_AF_FORWARD_MAX_SIZE], float output[NN_AF_FORWARD_MAX_SIZE], size_t input_size, NNError *error); -// nn_activation_func_identity returns x. -float nn_activation_func_identity(float x); +// nn_act_func_identity returns x. +float nn_act_func_identity(float x); -// nn_activation_func_sigmoid returns the sigmoid of x. -float nn_activation_func_sigmoid(float x); +// nn_act_func_sigmoid returns the sigmoid of x. +float nn_act_func_sigmoid(float x); -// nn_activation_func_relu returns the ReLU of x. -float nn_activation_func_relu(float x); +// nn_act_func_relu returns the ReLU of x. +float nn_act_func_relu(float x); -// nn_activation_func_softmax calculates the softmax of the input and stores the result in the output. -bool nn_activation_func_softmax(const float input[NN_SOFTMAX_MAX_SIZE], float output[NN_SOFTMAX_MAX_SIZE], size_t input_size, NNError *error); +// nn_act_func_softmax calculates the softmax of the input and stores the result in the output. 
+bool nn_act_func_softmax(const float input[NN_AF_VECTOR_MAX_SIZE], float output[NN_AF_VECTOR_MAX_SIZE], size_t input_size, NNError *error); #endif // NN_ACTIVATION_FUNCTION_H diff --git a/include/nn_error.h b/include/nn_error.h index abad814..c8deb38 100644 --- a/include/nn_error.h +++ b/include/nn_error.h @@ -11,6 +11,7 @@ typedef enum { NN_ERROR_INVALID_SIZE, // invalid size NN_ERROR_INVALID_VALUE, // invalid value NN_ERROR_INVALID_TYPE, // invalid type + NN_ERROR_INVALID_FUNCTION, // invalid function NN_ERROR_NEON_NOT_AVAILABLE, // NEON instructions not available NN_ERROR_CMSIS_DSP_NOT_AVAILABLE, // CMSIS-DSP functions not available } NNErrorCode; diff --git a/include/nn_layer.h b/include/nn_layer.h index ca57c26..857bb42 100644 --- a/include/nn_layer.h +++ b/include/nn_layer.h @@ -4,9 +4,15 @@ #include "nn_activation.h" #include "nn_dot_product.h" #include "nn_error.h" +#include #include #include +// M_PI is not defined in some compilers. +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif + // NN_LAYER_MAX_INPUT_SIZE defines the maximum input size a layer can have. #ifndef NN_LAYER_MAX_INPUT_SIZE #define NN_LAYER_MAX_INPUT_SIZE 64 @@ -34,7 +40,6 @@ typedef struct { float weights[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE]; float biases[NN_LAYER_MAX_BIASES]; NNDotProductFunction dot_product_func; - NNActivationFunction act_func; } NNLayer; // nn_layer_init initializes a layer with the given arguments. @@ -55,9 +60,6 @@ bool nn_layer_set_biases(NNLayer *layer, const float biases[NN_LAYER_MAX_BIASES] // nn_layer_set_dot_product_func sets the dot product function of the given layer. bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProductFunction dot_product_func, NNError *error); -// nn_layer_set_activation_func sets the activation function of the given layer. -bool nn_layer_set_activation_func(NNLayer *layer, NNActivationFunction act_func, NNError *error); - // nn_layer_forward computes the given layer with the given inputs and stores the result in outputs. bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE], float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE], size_t batch_size, NNError *error); diff --git a/include/nn_neuron.h b/include/nn_neuron.h index de3e05e..530f3fd 100644 --- a/include/nn_neuron.h +++ b/include/nn_neuron.h @@ -20,7 +20,7 @@ typedef struct { size_t input_size; float bias; NNDotProductFunction dot_product_func; - NNActivationFunctionScalar act_func; + NNActFuncScalar act_func; } NNNeuron; // nn_neuron_init initializes a neuron with the given arguments. @@ -35,8 +35,8 @@ bool nn_neuron_set_bias(NNNeuron *neuron, float bias, NNError *error); // nn_neuron_set_dot_product_func sets the dot product function of the given neuron. bool nn_neuron_set_dot_product_func(NNNeuron *neuron, NNDotProductFunction dot_product_func, NNError *error); -// nn_neuron_set_activation_func sets the activation function of the given neuron. -bool nn_neuron_set_activation_func(NNNeuron *neuron, NNActivationFunctionScalar act_func, NNError *error); +// nn_neuron_set_act_func sets the activation function of the given neuron. +bool nn_neuron_set_act_func(NNNeuron *neuron, NNActFuncScalar act_func, NNError *error); // nn_neuron_compute computes the given neuron and returns the output. 
float nn_neuron_compute(const NNNeuron *neuron, const float inputs[NN_NEURON_MAX_WEIGHTS], NNError *error); diff --git a/src/nn_activation.c b/src/nn_activation.c index 08fae7c..92550cb 100644 --- a/src/nn_activation.c +++ b/src/nn_activation.c @@ -5,31 +5,57 @@ // TODO: Add tests -// nn_activation_func_identity returns x. -float nn_activation_func_identity(float x) { +// nn_act_func_forward_scalar computes the given activation function with the given input and stores the result in output. +bool nn_act_func_forward_scalar(NNActFuncScalar act_func, const float input[NN_AF_FORWARD_MAX_SIZE], float output[NN_AF_FORWARD_MAX_SIZE], size_t input_size, NNError *error) { + nn_error_set(error, NN_ERROR_NONE, NULL); + if (act_func == NULL) { + nn_error_set(error, NN_ERROR_INVALID_FUNCTION, "act_func is NULL"); + return false; + } else if (input_size == 0 || input_size > NN_AF_FORWARD_MAX_SIZE) { + nn_error_set(error, NN_ERROR_INVALID_SIZE, "invalid input size"); + return false; + } + + for (size_t i = 0; i < input_size; ++i) { + output[i] = act_func(input[i]); + } + + return true; +} + +// nn_act_func_forward_vector computes the given activation function with the given input and stores the result in output. +bool nn_act_func_forward_vector(NNActFuncVector act_func, const float input[NN_AF_FORWARD_MAX_SIZE], float output[NN_AF_FORWARD_MAX_SIZE], size_t input_size, NNError *error) { + nn_error_set(error, NN_ERROR_NONE, NULL); + if (act_func == NULL) { + nn_error_set(error, NN_ERROR_INVALID_FUNCTION, "act_func is NULL"); + return false; + } else if (input_size == 0 || input_size > NN_AF_FORWARD_MAX_SIZE) { + nn_error_set(error, NN_ERROR_INVALID_SIZE, "invalid input size"); + return false; + } + + return act_func(input, output, input_size, error); +} + +// nn_act_func_identity returns x. +float nn_act_func_identity(float x) { return x; } -// nn_activation_func_sigmoid returns the sigmoid of x. -float nn_activation_func_sigmoid(float x) { +// nn_act_func_sigmoid returns the sigmoid of x. +float nn_act_func_sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } -// nn_activation_func_relu returns the ReLU of x. -float nn_activation_func_relu(float x) { +// nn_act_func_relu returns the ReLU of x. +float nn_act_func_relu(float x) { return fmaxf(0, x); } -// nn_activation_func_softmax calculates the softmax of the input and stores the result in the output. -bool nn_activation_func_softmax(const float input[NN_SOFTMAX_MAX_SIZE], float output[NN_SOFTMAX_MAX_SIZE], size_t input_size, NNError *error) { +// nn_act_func_softmax calculates the softmax of the input and stores the result in the output. +bool nn_act_func_softmax(const float input[NN_AF_VECTOR_MAX_SIZE], float output[NN_AF_VECTOR_MAX_SIZE], size_t input_size, NNError *error) { nn_error_set(error, NN_ERROR_NONE, NULL); - if (input == NULL) { - nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "input is NULL"); - return false; - } else if (output == NULL) { - nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "output is NULL"); - return false; - } else if (input_size == 0 || input_size > NN_SOFTMAX_MAX_SIZE) { + if (input_size == 0 || input_size > NN_AF_VECTOR_MAX_SIZE) { nn_error_set(error, NN_ERROR_INVALID_SIZE, "invalid input size"); return false; } diff --git a/src/nn_layer.c b/src/nn_layer.c index 215e05d..367de39 100644 --- a/src/nn_layer.c +++ b/src/nn_layer.c @@ -5,11 +5,6 @@ #include #include -// M_PI is not defined in some compilers. 
-#ifndef M_PI -#define M_PI 3.14159265358979323846 -#endif - // nn_layer_init initializes a layer with the given arguments. bool nn_layer_init(NNLayer *layer, size_t input_size, size_t output_size, NNError *error) { nn_error_set(error, NN_ERROR_NONE, NULL); @@ -110,18 +105,6 @@ bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProductFunction dot_prod return true; } -// nn_layer_set_activation_func sets the activation function of the given layer. -bool nn_layer_set_activation_func(NNLayer *layer, NNActivationFunction act_func, NNError *error) { - nn_error_set(error, NN_ERROR_NONE, NULL); - if (layer == NULL) { - nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "layer is NULL"); - return false; - } - layer->act_func = act_func; - - return true; -} - // nn_layer_forward computes the given layer with the given inputs and stores the result in outputs. bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE], float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE], size_t batch_size, NNError *error) { nn_error_set(error, NN_ERROR_NONE, NULL); @@ -131,19 +114,16 @@ bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATC } else if (batch_size == 0) { nn_error_set(error, NN_ERROR_INVALID_SIZE, "invalid batch size"); return false; + } else if (layer->dot_product_func == NULL) { + nn_error_set(error, NN_ERROR_INVALID_FUNCTION, "dot product function is NULL"); + return false; } - // Iterate over each input in the batch + // Iterate over batch inputs for (size_t i = 0; i < batch_size; ++i) { - // Iterate over each output in the layer + // Iterate over output neurons for (size_t j = 0; j < layer->output_size; ++j) { - outputs[i][j] = layer->biases[j]; - if (layer->dot_product_func != NULL) { - outputs[i][j] += layer->dot_product_func(inputs[i], layer->weights[j], layer->input_size); - } - if (layer->act_func.scalar != NULL) { - outputs[i][j] = layer->act_func.scalar(outputs[i][j]); - } + outputs[i][j] = layer->dot_product_func(inputs[i], layer->weights[j], layer->input_size) + layer->biases[j]; } } diff --git a/src/nn_neuron.c b/src/nn_neuron.c index d1f359a..818a447 100644 --- a/src/nn_neuron.c +++ b/src/nn_neuron.c @@ -59,8 +59,8 @@ bool nn_neuron_set_dot_product_func(NNNeuron *neuron, NNDotProductFunction dot_p return true; } -// nn_neuron_set_activation_func sets the activation function of the given neuron. -bool nn_neuron_set_activation_func(NNNeuron *neuron, NNActivationFunctionScalar act_func, NNError *error) { +// nn_neuron_set_act_func sets the activation function of the given neuron. 
+bool nn_neuron_set_act_func(NNNeuron *neuron, NNActFuncScalar act_func, NNError *error) { nn_error_set(error, NN_ERROR_NONE, NULL); if (neuron == NULL) { nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "neuron is NULL"); diff --git a/tests/arch/arm/cmsis-dsp/neuron/main.c b/tests/arch/arm/cmsis-dsp/neuron/main.c index c8aca33..7970812 100644 --- a/tests/arch/arm/cmsis-dsp/neuron/main.c +++ b/tests/arch/arm/cmsis-dsp/neuron/main.c @@ -33,7 +33,7 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProductF assert(error.code == NN_ERROR_NONE); nn_neuron_set_dot_product_func(&neuron, dot_product_func, &error); assert(error.code == NN_ERROR_NONE); - nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error); + nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error); assert(error.code == NN_ERROR_NONE); const float output = nn_neuron_compute(&neuron, tc.inputs, &error); assert(error.code == NN_ERROR_NONE); diff --git a/tests/arch/arm/cmsis-dsp/neuron_perf/main.c b/tests/arch/arm/cmsis-dsp/neuron_perf/main.c index 2a35dfb..627d6cb 100644 --- a/tests/arch/arm/cmsis-dsp/neuron_perf/main.c +++ b/tests/arch/arm/cmsis-dsp/neuron_perf/main.c @@ -35,7 +35,7 @@ int main(int argc, char *argv[]) { if (!nn_neuron_init(&neuron, weights, input_size, bias, &error)) { printf("error: %s\n", error.message); return 1; - } else if (!nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error)) { + } else if (!nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error)) { printf("error: %s\n", error.message); return 1; } else if (!nn_neuron_set_dot_product_func(&neuron, nn_dot_product_cmsis, &error)) { diff --git a/tests/arch/arm/neon/neuron/main.c b/tests/arch/arm/neon/neuron/main.c index b67b265..442137d 100644 --- a/tests/arch/arm/neon/neuron/main.c +++ b/tests/arch/arm/neon/neuron/main.c @@ -33,7 +33,7 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProductF assert(error.code == NN_ERROR_NONE); nn_neuron_set_dot_product_func(&neuron, dot_product_func, &error); assert(error.code == NN_ERROR_NONE); - nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error); + nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error); assert(error.code == NN_ERROR_NONE); const float output = nn_neuron_compute(&neuron, tc.inputs, &error); assert(error.code == NN_ERROR_NONE); diff --git a/tests/arch/arm/neon/neuron_perf/main.c b/tests/arch/arm/neon/neuron_perf/main.c index 2b4fcf3..695fb21 100644 --- a/tests/arch/arm/neon/neuron_perf/main.c +++ b/tests/arch/arm/neon/neuron_perf/main.c @@ -36,7 +36,7 @@ int main(int argc, char *argv[]) { if (!nn_neuron_init(&neuron, weights, input_size, bias, &error)) { printf("error: %s\n", error.message); return 1; - } else if (!nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error)) { + } else if (!nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error)) { printf("error: %s\n", error.message); return 1; } else if (!nn_neuron_set_dot_product_func(&neuron, nn_dot_product_neon, &error)) { diff --git a/tests/arch/generic/layer/main.c b/tests/arch/generic/layer/main.c index 0425dd5..149a089 100644 --- a/tests/arch/generic/layer/main.c +++ b/tests/arch/generic/layer/main.c @@ -8,7 +8,7 @@ #include // N_TEST_CASES defines the number of test cases. -#define N_TEST_CASES 3 +#define N_TEST_CASES 9 // DEFAULT_OUTPUT_TOLERANCE defines the default tolerance for comparing output values. 
#define DEFAULT_OUTPUT_TOLERANCE 0.0001f @@ -19,7 +19,8 @@ typedef struct { float weights[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE]; float biases[NN_LAYER_MAX_BIASES]; NNDotProductFunction dot_product_func; - NNActivationFunction act_func; + NNActFuncScalar act_func_scalar; + NNActFuncVector act_func_vector; size_t batch_size; float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE]; float output_tolerance; @@ -37,16 +38,25 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info) { assert(error.code == NN_ERROR_NONE); nn_layer_set_dot_product_func(&layer, tc.dot_product_func, &error); assert(error.code == NN_ERROR_NONE); - nn_layer_set_activation_func(&layer, tc.act_func, &error); - assert(error.code == NN_ERROR_NONE); nn_layer_set_weights(&layer, tc.weights, &error); assert(error.code == NN_ERROR_NONE); nn_layer_set_biases(&layer, tc.biases, &error); assert(error.code == NN_ERROR_NONE); float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE]; - const bool success = nn_layer_forward(&layer, tc.inputs, outputs, tc.batch_size, &error); - assert(success == true); + const bool lfr = nn_layer_forward(&layer, tc.inputs, outputs, tc.batch_size, &error); + assert(lfr == true); assert(error.code == NN_ERROR_NONE); + for (size_t i = 0; i < tc.batch_size; ++i) { + if (tc.act_func_scalar != NULL) { + const bool laf = nn_act_func_forward_scalar(tc.act_func_scalar, outputs[i], outputs[i], tc.output_size, &error); + assert(laf == true); + assert(error.code == NN_ERROR_NONE); + } else if (tc.act_func_vector != NULL) { + const bool laf = nn_act_func_forward_vector(tc.act_func_vector, outputs[i], outputs[i], tc.output_size, &error); + assert(laf == true); + assert(error.code == NN_ERROR_NONE); + } + } for (size_t i = 0; i < tc.batch_size; ++i) { for (size_t j = 0; j < tc.output_size; ++j) { assert(fabs(outputs[i][j] - tc.expected_outputs[i][j]) <= tc.output_tolerance); @@ -68,7 +78,7 @@ int main() { }, .biases = {0.5f, -0.1f, 0.2f}, .dot_product_func = nn_dot_product, - .act_func = {.scalar = nn_activation_func_identity}, + .act_func_scalar = nn_act_func_identity, .batch_size = 2, .inputs = { {1.5f, -2.0f, 1.0f, -1.5f}, @@ -81,6 +91,52 @@ int main() { }, }, + { + .input_size = 4, + .output_size = 3, + .weights = { + {0.1f, 0.2f, -0.1f, 0.5f}, + {0.3f, -0.2f, 0.4f, 0.1f}, + {-0.3f, 0.4f, 0.2f, -0.5f}, + }, + .biases = {0.5f, -0.1f, 0.2f}, + .dot_product_func = nn_dot_product, + .act_func_scalar = nn_act_func_relu, + .batch_size = 2, + .inputs = { + {1.5f, -2.0f, 1.0f, -1.5f}, + {-1.0f, 2.0f, -0.5f, 1.0f}, + }, + .output_tolerance = DEFAULT_OUTPUT_TOLERANCE, + .expected_outputs = { + {0.0f, 1.0f, 0.0f}, + {1.35f, 0.0f, 0.7f}, + }, + }, + + { + .input_size = 4, + .output_size = 3, + .weights = { + {0.1f, 0.2f, -0.1f, 0.5f}, + {0.3f, -0.2f, 0.4f, 0.1f}, + {-0.3f, 0.4f, 0.2f, -0.5f}, + }, + .biases = {0.5f, -0.1f, 0.2f}, + .dot_product_func = nn_dot_product, + .act_func_vector = nn_act_func_softmax, + .batch_size = 2, + .inputs = { + {1.5f, -2.0f, 1.0f, -1.5f}, + {-1.0f, 2.0f, -0.5f, 1.0f}, + }, + .output_tolerance = DEFAULT_OUTPUT_TOLERANCE, + .expected_outputs = { + {0.13154859, 0.65156444, 0.21688696}, + {0.61446009, 0.06476362, 0.32077629}, + }, + }, + { .input_size = 4, .output_size = 3, @@ -91,7 +147,7 @@ int main() { }, .biases = {1.0f, 0.5f, -0.2f}, .dot_product_func = nn_dot_product, - .act_func = {.scalar = nn_activation_func_identity}, + .act_func_scalar = nn_act_func_identity, .batch_size = 2, .inputs = { {0.5f, 0.1f, -0.2f, 0.4f}, @@ -104,6 +160,52 @@ int main() 
{ }, }, + { + .input_size = 4, + .output_size = 3, + .weights = { + {-0.5f, 0.8f, -0.2f, 0.4f}, + {0.2f, -0.3f, 0.5f, -0.1f}, + {0.4f, 0.1f, -0.4f, 0.6f}, + }, + .biases = {1.0f, 0.5f, -0.2f}, + .dot_product_func = nn_dot_product, + .act_func_scalar = nn_act_func_relu, + .batch_size = 2, + .inputs = { + {0.5f, 0.1f, -0.2f, 0.4f}, + {1.2f, -1.2f, 0.5f, -0.3f}, + }, + .output_tolerance = DEFAULT_OUTPUT_TOLERANCE, + .expected_outputs = { + {1.03f, 0.43f, 0.33f}, + {0.0f, 1.38f, 0.0f}, + }, + }, + + { + .input_size = 4, + .output_size = 3, + .weights = { + {-0.5f, 0.8f, -0.2f, 0.4f}, + {0.2f, -0.3f, 0.5f, -0.1f}, + {0.4f, 0.1f, -0.4f, 0.6f}, + }, + .biases = {1.0f, 0.5f, -0.2f}, + .dot_product_func = nn_dot_product, + .act_func_vector = nn_act_func_softmax, + .batch_size = 2, + .inputs = { + {0.5f, 0.1f, -0.2f, 0.4f}, + {1.2f, -1.2f, 0.5f, -0.3f}, + }, + .output_tolerance = DEFAULT_OUTPUT_TOLERANCE, + .expected_outputs = { + {0.48890266, 0.26831547, 0.24278187}, + {0.0875518, 0.75917368, 0.15327452}, + }, + }, + { .input_size = 4, .output_size = 3, @@ -114,7 +216,7 @@ int main() { }, .biases = {0.2f, -0.3f, 0.4f}, .dot_product_func = nn_dot_product, - .act_func = {.scalar = nn_activation_func_identity}, + .act_func_scalar = nn_act_func_identity, .batch_size = 3, .inputs = { {2.0f, -1.5f, 0.5f, 0.6f}, @@ -128,6 +230,56 @@ int main() { {0.3f, 0.14f, 0.45f}, }, }, + + { + .input_size = 4, + .output_size = 3, + .weights = { + {0.6f, -0.1f, 0.2f, 0.3f}, + {-0.4f, 0.2f, -0.5f, 0.1f}, + {0.1f, 0.4f, 0.2f, -0.2f}, + }, + .biases = {0.2f, -0.3f, 0.4f}, + .dot_product_func = nn_dot_product, + .act_func_vector = nn_act_func_softmax, + .batch_size = 3, + .inputs = { + {2.0f, -1.5f, 0.5f, 0.6f}, + {-1.2f, 1.3f, -0.4f, 0.5f}, + {0.5f, 0.6f, -1.0f, 0.2f}, + }, + .output_tolerance = DEFAULT_OUTPUT_TOLERANCE, + .expected_outputs = { + {0.84037173, 0.02749061, 0.13213767}, + {0.12688794, 0.45182925, 0.4212828}, + {0.33178742, 0.28273059, 0.38548199}, + }, + }, + + { + .input_size = 4, + .output_size = 3, + .weights = { + {0.6f, -0.1f, 0.2f, 0.3f}, + {-0.4f, 0.2f, -0.5f, 0.1f}, + {0.1f, 0.4f, 0.2f, -0.2f}, + }, + .biases = {0.2f, -0.3f, 0.4f}, + .dot_product_func = nn_dot_product, + .act_func_scalar = nn_act_func_relu, + .batch_size = 3, + .inputs = { + {2.0f, -1.5f, 0.5f, 0.6f}, + {-1.2f, 1.3f, -0.4f, 0.5f}, + {0.5f, 0.6f, -1.0f, 0.2f}, + }, + .output_tolerance = DEFAULT_OUTPUT_TOLERANCE, + .expected_outputs = { + {1.83f, 0.0f, 0.0f}, + {0.0f, 0.69f, 0.62f}, + {0.3f, 0.14f, 0.45f}, + }, + }, }; run_test_cases(test_cases, N_TEST_CASES, "NNLayer"); diff --git a/tests/arch/generic/layer_multi/main.c b/tests/arch/generic/layer_multi/main.c index eb54250..190d1fc 100644 --- a/tests/arch/generic/layer_multi/main.c +++ b/tests/arch/generic/layer_multi/main.c @@ -21,7 +21,7 @@ typedef struct { float weights2[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE]; float biases2[NN_LAYER_MAX_BIASES]; NNDotProductFunction dot_product_func; - NNActivationFunction act_func; + NNActFuncScalar act_func_scalar; size_t batch_size; float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE]; float output_tolerance; @@ -39,8 +39,6 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info) { assert(error.code == NN_ERROR_NONE); nn_layer_set_dot_product_func(&layer, tc.dot_product_func, &error); assert(error.code == NN_ERROR_NONE); - nn_layer_set_activation_func(&layer, tc.act_func, &error); - assert(error.code == NN_ERROR_NONE); nn_layer_set_weights(&layer, tc.weights, &error); assert(error.code == NN_ERROR_NONE); 
nn_layer_set_biases(&layer, tc.biases, &error); @@ -49,6 +47,11 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info) { const bool first_layer_success = nn_layer_forward(&layer, tc.inputs, intermediate_outputs, tc.batch_size, &error); assert(first_layer_success == true); assert(error.code == NN_ERROR_NONE); + for (size_t i = 0; i < tc.batch_size; ++i) { + const bool laf = nn_act_func_forward_scalar(tc.act_func_scalar, intermediate_outputs[i], intermediate_outputs[i], tc.output_size, &error); + assert(laf == true); + assert(error.code == NN_ERROR_NONE); + } nn_layer_set_weights(&layer, tc.weights2, &error); assert(error.code == NN_ERROR_NONE); nn_layer_set_biases(&layer, tc.biases2, &error); @@ -84,7 +87,7 @@ int main() { }, .biases2 = {0.5f, 1.5f, -0.2f}, .dot_product_func = nn_dot_product, - .act_func = {.scalar = nn_activation_func_identity}, + .act_func_scalar = nn_act_func_identity, .batch_size = 3, .inputs = { {0.9f, -0.3f, 2.2f, 1.9f}, @@ -114,7 +117,7 @@ int main() { }, .biases2 = {-0.1f, 1.0f, 0.2f}, .dot_product_func = nn_dot_product, - .act_func = {.scalar = nn_activation_func_identity}, + .act_func_scalar = nn_act_func_identity, .batch_size = 3, .inputs = { {-0.5f, 2.1f, 1.9f, -1.3f}, @@ -144,7 +147,7 @@ int main() { }, .biases2 = {0.7f, -1.1f, 0.3f}, .dot_product_func = nn_dot_product, - .act_func = {.scalar = nn_activation_func_identity}, + .act_func_scalar = nn_act_func_identity, .batch_size = 3, .inputs = { {0.2f, 2.8f, -1.5f, 1.6f}, diff --git a/tests/arch/generic/neuron/main.c b/tests/arch/generic/neuron/main.c index 99db943..02d6eaf 100644 --- a/tests/arch/generic/neuron/main.c +++ b/tests/arch/generic/neuron/main.c @@ -32,7 +32,7 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProductF assert(error.code == NN_ERROR_NONE); nn_neuron_set_dot_product_func(&neuron, dot_product_func, &error); assert(error.code == NN_ERROR_NONE); - nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error); + nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error); const float output = nn_neuron_compute(&neuron, tc.inputs, &error); assert(error.code == NN_ERROR_NONE); assert(isnan(output) == false); diff --git a/tests/arch/generic/neuron_perf/main.c b/tests/arch/generic/neuron_perf/main.c index f1ff2bb..ede8fad 100644 --- a/tests/arch/generic/neuron_perf/main.c +++ b/tests/arch/generic/neuron_perf/main.c @@ -33,7 +33,7 @@ int main(int argc, char *argv[]) { } else if (!nn_neuron_set_dot_product_func(&neuron, nn_dot_product, &error)) { printf("error: %s\n", error.message); return 1; - } else if (!nn_neuron_set_activation_func(&neuron, nn_activation_func_identity, &error)) { + } else if (!nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error)) { printf("error: %s\n", error.message); return 1; } diff --git a/tests/arch/generic/softmax/main.c b/tests/arch/generic/softmax/main.c index ebea912..0a92419 100644 --- a/tests/arch/generic/softmax/main.c +++ b/tests/arch/generic/softmax/main.c @@ -12,20 +12,20 @@ // TestCase defines a single test case. typedef struct { - float input[NN_SOFTMAX_MAX_SIZE]; + float input[NN_AF_VECTOR_MAX_SIZE]; size_t input_size; - NNActivationFunctionVector activation_func; + NNActFuncVector activation_func; float output_tolerance; - float expected_output[NN_SOFTMAX_MAX_SIZE]; + float expected_output[NN_AF_VECTOR_MAX_SIZE]; } TestCase; // run_test_cases runs the test cases. 
-void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNActivationFunctionVector activation_func) { +void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNActFuncVector activation_func) { for (int i = 0; i < n_cases; ++i) { TestCase tc = test_cases[i]; NNError error; - float output[NN_SOFTMAX_MAX_SIZE]; + float output[NN_AF_VECTOR_MAX_SIZE]; const bool result = activation_func(tc.input, output, tc.input_size, &error); assert(result == true); assert(error.code == NN_ERROR_NONE); @@ -70,7 +70,7 @@ int main() { }, }; - run_test_cases(test_cases, N_TEST_CASES, "nn_activation_func_softmax", nn_activation_func_softmax); + run_test_cases(test_cases, N_TEST_CASES, "nn_act_func_softmax", nn_act_func_softmax); return 0; }
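
For reference, a minimal sketch of how the reworked API fits together after this patch: nn_layer_forward now computes only the affine part of the layer (dot products plus biases), and the activation is applied afterwards with nn_act_func_forward_scalar or nn_act_func_forward_vector; the new make test-all target builds and runs the generic, NEON, and CMSIS-DSP test suites in one pass. The sketch mirrors the updated examples/arch/generic/layer/main.c and the generic layer tests; the weights, biases, and inputs are illustrative values taken from the test cases, not a required configuration.

```c
#include "nn_activation.h"
#include "nn_dot_product.h"
#include "nn_error.h"
#include "nn_layer.h"
#include <stddef.h>
#include <stdio.h>

int main(void) {
    NNError error;
    NNLayer layer;

    const size_t input_size = 4;
    const size_t output_size = 3;
    const size_t batch_size = 2;

    // Illustrative values borrowed from tests/arch/generic/layer/main.c.
    float weights[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE] = {
        {0.1f, 0.2f, -0.1f, 0.5f},
        {0.3f, -0.2f, 0.4f, 0.1f},
        {-0.3f, 0.4f, 0.2f, -0.5f},
    };
    float biases[NN_LAYER_MAX_BIASES] = {0.5f, -0.1f, 0.2f};
    float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE] = {
        {1.5f, -2.0f, 1.0f, -1.5f},
        {-1.0f, 2.0f, -0.5f, 1.0f},
    };
    float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE];

    // Set up the layer: weights, biases, and the generic dot product function.
    if (!nn_layer_init(&layer, input_size, output_size, &error) ||
        !nn_layer_set_weights(&layer, weights, &error) ||
        !nn_layer_set_biases(&layer, biases, &error) ||
        !nn_layer_set_dot_product_func(&layer, nn_dot_product, &error)) {
        fprintf(stderr, "error: %s\n", error.message);
        return 1;
    }

    // Step 1: the forward pass is purely affine now (dot product + bias).
    if (!nn_layer_forward(&layer, inputs, outputs, batch_size, &error)) {
        fprintf(stderr, "error: %s\n", error.message);
        return 1;
    }

    // Step 2: apply the activation separately, one batch row at a time.
    for (size_t i = 0; i < batch_size; ++i) {
        // Scalar (element-wise) activation, e.g. ReLU ...
        if (!nn_act_func_forward_scalar(nn_act_func_relu, outputs[i], outputs[i], output_size, &error)) {
            fprintf(stderr, "error: %s\n", error.message);
            return 1;
        }
        // ... or a vector activation such as softmax instead:
        // nn_act_func_forward_vector(nn_act_func_softmax, outputs[i], outputs[i], output_size, &error);
    }

    // Print the activated outputs.
    for (size_t i = 0; i < batch_size; ++i) {
        for (size_t j = 0; j < output_size; ++j) {
            printf("%f ", outputs[i][j]);
        }
        printf("\n");
    }

    return 0;
}
```

Dropping act_func from NNLayer keeps the struct a plain affine transform, so the same forward pass can feed either a scalar activation or a vector one such as softmax without branching inside nn_layer_forward.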