
Rename dot product functions and arguments #24

Merged: 1 commit, merged on Apr 15, 2024
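The rename is mechanical: nn_layer_set_dot_product_func becomes nn_layer_set_dot_prod_func, and struct fields and parameters named dot_product_func become dot_prod_func, matching the existing nn_dot_prod and NNDotProdFunc spellings. As a minimal caller-side sketch of the new names (the 4x3 layer size and the error-checking pattern are illustrative, borrowed from the tests in this diff):

#include <stdio.h>
#include "nn_layer.h"
// nn_dot_prod is assumed visible via the project headers, as in
// examples/arch/generic/layer/main.c.

int main(void) {
    NNLayer layer;
    NNError error;

    // Initialize a layer; sizes are illustrative.
    nn_layer_init(&layer, 4, 3, &error);
    if (error.code != NN_ERROR_NONE) {
        fprintf(stderr, "error: %s\n", error.message);
        return 1;
    }

    // New name: nn_layer_set_dot_prod_func (was nn_layer_set_dot_product_func).
    if (!nn_layer_set_dot_prod_func(&layer, nn_dot_prod, &error)) {
        fprintf(stderr, "error: %s\n", error.message);
        return 1;
    }
    return 0;
}

Existing callers need only the name swap; signatures, fields, and behavior are otherwise unchanged.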
2 changes: 1 addition & 1 deletion examples/arch/generic/layer/main.c
@@ -34,7 +34,7 @@ int main() {
}

// Set the dot product function of the layer
- if (!nn_layer_set_dot_product_func(&layer, nn_dot_prod, &error)) {
+ if (!nn_layer_set_dot_prod_func(&layer, nn_dot_prod, &error)) {
fprintf(stderr, "error: %s\n", error.message);
return 1;
}
6 changes: 3 additions & 3 deletions include/nn_layer.h
@@ -39,7 +39,7 @@ typedef struct {
size_t output_size;
float weights[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
float biases[NN_LAYER_MAX_BIASES];
- NNDotProdFunc dot_product_func;
+ NNDotProdFunc dot_prod_func;
} NNLayer;

// nn_layer_init initializes a layer with the given arguments.
@@ -57,8 +57,8 @@ bool nn_layer_set_weights(NNLayer *layer, const float weights[NN_LAYER_MAX_OUTPU
// nn_layer_set_biases sets the biases of the given layer.
bool nn_layer_set_biases(NNLayer *layer, const float biases[NN_LAYER_MAX_BIASES], NNError *error);

- // nn_layer_set_dot_product_func sets the dot product function of the given layer.
- bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProdFunc dot_product_func, NNError *error);
+ // nn_layer_set_dot_prod_func sets the dot product function of the given layer.
+ bool nn_layer_set_dot_prod_func(NNLayer *layer, NNDotProdFunc dot_prod_func, NNError *error);

// nn_layer_forward computes the given layer with the given inputs and stores the result in outputs.
bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE], float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE], size_t batch_size, NNError *error);
4 changes: 2 additions & 2 deletions include/nn_neuron.h
@@ -19,7 +19,7 @@ typedef struct {
float weights[NN_NEURON_MAX_WEIGHTS];
size_t input_size;
float bias;
- NNDotProdFunc dot_product_func;
+ NNDotProdFunc dot_prod_func;
NNActFuncScalar act_func;
} NNNeuron;

@@ -33,7 +33,7 @@ bool nn_neuron_set_weights(NNNeuron *neuron, const float weights[NN_NEURON_MAX_W
bool nn_neuron_set_bias(NNNeuron *neuron, float bias, NNError *error);

// nn_neuron_set_dot_prod_func sets the dot product function of the given neuron.
- bool nn_neuron_set_dot_prod_func(NNNeuron *neuron, NNDotProdFunc dot_product_func, NNError *error);
+ bool nn_neuron_set_dot_prod_func(NNNeuron *neuron, NNDotProdFunc dot_prod_func, NNError *error);

// nn_neuron_set_act_func sets the activation function of the given neuron.
bool nn_neuron_set_act_func(NNNeuron *neuron, NNActFuncScalar act_func, NNError *error);
10 changes: 5 additions & 5 deletions src/nn_layer.c
@@ -93,14 +93,14 @@ bool nn_layer_set_biases(NNLayer *layer, const float biases[NN_LAYER_MAX_BIASES]
return true;
}

- // nn_layer_set_dot_product_func sets the dot product function of the given layer.
- bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProdFunc dot_product_func, NNError *error) {
+ // nn_layer_set_dot_prod_func sets the dot product function of the given layer.
+ bool nn_layer_set_dot_prod_func(NNLayer *layer, NNDotProdFunc dot_prod_func, NNError *error) {
nn_error_set(error, NN_ERROR_NONE, NULL);
if (layer == NULL) {
nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "layer is NULL");
return false;
}
- layer->dot_product_func = dot_product_func;
+ layer->dot_prod_func = dot_prod_func;

return true;
}
@@ -114,7 +114,7 @@ bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATC
} else if (batch_size == 0) {
nn_error_set(error, NN_ERROR_INVALID_SIZE, "invalid batch size");
return false;
- } else if (layer->dot_product_func == NULL) {
+ } else if (layer->dot_prod_func == NULL) {
nn_error_set(error, NN_ERROR_INVALID_FUNCTION, "dot product function is NULL");
return false;
}
@@ -123,7 +123,7 @@ bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATC
for (size_t i = 0; i < batch_size; ++i) {
// Iterate over output neurons
for (size_t j = 0; j < layer->output_size; ++j) {
- outputs[i][j] = layer->dot_product_func(inputs[i], layer->weights[j], layer->input_size) + layer->biases[j];
+ outputs[i][j] = layer->dot_prod_func(inputs[i], layer->weights[j], layer->input_size) + layer->biases[j];
}
}

8 changes: 4 additions & 4 deletions src/nn_neuron.c
@@ -49,13 +49,13 @@ bool nn_neuron_set_bias(NNNeuron *neuron, float bias, NNError *error) {
}

// nn_neuron_set_dot_prod_func sets the dot product function of the given neuron.
- bool nn_neuron_set_dot_prod_func(NNNeuron *neuron, NNDotProdFunc dot_product_func, NNError *error) {
+ bool nn_neuron_set_dot_prod_func(NNNeuron *neuron, NNDotProdFunc dot_prod_func, NNError *error) {
nn_error_set(error, NN_ERROR_NONE, NULL);
if (neuron == NULL) {
nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "neuron is NULL");
return false;
}
- neuron->dot_product_func = dot_product_func;
+ neuron->dot_prod_func = dot_prod_func;
return true;
}

@@ -87,9 +87,9 @@ float nn_neuron_compute(const NNNeuron *neuron, const float inputs[NN_NEURON_MAX
// 3. Apply the activation function

// Compute the dot product
- if (neuron->dot_product_func != NULL) {
+ if (neuron->dot_prod_func != NULL) {
// Sum the weighted inputs
- result = neuron->dot_product_func(neuron->weights, inputs, neuron->input_size);
+ result = neuron->dot_prod_func(neuron->weights, inputs, neuron->input_size);
}
// Add the bias
result += neuron->bias;
6 changes: 3 additions & 3 deletions tests/arch/arm/cmsis-dsp/neuron/main.c
@@ -17,21 +17,21 @@ typedef struct {
float weights[NN_NEURON_MAX_WEIGHTS];
size_t input_size;
float bias;
- NNDotProdFunc dot_product_func;
+ NNDotProdFunc dot_prod_func;
float output_tolerance;
float expected_output;
} TestCase;

// run_test_cases runs the test cases.
- void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_product_func) {
+ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_prod_func) {
for (int i = 0; i < n_cases; ++i) {
TestCase tc = test_cases[i];
NNNeuron neuron;
NNError error;

nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, &error);
assert(error.code == NN_ERROR_NONE);
- nn_neuron_set_dot_prod_func(&neuron, dot_product_func, &error);
+ nn_neuron_set_dot_prod_func(&neuron, dot_prod_func, &error);
assert(error.code == NN_ERROR_NONE);
nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error);
assert(error.code == NN_ERROR_NONE);
6 changes: 3 additions & 3 deletions tests/arch/arm/neon/neuron/main.c
@@ -17,21 +17,21 @@ typedef struct {
float weights[NN_NEURON_MAX_WEIGHTS];
size_t input_size;
float bias;
- NNDotProdFunc dot_product_func;
+ NNDotProdFunc dot_prod_func;
float output_tolerance;
float expected_output;
} TestCase;

// run_test_cases runs the test cases.
- void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_product_func) {
+ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_prod_func) {
for (int i = 0; i < n_cases; ++i) {
TestCase tc = test_cases[i];
NNNeuron neuron;
NNError error;

nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, &error);
assert(error.code == NN_ERROR_NONE);
- nn_neuron_set_dot_prod_func(&neuron, dot_product_func, &error);
+ nn_neuron_set_dot_prod_func(&neuron, dot_prod_func, &error);
assert(error.code == NN_ERROR_NONE);
nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error);
assert(error.code == NN_ERROR_NONE);
6 changes: 3 additions & 3 deletions tests/arch/generic/dot_product/main.c
@@ -16,17 +16,17 @@ typedef struct {
float b[4];
size_t vector_size;
float bias;
- NNDotProdFunc dot_product_func;
+ NNDotProdFunc dot_prod_func;
float output_tolerance;
float expected_output;
} TestCase;

// run_test_cases runs the test cases.
- void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_product_func) {
+ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_prod_func) {
for (int i = 0; i < n_cases; ++i) {
TestCase tc = test_cases[i];

- const float output = dot_product_func(tc.a, tc.b, tc.vector_size);
+ const float output = dot_prod_func(tc.a, tc.b, tc.vector_size);
assert(isnan(output) == false);
assert(fabs(output - tc.expected_output) < tc.output_tolerance);
printf("passed: %s case=%d info=%s\n", __func__, i + 1, info);
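These generic cases exercise whatever NNDotProdFunc they are handed, which is how the NEON and CMSIS-DSP variants above reuse the same harness. For reference, a hedged sketch of a scalar implementation matching the call shape used here, dot_prod_func(tc.a, tc.b, tc.vector_size); the const-qualified pointer types are an assumption, since the authoritative NNDotProdFunc typedef lives in the project headers:

#include <stddef.h>

// Hypothetical scalar dot product with the call shape used in these tests:
// dot_prod_func(a, b, size). Parameter types are assumed; see the NNDotProdFunc
// typedef in the headers for the authoritative signature.
static float scalar_dot_prod(const float *a, const float *b, size_t size) {
    float sum = 0.0f;
    for (size_t i = 0; i < size; ++i) {
        sum += a[i] * b[i];
    }
    return sum;
}

Any function with this shape could be passed wherever nn_dot_prod appears, e.g. to run_test_cases or to the renamed nn_layer_set_dot_prod_func / nn_neuron_set_dot_prod_func setters.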
22 changes: 11 additions & 11 deletions tests/arch/generic/layer/main.c
@@ -18,7 +18,7 @@ typedef struct {
size_t output_size;
float weights[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
float biases[NN_LAYER_MAX_BIASES];
- NNDotProdFunc dot_product_func;
+ NNDotProdFunc dot_prod_func;
NNActFuncScalar act_func_scalar;
NNActFuncVector act_func_vector;
size_t batch_size;
@@ -36,7 +36,7 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info) {

nn_layer_init(&layer, tc.input_size, tc.output_size, &error);
assert(error.code == NN_ERROR_NONE);
- nn_layer_set_dot_product_func(&layer, tc.dot_product_func, &error);
+ nn_layer_set_dot_prod_func(&layer, tc.dot_prod_func, &error);
assert(error.code == NN_ERROR_NONE);
nn_layer_set_weights(&layer, tc.weights, &error);
assert(error.code == NN_ERROR_NONE);
@@ -77,7 +77,7 @@ int main() {
{-0.3f, 0.4f, 0.2f, -0.5f},
},
.biases = {0.5f, -0.1f, 0.2f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 2,
.inputs = {
@@ -100,7 +100,7 @@ int main() {
{-0.3f, 0.4f, 0.2f, -0.5f},
},
.biases = {0.5f, -0.1f, 0.2f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_scalar = nn_act_func_relu,
.batch_size = 2,
.inputs = {
@@ -123,7 +123,7 @@ int main() {
{-0.3f, 0.4f, 0.2f, -0.5f},
},
.biases = {0.5f, -0.1f, 0.2f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_vector = nn_act_func_softmax,
.batch_size = 2,
.inputs = {
@@ -146,7 +146,7 @@ int main() {
{0.4f, 0.1f, -0.4f, 0.6f},
},
.biases = {1.0f, 0.5f, -0.2f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 2,
.inputs = {
@@ -169,7 +169,7 @@ int main() {
{0.4f, 0.1f, -0.4f, 0.6f},
},
.biases = {1.0f, 0.5f, -0.2f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_scalar = nn_act_func_relu,
.batch_size = 2,
.inputs = {
@@ -192,7 +192,7 @@ int main() {
{0.4f, 0.1f, -0.4f, 0.6f},
},
.biases = {1.0f, 0.5f, -0.2f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_vector = nn_act_func_softmax,
.batch_size = 2,
.inputs = {
@@ -215,7 +215,7 @@ int main() {
{0.1f, 0.4f, 0.2f, -0.2f},
},
.biases = {0.2f, -0.3f, 0.4f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 3,
.inputs = {
@@ -240,7 +240,7 @@ int main() {
{0.1f, 0.4f, 0.2f, -0.2f},
},
.biases = {0.2f, -0.3f, 0.4f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_vector = nn_act_func_softmax,
.batch_size = 3,
.inputs = {
@@ -265,7 +265,7 @@ int main() {
{0.1f, 0.4f, 0.2f, -0.2f},
},
.biases = {0.2f, -0.3f, 0.4f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_scalar = nn_act_func_relu,
.batch_size = 3,
.inputs = {
10 changes: 5 additions & 5 deletions tests/arch/generic/layer_multi/main.c
@@ -20,7 +20,7 @@ typedef struct {
float biases[NN_LAYER_MAX_BIASES];
float weights2[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
float biases2[NN_LAYER_MAX_BIASES];
- NNDotProdFunc dot_product_func;
+ NNDotProdFunc dot_prod_func;
NNActFuncScalar act_func_scalar;
size_t batch_size;
float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE];
@@ -37,7 +37,7 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info) {

nn_layer_init(&layer, tc.input_size, tc.output_size, &error);
assert(error.code == NN_ERROR_NONE);
- nn_layer_set_dot_product_func(&layer, tc.dot_product_func, &error);
+ nn_layer_set_dot_prod_func(&layer, tc.dot_prod_func, &error);
assert(error.code == NN_ERROR_NONE);
nn_layer_set_weights(&layer, tc.weights, &error);
assert(error.code == NN_ERROR_NONE);
@@ -86,7 +86,7 @@ int main() {
{0.5f, -0.9f, 0.1f},
},
.biases2 = {0.5f, 1.5f, -0.2f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 3,
.inputs = {
@@ -116,7 +116,7 @@ int main() {
{0.13f, -0.31f, 0.11f},
},
.biases2 = {-0.1f, 1.0f, 0.2f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 3,
.inputs = {
@@ -146,7 +146,7 @@ int main() {
{-0.35f, 0.62f, -0.2f},
},
.biases2 = {0.7f, -1.1f, 0.3f},
- .dot_product_func = nn_dot_prod,
+ .dot_prod_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 3,
.inputs = {
6 changes: 3 additions & 3 deletions tests/arch/generic/neuron/main.c
@@ -16,21 +16,21 @@ typedef struct {
float weights[NN_NEURON_MAX_WEIGHTS];
size_t input_size;
float bias;
- NNDotProdFunc dot_product_func;
+ NNDotProdFunc dot_prod_func;
float output_tolerance;
float expected_output;
} TestCase;

// run_test_cases runs the test cases.
- void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_product_func) {
+ void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_prod_func) {
for (int i = 0; i < n_cases; ++i) {
TestCase tc = test_cases[i];
NNNeuron neuron;
NNError error;

nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, &error);
assert(error.code == NN_ERROR_NONE);
- nn_neuron_set_dot_prod_func(&neuron, dot_product_func, &error);
+ nn_neuron_set_dot_prod_func(&neuron, dot_prod_func, &error);
assert(error.code == NN_ERROR_NONE);
nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error);
const float output = nn_neuron_compute(&neuron, tc.inputs, &error);