Rename nn_layer_compute to nn_layer_forward (#18)
devfacet authored Apr 13, 2024
1 parent 3349962 commit ba1e061
Showing 8 changed files with 14 additions and 16 deletions.
4 changes: 2 additions & 2 deletions include/nn_layer.h
@@ -46,7 +46,7 @@ bool nn_layer_set_weights(NNLayer *layer, const float weights[NN_LAYER_MAX_OUTPU
// nn_layer_set_biases sets the biases of the given layer.
bool nn_layer_set_biases(NNLayer *layer, const float biases[NN_LAYER_MAX_BIASES], NNError *error);

- // nn_layer_compute computes the given layer with the given inputs and stores the result in outputs.
- bool nn_layer_compute(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE], float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE], size_t batch_size, NNError *error);
+ // nn_layer_forward computes the given layer with the given inputs and stores the result in outputs.
+ bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE], float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE], size_t batch_size, NNError *error);

#endif // NN_LAYER_H
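
For reference, the renamed entry point is called exactly like the old one; only the symbol changes. The sketch below is not part of this commit: it is a minimal, hypothetical caller written against the signature above and the test call sites further down. It assumes the layer has already been configured with nn_layer_set_weights and nn_layer_set_biases (layer construction is outside this diff), and the helper name forward_pass_example is invented for illustration.

#include <stdbool.h>
#include <stddef.h>
#include "nn_layer.h" // assumed to provide NNLayer, NNError, and the NN_LAYER_* limits

// Minimal usage sketch (not from the repository): run one forward pass on a
// layer that was already configured with nn_layer_set_weights/nn_layer_set_biases.
static bool forward_pass_example(const NNLayer *layer,
                                 const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE],
                                 size_t batch_size) {
    NNError error;
    float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE];

    // nn_layer_forward (formerly nn_layer_compute) fills the first batch_size
    // rows of outputs and reports failures via the return value and error.code.
    if (!nn_layer_forward(layer, inputs, outputs, batch_size, &error)) {
        return false;
    }
    return error.code == NN_ERROR_NONE;
}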
7 changes: 3 additions & 4 deletions src/nn_layer.c
@@ -57,14 +57,13 @@ bool nn_layer_set_biases(NNLayer *layer, const float biases[NN_LAYER_MAX_BIASES]
return true;
}

- // nn_layer_compute computes the given layer with the given inputs and stores the result in outputs.
- bool nn_layer_compute(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE], float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE], size_t batch_size, NNError *error) {
+ // nn_layer_forward computes the given layer with the given inputs and stores the result in outputs.
+ bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE], float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE], size_t batch_size, NNError *error) {
nn_error_set(error, NN_ERROR_NONE, NULL);
if (layer == NULL) {
nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "layer is NULL");
return false;
- }
- if (batch_size == 0) {
+ } else if (batch_size == 0) {
nn_error_set(error, NN_ERROR_INVALID_SIZE, "invalid batch size");
return false;
}
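
Aside from the rename, this hunk folds the NULL check and the batch-size check into a single if / else if chain (the src/nn_neuron.c hunk below applies the same treatment to its weights check). As a hypothetical illustration of what those guards mean for a caller, the sketch below is not part of the commit: it assumes a valid, already-configured layer, uses only the error codes visible in this diff, and check_zero_batch_rejected is an invented name.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include "nn_layer.h" // assumed to provide NNLayer, NNError, and the NN_LAYER_* limits

// Hypothetical check: with the guards above, a zero batch size fails fast,
// returning false and setting NN_ERROR_INVALID_SIZE before any computation runs.
static void check_zero_batch_rejected(const NNLayer *layer) {
    NNError error;
    float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE] = {{0.0f}};
    float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE];

    const bool success = nn_layer_forward(layer, inputs, outputs, 0, &error);
    assert(success == false);
    assert(error.code == NN_ERROR_INVALID_SIZE);
}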
3 changes: 1 addition & 2 deletions src/nn_neuron.c
@@ -12,8 +12,7 @@ bool nn_neuron_init(NNNeuron *neuron, const float weights[NN_NEURON_MAX_WEIGHTS]
if (neuron == NULL) {
nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "neuron is NULL");
return false;
- }
- if (weights == NULL) {
+ } else if (weights == NULL) {
nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "weights is NULL");
return false;
}
2 changes: 1 addition & 1 deletion tests/arch/arm/cmsis-dsp/neuron/main.c
@@ -131,6 +131,6 @@ int main() {
.expected_output = 0.000012f,
},
};
- run_test_cases(test_cases, N_TEST_CASES, "nn_neuron", nn_dot_product_cmsis);
+ run_test_cases(test_cases, N_TEST_CASES, "NNNeuron", nn_dot_product_cmsis);
return 0;
}
2 changes: 1 addition & 1 deletion tests/arch/arm/neon/neuron/main.c
@@ -131,6 +131,6 @@ int main() {
.expected_output = 0.000012f,
},
};
- run_test_cases(test_cases, N_TEST_CASES, "nn_neuron", nn_dot_product_neon);
+ run_test_cases(test_cases, N_TEST_CASES, "NNNeuron", nn_dot_product_neon);
return 0;
}
4 changes: 2 additions & 2 deletions tests/arch/generic/layer/main.c
@@ -40,7 +40,7 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info) {
nn_layer_set_biases(&layer, tc.biases, &error);
assert(error.code == NN_ERROR_NONE);
float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE];
- const bool success = nn_layer_compute(&layer, tc.inputs, outputs, tc.batch_size, &error);
+ const bool success = nn_layer_forward(&layer, tc.inputs, outputs, tc.batch_size, &error);
assert(success == true);
assert(error.code == NN_ERROR_NONE);
for (size_t i = 0; i < tc.batch_size; ++i) {
@@ -126,6 +126,6 @@ int main() {
},
};

- run_test_cases(test_cases, N_TEST_CASES, "nn_layer");
+ run_test_cases(test_cases, N_TEST_CASES, "NNLayer");
return 0;
}
6 changes: 3 additions & 3 deletions tests/arch/generic/layer_multi/main.c
@@ -42,15 +42,15 @@ void run_test_cases(TestCase *test_cases, int n_cases, char *info) {
nn_layer_set_biases(&layer, tc.biases, &error);
assert(error.code == NN_ERROR_NONE);
float intermediate_outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE];
- const bool first_layer_success = nn_layer_compute(&layer, tc.inputs, intermediate_outputs, tc.batch_size, &error);
+ const bool first_layer_success = nn_layer_forward(&layer, tc.inputs, intermediate_outputs, tc.batch_size, &error);
assert(first_layer_success == true);
assert(error.code == NN_ERROR_NONE);
nn_layer_set_weights(&layer, tc.weights2, &error);
assert(error.code == NN_ERROR_NONE);
nn_layer_set_biases(&layer, tc.biases2, &error);
assert(error.code == NN_ERROR_NONE);
float final_outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE];
- const bool second_layer_success = nn_layer_compute(&layer, intermediate_outputs, final_outputs, tc.batch_size, &error);
+ const bool second_layer_success = nn_layer_forward(&layer, intermediate_outputs, final_outputs, tc.batch_size, &error);
assert(second_layer_success == true);
assert(error.code == NN_ERROR_NONE);
for (size_t i = 0; i < tc.batch_size; ++i) {
@@ -156,6 +156,6 @@ int main() {
},
};

- run_test_cases(test_cases, N_TEST_CASES, "nn_layer");
+ run_test_cases(test_cases, N_TEST_CASES, "NNLayer");
return 0;
}
2 changes: 1 addition & 1 deletion tests/arch/generic/neuron/main.c
@@ -130,6 +130,6 @@ int main() {
.expected_output = 0.000012f,
},
};
- run_test_cases(test_cases, N_TEST_CASES, "nn_neuron", nn_dot_product);
+ run_test_cases(test_cases, N_TEST_CASES, "NNNeuron", nn_dot_product);
return 0;
}
