aboutsummaryrefslogtreecommitdiff
path: root/src/nn.c
diff options
context:
space:
mode:
authorjvech <jmvalenciae@unal.edu.co>2024-08-06 14:29:42 -0500
committerjvech <jmvalenciae@unal.edu.co>2024-08-06 14:29:42 -0500
commitebd66e65bf18574fa8905d7b0ae3fbb85bfc9e06 (patch)
treeda128cfa54b20abbff670c89278f0005b0f128cb /src/nn.c
parentce0001538820d819bf965a24ffbb6f6e6269859c (diff)
add: file parsing improved
Things implemented: * json_read() must die if a key does not exist or a value has the wrong type. * on the predict command, input should be shown exactly as given. * a float-precision CLI option should be added.
Diffstat (limited to 'src/nn.c')
-rw-r--r--src/nn.c10
1 file changed, 6 insertions, 4 deletions
diff --git a/src/nn.c b/src/nn.c
index 4927dc6..916803e 100644
--- a/src/nn.c
+++ b/src/nn.c
@@ -154,7 +154,7 @@ void nn_backward(
}
for (size_t sample = 0; sample < input_shape[0]; sample++) {
- for (size_t l = network_size - 1; l >= 0 && l < network_size; l--) {
+ for (size_t l = network_size - 1; l < network_size; l--) {
size_t weights_shape[2] = {network[l].input_nodes, network[l].neurons};
if (l == network_size - 1) {
double *zout = Zout[l] + sample * network[l].neurons;
@@ -328,6 +328,7 @@ void nn_network_read_weights(char *filepath, Layer *network, size_t network_size
return;
nn_network_read_weights_error:
+ fclose(fp);
die("nn_network_read_weights() Error: "
"number of read objects does not match with expected ones");
}
@@ -357,14 +358,14 @@ void nn_network_write_weights(char *filepath, Layer *network, size_t network_siz
return;
nn_network_write_weights_error:
+ fclose(fp);
die("nn_network_write_weights() Error: "
"number of written objects does not match with number of objects");
}
void nn_network_init_weights(Layer layers[], size_t nmemb, size_t n_inputs, bool fill_random)
{
- int i;
- size_t prev_size = n_inputs;
+ size_t i, prev_size = n_inputs;
for (i = 0; i < nmemb; i++) {
@@ -390,7 +391,8 @@ nn_layers_calloc_weights_error:
void nn_network_free_weights(Layer layers[], size_t nmemb)
{
- for (int i = 0; i < nmemb; i++) {
+ size_t i;
+ for (i = 0; i < nmemb; i++) {
free(layers[i].weights);
free(layers[i].bias);
}
Feel free to download, copy and edit any repo