Clean up (before drastic refactoring)

This commit is contained in:
Thomas 2023-09-24 11:54:55 +02:00
parent 411e4db3db
commit f836c53711
10 changed files with 61 additions and 67 deletions

View file

@ -3,5 +3,5 @@ project(c_net C)
set(CMAKE_C_STANDARD 11)
add_executable(c_net main.c matrix.c image.c neuronal_network.c util.c util.h)
add_executable(c_net main.c matrix/matrix.c image/image.c neuronal_network.c util.c matrix/operations.c)
target_link_libraries(c_net m)

View file

@ -2,8 +2,8 @@
#include <stdlib.h>
#include "image.h"
#include "matrix.h"
#include "util.h"
#include "../matrix/matrix.h"
#include "../util.h"
void big_endian_to_c_uint(const char * bytes, void * target, int size) {
char* helper = (char*)target;
@ -68,8 +68,8 @@ Image * load_pgm_image(char * image_file_string){
Image** import_images(char* image_file_string, char* label_file_string, int* _number_imported, int count) {
printf("Loading Images\n");
// create file pointer for the image and label data
FILE* image_file = fopen(image_file_string, "r");
FILE* label_file = fopen(label_file_string, "r");
FILE* image_file = fopen(image_file_string, "rb");
FILE* label_file = fopen(label_file_string, "rb");
// check if the file could be opened
if(image_file == NULL || label_file == NULL) {

View file

@ -1,7 +1,7 @@
#pragma once
#include "matrix.h"
#include "../matrix/matrix.h"
#include "matrix.h"
#include "../matrix/matrix.h"
typedef struct {
Matrix* pixel_values;

14
main.c
View file

@ -1,6 +1,6 @@
#include <stdio.h>
#include "image.h"
#include "image/image.h"
#include "neuronal_network.h"
int main() {
@ -11,18 +11,18 @@ int main() {
// matrix_print(images[0]->pixel_values);
// matrix_print(images[1]->pixel_values);
Neural_Network* nn = new_network(28*28, 40, 5, 10, 0.08);
Neural_Network* nn = new_network(28*28, 50, 3, 10, 0.1);
randomize_network(nn, 1);
// Neural_Network* nn = load_network("../networks/newest_network.txt");
// printf("Done loading!\n");
// batch_train(nn, images, 20000, 20);
for (int i = 0; i < 30000; ++i) {
for (int i = 0; i < 60000; ++i) {
train_network(nn, images[i], images[i]->label);
}
save_network(nn);
// batch_train(nn, images, 30000, 2);
printf("Trinaing Done!\n");
// save_network(nn);
printf("%lf\n", measure_network_accuracy(nn, images, 10000));

View file

@ -249,16 +249,6 @@ Matrix* transpose(Matrix* matrix) {
}
/* Return the sum of every element in the matrix.
 * The input matrix is read-only; it is neither modified nor freed.
 * NOTE(review): assumes matrix->numbers is a rows x columns array of
 * valid doubles — confirm allocation invariant in matrix_create. */
double matrix_sum(Matrix* matrix) {
    double total = 0.0;
    for (int r = 0; r < matrix->rows; r++) {
        /* walk one row at a time through the row-pointer table */
        double* row = matrix->numbers[r];
        for (int c = 0; c < matrix->columns; c++) {
            total += row[c];
        }
    }
    return total;
}
void matrix_save(Matrix* matrix, char* file_string){
// open the file in append mode

View file

@ -8,7 +8,6 @@ typedef struct {
static const int scaling_value = 10000;
// operational functions
Matrix* matrix_create(int rows, int columns);
void matrix_fill(Matrix* matrix, double value);
void matrix_free(Matrix* matrix);
@ -18,18 +17,11 @@ void matrix_save(Matrix* matrix, char* file_string);
Matrix* matrix_load(char* file_string);
Matrix* load_next_matrix(FILE * save_file);
void matrix_randomize(Matrix* matrix, int n); // don't understand the usage of the n
void matrix_randomize(Matrix* matrix, int n);
int matrix_argmax(Matrix* matrix);
Matrix* matrix_flatten(Matrix* matrix, int axis);
Matrix* matrix_add_bias(Matrix* matrix);
/*
* These methods won't change or free the input matrix.
* It creates a new matrix, which is modified and then returned.
* If we don't need the original matrix, we should consider just changing the original matrix and changing the method signature to void.
*/
// mathematical functions
Matrix* multiply(Matrix* matrix1, Matrix* matrix2);
Matrix* add(Matrix* matrix1, Matrix* matrix2);
Matrix* subtract(Matrix* matrix1, Matrix* matrix2);
@ -37,5 +29,4 @@ Matrix* dot(Matrix* matrix1, Matrix* matrix2);
Matrix* apply(double (*function)(double), Matrix* matrix);
Matrix* scale(Matrix* matrix, double value);
Matrix* addScalar(Matrix* matrix, double value);
Matrix* transpose(Matrix* matrix);
double matrix_sum(Matrix* matrix);
Matrix* transpose(Matrix* matrix);

1
matrix/operations.c Normal file
View file

@ -0,0 +1 @@
#include "operations.h"

1
matrix/operations.h Normal file
View file

@ -0,0 +1 @@
#include "matrix.h"

View file

@ -6,8 +6,8 @@
double sigmoid(double input);
Matrix* predict(Neural_Network* network, Matrix* image_data);
Matrix* sigmoid_derivative(Matrix* matrix);
Matrix *calculate_weights_delta(Matrix *previous_layer_output, Matrix *delta_matrix, double learning_rate);
void apply_weights(Neural_Network* network, Matrix* delta_weights_matrix, int index);
Matrix *calculate_weights_delta(Matrix *previous_layer_output, Matrix *delta_matrix);
void apply_weights(Neural_Network *network, Matrix *delta_weights_matrix, int index, double learning_rate);
Matrix* calculate_delta_hidden(Matrix* next_layer_delta, Matrix* weights, Matrix* current_layer_output);
Neural_Network* new_network(int input_size, int hidden_size, int hidden_amount, int output_size, double learning_rate){
@ -167,22 +167,26 @@ Matrix* predict(Neural_Network* network, Matrix* image_data) {
//void batch_train(Neural_Network* network, Image** images, int amount, int batch_size) {
//
// for (int i = 0; i < amount; ++i) {
// if(amount % batch_size != 0) {
// printf("ERROR: Batch Size is not compatible with image amount! (batch_train)");
// exit(1);
// }
//
// if(amount % 1000 == 0) {
// printf("1k pics!\n");
// }
// int image_index = 0;
//
// for (int i = 0; i < amount / batch_size; ++i) {
//
// Matrix* batch_weights[network->hidden_amount + 1];
//
// for (int j = 0; j < network->hidden_amount + 1; j++) {
// batch_weights[j] = matrix_create(network->weights[j]->rows, network->weights[j]->columns);
// matrix_fill(batch_weights[j], 0);
// }
//
// for (int j = 0; j < batch_size; ++j) {
// Matrix** delta_weights = train_network(network, images[i], images[i]->label);
// Matrix** delta_weights = train_network(network, images[image_index], images[image_index]->label);
//
// for (int k = 0; k < network->hidden_amount + 1; k++) {
// if(j == 0) {
// batch_weights[k] = delta_weights[k];
// continue;
// }
//
// Matrix* temp_result = add(batch_weights[k], delta_weights[k]);
//
@ -193,14 +197,16 @@ Matrix* predict(Neural_Network* network, Matrix* image_data) {
// }
//
// free(delta_weights);
//
// image_index++;
// }
//
// for (int j = 0; j < network->hidden_amount + 1; ++j) {
// for (int j = 0; j < network->hidden_amount + 1; j++) {
// Matrix* average_delta_weight = scale(batch_weights[j], (1.0 / batch_size));
// apply_weights(network, average_delta_weight, j);
// apply_weights(network, average_delta_weight, j, network->learning_rate);
//
// matrix_free(average_delta_weight);
// matrix_free(batch_weights[j]);
// matrix_free(average_delta_weight);
// }
// }
//}
@ -239,13 +245,13 @@ void train_network(Neural_Network* network, Image *image, int label) {
Matrix* delta = multiply(sigmoid_prime, error);
//calculate and apply the delta for all weights in out-put layer
delta_weights[network->hidden_amount] = calculate_weights_delta(output[network->hidden_amount - 1], delta, network->learning_rate);
delta_weights[network->hidden_amount] = calculate_weights_delta(output[network->hidden_amount - 1], delta);
//hidden layers
Matrix* previous_delta = delta;
for (int i = network->hidden_amount; i > 1; i--) {
delta = calculate_delta_hidden(previous_delta, network->weights[i], output[i - 1]);
delta_weights[i - 1] = calculate_weights_delta(output[i - 2], delta, network->learning_rate);
delta_weights[i - 1] = calculate_weights_delta(output[i - 2], delta);
matrix_free(previous_delta);
previous_delta = delta;
@ -253,10 +259,16 @@ void train_network(Neural_Network* network, Image *image, int label) {
// Input Layer
delta = calculate_delta_hidden(previous_delta, network->weights[1], output[0]);
delta_weights[0] = calculate_weights_delta(image_data, delta, network->learning_rate);
delta_weights[0] = calculate_weights_delta(image_data, delta);
// if you want to use this method as a standalone method this part needs to be uncommented
for (int i = 0; i < network->hidden_amount + 1; ++i) {
apply_weights(network, delta_weights[i], i, network->learning_rate);
}
for (int i = 0; i < network->hidden_amount + 1; ++i) {
apply_weights(network, delta_weights[i], i);
matrix_free(delta_weights[i]);
}
// De-allocate stuff
@ -267,9 +279,7 @@ void train_network(Neural_Network* network, Image *image, int label) {
matrix_free(output[i]);
}
for (int i = 0; i < network->hidden_amount + 1; ++i) {
matrix_free(delta_weights[i]);
}
matrix_free(sigmoid_prime);
matrix_free(wanted_output);
@ -308,7 +318,7 @@ Matrix* calculate_delta_hidden(Matrix* next_layer_delta, Matrix* weights, Matrix
return new_deltas;
}
void apply_weights(Neural_Network* network, Matrix* delta_weights_matrix, int index) {
void apply_weights(Neural_Network *network, Matrix *delta_weights_matrix, int index, double learning_rate) {
if(index > network->hidden_amount || index < 0) {
printf("ERROR: Index out of range! (apply_weights)");
@ -320,27 +330,28 @@ void apply_weights(Neural_Network* network, Matrix* delta_weights_matrix, int in
exit(1);
}
// scale by learning rate
Matrix* scaled_delta_weights_matrix = scale(delta_weights_matrix, learning_rate);
for (int i = 0; i < delta_weights_matrix->rows; i++) {
for (int j = 0; j < delta_weights_matrix->columns; j++) {
network->weights[index]->numbers[i][j] += delta_weights_matrix->numbers[i][j]; // multiply delta_weights_matrix with learning rate AND - instead of + because soll-ist
for (int j = 0; j < scaled_delta_weights_matrix->columns; j++) {
network->weights[index]->numbers[i][j] += scaled_delta_weights_matrix->numbers[i][j]; // multiply delta_weights_matrix with learning rate AND - instead of + because soll-ist
}
}
matrix_free(scaled_delta_weights_matrix);
}
Matrix *calculate_weights_delta(Matrix *previous_layer_output, Matrix *delta_matrix, double learning_rate) {
Matrix *calculate_weights_delta(Matrix *previous_layer_output, Matrix *delta_matrix) {
Matrix* previous_out_with_one = matrix_add_bias(previous_layer_output);
Matrix* transposed_previous_out_with_bias = transpose(previous_out_with_one);
Matrix* weights_delta_matrix = dot(delta_matrix, transposed_previous_out_with_bias);
// scale by learning rate
Matrix* result = scale(weights_delta_matrix, learning_rate);
matrix_free(previous_out_with_one);
matrix_free(transposed_previous_out_with_bias);
matrix_free(weights_delta_matrix);
return result;
return weights_delta_matrix;
}
Matrix* sigmoid_derivative(Matrix* matrix) {

View file

@ -1,6 +1,6 @@
#include "matrix.h"
#include "image.h"
#include "matrix/matrix.h"
#include "image/image.h"
typedef struct {
int input_size;