-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathMain.cpp
More file actions
75 lines (60 loc) · 1.31 KB
/
Main.cpp
File metadata and controls
75 lines (60 loc) · 1.31 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
#include <iostream>
#include <Eigen/Dense>
#include "include/nn/layers/Layers.h"
#include "include/nn/Sequential.h"
#include "include/nn/criteria/BinaryCrossEntropyLoss.h"
#include "include/nn/optimizers/SGD.h"
// Runs the training loop for `epochs` iterations: forward pass, loss
// evaluation, backpropagation, and one optimizer update per epoch.
//
// X         input samples, one example per row
// y         target outputs, rows aligned with X
// model     network being trained; its parameters are updated in place
// loss_fn   criterion; evaluated on (y, out) and then handed to backward()
// optimizer update rule applied to the model's parameters
// epochs    number of training iterations (default 100)
void train(
    Eigen::MatrixXd& X, Eigen::MatrixXd& y,
    Sequential& model,
    Criterion& loss_fn,
    Optimizer& optimizer,
    int epochs=100)
{
    for (int epoch = 0; epoch < epochs; epoch++)
    {
        // Forward pass through the whole network.
        Eigen::MatrixXd predictions = model(X);
        double epoch_loss = loss_fn(y, predictions);

        // Log every 100th epoch so long runs stay readable.
        if (epoch % 100 == 0)
            std::cout << "Epoch: " << epoch << ", Loss: " << epoch_loss << std::endl;

        model.backward(loss_fn);  // propagate gradients back through the layers
        optimizer.step();         // apply the gradient update
        optimizer.zero_grad();    // reset gradients before the next iteration
    }
}
int main()
{
Eigen::MatrixXd X(8, 3);
X << 0, 0, 0,
0, 0, 1,
0, 1, 0,
0, 1, 1,
1, 0, 0,
1, 0, 1,
1, 1, 0,
1, 1, 1;
Eigen::MatrixXd y(8, 1);
y << 0, 1, 1, 0, 1, 0, 0, 1;
Sequential model(
Linear(3, 4),
ReLU(),
Linear(4, 4),
ReLU(),
Linear(4, 1),
Sigmoid()
);
BinaryCrossEntropyLoss loss_fn;
SGD optimizer(model, 0.1, 0.9);
train(X, y, model, loss_fn, optimizer, 10000);
Eigen::MatrixXd out = model(X);
std::cout << "\nInput: \n" << X << std::endl;
std::cout << "\nOutput: \n " << out << std::endl;
std::cin.get();
return 0;
}