
Commit d5eec87

Mysjkint authored and streamDOTh committed Oct 20, 2018
Implemented gradient descent in C++ (#352)
* Gradient descent implementation.
* Small fixes in implementation. Split data into x and y data vectors, instead of containing it all inside one data vector.
* Fixes and readme documentation.
1 parent 074c449 commit d5eec87

File tree

3 files changed: +125 -0 lines changed

 
@@ -0,0 +1,71 @@
#include "Utility.h"

using namespace std;

// Performs predictions with the given model.
vector<double> Predict(vector<double> x, pair<double, double> model){
    int numDataPoints = x.size();
    vector<double> predictions(numDataPoints);

    for(int i = 0; i < numDataPoints; ++i){
        predictions[i] = x[i] * model.first + model.second;
    }

    return predictions;
}

// Performs a single batch gradient descent step and returns the updated model.
pair<double, double> BatchGradientDecentStep(vector<double> predictions, vector<double> y, double learningRate, pair<double, double> model){
    int numSamples = y.size();
    double gradientX = 0.0; // Gradient w.r.t. the slope (model.first).
    double gradientY = 0.0; // Gradient w.r.t. the intercept (model.second).

    for(int k = 0; k < numSamples; ++k){
        double error = y[k] - predictions[k];
        gradientX += ((-2.0) / (double) numSamples) * error * y[k];
        gradientY += ((-2.0) / (double) numSamples) * error;
    }

    model.first = model.first - (learningRate * gradientX);
    model.second = model.second - (learningRate * gradientY);

    return model;
}

// Runs through all the epochs, updating the model based on the calculated gradient.
pair<double, double> LinearRegression(vector<double> x, vector<double> y, unsigned int epochs, double learningRate){
    // Initialize our linear regression model as: 0x + 0.
    pair<double, double> model(0, 0);

    for(unsigned int i = 0; i < epochs; ++i){
        auto predictions = Predict(x, model);
        model = BatchGradientDecentStep(predictions, y, learningRate, model);
    }

    return model;
}

int main(){
    // Define the x range for data generation.
    // Note, larger data values might cause exploding gradients.
    // One possible solution is to reduce the learning rate.
    pair<int, int> range = pair<int, int>(0, 100);

    // Get data from the following linear function: 2x + 5.
    pair<vector<double>, vector<double>> data = GetLinearFunctionData(range, 2, 5);
    vector<double> xData = data.first;
    vector<double> yData = data.second;

    // Run for 10000 epochs with a learning rate of 0.0001.
    pair<double, double> model = LinearRegression(xData, yData, 10000, 0.0001);
    auto predictions = Predict(xData, model);

    cout << "Data generating function: 2x + 5" << endl;
    // Mean squared error: 2.37223.
    cout << "Mean squared error: " << MSE(yData, predictions) << endl;
    // Learned model: 2.04665x + 1.94324.
    cout << "Learned model: " << model.first << "x + " << model.second << endl;

    return 0;
}
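
One detail worth flagging: the slope accumulation in BatchGradientDecentStep multiplies the error by y[k], whereas the textbook derivative of the mean squared error for y = mx + b multiplies the error by the input x[k]. Both expressions vanish at the zero-error solution on the noiseless 2x + 5 data generated here, so the run above still converges. A sketch of the conventional step follows; it is an illustration rather than part of the commit, takes the inputs x as an extra parameter, and is meant to sit alongside the functions above, reusing the same includes.

// Sketch only: conventional MSE gradient step, with the slope gradient accumulated against x[k].
// The extra x parameter and the function name are additions for illustration.
pair<double, double> BatchGradientDescentStepWithX(vector<double> x, vector<double> predictions,
                                                   vector<double> y, double learningRate,
                                                   pair<double, double> model){
    int numSamples = y.size();
    double gradientSlope = 0.0;     // d(MSE)/d(slope)
    double gradientIntercept = 0.0; // d(MSE)/d(intercept)

    for(int k = 0; k < numSamples; ++k){
        double error = y[k] - predictions[k];
        gradientSlope += ((-2.0) / (double) numSamples) * error * x[k];
        gradientIntercept += ((-2.0) / (double) numSamples) * error;
    }

    model.first -= learningRate * gradientSlope;
    model.second -= learningRate * gradientIntercept;
    return model;
}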
@@ -0,0 +1,37 @@
/* Header-only utility functions used for gradient descent. */

#include <iostream>
#include <utility>
#include <vector>
#include <cmath>
#include <boost/iterator/zip_iterator.hpp>
#include <boost/range.hpp>

using namespace std;
using namespace boost;

// Generates (x, y) data from a linear function; the parameter x is the slope and yIntercept the intercept.
pair<vector<double>, vector<double>> GetLinearFunctionData(pair<int,int> range, double x, double yIntercept){
    vector<double> xData(range.second);
    vector<double> yData(range.second);
    int numSamples = range.second - range.first;

    for(int i = range.first, k = 0; i < range.second && k < numSamples; ++i, ++k){
        xData[k] = i;
        yData[k] = i * x + yIntercept;
    }

    pair<vector<double>, vector<double>> data(xData, yData);
    return data;
}

// Mean squared error between the actual and predicted values.
double MSE(vector<double> actual, vector<double> predicted){
    auto actualItt = actual.begin();
    auto predictedItt = predicted.begin();
    double sum = 0;
    for( ; actualItt != actual.end() && predictedItt != predicted.end(); ++actualItt, ++predictedItt){
        sum += pow(*actualItt - *predictedItt, 2);
    }
    return sum / actual.size();
}
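
A small usage sketch for these helpers (the values are hypothetical and not from the commit; it assumes the header is saved as Utility.h, matching the #include in the main file, and is built as a separate program so the two main functions do not clash):

#include "Utility.h"

int main(){
    // Generate y = 3x + 1 for x = 0..9.
    auto data = GetLinearFunctionData(pair<int, int>(0, 10), 3, 1);

    // A perfect prediction gives a mean squared error of 0.
    cout << "MSE(y, y) = " << MSE(data.second, data.second) << endl;

    // One prediction off by 1 across three samples gives 1/3.
    cout << "MSE({1,2,3}, {1,2,4}) = " << MSE({1, 2, 3}, {1, 2, 4}) << endl; // 0.333333
    return 0;
}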
@@ -0,0 +1,17 @@
# Gradient Descent Optimisation Algorithm

This explanation does not go into the mathematical details of the algorithm. The mathematics is an important part, but it is better covered online and in books; the focus here is on a high-level explanation of the algorithm.

Gradient descent is a mathematical optimization algorithm. It is essentially a hill-climbing algorithm that follows the gradient of the function being optimized in order to search for optimal values. It is called gradient descent because we minimize the function by incrementally following the gradient towards a local minimum, and it is often used when training machine learning models.

A gradient is essentially the derivative of a multi-variable function, but it is a vector rather than a scalar: it collects the partial derivatives of the function with respect to each of its parameters. The gradient tells us how the function behaves when we change its inputs, and gradient descent exploits one property in particular: the gradient vector points in the direction of steepest ascent. Minimizing a function iteratively is therefore a matter of calculating the gradient and moving in the opposite direction.
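
Written out, with model parameters $w$, loss function $L$, and learning rate $\mathit{lr}$, each step performs the standard update

$$ w \leftarrow w - \mathit{lr} \cdot \nabla_{w} L(w), $$

i.e. the parameters move a small distance against the gradient.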

Practically, we derive the partial derivative of our error function with respect to each model parameter, evaluate those partial derivatives on the data, and incrementally update each parameter in the direction opposite to its partial derivative.
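
Concretely, for the straight-line model $y = m x + b$ fitted with the mean squared error $L = \frac{1}{n}\sum_{i=1}^{n} \big(y_i - (m x_i + b)\big)^2$, the two partial derivatives are

$$ \frac{\partial L}{\partial m} = -\frac{2}{n} \sum_{i=1}^{n} \big(y_i - (m x_i + b)\big)\, x_i, \qquad \frac{\partial L}{\partial b} = -\frac{2}{n} \sum_{i=1}^{n} \big(y_i - (m x_i + b)\big). $$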

## Pseudocode

Here lr is the learning rate, num_epochs is the number of iterations, and w are the model parameters.

    for i = 1 to num_epochs:
        for w_i in w:
            w_i = w_i - lr * partial_derivative(loss, w_i)
