-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathNeuron.cpp
More file actions
82 lines (69 loc) · 2.11 KB
/
Neuron.cpp
File metadata and controls
82 lines (69 loc) · 2.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
#include "Neuron.h"

#include <iostream>
#include <mutex>
#include <numeric>

#include "helpers.h"
/**
 * Constructor.
 *
 * Creates a neuron with `numberOfInputs` weights, each initialised to a
 * random value via rand(0, 1) from helpers.h (presumably uniform in [0, 1] —
 * TODO confirm), plus a random bias. All weight gradients start at zero.
 *
 * @param numberOfInputs the number of inputs to this neuron.
 */
Neuron::Neuron(int numberOfInputs)
{
    this->numberOfInputs = numberOfInputs;
    // NOTE(review): raw `new` with no matching delete visible in this file —
    // gradientMutex leaks unless the destructor (not shown) frees it.
    // Consider holding the mutex by value or std::unique_ptr — confirm header.
    this->gradientMutex = new mutex{};
    // Reserve up front so the loop below performs no reallocations.
    this->weights.reserve(numberOfInputs);
    this->weightGradients.reserve(numberOfInputs);
    for(int weight = 0; weight < numberOfInputs; ++weight)
    {
        weights.push_back(rand(0, 1));
        weightGradients.push_back(0);
    }
    bias = rand(0, 1);
}
/**
 * Perform a forward pass on an individual neuron, based on the given input.
 *
 * Computes the weighted sum of the inputs plus the bias. No activation
 * function is applied here — this is the pre-activation value.
 *
 * @param input the inputs to the neuron; must contain exactly
 *              numberOfInputs elements.
 * @return dot(weights, input) + bias.
 * @throws std::invalid_argument if input.size() != numberOfInputs.
 */
double Neuron::ForwardPass(vector<double> input) {
    // Cast avoids a signed/unsigned comparison between size_t and int;
    // numberOfInputs is non-negative by construction.
    if(input.size() != static_cast<size_t>(numberOfInputs)) {
        throw std::invalid_argument(
                "Incorrect number of inputs (" + to_string(input.size()) + ") provided to neuron (expected " +
                to_string(numberOfInputs) + ").");
    }
    return inner_product(input.begin(), input.end(), this->weights.begin(), double {}) + bias;
}
/**
* Returns the weight at the given index.
*
* @param index index of the weight wanted.
* @return the weight.
*/
double Neuron::GetWeight(int index)
{
return weights[index];
}
/**
 * Accumulate gradients for this neuron from one backpropagation sample.
 *
 * The neuron's partial derivative is activationDerivative *
 * nextLayerPartialDerivative. Each weight gradient receives that value
 * scaled by the matching previous-layer activation; the bias gradient
 * receives it directly. Accumulation is guarded by gradientMutex so
 * multiple threads can backpropagate concurrently.
 *
 * @param previousActivations activations of the previous layer, one per weight.
 * @param activationDerivative derivative of this neuron's activation function.
 * @param nextLayerPartialDerivative partial derivative flowing back from the
 *        next layer.
 * @return this neuron's partial derivative, for use by earlier layers.
 */
double Neuron::UpdateGradients(vector<double> &previousActivations, double activationDerivative, double nextLayerPartialDerivative)
{
    double currentPartialDerivative = activationDerivative * nextLayerPartialDerivative;
    {
        // RAII lock: released even if an access below throws — the previous
        // manual lock()/unlock() pair would leave the mutex locked on a throw.
        lock_guard<mutex> guard(*gradientMutex);
        for(size_t weightIndex = 0; weightIndex < weights.size(); ++weightIndex)
        {
            weightGradients[weightIndex] += previousActivations[weightIndex] * currentPartialDerivative;
        }
        biasGradient += currentPartialDerivative;
    }
    return currentPartialDerivative;
}
void Neuron::ApplyGradientsToWeights(double scalingFactor)
{
for(int weightIndex = 0; weightIndex < weights.size(); ++weightIndex)
{
weights[weightIndex] -= scalingFactor * weightGradients[weightIndex];
weightGradients[weightIndex] = 0;
}
bias -= scalingFactor * biasGradient;
biasGradient = 0;
}