
Commit 379b934

Add Momentum Optimizer to 'optimization' in machine-learning (TheAlgorithms#933)
1 parent a8491ae commit 379b934

3 files changed: +146, -0 lines changed


DIRECTORY.md

Lines changed: 1 addition & 0 deletions

@@ -174,6 +174,7 @@
  * Optimization
  * [Adam](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/optimization/adam.rs)
  * [Gradient Descent](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/optimization/gradient_descent.rs)
+ * [Momentum](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/optimization/momentum.rs)
  * Math
  * [Abs](https://github.com/TheAlgorithms/Rust/blob/master/src/math/abs.rs)
  * [Aliquot Sum](https://github.com/TheAlgorithms/Rust/blob/master/src/math/aliquot_sum.rs)
src/machine_learning/optimization/mod.rs

Lines changed: 1 addition & 0 deletions

@@ -1,5 +1,6 @@
  mod adam;
  mod gradient_descent;
+ mod momentum;

  pub use self::adam::Adam;
  pub use self::gradient_descent::gradient_descent;
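
Note that the commit only declares the new module here; unlike `Adam` and `gradient_descent`, `momentum` is not re-exported, which is presumably why the function in the new file carries `#[allow(dead_code)]`. A hypothetical follow-up re-export (not part of this commit), following the existing pattern in this file, would be:

pub use self::momentum::momentum;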
src/machine_learning/optimization/momentum.rs

Lines changed: 144 additions & 0 deletions

/// Momentum Optimization
///
/// Momentum is an extension of gradient descent that accelerates convergence by accumulating
/// a velocity vector in directions of persistent reduction in the objective function.
/// This helps the optimizer navigate ravines and avoid getting stuck in local minima.
///
/// The algorithm maintains a velocity vector that accumulates exponentially decaying moving
/// averages of past gradients. This allows the optimizer to build up speed in consistent
/// directions while dampening oscillations.
///
/// The update equations are:
/// velocity_{k+1} = beta * velocity_k + gradient_of_function(x_k)
/// x_{k+1} = x_k - learning_rate * velocity_{k+1}
///
/// where beta (typically 0.9) controls how much past gradients influence the current update.
///
/// # Arguments
///
/// * `derivative` - The function that calculates the gradient of the objective function at a given point.
/// * `x` - The initial parameter vector to be optimized.
/// * `learning_rate` - Step size for each iteration.
/// * `beta` - Momentum coefficient (typically 0.9). Higher values give more weight to past gradients.
/// * `num_iterations` - The number of iterations to run the optimization.
///
/// # Returns
///
/// A mutable reference to the optimized parameter vector `x`.
#[allow(dead_code)]
pub fn momentum(
    derivative: impl Fn(&[f64]) -> Vec<f64>,
    x: &mut Vec<f64>,
    learning_rate: f64,
    beta: f64,
    num_iterations: i32,
) -> &mut Vec<f64> {
    // Initialize velocity vector to zero
    let mut velocity: Vec<f64> = vec![0.0; x.len()];

    for _ in 0..num_iterations {
        let gradient = derivative(x);

        // Update velocity and parameters
        for ((x_k, vel), grad) in x.iter_mut().zip(velocity.iter_mut()).zip(gradient.iter()) {
            *vel = beta * *vel + grad;
            *x_k -= learning_rate * *vel;
        }
    }
    x
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_momentum_optimized() {
        fn derivative_of_square(params: &[f64]) -> Vec<f64> {
            params.iter().map(|x| 2.0 * x).collect()
        }

        let mut x: Vec<f64> = vec![5.0, 6.0];
        let learning_rate: f64 = 0.01;
        let beta: f64 = 0.9;
        let num_iterations: i32 = 1000;

        let minimized_vector = momentum(
            derivative_of_square,
            &mut x,
            learning_rate,
            beta,
            num_iterations,
        );

        let test_vector = [0.0, 0.0];
        let tolerance = 1e-6;

        for (minimized_value, test_value) in minimized_vector.iter().zip(test_vector.iter()) {
            assert!((minimized_value - test_value).abs() < tolerance);
        }
    }

    #[test]
    fn test_momentum_unoptimized() {
        fn derivative_of_square(params: &[f64]) -> Vec<f64> {
            params.iter().map(|x| 2.0 * x).collect()
        }

        let mut x: Vec<f64> = vec![5.0, 6.0];
        let learning_rate: f64 = 0.01;
        let beta: f64 = 0.9;
        let num_iterations: i32 = 10;

        let minimized_vector = momentum(
            derivative_of_square,
            &mut x,
            learning_rate,
            beta,
            num_iterations,
        );

        let test_vector = [0.0, 0.0];
        let tolerance = 1e-6;

        for (minimized_value, test_value) in minimized_vector.iter().zip(test_vector.iter()) {
            assert!((minimized_value - test_value).abs() >= tolerance);
        }
    }

    #[test]
    fn test_momentum_faster_than_gd() {
        fn derivative_of_square(params: &[f64]) -> Vec<f64> {
            params.iter().map(|x| 2.0 * x).collect()
        }

        // Test that momentum converges faster than gradient descent
        let mut x_momentum: Vec<f64> = vec![5.0, 6.0];
        let mut x_gd: Vec<f64> = vec![5.0, 6.0];
        let learning_rate: f64 = 0.01;
        let beta: f64 = 0.9;
        let num_iterations: i32 = 50;

        momentum(
            derivative_of_square,
            &mut x_momentum,
            learning_rate,
            beta,
            num_iterations,
        );

        // Plain gradient descent, run for the same number of iterations for comparison
        for _ in 0..num_iterations {
            let gradient = derivative_of_square(&x_gd);
            for (x_k, grad) in x_gd.iter_mut().zip(gradient.iter()) {
                *x_k -= learning_rate * grad;
            }
        }

        // Momentum should be closer to zero
        let momentum_distance: f64 = x_momentum.iter().map(|x| x * x).sum();
        let gd_distance: f64 = x_gd.iter().map(|x| x * x).sum();

        assert!(momentum_distance < gd_distance);
    }
}
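
As a quick illustration of the two update equations in the doc comment, the standalone sketch below (not part of the commit; the `main` entry point and variable names are illustrative only) traces a few momentum steps on f(x) = x^2, whose gradient is 2x, using the same learning_rate and beta as the tests:

// Standalone sketch: applies the update equations from the doc comment by hand
// to f(x) = x^2. Not part of the commit; purely illustrative.
fn main() {
    let learning_rate = 0.01;
    let beta = 0.9;
    let mut x = 5.0_f64;
    let mut velocity = 0.0_f64;

    for k in 0..5 {
        let gradient = 2.0 * x; // gradient of x^2 at the current point
        velocity = beta * velocity + gradient; // velocity_{k+1} = beta * velocity_k + gradient
        x -= learning_rate * velocity; // x_{k+1} = x_k - learning_rate * velocity_{k+1}
        println!("k = {k}: velocity = {velocity:.3}, x = {x:.3}");
    }
}

After only a few steps the accumulated velocity makes each update larger than the plain gradient-descent step of learning_rate * gradient, which is the behaviour `test_momentum_faster_than_gd` asserts over 50 iterations.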
