[optimization] Moving gamma_t calculation to the header in SGD

commit 7cc201dec3 (parent 25ae5bed33)
Author: Al
Date:   2016-01-11 16:40:50 -05:00

2 changed files with 4 additions and 9 deletions


@@ -123,12 +123,7 @@ inline bool stochastic_gradient_descent_sparse_finalize_weights(matrix_t *theta, uint32_array *last_updated, uint32_t t, double lambda)
     return true;
 }
-inline double gamma_t(double gamma_0, double lambda, uint32_t t) {
+inline double stochastic_gradient_descent_gamma_t(double gamma_0, double lambda, uint32_t t) {
     return gamma_0 / (1.0 + lambda * gamma_0 * (double)t);
 }
 
-inline bool stochastic_gradient_descent_scheduled(matrix_t *theta, matrix_t *gradient, double lambda, uint32_t t, double gamma_0) {
-    double gamma = gamma_t(gamma_0, lambda, t);
-    return stochastic_gradient_descent(theta, gradient, gamma);
-}
-
 }
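Net effect of this hunk: the old gamma_t helper is renamed to the namespaced stochastic_gradient_descent_gamma_t, and the stochastic_gradient_descent_scheduled wrapper is deleted, so callers now compute the decayed learning rate themselves and pass it to plain stochastic_gradient_descent. A minimal sketch of the resulting call pattern (the training-loop scaffolding, variable names, and constants below are illustrative, not from the commit):

    double gamma_0 = 0.1;   /* hypothetical initial learning rate */
    double lambda = 1e-4;   /* hypothetical regularization constant */

    for (uint32_t t = 0; t < num_iterations; t++) {
        /* formerly one call: stochastic_gradient_descent_scheduled(theta, gradient, lambda, t, gamma_0) */
        double gamma = stochastic_gradient_descent_gamma_t(gamma_0, lambda, t);
        if (!stochastic_gradient_descent(theta, gradient, gamma)) break;
    }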


@@ -20,6 +20,6 @@ bool stochastic_gradient_descent(matrix_t *theta, matrix_t *gradient, double gamma);
 bool stochastic_gradient_descent_sparse(matrix_t *theta, matrix_t *gradient, uint32_array *update_indices, double gamma);
 bool stochastic_gradient_descent_sparse_regularize_weights(matrix_t *theta, uint32_array *update_indices, uint32_array *last_updated, uint32_t t, double lambda);
 bool stochastic_gradient_descent_sparse_finalize_weights(matrix_t *theta, uint32_array *last_updated, uint32_t t, double lambda);
-bool stochastic_gradient_descent_scheduled(matrix_t *theta, matrix_t *gradient, double lambda, uint32_t t, double gamma_0);
+double stochastic_gradient_descent_gamma_t(double gamma_0, double lambda, uint32_t t);
 
 #endif
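For reference, the function being exposed here implements the standard 1/t learning-rate decay schedule, gamma_t = gamma_0 / (1 + lambda * gamma_0 * t), which starts at gamma_0 and shrinks as iterations accumulate. A self-contained sketch of the decay behavior, with constants chosen purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* Same formula as the inline function above, reproduced standalone. */
    static double gamma_t(double gamma_0, double lambda, uint32_t t) {
        return gamma_0 / (1.0 + lambda * gamma_0 * (double)t);
    }

    int main(void) {
        double gamma_0 = 0.1;   /* hypothetical initial rate */
        double lambda = 0.01;   /* hypothetical regularization strength */
        for (uint32_t t = 0; t <= 1000; t += 250) {
            /* rate decays smoothly: 0.1, 0.08, 0.0667, 0.0571, 0.05 */
            printf("t = %4u  gamma_t = %f\n", t, gamma_t(gamma_0, lambda, t));
        }
        return 0;
    }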