[optimization] Adding learning rate to lazy sparse update in stochastic gradient descent

commit 622dc354e7
parent 79f2b7c192
Author: Al
Date: 2016-01-12 11:02:12 -05:00

3 changed files with 17 additions and 15 deletions

@@ -18,8 +18,9 @@ gamma_t = gamma_0(1 + gamma_0 * lambda * t)^-1
 bool stochastic_gradient_descent(matrix_t *theta, matrix_t *gradient, double gamma);
 bool stochastic_gradient_descent_sparse(matrix_t *theta, matrix_t *gradient, uint32_array *update_indices, double gamma);
-bool stochastic_gradient_descent_sparse_regularize_weights(matrix_t *theta, uint32_array *update_indices, uint32_array *last_updated, uint32_t t, double lambda);
-bool stochastic_gradient_descent_sparse_finalize_weights(matrix_t *theta, uint32_array *last_updated, uint32_t t, double lambda);
+bool stochastic_gradient_descent_regularize_weights(matrix_t *theta, uint32_array *update_indices, uint32_array *last_updated, uint32_t t, double lambda, double gamma_0);
+bool stochastic_gradient_descent_finalize_weights(matrix_t *theta, uint32_array *last_updated, uint32_t t, double lambda, double gamma_0);
+double stochastic_gradient_descent_gamma_t(double gamma_0, double lambda, uint32_t t);
 #endif
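
The hunk header carries the learning-rate schedule documented in this header, gamma_t = gamma_0 * (1 + gamma_0 * lambda * t)^-1, which the newly declared stochastic_gradient_descent_gamma_t exposes; the regularize/finalize signatures now take gamma_0 so the lazy sparse update can compute it. Below is a minimal sketch of the idea, not this commit's actual implementation: the declared gamma_t formula, plus a hypothetical lazy_regularize_weight helper, assuming each skipped step s shrinks a weight by (1 - gamma_s * lambda) under L2 regularization.

#include <stdint.h>

/* Decaying learning rate from the header comment:
   gamma_t = gamma_0 * (1 + gamma_0 * lambda * t)^-1 */
double stochastic_gradient_descent_gamma_t(double gamma_0, double lambda, uint32_t t) {
    return gamma_0 / (1.0 + gamma_0 * lambda * (double)t);
}

/* Hypothetical helper (not in the diff): lazy L2 regularization for a
   single weight. With sparse gradients, most weights are untouched on
   most iterations, so instead of shrinking every weight every step, the
   shrinkage for the steps since last_updated is applied in one pass the
   next time the weight's feature appears. The per-step factor
   (1 - gamma_s * lambda) is an assumption about the update rule. */
double lazy_regularize_weight(double w, double gamma_0, double lambda,
                              uint32_t last_updated, uint32_t t) {
    for (uint32_t s = last_updated; s < t; s++) {
        w *= 1.0 - stochastic_gradient_descent_gamma_t(gamma_0, lambda, s) * lambda;
    }
    return w;
}

Before this change, the lazy update applied the regularization penalty without the decaying learning rate; threading gamma_0 through regularize_weights and finalize_weights lets the deferred shrinkage use the same gamma_t schedule as the live updates.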