  1. """Solvers for Ridge and LogisticRegression using SAG algorithm"""
  2. # Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
  3. #
  4. # License: BSD 3 clause
  5. import warnings
  6. import numpy as np
  7. from ..exceptions import ConvergenceWarning
  8. from ..utils import check_array
  9. from ..utils.extmath import row_norms
  10. from ..utils.validation import _check_sample_weight
  11. from ._base import make_dataset
  12. from ._sag_fast import sag32, sag64


def get_auto_step_size(
    max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False
):
    """Compute automatic step size for SAG solver.

    The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares over all samples.

    Parameters
    ----------
    max_squared_sum : float
        Maximum squared sum of X over samples.

    alpha_scaled : float
        Constant that multiplies the regularization term, scaled by
        1. / n_samples, the number of samples.

    loss : {'log', 'squared', 'multinomial'}
        The loss function used in SAG solver.

    fit_intercept : bool
        Specifies if a constant (a.k.a. bias or intercept) will be
        added to the decision function.

    n_samples : int, default=None
        Number of rows in X. Useful if is_saga=True.

    is_saga : bool, default=False
        Whether to return step size for the SAGA algorithm or the SAG
        algorithm.

    Returns
    -------
    step_size : float
        Step size used in SAG solver.

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    "SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives" <1407.0202>`
    """
    if loss in ("log", "multinomial"):
        L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
    elif loss == "squared":
        # Lipschitz constant for the squared loss (the step size below is its
        # inverse)
        L = max_squared_sum + int(fit_intercept) + alpha_scaled
    else:
        raise ValueError(
            "Unknown loss function for SAG solver, got %s instead of 'log', "
            "'squared' or 'multinomial'" % loss
        )
    if is_saga:
        # SAGA theoretical step size is 1/3L, but it is recommended to use
        # 1 / (2 * (L + mu n)); see Defazio et al. 2014
        mun = min(2 * n_samples * alpha_scaled, L)
        step = 1.0 / (2 * L + mun)
    else:
        # SAG theoretical step size is 1/16L but it is recommended to use 1 / L,
        # see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
        # slide 65
        step = 1.0 / L
    return step
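

# Worked illustration of the step size formula above (values are made up): with
# the log loss, an intercept, max_squared_sum = 4.0 and alpha_scaled = 0.01, we
# get L = 0.25 * (4.0 + 1) + 0.01 = 1.26, so the SAG step size is
# 1 / L ~= 0.794, i.e.
#
#     step = get_auto_step_size(4.0, 0.01, "log", fit_intercept=True)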


def sag_solver(
    X,
    y,
    sample_weight=None,
    loss="log",
    alpha=1.0,
    beta=0.0,
    max_iter=1000,
    tol=0.001,
    verbose=0,
    random_state=None,
    check_input=True,
    max_squared_sum=None,
    warm_start_mem=None,
    is_saga=False,
):
    """SAG solver for Ridge and LogisticRegression.

    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
    a constant learning rate.

    IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
    same scale. You can normalize the data by using
    sklearn.preprocessing.StandardScaler on your data before passing it to the
    fit method.

    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values for the features. It will
    fit the data according to squared loss or log loss.

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using the squared euclidean norm L2.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.

    y : ndarray of shape (n_samples,)
        Target values. With loss='multinomial', y must be label encoded
        (see preprocessing.LabelEncoder).

    sample_weight : array-like of shape (n_samples,), default=None
        Weights applied to individual samples (1. for unweighted).

    loss : {'log', 'squared', 'multinomial'}, default='log'
        Loss function that will be optimized:
        -'log' is the binary logistic loss, as used in LogisticRegression.
        -'squared' is the squared loss, as used in Ridge.
        -'multinomial' is the multinomial logistic loss, as used in
         LogisticRegression.

        .. versionadded:: 0.18
           *loss='multinomial'*

    alpha : float, default=1.
        L2 regularization term in the objective function
        ``(0.5 * alpha * || W ||_F^2)``.

    beta : float, default=0.
        L1 regularization term in the objective function
        ``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.

    max_iter : int, default=1000
        The max number of passes over the training data if the stopping
        criterion is not reached.

    tol : float, default=0.001
        The stopping criterion for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol.

    verbose : int, default=0
        The verbosity level.

    random_state : int, RandomState instance or None, default=None
        Used when shuffling the data. Pass an int for reproducible output
        across multiple function calls.
        See :term:`Glossary <random_state>`.

    check_input : bool, default=True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default=None
        Maximum squared sum of X over samples. If None, it will be computed,
        going through all the samples. The value should be precomputed
        to speed up cross validation.

    warm_start_mem : dict, default=None
        The initialization parameters used for warm starting. Warm starting is
        currently used in LogisticRegression but not in Ridge.
        It contains:
            - 'coef': the weight vector, with the intercept in last line
              if the intercept is fitted.
            - 'gradient_memory': the scalar gradient for all seen samples.
            - 'sum_gradient': the sum of gradient over all seen samples,
              for each feature.
            - 'intercept_sum_gradient': the sum of gradient over all seen
              samples, for the intercept.
            - 'seen': array of boolean describing the seen samples.
            - 'num_seen': the number of seen samples.

    is_saga : bool, default=False
        Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allows for L1 regularisation.

    Returns
    -------
    coef_ : ndarray of shape (n_features,)
        Weight vector.

    n_iter_ : int
        The number of full passes over all samples.

    warm_start_mem : dict
        Contains a 'coef' key with the fitted result, and possibly the
        fitted intercept at the end of the array. Contains also other keys
        used for warm starting.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(n_samples, n_features)
    >>> y = rng.randn(n_samples)
    >>> clf = linear_model.Ridge(solver='sag')
    >>> clf.fit(X, y)
    Ridge(solver='sag')

    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.LogisticRegression(
    ...     solver='sag', multi_class='multinomial')
    >>> clf.fit(X, y)
    LogisticRegression(multi_class='multinomial', solver='sag')

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    "SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives" <1407.0202>`

    See Also
    --------
    Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
    LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    if warm_start_mem is None:
        warm_start_mem = {}
    # Ridge default max_iter is None
    if max_iter is None:
        max_iter = 1000

    if check_input:
        _dtype = [np.float64, np.float32]
        X = check_array(X, dtype=_dtype, accept_sparse="csr", order="C")
        y = check_array(y, dtype=_dtype, ensure_2d=False, order="C")

    n_samples, n_features = X.shape[0], X.shape[1]
    # As in SGD, the alpha is scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples
    beta_scaled = float(beta) / n_samples

    # if loss == 'multinomial', y should be label encoded.
    n_classes = int(y.max()) + 1 if loss == "multinomial" else 1
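    # For the multinomial case the labels are assumed to be encoded as
    # 0 .. n_classes - 1 (e.g. via preprocessing.LabelEncoder, see the
    # docstring), so int(y.max()) + 1 gives the number of classes.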

    # initialization
    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

    if "coef" in warm_start_mem.keys():
        coef_init = warm_start_mem["coef"]
    else:
        # assume fit_intercept is False
        coef_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")

    # coef_init contains possibly the intercept_init at the end.
    # Note that Ridge centers the data before fitting, so fit_intercept=False.
    fit_intercept = coef_init.shape[0] == (n_features + 1)
    if fit_intercept:
        intercept_init = coef_init[-1, :]
        coef_init = coef_init[:-1, :]
    else:
        intercept_init = np.zeros(n_classes, dtype=X.dtype)

    if "intercept_sum_gradient" in warm_start_mem.keys():
        intercept_sum_gradient = warm_start_mem["intercept_sum_gradient"]
    else:
        intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype)

    if "gradient_memory" in warm_start_mem.keys():
        gradient_memory_init = warm_start_mem["gradient_memory"]
    else:
        gradient_memory_init = np.zeros(
            (n_samples, n_classes), dtype=X.dtype, order="C"
        )

    if "sum_gradient" in warm_start_mem.keys():
        sum_gradient_init = warm_start_mem["sum_gradient"]
    else:
        sum_gradient_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")

    if "seen" in warm_start_mem.keys():
        seen_init = warm_start_mem["seen"]
    else:
        seen_init = np.zeros(n_samples, dtype=np.int32, order="C")

    if "num_seen" in warm_start_mem.keys():
        num_seen_init = warm_start_mem["num_seen"]
    else:
        num_seen_init = 0
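
    # At this point the SAG state is in place: gradient_memory_init stores one
    # gradient per sample (per class), sum_gradient_init keeps their running
    # sum per feature, and seen_init / num_seen_init track which samples have
    # already contributed (see the warm_start_mem description in the docstring).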

    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)

    if max_squared_sum is None:
        max_squared_sum = row_norms(X, squared=True).max()
    step_size = get_auto_step_size(
        max_squared_sum,
        alpha_scaled,
        loss,
        fit_intercept,
        n_samples=n_samples,
        is_saga=is_saga,
    )
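    # The SAG/SAGA update rescales the weights by (1 - step_size * alpha_scaled)
    # at every iteration (the gradient step on the L2 term); a product of
    # exactly 1 would make that scale hit zero, which the lazy update in the
    # Cython routine cannot invert, hence the explicit guard below.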
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError(
            "Current sag implementation does not handle "
            "the case step_size * alpha_scaled == 1"
        )

    sag = sag64 if X.dtype == np.float64 else sag32
    num_seen, n_iter_ = sag(
        dataset,
        coef_init,
        intercept_init,
        n_samples,
        n_features,
        n_classes,
        tol,
        max_iter,
        loss,
        step_size,
        alpha_scaled,
        beta_scaled,
        sum_gradient_init,
        gradient_memory_init,
        seen_init,
        num_seen_init,
        fit_intercept,
        intercept_sum_gradient,
        intercept_decay,
        is_saga,
        verbose,
    )

    if n_iter_ == max_iter:
        warnings.warn(
            "The max_iter was reached which means the coef_ did not converge",
            ConvergenceWarning,
        )

    if fit_intercept:
        coef_init = np.vstack((coef_init, intercept_init))

    warm_start_mem = {
        "coef": coef_init,
        "sum_gradient": sum_gradient_init,
        "intercept_sum_gradient": intercept_sum_gradient,
        "gradient_memory": gradient_memory_init,
        "seen": seen_init,
        "num_seen": num_seen,
    }
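
    # For the multinomial loss, coef_init has shape (n_features, n_classes);
    # it is transposed below so that each row corresponds to one class, the
    # layout used by LogisticRegression. Otherwise the single column is
    # returned as a 1-D weight vector, matching the Returns section above.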
    if loss == "multinomial":
        coef_ = coef_init.T
    else:
        coef_ = coef_init[:, 0]

    return coef_, n_iter_, warm_start_mem
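

# A minimal sketch of calling the solver directly (binary targets encoded as
# -1/+1 for the log loss is an assumption of this example); the usual entry
# points are Ridge(solver="sag") and LogisticRegression(solver="sag"), as in
# the docstring examples above:
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(20, 5)
#     y = np.where(rng.randn(20) > 0, 1.0, -1.0)
#     coef, n_iter, mem = sag_solver(X, y, loss="log", alpha=1.0, tol=1e-3)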