# _hash.py
  1. # Author: Lars Buitinck
  2. # License: BSD 3 clause
  3. from itertools import chain
  4. from numbers import Integral
  5. import numpy as np
  6. import scipy.sparse as sp
  7. from ..base import BaseEstimator, TransformerMixin, _fit_context
  8. from ..utils._param_validation import Interval, StrOptions
  9. from ._hashing_fast import transform as _hashing_transform
  10. def _iteritems(d):
  11. """Like d.iteritems, but accepts any collections.Mapping."""
  12. return d.iteritems() if hasattr(d, "iteritems") else d.items()
  13. class FeatureHasher(TransformerMixin, BaseEstimator):
  14. """Implements feature hashing, aka the hashing trick.
  15. This class turns sequences of symbolic feature names (strings) into
  16. scipy.sparse matrices, using a hash function to compute the matrix column
  17. corresponding to a name. The hash function employed is the signed 32-bit
  18. version of Murmurhash3.
  19. Feature names of type byte string are used as-is. Unicode strings are
  20. converted to UTF-8 first, but no Unicode normalization is done.
  21. Feature values must be (finite) numbers.
  22. This class is a low-memory alternative to DictVectorizer and
  23. CountVectorizer, intended for large-scale (online) learning and situations
  24. where memory is tight, e.g. when running prediction code on embedded
  25. devices.
  26. For an efficiency comparision of the different feature extractors, see
  27. :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
  28. Read more in the :ref:`User Guide <feature_hashing>`.
  29. .. versionadded:: 0.13
  30. Parameters
  31. ----------
  32. n_features : int, default=2**20
  33. The number of features (columns) in the output matrices. Small numbers
  34. of features are likely to cause hash collisions, but large numbers
  35. will cause larger coefficient dimensions in linear learners.
  36. input_type : str, default='dict'
  37. Choose a string from {'dict', 'pair', 'string'}.
  38. Either "dict" (the default) to accept dictionaries over
  39. (feature_name, value); "pair" to accept pairs of (feature_name, value);
  40. or "string" to accept single strings.
  41. feature_name should be a string, while value should be a number.
  42. In the case of "string", a value of 1 is implied.
  43. The feature_name is hashed to find the appropriate column for the
  44. feature. The value's sign might be flipped in the output (but see
  45. non_negative, below).
  46. dtype : numpy dtype, default=np.float64
  47. The type of feature values. Passed to scipy.sparse matrix constructors
  48. as the dtype argument. Do not set this to bool, np.boolean or any
  49. unsigned integer type.
  50. alternate_sign : bool, default=True
  51. When True, an alternating sign is added to the features as to
  52. approximately conserve the inner product in the hashed space even for
  53. small n_features. This approach is similar to sparse random projection.
  54. .. versionchanged:: 0.19
  55. ``alternate_sign`` replaces the now deprecated ``non_negative``
  56. parameter.
  57. See Also
  58. --------
  59. DictVectorizer : Vectorizes string-valued features using a hash table.
  60. sklearn.preprocessing.OneHotEncoder : Handles nominal/categorical features.
  61. Notes
  62. -----
  63. This estimator is :term:`stateless` and does not need to be fitted.
  64. However, we recommend to call :meth:`fit_transform` instead of
  65. :meth:`transform`, as parameter validation is only performed in
  66. :meth:`fit`.
  67. Examples
  68. --------
  69. >>> from sklearn.feature_extraction import FeatureHasher
  70. >>> h = FeatureHasher(n_features=10)
  71. >>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
  72. >>> f = h.transform(D)
  73. >>> f.toarray()
  74. array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
  75. [ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
  76. With `input_type="string"`, the input must be an iterable over iterables of
  77. strings:
  78. >>> h = FeatureHasher(n_features=8, input_type="string")
  79. >>> raw_X = [["dog", "cat", "snake"], ["snake", "dog"], ["cat", "bird"]]
  80. >>> f = h.transform(raw_X)
  81. >>> f.toarray()
  82. array([[ 0., 0., 0., -1., 0., -1., 0., 1.],
  83. [ 0., 0., 0., -1., 0., -1., 0., 0.],
  84. [ 0., -1., 0., 0., 0., 0., 0., 1.]])
  85. """
  86. _parameter_constraints: dict = {
  87. "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="both")],
  88. "input_type": [StrOptions({"dict", "pair", "string"})],
  89. "dtype": "no_validation", # delegate to numpy
  90. "alternate_sign": ["boolean"],
  91. }
  92. def __init__(
  93. self,
  94. n_features=(2**20),
  95. *,
  96. input_type="dict",
  97. dtype=np.float64,
  98. alternate_sign=True,
  99. ):
  100. self.dtype = dtype
  101. self.input_type = input_type
  102. self.n_features = n_features
  103. self.alternate_sign = alternate_sign
  104. @_fit_context(prefer_skip_nested_validation=True)
  105. def fit(self, X=None, y=None):
  106. """Only validates estimator's parameters.
  107. This method allows to: (i) validate the estimator's parameters and
  108. (ii) be consistent with the scikit-learn transformer API.
  109. Parameters
  110. ----------
  111. X : Ignored
  112. Not used, present here for API consistency by convention.
  113. y : Ignored
  114. Not used, present here for API consistency by convention.
  115. Returns
  116. -------
  117. self : object
  118. FeatureHasher class instance.
  119. """
  120. return self
  121. def transform(self, raw_X):
  122. """Transform a sequence of instances to a scipy.sparse matrix.
  123. Parameters
  124. ----------
  125. raw_X : iterable over iterable over raw features, length = n_samples
  126. Samples. Each sample must be iterable an (e.g., a list or tuple)
  127. containing/generating feature names (and optionally values, see
  128. the input_type constructor argument) which will be hashed.
  129. raw_X need not support the len function, so it can be the result
  130. of a generator; n_samples is determined on the fly.
  131. Returns
  132. -------
  133. X : sparse matrix of shape (n_samples, n_features)
  134. Feature matrix, for use with estimators or further transformers.
  135. """
  136. raw_X = iter(raw_X)
  137. if self.input_type == "dict":
  138. raw_X = (_iteritems(d) for d in raw_X)
  139. elif self.input_type == "string":
  140. first_raw_X = next(raw_X)
  141. if isinstance(first_raw_X, str):
  142. raise ValueError(
  143. "Samples can not be a single string. The input must be an iterable"
  144. " over iterables of strings."
  145. )
  146. raw_X_ = chain([first_raw_X], raw_X)
  147. raw_X = (((f, 1) for f in x) for x in raw_X_)
  148. indices, indptr, values = _hashing_transform(
  149. raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0
  150. )
  151. n_samples = indptr.shape[0] - 1
  152. if n_samples == 0:
  153. raise ValueError("Cannot vectorize empty sequence.")
  154. X = sp.csr_matrix(
  155. (values, indices, indptr),
  156. dtype=self.dtype,
  157. shape=(n_samples, self.n_features),
  158. )
  159. X.sum_duplicates() # also sorts the indices
  160. return X
  161. def _more_tags(self):
  162. return {"X_types": [self.input_type]}