# feature_extraction.py
from typing import Dict

# Relative imports from the surrounding `transformers` package: docstring
# decorator utilities and the generic Pipeline base machinery.
from ..utils import add_end_docstrings
from .base import GenericTensor, Pipeline, build_pipeline_init_args
  4. @add_end_docstrings(
  5. build_pipeline_init_args(has_tokenizer=True, supports_binary_output=False),
  6. r"""
  7. tokenize_kwargs (`dict`, *optional*):
  8. Additional dictionary of keyword arguments passed along to the tokenizer.
  9. return_tensors (`bool`, *optional*):
  10. If `True`, returns a tensor according to the specified framework, otherwise returns a list.""",
  11. )
  12. class FeatureExtractionPipeline(Pipeline):
  13. """
  14. Feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
  15. transformer, which can be used as features in downstream tasks.
  16. Example:
  17. ```python
  18. >>> from transformers import pipeline
  19. >>> extractor = pipeline(model="google-bert/bert-base-uncased", task="feature-extraction")
  20. >>> result = extractor("This is a simple test.", return_tensors=True)
  21. >>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input string.
  22. torch.Size([1, 8, 768])
  23. ```
  24. Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
  25. This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
  26. `"feature-extraction"`.
  27. All models may be used for this pipeline. See a list of all models, including community-contributed models on
  28. [huggingface.co/models](https://huggingface.co/models).
  29. """
  30. def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
  31. if tokenize_kwargs is None:
  32. tokenize_kwargs = {}
  33. if truncation is not None:
  34. if "truncation" in tokenize_kwargs:
  35. raise ValueError(
  36. "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
  37. )
  38. tokenize_kwargs["truncation"] = truncation
  39. preprocess_params = tokenize_kwargs
  40. postprocess_params = {}
  41. if return_tensors is not None:
  42. postprocess_params["return_tensors"] = return_tensors
  43. return preprocess_params, {}, postprocess_params
  44. def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
  45. model_inputs = self.tokenizer(inputs, return_tensors=self.framework, **tokenize_kwargs)
  46. return model_inputs
  47. def _forward(self, model_inputs):
  48. model_outputs = self.model(**model_inputs)
  49. return model_outputs
  50. def postprocess(self, model_outputs, return_tensors=False):
  51. # [0] is the first available tensor, logits or last_hidden_state.
  52. if return_tensors:
  53. return model_outputs[0]
  54. if self.framework == "pt":
  55. return model_outputs[0].tolist()
  56. elif self.framework == "tf":
  57. return model_outputs[0].numpy().tolist()
  58. def __call__(self, *args, **kwargs):
  59. """
  60. Extract the features of the input(s).
  61. Args:
  62. args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of.
  63. Return:
  64. A nested list of `float`: The features computed by the model.
  65. """
  66. return super().__call__(*args, **kwargs)