# audio_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from typing import Union

import numpy as np
import requests

from ..utils import add_end_docstrings, is_torch_available, is_torchaudio_available, logging
from .base import Pipeline, build_pipeline_init_args


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES

logger = logging.get_logger(__name__)


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    # Decode whatever container/codec ffmpeg understands into raw mono float32
    # PCM at the requested sampling rate, reading from stdin ("pipe:0") and
    # writing to stdout ("pipe:1").
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    except FileNotFoundError:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename")
    output_stream = ffmpeg_process.communicate(bpayload)
    out_bytes = output_stream[0]

    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
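
# A minimal usage sketch for `ffmpeg_read` (editor's illustration, not part of
# the original module; "sample.flac" is a hypothetical local file and ffmpeg
# must be on PATH):
#
#     with open("sample.flac", "rb") as f:
#         waveform = ffmpeg_read(f.read(), sampling_rate=16000)
#     # `waveform` is a 1-D mono np.float32 array sampled at 16 kHz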


@add_end_docstrings(build_pipeline_init_args(has_feature_extractor=True))
class AudioClassificationPipeline(Pipeline):
    """
    Audio classification pipeline using any `AutoModelForAudioClassification`. This pipeline predicts the class of a
    raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio
    formats.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> classifier = pipeline(model="superb/wav2vec2-base-superb-ks")
    >>> classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
    [{'score': 0.997, 'label': '_unknown_'}, {'score': 0.002, 'label': 'left'}, {'score': 0.0, 'label': 'yes'}, {'score': 0.0, 'label': 'down'}, {'score': 0.0, 'label': 'stop'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"audio-classification"`.

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=audio-classification).
    """

    def __init__(self, *args, **kwargs):
        # Default, might be overridden by the model.config.
        kwargs["top_k"] = 5
        super().__init__(*args, **kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES)

    def __call__(
        self,
        inputs: Union[np.ndarray, bytes, str],
        **kwargs,
    ):
        """
        Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more
        information.

        Args:
            inputs (`np.ndarray` or `bytes` or `str` or `dict`):
                The inputs can be either:

                - `str` that is the filename of the audio file, the file will be read at the correct sampling rate
                  to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.
                - `bytes` that is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
                  same way.
                - (`np.ndarray` of shape (n,) of type `np.float32` or `np.float64`)
                  Raw audio at the correct sampling rate (no further check will be done).
                - `dict` form can be used to pass raw audio sampled at an arbitrary `sampling_rate` and let this
                  pipeline do the resampling. The dict must be in the format `{"sampling_rate": int,
                  "raw": np.array}` or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or
                  `"array"` denotes the raw audio waveform.
            top_k (`int`, *optional*, defaults to None):
                The number of top labels that will be returned by the pipeline. If the provided number is `None` or
                higher than the number of labels available in the model configuration, it will default to the number of
                labels.
            function_to_apply (`str`, *optional*, defaults to "softmax"):
                The function to apply to the model output. By default, the pipeline will apply the softmax function to
                the output of the model. Valid options: ["softmax", "sigmoid", "none"]. Note that passing Python's
                built-in `None` will default to "softmax", so you need to pass the string "none" to disable any
                post-processing.

        Return:
            A list of `dict` with the following keys:

            - **label** (`str`) -- The label predicted.
            - **score** (`float`) -- The corresponding probability.
        """
        return super().__call__(inputs, **kwargs)
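
    # A hedged usage sketch for the `dict` input form (editor's illustration,
    # not part of the original module; assumes `classifier` was built with
    # pipeline("audio-classification", ...) and `audio` is a 1-D np.float32
    # waveform recorded at 8 kHz -- the pipeline resamples it as needed):
    #
    #     result = classifier({"sampling_rate": 8000, "raw": audio}, top_k=3)
    #     # -> [{"score": ..., "label": ...}, ...] with at most 3 entries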

    def _sanitize_parameters(self, top_k=None, function_to_apply=None, **kwargs):
        # This pipeline only takes postprocessing parameters.
        postprocess_params = {}
        if top_k is not None:
            if top_k > self.model.config.num_labels:
                top_k = self.model.config.num_labels
            postprocess_params["top_k"] = top_k

        if function_to_apply is not None:
            if function_to_apply not in ["softmax", "sigmoid", "none"]:
                raise ValueError(
                    f"Invalid value for `function_to_apply`: {function_to_apply}. "
                    "Valid options are ['softmax', 'sigmoid', 'none']"
                )
            postprocess_params["function_to_apply"] = function_to_apply
        else:
            postprocess_params["function_to_apply"] = "softmax"
        return {}, {}, postprocess_params
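
    # Contract sketch (editor's note, not part of the original module):
    # `_sanitize_parameters` splits the caller's kwargs into
    # (preprocess_params, forward_params, postprocess_params). Here everything
    # routes to postprocessing; assuming a model with at least 3 labels:
    #
    #     classifier._sanitize_parameters(top_k=3, function_to_apply="sigmoid")
    #     # -> ({}, {}, {"top_k": 3, "function_to_apply": "sigmoid"})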

    def preprocess(self, inputs):
        if isinstance(inputs, str):
            if inputs.startswith("http://") or inputs.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                inputs = requests.get(inputs).content
            else:
                with open(inputs, "rb") as f:
                    inputs = f.read()

        if isinstance(inputs, bytes):
            inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)

        if isinstance(inputs, dict):
            # Accepting `"array"` which is the key defined in `datasets` for
            # better integration
            if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
                raise ValueError(
                    "When passing a dictionary to AudioClassificationPipeline, the dict needs to contain a "
                    '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, '
                    "containing the sampling_rate associated with that array"
                )

            _inputs = inputs.pop("raw", None)
            if _inputs is None:
                # Remove path which will not be used from `datasets`.
                inputs.pop("path", None)
                _inputs = inputs.pop("array", None)
            in_sampling_rate = inputs.pop("sampling_rate")
            inputs = _inputs
            if in_sampling_rate != self.feature_extractor.sampling_rate:
                import torch

                if is_torchaudio_available():
                    from torchaudio import functional as F
                else:
                    raise ImportError(
                        "torchaudio is required to resample audio samples in AudioClassificationPipeline. "
                        "The torchaudio package can be installed through: `pip install torchaudio`."
                    )

                inputs = F.resample(
                    torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate
                ).numpy()

        if not isinstance(inputs, np.ndarray):
            raise TypeError("We expect a numpy ndarray as input")
        if len(inputs.shape) != 1:
            raise ValueError("We expect a single channel audio input for AudioClassificationPipeline")

        processed = self.feature_extractor(
            inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        return processed
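
    # The resampling step above, as a standalone sketch (editor's illustration,
    # not part of the original module; assumes torch and torchaudio are
    # installed and `waveform_8k` is a 1-D np.float32 array at 8 kHz):
    #
    #     import torch
    #     from torchaudio import functional as F
    #
    #     waveform_16k = F.resample(torch.from_numpy(waveform_8k), 8000, 16000).numpy()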

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, function_to_apply="softmax"):
        if function_to_apply == "softmax":
            probs = model_outputs.logits[0].softmax(-1)
        elif function_to_apply == "sigmoid":
            probs = model_outputs.logits[0].sigmoid()
        else:
            probs = model_outputs.logits[0]
        scores, ids = probs.topk(top_k)

        scores = scores.tolist()
        ids = ids.tolist()

        labels = [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
        return labels
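

if __name__ == "__main__":
    # Editor's demo sketch, not part of the original module. Because this file
    # uses relative imports, run it as an installed module, e.g.:
    #     python -m transformers.pipelines.audio_classification
    # Downloading the checkpoint from the class docstring requires network access.
    from transformers import pipeline

    classifier = pipeline(model="superb/wav2vec2-base-superb-ks")
    preds = classifier(
        "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac",
        top_k=2,
    )
    print(preds)  # e.g. [{'score': ..., 'label': '_unknown_'}, {'score': ..., 'label': 'left'}]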