automatic_speech_recognition.py

# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, Optional, Union

import numpy as np
import requests

from ..tokenization_utils import PreTrainedTokenizer
from ..utils import is_torch_available, is_torchaudio_available, logging
from .audio_utils import ffmpeg_read
from .base import ChunkPipeline


if TYPE_CHECKING:
    from pyctcdecode import BeamSearchDecoderCTC

    from ..feature_extraction_sequence_utils import SequenceFeatureExtractor
    from ..modeling_utils import PreTrainedModel

logger = logging.get_logger(__name__)

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES


def rescale_stride(stride, ratio):
    """
    Rescales the stride values from audio space to tokens/logits space.

    (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance.
    """
    # Shape is [B, SEQ] for tokens
    # [B, SEQ, V] for logits

    new_strides = []
    for input_n, left, right in stride:
        token_n = int(round(input_n * ratio))
        left = int(round(left / input_n * token_n))
        right = int(round(right / input_n * token_n))
        new_stride = (token_n, left, right)
        new_strides.append(new_stride)

    return new_strides

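# Worked example (illustrative, assumes a hypothetical CTC encoder with
# `inputs_to_logits_ratio == 320`, i.e. 320 audio samples per logit frame):
# the ratio passed in would be 1 / 320 and
#
#     >>> rescale_stride([(160_000, 16_000, 16_000)], 1 / 320)
#     [(500, 50, 50)]
#
# i.e. the left/right strides shrink proportionally so that the same fraction
# of each chunk can be trimmed later in logit space.
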
def chunk_iter(inputs, feature_extractor, chunk_len, stride_left, stride_right, dtype=None):
    inputs_len = inputs.shape[0]
    step = chunk_len - stride_left - stride_right
    for chunk_start_idx in range(0, inputs_len, step):
        chunk_end_idx = chunk_start_idx + chunk_len
        chunk = inputs[chunk_start_idx:chunk_end_idx]
        processed = feature_extractor(chunk, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")
        if dtype is not None:
            processed = processed.to(dtype=dtype)
        _stride_left = 0 if chunk_start_idx == 0 else stride_left
        is_last = chunk_end_idx >= inputs_len
        _stride_right = 0 if is_last else stride_right

        chunk_len = chunk.shape[0]
        stride = (chunk_len, _stride_left, _stride_right)
        if chunk.shape[0] > _stride_left:
            yield {"is_last": is_last, "stride": stride, **processed}
        if is_last:
            break

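# Chunking sketch (illustrative numbers): with chunk_len=16_000 and
# stride_left=stride_right=2_000, the generator advances by
# step = 16_000 - 2_000 - 2_000 = 12_000 samples, so consecutive chunks
# overlap by 4_000 samples:
#
#     chunk 0: samples [0, 16_000),      stride (16_000, 0, 2_000)
#     chunk 1: samples [12_000, 28_000), stride (16_000, 2_000, 2_000)
#     chunk 2: samples from 24_000 to the end, stride (len, 2_000, 0)  # last chunk
#
# The stride regions give the model extra context but are trimmed from the
# decoded output, which is what lets the chunks be stitched back together.
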
def _fast_find_longest_common_sequence(sequence_left, sequence_right):
    seq_len_left = len(sequence_left)
    seq_len_right = len(sequence_right)
    counter = [[0] * (seq_len_right + 1) for _ in range(seq_len_left + 1)]
    longest = 0
    for i in range(seq_len_left):
        for j in range(seq_len_right):
            if sequence_left[i] == sequence_right[j]:
                previous_counter = counter[i][j] + 1
                counter[i + 1][j + 1] = previous_counter
                if previous_counter > longest:
                    longest = previous_counter

    counter = np.array(counter)
    # we return the idx of the first element of the longest common sequence in the left sequence
    index_left = np.argwhere(counter == longest)[-1][0] - longest if longest != 0 else -1
    index_right = np.argwhere(counter == longest)[-1][1] - longest if longest != 0 else -1
    return index_left, index_right, longest

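# Example (illustrative): the helper returns the start index of the longest
# common *contiguous* run in each sequence plus its length. For
# sequence_left=[1, 2, 3, 4] and sequence_right=[3, 4, 5], the run [3, 4]
# yields index_left=2, index_right=0, longest=2; with no overlap at all the
# result is (-1, -1, 0).
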
def _find_longest_common_sequence(sequences, tokenizer):
    # TODO  Use a faster algorithm this can probably be done in O(n)
    # using suffix array.
    # It might be tedious to do because of fault tolerance.
    # We actually have a really good property which is that the total sequence
    # MUST be those subsequences in order.
    # Also the algorithm should be more tolerant to errors.
    sequence = [tok_id for tok_id in sequences[0][0].tolist() if tok_id not in tokenizer.all_special_ids]
    for new_seq in sequences[1:]:
        new_sequence = [tok_id for tok_id in new_seq[0].tolist() if tok_id not in tokenizer.all_special_ids]

        index = 0
        max_ = 0.0
        for i in range(1, len(new_sequence) + 1):
            # epsilon to favor long perfect matches
            eps = i / 10000.0
            matches = np.sum(np.array(sequence[-i:]) == np.array(new_sequence[:i]))
            matching = matches / i + eps
            if matches > 1 and matching > max_:
                index = i
                max_ = matching
        sequence.extend(new_sequence[index:])
    return np.array(sequence)

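# Merge sketch (illustrative token ids): the running transcript is extended by
# matching its tail against the head of each new chunk. With special tokens
# already stripped,
#
#     running sequence: [7, 8, 21, 22]
#     next chunk:       [21, 22, 35, 36]
#
# the best overlap is the 2-token match [21, 22], so only [35, 36] is appended
# and the merged result is [7, 8, 21, 22, 35, 36].
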
class AutomaticSpeechRecognitionPipeline(ChunkPipeline):
    """
    Pipeline that aims at extracting spoken text contained within some audio.

    The input can be either a raw waveform or an audio file. In the case of an audio file, ffmpeg should be installed
    to support multiple audio formats.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> transcriber = pipeline(model="openai/whisper-base")
    >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
    {'text': ' He hoped there would be stew for dinner, turnips and carrots and bruised potatoes and fat mutton pieces to be ladled out in thick, peppered flour-fattened sauce.'}
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    Arguments:
        model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.
        feature_extractor ([`SequenceFeatureExtractor`]):
            The feature extractor that will be used by the pipeline to encode waveform for the model.
        tokenizer ([`PreTrainedTokenizer`]):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            [`PreTrainedTokenizer`].
        decoder (`pyctcdecode.BeamSearchDecoderCTC`, *optional*):
            [PyCTCDecode's
            BeamSearchDecoderCTC](https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180)
            can be passed for language model boosted decoding. See [`Wav2Vec2ProcessorWithLM`] for more information.
        chunk_length_s (`float`, *optional*, defaults to 0):
            The input length for each chunk. If `chunk_length_s = 0` then chunking is disabled (default).

            <Tip>

            For more information on how to effectively use `chunk_length_s`, please have a look at the [ASR chunking
            blog post](https://huggingface.co/blog/asr-chunking).

            </Tip>

        stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`):
            The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables
            the model to *see* more context and infer letters better than without this context, but the pipeline
            discards the stride bits at the end to make the final reconstitution as perfect as possible.

            <Tip>

            For more information on how to effectively use `stride_length_s`, please have a look at the [ASR chunking
            blog post](https://huggingface.co/blog/asr-chunking).

            </Tip>

        framework (`str`, *optional*):
            The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
            installed. If no framework is specified, will default to the one currently installed. If no framework is
            specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if
            no model is provided.
        device (Union[`int`, `torch.device`], *optional*):
            Device ordinal for CPU/GPU support. Setting this to `None` will leverage CPU, a positive integer will run
            the model on the associated CUDA device id.
        torch_dtype (Union[`str`, `torch.dtype`], *optional*):
            The data-type (dtype) of the computation. Setting this to `None` will use float32 precision. Set to
            `torch.float16` or `torch.bfloat16` to use half-precision in the respective dtypes.
    """

    def __init__(
        self,
        model: "PreTrainedModel",
        feature_extractor: Union["SequenceFeatureExtractor", str] = None,
        tokenizer: Optional[PreTrainedTokenizer] = None,
        decoder: Optional[Union["BeamSearchDecoderCTC", str]] = None,
        device: Union[int, "torch.device"] = None,
        torch_dtype: Optional[Union[str, "torch.dtype"]] = None,
        **kwargs,
    ):
        # set the model type so we can check we have the right pre- and post-processing parameters
        if model.config.model_type == "whisper":
            self.type = "seq2seq_whisper"
        elif model.__class__.__name__ in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES.values():
            self.type = "seq2seq"
        elif (
            feature_extractor._processor_class
            and feature_extractor._processor_class.endswith("WithLM")
            and decoder is not None
        ):
            self.decoder = decoder
            self.type = "ctc_with_lm"
        else:
            self.type = "ctc"

        super().__init__(model, tokenizer, feature_extractor, device=device, torch_dtype=torch_dtype, **kwargs)

    def __call__(
        self,
        inputs: Union[np.ndarray, bytes, str],
        **kwargs,
    ):
        """
        Transcribe the audio sequence(s) given as inputs to text. See the [`AutomaticSpeechRecognitionPipeline`]
        documentation for more information.

        Args:
            inputs (`np.ndarray` or `bytes` or `str` or `dict`):
                The inputs are either:
                    - `str` that is either the filename of a local audio file, or a public URL address to download the
                      audio file. The file will be read at the correct sampling rate to get the waveform using
                      *ffmpeg*. This requires *ffmpeg* to be installed on the system.
                    - `bytes` that is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
                      same way.
                    - (`np.ndarray` of shape (n,) of type `np.float32` or `np.float64`)
                      Raw audio at the correct sampling rate (no further check will be done).
                    - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
                      pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "raw":
                      np.array}` with optionally a `"stride": (left: int, right: int)` that can ask the pipeline to
                      ignore the first `left` samples and last `right` samples in decoding (but use them at
                      inference to provide more context to the model). Only use `stride` with CTC models.
            return_timestamps (*optional*, `str` or `bool`):
                Only available for pure CTC models (Wav2Vec2, HuBERT, etc) and the Whisper model. Not available for
                other sequence-to-sequence models.

                For CTC models, timestamps can take one of two formats:
                    - `"char"`: the pipeline will return timestamps along the text for every character in the text. For
                      instance, if you get `[{"text": "h", "timestamp": (0.5, 0.6)}, {"text": "i", "timestamp": (0.7,
                      0.9)}]`, then it means the model predicts that the letter "h" was spoken after `0.5` and before
                      `0.6` seconds.
                    - `"word"`: the pipeline will return timestamps along the text for every word in the text. For
                      instance, if you get `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp":
                      (1.0, 1.5)}]`, then it means the model predicts that the word "hi" was spoken after `0.5` and
                      before `0.9` seconds.

                For the Whisper model, timestamps can take one of two formats:
                    - `"word"`: same as above for word-level CTC timestamps. Word-level timestamps are predicted
                      through the *dynamic-time warping (DTW)* algorithm, an approximation to word-level timestamps
                      by inspecting the cross-attention weights.
                    - `True`: the pipeline will return timestamps along the text for *segments* of words in the text.
                      For instance, if you get `[{"text": " Hi there!", "timestamp": (0.5, 1.5)}]`, then it means the
                      model predicts that the segment "Hi there!" was spoken after `0.5` and before `1.5` seconds.
                      Note that a segment of text refers to a sequence of one or more words, rather than individual
                      words as with word-level timestamps.
            generate_kwargs (`dict`, *optional*):
                The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a
                complete overview of generate, check the [following
                guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation).

        Return:
            `Dict`: A dictionary with the following keys:
                - **text** (`str`): The recognized text.
                - **chunks** (*optional*, `List[Dict]`):
                  When using `return_timestamps`, `chunks` will become a list containing all the various text
                  chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text":
                  "there", "timestamp": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing
                  `"".join(chunk["text"] for chunk in output["chunks"])`.
        """
        return super().__call__(inputs, **kwargs)
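
    # Call sketch (hedged; the model name, file name and outputs below are
    # purely illustrative):
    #
    #     >>> from transformers import pipeline
    #     >>> asr = pipeline("automatic-speech-recognition", model="openai/whisper-base")
    #     >>> asr("audio.flac", return_timestamps=True, chunk_length_s=30, stride_length_s=5)
    #     {'text': '...', 'chunks': [{'text': '...', 'timestamp': (0.0, 5.0)}, ...]}
    #
    # Passing a dict such as {"raw": waveform, "sampling_rate": 8_000} lets the
    # pipeline resample the audio to the feature extractor's rate first.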

    def _sanitize_parameters(
        self,
        chunk_length_s=None,
        stride_length_s=None,
        ignore_warning=None,
        decoder_kwargs=None,
        return_timestamps=None,
        return_language=None,
        generate_kwargs=None,
        max_new_tokens=None,
    ):
        # No parameters on this pipeline right now
        preprocess_params = {}
        if chunk_length_s is not None:
            if self.type == "seq2seq" and not ignore_warning:
                logger.warning(
                    "Using `chunk_length_s` is very experimental with seq2seq models. The results will not necessarily"
                    " be entirely accurate and will have caveats. More information:"
                    " https://github.com/huggingface/transformers/pull/20104. Ignore this warning with pipeline(...,"
                    " ignore_warning=True)"
                )
            preprocess_params["chunk_length_s"] = chunk_length_s
        if stride_length_s is not None:
            preprocess_params["stride_length_s"] = stride_length_s

        forward_params = defaultdict(dict)
        if max_new_tokens is not None:
            warnings.warn(
                "`max_new_tokens` is deprecated and will be removed in version 4.49 of Transformers. To remove this warning, pass `max_new_tokens` as a key inside `generate_kwargs` instead.",
                FutureWarning,
            )
            forward_params["max_new_tokens"] = max_new_tokens
        if generate_kwargs is not None:
            if max_new_tokens is not None and "max_new_tokens" in generate_kwargs:
                raise ValueError(
                    "`max_new_tokens` is defined both as an argument and inside `generate_kwargs` argument, please use"
                    " only 1 version"
                )
            forward_params.update(generate_kwargs)

        postprocess_params = {}
        if decoder_kwargs is not None:
            postprocess_params["decoder_kwargs"] = decoder_kwargs
        if return_timestamps is not None:
            # Check whether we have a valid setting for return_timestamps and throw an error before we perform a forward pass
            if self.type == "seq2seq" and return_timestamps:
                raise ValueError("We cannot return_timestamps yet on non-CTC models apart from Whisper!")
            if self.type == "ctc_with_lm" and return_timestamps != "word":
                raise ValueError("CTC with LM can only predict word level timestamps, set `return_timestamps='word'`")
            if self.type == "ctc" and return_timestamps not in ["char", "word"]:
                raise ValueError(
                    "CTC can either predict character level timestamps, or word level timestamps. "
                    "Set `return_timestamps='char'` or `return_timestamps='word'` as required."
                )
            if self.type == "seq2seq_whisper" and return_timestamps == "char":
                raise ValueError(
                    "Whisper cannot return `char` timestamps, only word level or segment level timestamps. "
                    "Use `return_timestamps='word'` or `return_timestamps=True` respectively."
                )
            forward_params["return_timestamps"] = return_timestamps
            postprocess_params["return_timestamps"] = return_timestamps
        if return_language is not None:
            if self.type != "seq2seq_whisper":
                raise ValueError("Only Whisper can return language for now.")
            postprocess_params["return_language"] = return_language

        return preprocess_params, forward_params, postprocess_params

    def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None):
        if isinstance(inputs, str):
            if inputs.startswith("http://") or inputs.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                inputs = requests.get(inputs).content
            else:
                with open(inputs, "rb") as f:
                    inputs = f.read()

        if isinstance(inputs, bytes):
            inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)

        stride = None
        extra = {}
        if isinstance(inputs, dict):
            stride = inputs.pop("stride", None)
            # Accepting `"array"` which is the key defined in `datasets` for
            # better integration
            if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
                raise ValueError(
                    "When passing a dictionary to AutomaticSpeechRecognitionPipeline, the dict needs to contain a "
                    '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, '
                    "containing the sampling_rate associated with that array"
                )

            _inputs = inputs.pop("raw", None)
            if _inputs is None:
                # Remove path which will not be used from `datasets`.
                inputs.pop("path", None)
                _inputs = inputs.pop("array", None)
            in_sampling_rate = inputs.pop("sampling_rate")
            extra = inputs
            inputs = _inputs
            if in_sampling_rate != self.feature_extractor.sampling_rate:
                if is_torchaudio_available():
                    from torchaudio import functional as F
                else:
                    raise ImportError(
                        "torchaudio is required to resample audio samples in AutomaticSpeechRecognitionPipeline. "
                        "The torchaudio package can be installed through: `pip install torchaudio`."
                    )

                inputs = F.resample(
                    torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate
                ).numpy()
                ratio = self.feature_extractor.sampling_rate / in_sampling_rate
            else:
                ratio = 1
            if stride is not None:
                if stride[0] + stride[1] > inputs.shape[0]:
                    raise ValueError("Stride is too large for input")

                # Stride needs to get the chunk length here, it's going to get
                # swallowed by the `feature_extractor` later, and then batching
                # can add extra data in the inputs, so we need to keep track
                # of the original length in the stride so we can cut properly.
                stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio)))
        if not isinstance(inputs, np.ndarray):
            raise TypeError(f"We expect a numpy ndarray as input, got `{type(inputs)}`")
        if len(inputs.shape) != 1:
            raise ValueError("We expect a single channel audio input for AutomaticSpeechRecognitionPipeline")

        if chunk_length_s:
            if stride_length_s is None:
                stride_length_s = chunk_length_s / 6

            if isinstance(stride_length_s, (int, float)):
                stride_length_s = [stride_length_s, stride_length_s]

            # XXX: Carefully, this variable will not exist in `seq2seq` setting.
            # Currently chunking is not possible at this level for `seq2seq` so
            # it's ok.
            align_to = getattr(self.model.config, "inputs_to_logits_ratio", 1)
            chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate / align_to) * align_to)
            stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate / align_to) * align_to)
            stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate / align_to) * align_to)

            if chunk_len < stride_left + stride_right:
                raise ValueError("Chunk length must be superior to stride length")

            for item in chunk_iter(
                inputs, self.feature_extractor, chunk_len, stride_left, stride_right, self.torch_dtype
            ):
                yield item
        else:
            if self.type == "seq2seq_whisper" and inputs.shape[0] > self.feature_extractor.n_samples:
                processed = self.feature_extractor(
                    inputs,
                    sampling_rate=self.feature_extractor.sampling_rate,
                    truncation=False,
                    padding="longest",
                    return_tensors="pt",
                    return_attention_mask=True,
                )
            else:
                if self.type == "seq2seq_whisper" and stride is None:
                    processed = self.feature_extractor(
                        inputs,
                        sampling_rate=self.feature_extractor.sampling_rate,
                        return_tensors="pt",
                        return_token_timestamps=True,
                        return_attention_mask=True,
                    )
                    extra["num_frames"] = processed.pop("num_frames")
                else:
                    processed = self.feature_extractor(
                        inputs,
                        sampling_rate=self.feature_extractor.sampling_rate,
                        return_tensors="pt",
                        return_attention_mask=True,
                    )
            if self.torch_dtype is not None:
                processed = processed.to(dtype=self.torch_dtype)
            if stride is not None:
                if self.type == "seq2seq":
                    raise ValueError("Stride is only usable with CTC models, try removing it !")

                processed["stride"] = stride
            yield {"is_last": True, **processed, **extra}
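
    # Chunk-size arithmetic sketch (illustrative values): with a 16 kHz feature
    # extractor, chunk_length_s=30, stride_length_s=5 and inputs_to_logits_ratio=320,
    # `preprocess` computes
    #     chunk_len    = round(30 * 16_000 / 320) * 320 = 480_000 samples
    #     stride_left  = round(5 * 16_000 / 320) * 320  =  80_000 samples
    #     stride_right = round(5 * 16_000 / 320) * 320  =  80_000 samples
    # so each chunk covers 30 s of audio and consecutive chunks overlap by 10 s.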

    def _forward(self, model_inputs, return_timestamps=False, **generate_kwargs):
        attention_mask = model_inputs.pop("attention_mask", None)
        stride = model_inputs.pop("stride", None)
        num_frames = model_inputs.pop("num_frames", None)
        is_last = model_inputs.pop("is_last")

        if stride is not None and num_frames is not None:
            raise ValueError("num_frames must be used only when stride is None")

        if self.type in {"seq2seq", "seq2seq_whisper"}:
            # Consume values so we can let extra information flow freely through
            # the pipeline (important for `partial` in microphone)
            if "input_features" in model_inputs:
                inputs = model_inputs.pop("input_features")
            elif "input_values" in model_inputs:
                inputs = model_inputs.pop("input_values")
            else:
                raise ValueError(
                    "Seq2Seq speech recognition model requires either a "
                    f"`input_features` or `input_values` key, but only has {model_inputs.keys()}"
                )

            # custom processing for Whisper timestamps and word-level timestamps
            if return_timestamps and self.type == "seq2seq_whisper":
                generate_kwargs["return_timestamps"] = return_timestamps
                if return_timestamps == "word":
                    generate_kwargs["return_token_timestamps"] = True
                    generate_kwargs["return_segments"] = True

                    if stride is not None:
                        if isinstance(stride, tuple):
                            generate_kwargs["num_frames"] = stride[0] // self.feature_extractor.hop_length
                        else:
                            generate_kwargs["num_frames"] = [s[0] // self.feature_extractor.hop_length for s in stride]
                    else:
                        generate_kwargs["num_frames"] = num_frames

            # User-defined `generation_config` passed to the pipeline call takes precedence
            if "generation_config" not in generate_kwargs:
                generate_kwargs["generation_config"] = self.generation_config

            tokens = self.model.generate(
                inputs=inputs,
                attention_mask=attention_mask,
                **generate_kwargs,
            )
            # whisper longform generation stores timestamps in "segments"
            if return_timestamps == "word" and self.type == "seq2seq_whisper":
                if "segments" not in tokens:
                    out = {"tokens": tokens["sequences"], "token_timestamps": tokens["token_timestamps"]}
                else:
                    token_timestamps = [
                        torch.cat([segment["token_timestamps"] for segment in segment_list])
                        for segment_list in tokens["segments"]
                    ]
                    out = {"tokens": tokens["sequences"], "token_timestamps": token_timestamps}
            else:
                out = {"tokens": tokens}
            if self.type == "seq2seq_whisper":
                if stride is not None:
                    out["stride"] = stride

        else:
            inputs = {
                self.model.main_input_name: model_inputs.pop(self.model.main_input_name),
                "attention_mask": attention_mask,
            }
            outputs = self.model(**inputs)
            logits = outputs.logits

            if self.type == "ctc_with_lm":
                out = {"logits": logits}
            else:
                out = {"tokens": logits.argmax(dim=-1)}
            if stride is not None:
                # Send stride to `postprocess`.
                # it needs to be handled there where
                # the pieces are to be concatenated.
                ratio = 1 / self.model.config.inputs_to_logits_ratio
                if isinstance(stride, tuple):
                    out["stride"] = rescale_stride([stride], ratio)[0]
                else:
                    out["stride"] = rescale_stride(stride, ratio)
        # Leftover
        extra = model_inputs
        return {"is_last": is_last, **out, **extra}
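
    # Frame-count sketch (hedged): for Whisper word-level timestamps, the number
    # of valid feature frames is derived from the stride above, e.g. a chunk of
    # 480_000 samples with a hop length of 160 yields
    # 480_000 // 160 = 3_000 frames passed to `generate()` as `num_frames`.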

    def postprocess(
        self, model_outputs, decoder_kwargs: Optional[Dict] = None, return_timestamps=None, return_language=None
    ):
        # Optional return types
        optional = {}

        final_items = []
        key = "logits" if self.type == "ctc_with_lm" else "tokens"
        stride = None
        for outputs in model_outputs:
            if self.framework == "pt" and outputs[key].dtype in (torch.bfloat16, torch.float16):
                items = outputs[key].to(torch.float32).numpy()
            else:
                items = outputs[key].numpy()
            stride = outputs.get("stride", None)
            if stride is not None and self.type in {"ctc", "ctc_with_lm"}:
                total_n, left, right = stride
                # Total_n might be < logits.shape[1]
                # because of padding, that's why
                # we need to reconstruct this information
                # This won't work with left padding (which doesn't exist right now)
                right_n = total_n - right
                items = items[:, left:right_n]
            final_items.append(items)

        if stride and self.type == "seq2seq":
            items = _find_longest_common_sequence(final_items, self.tokenizer)
        elif self.type == "seq2seq_whisper":
            time_precision = self.feature_extractor.chunk_length / self.model.config.max_source_positions
            # Send the chunking back to seconds, it's easier to handle in whisper
            sampling_rate = self.feature_extractor.sampling_rate
            for output in model_outputs:
                if "stride" in output:
                    chunk_len, stride_left, stride_right = output["stride"]
                    # Go back in seconds
                    chunk_len /= sampling_rate
                    stride_left /= sampling_rate
                    stride_right /= sampling_rate
                    output["stride"] = chunk_len, stride_left, stride_right

            text, optional = self.tokenizer._decode_asr(
                model_outputs,
                return_timestamps=return_timestamps,
                return_language=return_language,
                time_precision=time_precision,
            )
        else:
            items = np.concatenate(final_items, axis=1)
            items = items.squeeze(0)

        if self.type == "ctc_with_lm":
            if decoder_kwargs is None:
                decoder_kwargs = {}
            beams = self.decoder.decode_beams(items, **decoder_kwargs)
            text = beams[0][0]
            if return_timestamps:
                # Simply cast from pyctcdecode format to wav2vec2 format to leverage
                # pre-existing code later
                chunk_offset = beams[0][2]
                offsets = []
                for word, (start_offset, end_offset) in chunk_offset:
                    offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset})
        elif self.type != "seq2seq_whisper":
            skip_special_tokens = self.type != "ctc"
            text = self.tokenizer.decode(items, skip_special_tokens=skip_special_tokens)
            if return_timestamps:
                offsets = self.tokenizer.decode(
                    items, skip_special_tokens=skip_special_tokens, output_char_offsets=True
                )["char_offsets"]
                if return_timestamps == "word":
                    offsets = self.tokenizer._get_word_offsets(offsets, self.tokenizer.replace_word_delimiter_char)

        if return_timestamps and self.type not in {"seq2seq", "seq2seq_whisper"}:
            chunks = []
            for item in offsets:
                start = item["start_offset"] * self.model.config.inputs_to_logits_ratio
                start /= self.feature_extractor.sampling_rate

                stop = item["end_offset"] * self.model.config.inputs_to_logits_ratio
                stop /= self.feature_extractor.sampling_rate

                chunks.append({"text": item[return_timestamps], "timestamp": (start, stop)})
            optional["chunks"] = chunks

        extra = defaultdict(list)
        for output in model_outputs:
            output.pop("tokens", None)
            output.pop("logits", None)
            output.pop("is_last", None)
            output.pop("stride", None)
            output.pop("token_timestamps", None)
            for k, v in output.items():
                extra[k].append(v)
        return {"text": text, **optional, **extra}
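
    # Timestamp conversion sketch (illustrative values): for a CTC model with
    # inputs_to_logits_ratio=320 and a 16 kHz feature extractor, a character or
    # word starting at logit offset 100 maps to 100 * 320 / 16_000 = 2.0 seconds,
    # which is how the (start, stop) pairs in `optional["chunks"]` are computed.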


def _find_timestamp_sequence(sequences, tokenizer, feature_extractor, max_source_positions):
    """
    Computes the final sequences by merging the end of the nth sequence with the beginning of the (n+1)th sequence.
    Since `WhisperForConditionalGeneration` produces the timestamps pairwise, we filter the consecutive timestamps
    and only iterate over them. We keep track of the `time` which indicates the actual starting time of the chunk
    that is processed. We need to make sure to offset the timestamp tokens by the `time` in order for the tokenizer
    to properly compute the final `offset`.
    """
    # index of the first timestamp token
    timestamp_begin = tokenizer.convert_tokens_to_ids("<|notimestamps|>") + 1
    items = []
    # approximation of the token to time ratio : ~0.02 seconds
    time_precision = feature_extractor.chunk_length / max_source_positions
    time = 0
    for seq_idx, item in enumerate(sequences):
        sequence, stride = item
        if isinstance(sequence, list):
            sequence = np.array(sequence)
        chunk_len, stride_left, stride_right = stride
        sequence = sequence.squeeze(0)
        # get rid of the `forced_decoder_idx` that are used to parametrize the generation
        begin_idx = np.where(sequence == timestamp_begin)[0][0] if timestamp_begin in sequence else 0
        sequence = sequence[begin_idx:]

        timestamp_tokens = sequence >= timestamp_begin
        if seq_idx != 0 and sum(timestamp_tokens) > 0:
            consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1
            last_timestamp = np.where(timestamp_tokens)[0][-1]
            consecutive = np.append(consecutive, last_timestamp) if last_timestamp not in consecutive else consecutive
            time -= stride_left + stride_right
            offset = int((time / feature_extractor.sampling_rate) / time_precision)
            overlap_time = int((stride_left / feature_extractor.sampling_rate) / time_precision)
            # relevant timestamps are in the overlapping part
            relevant_timestamp = np.where(sequence[consecutive] >= timestamp_begin + overlap_time)[0]
            if relevant_timestamp.shape[0] > 0:
                relevant_timestamp = (
                    consecutive[relevant_timestamp[0] - 1] if relevant_timestamp[0] > 0 else consecutive[0]
                )
                # if a big stride is used, we need to check some of the previous items for the best overlap
                best_match = 0
                sliced_sequence = []
                for idx, previous_sequence in enumerate(reversed(items)):
                    previous_tokens = previous_sequence[1:-1]
                    if previous_sequence[0] < (timestamp_begin + offset - overlap_time) and idx != 0:
                        break  # the previous sequence is too far in the past
                    if len(previous_tokens) > 0:
                        # find the longest common sequence between the overlapping parts
                        index_left, index_right, match_length = _fast_find_longest_common_sequence(
                            sequence[1:relevant_timestamp], previous_tokens
                        )
                        # don't do anything if only 1 token was matched
                        if match_length > 1 and match_length > best_match:
                            best_match = match_length
                            best_idx = idx
                            end_of_curr_sequence_idx = (
                                np.where(sequence[index_left + 1 :] >= timestamp_begin)[0][0] + 1
                            )
                            end_of_curr_sequence_idx = end_of_curr_sequence_idx + 1 + index_left
                            # if all the tokens are matched, suffix
                            if index_left == 0 and match_length == len(previous_tokens):
                                sliced_sequence = np.insert(
                                    sequence[index_left + 1 : end_of_curr_sequence_idx], 0, previous_sequence[0]
                                )
                                sliced_sequence[-1] = previous_sequence[-1]
                            # if part of the previous sequence is not taken
                            elif index_left >= 0:
                                sliced_sequence = sequence[index_left + 1 : end_of_curr_sequence_idx]
                                # let's insert the missing part of the previous sequence
                                previous_slice = (
                                    previous_sequence[: index_right + 1] if index_right > 0 else [previous_sequence[0]]
                                )
                                sliced_sequence = np.insert(sliced_sequence, 0, previous_slice)
                                sliced_sequence[-1] += offset

                if len(sliced_sequence) > 0:
                    items[len(items) - best_idx - 1] = sliced_sequence
                    items = items[: len(items) - best_idx]
                    sequence = sequence[end_of_curr_sequence_idx:]

        # sequence might have changed
        timestamp_tokens = sequence >= timestamp_begin
        consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1
        if sum(timestamp_tokens) > 0:
            last_timestamp = np.where(timestamp_tokens)[0][-1]
            consecutive = (
                np.append(consecutive, last_timestamp + 1) if last_timestamp not in consecutive else consecutive
            )

        if len(consecutive) > 0:
            last_slice = 0
            for current_slice in consecutive:
                actual_offset = items[-1][-1] if seq_idx != 0 or last_slice != 0 else sequence[0]
                sliced_tokens = sequence[last_slice:current_slice]
                duration = sliced_tokens[-1] - sliced_tokens[0]
                sliced_tokens[0] = actual_offset
                sliced_tokens[-1] = actual_offset + duration
                items.append(sliced_tokens)
                last_slice = current_slice

        time += chunk_len
    result = []
    for i in range(len(items)):
        result += items[i].tolist()
    return result
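

# Time-precision sketch (hedged): for Whisper, chunk_length=30 and
# max_source_positions=1500, so time_precision = 30 / 1500 = 0.02 seconds per
# timestamp token. A timestamp token k positions above `timestamp_begin` thus
# denotes k * 0.02 seconds within its chunk, and the merging above shifts those
# values by the running `time` of each chunk before decoding.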