# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Speech processor class for SpeechT5."""

from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.

    [`SpeechT5Processor`] offers all the functionalities of [`SpeechT5FeatureExtractor`] and [`SpeechT5Tokenizer`]. See
    the docstring of [`~SpeechT5Processor.__call__`] and [`~SpeechT5Processor.decode`] for more information.

    Args:
        feature_extractor (`SpeechT5FeatureExtractor`):
            An instance of [`SpeechT5FeatureExtractor`]. The feature extractor is a required input.
        tokenizer (`SpeechT5Tokenizer`):
            An instance of [`SpeechT5Tokenizer`]. The tokenizer is a required input.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
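    # Note (illustrative only): rather than passing the components by hand, the
    # processor is typically instantiated via `from_pretrained`, which is provided by
    # `ProcessorMixin` and loads a matching feature extractor and tokenizer together.
    # The checkpoint name below is an example and is assumed, not defined in this file:
    #
    #     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")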
    def __call__(self, *args, **kwargs):
        """
        Processes audio and text inputs, as well as audio and text targets.

        You can process audio by using the argument `audio`, or process audio targets by using the argument
        `audio_target`. This forwards the arguments to SpeechT5FeatureExtractor's
        [`~SpeechT5FeatureExtractor.__call__`].

        You can process text by using the argument `text`, or process text labels by using the argument `text_target`.
        This forwards the arguments to SpeechT5Tokenizer's [`~SpeechT5Tokenizer.__call__`].

        Valid input combinations are:

        - `text` only
        - `audio` only
        - `text_target` only
        - `audio_target` only
        - `text` and `audio_target`
        - `audio` and `audio_target`
        - `text` and `text_target`
        - `audio` and `text_target`

        Please refer to the docstring of the above two methods for more information.
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        # Route the main input: audio goes to the feature extractor, text to the tokenizer.
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        # Route the target: audio targets become log-mel spectrogram `labels`,
        # text targets become token-id `labels`.
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
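    # Usage sketch for `__call__` (illustrative only, not executed here). For
    # text-to-speech style preprocessing, pair `text` with an `audio_target`; for
    # speech-to-text, pair `audio` with a `text_target`. The checkpoint name and the
    # `waveform` variable below are placeholders, not defined in this file:
    #
    #     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    #     inputs = processor(
    #         text="Hello, world.",
    #         audio_target=waveform,   # 1-D float array of the target speech
    #         sampling_rate=16000,
    #         return_tensors="pt",
    #     )
    #     # -> `input_ids`, `attention_mask`, `labels` (log-mel frames) and, if the
    #     #    feature extractor returns one, `decoder_attention_mask`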
    def pad(self, *args, **kwargs):
        """
        Collates the audio and text inputs, as well as their targets, into a padded batch.

        Audio inputs are padded by SpeechT5FeatureExtractor's [`~SpeechT5FeatureExtractor.pad`]. Text inputs are padded
        by SpeechT5Tokenizer's [`~SpeechT5Tokenizer.pad`].

        Valid input combinations are:

        - `input_ids` only
        - `input_values` only
        - `labels` only, either log-mel spectrograms or text tokens
        - `input_ids` and log-mel spectrogram `labels`
        - `input_values` and text `labels`

        Please refer to the docstring of the above two methods for more information.
        """
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                # Text labels: pad the token ids with the tokenizer.
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Spectrogram labels: temporarily set the feature extractor's feature size
                # to the number of mel bins so padding uses the spectrogram dimension.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
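    # Usage sketch for `pad` (illustrative only). A typical collator for TTS-style
    # fine-tuning re-pads the variable-length features produced by `__call__`; the
    # helper name, field layout, and the in-scope `processor` are assumptions about
    # how a user might batch examples, not part of this module:
    #
    #     def collate_fn(features):
    #         input_ids = [{"input_ids": f["input_ids"]} for f in features]
    #         labels = [{"input_values": f["labels"]} for f in features]
    #         return processor.pad(input_ids=input_ids, labels=labels, return_tensors="pt")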
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to SpeechT5Tokenizer's [`~SpeechT5Tokenizer.batch_decode`]. Please refer
        to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to SpeechT5Tokenizer's [`~SpeechT5Tokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)
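# Usage sketch for decoding (illustrative only). In the speech-to-text direction,
# token ids predicted by a SpeechT5 ASR model can be turned back into strings via the
# wrapped tokenizer; `predicted_ids` and the in-scope `processor` are placeholders:
#
#     transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)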