# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv
import dataclasses
import json
from dataclasses import dataclass
from typing import List, Optional, Union

from ...utils import is_tf_available, is_torch_available, logging


logger = logging.get_logger(__name__)

@dataclass
class InputExample:
    """
    A single training/test example for simple sequence classification.

    Args:
        guid: Unique id for the example.
        text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
        text_b: (Optional) string. The untokenized text of the second sequence.
            Must only be specified for sequence pair tasks.
        label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2) + "\n"
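

# A quick usage sketch (illustration only, not part of the original module; the
# guid format and label value below are arbitrary choices):
#
#     example = InputExample(guid="train-0", text_a="A sentence.", label="positive")
#     print(example.to_json_string())  # pretty-printed JSON with a trailing newline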


@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features of data. Property names are the same names as the corresponding inputs to a model.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
            Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
            tokens.
        token_type_ids: (Optional) Segment token indices to indicate first and second
            portions of the inputs. Only some models use them.
        label: (Optional) Label corresponding to the input. Int for classification problems,
            float for regression problems.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self)) + "\n"
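

# Note: the dataclass is frozen, so feature instances are immutable once built.
# A quick illustration (the ids are arbitrary, not from a real vocabulary):
#
#     feats = InputFeatures(input_ids=[101, 2023, 102], attention_mask=[1, 1, 1], label=0)
#     print(feats.to_json_string(), end="")  # one compact JSON object per line (JSONL-friendly)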


class DataProcessor:
    """Base class for data converters for sequence classification data sets."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """
        Gets an example from a dict with tensorflow tensors.

        Args:
            tensor_dict: Keys and values should match the corresponding Glue
                tensorflow_dataset examples.
        """
        raise NotImplementedError()

    def get_train_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the test set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    def tfds_map(self, example):
        """
        Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts
        examples to the correct format.
        """
        if len(self.get_labels()) > 1:
            example.label = self.get_labels()[int(example.label)]
        return example

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r", encoding="utf-8-sig") as f:
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
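

# A minimal subclassing sketch (illustration only; `MyTsvProcessor` and the file
# layout are assumptions, not part of this module). A concrete processor typically
# reads TSV rows with the inherited `_read_tsv` helper and wraps them in
# `InputExample` instances:
#
#     class MyTsvProcessor(DataProcessor):
#         def get_train_examples(self, data_dir):
#             lines = self._read_tsv(os.path.join(data_dir, "train.tsv"))
#             return [
#                 InputExample(guid=f"train-{i}", text_a=line[0], label=line[1])
#                 for i, line in enumerate(lines)
#             ]
#
#         def get_labels(self):
#             return ["0", "1"]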


class SingleSentenceClassificationProcessor(DataProcessor):
    """Generic processor for a single sentence classification data set."""

    def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
        self.labels = [] if labels is None else labels
        self.examples = [] if examples is None else examples
        self.mode = mode
        self.verbose = verbose

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
        return self.examples[idx]

    @classmethod
    def create_from_csv(
        cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
    ):
        processor = cls(**kwargs)
        processor.add_examples_from_csv(
            file_name,
            split_name=split_name,
            column_label=column_label,
            column_text=column_text,
            column_id=column_id,
            skip_first_row=skip_first_row,
            overwrite_labels=True,
            overwrite_examples=True,
        )
        return processor

    @classmethod
    def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
        processor = cls(**kwargs)
        processor.add_examples(texts_or_text_and_labels, labels=labels)
        return processor
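
    # Usage sketch for the factory classmethods (illustrative only; the texts and
    # labels below are made up for the example):
    #
    #     processor = SingleSentenceClassificationProcessor.create_from_examples(
    #         ["a great movie", "a dull plot"], labels=["pos", "neg"]
    #     )
    #     assert len(processor) == 2 and set(processor.labels) == {"pos", "neg"}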

    def add_examples_from_csv(
        self,
        file_name,
        split_name="",
        column_label=0,
        column_text=1,
        column_id=None,
        skip_first_row=False,
        overwrite_labels=False,
        overwrite_examples=False,
    ):
        lines = self._read_tsv(file_name)
        if skip_first_row:
            lines = lines[1:]
        texts = []
        labels = []
        ids = []
        for i, line in enumerate(lines):
            texts.append(line[column_text])
            labels.append(line[column_label])
            if column_id is not None:
                ids.append(line[column_id])
            else:
                guid = f"{split_name}-{i}" if split_name else str(i)
                ids.append(guid)

        return self.add_examples(
            texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
        )

    def add_examples(
        self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
    ):
        if labels is not None and len(texts_or_text_and_labels) != len(labels):
            raise ValueError(
                f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}"
            )
        if ids is not None and len(texts_or_text_and_labels) != len(ids):
            raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}")
        if ids is None:
            ids = [None] * len(texts_or_text_and_labels)
        if labels is None:
            labels = [None] * len(texts_or_text_and_labels)

        examples = []
        added_labels = set()
        for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids):
            if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
                text, label = text_or_text_and_label
            else:
                text = text_or_text_and_label
            added_labels.add(label)
            examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))

        # Update examples
        if overwrite_examples:
            self.examples = examples
        else:
            self.examples.extend(examples)

        # Update labels
        if overwrite_labels:
            self.labels = list(added_labels)
        else:
            self.labels = list(set(self.labels).union(added_labels))

        return self.examples
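
    # `add_examples` also accepts (text, label) pairs when `labels` is None, and it
    # accumulates the label set across calls unless `overwrite_labels=True`. A sketch
    # (illustrative only; texts and labels are made up):
    #
    #     processor = SingleSentenceClassificationProcessor()
    #     processor.add_examples([("fine acting", "pos"), ("weak script", "neg")])
    #     processor.add_examples(["so-so"], labels=["neutral"])  # label set now has 3 values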

    def get_features(
        self,
        tokenizer,
        max_length=None,
        pad_on_left=False,
        pad_token=0,
        mask_padding_with_zero=True,
        return_tensors=None,
    ):
        """
        Converts the processor's examples into a list of `InputFeatures`.

        Args:
            tokenizer: Instance of a tokenizer that will tokenize the examples.
            max_length: Maximum example length.
            pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default).
            pad_token: Id of the padding token.
            mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
                and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
                values).
            return_tensors: If set to `"tf"`, returns a `tf.data.Dataset`; if set to `"pt"`, returns a
                `torch.utils.data.TensorDataset`; if `None` (default), returns a list of `InputFeatures`.

        Returns:
            A list of task-specific `InputFeatures`, or a framework-specific dataset wrapping them (see
            `return_tensors`), which can be fed to the model.
        """
        if max_length is None:
            max_length = tokenizer.max_len

        label_map = {label: i for i, label in enumerate(self.labels)}

        all_input_ids = []
        for ex_index, example in enumerate(self.examples):
            if ex_index % 10000 == 0:
                logger.info(f"Tokenizing example {ex_index}")

            input_ids = tokenizer.encode(
                example.text_a,
                add_special_tokens=True,
                max_length=min(max_length, tokenizer.max_len),
            )
            all_input_ids.append(input_ids)

        batch_length = max(len(input_ids) for input_ids in all_input_ids)

        features = []
        for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):
            if ex_index % 10000 == 0:
                logger.info(f"Writing example {ex_index}/{len(self.examples)}")
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = batch_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)

            if len(input_ids) != batch_length:
                raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}")
            if len(attention_mask) != batch_length:
                raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}")

            if self.mode == "classification":
                label = label_map[example.label]
            elif self.mode == "regression":
                label = float(example.label)
            else:
                raise ValueError(self.mode)

            if ex_index < 5 and self.verbose:
                logger.info("*** Example ***")
                logger.info(f"guid: {example.guid}")
                logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
                logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}")
                logger.info(f"label: {example.label} (id = {label})")

            features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))

        if return_tensors is None:
            return features
        elif return_tensors == "tf":
            if not is_tf_available():
                raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
            import tensorflow as tf

            def gen():
                for ex in features:
                    yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)

            dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
            )
            return dataset
        elif return_tensors == "pt":
            if not is_torch_available():
                raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
            import torch
            from torch.utils.data import TensorDataset

            all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
            all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)

            if self.mode == "classification":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
            elif self.mode == "regression":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

            dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
            return dataset
        else:
            raise ValueError("return_tensors should be one of 'tf' or 'pt'")
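

# A minimal end-to-end sketch (illustration only, not part of the original module).
# `get_features` only requires an object exposing a `max_len` attribute and an
# `encode(text, add_special_tokens=..., max_length=...)` method, so the hypothetical
# whitespace-splitting stub below stands in for a real tokenizer.
if __name__ == "__main__":

    class _WhitespaceTokenizerStub:
        """Hypothetical stand-in for a real tokenizer, for demonstration only."""

        max_len = 128

        def encode(self, text, add_special_tokens=True, max_length=None):
            # Map each whitespace-separated token to a dummy id, truncating to max_length.
            ids = [hash(token) % 1000 for token in text.split()]
            return ids[:max_length] if max_length is not None else ids

    demo_processor = SingleSentenceClassificationProcessor(mode="classification")
    demo_processor.add_examples(["a great movie", "a dull plot"], labels=["pos", "neg"])
    demo_features = demo_processor.get_features(_WhitespaceTokenizerStub(), max_length=16)
    print(demo_features[0].to_json_string(), end="")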