# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import pickle
import random
import time
import warnings
from typing import Dict, List, Optional

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


DEPRECATION_WARNING = (
    "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
    "library. You can have a look at this example script for pointers: {0}"
)


class TextDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        file_path: str,
        block_size: int,
        overwrite_cache=False,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if not os.path.isfile(file_path):
            raise ValueError(f"Input file path {file_path} not found")

        block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False)

        directory, filename = os.path.split(file_path)
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else directory,
            f"cached_lm_{tokenizer.__class__.__name__}_{block_size}_{filename}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                with open(cached_features_file, "rb") as handle:
                    self.examples = pickle.load(handle)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
            else:
                logger.info(f"Creating features from dataset file at {directory}")

                self.examples = []
                with open(file_path, encoding="utf-8") as f:
                    text = f.read()

                tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))

                for i in range(0, len(tokenized_text) - block_size + 1, block_size):  # Truncate in blocks of block_size
                    self.examples.append(
                        tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
                    )
                # Note that we are losing the last truncated example here for the sake of simplicity (no padding).
                # If your dataset is small, first you should look for a bigger one :-), and second you
                # can change this behavior by adding (model-specific) padding.

                start = time.time()
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> torch.Tensor:
        return torch.tensor(self.examples[i], dtype=torch.long)
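

# A minimal usage sketch for `TextDataset` (illustrative only; the checkpoint
# name and the file path are assumptions, not part of this module). Since every
# example is truncated to the same block size, the default collate function can
# stack them directly:
#
#     from transformers import AutoTokenizer
#     from torch.utils.data import DataLoader
#
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     dataset = TextDataset(tokenizer=tokenizer, file_path="train.txt", block_size=128)
#     loader = DataLoader(dataset, batch_size=8)  # each item is a LongTensor of block_size ids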


class LineByLineTextDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if not os.path.isfile(file_path):
            raise ValueError(f"Input file path {file_path} not found")
        # Here, we do not cache the features, operating under the assumption
        # that we will soon use fast multithreaded tokenizers from the
        # `tokenizers` repo everywhere =)
        logger.info(f"Creating features from dataset file at {file_path}")

        with open(file_path, encoding="utf-8") as f:
            lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]

        batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size)
        self.examples = batch_encoding["input_ids"]
        self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        return self.examples[i]
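

# A minimal usage sketch for `LineByLineTextDataset` (illustrative; the file
# path and checkpoint name are assumptions). Because line lengths vary, a
# padding collator such as `DataCollatorForLanguageModeling` is one common way
# to batch the examples:
#
#     from transformers import AutoTokenizer, DataCollatorForLanguageModeling
#     from torch.utils.data import DataLoader
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dataset = LineByLineTextDataset(tokenizer=tokenizer, file_path="train.txt", block_size=128)
#     collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)
#     loader = DataLoader(dataset, batch_size=8, collate_fn=collator)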


class LineByLineWithRefDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_wwm.py"
            ),
            FutureWarning,
        )
        if not os.path.isfile(file_path):
            raise ValueError(f"Input file path {file_path} not found")
        if not os.path.isfile(ref_path):
            raise ValueError(f"Ref file path {ref_path} not found")
        # Here, we do not cache the features, operating under the assumption
        # that we will soon use fast multithreaded tokenizers from the
        # `tokenizers` repo everywhere =)
        logger.info(f"Creating features from dataset file at {file_path}")
        logger.info(f"Using ref segment results at {ref_path}")
        with open(file_path, encoding="utf-8") as f:
            data = f.readlines()  # use readlines() so the '\u2029' paragraph separator cannot split a line
        data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
        # Get ref info from file
        with open(ref_path, encoding="utf-8") as f:
            ref = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
        if len(data) != len(ref):
            raise ValueError(
                f"The length of the input file should equal the length of the ref file. But the length of {file_path} "
                f"is {len(data)} while the length of {ref_path} is {len(ref)}"
            )

        batch_encoding = tokenizer(data, add_special_tokens=True, truncation=True, max_length=block_size)
        self.examples = batch_encoding["input_ids"]
        self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]

        n = len(self.examples)
        for i in range(n):
            self.examples[i]["chinese_ref"] = torch.tensor(ref[i], dtype=torch.long)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        return self.examples[i]
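

# Sketch of the expected ref file layout (an assumption based on the whole-word-
# masking example scripts, where a helper like `run_chinese_ref.py` produces it;
# the concrete text and indices below are made up): `ref_path` holds one JSON
# list per text line, giving the token positions that continue a word, so that
# `DataCollatorForWholeWordMask` can mask whole words together, e.g.
#
#     train.txt:  一行中文文本
#     ref.txt:    [2, 4, 6]
#
# and each item then looks like {"input_ids": ..., "chinese_ref": tensor([2, 4, 6])}.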


class LineByLineWithSOPTextDataset(Dataset):
    """
    Dataset for the sentence order prediction (SOP) task: prepares sentence pairs for the SOP objective.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if not os.path.isdir(file_dir):
            raise ValueError(f"{file_dir} is not a directory")
        logger.info(f"Creating features from dataset file folder at {file_dir}")
        self.examples = []
        # TODO: the randomness below could take a seed, e.g. rng = random.Random(random_seed)
        # file path looks like ./dataset/wiki_1, ./dataset/wiki_2
        for file_name in os.listdir(file_dir):
            file_path = os.path.join(file_dir, file_name)
            if not os.path.isfile(file_path):
                raise ValueError(f"{file_path} is not a file")
            article_open = False
            with open(file_path, encoding="utf-8") as f:
                original_lines = f.readlines()
                article_lines = []
                for line in original_lines:
                    if "<doc id=" in line:
                        article_open = True
                    elif "</doc>" in line:
                        article_open = False
                        document = [
                            tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line))
                            for line in article_lines[1:]
                            if (len(line) > 0 and not line.isspace())
                        ]

                        examples = self.create_examples_from_document(document, block_size, tokenizer)
                        self.examples.extend(examples)
                        article_lines = []
                    else:
                        if article_open:
                            article_lines.append(line)

        logger.info("Dataset parse finished.")

    def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1):
        """Creates examples for a single document."""

        # Account for special tokens
        max_num_tokens = block_size - tokenizer.num_special_tokens_to_add(pair=True)

        # We *usually* want to fill up the entire sequence since we are padding
        # to `block_size` anyways, so short sequences are generally wasted
        # computation. However, we *sometimes*
        # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
        # sequences to minimize the mismatch between pretraining and fine-tuning.
        # The `target_seq_length` is just a rough target however, whereas
        # `block_size` is a hard limit.
        target_seq_length = max_num_tokens
        if random.random() < short_seq_prob:
            target_seq_length = random.randint(2, max_num_tokens)

        # We DON'T just concatenate all of the tokens from a document into a long
        # sequence and choose an arbitrary split point because this would make the
        # next sentence prediction task too easy. Instead, we split the input into
        # segments "A" and "B" based on the actual "sentences" provided by the user
        # input.
        examples = []
        current_chunk = []  # a buffer storing the current working segments
        current_length = 0
        i = 0
        while i < len(document):
            segment = document[i]  # get a segment
            if not segment:
                i += 1
                continue
            current_chunk.append(segment)  # add a segment to the current chunk
            current_length += len(segment)  # overall token length
            # If the current length reaches the target length, or we are at the end of the document,
            # start building tokens A and B.
            if i == len(document) - 1 or current_length >= target_seq_length:
                if current_chunk:
                    # `a_end` is how many segments from `current_chunk` go into the `A` (first) sentence.
                    a_end = 1
                    # If the current chunk has 2 or more sentences, pick a random split point for the `A` (first) sentence.
                    if len(current_chunk) >= 2:
                        a_end = random.randint(1, len(current_chunk) - 1)
                    # tokens A
                    tokens_a = []
                    for j in range(a_end):
                        tokens_a.extend(current_chunk[j])

                    # tokens B
                    tokens_b = []
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])

                    if len(tokens_a) == 0 or len(tokens_b) == 0:
                        continue

                    # switch tokens_a and tokens_b randomly
                    if random.random() < 0.5:
                        is_next = False
                        tokens_a, tokens_b = tokens_b, tokens_a
                    else:
                        is_next = True

                    def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
                        """Truncates a pair of sequences to a maximum sequence length."""
                        while True:
                            total_length = len(tokens_a) + len(tokens_b)
                            if total_length <= max_num_tokens:
                                break
                            trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
                            if len(trunc_tokens) < 1:
                                raise ValueError("Sequence length to be truncated must be no less than one")
                            # We want to sometimes truncate from the front and sometimes from the
                            # back to add more randomness and avoid biases.
                            if random.random() < 0.5:
                                del trunc_tokens[0]
                            else:
                                trunc_tokens.pop()

                    truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)

                    if len(tokens_a) < 1:
                        raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
                    if len(tokens_b) < 1:
                        raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")

                    # add special tokens
                    input_ids = tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
                    # add token type ids, 0 for sentence a, 1 for sentence b
                    token_type_ids = tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)

                    example = {
                        "input_ids": torch.tensor(input_ids, dtype=torch.long),
                        "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
                        "sentence_order_label": torch.tensor(0 if is_next else 1, dtype=torch.long),
                    }
                    examples.append(example)
                current_chunk = []  # clear current chunk
                current_length = 0  # reset current text length
            i += 1  # go to next line
        return examples

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        return self.examples[i]
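

# A minimal usage sketch for `LineByLineWithSOPTextDataset` (illustrative; the
# directory of WikiExtractor-style files with `<doc id=...>` / `</doc>` markers
# and the checkpoint name are assumptions, chosen here because ALBERT uses the
# SOP objective):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
#     dataset = LineByLineWithSOPTextDataset(tokenizer=tokenizer, file_dir="./dataset", block_size=512)
#     example = dataset[0]
#     # example keys: "input_ids", "token_type_ids", "sentence_order_label" (0 = in order, 1 = swapped)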


class TextDatasetForNextSentencePrediction(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        file_path: str,
        block_size: int,
        overwrite_cache=False,
        short_seq_probability=0.1,
        nsp_probability=0.5,
    ):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if not os.path.isfile(file_path):
            raise ValueError(f"Input file path {file_path} not found")

        self.short_seq_probability = short_seq_probability
        self.nsp_probability = nsp_probability

        directory, filename = os.path.split(file_path)
        cached_features_file = os.path.join(
            directory,
            f"cached_nsp_{tokenizer.__class__.__name__}_{block_size}_{filename}",
        )

        self.tokenizer = tokenizer

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"

        # Input file format:
        # (1) One sentence per line. These should ideally be actual sentences, not
        # entire paragraphs or arbitrary spans of text, because we use the
        # sentence boundaries for the "next sentence prediction" task.
        # (2) Blank lines between documents. Document boundaries are needed so
        # that the "next sentence prediction" task doesn't span between documents.
        #
        # Example:
        # I am very happy.
        # Here is the second sentence.
        #
        # A new document.

        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                with open(cached_features_file, "rb") as handle:
                    self.examples = pickle.load(handle)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
            else:
                logger.info(f"Creating features from dataset file at {directory}")

                self.documents = [[]]
                with open(file_path, encoding="utf-8") as f:
                    while True:
                        line = f.readline()
                        if not line:
                            break
                        line = line.strip()

                        # Empty lines are used as document delimiters
                        if not line and len(self.documents[-1]) != 0:
                            self.documents.append([])
                        tokens = tokenizer.tokenize(line)
                        tokens = tokenizer.convert_tokens_to_ids(tokens)
                        if tokens:
                            self.documents[-1].append(tokens)

                logger.info(f"Creating examples from {len(self.documents)} documents.")
                self.examples = []
                for doc_index, document in enumerate(self.documents):
                    self.create_examples_from_document(document, doc_index, block_size)

                start = time.time()
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def create_examples_from_document(self, document: List[List[int]], doc_index: int, block_size: int):
        """Creates examples for a single document."""

        max_num_tokens = block_size - self.tokenizer.num_special_tokens_to_add(pair=True)

        # We *usually* want to fill up the entire sequence since we are padding
        # to `block_size` anyways, so short sequences are generally wasted
        # computation. However, we *sometimes*
        # (i.e., short_seq_probability == 0.1 == 10% of the time) want to use shorter
        # sequences to minimize the mismatch between pretraining and fine-tuning.
        # The `target_seq_length` is just a rough target however, whereas
        # `block_size` is a hard limit.
        target_seq_length = max_num_tokens
        if random.random() < self.short_seq_probability:
            target_seq_length = random.randint(2, max_num_tokens)

        current_chunk = []  # a buffer storing the current working segments
        current_length = 0
        i = 0

        while i < len(document):
            segment = document[i]
            current_chunk.append(segment)
            current_length += len(segment)
            if i == len(document) - 1 or current_length >= target_seq_length:
                if current_chunk:
                    # `a_end` is how many segments from `current_chunk` go into the `A`
                    # (first) sentence.
                    a_end = 1
                    if len(current_chunk) >= 2:
                        a_end = random.randint(1, len(current_chunk) - 1)

                    tokens_a = []
                    for j in range(a_end):
                        tokens_a.extend(current_chunk[j])

                    tokens_b = []
                    if len(current_chunk) == 1 or random.random() < self.nsp_probability:
                        is_random_next = True
                        target_b_length = target_seq_length - len(tokens_a)

                        # This should rarely go for more than one iteration for large
                        # corpora. However, just to be careful, we try to make sure that
                        # the random document is not the same as the document
                        # we're processing.
                        for _ in range(10):
                            random_document_index = random.randint(0, len(self.documents) - 1)
                            if random_document_index != doc_index:
                                break

                        random_document = self.documents[random_document_index]
                        random_start = random.randint(0, len(random_document) - 1)
                        for j in range(random_start, len(random_document)):
                            tokens_b.extend(random_document[j])
                            if len(tokens_b) >= target_b_length:
                                break
                        # We didn't actually use these segments so we "put them back" so
                        # they don't go to waste.
                        num_unused_segments = len(current_chunk) - a_end
                        i -= num_unused_segments
                    # Actual next
                    else:
                        is_random_next = False
                        for j in range(a_end, len(current_chunk)):
                            tokens_b.extend(current_chunk[j])

                    if len(tokens_a) < 1:
                        raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
                    if len(tokens_b) < 1:
                        raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")

                    # add special tokens
                    input_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
                    # add token type ids, 0 for sentence a, 1 for sentence b
                    token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)

                    example = {
                        "input_ids": torch.tensor(input_ids, dtype=torch.long),
                        "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
                        "next_sentence_label": torch.tensor(1 if is_random_next else 0, dtype=torch.long),
                    }

                    self.examples.append(example)

                current_chunk = []
                current_length = 0

            i += 1

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return self.examples[i]
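

# A minimal usage sketch for `TextDatasetForNextSentencePrediction`
# (illustrative; the checkpoint name and the `nsp.txt` file, one sentence per
# line with blank lines between documents as described above, are assumptions):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dataset = TextDatasetForNextSentencePrediction(
#         tokenizer=tokenizer, file_path="nsp.txt", block_size=256
#     )
#     example = dataset[0]
#     # example keys: "input_ids", "token_type_ids", "next_sentence_label" (0 = actual next, 1 = random)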