# coding=utf-8
# Copyright 2023 HuggingFace Inc. team and MosaicML NLP team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MPT model."""

import math
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from torch.nn import functional as F

from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_mpt import MptConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "mosaicml/mpt-7b"
_CONFIG_FOR_DOC = "MptConfig"


def build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max=8, device=None):
    r"""
    Link to paper: https://arxiv.org/abs/2108.12409 - Alibi tensor is not causal as the original paper mentions, it
    relies on a translation invariance of softmax for quick implementation. This implementation has been copied from
    the alibi implementation of MPT source code that led to slightly different results than the Bloom alibi:
    https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L292
    """
    alibi = torch.arange(1 - sequence_length, 1, dtype=torch.int32, device=device).view(1, 1, 1, sequence_length)
    num_heads_power_of_2 = 2 ** math.ceil(math.log2(num_heads))

    base = torch.arange(1, num_heads_power_of_2 + 1, dtype=torch.int64, device=device).float()
    base = base * (alibi_bias_max / num_heads_power_of_2)

    slopes = 1.0 / torch.pow(2, base)
    slopes = slopes.view(1, num_heads_power_of_2, 1, 1)

    if num_heads_power_of_2 != num_heads:
        slopes = torch.concat([slopes[:, 1::2, ...], slopes[:, ::2, ...]], dim=1)[:, :num_heads, ...]

    alibi = alibi * slopes
    return alibi.squeeze(0)
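# Illustrative sketch (not part of the original file; shapes and values are inferred from the
# code above). For `num_heads=4` and `sequence_length=5`, the helper returns a bias of shape
# `(num_heads, 1, sequence_length)`, where head `h` scales the relative positions
# `[-4, -3, -2, -1, 0]` by `slope_h = 2 ** (-alibi_bias_max * (h + 1) / num_heads_power_of_2)`:
#
#     alibi = build_mpt_alibi_tensor(num_heads=4, sequence_length=5)
#     alibi.shape    # torch.Size([4, 1, 5])
#     alibi[0, 0]    # tensor([-1.0000, -0.7500, -0.5000, -0.2500, 0.0000])  (slope 2**-2 for head 0)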


class MptAttention(nn.Module):
    """Multi-head self attention.
    Using torch or triton attention implementation enables the user to also use additive bias.
    """

    def __init__(self, config: MptConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.n_heads = config.n_heads
        self.max_seq_length = config.max_seq_len
        self.head_dim = self.hidden_size // self.n_heads
        self.softmax_scale = config.attn_config.softmax_scale
        if self.softmax_scale is None:
            self.softmax_scale = 1 / math.sqrt(self.hidden_size / self.n_heads)

        self.attn_dropout_p = config.attn_config.attn_pdrop
        self.clip_qkv = config.attn_config.clip_qkv
        self.Wqkv = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)
        self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_bias: torch.Tensor,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        batch_size, seq_length = hidden_states.shape[:2]

        mixed_qkv = self.Wqkv(hidden_states)
        if self.clip_qkv:
            mixed_qkv = mixed_qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv)

        query_states, key_states, value_states = mixed_qkv.chunk(3, dim=2)
        query_states = query_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)

        if past_key_value is not None:
            if len(past_key_value) != 0:
                key_states = torch.cat([past_key_value[0], key_states], dim=2)
                value_states = torch.cat([past_key_value[1], value_states], dim=2)
            past_key_value = (key_states, value_states)
        else:
            past_key_value = (key_states, value_states)

        attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) * self.softmax_scale

        query_length = seq_length if past_key_value is None else seq_length + past_key_value[0].shape[2]

        if position_bias is not None:
            if len(position_bias.shape) != 3:
                raise ValueError(f"Expecting position_bias shape to be 3 dimensions, got {len(position_bias.shape)}")
            key_length = key_states.shape[-2]

            position_bias_query_index = max(0, position_bias.size(1) - query_length)
            position_bias_key_index = max(0, position_bias.size(2) - key_length)

            position_bias = position_bias[:, position_bias_query_index:, position_bias_key_index:]
            attention_scores = attention_scores + position_bias

        if attention_mask is not None:
            attention_scores = attention_scores.masked_fill(attention_mask, torch.finfo(query_states.dtype).min)

        # (batch_size, n_heads, seq_length, key_length)
        attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).to(value_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attn_dropout_p, training=self.training)

        context_states = torch.matmul(attn_weights, value_states)
        context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1)
        attn_output = self.out_proj(context_states)

        return attn_output, attn_weights, past_key_value
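# Shape sketch (not part of the original file; derived from the forward pass above for
# readability). With batch size `B`, current sequence length `S` and cached length `P`:
#
#     query_states: (B, n_heads, S, head_dim)
#     key_states:   (B, n_heads, P + S, head_dim)   # cache is concatenated along dim=2
#     value_states: (B, n_heads, P + S, head_dim)
#     attn_weights: (B, n_heads, S, P + S)
#     attn_output:  (B, S, hidden_size)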


class MptMLP(nn.Module):
    def __init__(self, config: MptConfig):
        super().__init__()
        hidden_size = config.hidden_size

        self.up_proj = nn.Linear(hidden_size, 4 * hidden_size, bias=False)
        self.act = nn.GELU(approximate="none")
        self.down_proj = nn.Linear(4 * hidden_size, hidden_size, bias=False)
        self.hidden_dropout = config.attn_config.attn_pdrop

    def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
        hidden_states = self.act(self.up_proj(hidden_states))

        intermediate_output = self.down_proj(hidden_states)

        output = F.dropout(intermediate_output, p=self.hidden_dropout, training=self.training)
        output = output + residual

        return output


class MptBlock(nn.Module):
    def __init__(self, config: MptConfig):
        super().__init__()
        hidden_size = config.hidden_size

        self.norm_1 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        # backward compatibility with weights on the Hub
        self.norm_1.bias = None

        self.num_heads = config.n_heads
        self.attn = MptAttention(config)

        self.norm_2 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        # backward compatibility with weights on the Hub
        self.norm_2.bias = None

        self.ffn = MptMLP(config)

        self.dropout_rate = config.attn_config.attn_pdrop
        self.resid_attn_dropout = nn.Dropout(self.dropout_rate)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_bias: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        use_cache: bool = False,
        output_attentions: bool = False,
    ):
        # hidden_states: [batch_size, seq_length, hidden_size]
        # Layer norm at the beginning of the transformer layer.
        layernorm_output = self.norm_1(hidden_states)

        residual = hidden_states

        # Self attention.
        attn_outputs, attn_weights, past_key_value = self.attn(
            layernorm_output,
            position_bias=position_bias,
            attention_mask=attention_mask,
            past_key_value=layer_past,
        )

        hidden_states = self.resid_attn_dropout(attn_outputs) + residual

        layernorm_output = self.norm_2(hidden_states)

        # Get residual
        residual = hidden_states

        # MLP.
        output = self.ffn(layernorm_output, residual)
        outputs = (output,)

        if use_cache:
            outputs += (past_key_value,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # hidden_states, present, attentions


class MptPreTrainedModel(PreTrainedModel):
    config_class = MptConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MptBlock"]
    _keys_to_ignore_on_load_missing = [r"lm_head.*."]

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module: nn.Module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, LayerNorm):
            if module.bias is not None:
                module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    @staticmethod
    def _convert_to_mpt_cache(
        past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]],
    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]:
        """
        Converts the cache to the format expected by Mpt, i.e. to tuple(tuple([batch_size * num_heads, ...]))
        """
        batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape
        batch_size_times_num_heads = batch_size * num_heads
        # key: [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length]
        # value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim]
        return tuple(
            (
                layer_past[0].reshape(batch_size_times_num_heads, head_dim, seq_length),
                layer_past[1].reshape(batch_size_times_num_heads, seq_length, head_dim),
            )
            for layer_past in past_key_value
        )
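# Illustrative sketch (not part of the original file; the shapes follow the comments in
# `_convert_to_mpt_cache`, the concrete numbers are example values). With `batch_size=2`,
# `num_heads=8`, `head_dim=64` and `seq_length=16`, each layer's cache is flattened as:
#
#     key:   (2, 8, 64, 16) -> (16, 64, 16)   # batch_size * num_heads merged into dim 0
#     value: (2, 8, 16, 64) -> (16, 16, 64)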


MPT_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`MptConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MPT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[2]`
            (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.

            If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
            `input_ids`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
            Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
            `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
            their past given to this model should not be passed as `input_ids` as they have already been computed.

            Each element of `past_key_values` is a tuple (past_key, past_value):
            - past_key: [batch_size * num_heads, head_dim, kv_length]
            - past_value: [batch_size * num_heads, kv_length, head_dim]
        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.

            If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
            `past_key_values`).
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
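# Usage sketch (not part of the original file; it assumes the standard `transformers` Auto* API
# and the `mosaicml/mpt-7b` checkpoint referenced by `_CHECKPOINT_FOR_DOC`). The documented
# inputs above map onto a call such as:
#
#     from transformers import AutoTokenizer, MptModel
#
#     tokenizer = AutoTokenizer.from_pretrained("mosaicml/mpt-7b")
#     model = MptModel.from_pretrained("mosaicml/mpt-7b")
#     inputs = tokenizer("Hello, MPT!", return_tensors="pt")
#     outputs = model(**inputs, use_cache=True)
#     outputs.last_hidden_state.shape   # (batch_size, sequence_length, hidden_size)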


@add_start_docstrings(
    "The bare Mpt Model transformer outputting raw hidden-states without any specific head on top.",
    MPT_START_DOCSTRING,
)
class MptModel(MptPreTrainedModel):
    def __init__(self, config: MptConfig):
        super().__init__(config)

        self.hidden_size = config.hidden_size
        self.num_heads = config.n_heads

        # Embedding + LN Embedding
        self.wte = nn.Embedding(config.vocab_size, self.hidden_size)

        # Transformer blocks
        self.blocks = nn.ModuleList([MptBlock(config) for _ in range(config.n_layers)])

        # Final Layer Norm
        self.norm_f = LayerNorm(self.hidden_size, eps=config.layer_norm_epsilon)
        # backward compatibility with weights on the Hub
        self.norm_f.bias = None

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.wte

    def build_mpt_alibi_tensor(self, num_heads, sequence_length, alibi_bias_max=8, device=None):
        return build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max, device)

    def set_input_embeddings(self, new_embeddings: torch.Tensor):
        self.wte = new_embeddings

    @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if past_key_values is None:
            past_key_values = tuple([None] * len(self.blocks))

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        hidden_states = inputs_embeds

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # Compute alibi tensor: check build_mpt_alibi_tensor documentation
        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values[0] is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length
        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
        else:
            attention_mask = attention_mask.to(hidden_states.device)

        alibi = self.build_mpt_alibi_tensor(self.num_heads, self.config.max_seq_len, device=hidden_states.device)

        causal_mask = _prepare_4d_causal_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )
        causal_mask = causal_mask.bool()

        for block, layer_past in zip(self.blocks, past_key_values):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                outputs = self._gradient_checkpointing_func(
                    block.__call__,
                    hidden_states,
                    alibi,
                    causal_mask,
                    layer_past,
                    use_cache,
                    output_attentions,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=causal_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    position_bias=alibi,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)

        # Add last hidden state
        hidden_states = self.norm_f(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
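# Cache usage sketch (not part of the original file; `model` and the token tensors are
# placeholders, and the behaviour is inferred from `MptAttention.forward`, which concatenates
# new keys/values onto the cache along the sequence dimension). With `use_cache=True`,
# `past_key_values` holds one `(key_states, value_states)` tuple per block and can be fed back
# for incremental decoding:
#
#     out = model(input_ids, use_cache=True)
#     len(out.past_key_values)    # config.n_layers
#     out = model(next_token_ids, past_key_values=out.past_key_values, use_cache=True)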


@add_start_docstrings(
    """
    The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
    """,
    MPT_START_DOCSTRING,
)
class MptForCausalLM(MptPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: MptConfig):
        super().__init__(config)
        self.transformer = MptModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: torch.Tensor):
        self.lm_head = new_embeddings

    @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            batch_size, seq_length, vocab_size = shift_logits.shape
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)
            )

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def _reorder_cache(
        self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.

        Output shares the same memory storage as `past`.
        """
        # Get a copy of `beam_idx` on all the devices where we need those indices.
        device_to_beam_idx = {
            past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past
        }
        reordered_past = tuple(
            (
                layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]),
                layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]),
            )
            for layer_past in past
        )
        return reordered_past
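# Generation sketch (not part of the original file; it assumes the standard `transformers`
# generation API and the `mosaicml/mpt-7b` checkpoint referenced by `_CHECKPOINT_FOR_DOC`):
#
#     from transformers import AutoTokenizer, MptForCausalLM
#
#     tokenizer = AutoTokenizer.from_pretrained("mosaicml/mpt-7b")
#     model = MptForCausalLM.from_pretrained("mosaicml/mpt-7b")
#     inputs = tokenizer("MPT is a decoder-only transformer that", return_tensors="pt")
#     generated = model.generate(**inputs, max_new_tokens=20)
#     print(tokenizer.decode(generated[0], skip_special_tokens=True))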


@add_start_docstrings(
    """
    The MPT Model transformer with a sequence classification head on top (linear layer).

    [`MptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-1) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    MPT_START_DOCSTRING,
)
class MptForSequenceClassification(MptPreTrainedModel):
    def __init__(self, config: MptConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = MptModel(config)
        self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1
                logger.warning_once(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
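# Pooling sketch (not part of the original file; the values are illustrative and the index
# arithmetic mirrors the forward pass above). With `pad_token_id=0`, the logits of the last
# non-padding token in each row are used as `pooled_logits`:
#
#     input_ids = torch.tensor([[11, 12, 13, 0, 0]])                      # pad_token_id = 0
#     sequence_lengths = torch.eq(input_ids, 0).int().argmax(-1) - 1      # tensor([2])
#     pooled_logits = logits[torch.arange(1), sequence_lengths]           # logits at position 2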


@add_start_docstrings(
    """
    MPT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    MPT_START_DOCSTRING,
)
class MptForTokenClassification(MptPreTrainedModel):
    def __init__(self, config: MptConfig):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = MptModel(config)
        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **deprecated_arguments,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            batch_size, seq_length = labels.shape
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
            )

        if not return_dict:
            output = (logits,) + transformer_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@add_start_docstrings(
    """
    The MPT Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD
    (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    MPT_START_DOCSTRING,
)
class MptForQuestionAnswering(MptPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.transformer = MptModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )