# configuration_layoutlm.py
  1. # coding=utf-8
  2. # Copyright 2010, The Microsoft Research Asia LayoutLM Team authors
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. """LayoutLM model configuration"""
  16. from collections import OrderedDict
  17. from typing import Any, List, Mapping, Optional
  18. from ... import PretrainedConfig, PreTrainedTokenizer
  19. from ...onnx import OnnxConfig, PatchingSpec
  20. from ...utils import TensorType, is_torch_available, logging
# Module-level logger, namespaced by this module's import path.
logger = logging.get_logger(__name__)
  22. class LayoutLMConfig(PretrainedConfig):
  23. r"""
  24. This is the configuration class to store the configuration of a [`LayoutLMModel`]. It is used to instantiate a
  25. LayoutLM model according to the specified arguments, defining the model architecture. Instantiating a configuration
  26. with the defaults will yield a similar configuration to that of the LayoutLM
  27. [microsoft/layoutlm-base-uncased](https://huggingface.co/microsoft/layoutlm-base-uncased) architecture.
  28. Configuration objects inherit from [`BertConfig`] and can be used to control the model outputs. Read the
  29. documentation from [`BertConfig`] for more information.
  30. Args:
  31. vocab_size (`int`, *optional*, defaults to 30522):
  32. Vocabulary size of the LayoutLM model. Defines the different tokens that can be represented by the
  33. *inputs_ids* passed to the forward method of [`LayoutLMModel`].
  34. hidden_size (`int`, *optional*, defaults to 768):
  35. Dimensionality of the encoder layers and the pooler layer.
  36. num_hidden_layers (`int`, *optional*, defaults to 12):
  37. Number of hidden layers in the Transformer encoder.
  38. num_attention_heads (`int`, *optional*, defaults to 12):
  39. Number of attention heads for each attention layer in the Transformer encoder.
  40. intermediate_size (`int`, *optional*, defaults to 3072):
  41. Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
  42. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
  43. The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
  44. `"relu"`, `"silu"` and `"gelu_new"` are supported.
  45. hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
  46. The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
  47. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
  48. The dropout ratio for the attention probabilities.
  49. max_position_embeddings (`int`, *optional*, defaults to 512):
  50. The maximum sequence length that this model might ever be used with. Typically set this to something large
  51. just in case (e.g., 512 or 1024 or 2048).
  52. type_vocab_size (`int`, *optional*, defaults to 2):
  53. The vocabulary size of the `token_type_ids` passed into [`LayoutLMModel`].
  54. initializer_range (`float`, *optional*, defaults to 0.02):
  55. The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
  56. layer_norm_eps (`float`, *optional*, defaults to 1e-12):
  57. The epsilon used by the layer normalization layers.
  58. pad_token_id (`int`, *optional*, defaults to 0):
  59. The value used to pad input_ids.
  60. position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
  61. Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
  62. positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
  63. [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
  64. For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
  65. with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
  66. use_cache (`bool`, *optional*, defaults to `True`):
  67. Whether or not the model should return the last key/values attentions (not used by all models). Only
  68. relevant if `config.is_decoder=True`.
  69. max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
  70. The maximum value that the 2D position embedding might ever used. Typically set this to something large
  71. just in case (e.g., 1024).
  72. Examples:
  73. ```python
  74. >>> from transformers import LayoutLMConfig, LayoutLMModel
  75. >>> # Initializing a LayoutLM configuration
  76. >>> configuration = LayoutLMConfig()
  77. >>> # Initializing a model (with random weights) from the configuration
  78. >>> model = LayoutLMModel(configuration)
  79. >>> # Accessing the model configuration
  80. >>> configuration = model.config
  81. ```"""
  82. model_type = "layoutlm"
  83. def __init__(
  84. self,
  85. vocab_size=30522,
  86. hidden_size=768,
  87. num_hidden_layers=12,
  88. num_attention_heads=12,
  89. intermediate_size=3072,
  90. hidden_act="gelu",
  91. hidden_dropout_prob=0.1,
  92. attention_probs_dropout_prob=0.1,
  93. max_position_embeddings=512,
  94. type_vocab_size=2,
  95. initializer_range=0.02,
  96. layer_norm_eps=1e-12,
  97. pad_token_id=0,
  98. position_embedding_type="absolute",
  99. use_cache=True,
  100. max_2d_position_embeddings=1024,
  101. **kwargs,
  102. ):
  103. super().__init__(pad_token_id=pad_token_id, **kwargs)
  104. self.vocab_size = vocab_size
  105. self.hidden_size = hidden_size
  106. self.num_hidden_layers = num_hidden_layers
  107. self.num_attention_heads = num_attention_heads
  108. self.hidden_act = hidden_act
  109. self.intermediate_size = intermediate_size
  110. self.hidden_dropout_prob = hidden_dropout_prob
  111. self.attention_probs_dropout_prob = attention_probs_dropout_prob
  112. self.max_position_embeddings = max_position_embeddings
  113. self.type_vocab_size = type_vocab_size
  114. self.initializer_range = initializer_range
  115. self.layer_norm_eps = layer_norm_eps
  116. self.position_embedding_type = position_embedding_type
  117. self.use_cache = use_cache
  118. self.max_2d_position_embeddings = max_2d_position_embeddings
  119. class LayoutLMOnnxConfig(OnnxConfig):
  120. def __init__(
  121. self,
  122. config: PretrainedConfig,
  123. task: str = "default",
  124. patching_specs: List[PatchingSpec] = None,
  125. ):
  126. super().__init__(config, task=task, patching_specs=patching_specs)
  127. self.max_2d_positions = config.max_2d_position_embeddings - 1
  128. @property
  129. def inputs(self) -> Mapping[str, Mapping[int, str]]:
  130. return OrderedDict(
  131. [
  132. ("input_ids", {0: "batch", 1: "sequence"}),
  133. ("bbox", {0: "batch", 1: "sequence"}),
  134. ("attention_mask", {0: "batch", 1: "sequence"}),
  135. ("token_type_ids", {0: "batch", 1: "sequence"}),
  136. ]
  137. )
  138. def generate_dummy_inputs(
  139. self,
  140. tokenizer: PreTrainedTokenizer,
  141. batch_size: int = -1,
  142. seq_length: int = -1,
  143. is_pair: bool = False,
  144. framework: Optional[TensorType] = None,
  145. ) -> Mapping[str, Any]:
  146. """
  147. Generate inputs to provide to the ONNX exporter for the specific framework
  148. Args:
  149. tokenizer: The tokenizer associated with this model configuration
  150. batch_size: The batch size (int) to export the model for (-1 means dynamic axis)
  151. seq_length: The sequence length (int) to export the model for (-1 means dynamic axis)
  152. is_pair: Indicate if the input is a pair (sentence 1, sentence 2)
  153. framework: The framework (optional) the tokenizer will generate tensor for
  154. Returns:
  155. Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
  156. """
  157. input_dict = super().generate_dummy_inputs(
  158. tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
  159. )
  160. # Generate a dummy bbox
  161. box = [48, 84, 73, 128]
  162. if not framework == TensorType.PYTORCH:
  163. raise NotImplementedError("Exporting LayoutLM to ONNX is currently only supported for PyTorch.")
  164. if not is_torch_available():
  165. raise ValueError("Cannot generate dummy inputs without PyTorch installed.")
  166. import torch
  167. batch_size, seq_length = input_dict["input_ids"].shape
  168. input_dict["bbox"] = torch.tensor([*[box] * seq_length]).tile(batch_size, 1, 1)
  169. return input_dict