
# coding=utf-8
# Copyright 2021 Deepmind and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Perceiver model."""

import abc
import math
from dataclasses import dataclass
from functools import reduce
from operator import __add__
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union

import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithCrossAttentions
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
    torch_int,
)
from .configuration_perceiver import PerceiverConfig


ModalitySizeType = Mapping[str, int]
PreprocessorOutputType = Tuple[torch.Tensor, Optional[torch.Tensor], torch.Tensor]
PreprocessorType = Callable[..., PreprocessorOutputType]
PostprocessorType = Callable[..., Any]

logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "deepmind/language-perceiver"
_CONFIG_FOR_DOC = "PerceiverConfig"


@dataclass
class PerceiverModelOutput(ModelOutput):
    """
    Base class for Perceiver base model's outputs, with potential hidden states, attentions and cross-attentions.

    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
            used to compute the weighted average in the cross-attention heads.
    """

    logits: torch.FloatTensor = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class PerceiverDecoderOutput(ModelOutput):
    """
    Base class for Perceiver decoder outputs, with potential cross-attentions.

    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
            Output of the basic decoder.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
            used to compute the weighted average in the cross-attention heads.
    """

    logits: torch.FloatTensor = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class PerceiverMaskedLMOutput(ModelOutput):
    """
    Base class for Perceiver's masked language model outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Masked language modeling (MLM) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_latents,
            num_latents)`. Attentions weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
            used to compute the weighted average in the cross-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class PerceiverClassifierOutput(ModelOutput):
    """
    Base class for Perceiver's outputs of sequence/image classification models, optical flow and multimodal
    autoencoding.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
            used to compute the weighted average in the cross-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None


class PerceiverEmbeddings(nn.Module):
    """Construct the latent embeddings."""

    def __init__(self, config):
        super().__init__()
        self.latents = nn.Parameter(torch.randn(config.num_latents, config.d_latents))

    def forward(self, batch_size: int):
        return self.latents.expand(batch_size, -1, -1)  # Thanks, Phil Wang
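
# Note: `self.latents` has shape (num_latents, d_latents) and is shared across the batch; `expand` only
# creates a broadcast view, so every example starts from the same learned latent array. For instance,
# with the library's default PerceiverConfig (num_latents=256, d_latents=1280), calling
# `PerceiverEmbeddings(config)(batch_size=2)` returns a tensor of shape (2, 256, 1280).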


class PerceiverSelfAttention(nn.Module):
    """Multi-headed {cross, self}-attention. Can be used both in the encoder as well as in the decoder."""

    def __init__(
        self,
        config,
        is_cross_attention=False,
        qk_channels=None,
        v_channels=None,
        num_heads=1,
        q_dim=None,
        kv_dim=None,
    ):
        super().__init__()
        self.num_heads = num_heads
        # Q and K must have the same number of channels.
        # Default to preserving Q's input's shape.
        if qk_channels is None:
            qk_channels = q_dim
        # V's num_channels determines the shape of the output of QKV-attention.
        # Default to the same number of channels used in the key-query operation.
        if v_channels is None:
            v_channels = qk_channels
        if qk_channels % num_heads != 0:
            raise ValueError(f"qk_channels ({qk_channels}) must be divisible by num_heads ({num_heads}).")
        if v_channels % num_heads != 0:
            raise ValueError(f"v_channels ({v_channels}) must be divisible by num_heads ({num_heads}).")

        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.qk_channels_per_head = self.qk_channels // num_heads
        self.v_channels_per_head = self.v_channels // num_heads

        # Layer normalization
        self.layernorm1 = nn.LayerNorm(q_dim)
        self.layernorm2 = nn.LayerNorm(kv_dim) if is_cross_attention else nn.Identity()

        # Projection matrices
        self.query = nn.Linear(q_dim, qk_channels)
        self.key = nn.Linear(kv_dim, qk_channels)
        self.value = nn.Linear(kv_dim, v_channels)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x, channels_per_head):
        new_x_shape = x.size()[:-1] + (self.num_heads, channels_per_head)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs: Optional[torch.FloatTensor] = None,
        inputs_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        hidden_states = self.layernorm1(hidden_states)
        inputs = self.layernorm2(inputs)

        # Project queries, keys and values to a common feature dimension. If this is instantiated as a
        # cross-attention module, the keys and values come from the inputs; the attention mask needs to be
        # such that the inputs' non-relevant tokens are not attended to.
        is_cross_attention = inputs is not None
        queries = self.query(hidden_states)

        if is_cross_attention:
            keys = self.key(inputs)
            values = self.value(inputs)
            attention_mask = inputs_mask
        else:
            keys = self.key(hidden_states)
            values = self.value(hidden_states)

        # Reshape channels for multi-head attention.
        # We reshape from (batch_size, time, channels) to (batch_size, num_heads, time, channels per head)
        queries = self.transpose_for_scores(queries, self.qk_channels_per_head)
        keys = self.transpose_for_scores(keys, self.qk_channels_per_head)
        values = self.transpose_for_scores(values, self.v_channels_per_head)

        # Take the dot product between the queries and keys to get the raw attention scores.
        attention_scores = torch.matmul(queries, keys.transpose(-1, -2))

        batch_size, num_heads, seq_len, q_head_dim = queries.shape
        _, _, _, v_head_dim = values.shape
        hiddens = self.num_heads * v_head_dim

        attention_scores = attention_scores / math.sqrt(q_head_dim)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in PerceiverModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, values)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (hiddens,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
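
# Shape summary for the cross-attention case: queries come from the latents, of shape
# (batch, num_latents, d_latents), while keys and values come from the inputs, of shape (batch, seq_len, d_model).
# The output always has the queries' sequence length, so attending latents to inputs costs
# O(num_latents * seq_len) rather than O(seq_len**2), which is what makes the Perceiver scale to large inputs.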


class PerceiverSelfOutput(nn.Module):
    def __init__(self, config, input_channels, output_channels):
        super().__init__()
        self.dense = nn.Linear(input_channels, output_channels)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        return hidden_states


class PerceiverAttention(nn.Module):
    """Attention module, including a dense block."""

    def __init__(
        self,
        config,
        is_cross_attention=False,
        qk_channels=None,
        v_channels=None,
        num_heads=1,
        q_dim=None,
        kv_dim=None,
        use_query_residual=True,
    ):
        super().__init__()
        # MultiHead attention
        if is_cross_attention and qk_channels is None:
            if config.cross_attention_shape_for_attention == "q":
                qk_channels = q_dim
            elif config.cross_attention_shape_for_attention == "kv":
                qk_channels = kv_dim
            else:
                raise ValueError(
                    f"Unknown value {config.cross_attention_shape_for_attention} for "
                    "cross_attention_shape_for_attention."
                )
        else:
            if qk_channels is None:
                qk_channels = q_dim
            if v_channels is None:
                v_channels = qk_channels
        self.self = PerceiverSelfAttention(
            config,
            is_cross_attention=is_cross_attention,
            qk_channels=qk_channels,
            v_channels=v_channels,
            num_heads=num_heads,
            q_dim=q_dim,
            kv_dim=kv_dim,
        )
        # dense block
        output_channels = None
        if is_cross_attention:
            output_channels = q_dim
        else:
            if output_channels is None:
                output_channels = v_channels
        self.output = PerceiverSelfOutput(config, input_channels=self.self.v_channels, output_channels=output_channels)
        self.use_query_residual = use_query_residual
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs: Optional[torch.FloatTensor] = None,
        inputs_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            inputs,
            inputs_mask,
            output_attentions,
        )

        # Output projection
        attention_output = self.output(self_outputs[0])

        # Optionally include a residual to the original queries.
        # Consider omitting the residual if the semantics of query and output
        # are different, e.g. if queries are positions and outputs are pixels.
        if self.use_query_residual:
            attention_output = attention_output + hidden_states

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class PerceiverMLP(nn.Module):
    """A Transformer-style dense module to follow attention."""

    def __init__(self, config, input_size, widening_factor):
        super().__init__()
        self.dense1 = nn.Linear(input_size, widening_factor * input_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
        self.dense2 = nn.Linear(widening_factor * input_size, input_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense1(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.dense2(hidden_states)
        return hidden_states


class PerceiverLayer(nn.Module):
    def __init__(
        self,
        config,
        is_cross_attention=False,
        qk_channels=None,
        v_channels=None,
        num_heads=1,
        q_dim=None,
        kv_dim=None,
        widening_factor=4,
        use_query_residual=True,
    ):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = PerceiverAttention(
            config,
            is_cross_attention=is_cross_attention,
            qk_channels=qk_channels,
            v_channels=v_channels,
            num_heads=num_heads,
            q_dim=q_dim,
            kv_dim=kv_dim,
            use_query_residual=use_query_residual,
        )
        self.layernorm = nn.LayerNorm(q_dim)
        self.mlp = PerceiverMLP(config, input_size=q_dim, widening_factor=widening_factor)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs: Optional[torch.FloatTensor] = None,
        inputs_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            inputs,
            inputs_mask,
            output_attentions,
        )
        attention_output = attention_outputs[0]

        outputs = attention_outputs[1:]  # add attentions if we output attention weights

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )

        layer_output = layer_output + attention_output  # residual connection

        outputs = (layer_output,) + outputs

        return outputs

    def feed_forward_chunk(self, attention_output):
        layer_output = self.layernorm(attention_output)
        layer_output = self.mlp(layer_output)
        return layer_output
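
# Note: PerceiverLayer follows a pre-layernorm design. The attention sub-module normalizes its own inputs
# (layernorm1/layernorm2 above), and the MLP is applied to a layernormed copy of the attention output inside
# feed_forward_chunk; both the attention block (when use_query_residual=True) and the MLP block are wrapped
# in residual connections.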


class PerceiverEncoder(nn.Module):
    """The Perceiver Encoder: a scalable, fully attentional encoder."""

    def __init__(self, config, kv_dim=None):
        super().__init__()
        self.config = config

        # Check that we can use multihead-attention with these shapes.
        if config.d_latents % config.num_self_attention_heads != 0:
            raise ValueError(
                f"num_z_channels ({config.d_latents}) must be divisible by"
                f" num_self_attend_heads ({config.num_self_attention_heads})."
            )
        if config.d_latents % config.num_cross_attention_heads != 0:
            raise ValueError(
                f"num_z_channels ({config.d_latents}) must be divisible by"
                f" num_cross_attend_heads ({config.num_cross_attention_heads})."
            )

        # Construct the cross attention layer.
        self.cross_attention = PerceiverLayer(
            config,
            is_cross_attention=True,
            qk_channels=config.qk_channels,
            v_channels=config.v_channels,
            num_heads=config.num_cross_attention_heads,
            q_dim=config.d_latents,
            kv_dim=kv_dim,
            widening_factor=config.cross_attention_widening_factor,
            use_query_residual=config.use_query_residual,
        )

        # Construct a single block of self-attention layers.
        # We get deeper architectures by applying this block more than once.
        self_attention_layers = []
        for _ in range(config.num_self_attends_per_block):
            layer = PerceiverLayer(
                config,
                is_cross_attention=False,
                qk_channels=config.qk_channels,
                v_channels=config.v_channels,
                num_heads=config.num_self_attention_heads,
                q_dim=config.d_latents,
                kv_dim=config.d_latents,
                widening_factor=config.self_attention_widening_factor,
            )
            self_attention_layers.append(layer)

        self.self_attends = nn.ModuleList(self_attention_layers)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs: Optional[torch.FloatTensor] = None,
        inputs_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions else None

        # Apply the cross-attention between the latents (hidden_states) and inputs:
        layer_outputs = self.cross_attention(
            hidden_states,
            attention_mask=attention_mask,
            head_mask=None,
            inputs=inputs,
            inputs_mask=inputs_mask,
            output_attentions=output_attentions,
        )
        hidden_states = layer_outputs[0]

        if output_attentions:
            all_cross_attentions = all_cross_attentions + (layer_outputs[1],)

        # Apply the block of self-attention layers more than once:
        for _ in range(self.config.num_blocks):
            for i, layer_module in enumerate(self.self_attends):
                if output_hidden_states:
                    all_hidden_states = all_hidden_states + (hidden_states,)

                layer_head_mask = head_mask[i] if head_mask is not None else None

                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask=attention_mask,
                    head_mask=layer_head_mask,
                    output_attentions=output_attentions,
                )

                hidden_states = layer_outputs[0]
                if output_attentions:
                    all_self_attentions = all_self_attentions + (layer_outputs[1],)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithCrossAttentions(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
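
# Note on depth: the encoder runs the *same* ModuleList of `num_self_attends_per_block` self-attention layers
# `num_blocks` times in a row, i.e. weights are shared across repeats. The effective depth is therefore
# num_blocks * num_self_attends_per_block, while the parameter count stays that of a single block
# (plus the initial cross-attention layer).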


class PerceiverPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PerceiverConfig
    base_model_prefix = "perceiver"
    main_input_name = "inputs"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif hasattr(module, "latents"):
            module.latents.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, "position_embeddings") and isinstance(module, PerceiverTrainablePositionEncoding):
            module.position_embeddings.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.ParameterDict):
            for modality in module.keys():
                module[modality].data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


PERCEIVER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PerceiverConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

PERCEIVER_MODEL_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PerceiverConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
        decoder (*DecoderType*, *optional*):
            Optional decoder to use to decode the latent representation of the encoder. Examples include
            *transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder*,
            *transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder*,
            *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder*.
        input_preprocessor (*PreprocessorType*, *optional*):
            Optional input preprocessor to use. Examples include
            *transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor*,
            *transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor*,
            *transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor*,
            *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor*.
        output_postprocessor (*PostprocessorType*, *optional*):
            Optional output postprocessor to use. Examples include
            *transformers.models.perceiver.modeling_perceiver.PerceiverImagePostprocessor*,
            *transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor*,
            *transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor*,
            *transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor*,
            *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor*.

        Note that you can define your own decoders, preprocessors and/or postprocessors to fit your use-case.
"""

PERCEIVER_INPUTS_DOCSTRING = r"""
    Args:
        inputs (`torch.FloatTensor`):
            Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
        attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
            Whether to interpolate the pre-trained position encodings.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """The Perceiver: a scalable, fully attentional architecture.

    <Tip>

    Note that it's possible to fine-tune Perceiver on higher resolution images than the ones it has been trained on, by
    setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
    position embeddings to the higher resolution.

    </Tip>
    """,
    PERCEIVER_MODEL_START_DOCSTRING,
)
class PerceiverModel(PerceiverPreTrainedModel):
    def __init__(
        self,
        config,
        decoder=None,
        input_preprocessor: PreprocessorType = None,
        output_postprocessor: PostprocessorType = None,
    ):
        super().__init__(config)
        self.config = config

        self.input_preprocessor = input_preprocessor
        self.output_postprocessor = output_postprocessor
        self.embeddings = PerceiverEmbeddings(config)
        self.encoder = PerceiverEncoder(
            config, kv_dim=input_preprocessor.num_channels if input_preprocessor is not None else config.d_model
        )
        self.decoder = decoder

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.latents

    def set_input_embeddings(self, value):
        self.embeddings.latents = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @replace_return_docstrings(output_type=PerceiverModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        inputs: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        subsampled_output_points: Optional[Dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, PerceiverModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import PerceiverConfig, PerceiverTokenizer, PerceiverImageProcessor, PerceiverModel
        >>> from transformers.models.perceiver.modeling_perceiver import (
        ...     PerceiverTextPreprocessor,
        ...     PerceiverImagePreprocessor,
        ...     PerceiverClassificationDecoder,
        ... )
        >>> import torch
        >>> import requests
        >>> from PIL import Image

        >>> # EXAMPLE 1: using the Perceiver to classify texts
        >>> # - we define a TextPreprocessor, which can be used to embed tokens
        >>> # - we define a ClassificationDecoder, which can be used to decode the
        >>> # final hidden states of the latents to classification logits
        >>> # using trainable position embeddings
        >>> config = PerceiverConfig()
        >>> preprocessor = PerceiverTextPreprocessor(config)
        >>> decoder = PerceiverClassificationDecoder(
        ...     config,
        ...     num_channels=config.d_latents,
        ...     trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
        ...     use_query_residual=True,
        ... )
        >>> model = PerceiverModel(config, input_preprocessor=preprocessor, decoder=decoder)

        >>> # you can then do a forward pass as follows:
        >>> tokenizer = PerceiverTokenizer()
        >>> text = "hello world"
        >>> inputs = tokenizer(text, return_tensors="pt").input_ids

        >>> with torch.no_grad():
        ...     outputs = model(inputs=inputs)
        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 2]

        >>> # to train, one can train the model using standard cross-entropy:
        >>> criterion = torch.nn.CrossEntropyLoss()

        >>> labels = torch.tensor([1])
        >>> loss = criterion(logits, labels)

        >>> # EXAMPLE 2: using the Perceiver to classify images
        >>> # - we define an ImagePreprocessor, which can be used to embed images
        >>> config = PerceiverConfig(image_size=224)
        >>> preprocessor = PerceiverImagePreprocessor(
        ...     config,
        ...     prep_type="conv1x1",
        ...     spatial_downsample=1,
        ...     out_channels=256,
        ...     position_encoding_type="trainable",
        ...     concat_or_add_pos="concat",
        ...     project_pos_dim=256,
        ...     trainable_position_encoding_kwargs=dict(
        ...         num_channels=256,
        ...         index_dims=config.image_size**2,
        ...     ),
        ... )

        >>> model = PerceiverModel(
        ...     config,
        ...     input_preprocessor=preprocessor,
        ...     decoder=PerceiverClassificationDecoder(
        ...         config,
        ...         num_channels=config.d_latents,
        ...         trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
        ...         use_query_residual=True,
        ...     ),
        ... )

        >>> # you can then do a forward pass as follows:
        >>> image_processor = PerceiverImageProcessor()
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = image_processor(image, return_tensors="pt").pixel_values

        >>> with torch.no_grad():
        ...     outputs = model(inputs=inputs)
        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 2]

        >>> # to train, one can train the model using standard cross-entropy:
        >>> criterion = torch.nn.CrossEntropyLoss()

        >>> labels = torch.tensor([1])
        >>> loss = criterion(logits, labels)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.input_preprocessor is not None:
            inputs, modality_sizes, inputs_without_pos = self.input_preprocessor(
                inputs, interpolate_pos_encoding=interpolate_pos_encoding
            )
        else:
            modality_sizes = None
            inputs_without_pos = None
            if inputs.size()[-1] != self.config.d_model:
                raise ValueError(
                    f"Last dimension of the inputs: {inputs.size()[-1]} doesn't correspond to config.d_model:"
                    f" {self.config.d_model}. Make sure to set config.d_model appropriately."
                )

        batch_size, seq_length, _ = inputs.size()
        device = inputs.device

        # If no attention mask is provided, make them all ones
        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length), device=device)
        # Make the attention mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
        extended_attention_mask = self.invert_attention_mask(attention_mask)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_blocks x num_heads]
        # and head_mask is converted to shape [num_blocks x batch x num_heads x N x N]
        head_mask = self.get_head_mask(head_mask, self.config.num_blocks * self.config.num_self_attends_per_block)

        embedding_output = self.embeddings(batch_size=batch_size)

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=None,
            head_mask=head_mask,
            inputs=inputs,
            inputs_mask=extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        logits = None
        if self.decoder:
            if subsampled_output_points is not None:
                output_modality_sizes = {
                    "audio": subsampled_output_points["audio"].shape[0],
                    "image": subsampled_output_points["image"].shape[0],
                    "label": 1,
                }
            else:
                output_modality_sizes = modality_sizes
            decoder_query = self.decoder.decoder_query(
                inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_output_points
            )
            decoder_outputs = self.decoder(
                decoder_query,
                z=sequence_output,
                query_mask=extended_attention_mask,
                output_attentions=output_attentions,
            )
            logits = decoder_outputs.logits

            # add cross-attentions of decoder
            if output_attentions and decoder_outputs.cross_attentions is not None:
                if return_dict:
                    encoder_outputs.cross_attentions = (
                        encoder_outputs.cross_attentions + decoder_outputs.cross_attentions
                    )
                else:
                    encoder_outputs = encoder_outputs + decoder_outputs.cross_attentions

            if self.output_postprocessor:
                logits = self.output_postprocessor(logits, modality_sizes=output_modality_sizes)

        if not return_dict:
            if logits is not None:
                return (logits, sequence_output) + encoder_outputs[1:]
            else:
                return (sequence_output,) + encoder_outputs[1:]

        return PerceiverModelOutput(
            logits=logits,
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
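
# Reading guide for PerceiverModel.forward above:
# 1. the optional input_preprocessor turns raw inputs (token ids, pixels, ...) into a (batch, seq_len, d_model) array,
# 2. the encoder cross-attends the learned latents to that array once, then runs the repeated self-attention blocks,
# 3. the optional decoder cross-attends decoder queries to the final latents to produce `logits`,
# 4. the optional output_postprocessor reshapes or splits those logits (e.g. per modality for multimodal autoencoding).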


@add_start_docstrings("""Example use of Perceiver for masked language modeling.""", PERCEIVER_START_DOCSTRING)
class PerceiverForMaskedLM(PerceiverPreTrainedModel):
    def __init__(self, config: PerceiverConfig):
        super().__init__(config)

        text_preprocessor = PerceiverTextPreprocessor(config)

        trainable_position_encoding_kwargs_decoder = {
            "num_channels": text_preprocessor.num_channels,
            "index_dims": config.max_position_embeddings,
        }

        self.perceiver = PerceiverModel(
            config,
            input_preprocessor=text_preprocessor,
            decoder=PerceiverBasicDecoder(
                config,
                output_num_channels=config.d_latents,
                output_index_dims=config.max_position_embeddings,  # we need to define the seq_len of the inputs beforehand
                num_channels=text_preprocessor.num_channels,
                qk_channels=8 * 32,
                v_channels=text_preprocessor.num_channels,
                num_heads=8,
                use_query_residual=False,
                final_project=False,
                trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
            ),
        )
        self.embedding_decoder = PerceiverEmbeddingDecoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=PerceiverMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        inputs: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
        input_ids: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, PerceiverMaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, PerceiverForMaskedLM
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
        >>> model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver")

        >>> # training
        >>> text = "This is an incomplete sentence where some words are missing."
        >>> inputs = tokenizer(text, padding="max_length", return_tensors="pt")
        >>> # mask " missing."
        >>> inputs["input_ids"][0, 52:61] = tokenizer.mask_token_id
        >>> labels = tokenizer(text, padding="max_length", return_tensors="pt").input_ids

        >>> outputs = model(**inputs, labels=labels)
        >>> loss = outputs.loss
        >>> round(loss.item(), 2)
        19.87

        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 2048, 262]

        >>> # inference
        >>> text = "This is an incomplete sentence where some words are missing."
        >>> encoding = tokenizer(text, padding="max_length", return_tensors="pt")

        >>> # mask bytes corresponding to " missing.". Note that the model performs much better if the masked span starts with a space.
        >>> encoding["input_ids"][0, 52:61] = tokenizer.mask_token_id

        >>> # forward pass
        >>> with torch.no_grad():
        ...     outputs = model(**encoding)

        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 2048, 262]

        >>> masked_tokens_predictions = logits[0, 52:61].argmax(dim=-1).tolist()
        >>> tokenizer.decode(masked_tokens_predictions)
        ' missing.'
        ```"""
        if inputs is not None and input_ids is not None:
            raise ValueError("You cannot use both `inputs` and `input_ids`")
        elif inputs is None and input_ids is not None:
            inputs = input_ids

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        logits = self.embedding_decoder(
            outputs.logits if return_dict else outputs[0], embedding_layer=self.perceiver.input_preprocessor.embeddings
        )

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return PerceiverMaskedLMOutput(
            loss=masked_lm_loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
  930. @add_start_docstrings("""Example use of Perceiver for text classification.""", PERCEIVER_START_DOCSTRING)
  931. class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
  932. def __init__(self, config):
  933. super().__init__(config)
  934. trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
  935. self.num_labels = config.num_labels
  936. self.perceiver = PerceiverModel(
  937. config,
  938. input_preprocessor=PerceiverTextPreprocessor(config),
  939. decoder=PerceiverClassificationDecoder(
  940. config,
  941. num_channels=config.d_latents,
  942. trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
  943. use_query_residual=True,
  944. ),
  945. )
  946. # Initialize weights and apply final processing
  947. self.post_init()
  948. @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  949. @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
  950. def forward(
  951. self,
  952. inputs: Optional[torch.Tensor] = None,
  953. attention_mask: Optional[torch.Tensor] = None,
  954. head_mask: Optional[torch.Tensor] = None,
  955. output_attentions: Optional[bool] = None,
  956. output_hidden_states: Optional[bool] = None,
  957. labels: Optional[torch.Tensor] = None,
  958. return_dict: Optional[bool] = None,
  959. input_ids: Optional[torch.Tensor] = None,
  960. ) -> Union[Tuple, PerceiverClassifierOutput]:
  961. r"""
  962. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  963. Labels for computing the classification/regression loss. Indices should be in `[0, ..., config.num_labels -
  964. 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels >
  965. 1` a classification loss is computed (Cross-Entropy).
  966. Returns:
  967. Examples:
  968. ```python
  969. >>> from transformers import AutoTokenizer, PerceiverForSequenceClassification
  970. >>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
  971. >>> model = PerceiverForSequenceClassification.from_pretrained("deepmind/language-perceiver")
  972. >>> text = "hello world"
  973. >>> inputs = tokenizer(text, return_tensors="pt").input_ids
  974. >>> outputs = model(inputs=inputs)
  975. >>> logits = outputs.logits
  976. >>> list(logits.shape)
  977. [1, 2]
  978. ```"""
  979. if inputs is not None and input_ids is not None:
  980. raise ValueError("You cannot use both `inputs` and `input_ids`")
  981. elif inputs is None and input_ids is not None:
  982. inputs = input_ids
  983. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  984. outputs = self.perceiver(
  985. inputs=inputs,
  986. attention_mask=attention_mask,
  987. head_mask=head_mask,
  988. output_attentions=output_attentions,
  989. output_hidden_states=output_hidden_states,
  990. return_dict=return_dict,
  991. )
  992. logits = outputs.logits if return_dict else outputs[0]
  993. loss = None
  994. if labels is not None:
  995. if self.config.problem_type is None:
  996. if self.num_labels == 1:
  997. self.config.problem_type = "regression"
  998. elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
  999. self.config.problem_type = "single_label_classification"
  1000. else:
  1001. self.config.problem_type = "multi_label_classification"
  1002. if self.config.problem_type == "regression":
  1003. loss_fct = MSELoss()
  1004. if self.num_labels == 1:
  1005. loss = loss_fct(logits.squeeze(), labels.squeeze())
  1006. else:
  1007. loss = loss_fct(logits, labels)
  1008. elif self.config.problem_type == "single_label_classification":
  1009. loss_fct = CrossEntropyLoss()
  1010. loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
  1011. elif self.config.problem_type == "multi_label_classification":
  1012. loss_fct = BCEWithLogitsLoss()
  1013. loss = loss_fct(logits, labels)
  1014. if not return_dict:
  1015. output = (logits,) + outputs[2:]
  1016. return ((loss,) + output) if loss is not None else output
  1017. return PerceiverClassifierOutput(
  1018. loss=loss,
  1019. logits=logits,
  1020. hidden_states=outputs.hidden_states,
  1021. attentions=outputs.attentions,
  1022. cross_attentions=outputs.cross_attentions,
  1023. )
  1024. @add_start_docstrings(
  1025. """
  1026. Example use of Perceiver for image classification, for tasks such as ImageNet.
  1027. This model uses learned position embeddings. In other words, this model is not given any privileged information about
  1028. the structure of images. As shown in the paper, this model can achieve a top-1 accuracy of 72.7 on ImageNet.
  1029. [`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
  1030. (with `prep_type="conv1x1"`) to preprocess the input images, and
  1031. [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
  1032. [`PerceiverModel`] into classification logits.
  1033. """,
  1034. PERCEIVER_START_DOCSTRING,
  1035. )
  1036. class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel):
  1037. def __init__(self, config):
  1038. super().__init__(config)
  1039. trainable_position_encoding_kwargs_preprocessor = {"num_channels": 256, "index_dims": config.image_size**2}
  1040. trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
  1041. self.num_labels = config.num_labels
  1042. self.perceiver = PerceiverModel(
  1043. config,
  1044. input_preprocessor=PerceiverImagePreprocessor(
  1045. config,
  1046. prep_type="conv1x1",
  1047. spatial_downsample=1,
  1048. out_channels=256,
  1049. position_encoding_type="trainable",
  1050. concat_or_add_pos="concat",
  1051. project_pos_dim=256,
  1052. trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_preprocessor,
  1053. ),
  1054. decoder=PerceiverClassificationDecoder(
  1055. config,
  1056. num_channels=config.d_latents,
  1057. trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
  1058. use_query_residual=True,
  1059. ),
  1060. )
  1061. # Initialize weights and apply final processing
  1062. self.post_init()
  1063. @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1064. @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
  1065. def forward(
  1066. self,
  1067. inputs: Optional[torch.Tensor] = None,
  1068. attention_mask: Optional[torch.Tensor] = None,
  1069. head_mask: Optional[torch.Tensor] = None,
  1070. output_attentions: Optional[bool] = None,
  1071. output_hidden_states: Optional[bool] = None,
  1072. labels: Optional[torch.Tensor] = None,
  1073. interpolate_pos_encoding: bool = False,
  1074. return_dict: Optional[bool] = None,
  1075. pixel_values: Optional[torch.Tensor] = None,
  1076. ) -> Union[Tuple, PerceiverClassifierOutput]:
  1077. r"""
  1078. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1079. Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
  1080. config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
  1081. `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
  1082. Returns:
  1083. Examples:
  1084. ```python
  1085. >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationLearned
  1086. >>> from PIL import Image
  1087. >>> import requests
  1088. >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
  1089. >>> image = Image.open(requests.get(url, stream=True).raw)
  1090. >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-learned")
  1091. >>> model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned")
  1092. >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
  1093. >>> outputs = model(inputs=inputs)
  1094. >>> logits = outputs.logits
  1095. >>> list(logits.shape)
  1096. [1, 1000]
  1097. >>> # model predicts one of the 1000 ImageNet classes
  1098. >>> predicted_class_idx = logits.argmax(-1).item()
  1099. >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
  1100. Predicted class: tabby, tabby cat
  1101. ```"""
  1102. if inputs is not None and pixel_values is not None:
  1103. raise ValueError("You cannot use both `inputs` and `pixel_values`")
  1104. elif inputs is None and pixel_values is not None:
  1105. inputs = pixel_values
  1106. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1107. outputs = self.perceiver(
  1108. inputs=inputs,
  1109. attention_mask=attention_mask,
  1110. head_mask=head_mask,
  1111. output_attentions=output_attentions,
  1112. output_hidden_states=output_hidden_states,
  1113. interpolate_pos_encoding=interpolate_pos_encoding,
  1114. return_dict=return_dict,
  1115. )
  1116. logits = outputs.logits if return_dict else outputs[0]
  1117. loss = None
  1118. if labels is not None:
  1119. if self.config.problem_type is None:
  1120. if self.num_labels == 1:
  1121. self.config.problem_type = "regression"
  1122. elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
  1123. self.config.problem_type = "single_label_classification"
  1124. else:
  1125. self.config.problem_type = "multi_label_classification"
  1126. if self.config.problem_type == "regression":
  1127. loss_fct = MSELoss()
  1128. if self.num_labels == 1:
  1129. loss = loss_fct(logits.squeeze(), labels.squeeze())
  1130. else:
  1131. loss = loss_fct(logits, labels)
  1132. elif self.config.problem_type == "single_label_classification":
  1133. loss_fct = CrossEntropyLoss()
  1134. loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
  1135. elif self.config.problem_type == "multi_label_classification":
  1136. loss_fct = BCEWithLogitsLoss()
  1137. loss = loss_fct(logits, labels)
  1138. if not return_dict:
  1139. output = (logits,) + outputs[2:]
  1140. return ((loss,) + output) if loss is not None else output
  1141. return PerceiverClassifierOutput(
  1142. loss=loss,
  1143. logits=logits,
  1144. hidden_states=outputs.hidden_states,
  1145. attentions=outputs.attentions,
  1146. cross_attentions=outputs.cross_attentions,
  1147. )
  1148. @add_start_docstrings(
  1149. """
  1150. Example use of Perceiver for image classification, for tasks such as ImageNet.
  1151. This model uses fixed 2D Fourier position embeddings. As shown in the paper, this model can achieve a top-1 accuracy of
  1152. 79.0 on ImageNet, and 84.5 when pre-trained on a large-scale dataset (i.e. JFT).
1153. [`PerceiverForImageClassificationFourier`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
  1154. (with `prep_type="pixels"`) to preprocess the input images, and
  1155. [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
  1156. [`PerceiverModel`] into classification logits.
  1157. """,
  1158. PERCEIVER_START_DOCSTRING,
  1159. )
  1160. class PerceiverForImageClassificationFourier(PerceiverPreTrainedModel):
  1161. def __init__(self, config):
  1162. super().__init__(config)
  1163. fourier_position_encoding_kwargs_preprocessor = {
  1164. "concat_pos": True,
  1165. "max_resolution": (224, 224),
  1166. "num_bands": 64,
  1167. "sine_only": False,
  1168. }
  1169. trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
  1170. self.num_labels = config.num_labels
  1171. self.perceiver = PerceiverModel(
  1172. config,
  1173. input_preprocessor=PerceiverImagePreprocessor(
  1174. config,
  1175. prep_type="pixels",
  1176. spatial_downsample=1,
  1177. fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
  1178. ),
  1179. decoder=PerceiverClassificationDecoder(
  1180. config,
  1181. num_channels=config.d_latents,
  1182. trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
  1183. use_query_residual=True,
  1184. ),
  1185. )
  1186. # Initialize weights and apply final processing
  1187. self.post_init()
  1188. @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1189. @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
  1190. def forward(
  1191. self,
  1192. inputs: Optional[torch.Tensor] = None,
  1193. attention_mask: Optional[torch.Tensor] = None,
  1194. head_mask: Optional[torch.Tensor] = None,
  1195. output_attentions: Optional[bool] = None,
  1196. output_hidden_states: Optional[bool] = None,
  1197. labels: Optional[torch.Tensor] = None,
  1198. return_dict: Optional[bool] = None,
  1199. pixel_values: Optional[torch.Tensor] = None,
  1200. ) -> Union[Tuple, PerceiverClassifierOutput]:
  1201. r"""
  1202. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1203. Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
  1204. config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
  1205. `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
  1206. Returns:
  1207. Examples:
  1208. ```python
  1209. >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationFourier
  1210. >>> from PIL import Image
  1211. >>> import requests
  1212. >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
  1213. >>> image = Image.open(requests.get(url, stream=True).raw)
  1214. >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-fourier")
  1215. >>> model = PerceiverForImageClassificationFourier.from_pretrained("deepmind/vision-perceiver-fourier")
  1216. >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
  1217. >>> outputs = model(inputs=inputs)
  1218. >>> logits = outputs.logits
  1219. >>> list(logits.shape)
  1220. [1, 1000]
  1221. >>> # model predicts one of the 1000 ImageNet classes
  1222. >>> predicted_class_idx = logits.argmax(-1).item()
  1223. >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
  1224. Predicted class: tabby, tabby cat
  1225. ```"""
  1226. if inputs is not None and pixel_values is not None:
  1227. raise ValueError("You cannot use both `inputs` and `pixel_values`")
  1228. elif inputs is None and pixel_values is not None:
  1229. inputs = pixel_values
  1230. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1231. outputs = self.perceiver(
  1232. inputs=inputs,
  1233. attention_mask=attention_mask,
  1234. head_mask=head_mask,
  1235. output_attentions=output_attentions,
  1236. output_hidden_states=output_hidden_states,
  1237. return_dict=return_dict,
  1238. )
  1239. logits = outputs.logits if return_dict else outputs[0]
  1240. loss = None
  1241. if labels is not None:
  1242. if self.config.problem_type is None:
  1243. if self.num_labels == 1:
  1244. self.config.problem_type = "regression"
  1245. elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
  1246. self.config.problem_type = "single_label_classification"
  1247. else:
  1248. self.config.problem_type = "multi_label_classification"
  1249. if self.config.problem_type == "regression":
  1250. loss_fct = MSELoss()
  1251. if self.num_labels == 1:
  1252. loss = loss_fct(logits.squeeze(), labels.squeeze())
  1253. else:
  1254. loss = loss_fct(logits, labels)
  1255. elif self.config.problem_type == "single_label_classification":
  1256. loss_fct = CrossEntropyLoss()
  1257. loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
  1258. elif self.config.problem_type == "multi_label_classification":
  1259. loss_fct = BCEWithLogitsLoss()
  1260. loss = loss_fct(logits, labels)
  1261. if not return_dict:
  1262. output = (logits,) + outputs[2:]
  1263. return ((loss,) + output) if loss is not None else output
  1264. return PerceiverClassifierOutput(
  1265. loss=loss,
  1266. logits=logits,
  1267. hidden_states=outputs.hidden_states,
  1268. attentions=outputs.attentions,
  1269. cross_attentions=outputs.cross_attentions,
  1270. )
  1271. @add_start_docstrings(
  1272. """
  1273. Example use of Perceiver for image classification, for tasks such as ImageNet.
  1274. This model uses a 2D conv+maxpool preprocessing network. As shown in the paper, this model can achieve a top-1 accuracy
  1275. of 82.1 on ImageNet.
1276. [`PerceiverForImageClassificationConvProcessing`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
  1277. (with `prep_type="conv"`) to preprocess the input images, and
  1278. [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
  1279. [`PerceiverModel`] into classification logits.
  1280. """,
  1281. PERCEIVER_START_DOCSTRING,
  1282. )
  1283. class PerceiverForImageClassificationConvProcessing(PerceiverPreTrainedModel):
  1284. def __init__(self, config):
  1285. super().__init__(config)
  1286. fourier_position_encoding_kwargs_preprocessor = {
  1287. "concat_pos": True,
  1288. "max_resolution": (56, 56),
  1289. "num_bands": 64,
  1290. "sine_only": False,
  1291. }
  1292. trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
  1293. self.num_labels = config.num_labels
  1294. self.perceiver = PerceiverModel(
  1295. config,
  1296. input_preprocessor=PerceiverImagePreprocessor(
  1297. config,
  1298. prep_type="conv",
  1299. spatial_downsample=1,
  1300. position_encoding_type="fourier",
  1301. fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
  1302. ),
  1303. decoder=PerceiverClassificationDecoder(
  1304. config,
  1305. num_channels=config.d_latents,
  1306. trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
  1307. use_query_residual=True,
  1308. ),
  1309. )
  1310. # Initialize weights and apply final processing
  1311. self.post_init()
  1312. @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1313. @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
  1314. def forward(
  1315. self,
  1316. inputs: Optional[torch.Tensor] = None,
  1317. attention_mask: Optional[torch.Tensor] = None,
  1318. head_mask: Optional[torch.Tensor] = None,
  1319. output_attentions: Optional[bool] = None,
  1320. output_hidden_states: Optional[bool] = None,
  1321. labels: Optional[torch.Tensor] = None,
  1322. return_dict: Optional[bool] = None,
  1323. pixel_values: Optional[torch.Tensor] = None,
  1324. ) -> Union[Tuple, PerceiverClassifierOutput]:
  1325. r"""
  1326. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1327. Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
  1328. config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
  1329. `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
  1330. Returns:
  1331. Examples:
  1332. ```python
  1333. >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationConvProcessing
  1334. >>> from PIL import Image
  1335. >>> import requests
  1336. >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
  1337. >>> image = Image.open(requests.get(url, stream=True).raw)
  1338. >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")
  1339. >>> model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
  1340. >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
  1341. >>> outputs = model(inputs=inputs)
  1342. >>> logits = outputs.logits
  1343. >>> list(logits.shape)
  1344. [1, 1000]
  1345. >>> # model predicts one of the 1000 ImageNet classes
  1346. >>> predicted_class_idx = logits.argmax(-1).item()
  1347. >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
  1348. Predicted class: tabby, tabby cat
  1349. ```"""
  1350. if inputs is not None and pixel_values is not None:
  1351. raise ValueError("You cannot use both `inputs` and `pixel_values`")
  1352. elif inputs is None and pixel_values is not None:
  1353. inputs = pixel_values
  1354. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1355. outputs = self.perceiver(
  1356. inputs=inputs,
  1357. attention_mask=attention_mask,
  1358. head_mask=head_mask,
  1359. output_attentions=output_attentions,
  1360. output_hidden_states=output_hidden_states,
  1361. return_dict=return_dict,
  1362. )
  1363. logits = outputs.logits if return_dict else outputs[0]
  1364. loss = None
  1365. if labels is not None:
  1366. if self.config.problem_type is None:
  1367. if self.num_labels == 1:
  1368. self.config.problem_type = "regression"
  1369. elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
  1370. self.config.problem_type = "single_label_classification"
  1371. else:
  1372. self.config.problem_type = "multi_label_classification"
  1373. if self.config.problem_type == "regression":
  1374. loss_fct = MSELoss()
  1375. if self.num_labels == 1:
  1376. loss = loss_fct(logits.squeeze(), labels.squeeze())
  1377. else:
  1378. loss = loss_fct(logits, labels)
  1379. elif self.config.problem_type == "single_label_classification":
  1380. loss_fct = CrossEntropyLoss()
  1381. loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
  1382. elif self.config.problem_type == "multi_label_classification":
  1383. loss_fct = BCEWithLogitsLoss()
  1384. loss = loss_fct(logits, labels)
  1385. if not return_dict:
  1386. output = (logits,) + outputs[2:]
  1387. return ((loss,) + output) if loss is not None else output
  1388. return PerceiverClassifierOutput(
  1389. loss=loss,
  1390. logits=logits,
  1391. hidden_states=outputs.hidden_states,
  1392. attentions=outputs.attentions,
  1393. cross_attentions=outputs.cross_attentions,
  1394. )
  1395. @add_start_docstrings(
  1396. """
  1397. Example use of Perceiver for optical flow, for tasks such as Sintel and KITTI. [`PerceiverForOpticalFlow`] uses
  1398. [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with *prep_type="patches"*) to preprocess the
  1399. input images, and [`~models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder`] to decode the latent
  1400. representation of [`PerceiverModel`].
1401. As input, one concatenates 2 subsequent frames along the channel dimension and extracts a 3 x 3 patch around each pixel
  1402. (leading to 3 x 3 x 3 x 2 = 54 values for each pixel). Fixed Fourier position encodings are used to encode the position
  1403. of each pixel in the patch. Next, one applies the Perceiver encoder. To decode, one queries the latent representation
1404. using the same encoding used for the input. (An illustrative patch-extraction sketch follows this class.)
  1405. """,
  1406. PERCEIVER_START_DOCSTRING,
  1407. )
  1408. class PerceiverForOpticalFlow(PerceiverPreTrainedModel):
  1409. def __init__(self, config):
  1410. super().__init__(config)
  1411. fourier_position_encoding_kwargs_preprocessor = {
  1412. "num_bands": 64,
  1413. "max_resolution": config.train_size,
  1414. "sine_only": False,
  1415. "concat_pos": True,
  1416. }
  1417. fourier_position_encoding_kwargs_decoder = {
  1418. "concat_pos": True,
  1419. "max_resolution": config.train_size,
  1420. "num_bands": 64,
  1421. "sine_only": False,
  1422. }
  1423. image_preprocessor = PerceiverImagePreprocessor(
  1424. config,
  1425. prep_type="patches",
  1426. spatial_downsample=1,
  1427. conv_after_patching=True,
  1428. conv_after_patching_in_channels=54,
  1429. temporal_downsample=2,
  1430. position_encoding_type="fourier",
  1431. # position_encoding_kwargs
  1432. fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
  1433. )
  1434. self.perceiver = PerceiverModel(
  1435. config,
  1436. input_preprocessor=image_preprocessor,
  1437. decoder=PerceiverOpticalFlowDecoder(
  1438. config,
  1439. num_channels=image_preprocessor.num_channels,
  1440. output_image_shape=config.train_size,
  1441. rescale_factor=100.0,
  1442. # decoder kwargs
  1443. use_query_residual=False,
  1444. output_num_channels=2,
  1445. # We query the decoder using the first frame features
  1446. # rather than a standard decoder position encoding.
  1447. position_encoding_type="fourier",
  1448. fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_decoder,
  1449. ),
  1450. )
  1451. # Initialize weights and apply final processing
  1452. self.post_init()
  1453. @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1454. @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
  1455. def forward(
  1456. self,
  1457. inputs: Optional[torch.Tensor] = None,
  1458. attention_mask: Optional[torch.Tensor] = None,
  1459. head_mask: Optional[torch.Tensor] = None,
  1460. output_attentions: Optional[bool] = None,
  1461. output_hidden_states: Optional[bool] = None,
  1462. labels: Optional[torch.Tensor] = None,
  1463. return_dict: Optional[bool] = None,
  1464. ) -> Union[Tuple, PerceiverClassifierOutput]:
  1465. r"""
  1466. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1467. Labels for computing the optical flow loss. Indices should be in `[0, ..., config.num_labels - 1]`.
  1468. Returns:
  1469. Examples:
  1470. ```python
  1471. >>> from transformers import PerceiverForOpticalFlow
  1472. >>> import torch
  1473. >>> model = PerceiverForOpticalFlow.from_pretrained("deepmind/optical-flow-perceiver")
  1474. >>> # in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel,
  1475. >>> # leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels)
  1476. >>> # patches have shape (batch_size, num_frames, num_channels, height, width)
  1477. >>> # the authors train on resolutions of 368 x 496
  1478. >>> patches = torch.randn(1, 2, 27, 368, 496)
  1479. >>> outputs = model(inputs=patches)
  1480. >>> logits = outputs.logits
  1481. >>> list(logits.shape)
  1482. [1, 368, 496, 2]
  1483. ```"""
  1484. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1485. loss = None
  1486. if labels is not None:
  1487. raise NotImplementedError("Optical flow training is not yet supported")
  1488. outputs = self.perceiver(
  1489. inputs=inputs,
  1490. attention_mask=attention_mask,
  1491. head_mask=head_mask,
  1492. output_attentions=output_attentions,
  1493. output_hidden_states=output_hidden_states,
  1494. return_dict=return_dict,
  1495. )
  1496. logits = outputs.logits if return_dict else outputs[0]
  1497. if not return_dict:
  1498. output = (logits,) + outputs[2:]
  1499. return ((loss,) + output) if loss is not None else output
  1500. return PerceiverClassifierOutput(
  1501. loss=loss,
  1502. logits=logits,
  1503. hidden_states=outputs.hidden_states,
  1504. attentions=outputs.attentions,
  1505. cross_attentions=outputs.cross_attentions,
  1506. )
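# Hedged sketch (illustrative, not part of the modeling code): one possible way to build the
# per-pixel 3 x 3 patches that `PerceiverForOpticalFlow` expects, for two RGB frames of shape
# (batch_size, 3, height, width). The helper name and the use of `torch.nn.functional.unfold`
# are assumptions for this example, not the authors' preprocessing pipeline.
def _example_extract_flow_patches(frame1: torch.Tensor, frame2: torch.Tensor, patch_size: int = 3) -> torch.Tensor:
    def to_patches(frame):
        # Pad by patch_size // 2 so every pixel gets a full neighborhood, then unfold into
        # (batch_size, 3 * patch_size**2, height * width) and restore the spatial grid.
        padded = torch.nn.functional.pad(frame, [patch_size // 2] * 4)
        unfolded = torch.nn.functional.unfold(padded, kernel_size=patch_size)
        return unfolded.reshape(frame.shape[0], frame.shape[1] * patch_size**2, *frame.shape[2:])

    # (batch_size, num_frames=2, 27, height, width), matching the doctest above.
    return torch.stack([to_patches(frame1), to_patches(frame2)], dim=1)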
  1507. @add_start_docstrings(
  1508. """
  1509. Example use of Perceiver for multimodal (video) autoencoding, for tasks such as Kinetics-700.
  1510. [`PerceiverForMultimodalAutoencoding`] uses [`~models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor`] to
  1511. preprocess the 3 modalities: images, audio and class labels. This preprocessor uses modality-specific preprocessors to
  1512. preprocess every modality separately, after which they are concatenated. Trainable position embeddings are used to pad
  1513. each modality to the same number of channels to make concatenation along the time dimension possible. Next, one applies
  1514. the Perceiver encoder.
  1515. [`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] is used to decode the latent representation of
  1516. [`PerceiverModel`]. This decoder uses each modality-specific decoder to construct queries. The decoder queries are
  1517. created based on the inputs after preprocessing. However, autoencoding an entire video in a single forward pass is
  1518. computationally infeasible, hence one only uses parts of the decoder queries to do cross-attention with the latent
  1519. representation. This is determined by the subsampled indices for each modality, which can be provided as additional
  1520. input to the forward pass of [`PerceiverForMultimodalAutoencoding`].
  1521. [`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] also pads the decoder queries of the different
  1522. modalities to the same number of channels, in order to concatenate them along the time dimension. Next, cross-attention
  1523. is performed with the latent representation of [`PerceiverModel`].
  1524. Finally, [`~models.perceiver.modeling_perceiver.PerceiverMultiModalPostprocessor`] is used to turn this tensor into an
  1525. actual video. It first splits up the output into the different modalities, and then applies the respective
  1526. postprocessor for each modality.
  1527. Note that, by masking the classification label during evaluation (i.e. simply providing a tensor of zeros for the
  1528. "label" modality), this auto-encoding model becomes a Kinetics 700 video classifier.
  1529. """,
  1530. PERCEIVER_START_DOCSTRING,
  1531. )
  1532. class PerceiverForMultimodalAutoencoding(PerceiverPreTrainedModel):
  1533. def __init__(self, config: PerceiverConfig):
  1534. super().__init__(config)
  1535. n_audio_samples = config.num_frames * config.audio_samples_per_frame
  1536. input_preprocessor = PerceiverMultimodalPreprocessor(
  1537. min_padding_size=4,
  1538. modalities={
  1539. "audio": PerceiverAudioPreprocessor(
  1540. config,
  1541. position_encoding_type="fourier",
  1542. fourier_position_encoding_kwargs={
  1543. "num_bands": 192,
  1544. "max_resolution": (n_audio_samples,),
  1545. "sine_only": False,
  1546. "concat_pos": True,
  1547. },
  1548. prep_type="patches",
  1549. samples_per_patch=config.samples_per_patch,
  1550. ),
  1551. "image": PerceiverImagePreprocessor(
  1552. config,
  1553. position_encoding_type="fourier",
  1554. fourier_position_encoding_kwargs={
  1555. "num_bands": 32,
  1556. "max_resolution": (config.num_frames, config.image_size, config.image_size),
  1557. "sine_only": False,
  1558. "concat_pos": True,
  1559. },
  1560. prep_type="patches",
  1561. spatial_downsample=4,
  1562. temporal_downsample=1,
  1563. ),
  1564. "label": PerceiverOneHotPreprocessor(config),
  1565. },
  1566. mask_probs={"image": 0.0, "audio": 0.0, "label": 1.0},
  1567. )
  1568. image_decoder = PerceiverBasicVideoAutoencodingDecoder(
  1569. config,
  1570. # Autoencoding, don't pass inputs to the queries.
  1571. concat_preprocessed_input=False,
  1572. output_shape=config.output_shape,
  1573. output_num_channels=config.output_num_channels,
  1574. use_query_residual=False,
  1575. position_encoding_only=True,
  1576. position_encoding_type="fourier",
  1577. fourier_position_encoding_kwargs={
  1578. "num_bands": 32,
  1579. "max_resolution": (config.num_frames, config.image_size, config.image_size),
  1580. "sine_only": False,
  1581. "concat_pos": True,
  1582. },
  1583. )
  1584. decoder = PerceiverMultimodalDecoder(
  1585. config,
  1586. # Autoencoding, don't pass inputs to the queries.
  1587. concat_preprocessed_input=False,
  1588. # Modality specific decoders are used ONLY to generate queries.
1589. # All modalities are decoded together using a unified decoder.
  1590. modalities={
  1591. "audio": PerceiverBasicDecoder(
  1592. config,
  1593. # Autoencoding, don't pass inputs to the queries.
  1594. concat_preprocessed_input=False,
  1595. output_index_dims=(n_audio_samples // config.samples_per_patch,),
  1596. output_num_channels=config.output_num_channels,
  1597. use_query_residual=False,
  1598. position_encoding_only=True,
  1599. position_encoding_type="fourier",
  1600. fourier_position_encoding_kwargs={
  1601. "num_bands": 192,
  1602. "max_resolution": (n_audio_samples,),
  1603. "sine_only": False,
  1604. "concat_pos": True,
  1605. },
  1606. ),
  1607. "image": image_decoder,
  1608. "label": PerceiverClassificationDecoder(
  1609. config,
  1610. # Autoencoding, don't pass inputs to the queries.
  1611. concat_preprocessed_input=False,
  1612. use_query_residual=False,
  1613. position_encoding_only=True,
  1614. position_encoding_type="trainable",
  1615. trainable_position_encoding_kwargs={
  1616. "num_channels": config._label_trainable_num_channels,
  1617. "index_dims": 1,
  1618. },
  1619. ),
  1620. },
  1621. num_outputs=None,
  1622. output_num_channels=config.output_num_channels,
  1623. use_query_residual=False,
  1624. )
  1625. output_postprocessor = PerceiverMultimodalPostprocessor(
  1626. modalities={
  1627. "audio": PerceiverAudioPostprocessor(config, in_channels=config.output_num_channels),
  1628. "image": PerceiverProjectionPostprocessor(in_channels=config.output_num_channels, out_channels=3),
  1629. "label": PerceiverClassificationPostprocessor(config, in_channels=config.output_num_channels),
  1630. }
  1631. )
  1632. self.perceiver = PerceiverModel(
  1633. config,
  1634. input_preprocessor=input_preprocessor,
  1635. decoder=decoder,
  1636. output_postprocessor=output_postprocessor,
  1637. )
  1638. # Initialize weights and apply final processing
  1639. self.post_init()
  1640. @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1641. @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
  1642. def forward(
  1643. self,
  1644. inputs: Optional[torch.Tensor] = None,
  1645. attention_mask: Optional[torch.Tensor] = None,
  1646. subsampled_output_points: Optional[Dict[str, torch.Tensor]] = None,
  1647. head_mask: Optional[torch.Tensor] = None,
  1648. output_attentions: Optional[bool] = None,
  1649. output_hidden_states: Optional[bool] = None,
  1650. labels: Optional[torch.Tensor] = None,
  1651. return_dict: Optional[bool] = None,
  1652. ) -> Union[Tuple, PerceiverClassifierOutput]:
  1653. r"""
  1654. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1655. Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
  1656. config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
  1657. `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
  1658. Returns:
  1659. Examples:
  1660. ```python
  1661. >>> from transformers import PerceiverForMultimodalAutoencoding
  1662. >>> import torch
  1663. >>> import numpy as np
  1664. >>> # create multimodal inputs
  1665. >>> images = torch.randn((1, 16, 3, 224, 224))
  1666. >>> audio = torch.randn((1, 30720, 1))
  1667. >>> inputs = dict(image=images, audio=audio, label=torch.zeros((images.shape[0], 700)))
  1668. >>> model = PerceiverForMultimodalAutoencoding.from_pretrained("deepmind/multimodal-perceiver")
  1669. >>> # in the Perceiver IO paper, videos are auto-encoded in chunks
  1670. >>> # each chunk subsamples different index dimensions of the image and audio modality decoder queries
  1671. >>> nchunks = 128
  1672. >>> image_chunk_size = np.prod((16, 224, 224)) // nchunks
  1673. >>> audio_chunk_size = audio.shape[1] // model.config.samples_per_patch // nchunks
  1674. >>> # process the first chunk
  1675. >>> chunk_idx = 0
  1676. >>> subsampling = {
  1677. ... "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)),
  1678. ... "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)),
  1679. ... "label": None,
  1680. ... }
  1681. >>> outputs = model(inputs=inputs, subsampled_output_points=subsampling)
  1682. >>> logits = outputs.logits
  1683. >>> list(logits["audio"].shape)
  1684. [1, 240]
  1685. >>> list(logits["image"].shape)
  1686. [1, 6272, 3]
  1687. >>> list(logits["label"].shape)
  1688. [1, 700]
  1689. ```"""
  1690. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1691. loss = None
  1692. if labels is not None:
  1693. raise NotImplementedError("Multimodal autoencoding training is not yet supported")
  1694. outputs = self.perceiver(
  1695. inputs=inputs,
  1696. attention_mask=attention_mask,
  1697. subsampled_output_points=subsampled_output_points,
  1698. head_mask=head_mask,
  1699. output_attentions=output_attentions,
  1700. output_hidden_states=output_hidden_states,
  1701. return_dict=return_dict,
  1702. )
  1703. logits = outputs.logits if return_dict else outputs[0]
  1704. if not return_dict:
  1705. output = (logits,) + outputs[2:]
  1706. return ((loss,) + output) if loss is not None else output
  1707. return PerceiverClassifierOutput(
  1708. loss=loss,
  1709. logits=logits,
  1710. hidden_states=outputs.hidden_states,
  1711. attentions=outputs.attentions,
  1712. cross_attentions=outputs.cross_attentions,
  1713. )
  1714. # Below: position encodings
  1715. def build_position_encoding(
  1716. position_encoding_type,
  1717. out_channels=None,
  1718. project_pos_dim=-1,
  1719. trainable_position_encoding_kwargs=None,
  1720. fourier_position_encoding_kwargs=None,
  1721. ):
  1722. """
  1723. Builds the position encoding.
  1724. Args:
1725. - out_channels: the number of channels of the position encodings.
1726. - project_pos_dim: if > 0, the position encodings are projected to this dimension; otherwise they are left unchanged.
  1727. """
  1728. if position_encoding_type == "trainable":
  1729. if not trainable_position_encoding_kwargs:
  1730. raise ValueError("Make sure to pass trainable_position_encoding_kwargs")
  1731. output_pos_enc = PerceiverTrainablePositionEncoding(**trainable_position_encoding_kwargs)
  1732. elif position_encoding_type == "fourier":
  1733. # We don't use the index_dims argument, as this is only known during the forward pass
  1734. if not fourier_position_encoding_kwargs:
  1735. raise ValueError("Make sure to pass fourier_position_encoding_kwargs")
  1736. output_pos_enc = PerceiverFourierPositionEncoding(**fourier_position_encoding_kwargs)
  1737. else:
  1738. raise ValueError(f"Unknown position encoding type: {position_encoding_type}.")
  1739. # Optionally, project the position encoding to a target dimension:
  1740. positions_projection = nn.Linear(out_channels, project_pos_dim) if project_pos_dim > 0 else nn.Identity()
  1741. return output_pos_enc, positions_projection
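# Hedged usage sketch (illustrative only): how the models above typically combine
# `build_position_encoding` with the optional projection. The concrete numbers mirror the
# "learned" image-classification configuration; anything else here is an assumption.
def _example_trainable_position_encoding(batch_size: int = 1) -> torch.Tensor:
    pos_enc, projection = build_position_encoding(
        position_encoding_type="trainable",
        out_channels=256,
        project_pos_dim=256,
        trainable_position_encoding_kwargs={"num_channels": 256, "index_dims": 224**2},
    )
    # Learned embeddings of shape (batch_size, 224 * 224, 256), followed by a linear projection.
    return projection(pos_enc(batch_size))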
  1742. # Below: Perceiver decoders
  1743. class PerceiverAbstractDecoder(nn.Module, metaclass=abc.ABCMeta):
  1744. """Perceiver abstract decoder."""
  1745. @abc.abstractmethod
  1746. def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
  1747. raise NotImplementedError
  1748. @property
  1749. @abc.abstractmethod
  1750. def num_query_channels(self):
  1751. raise NotImplementedError
  1752. @abc.abstractmethod
  1753. def forward(self, query, z, query_mask=None):
  1754. raise NotImplementedError
  1755. class PerceiverProjectionDecoder(PerceiverAbstractDecoder):
  1756. """
  1757. Baseline projection decoder (no cross-attention).
  1758. Args:
  1759. config ([`PerceiverConfig`]):
  1760. Model configuration.
  1761. """
  1762. def __init__(self, config):
  1763. super().__init__()
  1764. self.classifier = nn.Linear(config.d_latents, config.num_labels)
  1765. def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
  1766. return None
  1767. def forward(
  1768. self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None
  1769. ) -> torch.FloatTensor:
  1770. # (batch_size, num_latents, d_latents) -> (batch_size, d_latents)
  1771. z = torch.mean(z, dim=1)
  1772. # (batch_size, d_latents) -> (batch_size, config.num_labels)
  1773. logits = self.classifier(z)
  1774. return logits
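# Hedged usage sketch for `PerceiverProjectionDecoder` (illustrative; the random latents stand in
# for the encoder output). The decoder ignores its query and simply mean-pools the latents before
# the linear classification head.
def _example_projection_decoder(config: PerceiverConfig) -> torch.Tensor:
    decoder = PerceiverProjectionDecoder(config)
    latents = torch.randn(2, config.num_latents, config.d_latents)
    # (2, num_latents, d_latents) -> (2, config.num_labels)
    return decoder(query=None, z=latents)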
  1775. class PerceiverBasicDecoder(PerceiverAbstractDecoder):
  1776. """
  1777. Cross-attention-based decoder. This class can be used to decode the final hidden states of the latents using a
  1778. cross-attention operation, in which the latents produce keys and values.
  1779. The shape of the output of this class depends on how one defines the output queries (also called decoder queries).
  1780. Args:
  1781. config ([*PerceiverConfig*]):
  1782. Model configuration.
1783. output_num_channels (`int`):
1784. The number of channels in the output. Only used if *final_project* is set to `True`.
  1785. position_encoding_type (`str`, *optional*, defaults to "trainable"):
  1786. The type of position encoding to use. Can be either "trainable", "fourier", or "none".
  1787. output_index_dims (`int`, *optional*):
  1788. The number of dimensions of the output queries. Ignored if 'position_encoding_type' == 'none'.
  1789. num_channels (`int`, *optional*, defaults to 128):
  1790. The number of channels of the decoder queries. Ignored if 'position_encoding_type' == 'none'.
  1791. qk_channels (`int`, *optional*):
  1792. The number of channels of the queries and keys in the cross-attention layer.
  1793. v_channels (`int`, *optional*):
  1794. The number of channels of the values in the cross-attention layer.
  1795. num_heads (`int`, *optional*, defaults to 1):
  1796. The number of attention heads in the cross-attention layer.
  1797. widening_factor (`int`, *optional*, defaults to 1):
  1798. The widening factor of the cross-attention layer.
  1799. use_query_residual (`bool`, *optional*, defaults to `False`):
  1800. Whether to use a residual connection between the query and the output of the cross-attention layer.
  1801. concat_preprocessed_input (`bool`, *optional*, defaults to `False`):
  1802. Whether to concatenate the preprocessed input to the query.
  1803. final_project (`bool`, *optional*, defaults to `True`):
  1804. Whether to project the output of the cross-attention layer to a target dimension.
  1805. position_encoding_only (`bool`, *optional*, defaults to `False`):
  1806. Whether to only use this class to define output queries.
  1807. """
  1808. def __init__(
  1809. self,
  1810. config: PerceiverConfig,
  1811. output_num_channels: int,
  1812. position_encoding_type: Optional[str] = "trainable",
  1813. # The following 2 arguments are ignored if position_encoding_type == 'none':
  1814. output_index_dims: Optional[int] = None,
  1815. num_channels: Optional[int] = 128,
  1816. subsampled_index_dims: Optional[int] = None,
  1817. qk_channels: Optional[int] = None,
  1818. v_channels: Optional[int] = None,
  1819. num_heads: Optional[int] = 1,
  1820. widening_factor: Optional[int] = 1,
  1821. use_query_residual: Optional[bool] = False,
  1822. concat_preprocessed_input: Optional[bool] = False,
  1823. final_project: Optional[bool] = True,
  1824. position_encoding_only: Optional[bool] = False,
  1825. **position_encoding_kwargs,
  1826. ) -> None:
  1827. super().__init__()
  1828. self.output_num_channels = output_num_channels
  1829. # If `none`, the decoder will not construct any position encodings.
  1830. # You should construct your own when querying the decoder.
  1831. self.output_position_encodings = None
  1832. self.position_encoding_type = position_encoding_type
  1833. self.position_encoding_kwargs = position_encoding_kwargs
  1834. if position_encoding_type != "none":
  1835. self.output_position_encodings, self.positions_projection = build_position_encoding(
  1836. position_encoding_type=position_encoding_type, **position_encoding_kwargs
  1837. )
  1838. self.output_index_dims = output_index_dims
  1839. self.num_channels = num_channels
  1840. if subsampled_index_dims is None:
  1841. subsampled_index_dims = output_index_dims
  1842. self.subsampled_index_dims = subsampled_index_dims
  1843. self.concat_preprocessed_input = concat_preprocessed_input
  1844. self.final_project = final_project
  1845. self.position_encoding_only = position_encoding_only
1846. # For multimodal autoencoding, the decoder cross-attention and final layer are not needed,
1847. # so position_encoding_only is set to True in that case.
  1848. if not self.position_encoding_only:
  1849. self.decoding_cross_attention = PerceiverLayer(
  1850. config,
  1851. is_cross_attention=True,
  1852. qk_channels=qk_channels,
  1853. v_channels=v_channels,
  1854. num_heads=num_heads,
  1855. q_dim=num_channels,
  1856. kv_dim=config.d_latents,
  1857. widening_factor=widening_factor,
  1858. use_query_residual=use_query_residual,
  1859. )
  1860. self.final_layer = nn.Linear(num_channels, output_num_channels) if final_project else nn.Identity()
  1861. @property
  1862. def num_query_channels(self) -> int:
  1863. if self.position_encoding_type == "none": # Queries come from elsewhere
  1864. raise ValueError(
  1865. "You cannot calculate number of decoder query channels when position_encoding_type is set to none"
  1866. )
  1867. if self.position_encoding_only:
  1868. if "project_pos_dim" in self.position_encoding_kwargs:
  1869. return self.position_encoding_kwargs["project_pos_dim"]
  1870. return self.output_position_encodings.output_size()
  1871. if self.final_project:
  1872. return self.output_num_channels
  1873. return self.num_channels
  1874. def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
  1875. if self.position_encoding_type == "none": # Queries come from elsewhere
  1876. raise ValueError("You cannot construct decoder queries when position_encoding_type is set to none")
  1877. if subsampled_points is not None:
1878. # subsampled_points are indices into the flattened output array;
1879. # the outputs aren't actually flattened, so we use unravel_index
1880. # to recover the indices for the unflattened array.
1881. # unravel_index returns a tuple (x_idx, y_idx, ...);
1882. # stack these to get the [n, d] tensor of coordinates
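# e.g. (illustrative): with output_index_dims (16, 224, 224), the flat index 230 unravels to (0, 1, 6)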
  1883. indices = [torch.from_numpy(x) for x in np.unravel_index(subsampled_points.cpu(), self.output_index_dims)]
  1884. pos = torch.stack(indices, dim=1)
  1885. batch_size = inputs.shape[0]
  1886. # Map these coordinates to [-1, 1]
  1887. pos = -1 + 2 * pos / torch.tensor(self.output_index_dims)[None, :]
  1888. pos = torch.broadcast_to(pos[None], [batch_size, pos.shape[0], pos.shape[1]])
  1889. # Construct the position encoding.
  1890. if self.position_encoding_type == "trainable":
  1891. pos_emb = self.output_position_encodings(batch_size)
  1892. elif self.position_encoding_type == "fourier":
  1893. pos_emb = self.output_position_encodings(
  1894. self.output_index_dims, batch_size=batch_size, device=inputs.device, dtype=inputs.dtype, pos=pos
  1895. )
  1896. # Optionally project them to a target dimension.
  1897. pos_emb = self.positions_projection(pos_emb)
  1898. pos_emb = torch.reshape(pos_emb, [pos_emb.shape[0], -1, pos_emb.shape[-1]])
  1899. else:
  1900. batch_size = inputs.shape[0]
  1901. index_dims = inputs.shape[2:]
  1902. # Construct the position encoding.
  1903. if self.position_encoding_type == "trainable":
  1904. pos_emb = self.output_position_encodings(batch_size)
  1905. elif self.position_encoding_type == "fourier":
  1906. pos_emb = self.output_position_encodings(
  1907. index_dims, batch_size, device=inputs.device, dtype=inputs.dtype
  1908. )
  1909. # Optionally project them to a target dimension.
  1910. pos_emb = self.positions_projection(pos_emb)
  1911. if self.concat_preprocessed_input:
  1912. if inputs_without_pos is None:
  1913. raise ValueError("Value is required for inputs_without_pos if concat_preprocessed_input is True")
  1914. pos_emb = torch.cat([inputs_without_pos, pos_emb], dim=-1)
  1915. return pos_emb
  1916. def forward(
  1917. self,
  1918. query: torch.Tensor,
  1919. z: torch.FloatTensor,
  1920. query_mask: Optional[torch.FloatTensor] = None,
  1921. output_attentions: Optional[bool] = False,
  1922. ) -> PerceiverDecoderOutput:
  1923. # Cross-attention decoding.
  1924. # key, value: B x N x K; query: B x M x K
  1925. # Attention maps -> B x N x M
  1926. # Output -> B x M x K
  1927. cross_attentions = () if output_attentions else None
  1928. layer_outputs = self.decoding_cross_attention(
  1929. query,
  1930. attention_mask=query_mask,
  1931. head_mask=None,
  1932. inputs=z,
  1933. inputs_mask=None,
  1934. output_attentions=output_attentions,
  1935. )
  1936. output = layer_outputs[0]
  1937. if output_attentions:
  1938. cross_attentions = cross_attentions + (layer_outputs[1],)
  1939. logits = self.final_layer(output)
  1940. return PerceiverDecoderOutput(logits=logits, cross_attentions=cross_attentions)
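# Hedged usage sketch for `PerceiverBasicDecoder` (illustrative only): the configuration mirrors
# the masked-LM decoder above, while the random inputs and latents are assumptions standing in
# for preprocessed inputs and the encoder output.
def _example_basic_decoder(config: PerceiverConfig) -> torch.Tensor:
    decoder = PerceiverBasicDecoder(
        config,
        output_num_channels=config.d_latents,
        output_index_dims=config.max_position_embeddings,
        num_channels=config.d_model,
        final_project=False,
        trainable_position_encoding_kwargs={
            "num_channels": config.d_model,
            "index_dims": config.max_position_embeddings,
        },
    )
    inputs = torch.randn(2, config.max_position_embeddings, config.d_model)
    latents = torch.randn(2, config.num_latents, config.d_latents)
    # Trainable decoder queries of shape (2, max_position_embeddings, d_model) cross-attend to the latents.
    query = decoder.decoder_query(inputs)
    return decoder(query, z=latents).logits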
  1941. class PerceiverClassificationDecoder(PerceiverAbstractDecoder):
  1942. """
  1943. Cross-attention based classification decoder. Light-weight wrapper of [`PerceiverBasicDecoder`] for logit output.
1944. Turns the output of the Perceiver encoder, which is of shape (batch_size, num_latents, d_latents), into a tensor of
1945. shape (batch_size, num_labels). The queries are of shape (batch_size, 1, num_channels).
  1946. Args:
  1947. config ([`PerceiverConfig`]):
  1948. Model configuration.
  1949. """
  1950. def __init__(self, config, **decoder_kwargs):
  1951. super().__init__()
  1952. self.num_labels = config.num_labels
  1953. self.decoder = PerceiverBasicDecoder(
  1954. config,
  1955. output_num_channels=self.num_labels,
  1956. output_index_dims=1, # Predict a single logit array.
  1957. **decoder_kwargs,
  1958. )
  1959. @property
  1960. def num_query_channels(self) -> int:
  1961. return self.decoder.num_query_channels
  1962. def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
  1963. return self.decoder.decoder_query(
  1964. inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_points
  1965. )
  1966. def forward(
  1967. self,
  1968. query: torch.Tensor,
  1969. z: torch.FloatTensor,
  1970. query_mask: Optional[torch.FloatTensor] = None,
  1971. output_attentions: Optional[bool] = False,
  1972. ) -> PerceiverDecoderOutput:
  1973. decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
  1974. # B x 1 x num_classes -> B x num_classes
  1975. logits = decoder_outputs.logits[:, 0, :]
  1976. return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)
  1977. class PerceiverOpticalFlowDecoder(PerceiverAbstractDecoder):
  1978. """Cross-attention based optical flow decoder."""
  1979. def __init__(self, config, output_image_shape, output_num_channels=2, rescale_factor=100.0, **decoder_kwargs):
  1980. super().__init__()
  1981. self.output_image_shape = output_image_shape
  1982. self.output_num_channels = output_num_channels
  1983. self.rescale_factor = rescale_factor
  1984. self.decoder = PerceiverBasicDecoder(config, output_num_channels=output_num_channels, **decoder_kwargs)
  1985. @property
  1986. def num_query_channels(self) -> int:
  1987. return self.decoder.num_query_channels
  1988. def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
  1989. if subsampled_points is not None:
  1990. raise ValueError("FlowDecoder doesn't support subsampling yet.")
  1991. return inputs
  1992. def forward(
  1993. self,
  1994. query: torch.Tensor,
  1995. z: torch.FloatTensor,
  1996. query_mask: Optional[torch.FloatTensor] = None,
  1997. output_attentions: Optional[bool] = False,
  1998. ) -> PerceiverDecoderOutput:
  1999. decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
  2000. preds = decoder_outputs.logits
  2001. # Output flow and rescale.
  2002. preds /= self.rescale_factor
  2003. preds = preds.reshape([preds.shape[0]] + list(self.output_image_shape) + [preds.shape[-1]])
  2004. return PerceiverDecoderOutput(logits=preds, cross_attentions=decoder_outputs.cross_attentions)
  2005. class PerceiverBasicVideoAutoencodingDecoder(PerceiverAbstractDecoder):
  2006. """
  2007. Cross-attention based video-autoencoding decoder. Light-weight wrapper of [*PerceiverBasicDecoder*] with video
  2008. reshaping logic.
  2009. Args:
  2010. config ([*PerceiverConfig*]):
  2011. Model configuration.
  2012. output_shape (`List[int]`):
  2013. Shape of the output as (batch_size, num_frames, height, width), excluding the channel dimension.
  2014. position_encoding_type (`str`):
  2015. The type of position encoding to use. Can be either "trainable", "fourier", or "none".
  2016. """
  2017. def __init__(
  2018. self, config: PerceiverConfig, output_shape: List[int], position_encoding_type: str, **decoder_kwargs
  2019. ) -> None:
  2020. super().__init__()
  2021. if len(output_shape) != 4: # B, T, H, W
  2022. raise ValueError(f"Expected rank 4 output_shape, got {output_shape}.")
  2023. # Build the decoder components:
  2024. self.output_shape = output_shape
  2025. self.output_num_channels = decoder_kwargs["output_num_channels"]
  2026. self.decoder = PerceiverBasicDecoder(
  2027. config,
2028. output_index_dims=self.output_shape[1:4], # (T, H, W)
  2029. position_encoding_type=position_encoding_type,
  2030. **decoder_kwargs,
  2031. )
  2032. @property
  2033. def num_query_channels(self) -> int:
  2034. return self.decoder.num_query_channels
  2035. def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
  2036. return self.decoder.decoder_query(
  2037. inputs,
  2038. modality_sizes=modality_sizes,
  2039. inputs_without_pos=inputs_without_pos,
  2040. subsampled_points=subsampled_points,
  2041. )
  2042. def forward(
  2043. self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None
  2044. ) -> PerceiverDecoderOutput:
  2045. decoder_outputs = self.decoder(query, z)
  2046. logits = decoder_outputs.logits
  2047. logits = torch.reshape(logits, self.output_shape + [logits.shape[-1]])
  2048. return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)
  2049. def restructure(modality_sizes: ModalitySizeType, inputs: torch.Tensor) -> Mapping[str, torch.Tensor]:
  2050. """
  2051. Partitions a [B, N, C] tensor into tensors for each modality.
  2052. Args:
  2053. modality_sizes
  2054. dict specifying the size of the modality
  2055. inputs:
  2056. input tensor
  2057. Returns:
  2058. dict mapping name of modality to its associated tensor.
  2059. """
  2060. outputs = {}
  2061. index = 0
  2062. # Apply a predictable ordering to the modalities
  2063. for modality in sorted(modality_sizes.keys()):
  2064. size = modality_sizes[modality]
  2065. inp = inputs[:, index : index + size]
  2066. index += size
  2067. outputs[modality] = inp
  2068. return outputs
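

# Example (illustrative sketch, not part of the original module): `restructure` undoes the
# concatenation performed by the multimodal preprocessor, slicing the token axis per modality
# in sorted-key order:
#
#     modality_sizes = {"audio": 10, "image": 20}
#     inputs = torch.randn(2, 30, 64)               # [B, N, C] with N = 10 + 20
#     outputs = restructure(modality_sizes, inputs)
#     # outputs["audio"].shape -> torch.Size([2, 10, 64])
#     # outputs["image"].shape -> torch.Size([2, 20, 64])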


class PerceiverMultimodalDecoder(PerceiverAbstractDecoder):
    """
    Multimodal decoding by composing uni-modal decoders. The *modalities* argument of the constructor is a dictionary
    mapping modality name to the decoder of that modality. That decoder will be used to construct queries for that
    modality. Modality-specific queries are padded with trainable modality-specific parameters, after which they are
    concatenated along the time dimension.

    Next, there is a shared cross attention operation across all modalities.

    Args:
        config ([*PerceiverConfig*]):
            Model configuration.
        modalities (`Dict[str, PerceiverAbstractDecoder]`):
            Dictionary mapping modality name to the decoder of that modality.
        num_outputs (`int`):
            The number of outputs of the decoder.
        output_num_channels (`int`):
            The number of channels in the output.
        min_padding_size (`int`, *optional*, defaults to 2):
            The minimum padding size for all modalities. The final output will have num_channels equal to the maximum
            channels across all modalities plus min_padding_size.
        subsampled_index_dims (`Dict[str, PerceiverAbstractDecoder]`, *optional*):
            Dictionary mapping modality name to the subsampled index dimensions to use for the decoder query of that
            modality.
    """

    def __init__(
        self,
        config: PerceiverConfig,
        modalities: Dict[str, PerceiverAbstractDecoder],
        num_outputs: int,
        output_num_channels: int,
        min_padding_size: Optional[int] = 2,
        subsampled_index_dims: Optional[Dict[str, PerceiverAbstractDecoder]] = None,
        **decoder_kwargs,
    ) -> None:
        super().__init__()
        self.modalities = nn.ModuleDict(modalities)
        self.subsampled_index_dims = subsampled_index_dims
        self.min_padding_size = min_padding_size
        self.output_num_channels = output_num_channels
        self.num_outputs = num_outputs
        self.decoder = PerceiverBasicDecoder(
            config,
            output_index_dims=(num_outputs,),
            output_num_channels=output_num_channels,
            position_encoding_type="none",
            num_channels=self.num_query_channels,
            **decoder_kwargs,
        )
        self.padding = nn.ParameterDict(
            {
                modality: nn.Parameter(torch.randn(1, self.num_query_channels - decoder.num_query_channels))
                for modality, decoder in modalities.items()
            }
        )

    @property
    def num_query_channels(self) -> int:
        max_channel_size = max(decoder.num_query_channels for _, decoder in self.modalities.items())
        common_channel_size = max_channel_size + self.min_padding_size
        return common_channel_size

    def decoder_query(self, inputs, modality_sizes, inputs_without_pos=None, subsampled_points=None):
        # Partition the flat inputs among the different modalities
        inputs = restructure(modality_sizes, inputs)

        # Obtain modality-specific decoders' queries
        subsampled_points = subsampled_points or {}

        decoder_queries = {}
        for modality, decoder in self.modalities.items():
            # Get input_without_pos for this modality if it exists.
            input_without_pos = None
            if inputs_without_pos is not None:
                input_without_pos = inputs_without_pos.get(modality, None)
            query = decoder.decoder_query(
                inputs=inputs[modality],
                modality_sizes=None,
                inputs_without_pos=input_without_pos,
                subsampled_points=subsampled_points.get(modality, None),
            )
            decoder_queries[modality] = query

        # Pad all queries with trainable position encodings to make them have the same channels
        def embed(modality, x):
            x = torch.reshape(x, [x.shape[0], np.prod(x.shape[1:-1]), x.shape[-1]])
            pos = self.padding[modality]
            pos = torch.broadcast_to(pos, [x.shape[0], x.shape[1], self.num_query_channels - x.shape[2]])
            return torch.cat([x, pos], dim=2)

        # Apply a predictable ordering to the modalities
        return torch.cat(
            [embed(modality, decoder_queries[modality]) for modality in sorted(self.modalities.keys())], dim=1
        )

    def forward(
        self,
        query: torch.Tensor,
        z: torch.FloatTensor,
        query_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> PerceiverDecoderOutput:
        # B x 1 x num_classes -> B x num_classes
        decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)

        return decoder_outputs
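

# Example (illustrative, with made-up channel counts): if the image decoder builds queries with
# 195 channels and the audio decoder with 385 channels, and min_padding_size=2, then
# num_query_channels = max(195, 385) + 2 = 387. Image queries are padded with a trainable
# 387 - 195 = 192-channel vector and audio queries with a 387 - 385 = 2-channel vector before
# being concatenated along the sequence (time) dimension and decoded by a shared cross-attention.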


# Below: IO pre- and post-processor classes for Perceiver.
def space_to_depth(frames: torch.Tensor, temporal_block_size: int = 1, spatial_block_size: int = 1) -> torch.Tensor:
    """
    Space to depth transform. Rearranges blocks of spatial data into depth.

    This function assumes the channels to be first, but will place the channels last after transformation.

    Based on https://discuss.pytorch.org/t/is-there-any-layer-like-tensorflows-space-to-depth-function/3487/15.
    """
    if len(frames.shape) == 4:
        batch_size, num_channels, height, width = frames.shape
        # split up dimensions (height by spatial_block_size, width by spatial_block_size)
        frames = frames.view(
            batch_size,
            num_channels,
            height // spatial_block_size,
            spatial_block_size,
            width // spatial_block_size,
            spatial_block_size,
        )
        # move blocks to last dimension: (batch_size, H//bs, W//bs, bs, bs, C)
        frames = frames.permute(0, 2, 4, 3, 5, 1).contiguous()
        # concatenate blocks along channel dimension: (batch_size, H//bs, W//bs, bs*bs*C)
        frames = frames.view(
            batch_size,
            height // spatial_block_size,
            width // spatial_block_size,
            (spatial_block_size**2) * num_channels,
        )
        return frames
    elif len(frames.shape) == 5:
        batch_size, time, num_channels, height, width = frames.shape
        # split up dimensions (time by temporal_block_size, height by spatial_block_size, width by spatial_block_size)
        frames = frames.view(
            batch_size,
            time // temporal_block_size,
            temporal_block_size,
            num_channels,
            height // spatial_block_size,
            spatial_block_size,
            width // spatial_block_size,
            spatial_block_size,
        )
        # move blocks to last dimension: (batch_size, T//ts, H//bs, W//bs, ts, bs, bs, C)
        frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
        # concatenate blocks along channel dimension: (batch_size, T//ts, H//bs, W//bs, ts*bs*bs*C)
        frames = frames.view(
            batch_size,
            time // temporal_block_size,
            height // spatial_block_size,
            width // spatial_block_size,
            temporal_block_size * (spatial_block_size**2) * num_channels,
        )
        return frames
    else:
        raise ValueError(
            "Frames should be of rank 4 (batch, channels, height, width)"
            " or rank 5 (batch, time, channels, height, width)"
        )
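

# Example (illustrative sketch): with spatial_block_size=2, a rank-4 input of shape
# (batch, channels, height, width) = (1, 3, 224, 224) becomes (1, 112, 112, 12): each 2x2
# spatial block is folded into the channel dimension (2 * 2 * 3 = 12) and channels move last.
#
#     frames = torch.randn(1, 3, 224, 224)
#     out = space_to_depth(frames, spatial_block_size=2)
#     # out.shape -> torch.Size([1, 112, 112, 12])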


class Conv2dSamePadding(nn.Conv2d):
    """
    Conv2d layer with padding="same" support. Source:
    https://gist.github.com/sumanmichael/4de9dee93f972d47c80c4ade8e149ea6
    """

    def __init__(self, *args, **kwargs):
        super(Conv2dSamePadding, self).__init__(*args, **kwargs)
        self.zero_pad_2d = nn.ZeroPad2d(
            reduce(__add__, [(k // 2 + (k - 2 * (k // 2)) - 1, k // 2) for k in self.kernel_size[::-1]])
        )

    def forward(self, input):
        return self._conv_forward(self.zero_pad_2d(input), self.weight, self.bias)


class Conv2DDownsample(nn.Module):
    """Downsamples 4x by applying a 2D convolution and doing max pooling."""

    def __init__(
        self,
        num_layers: int = 1,
        in_channels: int = 3,
        out_channels: int = 64,
        use_batchnorm: bool = True,
    ):
        """
        Constructs a Conv2DDownsample model.

        Args:
            in_channels (`int`, *optional*, defaults to 3):
                The number of input channels.
            out_channels (`int`, *optional*, defaults to 64):
                The number of conv output channels.
            use_batchnorm (`bool`, *optional*, defaults to `True`):
                Whether to use batchnorm.
        """
        super().__init__()

        self.conv = Conv2dSamePadding(
            in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, bias=False
        )
        self.batchnorm = nn.BatchNorm2d(num_features=out_channels) if use_batchnorm else nn.Identity()
        self.relu = nn.ReLU()
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        out = self.conv(inputs)
        out = self.batchnorm(out)
        out = self.relu(out)
        out = self.max_pool(out)
        return out
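

# Example (illustrative sketch): the stride-2 "same"-padded 7x7 conv halves the spatial size and
# the 3x3 stride-2 max pool (no padding) roughly halves it again, giving ~4x downsampling:
#
#     layer = Conv2DDownsample(in_channels=3, out_channels=64)
#     out = layer(torch.randn(1, 3, 224, 224))
#     # out.shape -> torch.Size([1, 64, 55, 55])   # 224 -> 112 (conv) -> 55 (max pool)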


def generate_fourier_features(pos, num_bands, max_resolution=(224, 224), concat_pos=True, sine_only=False):
    """
    Generate a Fourier frequency position encoding with linear spacing.

    Args:
        pos (`torch.LongTensor` of shape `(batch_size, sequence_length, dim)`):
            The Tensor containing the position of n points in d dimensional space.
        num_bands (`int`):
            The number of frequency bands (K) to use.
        max_resolution (`Tuple[int]`, *optional*, defaults to (224, 224)):
            The maximum resolution (i.e. the number of pixels per dim). A tuple representing resolution for each
            dimension.
        concat_pos (`bool`, *optional*, defaults to `True`):
            Whether to concatenate the input position encoding to the Fourier features.
        sine_only (`bool`, *optional*, defaults to `False`):
            Whether to use a single phase (sin) or two (sin/cos) for each frequency band.

    Returns:
        `torch.FloatTensor` of shape `(batch_size, sequence_length, n_channels)`: The Fourier position embeddings. If
        `concat_pos` is `True` and `sine_only` is `False`, output dimensions are ordered as: [dim_1, dim_2, ..., dim_d,
        sin(pi*f_1*dim_1), ..., sin(pi*f_K*dim_1), ..., sin(pi*f_1*dim_d), ..., sin(pi*f_K*dim_d), cos(pi*f_1*dim_1),
        ..., cos(pi*f_K*dim_1), ..., cos(pi*f_1*dim_d), ..., cos(pi*f_K*dim_d)], where dim_i is pos[:, i] and f_k is
        the kth frequency band.
    """

    batch_size = pos.shape[0]

    min_freq = 1.0
    # Nyquist frequency at the target resolution:
    freq_bands = torch.stack(
        [torch.linspace(start=min_freq, end=res / 2, steps=num_bands) for res in max_resolution], dim=0
    )

    # Get frequency bands for each spatial dimension.
    # Output is size [n, d * num_bands]
    per_pos_features = pos[0, :, :][:, :, None] * freq_bands[None, :, :]
    per_pos_features = torch.reshape(per_pos_features, [-1, np.prod(per_pos_features.shape[1:])])

    if sine_only:
        # Output is size [n, d * num_bands]
        per_pos_features = torch.sin(np.pi * (per_pos_features))
    else:
        # Output is size [n, 2 * d * num_bands]
        per_pos_features = torch.cat(
            [torch.sin(np.pi * per_pos_features), torch.cos(np.pi * per_pos_features)], dim=-1
        )
    # Concatenate the raw input positions.
    if concat_pos:
        # Adds d bands to the encoding.
        per_pos_features = torch.cat([pos, per_pos_features.expand(batch_size, -1, -1)], dim=-1)
    return per_pos_features
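

# Example (illustrative): for d-dimensional positions with K frequency bands, the encoding has
# d * K channels per phase, doubled when both sin and cos are used, plus d extra channels when
# the raw positions are concatenated. With d=2, num_bands=64, sine_only=False, concat_pos=True:
# 2 * 64 * 2 + 2 = 258 channels.
#
#     pos = build_linear_positions((16, 16)).reshape(1, 256, 2)
#     features = generate_fourier_features(pos, num_bands=64, max_resolution=(16, 16))
#     # features.shape -> torch.Size([1, 256, 258])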


def build_linear_positions(index_dims, output_range=(-1.0, 1.0)):
    """
    Generate an array of position indices for an N-D input array.

    Args:
        index_dims (`List[int]`):
            The shape of the index dimensions of the input array.
        output_range (`Tuple[float]`, *optional*, defaults to `(-1.0, 1.0)`):
            The min and max values taken by each input index dimension.

    Returns:
        `torch.FloatTensor` of shape `(index_dims[0], index_dims[1], .., index_dims[-1], N)`.
    """

    def _linspace(n_xels_per_dim):
        return torch.linspace(start=output_range[0], end=output_range[1], steps=n_xels_per_dim, dtype=torch.float32)

    dim_ranges = [_linspace(n_xels_per_dim) for n_xels_per_dim in index_dims]
    array_index_grid = meshgrid(*dim_ranges, indexing="ij")

    return torch.stack(array_index_grid, dim=-1)
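

# Example (illustrative): build_linear_positions((3, 3)) returns a (3, 3, 2) grid whose last axis
# holds the (row, col) coordinates, each linearly spaced over [-1, 1]:
#
#     grid = build_linear_positions((3, 3))
#     # grid[0, 0] -> tensor([-1., -1.]),  grid[1, 1] -> tensor([0., 0.]),  grid[2, 2] -> tensor([1., 1.])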


class PerceiverAbstractPositionEncoding(nn.Module, metaclass=abc.ABCMeta):
    """Perceiver abstract position encoding."""

    @property
    @abc.abstractmethod
    def num_dimensions(self) -> int:
        raise NotImplementedError

    @abc.abstractmethod
    def output_size(self, *args, **kwargs) -> int:
        raise NotImplementedError

    @abc.abstractmethod
    def forward(self, batch_size, pos):
        raise NotImplementedError


class PerceiverTrainablePositionEncoding(PerceiverAbstractPositionEncoding):
    """Trainable position encoding."""

    def __init__(self, index_dims, num_channels=128):
        super().__init__()
        self._num_channels = num_channels
        self._index_dims = index_dims
        index_dim = np.prod(index_dims)
        self.position_embeddings = nn.Parameter(torch.randn(index_dim, num_channels))

    @property
    def num_dimensions(self) -> int:
        if isinstance(self._index_dims, int):
            return 1
        return len(self._index_dims)

    def output_size(self, *args, **kwargs) -> int:
        return self._num_channels

    def interpolate_pos_encoding(self, position_embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        num_positions = position_embeddings.shape[0]
        new_height = new_width = torch_int(num_positions**0.5)

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and height == new_height and width == new_width:
            return position_embeddings

        position_embeddings = position_embeddings.reshape(1, new_height, new_width, self._num_channels).permute(
            0, 3, 1, 2
        )

        # resize the stored (new_height, new_width) grid to the current input resolution
        position_embeddings = nn.functional.interpolate(
            position_embeddings,
            size=(height, width),
            mode="bicubic",
            align_corners=False,
        )
        position_embeddings = position_embeddings.reshape(1, self._num_channels, -1).permute(0, 2, 1).squeeze(0)
        return position_embeddings

    def forward(
        self, batch_size: int, interpolate_pos_encoding: bool = False, input_size: torch.Size = None
    ) -> torch.Tensor:
        position_embeddings = self.position_embeddings

        if interpolate_pos_encoding:
            height, width = input_size
            position_embeddings = self.interpolate_pos_encoding(position_embeddings, height, width)

        if batch_size is not None:
            position_embeddings = position_embeddings.expand(batch_size, -1, -1)
        return position_embeddings
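

# Example (illustrative sketch): a trainable encoding over a 16x16 index grid with 256 channels
# stores a (256, 256) parameter; the forward pass broadcasts it over the batch:
#
#     pos_enc = PerceiverTrainablePositionEncoding(index_dims=(16, 16), num_channels=256)
#     out = pos_enc(batch_size=4)
#     # out.shape -> torch.Size([4, 256, 256])   # (batch, prod(index_dims), num_channels)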


def _check_or_build_spatial_positions(pos, index_dims, batch_size):
    """
    Checks or builds spatial position features (x, y, ...).

    Args:
        pos (`torch.FloatTensor`):
            None, or an array of position features. If None, position features are built. Otherwise, their size is
            checked.
        index_dims (`List[int]`):
            An iterable giving the spatial/index size of the data to be featurized.
        batch_size (`int`):
            The batch size of the data to be featurized.

    Returns:
        `torch.FloatTensor` of shape `(batch_size, prod(index_dims), len(index_dims))`: an array of position features.
    """
    if pos is None:
        pos = build_linear_positions(index_dims)
        # equivalent to `torch.broadcast_to(pos[None], (batch_size,) + pos.shape)`
        # but `torch.broadcast_to` cannot be converted to ONNX
        pos = pos[None].expand((batch_size,) + pos.shape)
        pos = torch.reshape(pos, [batch_size, np.prod(index_dims), -1])
    else:
        # Just a warning label: you probably don't want your spatial features to
        # have a different spatial layout than your pos coordinate system.
        # But feel free to override if you think it'll work!
        if pos.shape[-1] != len(index_dims):
            raise ValueError("Spatial features have the wrong number of dimensions.")
    return pos


class PerceiverFourierPositionEncoding(PerceiverAbstractPositionEncoding):
    """Fourier (Sinusoidal) position encoding."""

    def __init__(self, num_bands, max_resolution, concat_pos=True, sine_only=False):
        super().__init__()
        self.num_bands = num_bands
        self.max_resolution = max_resolution
        self.concat_pos = concat_pos
        self.sine_only = sine_only

    @property
    def num_dimensions(self) -> int:
        return len(self.max_resolution)

    def output_size(self):
        """Returns size of positional encodings last dimension."""
        num_dims = len(self.max_resolution)
        encoding_size = self.num_bands * num_dims
        if not self.sine_only:
            encoding_size *= 2
        if self.concat_pos:
            encoding_size += self.num_dimensions

        return encoding_size

    def forward(
        self,
        index_dims: List[int],
        batch_size: int,
        device: torch.device,
        dtype: torch.dtype,
        pos: torch.FloatTensor = None,
    ) -> torch.FloatTensor:
        pos = _check_or_build_spatial_positions(pos, index_dims, batch_size)
        fourier_pos_enc = generate_fourier_features(
            pos,
            num_bands=self.num_bands,
            max_resolution=self.max_resolution,
            concat_pos=self.concat_pos,
            sine_only=self.sine_only,
        ).to(device=device, dtype=dtype)
        return fourier_pos_enc


class AbstractPreprocessor(nn.Module):
    @property
    def num_channels(self) -> int:
        """Returns size of preprocessor output."""
        raise NotImplementedError()


class PerceiverTextPreprocessor(AbstractPreprocessor):
    """
    Text preprocessing for Perceiver Encoder. Can be used to embed `inputs` and add positional encodings.

    The dimensionality of the embeddings is determined by the `d_model` attribute of the configuration.

    Args:
        config ([`PerceiverConfig`]):
            Model configuration.
    """

    def __init__(self, config: PerceiverConfig) -> None:
        super().__init__()
        self.config = config
        self.embeddings = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.d_model)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)

    @property
    def num_channels(self) -> int:
        return self.config.d_model

    def forward(
        self,
        inputs: torch.LongTensor,
        pos: Optional[torch.Tensor] = None,
        network_input_is_1d: bool = True,
        interpolate_pos_encoding: bool = False,
    ):
        embeddings_without_pos = self.embeddings(inputs)

        seq_length = inputs.shape[1]
        position_ids = torch.arange(0, seq_length, device=inputs.device)
        embeddings = embeddings_without_pos + self.position_embeddings(position_ids)

        return embeddings, None, embeddings_without_pos


class PerceiverEmbeddingDecoder(nn.Module):
    """
    Module to decode embeddings (for masked language modeling).

    Args:
        config ([`PerceiverConfig`]):
            Model configuration.
    """

    def __init__(self, config: PerceiverConfig) -> None:
        super().__init__()
        self.config = config
        self.vocab_size = config.vocab_size
        self.bias = nn.Parameter(torch.zeros(self.vocab_size))

    def forward(self, hidden_states: torch.Tensor, embedding_layer: torch.Tensor) -> torch.Tensor:
        batch_size, seq_len, d_model = hidden_states.shape
        # Flatten batch dim
        output = torch.matmul(hidden_states.reshape([-1, d_model]), embedding_layer.weight.transpose(0, 1))
        output = output + self.bias

        return output.reshape([batch_size, seq_len, self.vocab_size])
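

# Example (illustrative sketch): the decoder ties the output projection to the input embedding
# matrix, so logits are a matmul against the transposed embedding weights plus a learned bias.
# The `config` below is an assumed PerceiverConfig with vocab_size=262 and d_model=768.
#
#     embedding_layer = nn.Embedding(num_embeddings=262, embedding_dim=768)
#     hidden_states = torch.randn(2, 2048, 768)
#     decoder = PerceiverEmbeddingDecoder(config)
#     logits = decoder(hidden_states, embedding_layer)
#     # logits.shape -> torch.Size([2, 2048, 262])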


class PerceiverMultimodalPostprocessor(nn.Module):
    """
    Multimodal postprocessing for Perceiver. Can be used to combine modality-specific postprocessors into a single
    postprocessor.

    Args:
        modalities (`Mapping[str, PostprocessorType]`):
            Dictionary mapping modality name to postprocessor class for that modality.
        input_is_dict (`bool`, *optional*, defaults to `False`):
            If True, input is assumed to be dictionary structured, and outputs keep the same dictionary shape. If
            False, input is a tensor which is sliced up during postprocessing by *modality_sizes*.
    """

    def __init__(self, modalities: Mapping[str, PostprocessorType], input_is_dict: bool = False):
        super().__init__()
        self.modalities = nn.ModuleDict(modalities)
        self.input_is_dict = input_is_dict

    def forward(
        self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None
    ) -> Mapping[str, torch.Tensor]:
        if not self.input_is_dict:
            # Slice up modalities by their sizes.
            if modality_sizes is None:
                raise ValueError("Modality sizes should be specified if input is not a dictionary.")
            inputs = restructure(modality_sizes=modality_sizes, inputs=inputs)

        outputs = {
            modality: postprocessor(inputs[modality], pos=pos, modality_sizes=None)
            for modality, postprocessor in self.modalities.items()
        }
        return outputs


class PerceiverClassificationPostprocessor(nn.Module):
    """
    Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits.

    Args:
        config ([*PerceiverConfig*]):
            Model configuration.
        in_channels (`int`):
            Number of channels in the input.
    """

    def __init__(self, config: PerceiverConfig, in_channels: int) -> None:
        super().__init__()
        self.classifier = nn.Linear(in_channels, config.num_labels)

    def forward(self, inputs, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor:
        logits = self.classifier(inputs)
        return logits[:, 0, :]


class PerceiverAudioPostprocessor(nn.Module):
    """
    Audio postprocessing for Perceiver. Can be used to convert the decoder output to audio features.

    Args:
        config ([*PerceiverConfig*]):
            Model configuration.
        in_channels (`int`):
            Number of channels in the input.
        postproc_type (`str`, *optional*, defaults to `"patches"`):
            Postprocessor type to use. Currently, only "patches" is supported.
    """

    def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str = "patches") -> None:
        super().__init__()

        if postproc_type not in ("patches",):  # to be supported: 'conv', 'patches', 'pixels'
            raise ValueError("Invalid postproc_type!")

        # Architecture parameters:
        self.classifier = nn.Linear(in_channels, config.samples_per_patch)

    def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor:
        logits = self.classifier(inputs)
        return torch.reshape(logits, [inputs.shape[0], -1])


class PerceiverProjectionPostprocessor(nn.Module):
    """
    Projection postprocessing for Perceiver. Can be used to project the channels of the decoder output to a lower
    dimension.

    Args:
        in_channels (`int`):
            Number of channels in the input.
        out_channels (`int`):
            Number of channels in the output.
    """

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        self.classifier = nn.Linear(in_channels, out_channels)

    def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor:
        logits = self.classifier(inputs)
        return logits


class PerceiverImagePreprocessor(AbstractPreprocessor):
    """
    Image preprocessing for Perceiver Encoder.

    Note: the *out_channels* argument refers to the output channels of a convolutional layer, if *prep_type* is set to
    "conv1x1" or "conv". If one adds absolute position embeddings, one must make sure the *num_channels* of the
    position encoding kwargs are set equal to the *out_channels*.

    Args:
        config ([*PerceiverConfig*]):
            Model configuration.
        prep_type (`str`, *optional*, defaults to `"conv"`):
            Preprocessing type. Can be "conv1x1", "conv", "patches", "pixels".
        spatial_downsample (`int`, *optional*, defaults to 4):
            Spatial downsampling factor.
        temporal_downsample (`int`, *optional*, defaults to 1):
            Temporal downsampling factor (only relevant in case a time dimension is present).
        position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
            Position encoding type. Can be "fourier" or "trainable".
        in_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input.
        out_channels (`int`, *optional*, defaults to 64):
            Number of channels in the output.
        conv_after_patching (`bool`, *optional*, defaults to `False`):
            Whether to apply a convolutional layer after patching.
        conv_after_patching_in_channels (`int`, *optional*, defaults to 54):
            Number of channels in the input of the convolutional layer after patching.
        conv2d_use_batchnorm (`bool`, *optional*, defaults to `True`):
            Whether to use batch normalization in the convolutional layer.
        concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
            How to concatenate the position encoding to the input. Can be "concat" or "add".
        project_pos_dim (`int`, *optional*, defaults to -1):
            Dimension of the position encoding to project to. If -1, no projection is applied.
        **position_encoding_kwargs (`Dict`, *optional*):
            Keyword arguments for the position encoding.
    """

    def __init__(
        self,
        config,
        prep_type="conv",
        spatial_downsample: int = 4,
        temporal_downsample: int = 1,
        position_encoding_type: str = "fourier",
        in_channels: int = 3,
        out_channels: int = 64,
        conv_after_patching: bool = False,
        conv_after_patching_in_channels: int = 54,  # only relevant when conv_after_patching = True
        conv2d_use_batchnorm: bool = True,
        concat_or_add_pos: str = "concat",
        project_pos_dim: int = -1,
        **position_encoding_kwargs,
    ):
        super().__init__()
        self.config = config

        if prep_type not in ("conv", "patches", "pixels", "conv1x1"):
            raise ValueError(f"Prep_type {prep_type} is invalid")

        if concat_or_add_pos not in ["concat", "add"]:
            raise ValueError(f"Invalid value {concat_or_add_pos} for concat_or_add_pos.")

        self.in_channels = in_channels
        self.prep_type = prep_type
        self.spatial_downsample = spatial_downsample
        self.temporal_downsample = temporal_downsample
        self.position_encoding_type = position_encoding_type
        self.concat_or_add_pos = concat_or_add_pos
        self.conv_after_patching = conv_after_patching
        self.out_channels = out_channels

        if self.prep_type == "conv":
            # Downsampling with conv is currently restricted
            convnet_num_layers = math.log(spatial_downsample, 4)
            convnet_num_layers_is_int = convnet_num_layers == np.round(convnet_num_layers)
            if not convnet_num_layers_is_int or temporal_downsample != 1:
                raise ValueError(
                    "Only powers of 4 expected for spatial and 1 expected for temporal downsampling with conv."
                )
            self.convnet = Conv2DDownsample(
                in_channels=in_channels,
                num_layers=int(convnet_num_layers),
                out_channels=out_channels,
                use_batchnorm=conv2d_use_batchnorm,
            )

        elif self.prep_type == "conv1x1":
            if temporal_downsample != 1:
                raise ValueError("Conv1x1 does not downsample in time.")
            self.convnet_1x1 = nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(1, 1),
                # spatial_downsample is unconstrained for 1x1 convolutions.
                stride=(spatial_downsample, spatial_downsample),
            )

        # Position embeddings
        self.project_pos_dim = project_pos_dim
        self.position_embeddings, self.positions_projection = build_position_encoding(
            position_encoding_type=position_encoding_type,
            out_channels=out_channels,
            project_pos_dim=project_pos_dim,
            **position_encoding_kwargs,
        )

        # Optional convolutional layer after patches.
        self.conv_after_patches = (
            nn.Linear(conv_after_patching_in_channels, self.out_channels) if conv_after_patching else nn.Identity()
        )

    @property
    def num_channels(self) -> int:
        # Let's assume that the number of resolutions (in the context of image preprocessing)
        # of the input data is 2 or 3 depending on whether we are processing image or video respectively.
        # In this case, for convenience, we will declare is_temporal variable,
        # which will show whether the data has a temporal dimension or not.
        is_temporal = self.position_embeddings.num_dimensions > 2

        # position embedding
        if self.project_pos_dim > 0:
            pos_dim = self.project_pos_dim
        else:
            pos_dim = self.position_embeddings.output_size()
        if self.concat_or_add_pos == "add":
            return pos_dim

        # inputs
        if self.conv_after_patching or self.prep_type in ("conv1x1", "conv"):
            inp_dim = self.out_channels
        elif self.prep_type == "pixels":
            inp_dim = self.in_channels
            if not is_temporal:
                inp_dim = math.ceil(inp_dim / self.spatial_downsample)
        elif self.prep_type == "patches":
            if self.conv_after_patching:
                inp_dim = self.out_channels
            else:
                inp_dim = self.in_channels * self.spatial_downsample**2
                if is_temporal:
                    inp_dim *= self.temporal_downsample

        return inp_dim + pos_dim

    def _build_network_inputs(
        self, inputs: torch.Tensor, network_input_is_1d: bool = True, interpolate_pos_encoding: bool = False
    ):
        """
        Construct the final input, including position encoding.

        This method expects the inputs to always have channels as last dimension.
        """
        batch_size = inputs.shape[0]
        input_size = inputs.shape[1:3]
        index_dims = inputs.shape[1:-1]
        indices = np.prod(index_dims)

        # Flatten input features to a 1D index dimension if necessary.
        if len(inputs.shape) > 3 and network_input_is_1d:
            inputs = torch.reshape(inputs, [batch_size, indices, -1])

        # Construct the position encoding.
        if self.position_encoding_type == "trainable":
            pos_enc = self.position_embeddings(batch_size, interpolate_pos_encoding, input_size)
        elif self.position_encoding_type == "fourier":
            pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype)

        # Optionally project them to a target dimension.
        pos_enc = self.positions_projection(pos_enc)

        if not network_input_is_1d:
            # Reshape pos to match the input feature shape
            # if the network takes non-1D inputs
            sh = inputs.shape
            pos_enc = torch.reshape(pos_enc, list(sh)[:-1] + [-1])
        if self.concat_or_add_pos == "concat":
            inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
        elif self.concat_or_add_pos == "add":
            inputs_with_pos = inputs + pos_enc
        return inputs_with_pos, inputs

    def forward(
        self,
        inputs: torch.Tensor,
        pos: Optional[torch.Tensor] = None,
        network_input_is_1d: bool = True,
        interpolate_pos_encoding: bool = False,
    ):
        if self.prep_type == "conv":
            # Convnet image featurization.
            # Downsamples spatially by a factor of 4
            inputs = self.convnet(inputs)

        elif self.prep_type == "conv1x1":
            # map inputs to self.out_channels
            inputs = self.convnet_1x1(inputs)

        elif self.prep_type == "pixels":
            # if requested, downsamples in the crudest way
            if inputs.ndim == 4:
                inputs = inputs[:: self.spatial_downsample, :: self.spatial_downsample]
            elif inputs.ndim == 5:
                inputs = inputs[
                    :, :: self.temporal_downsample, :, :: self.spatial_downsample, :: self.spatial_downsample
                ]
            else:
                raise ValueError("Unsupported data format for pixels.")

        elif self.prep_type == "patches":
            # Space2depth featurization.
            # Video: B x T x C x H x W
            inputs = space_to_depth(
                inputs, temporal_block_size=self.temporal_downsample, spatial_block_size=self.spatial_downsample
            )

            if inputs.ndim == 5 and inputs.shape[1] == 1:
                # for flow
                inputs = inputs.squeeze(dim=1)

            # Optionally apply conv layer.
            inputs = self.conv_after_patches(inputs)

        if self.prep_type != "patches":
            # move channels to last dimension, as the _build_network_inputs method below expects this
            if inputs.ndim == 4:
                inputs = inputs.permute(0, 2, 3, 1)
            elif inputs.ndim == 5:
                inputs = inputs.permute(0, 1, 3, 4, 2)
            else:
                raise ValueError("Unsupported data format for conv1x1.")

        inputs, inputs_without_pos = self._build_network_inputs(inputs, network_input_is_1d, interpolate_pos_encoding)
        modality_sizes = None  # Size for each modality, only needed for multimodal

        return inputs, modality_sizes, inputs_without_pos
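

# Example (illustrative, with assumed settings): with prep_type="conv1x1", out_channels=256,
# concat_or_add_pos="concat" and a 2D Fourier encoding using 64 bands (sin + cos, raw positions
# concatenated), the per-token channel count reported by `num_channels` is
# 256 (features) + 2 * 64 * 2 + 2 (position encoding) = 514.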


class PerceiverOneHotPreprocessor(AbstractPreprocessor):
    """
    One-hot preprocessor for Perceiver Encoder. Can be used to add a dummy index dimension to the input.

    Args:
        config ([`PerceiverConfig`]):
            Model configuration.
    """

    def __init__(self, config: PerceiverConfig) -> None:
        super().__init__()
        self.config: PerceiverConfig = config

    @property
    def num_channels(self) -> int:
        return self.config.num_labels

    def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True):
        # Add a dummy index dimension.
        inputs = inputs[:, None, :]

        # No position encodings, so the 1st (input) and 3rd (inputs_without_pos)
        # outputs are identical.
        return inputs, None, inputs


class PerceiverAudioPreprocessor(AbstractPreprocessor):
    """
    Audio preprocessing for Perceiver Encoder.

    Args:
        config ([*PerceiverConfig*]):
            Model configuration.
        prep_type (`str`, *optional*, defaults to `"patches"`):
            Preprocessor type to use. Only "patches" is supported.
        samples_per_patch (`int`, *optional*, defaults to 96):
            Number of samples per patch.
        position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
            Type of position encoding to use. Can be "trainable" or "fourier".
        concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
            How to concatenate the position encoding to the input. Can be "concat" or "add".
        out_channels (`int`, *optional*, defaults to 64):
            Number of channels in the output.
        project_pos_dim (`int`, *optional*, defaults to -1):
            Dimension of the position encoding to project to. If -1, no projection is applied.
        **position_encoding_kwargs (`Dict`, *optional*):
            Keyword arguments for the position encoding.
    """

    def __init__(
        self,
        config,
        prep_type: str = "patches",
        samples_per_patch: int = 96,
        position_encoding_type: str = "fourier",
        concat_or_add_pos: str = "concat",
        out_channels=64,
        project_pos_dim=-1,
        **position_encoding_kwargs,
    ):
        super().__init__()
        self.config = config

        if prep_type not in ("patches",):
            raise ValueError(f"Prep_type {prep_type} is invalid, can only be 'patches'.")

        if concat_or_add_pos not in ["concat", "add"]:
            raise ValueError(f"Concat_or_pos {concat_or_add_pos} is invalid, can only be 'concat' or 'add'.")

        self.samples_per_patch = samples_per_patch
        self.position_encoding_type = position_encoding_type
        self.concat_or_add_pos = concat_or_add_pos
        self.project_pos_dim = project_pos_dim

        # Position embeddings
        self.position_embeddings, self.positions_projection = build_position_encoding(
            position_encoding_type=position_encoding_type,
            out_channels=out_channels,
            project_pos_dim=project_pos_dim,
            **position_encoding_kwargs,
        )

    @property
    def num_channels(self) -> int:
        # position embedding
        if self.project_pos_dim > 0:
            pos_dim = self.project_pos_dim
        else:
            pos_dim = self.position_embeddings.output_size()
        if self.concat_or_add_pos == "add":
            return pos_dim
        return self.samples_per_patch + pos_dim

    def _build_network_inputs(self, inputs):
        """Construct the final input, including position encoding."""
        batch_size = inputs.shape[0]
        index_dims = inputs.shape[1:-1]

        # Construct the position encoding.
        if self.position_encoding_type == "trainable":
            pos_enc = self.position_embeddings(batch_size)
        elif self.position_encoding_type == "fourier":
            pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype)

        # Optionally project them to a target dimension.
        pos_enc = self.positions_projection(pos_enc)

        if self.concat_or_add_pos == "concat":
            inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
        elif self.concat_or_add_pos == "add":
            inputs_with_pos = inputs + pos_enc

        return inputs_with_pos, inputs

    def forward(
        self,
        inputs: torch.Tensor,
        pos: Optional[torch.Tensor] = None,
        network_input_is_1d: bool = True,
        interpolate_pos_encoding: bool = False,
    ):
        inputs = torch.reshape(inputs, [inputs.shape[0], -1, self.samples_per_patch])

        inputs, inputs_without_pos = self._build_network_inputs(inputs)
        modality_sizes = None  # Size for each modality, only needed for multimodal

        return inputs, modality_sizes, inputs_without_pos


class PerceiverMultimodalPreprocessor(AbstractPreprocessor):
    """
    Multimodal preprocessing for Perceiver Encoder.

    Inputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number
    of channels.

    Args:
        modalities (`Mapping[str, PreprocessorType]`):
            Dict mapping modality name to preprocessor.
        mask_probs (`Dict[str, float]`):
            Dict mapping modality name to masking probability of that modality.
        min_padding_size (`int`, *optional*, defaults to 2):
            The minimum padding size for all modalities. The final output will have num_channels equal to the maximum
            channels across all modalities plus min_padding_size.
    """

    def __init__(
        self,
        modalities: Mapping[str, PreprocessorType],
        mask_probs: Optional[Mapping[str, float]] = None,
        min_padding_size: int = 2,
    ):
        super().__init__()
        self.modalities = nn.ModuleDict(modalities)
        self.min_padding_size = min_padding_size
        self.mask_probs = mask_probs if mask_probs is not None else {}
        self.padding = nn.ParameterDict(
            {
                modality: nn.Parameter(torch.randn(1, self.num_channels - preprocessor.num_channels))
                for modality, preprocessor in modalities.items()
            }
        )
        self.mask = nn.ParameterDict(
            {modality: nn.Parameter(torch.randn(1, self.num_channels)) for modality, _ in self.mask_probs.items()}
        )

    @property
    def num_channels(self) -> int:
        max_channel_size = max(processor.num_channels for _, processor in self.modalities.items())
        common_channel_size = max_channel_size + self.min_padding_size
        return common_channel_size

    def forward(
        self,
        inputs: Mapping[str, torch.Tensor],
        pos: Optional[torch.Tensor] = None,
        network_input_is_1d: bool = True,
        interpolate_pos_encoding: bool = False,
    ) -> PreprocessorOutputType:
        padded = {}
        modality_sizes = {}
        inputs_without_pos = {}
        for modality, preprocessor in self.modalities.items():
            # preprocess each modality using the respective preprocessor.
            output, _, inputs_without_pos[modality] = preprocessor(
                inputs[modality], pos=pos, network_input_is_1d=network_input_is_1d
            )

            # pad to the same common_channel_size.
            batch_size, num_samples, num_channels = output.shape
            pos_enc = self.padding[modality].expand(batch_size, -1, -1)

            padding = torch.broadcast_to(
                pos_enc,
                [batch_size, num_samples, self.num_channels - num_channels],
            )
            output_padded = torch.cat([output, padding], dim=2)

            # mask if required
            if modality in self.mask_probs:
                mask_token = self.mask[modality].expand(batch_size, -1, -1)
                mask_prob = self.mask_probs[modality]
                mask = torch.bernoulli(torch.full([batch_size, num_samples], mask_prob))
                mask = torch.unsqueeze(mask, dim=2).to(mask_token.device)
                output_padded = (1 - mask) * output_padded + mask * mask_token

            padded[modality] = output_padded
            modality_sizes[modality] = output_padded.shape[1]

        # Apply a predictable ordering to the modalities
        padded_ls = [padded[k] for k in sorted(padded.keys())]

        # Finally, concatenate along the time dimension
        final_inputs = torch.cat(padded_ls, dim=1)

        return final_inputs, modality_sizes, inputs_without_pos
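

# Example (illustrative, with made-up channel counts): if the image preprocessor emits 243-channel
# tokens and the audio preprocessor 161-channel tokens, and min_padding_size=2, both are padded to
# num_channels = max(243, 161) + 2 = 245 (2 trainable padding channels for image, 84 for audio).
# The padded modalities are then concatenated along the sequence dimension, and `modality_sizes`
# records how many tokens each modality contributed so that `restructure` can split them apart later.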