  1. # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. from typing import TYPE_CHECKING
  15. from ...utils import (
  16. OptionalDependencyNotAvailable,
  17. _LazyModule,
  18. is_flax_available,
  19. is_sentencepiece_available,
  20. is_tokenizers_available,
  21. is_torch_available,
  22. )
  23. _import_structure = {
  24. "configuration_llama": ["LlamaConfig"],
  25. }
  26. try:
  27. if not is_sentencepiece_available():
  28. raise OptionalDependencyNotAvailable()
  29. except OptionalDependencyNotAvailable:
  30. pass
  31. else:
  32. _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
  33. try:
  34. if not is_tokenizers_available():
  35. raise OptionalDependencyNotAvailable()
  36. except OptionalDependencyNotAvailable:
  37. pass
  38. else:
  39. _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
  40. try:
  41. if not is_torch_available():
  42. raise OptionalDependencyNotAvailable()
  43. except OptionalDependencyNotAvailable:
  44. pass
  45. else:
  46. _import_structure["modeling_llama"] = [
  47. "LlamaForCausalLM",
  48. "LlamaModel",
  49. "LlamaPreTrainedModel",
  50. "LlamaForSequenceClassification",
  51. "LlamaForQuestionAnswering",
  52. "LlamaForTokenClassification",
  53. ]
  54. try:
  55. if not is_flax_available():
  56. raise OptionalDependencyNotAvailable()
  57. except OptionalDependencyNotAvailable:
  58. pass
  59. else:
  60. _import_structure["modeling_flax_llama"] = ["FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxLlamaPreTrainedModel"]
if TYPE_CHECKING:
    # Static type checkers follow this branch (typing.TYPE_CHECKING is False
    # at runtime): symbols are imported eagerly so they resolve for analysis,
    # mirroring the _import_structure mapping built above.
    from .configuration_llama import LlamaConfig
    # Each optional backend is gated exactly like its runtime counterpart:
    # if the dependency probe fails, the import is skipped entirely.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import (
            LlamaForCausalLM,
            LlamaForQuestionAnswering,
            LlamaForSequenceClassification,
            LlamaForTokenClassification,
            LlamaModel,
            LlamaPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_llama import FlaxLlamaForCausalLM, FlaxLlamaModel, FlaxLlamaPreTrainedModel
else:
    # At runtime, replace this module object with a _LazyModule so that each
    # submodule is imported only when one of its attributes is first accessed
    # (keeps `import transformers` cheap when torch/flax are not needed).
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)