# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)
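
# This init follows the Transformers lazy-import pattern: _import_structure
# maps each submodule to the public names it exports, and the heavy imports
# are deferred by _LazyModule until one of those names is first accessed.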
_import_structure = {
    "configuration_gemma": ["GemmaConfig"],
}
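
# Probe each optional backend; when a dependency is missing, its entries are
# simply left out of the import structure rather than failing at import time.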
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gemma"] = ["GemmaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gemma_fast"] = ["GemmaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gemma"] = [
        "GemmaForCausalLM",
        "GemmaModel",
        "GemmaPreTrainedModel",
        "GemmaForSequenceClassification",
        "GemmaForTokenClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gemma"] = [
        "FlaxGemmaForCausalLM",
        "FlaxGemmaModel",
        "FlaxGemmaPreTrainedModel",
    ]
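

# Static type checkers and IDEs follow the real imports in this branch; at
# runtime the module is instead replaced by the _LazyModule proxy below.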
if TYPE_CHECKING:
    from .configuration_gemma import GemmaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gemma import GemmaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gemma_fast import GemmaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gemma import (
            GemmaForCausalLM,
            GemmaForSequenceClassification,
            GemmaForTokenClassification,
            GemmaModel,
            GemmaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gemma import (
            FlaxGemmaForCausalLM,
            FlaxGemmaModel,
            FlaxGemmaPreTrainedModel,
        )

else:
    import sys
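
    # Swap this module for a _LazyModule proxy in sys.modules so that the
    # submodules listed in _import_structure are imported on first access.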
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)