# Dense.py
from __future__ import annotations

import json
import os

import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn

from sentence_transformers.util import fullname, import_from_string
  9. class Dense(nn.Module):
  10. """
  11. Feed-forward function with activation function.
  12. This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
  13. Args:
  14. in_features: Size of the input dimension
  15. out_features: Output size
  16. bias: Add a bias vector
  17. activation_function: Pytorch activation function applied on
  18. output
  19. init_weight: Initial value for the matrix of the linear layer
  20. init_bias: Initial value for the bias of the linear layer
  21. """
  22. def __init__(
  23. self,
  24. in_features: int,
  25. out_features: int,
  26. bias: bool = True,
  27. activation_function=nn.Tanh(),
  28. init_weight: Tensor = None,
  29. init_bias: Tensor = None,
  30. ):
  31. super().__init__()
  32. self.in_features = in_features
  33. self.out_features = out_features
  34. self.bias = bias
  35. self.activation_function = activation_function
  36. self.linear = nn.Linear(in_features, out_features, bias=bias)
  37. if init_weight is not None:
  38. self.linear.weight = nn.Parameter(init_weight)
  39. if init_bias is not None:
  40. self.linear.bias = nn.Parameter(init_bias)
  41. def forward(self, features: dict[str, Tensor]):
  42. features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
  43. return features
  44. def get_sentence_embedding_dimension(self) -> int:
  45. return self.out_features
  46. def get_config_dict(self):
  47. return {
  48. "in_features": self.in_features,
  49. "out_features": self.out_features,
  50. "bias": self.bias,
  51. "activation_function": fullname(self.activation_function),
  52. }
  53. def save(self, output_path, safe_serialization: bool = True) -> None:
  54. with open(os.path.join(output_path, "config.json"), "w") as fOut:
  55. json.dump(self.get_config_dict(), fOut)
  56. if safe_serialization:
  57. save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
  58. else:
  59. torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
  60. def __repr__(self):
  61. return f"Dense({self.get_config_dict()})"
  62. @staticmethod
  63. def load(input_path):
  64. with open(os.path.join(input_path, "config.json")) as fIn:
  65. config = json.load(fIn)
  66. config["activation_function"] = import_from_string(config["activation_function"])()
  67. model = Dense(**config)
  68. if os.path.exists(os.path.join(input_path, "model.safetensors")):
  69. load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
  70. else:
  71. model.load_state_dict(
  72. torch.load(
  73. os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
  74. )
  75. )
  76. return model