BinaryClassificationEvaluator.py

from __future__ import annotations

import csv
import logging
import os
from contextlib import nullcontext
from typing import TYPE_CHECKING

import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances

from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.readers import InputExample
from sentence_transformers.similarity_functions import SimilarityFunction

if TYPE_CHECKING:
    from sentence_transformers.SentenceTransformer import SentenceTransformer

logger = logging.getLogger(__name__)

class BinaryClassificationEvaluator(SentenceEvaluator):
    """
    Evaluate a model based on the similarity of its embeddings by calculating the accuracy of identifying similar and
    dissimilar sentence pairs. The metrics are computed with cosine similarity, dot product, Euclidean distance, and
    Manhattan distance. The returned score is the accuracy with the specified metric.

    The results are written to a CSV file. If the CSV file already exists, values are appended.

    The labels need to be 0 for dissimilar pairs and 1 for similar pairs.

    Args:
        sentences1 (List[str]): The first column of sentences.
        sentences2 (List[str]): The second column of sentences.
        labels (List[int]): labels[i] is the label for the pair (sentences1[i], sentences2[i]). Must be 0 or 1.
        name (str, optional): Name for the output. Defaults to "".
        batch_size (int, optional): Batch size used to compute embeddings. Defaults to 32.
        show_progress_bar (bool, optional): If True, shows a progress bar. Defaults to False.
        write_csv (bool, optional): Write results to a CSV file. Defaults to True.
        truncate_dim (Optional[int], optional): The dimension to truncate sentence embeddings to. `None` uses the
            model's current truncation dimension. Defaults to None.

    Example:
        ::

            from sentence_transformers import SentenceTransformer
            from sentence_transformers.evaluation import BinaryClassificationEvaluator
            from datasets import load_dataset

            # Load a model
            model = SentenceTransformer('all-mpnet-base-v2')

            # Load a dataset with two text columns and a class label column
            # (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
            eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")

            # Initialize the evaluator
            binary_acc_evaluator = BinaryClassificationEvaluator(
                sentences1=eval_dataset["sentence1"],
                sentences2=eval_dataset["sentence2"],
                labels=eval_dataset["label"],
                name="quora-duplicates-dev",
            )
            results = binary_acc_evaluator(model)
            '''
            Binary Accuracy Evaluation of the model on the quora-duplicates-dev dataset:
            Accuracy with Cosine-Similarity: 81.60 (Threshold: 0.8352)
            F1 with Cosine-Similarity: 75.27 (Threshold: 0.7715)
            Precision with Cosine-Similarity: 65.81
            Recall with Cosine-Similarity: 87.89
            Average Precision with Cosine-Similarity: 76.03
            Accuracy with Dot-Product: 81.60 (Threshold: 0.8352)
            F1 with Dot-Product: 75.27 (Threshold: 0.7715)
            Precision with Dot-Product: 65.81
            Recall with Dot-Product: 87.89
            Average Precision with Dot-Product: 76.03
            Accuracy with Manhattan-Distance: 81.50 (Threshold: 12.0727)
            F1 with Manhattan-Distance: 74.97 (Threshold: 15.2269)
            Precision with Manhattan-Distance: 63.89
            Recall with Manhattan-Distance: 90.68
            Average Precision with Manhattan-Distance: 75.66
            Accuracy with Euclidean-Distance: 81.60 (Threshold: 0.5741)
            F1 with Euclidean-Distance: 75.27 (Threshold: 0.6760)
            Precision with Euclidean-Distance: 65.81
            Recall with Euclidean-Distance: 87.89
            Average Precision with Euclidean-Distance: 76.03
            '''
            print(binary_acc_evaluator.primary_metric)
            # => "quora-duplicates-dev_max_ap"
            print(results[binary_acc_evaluator.primary_metric])
            # => 0.760277070888393
    """

    def __init__(
        self,
        sentences1: list[str],
        sentences2: list[str],
        labels: list[int],
        name: str = "",
        batch_size: int = 32,
        show_progress_bar: bool = False,
        write_csv: bool = True,
        truncate_dim: int | None = None,
    ):
        self.sentences1 = sentences1
        self.sentences2 = sentences2
        self.labels = labels
        self.truncate_dim = truncate_dim
        self.primary_metric = "max_ap"

        assert len(self.sentences1) == len(self.sentences2)
        assert len(self.sentences1) == len(self.labels)
        for label in labels:
            assert label == 0 or label == 1

        self.write_csv = write_csv
        self.name = name
        self.batch_size = batch_size
        if show_progress_bar is None:
            show_progress_bar = (
                logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
            )
        self.show_progress_bar = show_progress_bar

        self.csv_file = "binary_classification_evaluation" + ("_" + name if name else "") + "_results.csv"
        self.csv_headers = ["epoch", "steps"]
        metrics = [
            "accuracy",
            "accuracy_threshold",
            "f1",
            "precision",
            "recall",
            "f1_threshold",
            "ap",
        ]
        for v in SimilarityFunction.possible_values():
            for m in metrics:
                self.csv_headers.append(f"{v}_{m}")

    @classmethod
    def from_input_examples(cls, examples: list[InputExample], **kwargs):
        sentences1 = []
        sentences2 = []
        scores = []

        for example in examples:
            sentences1.append(example.texts[0])
            sentences2.append(example.texts[1])
            scores.append(example.label)
        return cls(sentences1, sentences2, scores, **kwargs)
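
    # Illustrative use of `from_input_examples` (a sketch; the example texts, labels, and
    # evaluator name below are hypothetical and not part of this module):
    #
    #     examples = [
    #         InputExample(texts=["A man is eating food.", "A man eats something."], label=1),
    #         InputExample(texts=["A man is eating food.", "The girl is carrying a baby."], label=0),
    #     ]
    #     evaluator = BinaryClassificationEvaluator.from_input_examples(examples, name="dev")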

    def __call__(
        self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
    ) -> dict[str, float]:
        """
        Compute the evaluation metrics for the given model.

        Args:
            model (SentenceTransformer): The model to evaluate.
            output_path (str, optional): Path to save the evaluation results CSV file. Defaults to None.
            epoch (int, optional): The epoch number. Defaults to -1.
            steps (int, optional): The number of steps. Defaults to -1.

        Returns:
            Dict[str, float]: A dictionary containing the evaluation metrics.
        """
        if epoch != -1:
            if steps == -1:
                out_txt = f" after epoch {epoch}"
            else:
                out_txt = f" in epoch {epoch} after {steps} steps"
        else:
            out_txt = ""
        if self.truncate_dim is not None:
            out_txt += f" (truncated to {self.truncate_dim})"

        logger.info(f"Binary Accuracy Evaluation of the model on the {self.name} dataset{out_txt}:")

        scores = self.compute_metrices(model)

        file_output_data = [epoch, steps]
        for header_name in self.csv_headers:
            if "_" in header_name:
                sim_fct, metric = header_name.split("_", maxsplit=1)
                file_output_data.append(scores[sim_fct][metric])

        if output_path is not None and self.write_csv:
            csv_path = os.path.join(output_path, self.csv_file)
            if not os.path.isfile(csv_path):
                with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
                    writer = csv.writer(f)
                    writer.writerow(self.csv_headers)
                    writer.writerow(file_output_data)
            else:
                with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
                    writer = csv.writer(f)
                    writer.writerow(file_output_data)

        metrics = {
            f"{short_name}_{metric}": value
            for short_name, values in scores.items()
            for metric, value in values.items()
        }
        metrics.update(
            {f"max_{metric}": max(scores[short_name][metric] for short_name in scores) for metric in scores["cosine"]}
        )
        metrics = self.prefix_name_to_metrics(metrics, self.name)
        self.store_metrics_in_model_card_data(model, metrics)
        return metrics

    def compute_metrices(self, model):
        with nullcontext() if self.truncate_dim is None else model.truncate_sentence_embeddings(self.truncate_dim):
            try:
                # If the sentences are hashable, then we can use a set to avoid embedding the same sentences
                # multiple times
                sentences = list(set(self.sentences1 + self.sentences2))
            except TypeError:
                # Otherwise we just embed everything, e.g. if the sentences are images for evaluating a CLIP model
                embeddings = model.encode(
                    self.sentences1 + self.sentences2,
                    batch_size=self.batch_size,
                    show_progress_bar=self.show_progress_bar,
                    convert_to_numpy=True,
                )
                embeddings1 = embeddings[: len(self.sentences1)]
                embeddings2 = embeddings[len(self.sentences1) :]
            else:
                embeddings = model.encode(
                    sentences,
                    batch_size=self.batch_size,
                    show_progress_bar=self.show_progress_bar,
                    convert_to_numpy=True,
                )
                emb_dict = {sent: emb for sent, emb in zip(sentences, embeddings)}
                embeddings1 = [emb_dict[sent] for sent in self.sentences1]
                embeddings2 = [emb_dict[sent] for sent in self.sentences2]

        cosine_scores = 1 - paired_cosine_distances(embeddings1, embeddings2)
        manhattan_distances = paired_manhattan_distances(embeddings1, embeddings2)
        euclidean_distances = paired_euclidean_distances(embeddings1, embeddings2)

        embeddings1_np = np.asarray(embeddings1)
        embeddings2_np = np.asarray(embeddings2)
        dot_scores = np.sum(embeddings1_np * embeddings2_np, axis=-1)

        labels = np.asarray(self.labels)
        output_scores = {}
        for short_name, name, scores, reverse in [
            [SimilarityFunction.COSINE.value, "Cosine-Similarity", cosine_scores, True],
            [SimilarityFunction.DOT_PRODUCT.value, "Dot-Product", dot_scores, True],
            [SimilarityFunction.MANHATTAN.value, "Manhattan-Distance", manhattan_distances, False],
            [SimilarityFunction.EUCLIDEAN.value, "Euclidean-Distance", euclidean_distances, False],
        ]:
            acc, acc_threshold = self.find_best_acc_and_threshold(scores, labels, reverse)
            f1, precision, recall, f1_threshold = self.find_best_f1_and_threshold(scores, labels, reverse)
            ap = average_precision_score(labels, scores * (1 if reverse else -1))

            logger.info(f"Accuracy with {name}: {acc * 100:.2f}\t(Threshold: {acc_threshold:.4f})")
            logger.info(f"F1 with {name}: {f1 * 100:.2f}\t(Threshold: {f1_threshold:.4f})")
            logger.info(f"Precision with {name}: {precision * 100:.2f}")
            logger.info(f"Recall with {name}: {recall * 100:.2f}")
            logger.info(f"Average Precision with {name}: {ap * 100:.2f}\n")

            output_scores[short_name] = {
                "accuracy": acc,
                "accuracy_threshold": acc_threshold,
                "f1": f1,
                "f1_threshold": f1_threshold,
                "precision": precision,
                "recall": recall,
                "ap": ap,
            }

        return output_scores
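
    # A sketch of how the returned dict is keyed (assuming the SimilarityFunction string
    # values "cosine", "dot", "manhattan", and "euclidean"):
    #
    #     scores = evaluator.compute_metrices(model)
    #     scores["cosine"]["ap"]        # average precision using cosine similarity
    #     scores["euclidean"]["f1"]     # best F1 using Euclidean distance (lower = more similar)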

    @staticmethod
    def find_best_acc_and_threshold(scores, labels, high_score_more_similar: bool):
        assert len(scores) == len(labels)
        rows = list(zip(scores, labels))
        rows = sorted(rows, key=lambda x: x[0], reverse=high_score_more_similar)

        max_acc = 0
        best_threshold = -1

        positive_so_far = 0
        remaining_negatives = sum(labels == 0)

        for i in range(len(rows) - 1):
            score, label = rows[i]
            if label == 1:
                positive_so_far += 1
            else:
                remaining_negatives -= 1

            acc = (positive_so_far + remaining_negatives) / len(labels)
            if acc > max_acc:
                max_acc = acc
                best_threshold = (rows[i][0] + rows[i + 1][0]) / 2

        return max_acc, best_threshold
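
    # Worked example for the accuracy sweep above (a sketch with made-up values):
    # scores = [0.9, 0.8, 0.3], labels = [1, 1, 0], high_score_more_similar = True.
    # Sorted rows: (0.9, 1), (0.8, 1), (0.3, 0). After consuming the first two rows,
    # positive_so_far = 2 and remaining_negatives = 1, so acc = 3 / 3 = 1.0 and the
    # threshold is placed between the last positive and the first negative:
    # (0.8 + 0.3) / 2 = 0.55. The method returns (1.0, 0.55).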

    @staticmethod
    def find_best_f1_and_threshold(scores, labels, high_score_more_similar: bool):
        assert len(scores) == len(labels)

        scores = np.asarray(scores)
        labels = np.asarray(labels)

        rows = list(zip(scores, labels))
        rows = sorted(rows, key=lambda x: x[0], reverse=high_score_more_similar)

        best_f1 = best_precision = best_recall = 0
        threshold = 0
        nextract = 0
        ncorrect = 0
        total_num_duplicates = sum(labels)

        for i in range(len(rows) - 1):
            score, label = rows[i]
            nextract += 1

            if label == 1:
                ncorrect += 1

            if ncorrect > 0:
                precision = ncorrect / nextract
                recall = ncorrect / total_num_duplicates
                f1 = 2 * precision * recall / (precision + recall)
                if f1 > best_f1:
                    best_f1 = f1
                    best_precision = precision
                    best_recall = recall
                    threshold = (rows[i][0] + rows[i + 1][0]) / 2

        return best_f1, best_precision, best_recall, threshold
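
# Worked example for the F1 sweep above (a sketch with made-up values):
# scores = [0.9, 0.8, 0.3], labels = [1, 0, 1], high_score_more_similar = True.
# Sorted rows: (0.9, 1), (0.8, 0), (0.3, 1). At i = 0: precision = 1/1, recall = 1/2,
# F1 = 0.667, threshold = (0.9 + 0.8) / 2 = 0.85. At i = 1 the F1 drops to 0.5, so the
# method returns (0.667, 1.0, 0.5, 0.85). Note that the loop stops at len(rows) - 1, so
# the cut-off in which every pair is predicted positive is never evaluated.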