# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Callbacks to use with the Trainer class and customize the training loop.
"""

import dataclasses
import json
from dataclasses import dataclass
from typing import Dict, List, Optional, Union

import numpy as np
from tqdm.auto import tqdm

from .trainer_utils import IntervalStrategy, has_length
from .training_args import TrainingArguments
from .utils import logging


logger = logging.get_logger(__name__)


@dataclass
class TrainerState:
    """
    A class containing the [`Trainer`] inner state that will be saved along the model and optimizer when checkpointing
    and passed to the [`TrainerCallback`].

    <Tip>

    In all this class, one step is to be understood as one update step. When using gradient accumulation, one update
    step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`, then one update
    step requires going through *n* batches.

    </Tip>

    Args:
        epoch (`float`, *optional*):
            Only set during training, will represent the epoch the training is at (the decimal part being the
            percentage of the current epoch completed).
        global_step (`int`, *optional*, defaults to 0):
            During training, represents the number of update steps completed.
        max_steps (`int`, *optional*, defaults to 0):
            The number of update steps to do during the current training.
        logging_steps (`int`, *optional*, defaults to 500):
            Log every X update steps.
        eval_steps (`int`, *optional*):
            Run an evaluation every X steps.
        save_steps (`int`, *optional*, defaults to 500):
            Save a checkpoint every X update steps.
        train_batch_size (`int`, *optional*):
            The batch size for the training dataloader. Only needed when `auto_find_batch_size` has been used.
        num_input_tokens_seen (`int`, *optional*, defaults to 0):
            The number of tokens seen during training (number of input tokens, not the number of prediction tokens).
        total_flos (`float`, *optional*, defaults to 0):
            The total number of floating operations done by the model since the beginning of training (stored as floats
            to avoid overflow).
        log_history (`List[Dict[str, float]]`, *optional*):
            The list of logs done since the beginning of training.
        best_metric (`float`, *optional*):
            When tracking the best model, the value of the best metric encountered so far.
        best_model_checkpoint (`str`, *optional*):
            When tracking the best model, the name of the checkpoint for the best model encountered so far.
        is_local_process_zero (`bool`, *optional*, defaults to `True`):
            Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on
            several machines) main process.
        is_world_process_zero (`bool`, *optional*, defaults to `True`):
            Whether or not this process is the global main process (when training in a distributed fashion on several
            machines, this is only going to be `True` for one process).
        is_hyper_param_search (`bool`, *optional*, defaults to `False`):
            Whether we are in the process of a hyperparameter search using `Trainer.hyperparameter_search`. This will
            impact the way data will be logged in TensorBoard.
        stateful_callbacks (`List[StatefulTrainerCallback]`, *optional*):
            Callbacks attached to the `Trainer` that should have their states be saved or restored.
            Relevant callbacks should implement a `state` and `from_state` function.
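
    Example (a minimal sketch; the file path below is illustrative): `Trainer` serializes this state as
    `trainer_state.json` inside each checkpoint folder, and the JSON round-trip looks like this:

    ```python
    state = TrainerState(global_step=10, max_steps=100)
    state.save_to_json("trainer_state.json")
    restored = TrainerState.load_from_json("trainer_state.json")
    assert restored.global_step == 10
    ```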
  79. """
    epoch: Optional[float] = None
    global_step: int = 0
    max_steps: int = 0
    logging_steps: int = 500
    eval_steps: int = 500
    save_steps: int = 500
    train_batch_size: int = None
    num_train_epochs: int = 0
    num_input_tokens_seen: int = 0
    total_flos: float = 0
    log_history: List[Dict[str, float]] = None
    best_metric: Optional[float] = None
    best_model_checkpoint: Optional[str] = None
    is_local_process_zero: bool = True
    is_world_process_zero: bool = True
    is_hyper_param_search: bool = False
    trial_name: str = None
    trial_params: Dict[str, Union[str, float, int, bool]] = None
    stateful_callbacks: List["TrainerCallback"] = None

    def __post_init__(self):
        if self.log_history is None:
            self.log_history = []
        if self.stateful_callbacks is None:
            self.stateful_callbacks = {}
        elif isinstance(self.stateful_callbacks, dict):
            # We are loading the callbacks in from the state file, no need to process them
            pass
        else:
            # Saveable callbacks get stored as dict of kwargs
            stateful_callbacks = {}
            for callback in self.stateful_callbacks:
                if not isinstance(callback, ExportableState):
                    raise TypeError(
                        f"All callbacks passed to be saved must inherit `ExportableState`, but received {type(callback)}"
                    )
                name = callback.__class__.__name__
                if name in stateful_callbacks:
                    # We can have multiple versions of the same callback
                    # if so, we store them as a list of states to restore
                    if not isinstance(stateful_callbacks[name], list):
                        stateful_callbacks[name] = [stateful_callbacks[name]]
                    stateful_callbacks[name].append(callback.state())
                else:
                    stateful_callbacks[name] = callback.state()
            self.stateful_callbacks = stateful_callbacks

    def save_to_json(self, json_path: str):
        """Save the content of this instance in JSON format inside `json_path`."""
        json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n"
        with open(json_path, "w", encoding="utf-8") as f:
            f.write(json_string)

    @classmethod
    def load_from_json(cls, json_path: str):
        """Create an instance from the content of `json_path`."""
        with open(json_path, "r", encoding="utf-8") as f:
            text = f.read()
        return cls(**json.loads(text))


class ExportableState:
    """
    A class for objects that include the ability to have their state
    be saved during `Trainer._save_checkpoint` and loaded back in during
    `Trainer._load_from_checkpoint`.

    These must implement a `state` function that gets called during the respective
    Trainer function call. It should only include parameters and attributes needed to
    recreate the state at a particular time, to avoid relying on pickle and to keep to
    standard file IO writing.

    Example:

    ```python
    class EarlyStoppingCallback(TrainerCallback, ExportableState):
        def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
            self.early_stopping_patience = early_stopping_patience
            self.early_stopping_threshold = early_stopping_threshold
            # early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
            self.early_stopping_patience_counter = 0

        def state(self) -> dict:
            return {
                "args": {
                    "early_stopping_patience": self.early_stopping_patience,
                    "early_stopping_threshold": self.early_stopping_threshold,
                },
                "attributes": {
                    "early_stopping_patience_counter": self.early_stopping_patience_counter,
                },
            }
    ```"""

    def state(self) -> dict:
        raise NotImplementedError("You must implement a `state` function to utilize this class.")

    @classmethod
    def from_state(cls, state):
        instance = cls(**state["args"])
        for k, v in state["attributes"].items():
            setattr(instance, k, v)
        return instance
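

# A minimal usage sketch (kept as comments so the module stays free of import-time side effects): the
# payload produced by `state()` at save time can be fed back through `from_state` to rebuild an
# equivalent callback, e.g. for the `EarlyStoppingCallback` defined later in this module:
#
#     payload = EarlyStoppingCallback(early_stopping_patience=3).state()
#     restored = EarlyStoppingCallback.from_state(payload)
#     assert restored.early_stopping_patience == 3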


@dataclass
class TrainerControl(ExportableState):
    """
    A class that handles the [`Trainer`] control flow. This class is used by the [`TrainerCallback`] to activate some
    switches in the training loop.

    Args:
        should_training_stop (`bool`, *optional*, defaults to `False`):
            Whether or not the training should be interrupted.

            If `True`, this variable will not be set back to `False`. The training will just stop.
        should_epoch_stop (`bool`, *optional*, defaults to `False`):
            Whether or not the current epoch should be interrupted.

            If `True`, this variable will be set back to `False` at the beginning of the next epoch.
        should_save (`bool`, *optional*, defaults to `False`):
            Whether or not the model should be saved at this step.

            If `True`, this variable will be set back to `False` at the beginning of the next step.
        should_evaluate (`bool`, *optional*, defaults to `False`):
            Whether or not the model should be evaluated at this step.

            If `True`, this variable will be set back to `False` at the beginning of the next step.
        should_log (`bool`, *optional*, defaults to `False`):
            Whether or not the logs should be reported at this step.

            If `True`, this variable will be set back to `False` at the beginning of the next step.
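
    Example (a sketch of how a callback typically uses these switches; `MyStopOnStepCallback` is a
    hypothetical name, not part of this module):

    ```python
    class MyStopOnStepCallback(TrainerCallback):
        def on_step_end(self, args, state, control, **kwargs):
            if state.global_step >= 10:
                control.should_training_stop = True
            return control
    ```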
  193. """
    should_training_stop: bool = False
    should_epoch_stop: bool = False
    should_save: bool = False
    should_evaluate: bool = False
    should_log: bool = False

    def _new_training(self):
        """Internal method that resets the variable for a new training."""
        self.should_training_stop = False

    def _new_epoch(self):
        """Internal method that resets the variable for a new epoch."""
        self.should_epoch_stop = False

    def _new_step(self):
        """Internal method that resets the variable for a new step."""
        self.should_save = False
        self.should_evaluate = False
        self.should_log = False

    def state(self) -> dict:
        return {
            "args": {
                "should_training_stop": self.should_training_stop,
                "should_epoch_stop": self.should_epoch_stop,
                "should_save": self.should_save,
                "should_evaluate": self.should_evaluate,
                "should_log": self.should_log,
            },
            "attributes": {},
        }


class TrainerCallback:
    # no-format
    """
    A class for objects that will inspect the state of the training loop at some events and make some decisions. At
    each of those events the following arguments are available:

    Args:
        args ([`TrainingArguments`]):
            The training arguments used to instantiate the [`Trainer`].
        state ([`TrainerState`]):
            The current state of the [`Trainer`].
        control ([`TrainerControl`]):
            The object that is returned to the [`Trainer`] and can be used to make some decisions.
        model ([`PreTrainedModel`] or `torch.nn.Module`):
            The model being trained.
        tokenizer ([`PreTrainedTokenizer`]):
            The tokenizer used for encoding the data. This is deprecated in favour of `processing_class`.
        processing_class ([`PreTrainedTokenizer` or `BaseImageProcessor` or `ProcessorMixin` or `FeatureExtractionMixin`]):
            The processing class used for encoding the data. Can be a tokenizer, a processor, an image processor or a
            feature extractor.
        optimizer (`torch.optim.Optimizer`):
            The optimizer used for the training steps.
        lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`):
            The scheduler used for setting the learning rate.
        train_dataloader (`torch.utils.data.DataLoader`, *optional*):
            The current dataloader used for training.
        eval_dataloader (`torch.utils.data.DataLoader`, *optional*):
            The current dataloader used for evaluation.
        metrics (`Dict[str, float]`):
            The metrics computed by the last evaluation phase.

            Those are only accessible in the event `on_evaluate`.
        logs (`Dict[str, float]`):
            The values to log.

            Those are only accessible in the event `on_log`.

    The `control` object is the only one that can be changed by the callback, in which case the event that changes it
    should return the modified version.

    The arguments `args`, `state` and `control` are positionals for all events, all the others are grouped in `kwargs`.
    You can unpack the ones you need in the signature of the event using them. As an example, see the code of the
    simple [`~transformers.PrinterCallback`].

    Example:

    ```python
    class PrinterCallback(TrainerCallback):
        def on_log(self, args, state, control, logs=None, **kwargs):
            _ = logs.pop("total_flos", None)
            if state.is_local_process_zero:
                print(logs)
    ```"""

    def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called at the end of the initialization of the [`Trainer`].
        """
        pass

    def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called at the beginning of training.
        """
        pass

    def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called at the end of training.
        """
        pass

    def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called at the beginning of an epoch.
        """
        pass

    def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called at the end of an epoch.
        """
        pass

    def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called at the beginning of a training step. If using gradient accumulation, one training step might take
        several inputs.
        """
        pass

    def on_pre_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called before the optimizer step but after gradient clipping. Useful for monitoring gradients.
        """
        pass

    def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called after the optimizer step but before gradients are zeroed out. Useful for monitoring gradients.
        """
        pass

    def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called at the end of a substep during gradient accumulation.
        """
        pass

    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called at the end of a training step. If using gradient accumulation, one training step might take
        several inputs.
        """
        pass

    def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called after an evaluation phase.
        """
        pass

    def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics, **kwargs):
        """
        Event called after a successful prediction.
        """
        pass

    def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called after a checkpoint save.
        """
        pass

    def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called after logging the last logs.
        """
        pass

    def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """
        Event called after a prediction step.
        """
        pass
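

# A minimal sketch (as comments) of how a custom callback is attached to a `Trainer`; `MyCallback`,
# `model` and the datasets are illustrative placeholders:
#
#     from transformers import Trainer
#
#     trainer = Trainer(model=model, args=args, train_dataset=train_dataset, callbacks=[MyCallback()])
#     # or, equivalently, after instantiation:
#     trainer.add_callback(MyCallback)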


class CallbackHandler(TrainerCallback):
    """Internal class that just calls the list of callbacks in order."""

    def __init__(self, callbacks, model, processing_class, optimizer, lr_scheduler):
        self.callbacks = []
        for cb in callbacks:
            self.add_callback(cb)
        self.model = model
        self.processing_class = processing_class
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.train_dataloader = None
        self.eval_dataloader = None

        if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks):
            logger.warning(
                "The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n"
                + "should add one before training with `trainer.add_callback(DefaultFlowCallback)`. The current list"
                + " of callbacks is:\n"
                + self.callback_list
            )

    def add_callback(self, callback):
        cb = callback() if isinstance(callback, type) else callback
        cb_class = callback if isinstance(callback, type) else callback.__class__
        if cb_class in [c.__class__ for c in self.callbacks]:
            logger.warning(
                f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current"
                + " list of callbacks is:\n"
                + self.callback_list
            )
        self.callbacks.append(cb)

    def pop_callback(self, callback):
        if isinstance(callback, type):
            for cb in self.callbacks:
                if isinstance(cb, callback):
                    self.callbacks.remove(cb)
                    return cb
        else:
            for cb in self.callbacks:
                if cb == callback:
                    self.callbacks.remove(cb)
                    return cb

    def remove_callback(self, callback):
        if isinstance(callback, type):
            for cb in self.callbacks:
                if isinstance(cb, callback):
                    self.callbacks.remove(cb)
                    return
        else:
            self.callbacks.remove(callback)

    @property
    def callback_list(self):
        return "\n".join(cb.__class__.__name__ for cb in self.callbacks)

    def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_init_end", args, state, control)

    def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_training_stop = False
        return self.call_event("on_train_begin", args, state, control)

    def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_train_end", args, state, control)

    def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_epoch_stop = False
        return self.call_event("on_epoch_begin", args, state, control)

    def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_epoch_end", args, state, control)

    def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_log = False
        control.should_evaluate = False
        control.should_save = False
        return self.call_event("on_step_begin", args, state, control)

    def on_pre_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_pre_optimizer_step", args, state, control)

    def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_optimizer_step", args, state, control)

    def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_substep_end", args, state, control)

    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_step_end", args, state, control)

    def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
        control.should_evaluate = False
        return self.call_event("on_evaluate", args, state, control, metrics=metrics)

    def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
        return self.call_event("on_predict", args, state, control, metrics=metrics)

    def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_save = False
        return self.call_event("on_save", args, state, control)

    def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs):
        control.should_log = False
        return self.call_event("on_log", args, state, control, logs=logs)

    def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_prediction_step", args, state, control)

    def call_event(self, event, args, state, control, **kwargs):
        for callback in self.callbacks:
            result = getattr(callback, event)(
                args,
                state,
                control,
                model=self.model,
                processing_class=self.processing_class,
                optimizer=self.optimizer,
                lr_scheduler=self.lr_scheduler,
                train_dataloader=self.train_dataloader,
                eval_dataloader=self.eval_dataloader,
                **kwargs,
            )
            # A Callback can skip the return of `control` if it doesn't change it.
            if result is not None:
                control = result
        return control
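

# A rough sketch (as comments) of how `Trainer` drives this handler; the actual call sites live in
# `Trainer.train` and are more involved than shown here:
#
#     handler = CallbackHandler(
#         [DefaultFlowCallback(), ProgressCallback()], model, processing_class, optimizer, lr_scheduler
#     )
#     state, control = TrainerState(), TrainerControl()
#     control = handler.on_train_begin(args, state, control)
#     for step in range(state.max_steps):
#         control = handler.on_step_begin(args, state, control)
#         ...  # forward/backward and optimizer step happen here
#         control = handler.on_step_end(args, state, control)
#         if control.should_training_stop:
#             break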


class DefaultFlowCallback(TrainerCallback):
    """
    A [`TrainerCallback`] that handles the default flow of the training loop for logs, evaluation and checkpoints.
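
    The flow is driven entirely by [`TrainingArguments`]; as a rough sketch, an interval-based configuration this
    callback reacts to could look like the following (values and the output path are illustrative):

    ```python
    from transformers import TrainingArguments

    args = TrainingArguments(
        output_dir="out",
        logging_steps=50,
        eval_strategy="steps",
        eval_steps=200,
        save_strategy="steps",
        save_steps=200,
    )
    ```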
  453. """

    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        # Log
        if state.global_step == 1 and args.logging_first_step:
            control.should_log = True
        if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % state.logging_steps == 0:
            control.should_log = True

        # Evaluate
        if (
            args.eval_strategy == IntervalStrategy.STEPS
            and state.global_step % state.eval_steps == 0
            and args.eval_delay <= state.global_step
        ):
            control.should_evaluate = True

        # Save
        if (
            args.save_strategy == IntervalStrategy.STEPS
            and state.save_steps > 0
            and state.global_step % state.save_steps == 0
        ):
            control.should_save = True

        # End training
        if state.global_step >= state.max_steps:
            control.should_training_stop = True
            # Save the model at the end if we have a save strategy
            if args.save_strategy != IntervalStrategy.NO:
                control.should_save = True

        return control

    def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        # Log
        if args.logging_strategy == IntervalStrategy.EPOCH:
            control.should_log = True

        # Evaluate
        if args.eval_strategy == IntervalStrategy.EPOCH and args.eval_delay <= state.epoch:
            control.should_evaluate = True

        # Save
        if args.save_strategy == IntervalStrategy.EPOCH:
            control.should_save = True

        return control


class ProgressCallback(TrainerCallback):
    """
    A [`TrainerCallback`] that displays the progress of training or evaluation.
    """

    def __init__(self):
        self.training_bar = None
        self.prediction_bar = None

    def on_train_begin(self, args, state, control, **kwargs):
        if state.is_world_process_zero:
            self.training_bar = tqdm(total=state.max_steps, dynamic_ncols=True)
        self.current_step = 0

    def on_step_end(self, args, state, control, **kwargs):
        if state.is_world_process_zero:
            self.training_bar.update(state.global_step - self.current_step)
            self.current_step = state.global_step

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if state.is_world_process_zero and has_length(eval_dataloader):
            if self.prediction_bar is None:
                self.prediction_bar = tqdm(
                    total=len(eval_dataloader), leave=self.training_bar is None, dynamic_ncols=True
                )
            self.prediction_bar.update(1)

    def on_evaluate(self, args, state, control, **kwargs):
        if state.is_world_process_zero:
            if self.prediction_bar is not None:
                self.prediction_bar.close()
            self.prediction_bar = None

    def on_predict(self, args, state, control, **kwargs):
        if state.is_world_process_zero:
            if self.prediction_bar is not None:
                self.prediction_bar.close()
            self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        if state.is_world_process_zero and self.training_bar is not None:
            # make a shallow copy of logs so we can mutate the fields copied
            # but avoid doing any value pickling.
            shallow_logs = {}
            for k, v in logs.items():
                shallow_logs[k] = v
            _ = shallow_logs.pop("total_flos", None)
            # round numbers so that it looks better in console
            if "epoch" in shallow_logs:
                shallow_logs["epoch"] = round(shallow_logs["epoch"], 2)
            self.training_bar.write(str(shallow_logs))

    def on_train_end(self, args, state, control, **kwargs):
        if state.is_world_process_zero:
            self.training_bar.close()
            self.training_bar = None


class PrinterCallback(TrainerCallback):
    """
    A bare [`TrainerCallback`] that just prints the logs.
    """

    def on_log(self, args, state, control, logs=None, **kwargs):
        _ = logs.pop("total_flos", None)
        if state.is_local_process_zero:
            print(logs)


class EarlyStoppingCallback(TrainerCallback, ExportableState):
    """
    A [`TrainerCallback`] that handles early stopping.

    Args:
        early_stopping_patience (`int`):
            Use with `metric_for_best_model` to stop training when the specified metric worsens for
            `early_stopping_patience` evaluation calls.
        early_stopping_threshold (`float`, *optional*):
            Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the
            specified metric must improve to satisfy early stopping conditions.

    This callback depends on the [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric
    in [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the
    early stopping will not occur until the next save step.
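
    Example (a minimal sketch; `model`, the datasets and the argument values are illustrative):

    ```python
    from transformers import Trainer, TrainingArguments

    args = TrainingArguments(
        output_dir="out",
        eval_strategy="epoch",
        save_strategy="epoch",
        load_best_model_at_end=True,
        metric_for_best_model="eval_loss",
        greater_is_better=False,
    )
    trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)
    trainer.add_callback(EarlyStoppingCallback(early_stopping_patience=3))
    ```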
  561. """

    def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
        self.early_stopping_patience = early_stopping_patience
        self.early_stopping_threshold = early_stopping_threshold
        # early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
        self.early_stopping_patience_counter = 0

    def check_metric_value(self, args, state, control, metric_value):
        # best_metric is set by code for load_best_model
        operator = np.greater if args.greater_is_better else np.less
        if state.best_metric is None or (
            operator(metric_value, state.best_metric)
            and abs(metric_value - state.best_metric) > self.early_stopping_threshold
        ):
            self.early_stopping_patience_counter = 0
        else:
            self.early_stopping_patience_counter += 1

    def on_train_begin(self, args, state, control, **kwargs):
        assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True"
        assert (
            args.metric_for_best_model is not None
        ), "EarlyStoppingCallback requires metric_for_best_model to be defined"
        assert (
            args.eval_strategy != IntervalStrategy.NO
        ), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch"

    def on_evaluate(self, args, state, control, metrics, **kwargs):
        metric_to_check = args.metric_for_best_model
        if not metric_to_check.startswith("eval_"):
            metric_to_check = f"eval_{metric_to_check}"
        metric_value = metrics.get(metric_to_check)

        if metric_value is None:
            logger.warning(
                f"early stopping requires metric_for_best_model, but did not find {metric_to_check} so early stopping"
                " is disabled"
            )
            return

        self.check_metric_value(args, state, control, metric_value)
        if self.early_stopping_patience_counter >= self.early_stopping_patience:
            control.should_training_stop = True

    def state(self) -> dict:
        return {
            "args": {
                "early_stopping_patience": self.early_stopping_patience,
                "early_stopping_threshold": self.early_stopping_threshold,
            },
            "attributes": {
                "early_stopping_patience_counter": self.early_stopping_patience_counter,
            },
        }