Dataset columns: text (string, lengths 1 to 1.02k), class_index (int64, values 0 to 10.8k), source (string, lengths 85 to 188).
All rows in this section have class_index = 244 and source = /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py. The text column of each row (code from trainer.py) follows.
config_file = os.path.join(resume_from_checkpoint, CONFIG_NAME) adapter_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_WEIGHTS_NAME) adapter_safe_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME) weights_file = os.path.join(resume_from_checkpoint, WEIGHTS_NAME) weights_index_file = os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME) safe_weights_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_NAME) safe_weights_index_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_INDEX_NAME) is_fsdp_ckpt = os.path.isdir(resume_from_checkpoint) and ( # this checks the FSDP state dict when `SHARDED_STATE_DICT` is used any( FSDP_MODEL_NAME in folder_name for folder_name in os.listdir(resume_from_checkpoint) if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name)) )
# this checks the FSDP state dict when `FULL_STATE_DICT` is used or os.path.isfile(os.path.join(resume_from_checkpoint, f"{FSDP_MODEL_NAME}.bin")) ) # if multiple adapters exist, they get saved in sub directories adapter_subdirs = ( [ folder_name for folder_name in os.listdir(resume_from_checkpoint) if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name)) and ( os.path.isfile(os.path.join(resume_from_checkpoint, folder_name, ADAPTER_WEIGHTS_NAME)) or os.path.isfile(os.path.join(resume_from_checkpoint, folder_name, ADAPTER_SAFE_WEIGHTS_NAME)) ) ] if os.path.isdir(resume_from_checkpoint) else [] )
if is_fsdp_ckpt and not self.is_fsdp_enabled: raise ValueError(f"Checkpoint found at {resume_from_checkpoint} is only supported when using PyTorch FSDP") if not ( any( os.path.isfile(f) for f in [ weights_file, safe_weights_file, weights_index_file, safe_weights_index_file, adapter_weights_file, adapter_safe_weights_file, ] ) or is_fsdp_ckpt or adapter_subdirs ): raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}") logger.info(f"Loading model from {resume_from_checkpoint}.")
if os.path.isfile(config_file): config = PretrainedConfig.from_json_file(config_file) checkpoint_version = config.transformers_version if checkpoint_version is not None and checkpoint_version != __version__: logger.warning( f"You are resuming training from a checkpoint trained with {checkpoint_version} of " f"Transformers but your current version is {__version__}. This is not recommended and could " "lead to errors or unwanted behaviors." )
if os.path.isfile(weights_file) or os.path.isfile(safe_weights_file) or is_fsdp_ckpt: weights_only_kwarg = {"weights_only": True} # If the model is on the GPU, it still works! if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")): # If the 'user_content.pt' file exists, load with the new smp api. # Checkpoint must have been saved with the new smp api. smp.resume_from_checkpoint( path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False ) else: # If the 'user_content.pt' file does NOT exist, load with the old smp api. # Checkpoint must have been saved with the old smp api. if hasattr(self.args, "fp16") and self.args.fp16 is True: logger.warning(
"Enabling FP16 and loading from smp < 1.10 checkpoint together is not suppported." ) state_dict = torch.load( weights_file, map_location="cpu", **weights_only_kwarg, ) # Required for smp to not auto-translate state_dict from hf to smp (is already smp). state_dict["_smp_is_partial"] = False load_result = model.load_state_dict(state_dict, strict=True) # release memory del state_dict elif self.is_fsdp_enabled: load_fsdp_model( self.accelerator.state.fsdp_plugin, self.accelerator, model, resume_from_checkpoint, **_get_fsdp_ckpt_kwargs(), ) else:
# We load the model state dict on the CPU to avoid an OOM error. if self.args.save_safetensors and os.path.isfile(safe_weights_file): state_dict = safetensors.torch.load_file(safe_weights_file, device="cpu") else: state_dict = torch.load( weights_file, map_location="cpu", **weights_only_kwarg, )
# workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963 # which takes *args instead of **kwargs load_result = model.load_state_dict(state_dict, False) # release memory del state_dict self._issue_warnings_after_load(load_result)
# Load adapters following PR # 24096 elif _is_peft_model(model): # If training a model using PEFT & LoRA, assume that the adapter has been saved properly. # TODO: in the future support only specific min PEFT versions if (hasattr(model, "active_adapter") or hasattr(model, "active_adapters")) and hasattr( model, "load_adapter" ): if os.path.exists(resume_from_checkpoint): # For BC for older PEFT versions if hasattr(model, "active_adapters"): active_adapters = model.active_adapters if len(active_adapters) > 1: logger.warning("Multiple active adapters detected, will only consider the first adapter") active_adapter = active_adapters[0] else: active_adapter = model.active_adapter
if adapter_subdirs: for subdir_name in adapter_subdirs: peft_id = os.path.join(resume_from_checkpoint, subdir_name) model.load_adapter(peft_id, subdir_name, is_trainable=(subdir_name == active_adapter)) model.set_adapter(active_adapter) else: model.load_adapter(resume_from_checkpoint, active_adapter, is_trainable=True) else: logger.warning( "The intermediate checkpoints of PEFT may not be saved correctly, " f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. " "Check some examples here: https://github.com/huggingface/peft/issues/96" ) else: logger.warning("Could not load adapter model, make sure to have `peft>=0.3.0` installed") else:
# We load the sharded checkpoint load_result = load_sharded_checkpoint( model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled(), prefer_safe=self.args.save_safetensors ) if not is_sagemaker_mp_enabled(): self._issue_warnings_after_load(load_result)
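# Illustrative sketch, not part of trainer.py: the checkpoint-loading logic above runs when a user
# resumes training. `my_model`, `my_args`, and `my_train_ds` are placeholders for objects built elsewhere.
from transformers import Trainer

trainer = Trainer(model=my_model, args=my_args, train_dataset=my_train_ds)
trainer.train(resume_from_checkpoint=True)  # resume from the latest checkpoint-* folder in args.output_dir
# ... or point at a specific checkpoint directory:
trainer.train(resume_from_checkpoint="my_output_dir/checkpoint-500")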
def _load_best_model(self): logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).") best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME) best_safe_model_path = os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_NAME) best_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_WEIGHTS_NAME) best_safe_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME)
model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model if self.is_deepspeed_enabled: deepspeed_load_checkpoint( self.model_wrapped, self.state.best_model_checkpoint, load_module_strict=not _is_peft_model(self.model), ) elif self.is_fsdp_enabled: load_result = load_fsdp_model( self.accelerator.state.fsdp_plugin, self.accelerator, model, self.state.best_model_checkpoint, **_get_fsdp_ckpt_kwargs(), ) elif ( os.path.exists(best_model_path) or os.path.exists(best_safe_model_path) or os.path.exists(best_adapter_model_path) or os.path.exists(best_safe_adapter_model_path) ): has_been_loaded = True weights_only_kwarg = {"weights_only": True} if is_sagemaker_mp_enabled():
if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")): # If the 'user_content.pt' file exists, load with the new smp api. # Checkpoint must have been saved with the new smp api. smp.resume_from_checkpoint( path=self.state.best_model_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False, ) else: # If the 'user_content.pt' file does NOT exist, load with the old smp api. # Checkpoint must have been saved with the old smp api. if self.args.save_safetensors and os.path.isfile(best_safe_model_path): state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu") else: state_dict = torch.load(
best_model_path, map_location="cpu", **weights_only_kwarg, )
state_dict["_smp_is_partial"] = False load_result = model.load_state_dict(state_dict, strict=True) else: if _is_peft_model(model): # If train a model using PEFT & LoRA, assume that adapter have been saved properly. # TODO: in the future support only specific min PEFT versions if (hasattr(model, "active_adapter") or hasattr(model, "active_adapters")) and hasattr( model, "load_adapter" ): # For BC for older PEFT versions if hasattr(model, "active_adapters"): active_adapter = model.active_adapters[0] if len(model.active_adapters) > 1: logger.warning("Detected multiple active adapters, will only consider the first one") else:
active_adapter = model.active_adapter
if os.path.exists(best_adapter_model_path) or os.path.exists(best_safe_adapter_model_path): try: model.load_adapter(self.state.best_model_checkpoint, active_adapter) except RuntimeError as exc: if model.peft_config[active_adapter].is_prompt_learning: # for context: https://github.com/huggingface/peft/issues/2256 msg = ( "When using prompt learning PEFT methods such as " f"{model.peft_config[active_adapter].peft_type.value}, setting " "load_best_model_at_end=True can lead to errors, it is recommended " "to set this to False and to load the model manually from the checkpoint "
"directory using PeftModel.from_pretrained(base_model, <path>) after training " "has finished." ) raise RuntimeError(msg) from exc else: raise # Load_adapter has no return value present, modify it when appropriate. from torch.nn.modules.module import _IncompatibleKeys
load_result = _IncompatibleKeys([], []) else: logger.warning( "The intermediate checkpoints of PEFT may not be saved correctly, " f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. " "Check some examples here: https://github.com/huggingface/peft/issues/96" ) has_been_loaded = False else: logger.warning("Could not load adapter model, make sure to have `peft>=0.3.0` installed") has_been_loaded = False else: # We load the model state dict on the CPU to avoid an OOM error. if self.args.save_safetensors and os.path.isfile(best_safe_model_path):
state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu") else: state_dict = torch.load( best_model_path, map_location="cpu", **weights_only_kwarg, )
# If the model is on the GPU, it still works! # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963 # which takes *args instead of **kwargs load_result = model.load_state_dict(state_dict, False) if not is_sagemaker_mp_enabled() and has_been_loaded: self._issue_warnings_after_load(load_result) elif os.path.exists(os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_INDEX_NAME)) or os.path.exists( os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME) ): load_result = load_sharded_checkpoint( model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled() ) if not is_sagemaker_mp_enabled(): self._issue_warnings_after_load(load_result) else: logger.warning(
f"Could not locate the best model at {best_model_path}, if you are running a distributed training " "on multiple nodes, you should activate `--save_on_each_node`." )
def _issue_warnings_after_load(self, load_result): if len(load_result.missing_keys) != 0: if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set( self.model._keys_to_ignore_on_save ): self.model.tie_weights() else: logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.") if len(load_result.unexpected_keys) != 0: logger.warning( f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}." ) def _evaluate(self, trial, ignore_keys_for_eval, skip_scheduler=False): metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) self._report_to_hp_search(trial, self.state.global_step, metrics)
# Run delayed LR scheduler now that metrics are populated if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) and not skip_scheduler: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" try: self.lr_scheduler.step(metrics[metric_to_check]) except KeyError as exc: raise KeyError( f"The `metric_for_best_model` training argument is set to '{metric_to_check}', " f"which is not found in the evaluation metrics. " f"The available evaluation metrics are: {list(metrics.keys())}. " f"Please ensure that the `compute_metrics` function returns a dictionary that includes '{metric_to_check}' or " f"consider changing the `metric_for_best_model` via the TrainingArguments." ) from exc
return metrics
def _maybe_log_save_evaluate(self, tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time): if self.control.should_log and self.state.global_step > self._globalstep_last_logged: if is_torch_xla_available(): xm.mark_step() logs: Dict[str, float] = {} # all_gather + mean() to get average loss over all processes tr_loss_scalar = self._nested_gather(tr_loss).mean().item() # reset tr_loss to zero tr_loss -= tr_loss logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) if grad_norm is not None: logs["grad_norm"] = grad_norm.detach().item() if isinstance(grad_norm, torch.Tensor) else grad_norm logs["learning_rate"] = self._get_learning_rate() self._total_loss_scalar += tr_loss_scalar self._globalstep_last_logged = self.state.global_step self.store_flos()
self.log(logs, start_time) metrics = None if self.control.should_evaluate: metrics = self._evaluate(trial, ignore_keys_for_eval) is_new_best_metric = self._determine_best_metric(metrics=metrics, trial=trial) if self.args.save_strategy == SaveStrategy.BEST: self.control.should_save = is_new_best_metric if self.control.should_save: self._save_checkpoint(model, trial) self.control = self.callback_handler.on_save(self.args, self.state, self.control) def _load_rng_state(self, checkpoint): # Load RNG states from `checkpoint` if checkpoint is None: return
if self.args.world_size > 1: process_index = self.args.process_index rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth") if not os.path.isfile(rng_file): logger.info( f"Didn't find an RNG file for process {process_index}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(checkpoint, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." ) return
with safe_globals(): checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"]) else: try: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) if is_torch_xla_available(): xm.set_rng_state(checkpoint_rng_state["xla"]) if is_torch_npu_available():
if self.args.parallel_mode == ParallelMode.DISTRIBUTED: torch.npu.random.set_rng_state_all(checkpoint_rng_state["npu"]) else: try: torch.npu.random.set_rng_state(checkpoint_rng_state["npu"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the NPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) if is_torch_mlu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: torch.mlu.random.set_rng_state_all(checkpoint_rng_state["mlu"]) else: try: torch.mlu.random.set_rng_state(checkpoint_rng_state["mlu"]) except Exception as e: logger.info(
f"Didn't manage to set back the RNG states of the MLU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) if is_torch_musa_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: torch.musa.set_rng_state_all(checkpoint_rng_state["musa"]) else: try: torch.musa.set_rng_state(checkpoint_rng_state["musa"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the MUSA because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." )
def _determine_best_metric(self, metrics, trial): """ Determine if the model should be saved based on the evaluation metrics. Returns: bool: True if a new best metric was found, else False """ is_new_best_metric = False if self.args.metric_for_best_model is not None: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" try: metric_value = metrics[metric_to_check] except KeyError as exc: raise KeyError( f"The `metric_for_best_model` training argument is set to '{metric_to_check}', which is not found in the evaluation metrics. " f"The available evaluation metrics are: {list(metrics.keys())}. Consider changing the `metric_for_best_model` via the TrainingArguments." ) from exc
operator = np.greater if self.args.greater_is_better else np.less if self.state.best_metric is None: self.state.best_metric = float("-inf") if self.args.greater_is_better else float("inf") if operator(metric_value, self.state.best_metric): run_dir = self._get_output_dir(trial=trial) checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" output_dir = os.path.join(run_dir, checkpoint_folder) self.state.best_metric = metric_value self.state.best_model_checkpoint = output_dir is_new_best_metric = True return is_new_best_metric def _save_checkpoint(self, model, trial): # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we # want to save except FullyShardedDDP. # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" if self.hp_search_backend is None and trial is None: self.store_flos() run_dir = self._get_output_dir(trial=trial) output_dir = os.path.join(run_dir, checkpoint_folder) self.save_model(output_dir, _internal_call=True) if not self.args.save_only_model: # Save optimizer and scheduler self._save_optimizer_and_scheduler(output_dir) # Save RNG state self._save_rng_state(output_dir)
# Save the Trainer state if self.args.should_save: # Update `ExportableState` callbacks and `TrainerControl` state to where we are currently for cb in [ cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState) ]: cb_name = cb.__class__.__name__ cb_state = cb.state() if isinstance(self.state.stateful_callbacks[cb_name], list): self.state.stateful_callbacks[cb_name].append(cb_state) else: self.state.stateful_callbacks[cb_name] = cb_state self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) if self.args.push_to_hub: self._push_from_checkpoint(output_dir)
# Maybe delete some older checkpoints. if self.args.should_save: # Solely rely on numerical checkpoint id for rotation. # mtime is not reliable especially on some fuse fs in cloud environments. self._rotate_checkpoints(use_mtime=False, output_dir=run_dir) def _save_rng_state(self, output_dir): # Save RNG state in non-distributed training rng_states = { "python": random.getstate(), "numpy": np.random.get_state(), "cpu": torch.random.get_rng_state(), } if torch.cuda.is_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: # In non distributed, we save the global CUDA RNG state (will take care of DataParallel) rng_states["cuda"] = torch.cuda.random.get_rng_state_all() else: rng_states["cuda"] = torch.cuda.random.get_rng_state()
if is_torch_xla_available(): rng_states["xla"] = xm.get_rng_state() if is_torch_npu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states["npu"] = torch.npu.random.get_rng_state_all() else: rng_states["npu"] = torch.npu.random.get_rng_state() if is_torch_mlu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states["mlu"] = torch.mlu.random.get_rng_state_all() else: rng_states["mlu"] = torch.mlu.random.get_rng_state() if is_torch_musa_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states["musa"] = torch.musa.get_rng_state_all() else: rng_states["musa"] = torch.musa.get_rng_state()
# A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may # not yet exist. os.makedirs(output_dir, exist_ok=True) if self.args.world_size <= 1: torch.save(rng_states, os.path.join(output_dir, "rng_state.pth")) else: torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth"))
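# Illustrative sketch, not part of trainer.py: what a typical single-process, non-FSDP/DeepSpeed
# checkpoint-<step> folder written by _save_checkpoint, _save_rng_state and
# _save_optimizer_and_scheduler contains. The file names are the usual default values of the
# constants referenced above (WEIGHTS_NAME, OPTIMIZER_NAME, ...), assumed here rather than imported.
import os

def list_checkpoint(path="my_output_dir/checkpoint-500"):
    # Expected entries typically include:
    #   model.safetensors or pytorch_model.bin   - model weights
    #   optimizer.pt / scheduler.pt              - optimizer and LR scheduler states
    #   rng_state.pth (or rng_state_<rank>.pth)  - the RNG states saved above
    #   trainer_state.json / training_args.bin   - TrainerState and TrainingArguments
    return sorted(os.listdir(path))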
def _save_optimizer_and_scheduler(self, output_dir): if is_torch_xla_available(): xm.rendezvous("saving_optimizer_states") if self.is_fsdp_xla_v1_enabled: optm = { "optimizer": self.optimizer.state_dict(), "shard_metadata": self.model.get_shard_metadata(), } xm.save( optm, os.path.join( output_dir, f"rank{self.args.process_index}-of-{self.args.world_size}-{OPTIMIZER_NAME}" ), master_only=False, ) else: xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) with warnings.catch_warnings(record=True) as caught_warnings: xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) elif is_sagemaker_mp_enabled():
opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False) smp.barrier() if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state: smp.save( opt_state_dict, os.path.join(output_dir, OPTIMIZER_NAME), partial=True, v3=smp.state.cfg.shard_optimizer_state, ) elif self.is_deepspeed_enabled: # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed # config `stage3_gather_16bit_weights_on_model_save` is True accept_exclude_frozen_parameters = "exclude_frozen_parameters" in set( inspect.signature(self.model_wrapped.save_checkpoint).parameters.keys() ) if accept_exclude_frozen_parameters and _is_peft_model(self.model): self.model_wrapped.save_checkpoint(output_dir, exclude_frozen_parameters=True) else:
self.model_wrapped.save_checkpoint(output_dir) elif self.is_fsdp_enabled: # save fsdp specific ckpt for resuming from ckpt save_fsdp_model( self.accelerator.state.fsdp_plugin, self.accelerator, self.model, output_dir, **_get_fsdp_ckpt_kwargs() ) save_fsdp_optimizer( self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, output_dir ) elif self.args.should_save: # deepspeed.save_checkpoint above saves model/optim/sched torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
# Save SCHEDULER & SCALER is_deepspeed_custom_scheduler = self.is_deepspeed_enabled and not isinstance( self.lr_scheduler, DeepSpeedSchedulerWrapper ) if ( self.args.should_save and (not self.is_deepspeed_enabled or is_deepspeed_custom_scheduler) and not is_torch_xla_available() ): with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) def _load_optimizer_and_scheduler(self, checkpoint): """If optimizer and scheduler states exist, load them.""" if checkpoint is None: return
if self.is_deepspeed_enabled: # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init if not isinstance(self.lr_scheduler, DeepSpeedSchedulerWrapper): with warnings.catch_warnings(record=True) as caught_warnings: self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME))) reissue_pt_warnings(caught_warnings) return
checkpoint_file_exists = ( glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*") if is_sagemaker_mp_enabled() else ( os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) or os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME_BIN)) or ( os.path.isdir(checkpoint) and any( OPTIMIZER_NAME_BIN.split(".")[0] in folder_name for folder_name in os.listdir(checkpoint) if os.path.isdir(os.path.join(checkpoint, folder_name)) ) ) ) ) checkpoint_file_exists = ( glob.glob(os.path.join(checkpoint, f"rank*-of-{self.args.world_size}-{OPTIMIZER_NAME}")) if self.is_fsdp_xla_v1_enabled else checkpoint_file_exists ) if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)):
# Load in optimizer and scheduler states if is_torch_xla_available(): # On TPU we have to take some extra precautions to properly load the states on the right device. if self.is_fsdp_xla_v1_enabled: optimizer_state = torch.load( os.path.join( checkpoint, f"rank{self.args.process_index}-of-{self.args.world_size}-{OPTIMIZER_NAME}" ), map_location="cpu", ) # We only need `optimizer` when resuming from checkpoint optimizer_state = optimizer_state["optimizer"] else: optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu") with warnings.catch_warnings(record=True) as caught_warnings: lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device) xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device) self.optimizer.load_state_dict(optimizer_state) self.lr_scheduler.load_state_dict(lr_scheduler_state) else: if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(checkpoint, "user_content.pt")): # Optimizer checkpoint was saved with smp >= 1.10 def opt_load_hook(mod, opt): opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
else: # Optimizer checkpoint was saved with smp < 1.10 def opt_load_hook(mod, opt): if IS_SAGEMAKER_MP_POST_1_10: opt.load_state_dict( smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True) ) else: opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
self.model_wrapped.register_post_step_hook(opt_load_hook) else: # We use the CPU when training on one GPU to avoid OOM for GPU RAM when training big models. # In distributed training however, we load directly on each GPU and risk the GPU OOM as it's more # likely to get OOM on CPU (since we load num_gpu times the optimizer state map_location = self.args.device if self.args.world_size > 1 else "cpu" if self.is_fsdp_enabled: load_fsdp_optimizer( self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, checkpoint, **_get_fsdp_ckpt_kwargs(), ) else: self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location) ) with warnings.catch_warnings(record=True) as caught_warnings: self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME))) reissue_pt_warnings(caught_warnings)
def _load_callback_state(self): """If callback states exist and were passed in, restore their states if enabled""" if not self.args.restore_callback_states_from_checkpoint: return # Callback states are stored in stateful_callbacks not_found = [] new_callbacks = [] original_callbacks = self.callback_handler.callbacks + [self.control] for stored_callback, data in self.state.stateful_callbacks.items(): if not isinstance(data, list): data = [data] if any(callback.__class__.__name__ == stored_callback for callback in original_callbacks): # We can load/restore from multiple callbacks of the same type. duplicates = [ callback for callback in original_callbacks if callback.__class__.__name__ == stored_callback ] for callback, callback_data in zip(duplicates, data): args = callback_data.get("args", {})
attributes = callback_data.get("attributes", {}) new_callback = type(callback)(**args) for attribute, value in attributes.items(): setattr(new_callback, attribute, value) if isinstance(callback, TrainerControl): # Specifically for restoring the `control` state self.control = new_callback else: new_callbacks.append(new_callback) # We remove the existing callback and add it to the list of new callbacks self.callback_handler.remove_callback(type(new_callback)) logger.info("Continuing training from checkpoint, restoring any callbacks that were passed in") else: not_found.append(stored_callback) if len(not_found) > 0: logger.warning(
f"Checkpoint included callbacks not included in current configuration. Ignoring. ({', '.join(not_found)})" ) for callback in new_callbacks: self.callback_handler.add_callback(callback)
def hyperparameter_search( self, hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None, compute_objective: Optional[Callable[[Dict[str, float]], float]] = None, n_trials: int = 20, direction: Union[str, List[str]] = "minimize", backend: Optional[Union["str", HPSearchBackend]] = None, hp_name: Optional[Callable[["optuna.Trial"], str]] = None, **kwargs, ) -> Union[BestRun, List[BestRun]]: """ Launch a hyperparameter search using `optuna`, `Ray Tune` or `SigOpt`. The optimized quantity is determined by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided, and the sum of all metrics otherwise. <Tip warning={true}>
To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom optimizer/scheduler. </Tip>
Args: hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*): A function that defines the hyperparameter search space. Will default to [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or [`~trainer_utils.default_hp_space_sigopt`] depending on your backend. compute_objective (`Callable[[Dict[str, float]], float]`, *optional*): A function computing the objective to minimize or maximize from the metrics returned by the `evaluate` method. Will default to [`~trainer_utils.default_compute_objective`]. n_trials (`int`, *optional*, defaults to 20): The number of trial runs to test. direction (`str` or `List[str]`, *optional*, defaults to `"minimize"`): For single-objective optimization, `direction` is a `str` and can be `"minimize"` or `"maximize"`; you
should pick `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics. If it's multi objectives optimization, direction is `List[str]`, can be List of `"minimize"` and `"maximize"`, you should pick `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics. backend (`str` or [`~training_utils.HPSearchBackend`], *optional*): The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending on which one is installed. If all are installed, will default to optuna. hp_name (`Callable[["optuna.Trial"], str]]`, *optional*): A function that defines the trial/run name. Will default to None. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments for each backend:
- `optuna`: parameters from [optuna.study.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html) and also the parameters `timeout`, `n_jobs` and `gc_after_trial` from [optuna.study.Study.optimize](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) - `ray`: parameters from [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run). If `resources_per_trial` is not set in the `kwargs`, it defaults to 1 CPU core and 1 GPU (if available). If `progress_reporter` is not set in the `kwargs`, [ray.tune.CLIReporter](https://docs.ray.io/en/latest/tune/api/doc/ray.tune.CLIReporter.html) is used. - `sigopt`: the parameter `proxies` from
[sigopt.Connection.set_proxies](https://docs.sigopt.com/support/faq#how-do-i-use-sigopt-with-a-proxy).
Returns: [`trainer_utils.BestRun` or `List[trainer_utils.BestRun]`]: All the information about the best run or best runs for multi-objective optimization. Experiment summary can be found in `run_summary` attribute for Ray backend. """ if backend is None: backend = default_hp_search_backend() backend = HPSearchBackend(backend) backend_obj = ALL_HYPERPARAMETER_SEARCH_BACKENDS[backend]() backend_obj.ensure_available() self.hp_search_backend = backend if self.model_init is None: raise RuntimeError( "To use hyperparameter search, you need to pass your model through a model_init function." ) self.hp_space = backend_obj.default_hp_space if hp_space is None else hp_space self.hp_name = hp_name self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
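# Illustrative sketch, not part of trainer.py: calling the API documented in the docstring above
# with the optuna backend (optuna must be installed). `build_my_model`, `my_training_args`,
# `my_train_ds` and `my_eval_ds` are placeholders; the search space values are examples only.
from transformers import Trainer

def model_init():
    return build_my_model()  # must return a freshly initialized model for every trial

def optuna_hp_space(trial):
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-5, log=True),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [8, 16, 32]),
    }

trainer = Trainer(model_init=model_init, args=my_training_args, train_dataset=my_train_ds, eval_dataset=my_eval_ds)
best_run = trainer.hyperparameter_search(
    hp_space=optuna_hp_space,
    backend="optuna",
    direction="minimize",  # minimize the default objective (the evaluation loss)
    n_trials=10,
)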
best_run = backend_obj.run(self, n_trials, direction, **kwargs) self.hp_search_backend = None return best_run def log(self, logs: Dict[str, float], start_time: Optional[float] = None) -> None: """ Log `logs` on the various objects watching training. Subclass and override this method to inject custom behavior. Args: logs (`Dict[str, float]`): The values to log. start_time (`Optional[float]`): The start of training. """ if self.state.epoch is not None: logs["epoch"] = self.state.epoch if self.args.include_num_input_tokens_seen: logs["num_input_tokens_seen"] = self.state.num_input_tokens_seen if start_time is not None: speed_metrics("train", start_time, num_tokens=self.state.num_input_tokens_seen)
output = {**logs, **{"step": self.state.global_step}} self.state.log_history.append(output) self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
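# Illustrative sketch, not part of trainer.py: the `log` docstring above invites subclassing to
# inject custom behavior. A minimal override that also prints the logged values on the main process.
from transformers import Trainer

class VerboseTrainer(Trainer):
    def log(self, logs, start_time=None):
        if self.is_world_process_zero():
            print(f"step {self.state.global_step}: {logs}")
        super().log(logs, start_time)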
def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]: """ Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors. """ if isinstance(data, Mapping): return type(data)({k: self._prepare_input(v) for k, v in data.items()}) elif isinstance(data, (tuple, list)): return type(data)(self._prepare_input(v) for v in data) elif isinstance(data, torch.Tensor): kwargs = {"device": self.args.device} if self.is_deepspeed_enabled and (torch.is_floating_point(data) or torch.is_complex(data)): # NLP models inputs are int/uint and those get adjusted to the right dtype of the # embedding. Other models such as wav2vec2's inputs are already float and thus # may need special handling to match the dtypes of the model
kwargs.update({"dtype": self.accelerator.state.deepspeed_plugin.hf_ds_config.dtype()}) return data.to(**kwargs) return data
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: """ Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and handling potential state. """ inputs = self._prepare_input(inputs) if len(inputs) == 0: raise ValueError( "The batch received was empty, your model won't be able to train on it. Double-check that your " f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}." ) if self.args.past_index >= 0 and self._past is not None: inputs["mems"] = self._past return inputs def compute_loss_context_manager(self): """ A helper wrapper to group together context managers. """ return self.autocast_smart_context_manager()
def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True): """ A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired arguments, depending on the situation. """ if self.use_cpu_amp: ctx_manager = torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) else: ctx_manager = contextlib.nullcontext() return ctx_manager def training_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], num_items_in_batch=None ) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to train. inputs (`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. Return: `torch.Tensor`: The tensor with training loss on this batch. """ model.train() if hasattr(self.optimizer, "train") and callable(self.optimizer.train): self.optimizer.train() inputs = self._prepare_inputs(inputs) if is_sagemaker_mp_enabled(): loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) return loss_mb.reduce_mean().detach().to(self.args.device) with self.compute_loss_context_manager(): loss = self.compute_loss(model, inputs, num_items_in_batch=num_items_in_batch)
del inputs if ( self.args.torch_empty_cache_steps is not None and self.state.global_step % self.args.torch_empty_cache_steps == 0 ): if is_torch_xpu_available(): torch.xpu.empty_cache() elif is_torch_mlu_available(): torch.mlu.empty_cache() elif is_torch_musa_available(): torch.musa.empty_cache() elif is_torch_npu_available(): torch.npu.empty_cache() elif is_torch_mps_available(min_version="2.0"): torch.mps.empty_cache() else: torch.cuda.empty_cache() kwargs = {} # For LOMO optimizers, you need to explicitly pass the learning rate if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]: kwargs["learning_rate"] = self._get_learning_rate()
if self.args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: # Finally we need to normalize the loss for reporting if not self.model_accepts_loss_kwargs and self.compute_loss_func is None: loss = loss / self.args.gradient_accumulation_steps self.accelerator.backward(loss, **kwargs) return loss.detach() def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None): """ How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior. """ if (self.label_smoother is not None or self.compute_loss_func is not None) and "labels" in inputs: labels = inputs.pop("labels") else: labels = None if self.model_accepts_loss_kwargs: loss_kwargs = {} if num_items_in_batch is not None: loss_kwargs["num_items_in_batch"] = num_items_in_batch inputs = {**inputs, **loss_kwargs} outputs = model(**inputs) # Save past state if it exists # TODO: this needs to be fixed and made cleaner later. if self.args.past_index >= 0: self._past = outputs[self.args.past_index]
if labels is not None: unwrapped_model = self.accelerator.unwrap_model(model) if _is_peft_model(unwrapped_model): model_name = unwrapped_model.base_model.model._get_name() else: model_name = unwrapped_model._get_name() # User-defined compute_loss function if self.compute_loss_func is not None: loss = self.compute_loss_func(outputs, labels, num_items_in_batch=num_items_in_batch) elif model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): loss = self.label_smoother(outputs, labels, shift_labels=True) else: loss = self.label_smoother(outputs, labels) else: if isinstance(outputs, dict) and "loss" not in outputs: raise ValueError( "The model did not return a loss from the inputs, only the following keys: "
f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." ) # We don't use .loss here since the model may return tuples instead of ModelOutput. loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
if self.args.average_tokens_across_devices and self.model_accepts_loss_kwargs: loss *= self.accelerator.num_processes return (loss, outputs) if return_outputs else loss def is_local_process_zero(self) -> bool: """ Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. """ return self.args.local_process_index == 0 def is_world_process_zero(self) -> bool: """ Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `True` for one process). """ # Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global # process index. if is_sagemaker_mp_enabled(): return smp.rank() == 0 else: return self.args.process_index == 0
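# Illustrative sketch, not part of trainer.py: `compute_loss` above is designed to be overridden.
# A weighted cross-entropy variant for an imbalanced two-class setup; the class weights are
# placeholders and the model is assumed to return `logits`.
import torch
from transformers import Trainer

class WeightedLossTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        labels = inputs.pop("labels")
        outputs = model(**inputs)
        logits = outputs.logits
        loss_fct = torch.nn.CrossEntropyLoss(weight=torch.tensor([1.0, 5.0], device=logits.device))
        loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
        return (loss, outputs) if return_outputs else loss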
def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False): """ Will save the model, so you can reload it using `from_pretrained()`. Will only save from the main process. """ if output_dir is None: output_dir = self.args.output_dir
if is_torch_xla_available(): self._save_tpu(output_dir) elif is_sagemaker_mp_enabled(): # Calling the state_dict needs to be done on the wrapped model and on all processes. os.makedirs(output_dir, exist_ok=True) state_dict = self.model_wrapped.state_dict() if self.args.should_save: self._save(output_dir, state_dict=state_dict) if IS_SAGEMAKER_MP_POST_1_10: # 'user_content.pt' indicates model state_dict saved with smp >= 1.10 Path(os.path.join(output_dir, "user_content.pt")).touch() elif self.is_fsdp_enabled: if ("FULL_STATE_DICT" in str(self.accelerator.state.fsdp_plugin.state_dict_type)) and ( version.parse(accelerate_version) > version.parse("0.24.1") ): state_dict = self.accelerator.get_state_dict(self.model) if self.args.should_save:
self._save(output_dir, state_dict=state_dict) elif self.is_deepspeed_enabled: try: state_dict = self.accelerator.get_state_dict(self.deepspeed) if self.args.should_save: self._save(output_dir, state_dict=state_dict) except ValueError: logger.warning( " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use" " zero_to_fp32.py to recover weights" ) if self.args.should_save: self._save(output_dir, state_dict={}) # remove the dummy state_dict remove_dummy_checkpoint(self.args.should_save, output_dir, [WEIGHTS_NAME, SAFE_WEIGHTS_NAME]) self.model_wrapped.save_checkpoint(output_dir)
elif self.args.should_save: self._save(output_dir) # Push to the Hub when `save_model` is called by the user. if self.args.push_to_hub and not _internal_call: self.push_to_hub(commit_message="Model save") def _save_tpu(self, output_dir: Optional[str] = None): output_dir = output_dir if output_dir is not None else self.args.output_dir logger.info(f"Saving model checkpoint to {output_dir}") model = self.model xm.mark_step() if xm.is_master_ordinal(local=False): os.makedirs(output_dir, exist_ok=True) torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
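# Illustrative sketch, not part of trainer.py: `save_model` above writes a directory that can be
# reloaded with `from_pretrained`. The path is a placeholder and the example assumes a
# sequence-classification model was trained.
trainer.save_model("my_finetuned_model")  # saves weights, config and the processing class on the main process
from transformers import AutoModelForSequenceClassification
reloaded = AutoModelForSequenceClassification.from_pretrained("my_finetuned_model")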
# Save a trained model and configuration using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` supported_classes = (PushToHubMixin,) xm.rendezvous("saving_checkpoint") if self.is_fsdp_xla_v1_enabled: ckpt = { "model": model.state_dict(), "shard_metadata": model.get_shard_metadata(), } ckpt_path = os.path.join( output_dir, f"rank{self.args.process_index}-of-{self.args.world_size}-{WEIGHTS_NAME}" ) # All ranks save sharded checkpoint xm.save(ckpt, ckpt_path, master_only=False) # Make sure all ranks have saved checkpoints xm.rendezvous("save_full_checkpoints") # Master save full checkpoint if self.args.should_save: from torch_xla.distributed.fsdp import consolidate_sharded_model_checkpoints
full_state_dict, _ = consolidate_sharded_model_checkpoints( ckpt_prefix=os.path.join(output_dir, ""), ckpt_suffix=f"rank*-of-*-{WEIGHTS_NAME}", save_model=False, ) model = model.module.module unwrapped_model = self.accelerator.unwrap_model(model) if isinstance(unwrapped_model, supported_classes): unwrapped_model.save_pretrained( output_dir, state_dict=full_state_dict, save_function=xm.save, safe_serialization=self.args.save_safetensors, ) else: logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") xm.save(full_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) elif not isinstance(model, supported_classes):
if isinstance(self.accelerator.unwrap_model(model), supported_classes): self.accelerator.unwrap_model(model).save_pretrained( output_dir, is_main_process=self.args.should_save, state_dict=xm._maybe_convert_to_cpu(model.state_dict()), save_function=xm.save, safe_serialization=self.args.save_safetensors, ) else: logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") state_dict = xm._maybe_convert_to_cpu(model.state_dict()) xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: model.save_pretrained( output_dir, is_main_process=self.args.should_save, save_function=xm.save, safe_serialization=self.args.save_safetensors,
state_dict=xm._maybe_convert_to_cpu(model.state_dict()), ) if self.processing_class is not None and self.args.should_save: self.processing_class.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None): # If we are executing this function, we are the process zero, so we don't check for that. output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving model checkpoint to {output_dir}") supported_classes = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel) # Save a trained model and configuration using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` if not isinstance(self.model, supported_classes): if state_dict is None: state_dict = self.model.state_dict()
if isinstance(self.accelerator.unwrap_model(self.model), supported_classes): self.accelerator.unwrap_model(self.model).save_pretrained( output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors ) else: logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") if self.args.save_safetensors: safetensors.torch.save_file( state_dict, os.path.join(output_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"} ) else: torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: self.model.save_pretrained( output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors ) if self.processing_class is not None: self.processing_class.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def store_flos(self): # Storing the number of floating-point operations that went into the model if self.args.parallel_mode == ParallelMode.DISTRIBUTED: self.state.total_flos += ( distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item() ) self.current_flos = 0 else: self.state.total_flos += self.current_flos self.current_flos = 0 def _sorted_checkpoints( self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False ) -> List[str]: ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)]
for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] # Make sure we don't delete the best model. if ( self.state.best_model_checkpoint is not None and str(Path(self.state.best_model_checkpoint)) in checkpoints_sorted ): best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint))) for i in range(best_model_index, len(checkpoints_sorted) - 2): checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] return checkpoints_sorted def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return
# Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. save_total_limit = self.args.save_total_limit if ( self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1 and checkpoints_sorted[-1] != self.state.best_model_checkpoint ): save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint, ignore_errors=True) def evaluate( self, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> Dict[str, float]: """ Run evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `compute_metrics` argument). You can also subclass and override this method to inject custom behavior.
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
        Args:
            eval_dataset (Union[`Dataset`, Dict[str, `Dataset`]], *optional*):
                Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
                not accepted by the `model.forward()` method are automatically removed. If it is a dictionary, it will
                evaluate on each dataset, prepending the dictionary key to the metric name. Datasets must implement
                the `__len__` method.

                <Tip>
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
                If you pass a dictionary with names of datasets as keys and datasets as values, evaluate will run
                separate evaluations on each dataset. This can be useful to monitor how training affects other
                datasets or simply to get a more fine-grained evaluation.
                When used with `load_best_model_at_end`, make sure `metric_for_best_model` references exactly one
                of the datasets. If you, for example, pass in `{"data1": data1, "data2": data2}` for two datasets
                `data1` and `data2`, you could specify `metric_for_best_model="eval_data1_loss"` for using the
                loss on `data1` and `metric_for_best_model="eval_data2_loss"` for the loss on `data2`.

                </Tip>
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
                An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default).
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
        """
        # handle multiple eval datasets
        override = eval_dataset is not None
        eval_dataset = eval_dataset if override else self.eval_dataset
        if isinstance(eval_dataset, dict):
            metrics = {}
            for eval_dataset_name, _eval_dataset in eval_dataset.items():
                dataset_metrics = self.evaluate(
                    eval_dataset=_eval_dataset if override else eval_dataset_name,
                    ignore_keys=ignore_keys,
                    metric_key_prefix=f"{metric_key_prefix}_{eval_dataset_name}",
                )
                metrics.update(dataset_metrics)
            return metrics

        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
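A hedged usage sketch of the multi-dataset path above; `trainer`, `data1` and `data2` are placeholders for an already-constructed Trainer and two evaluation datasets, not objects defined in this file.

# Placeholder objects: assume `trainer`, `data1` and `data2` already exist.
metrics = trainer.evaluate(eval_dataset={"data1": data1, "data2": data2})

# Each metric key is prefixed with the dictionary key, e.g. "eval_data1_loss" and
# "eval_data2_loss" -- the same form `metric_for_best_model` expects.
print(metrics.get("eval_data1_loss"), metrics.get("eval_data2_loss"))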
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        if self.is_fsdp_xla_v2_enabled:
            eval_dataloader = tpu_spmd_dataloader(eval_dataloader)

        start_time = time.time()

        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        output = eval_loop(
            eval_dataloader,
            description="Evaluation",
            # No point gathering the predictions if there are no metrics, otherwise we defer to
            # self.args.prediction_loss_only
            prediction_loss_only=True if self.compute_metrics is None else None,
            ignore_keys=ignore_keys,
            metric_key_prefix=metric_key_prefix,
        )
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
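As the call above shows, predictions are only gathered when a `compute_metrics` function was passed at init; otherwise the loop tracks the loss alone. A minimal sketch of such a function for a classification head (the accuracy computation here is illustrative, not taken from this file):

import numpy as np

def compute_metrics(eval_pred):
    # eval_pred carries the gathered predictions and label ids.
    preds = np.argmax(eval_pred.predictions, axis=-1)
    return {"accuracy": float((preds == eval_pred.label_ids).mean())}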
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        if f"{metric_key_prefix}_model_preparation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_model_preparation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        self.log(output.metrics)

        if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
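The throughput numbers reported here follow directly from the quantities computed above. A small arithmetic sketch with made-up numbers (1000 samples, per-device batch size 8, world size 4, a 2.5 s loop); the key names mirror the `eval_*` speed metrics the Trainer logs.

import math

num_samples = 1000
eval_batch_size, world_size = 8, 4
runtime = 2.5  # seconds spent in the evaluation loop (illustrative)

total_batch_size = eval_batch_size * world_size        # samples processed per eval step across all devices
num_steps = math.ceil(num_samples / total_batch_size)  # ceil(1000 / 32) = 32
metrics = {
    "eval_runtime": round(runtime, 4),
    "eval_samples_per_second": round(num_samples / runtime, 3),
    "eval_steps_per_second": round(num_steps / runtime, 3),
}
print(metrics)  # {'eval_runtime': 2.5, 'eval_samples_per_second': 400.0, 'eval_steps_per_second': 12.8}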
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)

        self._memory_tracker.stop_and_update_metrics(output.metrics)

        return output.metrics

    def predict(
        self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
    ) -> PredictionOutput:
        """
        Run prediction and return predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in `evaluate()`.
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
        Args:
            test_dataset (`Dataset`):
                Dataset to run the predictions on. If it is a `datasets.Dataset`, columns not accepted by the
                `model.forward()` method are automatically removed. Has to implement the method `__len__`.
            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (`str`, *optional*, defaults to `"test"`):
                An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
                "test_bleu" if the prefix is "test" (default).

        <Tip>

        If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
        padding in a token classification task) the predictions will be padded (on the right) to allow for
        concatenation into one array. The padding index is -100.
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
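The -100 padding mentioned in the tip is typically stripped during post-processing. A self-contained sketch for a token-classification output; the shapes and arrays are invented for illustration.

import numpy as np

# Invented arrays: (num_examples, max_seq_len, num_labels) logits and right-padded labels.
predictions = np.random.rand(2, 6, 3)
label_ids = np.array([[0, 1, 2, -100, -100, -100],
                      [1, 1, 0, 2, -100, -100]])

pred_ids = np.argmax(predictions, axis=-1)
true_predictions = [
    [int(p) for p, l in zip(pred_row, label_row) if l != -100]
    for pred_row, label_row in zip(pred_ids, label_ids)
]
print([len(row) for row in true_predictions])  # [3, 4] -- padding positions dropped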
        </Tip>

        Returns: *NamedTuple* A namedtuple with the following keys:

            - predictions (`np.ndarray`): The predictions on `test_dataset`.
            - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
            - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
              labels).
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()

        test_dataloader = self.get_test_dataloader(test_dataset)
        start_time = time.time()
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
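A hedged usage sketch of `predict()`; `trainer` and `test_ds` are placeholders for an existing Trainer and a test dataset, and the metric keys shown only appear when the dataset carries labels.

import numpy as np

# Placeholder objects: assume `trainer` and `test_ds` already exist.
output = trainer.predict(test_ds, metric_key_prefix="test")

pred_ids = np.argmax(output.predictions, axis=-1)  # class ids for a classification head
if output.metrics:
    print(output.metrics.get("test_loss"), output.metrics.get("test_runtime"))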
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        output = eval_loop(
            test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
        )
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        if f"{metric_key_prefix}_model_preparation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_model_preparation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
        self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics)
        self._memory_tracker.stop_and_update_metrics(output.metrics)

        return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)

    def evaluation_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """
        Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

        Works both with or without labels.
        """
        args = self.args

        prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py