conversation = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "https://www.ilankelman.org/stopsigns/australia.jpg"},
            {"type": "text", "text": "Please describe this image in detail."},
        ],
    },
]

Args:
    conversation (`List[Dict[str, str]]`):
        The conversation to format.
    chat_template (`Optional[str]`, *optional*):
        The Jinja template to use for formatting the conversation. If not provided, the tokenizer's chat
        template is used.
"""
123
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/processing_utils.py
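A minimal usage sketch of the conversation format documented above, assuming a multimodal checkpoint that ships a chat template (the `llava-hf/llava-1.5-7b-hf` repo name is illustrative):

```python
from transformers import AutoProcessor

# Illustrative checkpoint; any processor whose repo provides a chat template works the same way.
processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")

conversation = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "https://www.ilankelman.org/stopsigns/australia.jpg"},
            {"type": "text", "text": "Please describe this image in detail."},
        ],
    },
]

# With tokenize=False, only the rendered prompt string is returned.
prompt = processor.apply_chat_template(conversation, tokenize=False)
print(prompt)
```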
if chat_template is None:
    if self.chat_template is not None:
        chat_template = self.chat_template
    else:
        raise ValueError(
            "No chat template is set for this processor. Please either set the `chat_template` attribute, "
            "or provide a chat template as an argument. See "
            "https://huggingface.co/docs/transformers/main/en/chat_templating for more information."
        )

text_kwargs = {}
for key in TextKwargs.__annotations__.keys():
    value = kwargs.pop(key, None)
    if value is not None:
        text_kwargs[key] = value

chat_template_kwargs = {}
for key in ChatTemplateKwargs.__annotations__.keys():
    value = kwargs.pop(key, getattr(ChatTemplateKwargs, key))
    chat_template_kwargs[key] = value
123
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/processing_utils.py
# Pop kwargs that should not be used by tokenizer's `apply_chat_template`
tokenize = chat_template_kwargs.pop("tokenize")
return_dict = chat_template_kwargs.pop("return_dict")
num_frames = chat_template_kwargs.pop("num_frames")
video_load_backend = chat_template_kwargs.pop("video_load_backend")

prompt = self.tokenizer.apply_chat_template(
    conversation,
    chat_template=chat_template,
    tokenize=False,
    return_dict=False,
    **text_kwargs,
    **chat_template_kwargs,
)
123
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/processing_utils.py
# we will have to return all processed inputs in a dict
if tokenize:
    images, videos = [], []
    for message in conversation:
        visuals = [content for content in message["content"] if content["type"] in ["image", "video"]]
        for vision_info in visuals:
            if vision_info["type"] == "image":
                for key in ["image", "url", "path", "base64"]:
                    if key in vision_info:
                        images.append(load_image(vision_info[key]))
            elif vision_info["type"] == "video":
                for key in ["video", "url", "path"]:
                    if key in vision_info:
                        videos.append(
                            load_video(vision_info[key], num_frames=num_frames, backend=video_load_backend)
                        )
123
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/processing_utils.py
    out = self(
        text=prompt,
        images=images if images else None,
        videos=videos if videos else None,
        **kwargs,
    )
    if return_dict:
        return out
    else:
        return out["input_ids"]

return prompt

def post_process_image_text_to_text(self, generated_outputs):
    """
    Post-process the output of a VLM to decode the text.

    Args:
        generated_outputs (`torch.Tensor` or `np.ndarray`):
            The output of the model `generate` function. The output is expected to be a tensor of shape
            `(batch_size, sequence_length)` or `(sequence_length,)`.

    Returns:
        `List[str]`: The decoded text.
    """
    return self.tokenizer.batch_decode(generated_outputs, skip_special_tokens=True)
123
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/processing_utils.py
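Following the branch above, passing `tokenize=True` together with `return_dict=True` makes the processor load every image and video referenced in the conversation and run its own `__call__`, so the output can go straight to `generate`. Continuing the earlier sketch (same hypothetical `processor` and `conversation`):

```python
# Loads the referenced visuals, renders the prompt, and runs the full processor.
inputs = processor.apply_chat_template(
    conversation,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
)
print(inputs.keys())  # typically input_ids, attention_mask, pixel_values, ...
```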
class BatchFeature(BaseBatchFeature):
    r"""
    Holds the output of the image processor specific `__call__` methods. This class is derived from a python
    dictionary and can be used as a dictionary.

    Args:
        data (`dict`):
            Dictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.).
        tensor_type (`Union[None, str, TensorType]`, *optional*):
            You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors
            at initialization.
    """
124
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
class ImageProcessingMixin(PushToHubMixin):
    """
    This is an image processor mixin used to provide saving/loading functionality for sequential and image feature
    extractors.
    """

    _auto_class = None

    def __init__(self, **kwargs):
        """Set elements of `kwargs` as attributes."""
        # This key was saved while we still used `XXXFeatureExtractor` for image processing. Now we use
        # `XXXImageProcessor`, this attribute and its value are misleading.
        kwargs.pop("feature_extractor_type", None)
        # Pop "processor_class" as it should be saved as private attribute
        self._processor_class = kwargs.pop("processor_class", None)
        # Additional attributes without default values
        for key, value in kwargs.items():
            try:
                setattr(self, key, value)
            except AttributeError as err:
                logger.error(f"Can't set {key} with value {value} for {self}")
                raise err
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
def _set_processor_class(self, processor_class: str):
    """Sets processor class as an attribute."""
    self._processor_class = processor_class

@classmethod
def from_pretrained(
    cls: Type[ImageProcessorType],
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    local_files_only: bool = False,
    token: Optional[Union[str, bool]] = None,
    revision: str = "main",
    **kwargs,
) -> ImageProcessorType:
    r"""
    Instantiate a type of [`~image_processing_utils.ImageProcessingMixin`] from an image processor.

    Args:
        pretrained_model_name_or_path (`str` or `os.PathLike`):
            This can be either:
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
- a string, the *model id* of a pretrained image_processor hosted inside a model repo on huggingface.co.
- a path to a *directory* containing an image processor file saved using the
  [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g., `./my_model_directory/`.
- a path or url to a saved image processor JSON *file*, e.g., `./my_model_directory/preprocessor_config.json`.

cache_dir (`str` or `os.PathLike`, *optional*):
    Path to a directory in which a downloaded pretrained model image processor should be cached if the standard
    cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
    Whether or not to force (re-)downloading the image processor files and overriding the cached versions if they
    exist.
resume_download:
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of
    Transformers.
proxies (`Dict[str, str]`, *optional*):
    A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
    'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
token (`str` or `bool`, *optional*):
    The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the
    token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
    The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
    git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
    identifier allowed by git.
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
<Tip>

To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

</Tip>
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
    If `False`, then this function returns just the final image processor object. If `True`, then this function
    returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary consisting of the
    key/value pairs whose keys are not image processor attributes: i.e., the part of `kwargs` which has not been
    used to update `image_processor` and is otherwise ignored.
subfolder (`str`, *optional*, defaults to `""`):
    In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
    specify the folder name here.
kwargs (`Dict[str, Any]`, *optional*):
    The values in kwargs of any keys which are image processor attributes will be used to override the
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is controlled by the `return_unused_kwargs` keyword parameter.
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
Returns:
    An image processor of type [`~image_processing_utils.ImageProcessingMixin`].

Examples:
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
```python
# We can't instantiate directly the base class *ImageProcessingMixin* so let's show the examples on a
# derived class: *CLIPImageProcessor*
image_processor = CLIPImageProcessor.from_pretrained(
    "openai/clip-vit-base-patch32"
)  # Download image_processing_config from huggingface.co and cache.
image_processor = CLIPImageProcessor.from_pretrained(
    "./test/saved_model/"
)  # E.g. image processor (or model) was saved using *save_pretrained('./test/saved_model/')*
image_processor = CLIPImageProcessor.from_pretrained("./test/saved_model/preprocessor_config.json")
image_processor = CLIPImageProcessor.from_pretrained(
    "openai/clip-vit-base-patch32", do_normalize=False, foo=False
)
assert image_processor.do_normalize is False
image_processor, unused_kwargs = CLIPImageProcessor.from_pretrained(
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
"openai/clip-vit-base-patch32", do_normalize=False, foo=False, return_unused_kwargs=True ) assert image_processor.do_normalize is False assert unused_kwargs == {"foo": False} ```""" kwargs["cache_dir"] = cache_dir kwargs["force_download"] = force_download kwargs["local_files_only"] = local_files_only kwargs["revision"] = revision
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
use_auth_token = kwargs.pop("use_auth_token", None)
if use_auth_token is not None:
    warnings.warn(
        "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
        FutureWarning,
    )
    if token is not None:
        raise ValueError(
            "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
        )
    token = use_auth_token

if token is not None:
    kwargs["token"] = token

image_processor_dict, kwargs = cls.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)

return cls.from_dict(image_processor_dict, **kwargs)
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
    """
    Save an image processor object to the directory `save_directory`, so that it can be re-loaded using the
    [`~image_processing_utils.ImageProcessingMixin.from_pretrained`] class method.
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    Args:
        save_directory (`str` or `os.PathLike`):
            Directory where the image processor JSON file will be saved (will be created if it does not exist).
        push_to_hub (`bool`, *optional*, defaults to `False`):
            Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
            repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
            namespace).
        kwargs (`Dict[str, Any]`, *optional*):
            Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
    """
    use_auth_token = kwargs.pop("use_auth_token", None)
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    if use_auth_token is not None:
        warnings.warn(
            "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
            FutureWarning,
        )
        if kwargs.get("token", None) is not None:
            raise ValueError(
                "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
            )
        kwargs["token"] = use_auth_token

    if os.path.isfile(save_directory):
        raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")

    os.makedirs(save_directory, exist_ok=True)

    if push_to_hub:
        commit_message = kwargs.pop("commit_message", None)
        repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
        repo_id = self._create_repo(repo_id, **kwargs)
        files_timestamps = self._get_files_timestamps(save_directory)
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
    # loaded from the Hub.
    if self._auto_class is not None:
        custom_object_save(self, save_directory, config=self)

    # If we save using the predefined names, we can load using `from_pretrained`
    output_image_processor_file = os.path.join(save_directory, IMAGE_PROCESSOR_NAME)

    self.to_json_file(output_image_processor_file)
    logger.info(f"Image processor saved in {output_image_processor_file}")

    if push_to_hub:
        self._upload_modified_files(
            save_directory,
            repo_id,
            files_timestamps,
            commit_message=commit_message,
            token=kwargs.get("token"),
        )

    return [output_image_processor_file]
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
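A small round-trip sketch of `save_pretrained`/`from_pretrained` using a derived class (the directory name is illustrative):

```python
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Writes preprocessor_config.json into the directory and returns the saved file path.
saved_files = image_processor.save_pretrained("./my_image_processor")
print(saved_files)  # ['./my_image_processor/preprocessor_config.json']

# The directory can then be reloaded exactly like a Hub checkpoint.
reloaded = CLIPImageProcessor.from_pretrained("./my_image_processor")
assert reloaded.to_dict() == image_processor.to_dict()
```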
@classmethod
def get_image_processor_dict(
    cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """
    From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating
    an image processor of type [`~image_processor_utils.ImageProcessingMixin`] using `from_dict`.

    Parameters:
        pretrained_model_name_or_path (`str` or `os.PathLike`):
            The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
        subfolder (`str`, *optional*, defaults to `""`):
            In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you
            can specify the folder name here.
        image_processor_filename (`str`, *optional*, defaults to `"config.json"`):
            The name of the file in the model directory to use for the image processor config.
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    Returns:
        `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the image processor object.
    """
    cache_dir = kwargs.pop("cache_dir", None)
    force_download = kwargs.pop("force_download", False)
    resume_download = kwargs.pop("resume_download", None)
    proxies = kwargs.pop("proxies", None)
    token = kwargs.pop("token", None)
    use_auth_token = kwargs.pop("use_auth_token", None)
    local_files_only = kwargs.pop("local_files_only", False)
    revision = kwargs.pop("revision", None)
    subfolder = kwargs.pop("subfolder", "")
    image_processor_filename = kwargs.pop("image_processor_filename", IMAGE_PROCESSOR_NAME)

    from_pipeline = kwargs.pop("_from_pipeline", None)
    from_auto_class = kwargs.pop("_from_auto", False)
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    if use_auth_token is not None:
        warnings.warn(
            "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
            FutureWarning,
        )
        if token is not None:
            raise ValueError(
                "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
            )
        token = use_auth_token

    user_agent = {"file_type": "image processor", "from_auto_class": from_auto_class}
    if from_pipeline is not None:
        user_agent["using_pipeline"] = from_pipeline

    if is_offline_mode() and not local_files_only:
        logger.info("Offline mode: forcing local_files_only=True")
        local_files_only = True
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    is_local = os.path.isdir(pretrained_model_name_or_path)
    if os.path.isdir(pretrained_model_name_or_path):
        image_processor_file = os.path.join(pretrained_model_name_or_path, image_processor_filename)
    if os.path.isfile(pretrained_model_name_or_path):
        resolved_image_processor_file = pretrained_model_name_or_path
        is_local = True
    elif is_remote_url(pretrained_model_name_or_path):
        image_processor_file = pretrained_model_name_or_path
        resolved_image_processor_file = download_url(pretrained_model_name_or_path)
    else:
        image_processor_file = image_processor_filename
        try:
            # Load from local folder or from cache or download from model Hub and cache
            resolved_image_processor_file = cached_file(
                pretrained_model_name_or_path,
                image_processor_file,
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                token=token,
                user_agent=user_agent,
                revision=revision,
                subfolder=subfolder,
            )
        except EnvironmentError:
            # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted
            # to the original exception.
            raise
        except Exception:
            # For any other exception, we throw a generic error.
            raise EnvironmentError(
                f"Can't load image processor for '{pretrained_model_name_or_path}'. If you were trying to load"
                " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" f" directory containing a {image_processor_filename} file" )
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    try:
        # Load image_processor dict
        with open(resolved_image_processor_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        image_processor_dict = json.loads(text)
    except json.JSONDecodeError:
        raise EnvironmentError(
            f"It looks like the config file at '{resolved_image_processor_file}' is not a valid JSON file."
        )
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    if is_local:
        logger.info(f"loading configuration file {resolved_image_processor_file}")
    else:
        logger.info(
            f"loading configuration file {image_processor_file} from cache at {resolved_image_processor_file}"
        )

    if "auto_map" in image_processor_dict:
        image_processor_dict["auto_map"] = add_model_info_to_auto_map(
            image_processor_dict["auto_map"], pretrained_model_name_or_path
        )
    if "custom_pipelines" in image_processor_dict:
        image_processor_dict["custom_pipelines"] = add_model_info_to_custom_pipelines(
            image_processor_dict["custom_pipelines"], pretrained_model_name_or_path
        )
    return image_processor_dict, kwargs
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
@classmethod
def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
    """
    Instantiates a type of [`~image_processing_utils.ImageProcessingMixin`] from a Python dictionary of
    parameters.

    Args:
        image_processor_dict (`Dict[str, Any]`):
            Dictionary that will be used to instantiate the image processor object. Such a dictionary can be
            retrieved from a pretrained checkpoint by leveraging the
            [`~image_processing_utils.ImageProcessingMixin.to_dict`] method.
        kwargs (`Dict[str, Any]`):
            Additional parameters from which to initialize the image processor object.

    Returns:
        [`~image_processing_utils.ImageProcessingMixin`]: The image processor object instantiated from those
        parameters.
    """
    image_processor_dict = image_processor_dict.copy()
    return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    # The `size` parameter is a dict and was previously an int or tuple in feature extractors.
    # We set `size` here directly to the `image_processor_dict` so that it is converted to the appropriate
    # dict within the image processor and isn't overwritten if `size` is passed in as a kwarg.
    if "size" in kwargs and "size" in image_processor_dict:
        image_processor_dict["size"] = kwargs.pop("size")
    if "crop_size" in kwargs and "crop_size" in image_processor_dict:
        image_processor_dict["crop_size"] = kwargs.pop("crop_size")

    image_processor = cls(**image_processor_dict)

    # Update image_processor with kwargs if needed
    to_remove = []
    for key, value in kwargs.items():
        if hasattr(image_processor, key):
            setattr(image_processor, key, value)
            to_remove.append(key)
    for key in to_remove:
        kwargs.pop(key, None)
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
logger.info(f"Image processor {image_processor}") if return_unused_kwargs: return image_processor, kwargs else: return image_processor def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this image processor instance. """ output = copy.deepcopy(self.__dict__) output["image_processor_type"] = self.__class__.__name__ return output @classmethod def from_json_file(cls, json_file: Union[str, os.PathLike]): """ Instantiates a image processor of type [`~image_processing_utils.ImageProcessingMixin`] from the path to a JSON file of parameters. Args: json_file (`str` or `os.PathLike`): Path to the JSON file containing the parameters.
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
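A short sketch of the JSON round trip these serialization helpers provide (the file name is illustrative):

```python
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")

# to_dict() records the concrete class under "image_processor_type".
config = image_processor.to_dict()
print(config["image_processor_type"])  # CLIPImageProcessor

# to_json_file() and from_json_file() round-trip through a plain JSON file.
image_processor.to_json_file("preprocessor_config.json")
reloaded = CLIPImageProcessor.from_json_file("preprocessor_config.json")
print(reloaded)
```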
    Returns:
        An image processor of type [`~image_processing_utils.ImageProcessingMixin`]: The image_processor object
        instantiated from that JSON file.
    """
    with open(json_file, "r", encoding="utf-8") as reader:
        text = reader.read()
    image_processor_dict = json.loads(text)

    return cls(**image_processor_dict)

def to_json_string(self) -> str:
    """
    Serializes this instance to a JSON string.

    Returns:
        `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
    """
    dictionary = self.to_dict()

    for key, value in dictionary.items():
        if isinstance(value, np.ndarray):
            dictionary[key] = value.tolist()
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    # make sure private name "_processor_class" is correctly
    # saved as "processor_class"
    _processor_class = dictionary.pop("_processor_class", None)
    if _processor_class is not None:
        dictionary["processor_class"] = _processor_class

    return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"

def to_json_file(self, json_file_path: Union[str, os.PathLike]):
    """
    Save this instance to a JSON file.

    Args:
        json_file_path (`str` or `os.PathLike`):
            Path to the JSON file in which this image_processor instance's parameters will be saved.
    """
    with open(json_file_path, "w", encoding="utf-8") as writer:
        writer.write(self.to_json_string())

def __repr__(self):
    return f"{self.__class__.__name__} {self.to_json_string()}"
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
@classmethod
def register_for_auto_class(cls, auto_class="AutoImageProcessor"):
    """
    Register this class with a given auto class. This should only be used for custom image processors as the
    ones in the library are already mapped with `AutoImageProcessor`.

    <Tip warning={true}>

    This API is experimental and may have some slight breaking changes in the next releases.

    </Tip>

    Args:
        auto_class (`str` or `type`, *optional*, defaults to `"AutoImageProcessor"`):
            The auto class to register this new image processor with.
    """
    if not isinstance(auto_class, str):
        auto_class = auto_class.__name__

    import transformers.models.auto as auto_module

    if not hasattr(auto_module, auto_class):
        raise ValueError(f"{auto_class} is not a valid auto class.")

    cls._auto_class = auto_class
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
def fetch_images(self, image_url_or_urls: Union[str, List[str]]):
    """
    Convert a single URL or a list of URLs into the corresponding `PIL.Image` objects.
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
    If a single url is passed, the return value will be a single object. If a list is passed, a list of objects
    is returned.
    """
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0"
            " Safari/537.36"
        )
    }
    if isinstance(image_url_or_urls, list):
        return [self.fetch_images(x) for x in image_url_or_urls]
    elif isinstance(image_url_or_urls, str):
        response = requests.get(image_url_or_urls, stream=True, headers=headers)
        response.raise_for_status()
        return Image.open(BytesIO(response.content))
    else:
        raise TypeError(f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}")
125
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_base.py
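A brief sketch of `fetch_images` as documented above; since it only needs the mixin, any concrete image processor will do (the URL is reused from the earlier example for illustration):

```python
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")

# A single URL returns a single PIL.Image; a list of URLs returns a list of images.
image = image_processor.fetch_images("https://www.ilankelman.org/stopsigns/australia.jpg")
images = image_processor.fetch_images(
    ["https://www.ilankelman.org/stopsigns/australia.jpg"] * 2
)
print(image.size, len(images))
```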
class Seq2SeqTrainingArguments(TrainingArguments):
    """
    Args:
        predict_with_generate (`bool`, *optional*, defaults to `False`):
            Whether to use generate to calculate generative metrics (ROUGE, BLEU).
        generation_max_length (`int`, *optional*):
            The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default to
            the `max_length` value of the model configuration.
        generation_num_beams (`int`, *optional*):
            The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default to the
            `num_beams` value of the model configuration.
        generation_config (`str` or `Path` or [`~generation.GenerationConfig`], *optional*):
            Allows to load a [`~generation.GenerationConfig`] from the `from_pretrained` method. This can be
            either:
126
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_seq2seq.py
            - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
              huggingface.co.
            - a path to a *directory* containing a configuration file saved using the
              [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
            - a [`~generation.GenerationConfig`] object.
    """
126
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_seq2seq.py
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
126
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_seq2seq.py
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )
126
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_seq2seq.py
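A hedged sketch of enabling generation-based evaluation with these arguments (`output_dir` and the generation limits are illustrative values); `Seq2SeqTrainer` consumes them and calls `generate` during `evaluate`/`predict` when `predict_with_generate=True`:

```python
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="./seq2seq-checkpoints",  # illustrative path
    predict_with_generate=True,          # compute ROUGE/BLEU-style metrics via generate()
    generation_max_length=128,
    generation_num_beams=4,
)
```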
    def to_dict(self):
        """
        Serializes this instance while replacing `Enum` members by their values and `GenerationConfig` objects by
        dictionaries (for JSON serialization support). It obfuscates the token values by removing their value.
        """
        # filter out fields that are defined as field(init=False)
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
126
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_seq2seq.py
class ModelCard:
    r"""
    Structured Model Card class. Stores a model card as well as methods for loading/downloading/saving model cards.

    Please read the following paper for details and explanation on the sections: "Model Cards for Model Reporting"
    by Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer,
    Inioluwa Deborah Raji and Timnit Gebru, which contains the proposal behind model cards. Link:
    https://arxiv.org/abs/1810.03993

    Note: A model card can be loaded and saved to disk.
    """
127
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
    def __init__(self, **kwargs):
        warnings.warn(
            "The class `ModelCard` is deprecated and will be removed in version 5 of Transformers", FutureWarning
        )
        # Recommended attributes from https://arxiv.org/abs/1810.03993 (see papers)
        self.model_details = kwargs.pop("model_details", {})
        self.intended_use = kwargs.pop("intended_use", {})
        self.factors = kwargs.pop("factors", {})
        self.metrics = kwargs.pop("metrics", {})
        self.evaluation_data = kwargs.pop("evaluation_data", {})
        self.training_data = kwargs.pop("training_data", {})
        self.quantitative_analyses = kwargs.pop("quantitative_analyses", {})
        self.ethical_considerations = kwargs.pop("ethical_considerations", {})
        self.caveats_and_recommendations = kwargs.pop("caveats_and_recommendations", {})
127
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        # Open additional attributes
        for key, value in kwargs.items():
            try:
                setattr(self, key, value)
            except AttributeError as err:
                logger.error(f"Can't set {key} with value {value} for {self}")
                raise err

    def save_pretrained(self, save_directory_or_file):
        """Save a model card object to the directory or file `save_directory_or_file`."""
        if os.path.isdir(save_directory_or_file):
            # If we save using the predefined names, we can load using `from_pretrained`
            output_model_card_file = os.path.join(save_directory_or_file, MODEL_CARD_NAME)
        else:
            output_model_card_file = save_directory_or_file

        self.to_json_file(output_model_card_file)
        logger.info(f"Model card saved in {output_model_card_file}")
127
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        r"""
        Instantiate a [`ModelCard`] from a pre-trained model card.

        Parameters:
            pretrained_model_name_or_path: either:

                - a string, the *model id* of a pretrained model card hosted inside a model repo on
                  huggingface.co.
                - a path to a *directory* containing a model card file saved using the
                  [`~ModelCard.save_pretrained`] method, e.g.: `./my_model_directory/`.
                - a path or url to a saved model card JSON *file*, e.g.: `./my_model_directory/modelcard.json`.

            cache_dir: (*optional*) string:
                Path to a directory in which a downloaded pre-trained model card should be cached if the standard
                cache should not be used.

            kwargs: (*optional*) dict:
                key/value pairs with which to update the ModelCard object after loading.
127
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
                - The values in kwargs of any keys which are model card attributes will be used to override the
                  loaded values.
                - Behavior concerning key/value pairs whose keys are *not* model card attributes is controlled by
                  the *return_unused_kwargs* keyword parameter.

            proxies: (*optional*) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}. The proxies are used on each request.

            return_unused_kwargs: (*optional*) bool:
127
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
                - If False, then this function returns just the final model card object.
                - If True, then this function returns a tuple *(model card, unused_kwargs)* where *unused_kwargs*
                  is a dictionary consisting of the key/value pairs whose keys are not model card attributes:
                  i.e., the part of kwargs which has not been used to update *ModelCard* and is otherwise ignored.

        Examples:
127
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        ```python
        # Download model card from huggingface.co and cache.
        modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased")
        # Model card was saved using *save_pretrained('./test/saved_model/')*
        modelcard = ModelCard.from_pretrained("./test/saved_model/")
        modelcard = ModelCard.from_pretrained("./test/saved_model/modelcard.json")
        modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
        ```"""
        cache_dir = kwargs.pop("cache_dir", None)
        proxies = kwargs.pop("proxies", None)
        return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
        from_pipeline = kwargs.pop("_from_pipeline", None)

        user_agent = {"file_type": "model_card"}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline
127
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        is_local = os.path.isdir(pretrained_model_name_or_path)
        if os.path.isfile(pretrained_model_name_or_path):
            resolved_model_card_file = pretrained_model_name_or_path
            is_local = True
        else:
            try:
                # Load from URL or cache if already cached
                resolved_model_card_file = cached_file(
                    pretrained_model_name_or_path,
                    filename=MODEL_CARD_NAME,
                    cache_dir=cache_dir,
                    proxies=proxies,
                    user_agent=user_agent,
                )
                if is_local:
                    logger.info(f"loading model card file {resolved_model_card_file}")
                else:
                    logger.info(
                        f"loading model card file {MODEL_CARD_NAME} from cache at {resolved_model_card_file}"
                    )
                # Load model card
                modelcard = cls.from_json_file(resolved_model_card_file)
127
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
            except (EnvironmentError, json.JSONDecodeError):
                # We fall back on creating an empty model card
                modelcard = cls()

        # Update model card with kwargs if needed
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(modelcard, key):
                setattr(modelcard, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        logger.info(f"Model card: {modelcard}")
        if return_unused_kwargs:
            return modelcard, kwargs
        else:
            return modelcard

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `ModelCard` from a Python dictionary of parameters."""
        return cls(**json_object)
127
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `ModelCard` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        dict_obj = json.loads(text)
        return cls(**dict_obj)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
127
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
@dataclass
class TrainingSummary:
    model_name: str
    language: Optional[Union[str, List[str]]] = None
    license: Optional[str] = None
    tags: Optional[Union[str, List[str]]] = None
    finetuned_from: Optional[str] = None
    tasks: Optional[Union[str, List[str]]] = None
    dataset: Optional[Union[str, List[str]]] = None
    dataset_tags: Optional[Union[str, List[str]]] = None
    dataset_args: Optional[Union[str, List[str]]] = None
    dataset_metadata: Optional[Dict[str, Any]] = None
    eval_results: Optional[Dict[str, float]] = None
    eval_lines: Optional[List[str]] = None
    hyperparameters: Optional[Dict[str, Any]] = None
    source: Optional[str] = "trainer"
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
    def __post_init__(self):
        # Infer default license from the checkpoint used, if possible.
        if (
            self.license is None
            and not is_offline_mode()
            and self.finetuned_from is not None
            and len(self.finetuned_from) > 0
        ):
            try:
                info = model_info(self.finetuned_from)
                for tag in info.tags:
                    if tag.startswith("license:"):
                        self.license = tag[8:]
            except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, HFValidationError):
                pass

    def create_model_index(self, metric_mapping):
        model_index = {"name": self.model_name}
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        # Dataset mapping tag -> name
        dataset_names = _listify(self.dataset)
        dataset_tags = _listify(self.dataset_tags)
        dataset_args = _listify(self.dataset_args)
        dataset_metadata = _listify(self.dataset_metadata)
        if len(dataset_args) < len(dataset_tags):
            dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args))
        dataset_mapping = dict(zip(dataset_tags, dataset_names))
        dataset_arg_mapping = dict(zip(dataset_tags, dataset_args))
        dataset_metadata_mapping = dict(zip(dataset_tags, dataset_metadata))

        task_mapping = {
            task: TASK_TAG_TO_NAME_MAPPING[task]
            for task in _listify(self.tasks)
            if task in TASK_TAG_TO_NAME_MAPPING
        }

        model_index["results"] = []
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        if len(task_mapping) == 0 and len(dataset_mapping) == 0:
            return [model_index]
        if len(task_mapping) == 0:
            task_mapping = {None: None}
        if len(dataset_mapping) == 0:
            dataset_mapping = {None: None}

        # One entry per dataset and per task
        all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping]
        for task_tag, ds_tag in all_possibilities:
            result = {}
            if task_tag is not None:
                result["task"] = {"name": task_mapping[task_tag], "type": task_tag}

            if ds_tag is not None:
                metadata = dataset_metadata_mapping.get(ds_tag, {})
                result["dataset"] = {
                    "name": dataset_mapping[ds_tag],
                    "type": ds_tag,
                    **metadata,
                }
                if dataset_arg_mapping[ds_tag] is not None:
                    result["dataset"]["args"] = dataset_arg_mapping[ds_tag]
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
            if len(metric_mapping) > 0:
                result["metrics"] = []
                for metric_tag, metric_name in metric_mapping.items():
                    result["metrics"].append(
                        {
                            "name": metric_name,
                            "type": metric_tag,
                            "value": self.eval_results[metric_name],
                        }
                    )

            # Remove partial results to avoid the model card being rejected.
            if "task" in result and "dataset" in result and "metrics" in result:
                model_index["results"].append(result)
            else:
                logger.info(f"Dropping the following result as it does not have all the necessary fields:\n{result}")

        return [model_index]

    def create_metadata(self):
        metric_mapping = infer_metric_tags_from_eval_results(self.eval_results)
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        metadata = {}
        metadata = _insert_value(metadata, "library_name", "transformers")
        metadata = _insert_values_as_list(metadata, "language", self.language)
        metadata = _insert_value(metadata, "license", self.license)
        if self.finetuned_from is not None and isinstance(self.finetuned_from, str) and len(self.finetuned_from) > 0:
            metadata = _insert_value(metadata, "base_model", self.finetuned_from)
        metadata = _insert_values_as_list(metadata, "tags", self.tags)
        metadata = _insert_values_as_list(metadata, "datasets", self.dataset_tags)
        metadata = _insert_values_as_list(metadata, "metrics", list(metric_mapping.keys()))
        metadata["model-index"] = self.create_model_index(metric_mapping)

        return metadata

    def to_model_card(self):
        model_card = ""

        metadata = yaml.dump(self.create_metadata(), sort_keys=False)
        if len(metadata) > 0:
            model_card = f"---\n{metadata}---\n"
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        # Now the model card for realsies.
        if self.source == "trainer":
            model_card += AUTOGENERATED_TRAINER_COMMENT
        else:
            model_card += AUTOGENERATED_KERAS_COMMENT

        model_card += f"\n# {self.model_name}\n\n"

        if self.finetuned_from is None:
            model_card += "This model was trained from scratch on "
        else:
            model_card += (
                "This model is a fine-tuned version of"
                f" [{self.finetuned_from}](https://huggingface.co/{self.finetuned_from}) on "
            )
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        if self.dataset is None:
            model_card += "an unknown dataset."
        else:
            if isinstance(self.dataset, str):
                model_card += f"the {self.dataset} dataset."
            elif isinstance(self.dataset, (tuple, list)) and len(self.dataset) == 1:
                model_card += f"the {self.dataset[0]} dataset."
            else:
                model_card += (
                    ", ".join([f"the {ds}" for ds in self.dataset[:-1]])
                    + f" and the {self.dataset[-1]} datasets."
                )

        if self.eval_results is not None:
            model_card += "\nIt achieves the following results on the evaluation set:\n"
            model_card += "\n".join(
                [f"- {name}: {_maybe_round(value)}" for name, value in self.eval_results.items()]
            )
            model_card += "\n"
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
model_card += "\n## Model description\n\nMore information needed\n" model_card += "\n## Intended uses & limitations\n\nMore information needed\n" model_card += "\n## Training and evaluation data\n\nMore information needed\n" model_card += "\n## Training procedure\n" model_card += "\n### Training hyperparameters\n" if self.hyperparameters is not None: model_card += "\nThe following hyperparameters were used during training:\n" model_card += "\n".join([f"- {name}: {value}" for name, value in self.hyperparameters.items()]) model_card += "\n" else: model_card += "\nMore information needed\n" if self.eval_lines is not None: model_card += "\n### Training results\n\n" model_card += make_markdown_table(self.eval_lines) model_card += "\n" model_card += "\n### Framework versions\n\n" model_card += f"- Transformers {__version__}\n"
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
if self.source == "trainer" and is_torch_available(): import torch model_card += f"- Pytorch {torch.__version__}\n" elif self.source == "keras" and is_tf_available(): import tensorflow as tf model_card += f"- TensorFlow {tf.__version__}\n" if is_datasets_available(): import datasets model_card += f"- Datasets {datasets.__version__}\n" if is_tokenizers_available(): import tokenizers model_card += f"- Tokenizers {tokenizers.__version__}\n" return model_card
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
    @classmethod
    def from_trainer(
        cls,
        trainer,
        language=None,
        license=None,
        tags=None,
        model_name=None,
        finetuned_from=None,
        tasks=None,
        dataset_tags=None,
        dataset_metadata=None,
        dataset=None,
        dataset_args=None,
    ):
        # Infer default from dataset
        one_dataset = trainer.eval_dataset if trainer.eval_dataset is not None else trainer.train_dataset
        if is_hf_dataset(one_dataset) and (dataset_tags is None or dataset_args is None or dataset_metadata is None):
            default_tag = one_dataset.builder_name
            # Those are not real datasets from the Hub so we exclude them.
            if default_tag not in ["csv", "json", "pandas", "parquet", "text"]:
                if dataset_metadata is None:
                    dataset_metadata = [{"config": one_dataset.config_name, "split": str(one_dataset.split)}]
                if dataset_tags is None:
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
                    dataset_tags = [default_tag]
                if dataset_args is None:
                    dataset_args = [one_dataset.config_name]
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        if dataset is None and dataset_tags is not None:
            dataset = dataset_tags

        # Infer default finetuned_from
        if (
            finetuned_from is None
            and hasattr(trainer.model.config, "_name_or_path")
            and not os.path.isdir(trainer.model.config._name_or_path)
        ):
            finetuned_from = trainer.model.config._name_or_path

        # Infer default task tag:
        if tasks is None:
            model_class_name = trainer.model.__class__.__name__
            for task, mapping in TASK_MAPPING.items():
                if model_class_name in _get_mapping_values(mapping):
                    tasks = task

        if model_name is None:
            model_name = Path(trainer.args.output_dir).name
            if len(model_name) == 0:
                model_name = finetuned_from
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        # Add `generated_from_trainer` to the tags
        if tags is None:
            tags = ["generated_from_trainer"]
        elif isinstance(tags, str) and tags != "generated_from_trainer":
            tags = [tags, "generated_from_trainer"]
        elif "generated_from_trainer" not in tags:
            tags.append("generated_from_trainer")

        _, eval_lines, eval_results = parse_log_history(trainer.state.log_history)
        hyperparameters = extract_hyperparameters_from_trainer(trainer)

        return cls(
            language=language,
            license=license,
            tags=tags,
            model_name=model_name,
            finetuned_from=finetuned_from,
            tasks=tasks,
            dataset=dataset,
            dataset_tags=dataset_tags,
            dataset_args=dataset_args,
            dataset_metadata=dataset_metadata,
            eval_results=eval_results,
            eval_lines=eval_lines,
            hyperparameters=hyperparameters,
        )
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
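A sketch of turning a finished run into a model card with the classmethod above, assuming `trainer` is an already-trained `transformers.Trainer` instance (the model name and task tag are illustrative):

```python
from transformers.modelcard import TrainingSummary

summary = TrainingSummary.from_trainer(
    trainer,                          # assumed: a trained transformers.Trainer
    model_name="my-finetuned-model",  # illustrative
    tasks="text-classification",     # illustrative
)

# Renders the YAML metadata block plus the markdown skeleton shown above.
with open("README.md", "w", encoding="utf-8") as f:
    f.write(summary.to_model_card())
```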
    @classmethod
    def from_keras(
        cls,
        model,
        model_name,
        keras_history=None,
        language=None,
        license=None,
        tags=None,
        finetuned_from=None,
        tasks=None,
        dataset_tags=None,
        dataset=None,
        dataset_args=None,
    ):
        # Infer default from dataset
        if dataset is not None:
            if is_hf_dataset(dataset) and (dataset_tags is None or dataset_args is None):
                default_tag = dataset.builder_name
                # Those are not real datasets from the Hub so we exclude them.
                if default_tag not in ["csv", "json", "pandas", "parquet", "text"]:
                    if dataset_tags is None:
                        dataset_tags = [default_tag]
                    if dataset_args is None:
                        dataset_args = [dataset.config_name]

        if dataset is None and dataset_tags is not None:
            dataset = dataset_tags
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        # Infer default finetuned_from
        if (
            finetuned_from is None
            and hasattr(model.config, "_name_or_path")
            and not os.path.isdir(model.config._name_or_path)
        ):
            finetuned_from = model.config._name_or_path

        # Infer default task tag:
        if tasks is None:
            model_class_name = model.__class__.__name__
            for task, mapping in TASK_MAPPING.items():
                if model_class_name in _get_mapping_values(mapping):
                    tasks = task

        # Add `generated_from_keras_callback` to the tags
        if tags is None:
            tags = ["generated_from_keras_callback"]
        elif isinstance(tags, str) and tags != "generated_from_keras_callback":
            tags = [tags, "generated_from_keras_callback"]
        elif "generated_from_keras_callback" not in tags:
            tags.append("generated_from_keras_callback")
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
        if keras_history is not None:
            _, eval_lines, eval_results = parse_keras_history(keras_history)
        else:
            eval_lines = []
            eval_results = {}
        hyperparameters = extract_hyperparameters_from_keras(model)

        return cls(
            language=language,
            license=license,
            tags=tags,
            model_name=model_name,
            finetuned_from=finetuned_from,
            tasks=tasks,
            dataset_tags=dataset_tags,
            dataset=dataset,
            dataset_args=dataset_args,
            eval_results=eval_results,
            eval_lines=eval_lines,
            hyperparameters=hyperparameters,
            source="keras",
        )
128
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modelcard.py
class AdamW(Optimizer):
    """
    Implements the Adam algorithm with the weight decay fix as introduced in [Decoupled Weight Decay
    Regularization](https://arxiv.org/abs/1711.05101).
129
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
    Parameters:
        params (`Iterable[nn.parameter.Parameter]`):
            Iterable of parameters to optimize or dictionaries defining parameter groups.
        lr (`float`, *optional*, defaults to 0.001):
            The learning rate to use.
        betas (`Tuple[float,float]`, *optional*, defaults to `(0.9, 0.999)`):
            Adam's beta parameters (b1, b2).
        eps (`float`, *optional*, defaults to 1e-06):
            Adam's epsilon for numerical stability.
        weight_decay (`float`, *optional*, defaults to 0.0):
            Decoupled weight decay to apply.
        correct_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to correct bias in Adam (for instance, in the Bert TF repository they use `False`).
        no_deprecation_warning (`bool`, *optional*, defaults to `False`):
            A flag used to disable the deprecation warning (set to `True` to disable the warning).
    """
129
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
    def __init__(
        self,
        params: Iterable[nn.parameter.Parameter],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        correct_bias: bool = True,
        no_deprecation_warning: bool = False,
    ):
        if not no_deprecation_warning:
            warnings.warn(
                "This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch"
                " implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this"
                " warning",
                FutureWarning,
            )
        require_version("torch>=1.5.0")  # add_ with alpha
        if lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)")
        if not 0.0 <= betas[1] < 1.0:
129
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)") if not 0.0 <= eps: raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0") defaults = {"lr": lr, "betas": betas, "eps": eps, "weight_decay": weight_decay, "correct_bias": correct_bias} super().__init__(params, defaults)
129
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
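A minimal sketch of driving this (deprecated) optimizer with a warmup schedule; the toy linear model, step counts, and hyperparameters are illustrative:

```python
import torch
from transformers.optimization import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(10, 2)  # stand-in model for illustration

optimizer = AdamW(model.parameters(), lr=5e-5, weight_decay=0.01, no_deprecation_warning=True)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)

for _ in range(100):
    loss = model(torch.randn(4, 10)).pow(2).mean()  # dummy loss
    loss.backward()
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()
```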
    @torch.no_grad()
    def step(self, closure: Callable = None):
        """
        Performs a single optimization step.

        Arguments:
            closure (`Callable`, *optional*): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")

                state = self.state[p]
129
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(p)

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]

                state["step"] += 1

                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group["eps"])
129
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
step_size = group["lr"] if group["correct_bias"]: # No bias correction for Bert bias_correction1 = 1.0 - beta1 ** state["step"] bias_correction2 = 1.0 - beta2 ** state["step"] step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 p.addcdiv_(exp_avg, denom, value=-step_size)
129
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                # Add weight decay at the end (fixed version)
                if group["weight_decay"] > 0.0:
                    p.add_(p, alpha=(-group["lr"] * group["weight_decay"]))

        return loss
129
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
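To make the comment above concrete, a toy one-parameter illustration of L2-in-the-loss versus the decoupled decay implemented above; all scalars are illustrative, and the denominator stands in for a bias-corrected first step:

```python
import torch

p = torch.tensor([2.0])
grad = torch.tensor([0.5])
lr, wd, eps = 0.1, 0.01, 1e-6

# L2 regularization would fold the penalty gradient wd*p into `grad`, so it gets
# rescaled by Adam's adaptive 1/sqrt(v) preconditioner along with the data gradient:
l2_grad = grad + wd * p

# Decoupled decay (the code above) keeps the adaptive step on the data gradient only,
# then shrinks the weights directly, independent of the m/v statistics:
denom = grad.abs() + eps   # sqrt of the bias-corrected second moment on step 1
p = p - lr * grad / denom  # adaptive gradient step
p = p - lr * wd * p        # decoupled weight decay, matching p.add_(p, alpha=-lr*wd)
```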
class Adafactor(Optimizer):
    """
    AdaFactor PyTorch implementation that can be used as a drop-in replacement for Adam. Original fairseq code:
    https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py

    Paper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* https://arxiv.org/abs/1804.04235

    Note that this optimizer internally adjusts the learning rate depending on the `scale_parameter`,
    `relative_step` and `warmup_init` options. To use a manual (external) learning rate schedule you should set
    `scale_parameter=False` and `relative_step=False`.
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
    Arguments:
        params (`Iterable[nn.parameter.Parameter]`):
            Iterable of parameters to optimize or dictionaries defining parameter groups.
        lr (`float`, *optional*):
            The external learning rate.
        eps (`Tuple[float, float]`, *optional*, defaults to `(1e-30, 0.001)`):
            Regularization constants for the square gradient and the parameter scale, respectively.
        clip_threshold (`float`, *optional*, defaults to 1.0):
            Threshold of the root mean square of the final gradient update.
        decay_rate (`float`, *optional*, defaults to -0.8):
            Coefficient used to compute the running averages of the squared gradient.
        beta1 (`float`, *optional*):
            Coefficient used for computing the running average of the gradient.
        weight_decay (`float`, *optional*, defaults to 0.0):
            Weight decay (L2 penalty).
        scale_parameter (`bool`, *optional*, defaults to `True`):
            If True, the learning rate is scaled by the root mean square of the parameter.
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
        relative_step (`bool`, *optional*, defaults to `True`):
            If True, a time-dependent learning rate is computed instead of using an external learning rate.
        warmup_init (`bool`, *optional*, defaults to `False`):
            Time-dependent learning rate computation depends on whether warm-up initialization is being used.
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
    This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested it.

    Recommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3):

    - Training without LR warmup or clip_threshold is not recommended.

      - use scheduled LR warm-up to fixed LR
      - use clip_threshold=1.0 (https://arxiv.org/abs/1804.04235)

    - Disable relative updates
    - Use scale_parameter=False
    - Additional optimizer operations like gradient clipping should not be used alongside Adafactor

    Example:

    ```python
    Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3)
    ```

    Others reported the following combination to work well:

    ```python
    Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
    ```
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
    When using `lr=None` with [`Trainer`] you will most likely need to use the
    [`~optimization.AdafactorSchedule`] scheduler as follows:

    ```python
    from transformers.optimization import Adafactor, AdafactorSchedule

    optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
    lr_scheduler = AdafactorSchedule(optimizer)
    trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))
    ```

    Usage:

    ```python
    # replace AdamW with Adafactor
    optimizer = Adafactor(
        model.parameters(),
        lr=1e-3,
        eps=(1e-30, 1e-3),
        clip_threshold=1.0,
        decay_rate=-0.8,
        beta1=None,
        weight_decay=0.0,
        relative_step=False,
        scale_parameter=False,
        warmup_init=False,
    )
    ```"""
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
    def __init__(
        self,
        params,
        lr=None,
        eps=(1e-30, 1e-3),
        clip_threshold=1.0,
        decay_rate=-0.8,
        beta1=None,
        weight_decay=0.0,
        scale_parameter=True,
        relative_step=True,
        warmup_init=False,
    ):
        require_version("torch>=1.5.0")  # add_ with alpha
        if lr is not None and relative_step:
            raise ValueError("Cannot combine manual `lr` and `relative_step=True` options")
        if warmup_init and not relative_step:
            raise ValueError("`warmup_init=True` requires `relative_step=True`")

        defaults = {
            "lr": lr,
            "eps": eps,
            "clip_threshold": clip_threshold,
            "decay_rate": decay_rate,
            "beta1": beta1,
            "weight_decay": weight_decay,
            "scale_parameter": scale_parameter,
            "relative_step": relative_step,
            "warmup_init": warmup_init,
        }
        super().__init__(params, defaults)
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
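The two `ValueError` guards above encode the coupling between the options: an external `lr` only makes sense when the time-dependent schedule is off, and warm-up initialization only exists within that schedule. A minimal sketch of what this means at construction time (the toy `torch.nn.Linear` model is purely illustrative):

```python
import torch
from transformers.optimization import Adafactor

model = torch.nn.Linear(4, 2)  # toy model for illustration

# Valid: an external lr with the time-dependent schedule disabled
opt = Adafactor(model.parameters(), lr=1e-3, relative_step=False, scale_parameter=False, warmup_init=False)

# Invalid: a manual lr cannot be combined with relative_step=True
try:
    Adafactor(model.parameters(), lr=1e-3, relative_step=True)
except ValueError as e:
    print(e)  # Cannot combine manual `lr` and `relative_step=True` options
```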
    @staticmethod
    def _get_lr(param_group, param_state):
        rel_step_sz = param_group["lr"]
        if param_group["relative_step"]:
            min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
            rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
        param_scale = 1.0
        if param_group["scale_parameter"]:
            param_scale = max(param_group["eps"][1], param_state["RMS"])
        return param_scale * rel_step_sz

    @staticmethod
    def _get_options(param_group, param_shape):
        factored = len(param_shape) >= 2
        use_first_moment = param_group["beta1"] is not None
        return factored, use_first_moment

    @staticmethod
    def _rms(tensor):
        return tensor.norm(2) / (tensor.numel() ** 0.5)
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
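With `relative_step=True`, `_get_lr` follows the `min(1e-2, 1/sqrt(t))` schedule from the Adafactor paper, or `min(1e-6 * t, 1/sqrt(t))` when `warmup_init=True`. A small sketch that simply re-evaluates the same formula outside the optimizer:

```python
import math

def relative_step(step, warmup_init=False):
    # mirrors the relative_step=True branch of Adafactor._get_lr
    min_step = 1e-6 * step if warmup_init else 1e-2
    return min(min_step, 1.0 / math.sqrt(step))

for t in (1, 100, 10_000, 1_000_000):
    print(t, relative_step(t), relative_step(t, warmup_init=True))
# Without warmup the step size is capped at 1e-2 and decays as 1/sqrt(t);
# with warmup_init it ramps up linearly (1e-6 * t) before the decay takes over.
```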
    @staticmethod
    def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):
        # copy from fairseq's adafactor implementation:
        # https://github.com/huggingface/transformers/blob/8395f14de6068012787d83989c3627c3df6a252b/src/transformers/optimization.py#L505
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
        c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
        return torch.mul(r_factor, c_factor)

    @torch.no_grad()
    def step(self, closure=None):
        """
        Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
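Before the body of `step` continues, a standalone sketch of the factored approximation `_approx_sq_grad` computes: the full second-moment matrix is reconstructed as the outer product of the row and column means divided by the overall row mean, and the method returns its element-wise reciprocal square root in factored form. The tensors below are toy stand-ins:

```python
import torch

# Toy accumulated squared-gradient statistics for a 3x4 parameter
v = torch.rand(3, 4) + 0.1          # stand-in for the full second moment
row = v.mean(dim=-1)                # what exp_avg_sq_row tracks (shape: 3)
col = v.mean(dim=-2)                # what exp_avg_sq_col tracks (shape: 4)

# Rank-1 reconstruction from the Adafactor paper: outer(row, col) / mean(row)
v_hat = torch.outer(row, col) / row.mean()

# _approx_sq_grad returns v_hat.rsqrt() without ever materializing v_hat:
r_factor = (row / row.mean()).rsqrt().unsqueeze(-1)
c_factor = col.unsqueeze(-2).rsqrt()
assert torch.allclose(r_factor * c_factor, v_hat.rsqrt(), rtol=1e-4, atol=1e-6)
```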
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError("Adafactor does not support sparse gradients.")

                state = self.state[p]
                grad_shape = grad.shape

                factored, use_first_moment = self._get_options(group, grad_shape)
                # State Initialization
                if len(state) == 0:
                    state["step"] = 0
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
                    if use_first_moment:
                        # Exponential moving average of gradient values
                        state["exp_avg"] = torch.zeros_like(grad)
                    if factored:
                        state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
                        state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
                    else:
                        state["exp_avg_sq"] = torch.zeros_like(grad)

                    state["RMS"] = 0
                else:
                    if use_first_moment:
                        state["exp_avg"] = state["exp_avg"].to(grad)
                    if factored:
                        state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
                        state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
                    else:
                        state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
                p_data_fp32 = p
                if p.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()

                state["step"] += 1
                state["RMS"] = self._rms(p_data_fp32)
                lr = self._get_lr(group, state)

                beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
                update = (grad**2) + group["eps"][0]
                if factored:
                    exp_avg_sq_row = state["exp_avg_sq_row"]
                    exp_avg_sq_col = state["exp_avg_sq_col"]

                    exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t))
                    exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t))
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
                    # Approximation of exponential moving average of square of gradient
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state["exp_avg_sq"]

                    exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t))
                    update = exp_avg_sq.rsqrt().mul_(grad)

                update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0))
                update.mul_(lr)

                if use_first_moment:
                    exp_avg = state["exp_avg"]
                    exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"]))
                    update = exp_avg

                if group["weight_decay"] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=(-group["weight_decay"] * lr))

                p_data_fp32.add_(-update)
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
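The `update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0))` line above implements Adafactor's update clipping: the update is rescaled only when its root mean square exceeds `clip_threshold`, which is why the docstring warns against stacking external gradient clipping on top. A small sketch with exaggerated numbers:

```python
import torch

def rms(t):
    # same definition as Adafactor._rms
    return t.norm(2) / (t.numel() ** 0.5)

update = torch.full((4,), 10.0)  # an exaggerated raw update; RMS == 10
clip_threshold = 1.0

# Adafactor's update clipping: divide by max(1, RMS(update) / clip_threshold)
clipped = update / (rms(update) / clip_threshold).clamp(min=1.0)
print(rms(clipped))  # tensor(1.), i.e. the RMS is capped at clip_threshold
```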
                if p.dtype in {torch.float16, torch.bfloat16}:
                    p.copy_(p_data_fp32)

        return loss
130
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
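Putting the optimizer to work, a minimal training-loop sketch using the fixed-`lr` configuration recommended earlier; the model and data here are toy stand-ins, not part of the library:

```python
import torch
from transformers.optimization import Adafactor

model = torch.nn.Linear(8, 1)  # toy model
optimizer = Adafactor(model.parameters(), lr=1e-3, relative_step=False, scale_parameter=False, warmup_init=False)

for _ in range(10):
    x, y = torch.randn(16, 8), torch.randn(16, 1)  # toy batch
    loss = torch.nn.functional.mse_loss(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()  # note: no external gradient clipping, per the recommendations above
```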
class AdafactorSchedule(LambdaLR):
    """
    Since [`~optimization.Adafactor`] performs its own scheduling, if the training loop relies on a scheduler (e.g.,
    for logging), this class creates a proxy object that retrieves the current lr values from the optimizer.

    It returns `initial_lr` during startup and the actual `lr` during stepping.
    """

    def __init__(self, optimizer, initial_lr=0.0):
        def lr_lambda(_):
            return initial_lr

        for group in optimizer.param_groups:
            group["initial_lr"] = initial_lr
        super().__init__(optimizer, lr_lambda)
        for group in optimizer.param_groups:
            del group["initial_lr"]

    def get_lr(self):
        opt = self.optimizer
        lrs = [
            opt._get_lr(group, opt.state[group["params"][0]])
            for group in opt.param_groups
            if group["params"][0].grad is not None
        ]
        if len(lrs) == 0:
            lrs = self.base_lrs  # if called before stepping
        return lrs
131
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization.py
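A short sketch of the proxy behavior (toy model, for illustration only): before any gradients exist, `get_lr` falls back to `base_lrs`, i.e. the `initial_lr` passed in; after a step it reports the learning rate Adafactor actually derived:

```python
import torch
from transformers.optimization import Adafactor, AdafactorSchedule

model = torch.nn.Linear(4, 2)  # toy model
optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
scheduler = AdafactorSchedule(optimizer)

print(scheduler.get_lr())  # [0.0] -- no gradients yet, falls back to initial_lr

model(torch.randn(3, 4)).sum().backward()
optimizer.step()
print(scheduler.get_lr())  # the lr Adafactor derived internally for this step
```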
class TFTrainingArguments(TrainingArguments):
    """
    TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop
    itself**.

    Using [`HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
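A minimal sketch of that flow, assuming a TensorFlow-enabled environment; the explicit `args` list stands in for real command-line flags, which would normally come from `sys.argv`:

```python
from transformers import HfArgumentParser, TFTrainingArguments

# Build an argparse CLI from the dataclass fields
parser = HfArgumentParser(TFTrainingArguments)
(training_args,) = parser.parse_args_into_dataclasses(args=["--output_dir", "./out"])
print(training_args.output_dir)  # ./out
```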
    Parameters:
        output_dir (`str`):
            The output directory where the model predictions and checkpoints will be written.
        overwrite_output_dir (`bool`, *optional*, defaults to `False`):
            If `True`, overwrite the content of the output directory. Use this to continue training if `output_dir`
            points to a checkpoint directory.
        do_train (`bool`, *optional*, defaults to `False`):
            Whether to run training or not. This argument is not directly used by [`Trainer`], it's intended to be
            used by your training/evaluation scripts instead. See the [example
            scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
        do_eval (`bool`, *optional*):
            Whether to run evaluation on the validation set or not. Will be set to `True` if `eval_strategy` is
            different from `"no"`. This argument is not directly used by [`Trainer`], it's intended to be used by your
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
            training/evaluation scripts instead. See the [example
            scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
        do_predict (`bool`, *optional*, defaults to `False`):
            Whether to run predictions on the test set or not. This argument is not directly used by [`Trainer`], it's
            intended to be used by your training/evaluation scripts instead. See the [example
            scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
        eval_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`):
            The evaluation strategy to adopt during training. Possible values are:
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
- `"no"`: No evaluation is done during training. - `"steps"`: Evaluation is done (and logged) every `eval_steps`. - `"epoch"`: Evaluation is done at the end of each epoch. per_device_train_batch_size (`int`, *optional*, defaults to 8): The batch size per GPU/TPU core/CPU for training. per_device_eval_batch_size (`int`, *optional*, defaults to 8): The batch size per GPU/TPU core/CPU for evaluation. gradient_accumulation_steps (`int`, *optional*, defaults to 1): Number of updates steps to accumulate the gradients for, before performing a backward/update pass. <Tip warning={true}> When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every `gradient_accumulation_steps * xxx_step` training examples. </Tip>
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
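A small sketch of the arithmetic implied by the tip above; `n_replicas` is a hypothetical device count used for illustration, not a `TrainingArguments` field:

```python
# Effective batch size seen by each optimizer update:
per_device_train_batch_size = 8
gradient_accumulation_steps = 4
n_replicas = 2  # hypothetical number of GPUs/TPU cores

effective_batch_size = per_device_train_batch_size * gradient_accumulation_steps * n_replicas
print(effective_batch_size)  # 64 examples contribute to every backward/update pass
```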
        learning_rate (`float`, *optional*, defaults to 5e-5):
            The initial learning rate for Adam.
        weight_decay (`float`, *optional*, defaults to 0):
            The weight decay to apply (if not zero).
        adam_beta1 (`float`, *optional*, defaults to 0.9):
            The beta1 hyperparameter for the Adam optimizer.
        adam_beta2 (`float`, *optional*, defaults to 0.999):
            The beta2 hyperparameter for the Adam optimizer.
        adam_epsilon (`float`, *optional*, defaults to 1e-8):
            The epsilon hyperparameter for the Adam optimizer.
        max_grad_norm (`float`, *optional*, defaults to 1.0):
            Maximum gradient norm (for gradient clipping).
        num_train_epochs (`float`, *optional*, defaults to 3.0):
            Total number of training epochs to perform.
        max_steps (`int`, *optional*, defaults to -1):
            If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`.
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
            For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until
            `max_steps` is reached.
        warmup_ratio (`float`, *optional*, defaults to 0.0):
            Ratio of total training steps used for a linear warmup from 0 to `learning_rate` (see the warmup sketch
            further below).
        warmup_steps (`int`, *optional*, defaults to 0):
            Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of
            `warmup_ratio`.
        logging_dir (`str`, *optional*):
            [TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to
            *runs/**CURRENT_DATETIME_HOSTNAME***.
        logging_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`):
            The logging strategy to adopt during training. Possible values are:
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
- `"no"`: No logging is done during training. - `"epoch"`: Logging is done at the end of each epoch. - `"steps"`: Logging is done every `logging_steps`. logging_first_step (`bool`, *optional*, defaults to `False`): Whether to log and evaluate the first `global_step` or not. logging_steps (`int`, *optional*, defaults to 500): Number of update steps between two logs if `logging_strategy="steps"`. save_strategy (`str` or [`~trainer_utils.SaveStrategy`], *optional*, defaults to `"steps"`): The checkpoint save strategy to adopt during training. Possible values are: - `"no"`: No save is done during training. - `"epoch"`: Save is done at the end of each epoch. - `"steps"`: Save is done every `save_steps`.
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
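As referenced next to `warmup_ratio` above, a sketch of how the two warmup options typically resolve: an explicit `warmup_steps` wins, otherwise the ratio is applied to the total step count. `resolve_warmup` is an illustrative helper, not a library function, and the real implementation may round differently:

```python
def resolve_warmup(max_steps: int, warmup_steps: int = 0, warmup_ratio: float = 0.0) -> int:
    # An explicit warmup_steps overrides any effect of warmup_ratio
    return warmup_steps if warmup_steps > 0 else int(max_steps * warmup_ratio)

print(resolve_warmup(max_steps=10_000, warmup_ratio=0.1))  # 1000 warmup steps
print(resolve_warmup(max_steps=10_000, warmup_steps=500))  # 500 -- ratio ignored
```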
        save_steps (`int`, *optional*, defaults to 500):
            Number of update steps between two checkpoint saves if `save_strategy="steps"`.
        save_total_limit (`int`, *optional*):
            If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in
            `output_dir`.
        no_cuda (`bool`, *optional*, defaults to `False`):
            Whether to avoid using CUDA, even when it is available.
        seed (`int`, *optional*, defaults to 42):
            Random seed that will be set at the beginning of training.
        fp16 (`bool`, *optional*, defaults to `False`):
            Whether to use 16-bit (mixed) precision training (through NVIDIA Apex) instead of 32-bit training.
        fp16_opt_level (`str`, *optional*, defaults to 'O1'):
            For `fp16` training, Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details on
            the [Apex documentation](https://nvidia.github.io/apex/amp).
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
        local_rank (`int`, *optional*, defaults to -1):
            During distributed training, the rank of the process.
        tpu_num_cores (`int`, *optional*):
            When training on TPU, the number of TPU cores (automatically passed by the launcher script).
        debug (`bool`, *optional*, defaults to `False`):
            Whether to activate the trace to record computation graphs and profiling information or not.
        dataloader_drop_last (`bool`, *optional*, defaults to `False`):
            Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch
            size) or not.
        eval_steps (`int`, *optional*, defaults to 1000):
            Number of update steps between two evaluations.
        past_index (`int`, *optional*, defaults to -1):
            Some models like [TransformerXL](../model_doc/transformerxl) or [XLNet](../model_doc/xlnet) can make
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py