if padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError(f"Invalid padding strategy:{padding_side}")
return encoded_inputs
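# Illustrative sketch (not part of the library): what the padding branches above produce for a
# single example that is `difference` tokens short of the target length. All values are made up.
required_input_example = [101, 2009, 102]   # hypothetical token ids
attention_mask_example = [1, 1, 1]
pad_token_id_example = 0
difference_example = 2

# padding_side == "right"
assert required_input_example + [pad_token_id_example] * difference_example == [101, 2009, 102, 0, 0]
assert attention_mask_example + [0] * difference_example == [1, 1, 1, 0, 0]

# padding_side == "left"
assert [pad_token_id_example] * difference_example + required_input_example == [0, 0, 101, 2009, 102]
assert [0] * difference_example + attention_mask_example == [0, 0, 1, 1, 1]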
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""
Converts a sequence of tokens into a single string. The simplest way to do it is `" ".join(tokens)`, but we
often want to remove sub-word tokenization artifacts at the same time.
Args:
tokens (`List[str]`): The tokens to join into a string.
Returns:
`str`: The joined tokens.
"""
raise NotImplementedError
def batch_decode(
self,
sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
**kwargs,
) -> List[str]:
"""
Convert a list of lists of token ids into a list of strings by calling decode.
Args:
sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces`.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`List[str]`: The list of decoded sentences.
"""
return [
self.decode(
seq,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
for seq in sequences
]
def decode(
self,
token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
**kwargs,
) -> str:
"""
Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove special
tokens and clean up tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
Args:
token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces`.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`str`: The decoded sentence.
"""
# Convert inputs to python lists
token_ids = to_py_obj(token_ids)
return self._decode(
token_ids=token_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
def _decode(
self,
token_ids: Union[int, List[int]],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
**kwargs,
) -> str:
raise NotImplementedError
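# Illustrative sketch (not part of the library): the decode pipeline described above,
# `convert_tokens_to_string(convert_ids_to_tokens(token_ids))`, spelled out on a toy whitespace
# vocabulary. Every name below is hypothetical and only makes the data flow concrete.
toy_vocab = {0: "hello", 1: "world", 2: "!"}

def toy_convert_ids_to_tokens(token_ids):
    return [toy_vocab[token_id] for token_id in token_ids]

def toy_convert_tokens_to_string(tokens):
    return " ".join(tokens)

assert toy_convert_tokens_to_string(toy_convert_ids_to_tokens([0, 1, 2])) == "hello world !"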
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of ids of the first sequence.
token_ids_1 (`List[int]`, *optional*):
List of ids of the second sequence.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
assert already_has_special_tokens and token_ids_1 is None, (
"You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
"Please use a slow (full python) tokenizer to activate this argument. "
"Or set `return_special_tokens_mask=True` when calling the encoding method "
"to get the special tokens mask in any tokenizer. "
)
all_special_ids = self.all_special_ids # cache the property
special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0]
return special_tokens_mask
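# Illustrative sketch (not part of the library): the mask the comprehension above builds.
# Assume the special token ids are {0, 2} (a hypothetical BOS/EOS pair) and the sequence is
# already wrapped in special tokens.
example_special_ids = {0, 2}
example_token_ids = [0, 17, 58, 2]
example_mask = [1 if token in example_special_ids else 0 for token in example_token_ids]
assert example_mask == [1, 0, 0, 1]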
@staticmethod
def clean_up_tokenization(out_string: str) -> str:
"""
Clean up a list of simple English tokenization artifacts, such as spaces before punctuation and contracted forms.
Args:
out_string (`str`): The text to clean up.
Returns:
`str`: The cleaned-up string.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string
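# Illustrative sketch (not part of the library): clean_up_tokenization applied to a typical
# detokenized string, showing the effect of the chained replacements above.
example_out_string = "do n't stop , it 's fine ."
assert (
    example_out_string.replace(" .", ".").replace(" ,", ",").replace(" n't", "n't").replace(" 's", "'s")
    == "don't stop, it's fine."
)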
def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool):
"""
Depending on the input and internal state, we might trigger a warning about a sequence that is too long for its
corresponding model.
Args:
ids (`List[int]`): The ids produced by the tokenization
max_length (`int`, *optional*): The desired max_length (does not trigger a warning if it is set)
verbose (`bool`): Whether or not to print more information and warnings.
"""
if max_length is None and len(ids) > self.model_max_length and verbose:
if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False):
logger.warning(
"Token indices sequence length is longer than the specified maximum sequence length "
f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model "
"will result in indexing errors"
)
self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True
def _switch_to_input_mode(self):
"""
Private method to put the tokenizer in input mode (when it has different modes for input/outputs)
"""
pass
def _switch_to_target_mode(self):
"""
Private method to put the tokenizer in target mode (when it has different modes for input/outputs)
"""
pass
@contextmanager
def as_target_tokenizer(self):
"""
Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to
sequence-to-sequence models that need a slightly different processing for the labels.
"""
warnings.warn(
"`as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your "
"labels by using the argument `text_target` of the regular `__call__` method (either in the same call as "
"your input texts if you use the same keyword arguments, or in a separate call."
)
self._switch_to_target_mode()
self._in_target_context_manager = True
yield
self._in_target_context_manager = False
self._switch_to_input_mode()
@classmethod
def register_for_auto_class(cls, auto_class="AutoTokenizer"):
"""
Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the
library are already mapped with `AutoTokenizer`.
<Tip warning={true}>
This API is experimental and may have some slight breaking changes in the next releases.
</Tip>
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`):
The auto class to register this new tokenizer with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
def prepare_seq2seq_batch(
self,
src_texts: List[str],
tgt_texts: Optional[List[str]] = None,
max_length: Optional[int] = None,
max_target_length: Optional[int] = None,
padding: str = "longest",
return_tensors: str = None,
truncation: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Prepare model inputs for translation. For best performance, translate one sentence at a time.
Arguments:
src_texts (`List[str]`):
List of documents to summarize or source language texts.
tgt_texts (`list`, *optional*):
List of summaries or target language texts.
max_length (`int`, *optional*):
Controls the maximum length for encoder inputs (documents to summarize or source language texts). If
left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
required by one of the truncation/padding parameters. If the model has no specific maximum input length
(like XLNet), truncation/padding to a maximum length will be deactivated.
max_target_length (`int`, *optional*):
Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set
to `None`, this will use the `max_length` value.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `'longest'`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `True`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will
truncate token by token, removing a token from the longest sequence in the pair if a pair of
sequences (or a batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).
**kwargs:
Additional keyword arguments passed along to `self.__call__`.
Return:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to the encoder.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
- **labels** -- List of token ids for tgt_texts.
The full set of keys `[input_ids, attention_mask, labels]` will only be returned if `tgt_texts` is passed.
Otherwise, `input_ids` and `attention_mask` will be the only keys.
"""
# docstyle-ignore
formatted_warning = """
`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular
`__call__` method to prepare your inputs and targets.
Here is a short example:
model_inputs = tokenizer(src_texts, text_target=tgt_texts, ...)
If you need to use different keyword arguments for the source and target texts, you should do two calls like this:
model_inputs = tokenizer(src_texts, ...)
labels = tokenizer(text_target=tgt_texts, ...)
model_inputs["labels"] = labels["input_ids"]
See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice.
For a more complete example, see the implementation of `prepare_seq2seq_batch`.
"""
warnings.warn(formatted_warning, FutureWarning)
# mBART-specific kwargs that should be ignored by other models.
kwargs.pop("src_lang", None)
kwargs.pop("tgt_lang", None)
if max_length is None:
max_length = self.model_max_length
model_inputs = self(
src_texts,
add_special_tokens=True,
return_tensors=return_tensors,
max_length=max_length,
padding=padding,
truncation=truncation,
**kwargs,
)
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
max_target_length = max_length
with self.as_target_tokenizer():
labels = self(
tgt_texts,
add_special_tokens=True,
return_tensors=return_tensors,
padding=padding,
max_length=max_target_length,
truncation=truncation,
**kwargs,
)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
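# Illustrative sketch (not part of the library): the replacement pattern recommended by the
# deprecation warning above. The checkpoint name and texts are assumptions for the example,
# and running it requires the `transformers` package plus network access for the download.
from transformers import AutoTokenizer

example_tokenizer = AutoTokenizer.from_pretrained("t5-small")
example_inputs = example_tokenizer(
    ["translate English to German: Hello."],
    text_target=["Hallo."],
    padding="longest",
    truncation=True,
    return_tensors="pt",
)
# example_inputs["labels"] now holds the tokenized targets, mirroring prepare_seq2seq_batch.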
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on the context's exit, so that one can see what the test was
doing. If this is not the desired behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: `logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
class TemporaryHubRepo:
"""Create a temporary Hub repository and return its `RepoUrl` object. This is similar to
`tempfile.TemporaryDirectory` and can be used as a context manager. For example:
with TemporaryHubRepo(token=self._token) as temp_repo:
...
Upon exiting the context, the repository and everything contained in it are removed.
Example:
```python
with TemporaryHubRepo(token=self._token) as temp_repo:
model.push_to_hub(temp_repo.repo_id, token=self._token)
```
"""
def __init__(self, namespace: Optional[str] = None, token: Optional[str] = None) -> None:
self.token = token
with tempfile.TemporaryDirectory() as tmp_dir:
repo_id = Path(tmp_dir).name
if namespace is not None:
repo_id = f"{namespace}/{repo_id}"
self.repo_url = huggingface_hub.create_repo(repo_id, token=self.token)
def __enter__(self):
return self.repo_url
def __exit__(self, exc, value, tb):
delete_repo(repo_id=self.repo_url.repo_id, token=self.token, missing_ok=True)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths (same as above, but returning paths as strings rather than `pathlib` objects):
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir (`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
class SubprocessCallException(Exception):
pass
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self._thread_id = threading.get_ident()
self._extra_info = []
def patched_with_thread_info(func):
def wrap(*args, **kwargs):
self._extra_info.append(threading.get_ident())
return func(*args, **kwargs)
return wrap
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", side_effect=patched_with_thread_info(urllib3.connectionpool.log.debug)
)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
assert len(self.mock.call_args_list) == len(self._extra_info)
for thread_id, call in zip(self._extra_info, self.mock.call_args_list):
if thread_id != self._thread_id:
continue
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HfDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
class PytorchGELUTanh(nn.Module):
"""
A fast C implementation of the tanh approximation of the GeLU activation function. See
https://arxiv.org/abs/1606.08415.
This implementation is equivalent to NewGELU and FastGELU but much faster. However, it is not an exact numerical
match due to rounding errors.
"""
def __init__(self):
super().__init__()
if version.parse(torch.__version__) < version.parse("1.12.0"):
raise ImportError(
f"You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use "
"PytorchGELUTanh. Please upgrade torch."
)
def forward(self, input: Tensor) -> Tensor:
return nn.functional.gelu(input, approximate="tanh")
class NewGELUActivation(nn.Module):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
def forward(self, input: Tensor) -> Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
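# Illustrative sketch (not part of the library): numerically, the formula above matches PyTorch's
# built-in tanh approximation of GELU (available as `approximate="tanh"` since torch 1.12).
# The tolerance is an assumption for the comparison, not a documented guarantee.
import math
import torch
from torch import nn

_x = torch.linspace(-4.0, 4.0, steps=9)
_new_gelu = 0.5 * _x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (_x + 0.044715 * torch.pow(_x, 3.0))))
assert torch.allclose(_new_gelu, nn.functional.gelu(_x, approximate="tanh"), atol=1e-6)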
class GELUActivation(nn.Module):
"""
Original Implementation of the GELU activation function in Google BERT repo when initially created. For
information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
def __init__(self, use_gelu_python: bool = False):
super().__init__()
if use_gelu_python:
self.act = self._gelu_python
else:
self.act = nn.functional.gelu
def _gelu_python(self, input: Tensor) -> Tensor:
return input * 0.5 * (1.0 + torch.erf(input / math.sqrt(2.0)))
def forward(self, input: Tensor) -> Tensor:
return self.act(input)
class FastGELUActivation(nn.Module):
"""
Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs
"""
def forward(self, input: Tensor) -> Tensor:
return 0.5 * input * (1.0 + torch.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * input)))
class QuickGELUActivation(nn.Module):
"""
Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
"""
def forward(self, input: Tensor) -> Tensor:
return input * torch.sigmoid(1.702 * input)
class ClippedGELUActivation(nn.Module):
"""
Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purposes, as
it allows mapping negative values in the GeLU spectrum. For more information on this trick, please refer to
https://arxiv.org/abs/2004.09602.
Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))). See https://arxiv.org/abs/1606.08415
"""
def __init__(self, min: float, max: float):
if min > max:
raise ValueError(f"min should be < max (got min: {min}, max: {max})")
super().__init__()
self.min = min
self.max = max
def forward(self, x: Tensor) -> Tensor:
return torch.clip(gelu(x), self.min, self.max)
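# Illustrative sketch (not part of the library): the clipping above caps the negative GELU tail.
# `nn.functional.gelu` stands in for the module-level `gelu` used above, which is an assumption.
import torch
from torch import nn

_clip_x = torch.tensor([-0.75, 0.0, 3.0])
_clipped = torch.clip(nn.functional.gelu(_clip_x), -0.1, 10.0)
# gelu(-0.75) is roughly -0.17, so it gets clipped up to the -0.1 floor.
assert torch.isclose(_clipped[0], torch.tensor(-0.1))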
class AccurateGELUActivation(nn.Module):
"""
Applies GELU approximation that is faster than default and more accurate than QuickGELU. See:
https://github.com/hendrycks/GELUs
Implemented along with MEGA (Moving Average Equipped Gated Attention)
"""
def __init__(self):
super().__init__()
self.precomputed_constant = math.sqrt(2 / math.pi)
def forward(self, input: Tensor) -> Tensor:
return 0.5 * input * (1 + torch.tanh(self.precomputed_constant * (input + 0.044715 * torch.pow(input, 3))))
class MishActivation(nn.Module):
"""
See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
visit the official repository for the paper: https://github.com/digantamisra98/Mish
"""
def __init__(self):
super().__init__()
if version.parse(torch.__version__) < version.parse("1.9.0"):
self.act = self._mish_python
else:
self.act = nn.functional.mish
def _mish_python(self, input: Tensor) -> Tensor:
return input * torch.tanh(nn.functional.softplus(input))
def forward(self, input: Tensor) -> Tensor:
return self.act(input)
class LinearActivation(nn.Module):
"""
Applies the linear activation function, i.e. forwarding input directly to output.
"""
def forward(self, input: Tensor) -> Tensor:
return input
class LaplaceActivation(nn.Module):
"""
Applies elementwise activation based on Laplace function, introduced in MEGA as an attention activation. See
https://arxiv.org/abs/2209.10655
Inspired by squared relu, but with bounded range and gradient for better stability
"""
def forward(self, input, mu=0.707107, sigma=0.282095):
input = (input - mu).div(sigma * math.sqrt(2.0))
return 0.5 * (1.0 + torch.erf(input))
class ReLUSquaredActivation(nn.Module):
"""
Applies the relu^2 activation introduced in https://arxiv.org/abs/2109.08668v2
"""
def forward(self, input):
relu_applied = nn.functional.relu(input)
squared = torch.square(relu_applied)
return squared
class ClassInstantier(OrderedDict):
def __getitem__(self, key):
content = super().__getitem__(key)
cls, kwargs = content if isinstance(content, tuple) else (content, {})
return cls(**kwargs)
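# Illustrative sketch (not part of the library's actual mapping): ClassInstantier lets a plain
# dict of names map to fresh activation instances, optionally with constructor kwargs. The two
# entries below are assumptions chosen from the classes defined above, not the full mapping.
EXAMPLE_ACT2CLS = {
    "gelu_python": (GELUActivation, {"use_gelu_python": True}),
    "quick_gelu": QuickGELUActivation,
}
EXAMPLE_ACT2FN = ClassInstantier(EXAMPLE_ACT2CLS)

quick_gelu_instance = EXAMPLE_ACT2FN["quick_gelu"]     # builds QuickGELUActivation()
gelu_python_instance = EXAMPLE_ACT2FN["gelu_python"]   # builds GELUActivation(use_gelu_python=True)
assert isinstance(quick_gelu_instance, QuickGELUActivation)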
class EvalPrediction:
"""
Evaluation output (always contains labels), to be used to compute metrics.
Parameters:
predictions (`np.ndarray`): Predictions of the model.
label_ids (`np.ndarray`): Targets to be matched.
inputs (`np.ndarray`, *optional*): Input data passed to the model.
losses (`np.ndarray`, *optional*): Loss values computed during evaluation.
"""
def __init__(
self,
predictions: Union[np.ndarray, Tuple[np.ndarray]],
label_ids: Union[np.ndarray, Tuple[np.ndarray]],
inputs: Optional[Union[np.ndarray, Tuple[np.ndarray]]] = None,
losses: Optional[Union[np.ndarray, Tuple[np.ndarray]]] = None,
):
self.predictions = predictions
self.label_ids = label_ids
self.inputs = inputs
self.losses = losses
self.elements = (self.predictions, self.label_ids)
if self.inputs is not None:
self.elements += (self.inputs,)
if self.losses is not None:
self.elements += (self.losses,)
def __iter__(self):
return iter(self.elements)
def __getitem__(self, idx):
if idx < 0 or idx >= len(self.elements):
raise IndexError("tuple index out of range")
return self.elements[idx]
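# Illustrative sketch (not part of the library): how a compute_metrics function typically unpacks
# an EvalPrediction. The arrays below are made up for the example.
import numpy as np

example_eval_pred = EvalPrediction(
    predictions=np.array([[0.1, 0.9], [0.8, 0.2]]),
    label_ids=np.array([1, 0]),
)
example_predictions, example_label_ids = example_eval_pred  # __iter__ yields (predictions, label_ids)
example_accuracy = (example_predictions.argmax(axis=-1) == example_label_ids).mean()
assert example_accuracy == 1.0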
class EvalLoopOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
metrics: Optional[Dict[str, float]]
num_samples: Optional[int]
class PredictionOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
global_step: int
training_loss: float
metrics: Dict[str, float]
class IntervalStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class SaveStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
BEST = "best"
class EvaluationStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class HubStrategy(ExplicitEnum):
END = "end"
EVERY_SAVE = "every_save"
CHECKPOINT = "checkpoint"
ALL_CHECKPOINTS = "all_checkpoints"
class BestRun(NamedTuple):
"""
The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]).
Parameters:
run_id (`str`):
The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
with run-{run_id}).
objective (`float`):
The objective that was obtained for this run.
hyperparameters (`Dict[str, Any]`):
The hyperparameters picked to get this run.
run_summary (`Optional[Any]`):
A summary of tuning experiments. `ray.tune.ExperimentAnalysis` object for Ray backend.
"""
run_id: str
objective: Union[float, List[float]]
hyperparameters: Dict[str, Any]
run_summary: Optional[Any] = None
class HPSearchBackend(ExplicitEnum):
OPTUNA = "optuna"
RAY = "ray"
SIGOPT = "sigopt"
WANDB = "wandb"
class SchedulerType(ExplicitEnum):
"""
Scheduler names for the parameter `lr_scheduler_type` in [`TrainingArguments`].
By default, it uses "linear". Internally, this retrieves `get_linear_schedule_with_warmup` scheduler from [`Trainer`].
Scheduler types:
- "linear" = get_linear_schedule_with_warmup
- "cosine" = get_cosine_schedule_with_warmup
- "cosine_with_restarts" = get_cosine_with_hard_restarts_schedule_with_warmup
- "polynomial" = get_polynomial_decay_schedule_with_warmup
- "constant" = get_constant_schedule
- "constant_with_warmup" = get_constant_schedule_with_warmup
- "inverse_sqrt" = get_inverse_sqrt_schedule
- "reduce_lr_on_plateau" = get_reduce_on_plateau_schedule
- "cosine_with_min_lr" = get_cosine_with_min_lr_schedule_with_warmup
- "warmup_stable_decay" = get_wsd_schedule
"""
LINEAR = "linear"
COSINE = "cosine"
COSINE_WITH_RESTARTS = "cosine_with_restarts"
POLYNOMIAL = "polynomial"
CONSTANT = "constant"
CONSTANT_WITH_WARMUP = "constant_with_warmup"
INVERSE_SQRT = "inverse_sqrt"
REDUCE_ON_PLATEAU = "reduce_lr_on_plateau"
COSINE_WITH_MIN_LR = "cosine_with_min_lr"
WARMUP_STABLE_DECAY = "warmup_stable_decay"
class TrainerMemoryTracker:
"""
A helper class that tracks cpu and gpu memory.
This class will silently skip unless `psutil` is available. Install with `pip install psutil`.
When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.
Example:
```python
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# code ...
metrics = {"train_runtime": 10.5}
self._memory_tracker.stop_and_update_metrics(metrics)
```
At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`.
To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`].
"""
# map trainer methods to metrics prefix
stages = {
"__init__": "init",
"train": "train",
"_inner_training_loop": "train",
"evaluate": "eval",
"predict": "test",
}
def __init__(self, skip_memory_metrics=False):
self.skip_memory_metrics = skip_memory_metrics
if not is_psutil_available():
# soft dependency on psutil
self.skip_memory_metrics = True
if self.skip_memory_metrics:
return
import psutil # noqa
if is_torch_cuda_available() or is_torch_mlu_available() or is_torch_musa_available():
import torch
self.torch = torch
self.gpu = {}
elif is_torch_mps_available():
import torch
self.torch = torch
self.gpu = {}
elif is_torch_xpu_available():
import torch
self.torch = torch
self.gpu = {}
elif is_torch_npu_available():
import torch
self.torch = torch
self.gpu = {}
else:
self.torch = None
self.process = psutil.Process()
self.cur_stage = None
self.cpu = {}
self.init_reported = False
def derive_stage(self):
"""derives the stage/caller name automatically"""
caller = inspect.currentframe().f_back.f_back.f_code.co_name
if caller in self.stages:
return self.stages[caller]
else:
raise ValueError(
f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
)
def cpu_mem_used(self):
"""get resident set size memory for the current process"""
return self.process.memory_info().rss
def peak_monitor_func(self):
self.cpu_mem_used_peak = -1
while True:
self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
# time.sleep(0.001) # 1msec
if not self.peak_monitoring:
break
def start(self):
"""start tracking for the caller's stage"""
if self.skip_memory_metrics:
return
stage = self.derive_stage()
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
self.cur_stage = stage
gc.collect()
if self.torch is not None:
if torch.cuda.is_available():
self.torch.cuda.reset_peak_memory_stats()
self.torch.cuda.empty_cache()
elif is_torch_mlu_available():
self.torch.mlu.reset_peak_memory_stats()
self.torch.mlu.empty_cache()
elif is_torch_musa_available():
self.torch.musa.reset_peak_memory_stats()
self.torch.musa.empty_cache()
elif is_torch_xpu_available():
self.torch.xpu.reset_peak_memory_stats()
self.torch.xpu.empty_cache()
elif is_torch_npu_available():
self.torch.npu.reset_peak_memory_stats()
self.torch.npu.empty_cache()
elif is_torch_mps_available():
self.torch.mps.empty_cache()
# gpu
if self.torch is not None:
if torch.cuda.is_available():
self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
elif is_torch_mlu_available():
self.gpu_mem_used_at_start = self.torch.mlu.memory_allocated()
elif is_torch_musa_available():
self.gpu_mem_used_at_start = self.torch.musa.memory_allocated()
elif is_torch_xpu_available():
self.gpu_mem_used_at_start = self.torch.xpu.memory_allocated()
elif is_torch_npu_available():
self.gpu_mem_used_at_start = self.torch.npu.memory_allocated()
elif is_torch_mps_available():
self.gpu_mem_used_at_start = self.torch.mps.current_allocated_memory()
# cpu
self.cpu_mem_used_at_start = self.cpu_mem_used()
self.peak_monitoring = True
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start()
def stop(self, stage):
"""stop tracking for the passed stage"""
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# this sends a signal to peak_monitor_func to complete its loop
self.peak_monitoring = False
# first ensure all objects get collected and their memory is freed
gc.collect()
if self.torch is not None:
if torch.cuda.is_available():
self.torch.cuda.empty_cache()
elif is_torch_mlu_available():
self.torch.mlu.empty_cache()
elif is_torch_musa_available():
self.torch.musa.empty_cache()
elif is_torch_xpu_available():
self.torch.xpu.empty_cache()
elif is_torch_npu_available():
self.torch.npu.empty_cache()
elif is_torch_mps_available():
self.torch.mps.empty_cache()
# concepts:
# - alloc_delta: the difference of allocated memory between the end and the start
# - peaked_delta: the difference between the peak memory and the current memory
# in order to know how much memory the measured code consumed one needs to sum these two
# gpu
if self.torch is not None:
if torch.cuda.is_available():
self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
elif is_torch_mlu_available():
self.gpu_mem_used_now = self.torch.mlu.memory_allocated()
self.gpu_mem_used_peak = self.torch.mlu.max_memory_allocated()
elif is_torch_musa_available():
self.gpu_mem_used_now = self.torch.musa.memory_allocated()
self.gpu_mem_used_peak = self.torch.musa.max_memory_allocated()
elif is_torch_xpu_available():
self.gpu_mem_used_now = self.torch.xpu.memory_allocated()
self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated()
elif is_torch_npu_available():
self.gpu_mem_used_now = self.torch.npu.memory_allocated()
self.gpu_mem_used_peak = self.torch.npu.max_memory_allocated()
elif is_torch_mps_available():
self.gpu_mem_used_now = self.torch.mps.current_allocated_memory()
# self.torch.mps.max_memory_allocated() does not exist yet
self.gpu_mem_used_peak = None
else:
raise ValueError("No available GPU device found!")
self.gpu[self.cur_stage] = {
"begin": self.gpu_mem_used_at_start,
"end": self.gpu_mem_used_now,
"alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start),
}
if self.gpu_mem_used_peak is not None:
self.gpu[self.cur_stage]["peaked"] = max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now)
else:
self.gpu[self.cur_stage]["peaked"] = "Not available"
# cpu
self.cpu_mem_used_now = self.cpu_mem_used()
self.cpu[self.cur_stage] = {
"begin": self.cpu_mem_used_at_start,
"end": self.cpu_mem_used_now,
"alloc": (self.cpu_mem_used_now - self.cpu_mem_used_at_start),
"peaked": max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
}
# reset - cycle finished
self.cur_stage = None
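# Illustrative sketch (not part of the library): the alloc/peaked bookkeeping above in numbers.
# The byte counts are made up for the example.
example_mem_at_start = 2_000
example_mem_now = 2_500
example_mem_peak = 3_100

example_alloc = example_mem_now - example_mem_at_start        # 500 bytes still held at the end
example_peaked = max(0, example_mem_peak - example_mem_now)   # 600 bytes of transient overhead
assert example_alloc + example_peaked == 1_100                # total the measured stage consumed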
def update_metrics(self, stage, metrics):
"""updates the metrics"""
if self.skip_memory_metrics:
return
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# since we don't have a way to return init metrics, we push them into the first of train/val/predict
stages = [stage]
if not self.init_reported:
stages.insert(0, "init")
self.init_reported = True
for stage in stages:
for t in ["alloc", "peaked"]:
if stage in self.cpu and t in self.cpu[stage]:
metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]
# if we need additional debug info, enable the following
# for t in ["begin", "end"]:
# if stage in self.cpu and t in self.cpu[stage]:
# metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t]
# if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
# metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t]
# since memory can be allocated before init, and it might be difficult to track overall
# memory usage, in particular for GPU, let's report memory usage at the point init was called
if stages[0] == "init":
metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"]
if self.torch is not None:
metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"]
# if we also wanted to report any additional memory allocations in between init and
# whatever the next stage was we could also report this:
# if self.cpu["init"]["end"] != self.cpu[stage]["begin"]:
# metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"]
# if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]:
# metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"]
def stop_and_update_metrics(self, metrics=None):
"""combine stop and metrics update in one call for simpler code"""
if self.skip_memory_metrics:
return
stage = self.derive_stage()
self.stop(stage)
# init doesn't have metrics to update so we just save that data for later stages to retrieve
if metrics is not None:
self.update_metrics(stage, metrics)