if max_new_tokens is not None:
forward_params["max_new_tokens"] = max_new_tokens
if generate_kwargs is not None:
if max_new_tokens is not None and "max_new_tokens" in generate_kwargs:
raise ValueError(
"`max_new_tokens` is defined both as an argument and inside `generate_kwargs` argument, please use"
" only 1 version"
)
forward_params.update(generate_kwargs)
if self.assistant_model is not None:
forward_params["assistant_model"] = self.assistant_model
if self.assistant_tokenizer is not None:
forward_params["tokenizer"] = self.tokenizer
forward_params["assistant_tokenizer"] = self.assistant_tokenizer
return preprocess_params, forward_params, {}
def __call__(self, inputs: Union[str, List[str], "Image.Image", List["Image.Image"]] = None, **kwargs):
"""
Generate text (e.g. a caption) for the image(s) passed as inputs.
Args:
inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a HTTP(s) link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images.
max_new_tokens (`int`, *optional*):
The maximum number of tokens to generate. By default, the `generate` defaults are used.
generate_kwargs (`Dict`, *optional*):
Keyword arguments passed directly to `generate`, allowing full control over the generation call.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A list or a list of lists of `dict`: Each result comes as a dictionary with the following key:
- **generated_text** (`str`) -- The generated text.
"""
# After the deprecation of `images` is completed, remove the default `None` value for `inputs`
if "images" in kwargs:
inputs = kwargs.pop("images")
if inputs is None:
raise ValueError("Cannot call the image-to-text pipeline without an inputs argument!")
return super().__call__(inputs, **kwargs)
def preprocess(self, image, prompt=None, timeout=None):
image = load_image(image, timeout=timeout)
if prompt is not None:
logger.warning_once(
"Passing `prompt` to the `image-to-text` pipeline is deprecated and will be removed in version 4.48"
" of π€ Transformers. Use the `image-text-to-text` pipeline instead",
)
if not isinstance(prompt, str):
raise ValueError(
f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
"Note also that one single text can be provided for conditional image to text generation."
)
model_type = self.model.config.model_type
if model_type == "git":
model_inputs = self.image_processor(images=image, return_tensors=self.framework)
if self.framework == "pt":
model_inputs = model_inputs.to(self.torch_dtype)
input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
input_ids = [self.tokenizer.cls_token_id] + input_ids
input_ids = torch.tensor(input_ids).unsqueeze(0)
model_inputs.update({"input_ids": input_ids})
elif model_type == "pix2struct":
model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
if self.framework == "pt":
model_inputs = model_inputs.to(self.torch_dtype)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
model_inputs = self.image_processor(images=image, return_tensors=self.framework)
if self.framework == "pt":
model_inputs = model_inputs.to(self.torch_dtype)
text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
model_inputs.update(text_inputs)
else:
raise ValueError(f"Model type {model_type} does not support conditional text generation")
else:
model_inputs = self.image_processor(images=image, return_tensors=self.framework)
if self.framework == "pt":
model_inputs = model_inputs.to(self.torch_dtype)
if self.model.config.model_type == "git" and prompt is None:
model_inputs["input_ids"] = None
return model_inputs
def _forward(self, model_inputs, **generate_kwargs):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
# pipeline will group them into a list of `None` values, which would fail in `_forward`. Avoid this by checking first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"], list)
and all(x is None for x in model_inputs["input_ids"])
):
model_inputs["input_ids"] = None
# User-defined `generation_config` passed to the pipeline call takes precedence
if "generation_config" not in generate_kwargs:
generate_kwargs["generation_config"] = self.generation_config
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
inputs = model_inputs.pop(self.model.main_input_name)
model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
return model_outputs
def postprocess(self, model_outputs):
records = []
for output_ids in model_outputs:
record = {
"generated_text": self.tokenizer.decode(
output_ids,
skip_special_tokens=True,
)
}
records.append(record)
return records
class ImageFeatureExtractionPipeline(Pipeline):
"""
Image feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
Example:
```python
>>> from transformers import pipeline
>>> extractor = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction")
>>> result = extractor("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", return_tensors=True)
>>> result.shape  # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input image.
torch.Size([1, 197, 768])
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This image feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
`"image-feature-extraction"`.
All vision models may be used for this pipeline. See a list of all models, including community-contributed models on
[huggingface.co/models](https://huggingface.co/models).
"""
def _sanitize_parameters(self, image_processor_kwargs=None, return_tensors=None, pool=None, **kwargs):
preprocess_params = {} if image_processor_kwargs is None else image_processor_kwargs
postprocess_params = {}
if pool is not None:
postprocess_params["pool"] = pool
if return_tensors is not None:
postprocess_params["return_tensors"] = return_tensors
if "timeout" in kwargs:
preprocess_params["timeout"] = kwargs["timeout"]
return preprocess_params, {}, postprocess_params
def preprocess(self, image, timeout=None, **image_processor_kwargs) -> Dict[str, GenericTensor]:
image = load_image(image, timeout=timeout)
model_inputs = self.image_processor(image, return_tensors=self.framework, **image_processor_kwargs)
if self.framework == "pt":
model_inputs = model_inputs.to(self.torch_dtype)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, pool=None, return_tensors=False):
pool = pool if pool is not None else False
if pool:
if "pooler_output" not in model_outputs:
raise ValueError(
"No pooled output was returned. Make sure the model has a `pooler` layer when using the `pool` option."
)
outputs = model_outputs["pooler_output"]
else:
# [0] is the first available tensor, logits or last_hidden_state.
outputs = model_outputs[0]
if return_tensors:
return outputs
if self.framework == "pt":
return outputs.tolist()
elif self.framework == "tf":
return outputs.numpy().tolist()
def __call__(self, *args, **kwargs):
"""
Extract the features of the input(s).
Args:
images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images, which must then be passed as a string.
Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL
images.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and
the call may block forever.
Return:
A nested list of `float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs)
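# A minimal sketch of the `pool` option handled in `postprocess` above: with `pool=True` the pipeline
# returns the model's `pooler_output` instead of the full hidden-state sequence, so the underlying model
# must expose a pooler layer. The checkpoint is the one from the class docstring; note that for
# classification checkpoints the pooler weights may be newly initialized when loaded as a base model.
from transformers import pipeline

extractor = pipeline(task="image-feature-extraction", model="google/vit-base-patch16-224")
pooled = extractor(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    pool=True,
    return_tensors=True,
)
print(pooled.shape)  # expected torch.Size([1, 768]) for this checkpoint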
class AudioClassificationPipeline(Pipeline):
"""
Audio classification pipeline using any `AutoModelForAudioClassification`. This pipeline predicts the class of a
raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio
formats.
Example:
```python
>>> from transformers import pipeline
>>> classifier = pipeline(model="superb/wav2vec2-base-superb-ks")
>>> classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
[{'score': 0.997, 'label': '_unknown_'}, {'score': 0.002, 'label': 'left'}, {'score': 0.0, 'label': 'yes'}, {'score': 0.0, 'label': 'down'}, {'score': 0.0, 'label': 'stop'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"audio-classification"`.
See the list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=audio-classification).
"""
def __init__(self, *args, **kwargs):
# Default, might be overridden by the model.config.
kwargs["top_k"] = kwargs.get("top_k", 5)
super().__init__(*args, **kwargs)
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch.")
self.check_model_type(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES)
def __call__(
self,
inputs: Union[np.ndarray, bytes, str],
**kwargs,
):
"""
Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more
information.
Args:
inputs (`np.ndarray` or `bytes` or `str` or `dict`):
The inputs can be one of:
- `str` that is the filename of the audio file, the file will be read at the correct sampling rate
to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.
- `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
same way.
- (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
Raw audio at the correct sampling rate (no further check will be done)
- `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
pipeline do the resampling. The dict must be either be in the format `{"sampling_rate": int,
"raw": np.array}`, or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or
`"array"` is used to denote the raw audio waveform.
top_k (`int`, *optional*, defaults to None):
The number of top labels that will be returned by the pipeline. If the provided number is `None` or
higher than the number of labels available in the model configuration, it will default to the number of
labels.
function_to_apply (`str`, *optional*, defaults to "softmax"):
The function to apply to the model output. By default, the pipeline will apply the softmax function to
the output of the model. Valid options: ["softmax", "sigmoid", "none"]. Note that passing Python's
built-in `None` will default to "softmax", so you need to pass the string "none" to disable any
post-processing.
Return:
A list of `dict` with the following keys:
- **label** (`str`) -- The label predicted.
- **score** (`float`) -- The corresponding probability.
"""
return super().__call__(inputs, **kwargs)
def _sanitize_parameters(self, top_k=None, function_to_apply=None, **kwargs):
# Only postprocessing parameters are configurable for this pipeline right now
postprocess_params = {}
if top_k is not None:
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
postprocess_params["top_k"] = top_k
if function_to_apply is not None:
if function_to_apply not in ["softmax", "sigmoid", "none"]:
raise ValueError(
f"Invalid value for `function_to_apply`: {function_to_apply}. "
"Valid options are ['softmax', 'sigmoid', 'none']"
)
postprocess_params["function_to_apply"] = function_to_apply
else:
postprocess_params["function_to_apply"] = "softmax"
return {}, {}, postprocess_params
def preprocess(self, inputs):
if isinstance(inputs, str):
if inputs.startswith("http://") or inputs.startswith("https://"):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
inputs = requests.get(inputs).content
else:
with open(inputs, "rb") as f:
inputs = f.read()
if isinstance(inputs, bytes):
inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)
if isinstance(inputs, dict):
inputs = inputs.copy() # So we don't mutate the original dictionary outside the pipeline
# Accepting `"array"` which is the key defined in `datasets` for
# better integration
if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
raise ValueError(
"When passing a dictionary to AudioClassificationPipeline, the dict needs to contain a "
'"sampling_rate" key containing the sampling rate of the audio, and either a "raw" or an '
'"array" key containing the numpy array representing the audio waveform'
)
_inputs = inputs.pop("raw", None)
if _inputs is None:
# Remove the `path` key provided by `datasets`, which is not used here.
inputs.pop("path", None)
_inputs = inputs.pop("array", None)
in_sampling_rate = inputs.pop("sampling_rate")
inputs = _inputs
if in_sampling_rate != self.feature_extractor.sampling_rate:
import torch
if is_torchaudio_available():
from torchaudio import functional as F
else:
raise ImportError(
"torchaudio is required to resample audio samples in AudioClassificationPipeline. "
"The torchaudio package can be installed through: `pip install torchaudio`."
)
inputs = F.resample(
torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate
).numpy()
if not isinstance(inputs, np.ndarray):
raise TypeError("We expect a numpy ndarray as input")
if len(inputs.shape) != 1:
raise ValueError("We expect a single channel audio input for AudioClassificationPipeline")
processed = self.feature_extractor(
inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
)
return processed
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, top_k=5, function_to_apply="softmax"):
if function_to_apply == "softmax":
probs = model_outputs.logits[0].softmax(-1)
elif function_to_apply == "sigmoid":
probs = model_outputs.logits[0].sigmoid()
else:
probs = model_outputs.logits[0]
scores, ids = probs.topk(top_k)
scores = scores.tolist()
ids = ids.tolist()
labels = [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
return labels
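# A minimal sketch of the dict input format accepted by `preprocess` above: raw audio plus its
# `sampling_rate`. Since 8 kHz differs from the feature extractor's rate, the pipeline resamples with
# torchaudio, which must therefore be installed. The checkpoint is the one from the class docstring.
import numpy as np

from transformers import pipeline

classifier = pipeline(task="audio-classification", model="superb/wav2vec2-base-superb-ks")
one_second_of_silence = {"sampling_rate": 8000, "array": np.zeros(8000, dtype=np.float32)}
print(classifier(one_second_of_silence, top_k=2))  # two {"score", "label"} dicts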
class VideoClassificationPipeline(Pipeline):
"""
Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a
video.
This video classification pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"video-classification"`.
See the list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=video-classification).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
requires_backends(self, "av")
self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES)
def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None, function_to_apply=None):
preprocess_params = {}
if frame_sampling_rate is not None:
preprocess_params["frame_sampling_rate"] = frame_sampling_rate
if num_frames is not None:
preprocess_params["num_frames"] = num_frames
postprocess_params = {}
if top_k is not None:
postprocess_params["top_k"] = top_k
if function_to_apply is not None:
if function_to_apply not in ["softmax", "sigmoid", "none"]:
raise ValueError(
f"Invalid value for `function_to_apply`: {function_to_apply}. "
"Valid options are ['softmax', 'sigmoid', 'none']"
)
postprocess_params["function_to_apply"] = function_to_apply
else:
postprocess_params["function_to_apply"] = "softmax"
return preprocess_params, {}, postprocess_params
def __call__(self, inputs: Union[str, List[str]] = None, **kwargs):
"""
Assign labels to the video(s) passed as inputs.
Args:
inputs (`str`, `List[str]`):
The pipeline handles two types of videos:
- A string containing a http link pointing to a video
- A string containing a local path to a video
The pipeline accepts either a single video or a batch of videos, which must then be passed as a string.
Videos in a batch must all be in the same format: all as http links or all as local paths.
top_k (`int`, *optional*, defaults to 5):
The number of top labels that will be returned by the pipeline. If the provided number is higher than
the number of labels available in the model configuration, it will default to the number of labels.
num_frames (`int`, *optional*, defaults to `self.model.config.num_frames`):
The number of frames sampled from the video to run the classification on. If not provided, will default
to the number of frames specified in the model configuration.
frame_sampling_rate (`int`, *optional*, defaults to 1):
The sampling rate used to select frames from the video. If not provided, will default to 1, i.e. every
frame will be used.
function_to_apply (`str`, *optional*, defaults to "softmax"):
The function to apply to the model output. By default, the pipeline will apply the softmax function to
the output of the model. Valid options: ["softmax", "sigmoid", "none"]. Note that passing Python's
built-in `None` will default to "softmax", so you need to pass the string "none" to disable any
post-processing.
Return:
A dictionary or a list of dictionaries containing the result. If the input is a single video, a
dictionary is returned; if the input is a list of several videos, a list of dictionaries corresponding to
the videos is returned.
The dictionaries contain the following keys:
- **label** (`str`) -- The label identified by the model.
- **score** (`float`) -- The score attributed by the model to that label.
"""
# After the deprecation of `videos` is completed, remove the default `None` value for `inputs`
if "videos" in kwargs:
warnings.warn(
"The `videos` argument has been renamed to `inputs`. In version 5 of Transformers, `videos` will no longer be accepted",
FutureWarning,
)
inputs = kwargs.pop("videos")
if inputs is None:
raise ValueError("Cannot call the video-classification pipeline without an inputs argument!")
return super().__call__(inputs, **kwargs)
def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
if num_frames is None:
num_frames = self.model.config.num_frames
if video.startswith("http://") or video.startswith("https://"):
video = BytesIO(requests.get(video).content)
container = av.open(video)
start_idx = 0
end_idx = num_frames * frame_sampling_rate - 1
indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
video = read_video_pyav(container, indices)
video = list(video)
model_inputs = self.image_processor(video, return_tensors=self.framework)
if self.framework == "pt":
model_inputs = model_inputs.to(self.torch_dtype)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, top_k=5, function_to_apply="softmax"):
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
if self.framework == "pt":
if function_to_apply == "softmax":
probs = model_outputs.logits[0].softmax(-1)
elif function_to_apply == "sigmoid":
probs = model_outputs.logits[0].sigmoid()
else:
probs = model_outputs.logits[0]
scores, ids = probs.topk(top_k)
else:
raise ValueError(f"Unsupported framework: {self.framework}")
scores = scores.tolist()
ids = ids.tolist()
return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
class ImageToImagePipeline(Pipeline):
"""
Image to Image pipeline using any `AutoModelForImageToImage`. This pipeline generates an image based on a previous
image input.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import pipeline
>>> upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
>>> img = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
>>> img = img.resize((64, 64))
>>> upscaled_img = upscaler(img)
>>> img.size
(64, 64)
>>> upscaled_img.size
(144, 144)
```
This image to image pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"image-to-image"`.
See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=image-to-image).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
requires_backends(self, "vision")
self.check_model_type(MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES)
def _sanitize_parameters(self, **kwargs):
preprocess_params = {}
postprocess_params = {}
forward_params = {}
if "timeout" in kwargs:
preprocess_params["timeout"] = kwargs["timeout"]
if "head_mask" in kwargs:
forward_params["head_mask"] = kwargs["head_mask"]
return preprocess_params, forward_params, postprocess_params
def __call__(
self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs
) -> Union["Image.Image", List["Image.Image"]]:
"""
Transform the image(s) passed as inputs.
Args:
images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images, which must then be passed as a string.
Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL
images.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and
the call may block forever.
Return:
An image (`Image.Image`) or a list of images (`List["Image.Image"]`) containing the result(s). If the input is a
single image, the return value will also be a single image; if the input is a list of several images, a list of
transformed images is returned.
"""
return super().__call__(images, **kwargs)
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def preprocess(self, image, timeout=None):
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors="pt")
if self.framework == "pt":
inputs = inputs.to(self.torch_dtype)
return inputs
def postprocess(self, model_outputs):
images = []
if "reconstruction" in model_outputs.keys():
outputs = model_outputs.reconstruction
for output in outputs:
output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
output = np.moveaxis(output, source=0, destination=-1)
output = (output * 255.0).round().astype(np.uint8) # float32 to uint8
images.append(Image.fromarray(output))
return images if len(images) > 1 else images[0]
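# A minimal sketch of the single-vs-batch return behavior implemented in `postprocess` above: a single
# input yields a single `PIL.Image`, a list of inputs yields a list. Checkpoint and image URL are taken
# from the class docstring example.
from transformers import pipeline

upscaler = pipeline(task="image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
single = upscaler(url)  # a PIL.Image.Image
batch = upscaler([url, url])  # a list of two PIL.Image.Image objects
print(type(single), len(batch))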
class FeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
Example:
```python
>>> from transformers import pipeline
>>> extractor = pipeline(model="google-bert/bert-base-uncased", task="feature-extraction")
>>> result = extractor("This is a simple test.", return_tensors=True)
>>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input string.
torch.Size([1, 8, 768])
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
`"feature-extraction"`.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
[huggingface.co/models](https://huggingface.co/models).
"""
def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
if tokenize_kwargs is None:
tokenize_kwargs = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
)
tokenize_kwargs["truncation"] = truncation
preprocess_params = tokenize_kwargs
postprocess_params = {}
if return_tensors is not None:
postprocess_params["return_tensors"] = return_tensors
return preprocess_params, {}, postprocess_params
def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
model_inputs = self.tokenizer(inputs, return_tensors=self.framework, **tokenize_kwargs)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, return_tensors=False):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__(self, *args, **kwargs):
"""
Extract the features of the input(s).
Args:
args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of.
Return:
A nested list of `float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs)
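# A minimal sketch of the `tokenize_kwargs` handling in `_sanitize_parameters` above: tokenizer options
# such as truncation can be passed either directly (`truncation=True`) or inside `tokenize_kwargs`, but
# not both at once. The checkpoint is the one from the class docstring.
from transformers import pipeline

extractor = pipeline(task="feature-extraction", model="google-bert/bert-base-uncased")
features = extractor(
    "This is a simple test. " * 100,
    tokenize_kwargs={"truncation": True, "max_length": 128},
)
print(len(features[0]))  # 128: the sequence was truncated to max_length tokens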
class DepthEstimationPipeline(Pipeline):
"""
Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.
Example:
```python
>>> from transformers import pipeline
>>> depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
>>> output = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
>>> # This is a tensor with the values being the depth expressed in meters for each pixel
>>> output["predicted_depth"].shape
torch.Size([1, 384, 384])
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This depth estimation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"depth-estimation"`.
See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=depth-estimation).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
requires_backends(self, "vision")
self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES)
def __call__(self, inputs: Union[str, List[str], "Image.Image", List["Image.Image"]] = None, **kwargs):
"""
Predict the depth(s) of the image(s) passed as inputs.
Args:
inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images, which must then be passed as a string.
Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL
images.
parameters (`Dict`, *optional*):
A dictionary of argument names to parameter values, to control pipeline behaviour.
The only parameter available right now is `timeout`, which is the length of time, in seconds,
that the pipeline should wait before giving up on trying to download an image.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A dictionary or a list of dictionaries containing the result. If the input is a single image, a
dictionary is returned; if the input is a list of several images, a list of dictionaries corresponding to
the images is returned.
The dictionaries contain the following keys:
- **predicted_depth** (`torch.Tensor`) -- The predicted depth by the model as a `torch.Tensor`.
- **depth** (`PIL.Image`) -- The predicted depth by the model as a `PIL.Image`.
"""
# After the deprecation of `images` is completed, remove the default `None` value for `inputs`
if "images" in kwargs:
inputs = kwargs.pop("images")
if inputs is None:
raise ValueError("Cannot call the depth-estimation pipeline without an inputs argument!")
return super().__call__(inputs, **kwargs)
def _sanitize_parameters(self, timeout=None, parameters=None, **kwargs):
preprocess_params = {}
if timeout is not None:
preprocess_params["timeout"] = timeout
if isinstance(parameters, dict) and "timeout" in parameters:
preprocess_params["timeout"] = parameters["timeout"]
return preprocess_params, {}, {}
def preprocess(self, image, timeout=None):
image = load_image(image, timeout)
model_inputs = self.image_processor(images=image, return_tensors=self.framework)
if self.framework == "pt":
model_inputs = model_inputs.to(self.torch_dtype)
model_inputs["target_size"] = image.size[::-1]
return model_inputs
def _forward(self, model_inputs):
target_size = model_inputs.pop("target_size")
model_outputs = self.model(**model_inputs)
model_outputs["target_size"] = target_size
return model_outputs
def postprocess(self, model_outputs):
outputs = self.image_processor.post_process_depth_estimation(
model_outputs,
# this acts as `source_sizes` for ZoeDepth and as `target_sizes` for the rest of the models so do *not*
# replace with `target_sizes = [model_outputs["target_size"]]`
[model_outputs["target_size"]],
)
formatted_outputs = []
for output in outputs:
depth = output["predicted_depth"].detach().cpu().numpy()
depth = (depth - depth.min()) / (depth.max() - depth.min())
depth = Image.fromarray((depth * 255).astype("uint8"))
formatted_outputs.append({"predicted_depth": output["predicted_depth"], "depth": depth})
return formatted_outputs[0] if len(outputs) == 1 else formatted_outputs
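# A minimal sketch of the two keys returned by `postprocess` above: the raw `predicted_depth` tensor and
# the `depth` visualization, i.e. the prediction min-max normalized to 0-255 and wrapped in a PIL image.
# Checkpoint and image URL are taken from the class docstring example.
from transformers import pipeline

depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
output = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(output["predicted_depth"].shape)  # per-pixel depth values as a torch.Tensor
output["depth"].save("depth_visualization.png")  # grayscale PIL.Image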
class ZeroShotAudioClassificationPipeline(Pipeline):
"""
Zero shot audio classification pipeline using `ClapModel`. This pipeline predicts the class of an audio when you
provide an audio and a set of `candidate_labels`.
<Tip warning={true}>
The default `hypothesis_template` is `"This is a sound of {}."`. Make sure you update it for your usage.
</Tip>
Example:
```python
>>> from transformers import pipeline
>>> from datasets import load_dataset
>>> dataset = load_dataset("ashraq/esc50")
>>> audio = next(iter(dataset["train"]["audio"]))["array"]
>>> classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
>>> classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
[{'score': 0.9996, 'label': 'Sound of a dog'}, {'score': 0.0004, 'label': 'Sound of vacuum cleaner'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). This audio
classification pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"zero-shot-audio-classification"`. See the list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=zero-shot-audio-classification).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch.")
# No specific FOR_XXX available yet
def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
"""
Assign labels to the audio(s) passed as inputs.
Args:
audios (`str`, `List[str]`, `np.array` or `List[np.array]`):
The pipeline handles three types of inputs:
- A string containing a http link pointing to an audio
- A string containing a local path to an audio
- An audio loaded in numpy
candidate_labels (`List[str]`):
The candidate labels for this audio. They will be formatted using *hypothesis_template*.
hypothesis_template (`str`, *optional*, defaults to `"This is a sound of {}."`):
The format used in conjunction with *candidate_labels* to attempt the audio classification by
replacing the placeholder with the candidate_labels. Pass "{}" if *candidate_labels* are
already formatted.
Return:
A list of dictionaries containing one entry per proposed label. Each dictionary contains the
following keys:
- **label** (`str`) -- One of the suggested *candidate_labels*.
- **score** (`float`) -- The score attributed by the model to that label. It is a value between
0 and 1, computed as the `softmax` of `logits_per_audio`.
"""
return super().__call__(audios, **kwargs)
def _sanitize_parameters(self, **kwargs):
preprocess_params = {}
if "candidate_labels" in kwargs:
preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
if isinstance(audio, str):
if audio.startswith("http://") or audio.startswith("https://"):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
audio = requests.get(audio).content
else:
with open(audio, "rb") as f:
audio = f.read()
if isinstance(audio, bytes):
audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
if not isinstance(audio, np.ndarray):
raise TypeError("We expect a numpy ndarray as input")
if len(audio.shape) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
inputs = self.feature_extractor(
[audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
)
if self.framework == "pt":
inputs = inputs.to(self.torch_dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
inputs["text_inputs"] = [text_inputs]
return inputs
def _forward(self, model_inputs):
candidate_labels = model_inputs.pop("candidate_labels")
text_inputs = model_inputs.pop("text_inputs")
if isinstance(text_inputs[0], UserDict):
text_inputs = text_inputs[0]
else:
# Batching case.
text_inputs = text_inputs[0][0]
outputs = self.model(**text_inputs, **model_inputs)
model_outputs = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_audio,
}
return model_outputs
def postprocess(self, model_outputs):
candidate_labels = model_outputs.pop("candidate_labels")
logits = model_outputs["logits"][0]
if self.framework == "pt":
probs = logits.softmax(dim=0)
scores = probs.tolist()
else:
raise ValueError("`tf` framework not supported.")
result = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
]
return result
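# A minimal sketch of the `hypothesis_template` parameter handled in `preprocess` above: each candidate
# label is inserted into the template before being encoded by the text branch of the model. Checkpoint
# and dataset come from the class docstring example; the labels here are illustrative.
from datasets import load_dataset

from transformers import pipeline

dataset = load_dataset("ashraq/esc50")
audio = next(iter(dataset["train"]["audio"]))["array"]
classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
print(
    classifier(
        audio,
        candidate_labels=["a dog barking", "a vacuum cleaner running"],
        hypothesis_template="This is the sound of {}.",
    )
)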
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
"""
Handles arguments for zero-shot for text classification by turning each possible label into an NLI
premise/hypothesis pair.
"""
def _parse_labels(self, labels):
if isinstance(labels, str):
labels = [label.strip() for label in labels.split(",") if label.strip()]
return labels
def __call__(self, sequences, labels, hypothesis_template):
if len(labels) == 0 or len(sequences) == 0:
raise ValueError("You must include at least one label and at least one sequence.")
if hypothesis_template.format(labels[0]) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(hypothesis_template)
)
if isinstance(sequences, str):
sequences = [sequences]
sequence_pairs = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
return sequence_pairs, sequences
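# Illustration of what the argument handler above produces: a comma-separated label string is split into
# a list, and each label is turned into a (sequence, hypothesis) pair using the template.
handler = ZeroShotClassificationArgumentHandler()
labels = handler._parse_labels("urgent, not urgent")  # ['urgent', 'not urgent']
pairs, sequences = handler(["I have a problem with my iphone"], labels, "This example is {}.")
# pairs == [['I have a problem with my iphone', 'This example is urgent.'],
#           ['I have a problem with my iphone', 'This example is not urgent.']]
print(pairs, sequences)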
class ZeroShotClassificationPipeline(ChunkPipeline):
"""
NLI-based zero-shot classification pipeline using a `ModelForSequenceClassification` trained on NLI (natural
language inference) tasks. Equivalent of `text-classification` pipelines, but these models don't require a
hardcoded number of potential classes; the classes can be chosen at runtime. This usually makes the pipeline slower, but
**much** more flexible.
Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis
pair and passed to the pretrained model. Then, the logit for *entailment* is taken as the logit for the candidate
label being valid. Any NLI model can be used, but the id of the *entailment* label must be included in the model
config's [`~transformers.PretrainedConfig.label2id`].
Example:
```python
>>> from transformers import pipeline
>>> oracle = pipeline(model="facebook/bart-large-mnli")
>>> oracle(
... "I have a problem with my iphone that needs to be resolved asap!!",
... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
... )
{'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]}
>>> oracle(
... "I have a problem with my iphone that needs to be resolved asap!!",
... candidate_labels=["english", "german"],
... )
{'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['english', 'german'], 'scores': [0.814, 0.186]}
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This NLI pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"zero-shot-classification"`.
The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list
of available models on [huggingface.co/models](https://huggingface.co/models?search=nli).
"""
def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
self._args_parser = args_parser
super().__init__(*args, **kwargs)
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
)
@property
def entailment_id(self):
for label, ind in self.model.config.label2id.items():
if label.lower().startswith("entail"):
return ind
return -1
def _parse_and_tokenize(
self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
):
"""
Parse arguments and tokenize with `only_first` truncation so that the hypothesis (label) is not truncated
"""
return_tensors = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`"
)
self.tokenizer.pad_token = self.tokenizer.eos_token
try:
inputs = self.tokenizer(
sequence_pairs,
add_special_tokens=add_special_tokens,
return_tensors=return_tensors,
padding=padding,
truncation=truncation,
)
except Exception as e:
if "too short" in str(e):
# The tokenizer might complain that we want to truncate to a value that is not even
# reached by the input. In that case we don't want to truncate at all.
# There does not seem to be a better way to catch this exception.
inputs = self.tokenizer(
sequence_pairs,
add_special_tokens=add_special_tokens,
return_tensors=return_tensors,
padding=padding,
truncation=TruncationStrategy.DO_NOT_TRUNCATE,
)
else:
raise e
return inputs
def _sanitize_parameters(self, **kwargs):
if kwargs.get("multi_class", None) is not None:
kwargs["multi_label"] = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers."
)
preprocess_params = {}
if "candidate_labels" in kwargs:
preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
if "hypothesis_template" in kwargs:
preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
postprocess_params = {}
if "multi_label" in kwargs:
postprocess_params["multi_label"] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__(
self,
sequences: Union[str, List[str]],
*args,
**kwargs,
):
"""
Classify the sequence(s) given as inputs. See the [`ZeroShotClassificationPipeline`] documentation for more
information.
Args:
sequences (`str` or `List[str]`):
The sequence(s) to classify. They will be truncated if the model input is too large.
candidate_labels (`str` or `List[str]`):
The set of possible class labels to classify each sequence into. Can be a single label, a string of
comma-separated labels, or a list of labels.
hypothesis_template (`str`, *optional*, defaults to `"This example is {}."`):
The template used to turn each label into an NLI-style hypothesis. This template must include a {} or
similar syntax for the candidate label to be inserted into the template. For example, the default
template is `"This example is {}."` With the candidate label `"sports"`, this would be fed into the
model like `"<cls> sequence to classify <sep> This example is sports . <sep>"`. The default template
works well in many cases, but it may be worthwhile to experiment with different templates depending on
the task setting.
multi_label (`bool`, *optional*, defaults to `False`):
Whether or not multiple candidate labels can be true. If `False`, the scores are normalized such that
the sum of the label likelihoods for each sequence is 1. If `True`, the labels are considered
independent and probabilities are normalized for each candidate by doing a softmax of the entailment
score vs. the contradiction score.
Return:
A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
- **sequence** (`str`) -- The sequence for which this is the output.
- **labels** (`List[str]`) -- The labels sorted by order of likelihood.
- **scores** (`List[float]`) -- The probabilities for each of the labels.
"""
if len(args) == 0:
pass
elif len(args) == 1 and "candidate_labels" not in kwargs:
kwargs["candidate_labels"] = args[0]
else:
raise ValueError(f"Unable to understand extra arguments {args}")
return super().__call__(sequences, **kwargs)
def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
model_input = self._parse_and_tokenize([sequence_pair])
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(candidate_labels) - 1,
**model_input,
}
def _forward(self, inputs):
candidate_label = inputs["candidate_label"]
sequence = inputs["sequence"]
model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
# `XXXForSequenceClassification` models should not use `use_cache=True` even if it's supported
model_forward = self.model.forward if self.framework == "pt" else self.model.call
if "use_cache" in inspect.signature(model_forward).parameters.keys():
model_inputs["use_cache"] = False
outputs = self.model(**model_inputs)
model_outputs = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def postprocess(self, model_outputs, multi_label=False):
candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
sequences = [outputs["sequence"] for outputs in model_outputs]
if self.framework == "pt":
logits = np.concatenate([output["logits"].float().numpy() for output in model_outputs])
else:
logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
N = logits.shape[0]
n = len(candidate_labels)
num_sequences = N // n
reshaped_outputs = logits.reshape((num_sequences, n, -1))
if multi_label or len(candidate_labels) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
entailment_id = self.entailment_id
contradiction_id = -1 if entailment_id == 0 else 0
entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
scores = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
entail_logits = reshaped_outputs[..., self.entailment_id]
scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
top_inds = list(reversed(scores[0].argsort()))
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
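# A minimal sketch of `multi_label=True`, handled in `postprocess` above: each candidate label is scored
# independently via a softmax over its entailment vs. contradiction logits, so the scores no longer sum
# to 1. Checkpoint, example text and labels are taken from the class docstring example.
from transformers import pipeline

oracle = pipeline(task="zero-shot-classification", model="facebook/bart-large-mnli")
result = oracle(
    "I have a problem with my iphone that needs to be resolved asap!!",
    candidate_labels=["urgent", "not urgent", "phone"],
    multi_label=True,
)
print(result["labels"], result["scores"])  # independent scores, each between 0 and 1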
class ReturnType(enum.Enum):
TENSORS = 0
NEW_TEXT = 1
FULL_TEXT = 2
class Chat:
"""This class is intended to just be used internally in this pipeline and not exposed to users. We convert chats
to this format because the rest of the pipeline code tends to assume that lists of messages are
actually a batch of samples rather than messages in the same conversation."""
def __init__(self, messages: Dict, images: Union[str, List[str], "Image.Image", List["Image.Image"]]):
for message in messages:
if not ("role" in message and "content" in message):
raise ValueError("When passing chat dicts as input, each dict must have a 'role' and 'content' key.")
images = retrieve_images_in_messages(messages, images)
self.messages = messages
self.images = images
class ImageTextToTextPipeline(Pipeline):
"""
Image-text-to-text pipeline using an `AutoModelForImageTextToText`. This pipeline generates text given an image and text.
When the underlying model is a conversational model, it can also accept one or more chats,
in which case the pipeline will operate in chat mode and will continue the chat(s) by adding its response(s).
Each chat takes the form of a list of dicts, where each dict contains "role" and "content" keys.
Example:
```python
>>> from transformers import pipeline
>>> pipe = pipeline(task="image-text-to-text", model="Salesforce/blip-image-captioning-base")
>>> pipe("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", text="A photo of")
[{'generated_text': 'a photo of two birds'}]
```
```python
>>> from transformers import pipeline
>>> pipe = pipeline("image-text-to-text", model="llava-hf/llava-interleave-qwen-0.5b-hf")
>>> messages = [
...     {
...         "role": "user",
...         "content": [
...             {
...                 "type": "image",
...                 "url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
...             },
...             {"type": "text", "text": "Describe this image."},
...         ],
...     },
...     {
...         "role": "assistant",
...         "content": [
...             {"type": "text", "text": "There is a dog and"},
...         ],
...     },
... ]
>>> pipe(text=messages, max_new_tokens=20, return_full_text=False)
[{'input_text': [{'role': 'user',
'content': [{'type': 'image',
'url': 'https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg'},
{'type': 'text', 'text': 'Describe this image.'}]},
{'role': 'assistant',
'content': [{'type': 'text', 'text': 'There is a dog and'}]}],
'generated_text': ' a person in the image. The dog is sitting on the sand, and the person is sitting on'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This image-text-to-text pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"image-text-to-text"`.
See the list of available models on
[huggingface.co/models](https://huggingface.co/models?pipeline_tag=image-text-to-text).
"""
_load_processor = True
_load_image_processor = False
_load_feature_extractor = False
_load_tokenizer = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
requires_backends(self, "vision")
self.check_model_type(MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES)
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
def _sanitize_parameters(
self,
max_new_tokens=None,
generate_kwargs=None,
timeout=None,
return_full_text=None,
return_tensors=None,
return_type=None,
continue_final_message=None,
**kwargs: Unpack[ProcessingKwargs],
):
forward_kwargs = {}
preprocess_params = {}
postprocess_params = {}
preprocess_params["processing_kwargs"] = kwargs
if timeout is not None:
preprocess_params["timeout"] = timeout
if continue_final_message is not None:
preprocess_params["continue_final_message"] = continue_final_message
if generate_kwargs is not None:
forward_kwargs["generate_kwargs"] = generate_kwargs
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
forward_kwargs["generate_kwargs"] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one"
)
forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
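Restated as a toy helper (not part of the pipeline's API), the guard above merges `max_new_tokens` into `generate_kwargs` while rejecting a double definition:

```python
from typing import Optional


def merge_max_new_tokens(generate_kwargs: Optional[dict], max_new_tokens: Optional[int]) -> dict:
    """Toy restatement of the check above: max_new_tokens may come from exactly one place."""
    merged = dict(generate_kwargs or {})
    if max_new_tokens is not None:
        if "max_new_tokens" in merged:
            raise ValueError("'max_new_tokens' is defined twice, please use only one")
        merged["max_new_tokens"] = max_new_tokens
    return merged


print(merge_max_new_tokens({"do_sample": False}, 20))
# {'do_sample': False, 'max_new_tokens': 20}
```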
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
if return_full_text is not None and return_type is None:
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
return_type = ReturnType.TENSORS
if return_type is not None:
postprocess_params["return_type"] = return_type
if continue_final_message is not None:
postprocess_params["continue_final_message"] = continue_final_message
return preprocess_params, forward_kwargs, postprocess_params
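The return-type flags resolved above can also be sketched as a standalone function; the enum is redefined locally here only to keep the snippet self-contained:

```python
import enum
from typing import Optional


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


def resolve_return_type(
    return_full_text: Optional[bool] = None,
    return_tensors: Optional[bool] = None,
    return_type: Optional[ReturnType] = None,
) -> Optional[ReturnType]:
    # Mirrors the precedence above: an explicit return_type wins, and
    # return_full_text is mutually exclusive with return_tensors.
    if return_full_text is not None and return_type is None:
        if return_tensors is not None:
            raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
        return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
    if return_tensors is not None and return_type is None:
        return_type = ReturnType.TENSORS
    return return_type


print(resolve_return_type(return_full_text=False))  # ReturnType.NEW_TEXT
print(resolve_return_type(return_tensors=True))     # ReturnType.TENSORS
```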
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
def __call__(
self,
images: Optional[
Union[str, List[str], List[List[str]], "Image.Image", List["Image.Image"], List[List["Image.Image"]]]
] = None,
text: Optional[Union[str, List[str], List[dict]]] = None,
**kwargs,
):
"""
Generate text given the text and image(s) passed as inputs.
Args:
images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a HTTP(s) link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
The pipeline accepts either a single image or a batch of images.
text (`str`, `List[str]`, `List[Dict[str, Union[str, PIL.Image]]]`):
The text to be used for generation. If a list of strings is passed, the length of the list should be the
same as the number of images. Text can also follow the chat format: a list of dictionaries where each
dictionary represents a message in a conversation. Each dictionary should have two keys: 'role' and
'content'. 'role' should be one of 'user', 'system' or 'assistant'. 'content' should be a list of
dictionaries, each containing the type of the message and, for text messages, the text itself. The type
can be either 'text' or 'image'; if the type is 'image', no text is needed.
return_tensors (`bool`, *optional*, defaults to `False`):
Returns the tensors of predictions (as token indices) in the outputs. If set to
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
`True`, the decoded text is not returned.
return_text (`bool`, *optional*):
Returns the decoded texts in the outputs.
return_full_text (`bool`, *optional*, defaults to `True`):
If set to `False`, only the newly generated text is returned; otherwise the full text is returned. Cannot be
specified at the same time as `return_text`.
continue_final_message (`bool`, *optional*): This indicates that you want the model to continue the
last message in the input chat rather than starting a new one, allowing you to "prefill" its response.
By default this is `True` when the final message in the input chat has the `assistant` role and
`False` otherwise, but you can manually override that behaviour by setting this flag.
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
Return:
A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys (a single result cannot
contain both `generated_text` and `generated_token_ids`):
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
- **generated_text** (`str`, present when `return_text=True`) -- The generated text.
- **generated_token_ids** (`torch.Tensor`, present when `return_tensors=True`) -- The token
ids of the generated text.
- **input_text** (`str`) -- The input text.
"""
if images is None and text is None:
raise ValueError("You must at least provide either text or images.")
if images is not None and text is None and not valid_images(images):
"""
Supports the following format
- {"image": image, "text": text}
- [{"image": image, "text": text}]
- Generator and datasets
This is a common pattern in other multimodal pipelines, so we support it here as well.
"""
return super().__call__(images, **kwargs)
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
if isinstance(text, (list, tuple, KeyDataset)) and isinstance(text[0], (list, tuple, dict)):
# We have one or more prompts in list-of-dicts format, so this is chat mode
if isinstance(text[0], dict):
return super().__call__(Chat(text, images), **kwargs)
else:
if images is None:
images = [None] * len(text)
chats = [Chat(chat, image) for chat, image in zip(text, images)]  # 🐈 🐈 🐈
return super().__call__(chats, **kwargs)
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
# encourage the user to use the chat format if supported
if getattr(self.processor, "chat_template", None) is not None:
logger.warning_once(
"The input data was not formatted as a chat with dicts containing 'role' and 'content' keys, even though this model supports chat. "
"Consider using the chat format for better results. For more information, see https://huggingface.co/docs/transformers/en/chat_templating"
)
# support text only generation
if images is None:
return super().__call__(text, **kwargs)
if text is None:
raise ValueError("You must provide text for this pipeline.")
return super().__call__({"images": images, "text": text}, **kwargs)
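As a usage sketch tying the call signature above to the chat format described in the docstring (the checkpoint is the one from the class docstring, the prefilled text is made up, and running this requires downloading the model; outputs are not reproduced here):

```python
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="llava-hf/llava-interleave-qwen-0.5b-hf")

url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": url},
            {"type": "text", "text": "Describe this image."},
        ],
    },
    # Ending the chat on an assistant message prefills the reply: by default the
    # model continues "The picture shows" instead of opening a new assistant turn.
    {"role": "assistant", "content": [{"type": "text", "text": "The picture shows"}]},
]

outputs = pipe(text=messages, max_new_tokens=20, return_full_text=False)

# Pass continue_final_message=False to force a fresh assistant message instead.
```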
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
def preprocess(self, inputs=None, timeout=None, continue_final_message=None, processing_kwargs=None):
# In case we only have text inputs
if isinstance(inputs, (list, tuple, str)):
images = None
text = inputs
inputs_text = inputs
else:
if isinstance(inputs, Chat):
# If the user passes a chat that ends in an assistant message, we treat it as a prefill by default
# because very few models support multiple separate, consecutive assistant messages
if continue_final_message is None:
continue_final_message = inputs.messages[-1]["role"] == "assistant"
text = self.processor.apply_chat_template(
inputs.messages,
add_generation_prompt=not continue_final_message,
continue_final_message=continue_final_message,
return_tensors=self.framework,
)
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
inputs_text = inputs
images = inputs.images
else:
text = inputs["text"]
inputs_text = inputs["text"]
images = inputs["images"]
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
images = load_images(images)
# if batched text inputs, we set padding to True unless specified otherwise
if isinstance(text, (list, tuple)) and len(text) > 1:
processing_kwargs.setdefault("padding", True)
model_inputs = self.processor(
images=images, text=text, return_tensors=self.framework, legacy=False, **processing_kwargs
).to(dtype=self.torch_dtype)
model_inputs["text"] = inputs_text
return model_inputs
def _forward(self, model_inputs, generate_kwargs=None):
generate_kwargs = {} if generate_kwargs is None else generate_kwargs
prompt_text = model_inputs.pop("text")
input_ids = (
model_inputs["input_ids"] if "input_ids" in model_inputs else model_inputs["decoder_input_ids"]
) # for decoder-only models
generated_sequence = self.model.generate(**model_inputs, **generate_kwargs)
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
return {"generated_sequence": generated_sequence, "prompt_text": prompt_text, "input_ids": input_ids}
def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, continue_final_message=None):
input_texts = model_outputs["prompt_text"]
input_texts = [input_texts] if isinstance(input_texts, (str, Chat)) else input_texts
generated_sequence = model_outputs["generated_sequence"]
input_ids = model_outputs["input_ids"]
if return_type == ReturnType.TENSORS:
return [
{"input_text": input_texts[i], "generated_token_ids": generated_sequence[i]}
for i in range(len(input_texts))
]
# Decode inputs and outputs the same way to remove input text from generated text if present
generated_texts = self.processor.post_process_image_text_to_text(generated_sequence)
decoded_inputs = self.processor.post_process_image_text_to_text(input_ids)
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
# Force consistent behavior for including the input text in the output
if return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Remove the input text from the generated text if the generated text starts with the input text
# (accounting for the possibility of a space between the input and generated text)
new_generated_texts = []
for text_generated, decoded_input in zip(generated_texts, decoded_inputs):
# There can be added characters before the input text, so we need to find the beginning of the input text in the generated text
index_input_text = text_generated.find(decoded_input)
# Limit the search to 2 residual characters, like spaces or new lines, to avoid removing a large part of the answer
if 0 <= index_input_text <= 2:
# If the input text is found, we remove it
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
new_generated_texts.append(text_generated[index_input_text + len(decoded_input) :])
else:
new_generated_texts.append(text_generated)
generated_texts = new_generated_texts
if return_type == ReturnType.FULL_TEXT:
full_texts = []
for prompt_text, generated_text in zip(input_texts, generated_texts):
if isinstance(prompt_text, str):
generated_text = prompt_text + generated_text
elif isinstance(prompt_text, Chat):
if continue_final_message is None:
# If the user passes a chat ending in an assistant message, we treat it as a prefill by
# default because very few models support multiple separate, consecutive assistant messages
continue_final_message = prompt_text.messages[-1]["role"] == "assistant"
if continue_final_message:
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
# With assistant prefill, concat onto the end of the last message
new_text = dict(prompt_text.messages[-1]["content"][-1].items())
new_text["text"] += generated_text
generated_text = list(prompt_text.messages)[:-1] + [
{
"role": prompt_text.messages[-1]["role"],
"content": prompt_text.messages[-1]["content"][:-1] + [new_text],
}
]
else:
# When we're not starting from a prefill, the output is a new assistant message
generated_text = list(prompt_text.messages) + [
{"role": "assistant", "content": generated_text}
]
full_texts.append(generated_text)
generated_texts = full_texts
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
records = [
{
"input_text": input_text.messages if isinstance(input_text, Chat) else input_text,
"generated_text": generated_text,
}
for input_text, generated_text in zip(input_texts, generated_texts)
]
return records
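The prompt-stripping heuristic from the comments above can be isolated into a small sketch (a toy helper, not the pipeline's code):

```python
def strip_prompt(generated: str, prompt: str) -> str:
    """Remove the decoded prompt from the decoded generation when it reappears
    within the first couple of characters, as described in the comments above."""
    index = generated.find(prompt)
    # Tolerate up to 2 residual characters (spaces, newlines) before the prompt.
    if 0 <= index <= 2:
        return generated[index + len(prompt):]
    return generated


print(strip_prompt("Describe this image. A parrot on a branch.", "Describe this image."))
# ' A parrot on a branch.'
print(strip_prompt("A parrot on a branch.", "Describe this image."))
# 'A parrot on a branch.'  (prompt not found, output left untouched)
```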
| 435 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/image_text_to_text.py
|
class ClassificationFunction(ExplicitEnum):
SIGMOID = "sigmoid"
SOFTMAX = "softmax"
NONE = "none"
| 436 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/text_classification.py
|
class TextClassificationPipeline(Pipeline):
"""
Text classification pipeline using any `ModelForSequenceClassification`. See the [sequence classification
examples](../task_summary#sequence-classification) for more information.
Example:
```python
>>> from transformers import pipeline
>>> classifier = pipeline(model="distilbert/distilbert-base-uncased-finetuned-sst-2-english")
>>> classifier("This movie is disgustingly good !")
[{'label': 'POSITIVE', 'score': 1.0}]
>>> classifier("Director tried too much.")
[{'label': 'NEGATIVE', 'score': 0.996}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This text classification pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"sentiment-analysis"` (for classifying sequences according to positive or negative sentiments).
| 437 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/text_classification.py
|
If multiple classification labels are available (`model.config.num_labels >= 2`), the pipeline will run a softmax
over the results. If there is a single label, the pipeline will run a sigmoid over the result. For regression
tasks (`model.config.problem_type == "regression"`), no function is applied to the output.
The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See
the up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=text-classification).
"""
return_all_scores = False
function_to_apply = ClassificationFunction.NONE
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
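As a standalone restatement of the rule in the docstring above (a NumPy sketch, not the pipeline's actual postprocessing code):

```python
from typing import Optional

import numpy as np


def apply_classification_function(
    logits: np.ndarray, num_labels: int, problem_type: Optional[str] = None
) -> np.ndarray:
    """Sketch of the docstring's rule: sigmoid for a single label, softmax for
    multiple labels, and no function at all for regression problems."""
    if problem_type == "regression":
        return logits
    if num_labels == 1:
        return 1.0 / (1.0 + np.exp(-logits))  # sigmoid
    shifted = logits - logits.max(-1, keepdims=True)  # numerically stable softmax
    return np.exp(shifted) / np.exp(shifted).sum(-1, keepdims=True)


logits = np.array([[1.5, -0.5]])
print(apply_classification_function(logits, num_labels=2))  # softmax over the two labels
```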
| 437 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/text_classification.py
|
def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
preprocess_params = tokenizer_kwargs
postprocess_params = {}
if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
return_all_scores = self.model.config.return_all_scores
| 437 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pipelines/text_classification.py
|