Columns: text (string, lengths 1 – 1.02k), class_index (int64, values 0 – 10.8k), source (string, lengths 85 – 188)
class ParameterFormat(Enum): Float = c_float @property def size(self) -> int: """ Number of bytes required for this data type Returns: Integer > 0 """ return sizeof(self.value)
333
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/onnx/utils.py
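A self-contained version of the ParameterFormat snippet above, only to show what the ctypes `sizeof` call returns; nothing beyond the snippet itself is assumed.

from ctypes import c_float, sizeof
from enum import Enum

class ParameterFormat(Enum):
    Float = c_float

    @property
    def size(self) -> int:
        """Number of bytes required for this data type."""
        return sizeof(self.value)

print(ParameterFormat.Float.size)  # 4 bytes for a C float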
class ImageLoss(nn.Module): """ This class computes the losses for DetrForObjectDetection/DetrForSegmentation. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). A note on the `num_classes` argument (copied from original repo in detr.py): "the naming of the `num_classes` parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where `max_obj_id` is the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass `num_classes` to be 91. As another example, for a dataset that has a single class with `id` 1, you should pass `num_classes` to be 2 (`max_obj_id` + 1). For more details on this, check the following discussion https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223"
334
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
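A tiny sketch of the `num_classes` convention quoted above; the toy label set is an assumption mirroring the docstring's single-class example.

# Toy dataset whose only class id is 1, as in the docstring's example.
class_ids = {1}
max_obj_id = max(class_ids)      # 1
num_classes = max_obj_id + 1     # 2 -> value to pass to the criterion
# For COCO, max_obj_id is 90, so num_classes would be 91.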
Args: matcher (`DetrHungarianMatcher`): Module able to compute a matching between targets and proposals. num_classes (`int`): Number of object categories, omitting the special no-object category. eos_coef (`float`): Relative classification weight applied to the no-object category. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. """ def __init__(self, matcher, num_classes, eos_coef, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.eos_coef = eos_coef self.losses = losses empty_weight = torch.ones(self.num_classes + 1) empty_weight[-1] = self.eos_coef self.register_buffer("empty_weight", empty_weight)
334
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
# removed logging parameter, which was part of the original implementation def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (NLL) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o loss_ce = nn.functional.cross_entropy(source_logits.transpose(1, 2), target_classes, self.empty_weight) losses = {"loss_ce": loss_ce} return losses
334
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
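A minimal numeric sketch of the classification loss above (shapes and values are made up): unmatched queries default to the no-object index `num_classes`, and `empty_weight` down-weights that class in the cross-entropy.

import torch
import torch.nn as nn

num_classes = 2                                                   # no-object class gets index 2
logits = torch.randn(1, 4, num_classes + 1)                       # [batch, queries, classes + 1]
target_classes = torch.full((1, 4), num_classes)                  # every query starts as "no-object"
target_classes[0, torch.tensor([1, 3])] = torch.tensor([0, 1])    # queries 1 and 3 were matched
empty_weight = torch.ones(num_classes + 1)
empty_weight[-1] = 0.1                                            # eos_coef
loss_ce = nn.functional.cross_entropy(logits.transpose(1, 2), target_classes, empty_weight)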
@torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.
334
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
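The cardinality error above reduces to a few lines on toy tensors (values are made up; the last class index plays the role of "no-object").

import torch
import torch.nn as nn

logits = torch.randn(2, 5, 4)                                     # 2 images, 5 queries, 3 classes + no-object
card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)    # predicted non-empty boxes per image
target_lengths = torch.tensor([2.0, 3.0])                         # ground-truth boxes per image
card_err = nn.functional.l1_loss(card_pred.float(), target_lengths)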
Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses
334
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
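A hedged sketch of the box losses above on two already-matched pairs; torchvision's `generalized_box_iou` stands in for the helper used in the file, and the local conversion function is an assumption equivalent in spirit to `center_to_corners_format`.

import torch
from torchvision.ops import generalized_box_iou  # stand-in for the helper used above

def center_to_corners(boxes):
    # (cx, cy, w, h) -> (x0, y0, x1, y1)
    cx, cy, w, h = boxes.unbind(-1)
    return torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)

src = torch.tensor([[0.5, 0.5, 0.20, 0.20], [0.3, 0.3, 0.10, 0.10]])
tgt = torch.tensor([[0.5, 0.5, 0.25, 0.25], [0.3, 0.3, 0.10, 0.10]])
num_boxes = 2
loss_bbox = torch.nn.functional.l1_loss(src, tgt, reduction="none").sum() / num_boxes
loss_giou = (1 - torch.diag(generalized_box_iou(center_to_corners(src), center_to_corners(tgt)))).sum() / num_boxes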
def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. """ if "pred_masks" not in outputs: raise KeyError("No predicted masks found in outputs") source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs["pred_masks"] source_masks = source_masks[source_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx]
334
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
# upsample predictions to the target size source_masks = nn.functional.interpolate( source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False ) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes), "loss_dice": dice_loss(source_masks, target_masks, num_boxes), } return losses def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx
334
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
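What `_get_source_permutation_idx` above produces for a toy matcher output (indices are made up): a batch index and a query index that together select the matched predictions.

import torch

indices = [(torch.tensor([2, 0]), torch.tensor([0, 1])),   # image 0: queries 2, 0 <-> targets 0, 1
           (torch.tensor([1]), torch.tensor([0]))]          # image 1: query 1 <-> target 0
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
source_idx = torch.cat([src for (src, _) in indices])
print(batch_idx, source_idx)   # tensor([0, 0, 1]) tensor([2, 0, 1])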
def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, "masks": self.loss_masks, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) def forward(self, outputs, targets): """ This performs the loss computation.
334
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. """ outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets)
334
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
# Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) world_size = 1 if is_accelerate_available(): if PartialState._shared_state != {}: num_boxes = reduce(num_boxes) world_size = PartialState().num_processes num_boxes = torch.clamp(num_boxes / world_size, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
334
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) return losses
334
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
class HungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. """ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"])
335
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0")
335
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
@torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates.
335
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets])
335
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
# Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, it can be omitted. class_cost = -out_prob[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
335
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
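The final step of the matcher above, on a toy cost matrix (numbers made up): one image, three queries, two targets, solved with SciPy's Hungarian solver exactly as in the return statement.

import torch
from scipy.optimize import linear_sum_assignment

cost_matrix = torch.tensor([[0.9, 0.1],
                            [0.2, 0.8],
                            [0.5, 0.6]])   # [num_queries, num_targets] for one image
row_ind, col_ind = linear_sum_assignment(cost_matrix.numpy())
indices = (torch.as_tensor(row_ind, dtype=torch.int64), torch.as_tensor(col_ind, dtype=torch.int64))
print(indices)   # (tensor([0, 1]), tensor([1, 0])): query 0 -> target 1, query 1 -> target 0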
class NestedTensor: def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors)
336
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
class DeformableDetrHungarianMatcher(HungarianMatcher): @torch.no_grad() def forward(self, outputs, targets): """ Differences: - out_prob = outputs["logits"].flatten(0, 1).sigmoid() instead of softmax - class_cost uses alpha and gamma """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets])
337
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_deformable_detr.py
# Compute the classification cost. alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
337
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_deformable_detr.py
sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
337
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_deformable_detr.py
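A toy version of the focal-style classification cost used in DeformableDetrHungarianMatcher above (probabilities and targets are made up; alpha and gamma are the constants from the snippet).

import torch

alpha, gamma = 0.25, 2.0
out_prob = torch.tensor([[0.9, 0.1], [0.2, 0.7]])   # [num_queries, num_classes], after sigmoid
target_ids = torch.tensor([0, 1])
neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids]   # [num_queries, num_targets]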
class DeformableDetrImageLoss(ImageLoss): def __init__(self, matcher, num_classes, focal_alpha, losses): nn.Module.__init__(self) self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses # removed logging parameter, which was part of the original implementation def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"]
338
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_deformable_detr.py
idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses
338
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_deformable_detr.py
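The one-hot target construction in loss_labels above, on toy shapes (everything made up); note how dropping the last column leaves all-zero rows for unmatched queries.

import torch

num_classes = 3
target_classes = torch.tensor([[0, 3, 2, 3]])            # 3 == no-object padding index
onehot = torch.zeros(1, 4, num_classes + 1)
onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
onehot = onehot[:, :, :-1]                               # drop the no-object column
print(onehot[0])                                         # rows for queries 1 and 3 are all zeros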
class RTDetrHungarianMatcher(nn.Module): """This class computes an assignment between the targets and the predictions of the network For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: config: RTDetrConfig """ def __init__(self, config): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = config.matcher_class_cost self.bbox_cost = config.matcher_bbox_cost self.giou_cost = config.matcher_giou_cost self.use_focal_loss = config.use_focal_loss self.alpha = config.matcher_alpha self.gamma = config.matcher_gamma if self.class_cost == self.bbox_cost == self.giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0")
339
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
@torch.no_grad() def forward(self, outputs, targets): """Performs the matching Params: outputs: This is a dict that contains at least these entries: "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
339
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
Returns: A list of size batch_size, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2]
339
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
# We flatten to compute the cost matrices in a batch out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, it can be omitted. if self.use_focal_loss: out_prob = F.sigmoid(outputs["logits"].flatten(0, 1)) out_prob = out_prob[:, target_ids] neg_cost_class = (1 - self.alpha) * (out_prob**self.gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = self.alpha * ((1 - out_prob) ** self.gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class - neg_cost_class else:
339
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
out_prob = outputs["logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] class_cost = -out_prob[:, target_ids]
339
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
# Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Compute the final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
339
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
class RTDetrLoss(nn.Module): """ This class computes the losses for RTDetr. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box).
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
Args: matcher (`DetrHungarianMatcher`): Module able to compute a matching between targets and proposals. weight_dict (`Dict`): Dictionary relating each loss with its weights. These losses are configured in RTDetrConfig as `weight_loss_vfl`, `weight_loss_bbox`, `weight_loss_giou` losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. alpha (`float`): Parameter alpha used to compute the focal loss. gamma (`float`): Parameter gamma used to compute the focal loss. eos_coef (`float`): Relative classification weight applied to the no-object category. num_classes (`int`): Number of object categories, omitting the special no-object category. """ def __init__(self, config): super().__init__()
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
self.matcher = RTDetrHungarianMatcher(config) self.num_classes = config.num_labels self.weight_dict = { "loss_vfl": config.weight_loss_vfl, "loss_bbox": config.weight_loss_bbox, "loss_giou": config.weight_loss_giou, } self.losses = ["vfl", "boxes"] self.eos_coef = config.eos_coefficient empty_weight = torch.ones(config.num_labels + 1) empty_weight[-1] = self.eos_coef self.register_buffer("empty_weight", empty_weight) self.alpha = config.focal_loss_alpha self.gamma = config.focal_loss_gamma def loss_labels_vfl(self, outputs, targets, indices, num_boxes, log=True): if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") if "logits" not in outputs: raise KeyError("No predicted logits found in outputs") idx = self._get_source_permutation_idx(indices)
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
src_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([_target["boxes"][i] for _target, (_, i) in zip(targets, indices)], dim=0) ious, _ = box_iou(center_to_corners_format(src_boxes), center_to_corners_format(target_boxes)) ious = torch.diag(ious).detach() src_logits = outputs["logits"] target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device ) target_classes[idx] = target_classes_original target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] target_score_original = torch.zeros_like(target_classes, dtype=src_logits.dtype) target_score_original[idx] = ious.to(target_score_original.dtype) target_score = target_score_original.unsqueeze(-1) * target
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
pred_score = F.sigmoid(src_logits).detach() weight = self.alpha * pred_score.pow(self.gamma) * (1 - target) + target_score loss = F.binary_cross_entropy_with_logits(src_logits, target_score, weight=weight, reduction="none") loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {"loss_vfl": loss} def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (NLL) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") src_logits = outputs["logits"]
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
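A toy varifocal-style weighting mirroring loss_labels_vfl above (logits, IoU, alpha, and gamma are all made-up values): matched entries are weighted by the pair's IoU, unmatched ones by a focal term on the predicted score.

import torch
import torch.nn.functional as F

alpha, gamma = 0.75, 2.0
src_logits = torch.randn(1, 4, 3)                          # [batch, queries, classes]
target = torch.zeros(1, 4, 3)
target[0, 1, 2] = 1.0                                      # query 1 matched to class 2
target_score = target * 0.8                                # 0.8 = IoU of the matched pair
pred_score = torch.sigmoid(src_logits).detach()
weight = alpha * pred_score.pow(gamma) * (1 - target) + target_score
loss = F.binary_cross_entropy_with_logits(src_logits, target_score, weight=weight, reduction="none")
loss_vfl = loss.mean(1).sum() * src_logits.shape[1] / 1    # num_boxes = 1 in this toy example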
idx = self._get_source_permutation_idx(indices) target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device ) target_classes[idx] = target_classes_original loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.class_weight) losses = {"loss_ce": loss_ce} return losses
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
@torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) src_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) losses = {} loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none") losses["loss_bbox"] = loss_bbox.sum() / num_boxes
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(src_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. """ if "pred_masks" not in outputs: raise KeyError("No predicted masks found in outputs")
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs["pred_masks"] source_masks = source_masks[source_idx] masks = [t["masks"] for t in targets] target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] # upsample predictions to the target size source_masks = nn.functional.interpolate( source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False ) source_masks = source_masks[:, 0].flatten(1)
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes), "loss_dice": dice_loss(source_masks, target_masks, num_boxes), } return losses def loss_labels_bce(self, outputs, targets, indices, num_boxes, log=True): src_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device ) target_classes[idx] = target_classes_original
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] loss = F.binary_cross_entropy_with_logits(src_logits, target * 1.0, reduction="none") loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {"loss_bce": loss} def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
def loss_labels_focal(self, outputs, targets, indices, num_boxes, log=True): if "logits" not in outputs: raise KeyError("No logits found in outputs") src_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device ) target_classes[idx] = target_classes_original target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] loss = sigmoid_focal_loss(src_logits, target, self.alpha, self.gamma) loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {"loss_focal": loss}
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, "masks": self.loss_masks, "bce": self.loss_labels_bce, "focal": self.loss_labels_focal, "vfl": self.loss_labels_vfl, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) @staticmethod def get_cdn_matched_indices(dn_meta, targets): dn_positive_idx, dn_num_group = dn_meta["dn_positive_idx"], dn_meta["dn_num_group"] num_gts = [len(t["class_labels"]) for t in targets] device = targets[0]["class_labels"].device
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
dn_match_indices = [] for i, num_gt in enumerate(num_gts): if num_gt > 0: gt_idx = torch.arange(num_gt, dtype=torch.int64, device=device) gt_idx = gt_idx.tile(dn_num_group) assert len(dn_positive_idx[i]) == len(gt_idx) dn_match_indices.append((dn_positive_idx[i], gt_idx)) else: dn_match_indices.append( ( torch.zeros(0, dtype=torch.int64, device=device), torch.zeros(0, dtype=torch.int64, device=device), ) ) return dn_match_indices def forward(self, outputs, targets): """ This performs the loss computation.
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
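A toy illustration of the contrastive-denoising matching in get_cdn_matched_indices above (group count and indices are made up): each ground-truth index is simply tiled once per denoising group.

import torch

dn_num_group, num_gt = 3, 2
gt_idx = torch.arange(num_gt, dtype=torch.int64).tile(dn_num_group)       # tensor([0, 1, 0, 1, 0, 1])
dn_positive_idx = torch.arange(num_gt * dn_num_group, dtype=torch.int64)  # positive denoising queries
dn_match_indices = [(dn_positive_idx, gt_idx)]   # one (query, target) index pair per image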
Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. """ outputs_without_aux = {k: v for k, v in outputs.items() if "auxiliary_outputs" not in k} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) num_boxes = torch.clamp(num_boxes, min=1).item()
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
# Compute all the requested losses losses = {} for loss in self.losses: l_dict = self.get_loss(loss, outputs, targets, indices, num_boxes) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} losses.update(l_dict)
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} l_dict = {k + f"_aux_{i}": v for k, v in l_dict.items()} losses.update(l_dict)
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
# In case of cdn auxiliary losses. For rtdetr if "dn_auxiliary_outputs" in outputs: if "denoising_meta_values" not in outputs: raise ValueError( "The output must have the 'denoising_meta_values' key. Please ensure that 'outputs' includes a 'denoising_meta_values' entry." ) indices = self.get_cdn_matched_indices(outputs["denoising_meta_values"], targets) num_boxes = num_boxes * outputs["denoising_meta_values"]["dn_num_group"]
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
for i, auxiliary_outputs in enumerate(outputs["dn_auxiliary_outputs"]): # indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue kwargs = {} l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} l_dict = {k + f"_dn_{i}": v for k, v in l_dict.items()} losses.update(l_dict) return losses
340
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
class CompressedTensorsHfQuantizer(HfQuantizer): """ Quantizer for the compressed_tensors package. Loads and restores models to quantized state with compressed_tensors """ requires_calibration = True required_packages = ["compressed_tensors"] def __init__(self, quantization_config: CompressedTensorsConfig, **kwargs): super().__init__(quantization_config, **kwargs) if not is_compressed_tensors_available(): raise ImportError( "Using `compressed_tensors` quantized models requires the compressed-tensors library: " "`pip install compressed-tensors`" ) from compressed_tensors.compressors import ModelCompressor self.compressor = ModelCompressor.from_compression_config(quantization_config) self.run_compressed = quantization_config.run_compressed self.quantization_config = quantization_config
341
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_compressed_tensors.py
def validate_environment(self, *args, **kwargs): if not is_compressed_tensors_available(): raise ImportError( "Using `compressed_tensors` quantized models requires the compressed-tensors library: " "`pip install compressed-tensors`" ) if not is_torch_available(): # torch already should be installed as part of compressed tensors raise ImportError("torch is required for using compressed-tensors quantization") def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: logger.info("Loading model using torch.float16 for compressed-tensors quantization") torch_dtype = torch.float16 elif torch_dtype != torch.float16: logger.info( "We suggest you to set `torch_dtype=torch.float16` for better efficiency with compressed_tensors." ) return torch_dtype
341
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_compressed_tensors.py
def _process_model_before_weight_loading(self, model, **kwargs): from compressed_tensors.quantization import apply_quantization_config ct_quantization_config = self.compressor.quantization_config if self.run_compressed and self.is_quantization_compressed: apply_quantization_config(model, ct_quantization_config, run_compressed=True) elif not self.is_quantization_compressed: apply_quantization_config(model, ct_quantization_config) def _process_model_after_weight_loading(self, model, **kwargs): """Decompress the loaded model if necessary - needed for QAT""" if (self.is_quantization_compressed and not self.run_compressed) or self.is_sparsification_compressed: config = kwargs.get("config", None) cache_path = config._name_or_path if not os.path.exists(cache_path): from transformers.utils import cached_file
341
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_compressed_tensors.py
config_file_path = cached_file(cache_path, "config.json") cache_path = os.path.sep.join(config_file_path.split(os.path.sep)[:-1]) if self.is_quantization_compressed and not self.run_compressed: from compressed_tensors.quantization import QuantizationStatus self.compressor.quantization_config.quantization_status = QuantizationStatus.FROZEN self.compressor.decompress(model_path=cache_path, model=model) @property def is_quantization_compressed(self): from compressed_tensors.quantization import QuantizationStatus return ( self.quantization_config.quantization_config is not None and self.quantization_config.quantization_config.quantization_status == QuantizationStatus.COMPRESSED ) @property def is_sparsification_compressed(self): from compressed_tensors.config.base import CompressionFormat
341
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_compressed_tensors.py
return ( self.quantization_config.sparsity_config is not None and self.quantization_config.sparsity_config.format != CompressionFormat.dense.value ) @property def is_trainable(self): return True def is_qat_trainable(self) -> bool: """Loaded models can carry out quantization-aware training""" # models need to be decompressed to carry out QAT return not self.run_compressed or not self.is_quantization_compressed def is_serializable(self, safe_serialization=None) -> bool: """Models quantized using compressed tensors can be saved to disk""" return True
341
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_compressed_tensors.py
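Since `requires_calibration = True`, this quantizer only restores checkpoints that were already compressed; a minimal loading sketch, where the repo id is a placeholder rather than a real checkpoint, would be:

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "org/some-compressed-tensors-model"   # placeholder id for an already-compressed checkpoint
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)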
class FbgemmFp8HfQuantizer(HfQuantizer): """ FP8 quantization using fbgemm kernels """ requires_parameters_quantization = True requires_calibration = False required_packages = ["fbgemm-gpu", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config
342
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py
def validate_environment(self, *args, **kwargs): if not is_torch_available() or version.parse(importlib.metadata.version("torch")) < version.parse("2.1.0"): raise ImportError( "Using fbgemm fp8 quantization requires torch >= 2.1.0. " "Please install the latest version of torch (`pip install --upgrade torch`)" ) if not is_fbgemm_gpu_available(): raise ImportError( "Using fbgemm fp8 quantization requires the fbgemm-gpu library. " "Please install the latest version of fbgemm-gpu by following: https://pytorch.org/FBGEMM/fbgemm_gpu-development/InstallationInstructions.html#fbgemm-gpu-install-libraries" ) if not is_accelerate_available("0.32.2"): raise ImportError( "Loading an FP8 quantized model requires accelerate > 0.32.1 (`pip install --upgrade accelerate`)" )
342
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py
if not torch.cuda.is_available(): raise RuntimeError("Using FP8 quantized models with fbgemm kernels requires a GPU") compute_capability = torch.cuda.get_device_capability() major, minor = compute_capability if major < 9: raise ValueError( "FP8 quantized models are only supported on GPUs with compute capability >= 9.0 (e.g. H100)" )
342
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py
device_map = kwargs.get("device_map", None) if device_map is None: logger.warning_once( "You have loaded an FP8 model on CPU and have a CUDA device available, make sure to set " "your model on a GPU device in order to run your model. To remove this warning, pass device_map = 'cuda'. " ) elif device_map is not None: if ( not self.pre_quantized and isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()) ): raise ValueError( "You are attempting to load an FP8 model with a device_map that contains a CPU or disk device." "This is not supported when the model is quantized on the fly. " "Please use a quantized checkpoint or remove the CPU or disk device from the device_map." )
342
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: torch_dtype = torch.bfloat16 logger.info( "Overriding torch_dtype=%s with `torch_dtype=torch.bfloat16` due to " "requirements of `fbgemm-gpu` to enable model loading in fp8. " "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" " torch_dtype=torch.bfloat16 to remove this warning.", torch_dtype, ) elif torch_dtype == torch.float16: raise ValueError( "You cannot use FP8 with torch_dtype=torch.float16. " "We recommend passing torch_dtype=torch.bfloat16" ) return torch_dtype
342
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py
def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ): from ..integrations import FbgemmFp8Linear module, tensor_name = get_module_from_name(model, param_name) if isinstance(module, FbgemmFp8Linear): if self.pre_quantized or tensor_name == "bias": if tensor_name == "weight" and param_value.dtype != torch.float8_e4m3fn: raise ValueError("Expect quantized weights but got an unquantized weight") return False else: if tensor_name == "weight_scale": raise ValueError("Expect unquantized weights but got a quantized weight_scale") return True return False
342
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py
def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: Optional[List[str]] = None, ): """ Quantizes weights into weight and weight_scale """ new_value, weight_scale = torch.ops.fbgemm.quantize_fp8_per_row(param_value) module, tensor_name = get_module_from_name(model, param_name) module._buffers[tensor_name] = new_value.to(target_device) # to have the right output shape -> (out_features, 1) module._buffers["weight_scale"] = weight_scale.view(weight_scale.shape[0], 1).to(target_device) if unexpected_keys is not None and param_name in unexpected_keys: unexpected_keys.remove(param_name) del param_name def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): return model
342
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py
def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from ..integrations import get_keys_to_not_convert, replace_with_fbgemm_fp8_linear self.modules_to_not_convert = get_keys_to_not_convert(model) if self.quantization_config.modules_to_not_convert is not None: self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert) model = replace_with_fbgemm_fp8_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config, pre_quantized=self.pre_quantized, ) model.config.quantization_config = self.quantization_config def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]: from ..integrations import FbgemmFp8Linear
342
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py
not_missing_keys = [] for name, module in model.named_modules(): if isinstance(module, FbgemmFp8Linear): for missing in missing_keys: if ( (name in missing or name in f"{prefix}.{missing}") and not missing.endswith(".weight") and not missing.endswith(".bias") ): not_missing_keys.append(missing) return [k for k in missing_keys if k not in not_missing_keys] def is_serializable(self, safe_serialization=None): return True @property def is_trainable(self) -> bool: return False
342
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py
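Because `requires_calibration = False`, FP8 quantization can happen at load time; a hedged sketch, assuming an H100-class GPU and that `FbgemmFp8Config` is available in this transformers version (the model id is a placeholder):

import torch
from transformers import AutoModelForCausalLM, FbgemmFp8Config

quantization_config = FbgemmFp8Config()           # on-the-fly FP8 quantization
model = AutoModelForCausalLM.from_pretrained(
    "org/some-bf16-model",                        # placeholder id
    torch_dtype=torch.bfloat16,
    device_map="cuda",
    quantization_config=quantization_config,
)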
class AwqQuantizer(HfQuantizer): """ 4-bit quantization for Activation-aware Weight Quantization (AWQ) (https://arxiv.org/abs/2306.00978) """ # AWQ requires data calibration - we support only inference requires_calibration = True required_packages = ["awq", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) def validate_environment(self, device_map, **kwargs): if not is_auto_awq_available(): raise ImportError("Loading an AWQ quantized model requires the auto-awq library (`pip install autoawq`)") if not is_accelerate_available(): raise ImportError("Loading an AWQ quantized model requires accelerate (`pip install accelerate`)")
343
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_awq.py
if self.quantization_config.version == AWQLinearVersion.GEMM and not torch.cuda.is_available(): logger.warning_once("No CUDA found, replace GEMM with IPEX version to support non-cuda AWQ model.") self.quantization_config.version = AWQLinearVersion.IPEX
343
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_awq.py
if self.quantization_config.version == AWQLinearVersion.IPEX: if version.parse(importlib.metadata.version("autoawq")) < version.parse("0.2.6"): raise RuntimeError( "To use the IPEX backend, you need autoawq>=0.2.6. Please install the latest version or build from source." ) if device_map is None: logger.warning_once( "You have loaded an AWQ model without setting device_map, please set 'cpu' or 'xpu' or 'auto'" ) elif isinstance(device_map, dict) and "disk" in device_map.values(): raise ValueError( "You are attempting to load an IPEX version AWQ model with a device_map that contains a disk device." " This is not supported. Please make sure only cpu and xpu are in the device_map." ) else: if not torch.cuda.is_available(): raise RuntimeError(
343
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_awq.py
"GPU is required to run AWQ quantized model. You can use IPEX version AWQ if you have an Intel CPU" )
343
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_awq.py
if device_map is None: logger.warning_once( "You have loaded an AWQ model on CPU and have a CUDA device available, make sure to set " "your model on a GPU device in order to run your model." ) elif device_map is not None: if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()): raise ValueError( "You are attempting to load an AWQ model with a device_map that contains a CPU or disk device." " This is not supported. Please remove the CPU or disk device from the device_map." )
343
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_awq.py
def update_torch_dtype(self, torch_dtype): if torch_dtype is None: torch_dtype = torch.float16 logger.info("Loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually.") elif torch_dtype != torch.float16: logger.warning("We suggest you to set `torch_dtype=torch.float16` for better efficiency with AWQ.") return torch_dtype def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs): from ..integrations import get_keys_to_not_convert, replace_quantization_scales, replace_with_awq_linear self.modules_to_not_convert = get_keys_to_not_convert(model) if self.quantization_config.modules_to_not_convert is not None: self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert)
343
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_awq.py
model, has_been_replaced = replace_with_awq_linear( model, quantization_config=self.quantization_config, modules_to_not_convert=self.modules_to_not_convert ) model = replace_quantization_scales(model, model.config.model_type) if not has_been_replaced: logger.warning( "You are loading an AWQ model but no linear modules were found in your model." " Please double check your model architecture, or submit an issue on github if you think this is a bug." ) def _process_model_after_weight_loading(self, model, **kwargs): if self.quantization_config.do_fuse: from ..integrations import fuse_awq_modules model = fuse_awq_modules(model, self.quantization_config) model._awq_is_fused = True # TODO: consider storing this flag in model.config instead
343
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_awq.py
if self.quantization_config.version == AWQLinearVersion.EXLLAMA: from ..integrations import post_init_awq_exllama_modules model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) if self.quantization_config.version == AWQLinearVersion.IPEX: from ..integrations import post_init_awq_ipex_modules model = post_init_awq_ipex_modules(model) def is_serializable(self, safe_serialization=None): # AWQ through auto-awq has been always serializable, except if the model is fused. if self.quantization_config.do_fuse: logger.warning("You cannot save an AWQ model that uses fused modules!") return False if self.quantization_config.version == AWQLinearVersion.EXLLAMA: logger.warning("You cannot save an AWQ model that uses Exllama backend!") return False return True
343
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_awq.py
@property def is_trainable(self): # AWQ supports PEFT fine-tuning from version 0.2.0 MIN_AWQ_VERSION_FOR_PEFT = "0.2.0" return version.parse(importlib.metadata.version("autoawq")) >= version.parse(MIN_AWQ_VERSION_FOR_PEFT)
343
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_awq.py
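Since AWQ checkpoints must already be calibrated (`requires_calibration = True` above), loading is just `from_pretrained` on a pre-quantized repo; a minimal sketch with a placeholder repo id:

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "org/some-awq-quantized-model"        # placeholder id for a pre-quantized AWQ checkpoint
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda:0")
tokenizer = AutoTokenizer.from_pretrained(model_id)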
class Bnb4BitHfQuantizer(HfQuantizer): """ 4-bit quantization from bitsandbytes.py quantization method: before loading: converts transformer layers into Linear4bit during loading: load 16bit weight and pass to the layer object after: quantizes individual weights in Linear4bit into 4bit at the first .cuda() call saving: from state dict, as usual; saves weights and `quant_state` components loading: need to locate `quant_state` components and pass to Param4bit constructor """ use_keep_in_fp32_modules = True requires_parameters_quantization = True requires_calibration = False required_packages = ["bitsandbytes", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) if self.quantization_config.llm_int8_skip_modules is not None: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
def validate_environment(self, *args, **kwargs): if not is_accelerate_available(): raise ImportError( f"Using `bitsandbytes` 4-bit quantization requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`" ) if not is_bitsandbytes_available(): raise ImportError( "Using `bitsandbytes` 4-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`" ) from ..integrations import validate_bnb_backend_availability from ..utils import is_bitsandbytes_multi_backend_available bnb_multibackend_is_enabled = is_bitsandbytes_multi_backend_available() validate_bnb_backend_availability(raise_exception=True)
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): raise ValueError( "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make" " sure the weights are in PyTorch format." )
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
device_map = kwargs.get("device_map", None) if ( device_map is not None and isinstance(device_map, dict) and not self.quantization_config.llm_int8_enable_fp32_cpu_offload ): device_map_without_lm_head = { key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert } if set(device_map.values()) == {"cpu"} and bnb_multibackend_is_enabled: pass elif "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): raise ValueError( "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the " "quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules " "in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to "
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
"`from_pretrained`. Check " "https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu " "for more details. " )
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.39.0"): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit inference and training" " make sure you have the latest version of `bitsandbytes` installed" ) def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.19.0"): from accelerate.utils import CustomDtype
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
if target_dtype != torch.int8: logger.info(f"target_dtype {target_dtype} is replaced by `CustomDtype.INT4` for 4-bit BnB quantization") return CustomDtype.INT4 else: raise ValueError( "You are using `device_map='auto'` on a 4bit loaded version of the model. To automatically compute" " the appropriate device map, you should upgrade your `accelerate` library with " "`pip install --upgrade accelerate` or install it from source to support fp4 auto device map " "calculation. You may encounter unexpected behavior, or pass your own device map" ) def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ) -> bool: import bitsandbytes as bnb
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
module, tensor_name = get_module_from_name(model, param_name) if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Params4bit): # Add here check for loaded components' dtypes once serialization is implemented return True elif isinstance(module, bnb.nn.Linear4bit) and tensor_name == "bias": # bias could be loaded by regular set_module_tensor_to_device() from accelerate, # but it would wrongly use uninitialized weight there. return True else: return False
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
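To make the (module, tensor_name) lookup used by `check_quantized_param` concrete, here is a simplified mirror of what `get_module_from_name` does; this is a hedged re-implementation for illustration, not the library helper itself.

def get_module_and_tensor(model, param_name):
    # e.g. "model.layers.0.mlp.down_proj.weight" -> (down_proj module, "weight")
    if "." in param_name:
        module_name, tensor_name = param_name.rsplit(".", 1)
        module = model.get_submodule(module_name)
    else:
        module, tensor_name = model, param_name
    return module, tensor_name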
def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: Optional[List[str]] = None, ): """ combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device() """ import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if tensor_name not in module._parameters: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") old_value = getattr(module, tensor_name)
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
# `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). if isinstance(target_device, int) and is_torch_npu_available(): target_device = f"npu:{target_device}" if tensor_name == "bias": if param_value is None: new_value = old_value.to(target_device) else: new_value = param_value.to(target_device) new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad) module._parameters[tensor_name] = new_value return
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
        if not isinstance(module._parameters[tensor_name], bnb.nn.Params4bit):
            raise ValueError("This function only loads `Linear4bit` components.")
        if (
            old_value.device == torch.device("meta")
            and target_device not in ["meta", torch.device("meta")]
            and param_value is None
        ):
            raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put it on {target_device}.")

        # construct `new_value` for the module._parameters[tensor_name]:
        if self.pre_quantized:
            # 4-bit loading. Collecting components for restoring the quantized weight.
            # This can be expanded to make a universal call for any quantized weight loading.
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
            if not self.is_serializable():
                raise ValueError(
                    "Detected int4 weights but the version of bitsandbytes is not compatible with int4 serialization. "
                    "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                )

            if (param_name + ".quant_state.bitsandbytes__fp4" not in state_dict) and (
                param_name + ".quant_state.bitsandbytes__nf4" not in state_dict
            ):
                raise ValueError(
                    f"Supplied state dict for {param_name} does not contain `bitsandbytes__*` and possibly other `quantized_stats` components."
                )

            quantized_stats = {}
            for k, v in state_dict.items():
                if param_name + "." in k:
                    quantized_stats[k] = v
                    if unexpected_keys is not None and k in unexpected_keys:
                        unexpected_keys.remove(k)
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
param_kwargs = {} if self.is_bnb_supports_quant_storage_module: param_kwargs["module"] = module new_value = bnb.nn.Params4bit.from_prequantized( data=param_value, quantized_stats=quantized_stats, requires_grad=False, device=target_device, **param_kwargs, ) else: new_value = param_value.to("cpu") # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls, Conv1D): new_value = new_value.T kwargs = old_value.__dict__ new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(target_device) module._parameters[tensor_name] = new_value
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
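The non-prequantized branch above relies on bitsandbytes quantizing the weight when it is moved to an accelerator; a hedged standalone illustration of that behavior (requires a CUDA device):

import torch
import bitsandbytes as bnb

fp16_weight = torch.randn(64, 64, dtype=torch.float16)
param = bnb.nn.Params4bit(fp16_weight, requires_grad=False)
param = param.to("cuda")   # the device transfer triggers 4-bit quantization
print(param.quant_state)   # absmax / blocksize / quant_type, needed later for dequantization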
# Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.adjust_max_memory def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: # need more space for buffers that are created during quantization max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
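A small illustration of the 10% headroom reserved by `adjust_max_memory` (byte counts are illustrative):

max_memory = {0: 20 * 1024**3, "cpu": 64 * 1024**3}       # per-device budgets in bytes
adjusted = {key: val * 0.90 for key, val in max_memory.items()}
# adjusted == {0: 19327352832.0, "cpu": 61847529062.4}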
# Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.update_torch_dtype def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` logger.info( "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to " "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. " "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" " torch_dtype=torch.float16 to remove this warning.", torch_dtype, ) torch_dtype = torch.float16 return torch_dtype
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
    def update_device_map(self, device_map):
        if device_map is None:
            if torch.cuda.is_available():
                device_map = {"": torch.cuda.current_device()}
            elif is_torch_npu_available():
                device_map = {"": f"npu:{torch.npu.current_device()}"}
            elif is_torch_xpu_available():
                device_map = {"": f"xpu:{torch.xpu.current_device()}"}
            else:
                device_map = {"": "cpu"}
            logger.info(
                "The device_map was not initialized. "
                f"Setting device_map to {device_map}. "
                "If you want to use the model for inference, please set device_map='auto'."
            )
        return device_map
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
# Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_before_weight_loading def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear llm_int8_enable_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if self.quantization_config.llm_int8_skip_modules is None: self.modules_to_not_convert = get_keys_to_not_convert(model) else: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules if not isinstance(self.modules_to_not_convert, list): self.modules_to_not_convert = [self.modules_to_not_convert]
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
self.modules_to_not_convert.extend(keep_in_fp32_modules) # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk` if isinstance(device_map, dict) and len(device_map.keys()) > 1: keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] if len(keys_on_cpu) > 0 and not llm_int8_enable_fp32_cpu_offload: raise ValueError( "If you want to offload some keys to `cpu` or `disk`, you need to set " "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be " " converted to 8-bit but kept in 32-bit." ) self.modules_to_not_convert.extend(keys_on_cpu)
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
        model = replace_with_bnb_linear(
            model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
        )
        # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbytes.py to here

        model.config.quantization_config = self.quantization_config

    # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_after_weight_loading with 8bit->4bit
    def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
        model.is_loaded_in_4bit = True
        model.is_4bit_serializable = self.is_serializable()
        return model

    def is_serializable(self, safe_serialization=None):
        _is_4bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.41.3")
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
        if not _is_4bit_serializable:
            logger.warning(
                "You are calling `save_pretrained` on a 4-bit converted model, but your `bitsandbytes` version doesn't support it. "
                "If you want to save 4-bit models, make sure to have `bitsandbytes>=0.41.3` installed."
            )
            return False

        return True

    @cached_property
    def is_bnb_supports_quant_storage_module(self) -> bool:
        """
        Determines whether the current version of bitsandbytes supports the `module` parameter in
        `Params4bit.from_prequantized`.

        :return: `True` if `bitsandbytes>=0.43.3` is installed, `False` otherwise.
        """
        return version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.43.3")

    @property
    def is_trainable(self) -> bool:
        return True

    def _dequantize(self, model):
        from ..integrations import dequantize_and_replace
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
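A hedged end-to-end sketch tying the serialization check and the `_dequantize` hook together; the checkpoint and output path are illustrative, and `bitsandbytes>=0.41.3` is assumed for the save step.

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # illustrative checkpoint
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)
model.save_pretrained("./opt-350m-4bit")  # stores Params4bit data plus quant_state tensors
model.dequantize()                        # routes through _dequantize to restore dense Linear layers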
model = dequantize_and_replace( model, self.modules_to_not_convert, quantization_config=self.quantization_config ) return model
344
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
class TorchAoHfQuantizer(HfQuantizer): """ Quantizer for torchao: https://github.com/pytorch/ao/ """ requires_parameters_quantization = True requires_calibration = False required_packages = ["torchao"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) def validate_environment(self, *args, **kwargs): if not is_torchao_available(): raise ImportError("Loading an torchao quantized model requires torchao library (`pip install torchao`)")
345
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_torchao.py
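A minimal hedged sketch of loading through the torchao quantizer validated above; the checkpoint is illustrative, and int4_weight_only is paired with bfloat16 as enforced by `update_torch_dtype` below.

import torch
from transformers import AutoModelForCausalLM, TorchAoConfig

quantization_config = TorchAoConfig("int4_weight_only", group_size=128)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",        # illustrative checkpoint
    torch_dtype=torch.bfloat16, # int4_weight_only currently expects bfloat16
    device_map="auto",
    quantization_config=quantization_config,
)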
        self.offload = False

        device_map = kwargs.get("device_map", None)
        if isinstance(device_map, dict):
            if "cpu" in device_map.values() or "disk" in device_map.values():
                if self.pre_quantized:
                    raise ValueError(
                        "You are attempting to perform cpu/disk offload with a pre-quantized torchao model. "
                        "This is not supported yet. Please remove the CPU or disk device from the device_map."
                    )
                else:
                    self.offload = True

        if self.pre_quantized:
            weights_only = kwargs.get("weights_only", None)
            if weights_only:
                torch_version = version.parse(importlib.metadata.version("torch"))
                if torch_version < version.parse("2.5.0"):
                    raise RuntimeError(
345
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_torchao.py
f"In order to use torchao pre-quantized model, you need to have torch>=2.5.0. However, the current version is {torch_version}." f" You can also set with `weights_only=False` in `from_pretrained` if you don't want to update torch" )
345
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_torchao.py
    def update_torch_dtype(self, torch_dtype):
        if self.quantization_config.quant_type == "int4_weight_only":
            if torch_dtype is not None and torch_dtype != torch.bfloat16:
                logger.warning_once(
                    f"You are passing torch_dtype={torch_dtype} for int4_weight_only quantization, but only bfloat16 is supported right now. Please set torch_dtype to torch.bfloat16."
                )
            if torch_dtype is None:
                logger.warning_once(
                    "Setting torch_dtype to torch.bfloat16 for int4_weight_only quantization since only bfloat16 is supported right now. Please set torch_dtype=torch.bfloat16 to remove this warning."
                )
                torch_dtype = torch.bfloat16
        if self.quantization_config.quant_type == "int8_dynamic_activation_int8_weight":
            if torch_dtype is None:
                logger.info(
345
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_torchao.py
"Setting torch_dtype to torch.float32 for int8_dynamic_activation_int8_weight quantization as no torch_dtype was specified in from_pretrained" ) # we need to set the torch_dtype, otherwise we have dtype mismatch when performing the quantized linear op torch_dtype = torch.float32 return torch_dtype
345
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_torchao.py
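And the corresponding pairing for int8_dynamic_activation_int8_weight, which the method above defaults to float32 to avoid dtype mismatches in the quantized linear op (checkpoint is illustrative):

import torch
from transformers import AutoModelForCausalLM, TorchAoConfig

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",        # illustrative checkpoint
    torch_dtype=torch.float32,  # matches the default chosen by update_torch_dtype
    quantization_config=TorchAoConfig("int8_dynamic_activation_int8_weight"),
)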
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.19.0"): from accelerate.utils import CustomDtype map_to_target_dtype = { "int4_weight_only": CustomDtype.INT4, "int8_weight_only": torch.int8, "int8_dynamic_activation_int8_weight": torch.int8, } return map_to_target_dtype[self.quantization_config.quant_type] else: raise ValueError( "You are using `device_map='auto'` on a torchao quantized model. To automatically compute" " the appropriate device map, you should upgrade your `accelerate` library with " "`pip install --upgrade accelerate`" )
345
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_torchao.py
    def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
        # need more space for the quantization parameters (e.g. scale). Tested with int4 weight-only and group_size = 128
        max_memory = {key: val * 0.9 for key, val in max_memory.items()}
        return max_memory

    def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
        from ..integrations import get_keys_to_not_convert

        self.modules_to_not_convert = get_keys_to_not_convert(model)

        if self.quantization_config.modules_to_not_convert is not None:
            self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert)

        return
345
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_torchao.py