---
dataset_info:
  features:
    - name: image
      dtype: image
    - name: objects
      struct:
        - name: bbox
          sequence:
            sequence: float64
        - name: segmentation
          sequence:
            sequence:
              sequence: float64
        - name: categories
          sequence: int64
  splits:
    - name: train
      num_bytes: 17598458856.47
      num_examples: 117266
    - name: validation
      num_bytes: 795110726.04
      num_examples: 4952
  download_size: 20170024873
  dataset_size: 18393569582.510002
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: validation
        path: data/validation-*
task_categories:
  - object-detection
---

# MS-COCO2017

COCO 2017 images with object-detection annotations (bounding boxes, polygon segmentations, and category ids), repackaged in the `imagefolder` format.

## Use the dataset

Stream the validation split and draw the annotations on a sample:

```python
from random import randint
from datasets import load_dataset
from PIL import Image, ImageDraw, ImageFont

ds = load_dataset("ariG23498/coco2017", streaming=True, split="validation")
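# streaming=True iterates the parquet shards lazily instead of downloading
# the full ~20 GB up front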

sample = next(iter(ds))
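
# Each sample pairs a PIL image with parallel lists of annotations:
#   sample["objects"]["bbox"]         -> [x, y, width, height] per object
#   sample["objects"]["segmentation"] -> list of polygons per object
#   sample["objects"]["categories"]   -> COCO category id per object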


def draw_bboxes_on_image(
    image: Image.Image,
    objects: dict,
    category_names: dict | None = None,
    box_color: str = "red",
    text_color: str = "white",
) -> Image.Image:
    image_copy = image.copy()
    draw = ImageDraw.Draw(image_copy)
    font = ImageFont.load_default()

    bboxes = objects.get("bbox", [])
    categories = objects.get("categories", [])

    for bbox, category_id in zip(bboxes, categories):
        x, y, width, height = bbox
        x_min, y_min = x, y
        x_max, y_max = x + width, y + height

        # Draw bounding box
        draw.rectangle([x_min, y_min, x_max, y_max], outline=box_color, width=2)

        # Prepare label
        label = category_names.get(category_id, str(category_id)) if category_names else str(category_id)
        text_bbox = draw.textbbox((0, 0), label, font=font)
        text_width = text_bbox[2] - text_bbox[0]
        text_height = text_bbox[3] - text_bbox[1]
        label_top = max(y_min - text_height - 4, 0)

        # Draw label background and text
        draw.rectangle(
            [x_min, label_top, x_min + text_width + 4, label_top + text_height + 2],
            fill=box_color
        )
        draw.text((x_min + 2, label_top + 1), label, fill=text_color, font=font)

    return image_copy



def draw_segmaps_on_image(
    image: Image.Image,
    objects: dict,
    category_names: dict | None = None,
    alpha: float = 0.4,
    text_color: str = "white",
) -> Image.Image:
    base_image = image.convert("RGBA").copy()
    overlay = Image.new("RGBA", base_image.size, (255, 255, 255, 0))
    draw = ImageDraw.Draw(overlay)
    font = ImageFont.load_default()

    segmentations = objects.get("segmentation", [])
    categories = objects.get("categories", [])

    for segmentation, category_id in zip(segmentations, categories):
        polygons = segmentation if isinstance(segmentation[0], list) else [segmentation]
        label = category_names.get(category_id, str(category_id)) if category_names else str(category_id)

        for polygon in polygons:
            if len(polygon) >= 6:
                points = [(polygon[i], polygon[i + 1]) for i in range(0, len(polygon), 2)]

                # Draw filled polygon
                segmap_color = (randint(125, 255), randint(0, 125), randint(0, 255))
                rgba_fill = (*segmap_color, int(255 * alpha))
                draw.polygon(points, fill=rgba_fill)

                # Draw label at first vertex
                x0, y0 = points[0]
                draw.text((x0 + 2, y0 + 2), label, fill=text_color, font=font)

    return Image.alpha_composite(base_image, overlay).convert("RGB")

# For Bounding Boxes
od_image = draw_bboxes_on_image(
    image=sample["image"],
    objects=sample["objects"],
)

# For Segmentation Maps
segmap_image = draw_segmaps_on_image(
    image=sample["image"],
    objects=sample["objects"]
)
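
# Save the annotated images for inspection (output paths are illustrative)
od_image.save("bboxes.png")
segmap_image.save("segmaps.png")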

```

## Get the categories

The category id to name mapping lives in the original annotation files (downloaded in the build section below):

```python
import json

with open("/content/annotations/instances_train2017.json") as f:
    instances = json.load(f)

instances["categories"]
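
# Convenience (not part of the stored dataset): an id -> name mapping,
# e.g. 1 -> "person", to pass as `category_names` to the drawing helpers above
category_names = {cat["id"]: cat["name"] for cat in instances["categories"]}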

```

## Build the dataset and upload to Hub

The dataset was built by converting the official COCO 2017 release into an `imagefolder` layout with JSONL metadata:

```python
!pip install -U -q datasets

# Download and unzip COCO 2017
!wget -q http://images.cocodataset.org/zips/train2017.zip
!wget -q http://images.cocodataset.org/zips/val2017.zip
!wget -q http://images.cocodataset.org/annotations/annotations_trainval2017.zip

!unzip -q train2017.zip
!unzip -q val2017.zip
!unzip -q annotations_trainval2017.zip

import json
import shutil
from pathlib import Path
from tqdm import tqdm
from datasets import load_dataset

base_dir = Path("/content")
splits = {
    "train": {
        "image_dir": base_dir / "train2017",
        "annotation_file": base_dir / "annotations" / "instances_train2017.json",
    },
    "val": {
        "image_dir": base_dir / "val2017",
        "annotation_file": base_dir / "annotations" / "instances_val2017.json",
    }
}
output_dir = base_dir / "coco_imagefolder"
output_dir.mkdir(parents=True, exist_ok=True)

def normalize_segmentation(segmentation):
    if isinstance(segmentation, list):
        if all(isinstance(poly, list) for poly in segmentation):
            return segmentation  # already a list of polygons
        elif all(isinstance(pt, (int, float)) for pt in segmentation):
            return [segmentation]  # wrap single polygon
    return []  # skip RLE or malformed segmentations

def convert_coco_to_jsonl(annotation_path, output_metadata_path):
    with open(annotation_path) as f:
        data = json.load(f)

    id_to_filename = {img['id']: img['file_name'] for img in data['images']}
    annotations_by_image = {}

    for ann in data['annotations']:
        img_id = ann['image_id']
        bbox = ann['bbox']
        category = ann['category_id']
        segmentation = normalize_segmentation(ann['segmentation'])

        if not segmentation:
            continue  # skip if malformed or RLE

        if img_id not in annotations_by_image:
            annotations_by_image[img_id] = {
                "file_name": id_to_filename[img_id],
                "objects": {
                    "bbox": [],
                    "segmentation": [],
                    "categories": [],
                }
            }

        annotations_by_image[img_id]["objects"]["bbox"].append(bbox)
        annotations_by_image[img_id]["objects"]["segmentation"].append(segmentation)
        annotations_by_image[img_id]["objects"]["categories"].append(category)

    with open(output_metadata_path, "w") as f:
        for metadata in annotations_by_image.values():
            json.dump(metadata, f)
            f.write("\n")

# Build imagefolder structure
for split, info in splits.items():
    split_dir = output_dir / split
    split_dir.mkdir(parents=True, exist_ok=True)

    # Copy images
    for img_path in tqdm(info["image_dir"].glob("*.jpg"), desc=f"Copying {split} images"):
        shutil.copy(img_path, split_dir / img_path.name)

    # Write JSONL metadata
    metadata_path = split_dir / "metadata.jsonl"
    convert_coco_to_jsonl(info["annotation_file"], metadata_path)

# Load and push
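# `imagefolder` pairs each image with the metadata.jsonl row whose "file_name"
# matches, attaching the objects dict automatically; the `val` directory name
# is inferred as the `validation` split.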
dataset = load_dataset("imagefolder", data_dir=str(output_dir))
dataset.push_to_hub("ariG23498/coco2017")
```
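
Note that `normalize_segmentation` drops crowd annotations, which COCO stores in RLE form. If you need them, one option is to decode the RLE into a binary mask with `pycocotools` rather than skipping it. The sketch below is an assumption about how the script could be extended, not part of the original pipeline; `rle_to_mask` is a hypothetical helper.

```python
from pycocotools import mask as mask_utils  # pip install pycocotools

def rle_to_mask(segmentation, height, width):
    """Decode an RLE segmentation dict into an H x W uint8 binary mask."""
    if isinstance(segmentation["counts"], list):
        # Uncompressed RLE stores counts as a list; compress it first
        segmentation = mask_utils.frPyObjects(segmentation, height, width)
    return mask_utils.decode(segmentation)
```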