davanstrien (HF Staff) committed
Commit d450bf5 · 1 Parent(s): 9afaa83

Add VLM image classification script


- Uses vLLM's GuidedDecodingParams for structured classification (see the sketch below)
- Memory-efficient lazy batch processing
- Supports custom classes via CLI args
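
The core of the approach is constraining decoding to the label set: a GuidedDecodingParams(choice=...) object is passed to vLLM's SamplingParams, so the model can only emit one of the configured class names. Below is a minimal sketch of that pattern, mirroring what the script does per image; the class list, prompt, and blank test image are placeholders, and a CUDA GPU with the Qwen2-VL weights is assumed.

import base64
import io

from PIL import Image
from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams

# Placeholder input: any RGB image encoded as a base64 data URI works
buf = io.BytesIO()
Image.new("RGB", (224, 224), "white").save(buf, format="JPEG")
image_uri = "data:image/jpeg;base64," + base64.b64encode(buf.getvalue()).decode()

# Decoding is constrained to exactly one of these strings
classes = ["document", "photo", "diagram", "other"]
sampling_params = SamplingParams(
    temperature=0.1,
    max_tokens=50,
    guided_decoding=GuidedDecodingParams(choice=classes),
)

llm = LLM(model="Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True)
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "image_url", "image_url": {"url": image_uri}},
            {"type": "text", "text": f"Classify this image into one of: {', '.join(classes)}"},
        ],
    }
]
outputs = llm.chat(messages=[conversation], sampling_params=sampling_params)
print(outputs[0].outputs[0].text)  # always one of the strings in `classes`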

Files changed (1)
  1. vlm-classify.py +404 -0
vlm-classify.py ADDED
@@ -0,0 +1,404 @@
+ # /// script
+ # requires-python = ">=3.11"
+ # dependencies = [
+ #     "datasets",
+ #     "huggingface-hub[hf_transfer]",
+ #     "pillow",
+ #     "toolz",
+ #     "torch",
+ #     "tqdm",
+ #     "transformers",
+ #     "vllm>=0.6.5",
+ # ]
+ # ///
+
+ """
+ Classify images using Vision Language Models with vLLM.
+
+ This script processes images through VLMs to classify them into user-defined categories,
+ using vLLM's GuidedDecodingParams for structured output.
+
+ Examples:
+     # Basic classification
+     uv run vlm-classify.py \\
+         username/input-dataset \\
+         username/output-dataset \\
+         --classes "document,photo,diagram,other"
+
+     # With custom prompt and model
+     uv run vlm-classify.py \\
+         username/input-dataset \\
+         username/output-dataset \\
+         --classes "index-card,manuscript,title-page,other" \\
+         --prompt "What type of historical document is this?" \\
+         --model Qwen/Qwen2-VL-7B-Instruct
+
+     # Quick test with sample limit
+     uv run vlm-classify.py \\
+         davanstrien/sloane-index-cards \\
+         username/test-output \\
+         --classes "index,content,other" \\
+         --max-samples 10
+ """
+
+ import argparse
+ import base64
+ import io
+ import logging
+ import os
+ import sys
+ from collections import Counter
+ from typing import List, Optional, Union, Dict, Any
+
+ import torch
+ from PIL import Image
+ from datasets import load_dataset, Dataset
+ from huggingface_hub import login
+ from toolz import partition_all
+ from tqdm.auto import tqdm
+ from vllm import LLM, SamplingParams
+ from vllm.sampling_params import GuidedDecodingParams
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ def image_to_data_uri(image: Union[Image.Image, Dict[str, Any]]) -> str:
+     """Convert image to base64 data URI for VLM processing."""
+     if isinstance(image, Image.Image):
+         pil_img = image
+     elif isinstance(image, dict) and "bytes" in image:
+         pil_img = Image.open(io.BytesIO(image["bytes"]))
+     else:
+         raise ValueError(f"Unsupported image type: {type(image)}")
+
+     # Convert to RGB if necessary (handle RGBA, grayscale, etc.)
+     if pil_img.mode not in ("RGB", "L"):
+         pil_img = pil_img.convert("RGB")
+
+     # Convert to base64
+     buf = io.BytesIO()
+     pil_img.save(buf, format="JPEG", quality=95)
+     base64_str = base64.b64encode(buf.getvalue()).decode()
+     return f"data:image/jpeg;base64,{base64_str}"
+
+
+ def create_classification_messages(
+     image: Union[Image.Image, Dict[str, Any]],
+     prompt: str,
+ ) -> List[Dict]:
+     """Create chat messages for VLM classification."""
+     image_uri = image_to_data_uri(image)
+
+     return [
+         {
+             "role": "user",
+             "content": [
+                 {"type": "image_url", "image_url": {"url": image_uri}},
+                 {"type": "text", "text": prompt},
+             ],
+         }
+     ]
+
+
+ def main(
+     input_dataset: str,
+     output_dataset: str,
+     classes: str,
+     prompt: Optional[str] = None,
+     image_column: str = "image",
+     model: str = "Qwen/Qwen2-VL-7B-Instruct",
+     batch_size: int = 8,
+     max_samples: Optional[int] = None,
+     gpu_memory_utilization: float = 0.9,
+     max_model_len: Optional[int] = None,
+     tensor_parallel_size: Optional[int] = None,
+     split: str = "train",
+     hf_token: Optional[str] = None,
+     private: bool = False,
+ ):
+     """Classify images from a dataset using a Vision Language Model."""
+
+     # Check GPU availability
+     if not torch.cuda.is_available():
+         logger.error("CUDA is not available. This script requires a GPU.")
+         logger.error("If running locally, ensure you have a CUDA-capable GPU.")
+         logger.error("For cloud execution, use: hf jobs uv run --flavor a10g ...")
+         sys.exit(1)
+
+     # Parse classes
+     class_list = [c.strip() for c in classes.split(",")]
+     logger.info(f"Classes: {class_list}")
+
+     # Create default prompt if not provided
+     if prompt is None:
+         prompt = f"Classify this image into one of the following categories: {', '.join(class_list)}"
+     logger.info(f"Prompt template: {prompt}")
+
+     # Login to HF if token provided
+     HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
+     if HF_TOKEN:
+         login(token=HF_TOKEN)
+
+     # Load dataset
+     logger.info(f"Loading dataset: {input_dataset}")
+     dataset = load_dataset(input_dataset, split=split)
+
+     # Validate image column
+     if image_column not in dataset.column_names:
+         raise ValueError(f"Column '{image_column}' not found. Available: {dataset.column_names}")
+
+     # Limit samples if requested
+     if max_samples:
+         dataset = dataset.select(range(min(max_samples, len(dataset))))
+         logger.info(f"Limited to {len(dataset)} samples")
+
+     # Auto-detect tensor parallel size if not specified
+     if tensor_parallel_size is None:
+         tensor_parallel_size = torch.cuda.device_count()
+         logger.info(f"Auto-detected {tensor_parallel_size} GPUs for tensor parallelism")
+
+     # Initialize vLLM
+     logger.info(f"Loading model: {model}")
+     llm_kwargs = {
+         "model": model,
+         "gpu_memory_utilization": gpu_memory_utilization,
+         "tensor_parallel_size": tensor_parallel_size,
+         "trust_remote_code": True,  # Required for some VLMs
+     }
+
+     if max_model_len:
+         llm_kwargs["max_model_len"] = max_model_len
+
+     llm = LLM(**llm_kwargs)
+
+     # Create guided decoding params for classification
+     guided_decoding_params = GuidedDecodingParams(choice=class_list)
+     sampling_params = SamplingParams(
+         temperature=0.1,  # Low temperature for consistent classification
+         max_tokens=50,  # Classifications are short
+         guided_decoding=guided_decoding_params,
+     )
+
+     # Process images in batches to avoid memory issues
+     logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
+
+     # Pre-allocate one slot per dataset row so labels stay aligned with
+     # dataset indices even when individual images fail to load.
+     all_classifications = [None] * len(dataset)
+
+     # Process in batches using lazy loading
+     for batch_indices in tqdm(
+         partition_all(batch_size, range(len(dataset))),
+         total=(len(dataset) + batch_size - 1) // batch_size,
+         desc="Classifying images",
+     ):
+         batch_indices = list(batch_indices)
+
+         # Load only this batch's images
+         batch_images = []
+         valid_batch_indices = []
+
+         for idx in batch_indices:
+             try:
+                 image = dataset[idx][image_column]
+                 batch_images.append(image)
+                 valid_batch_indices.append(idx)
+             except Exception as e:
+                 logger.warning(f"Skipping image at index {idx}: {e}")
+
+         if not batch_images:
+             continue
+
+         try:
+             # Create messages for just this batch
+             batch_messages = [
+                 create_classification_messages(img, prompt)
+                 for img in batch_images
+             ]
+
+             # Process with vLLM
+             outputs = llm.chat(
+                 messages=batch_messages,
+                 sampling_params=sampling_params,
+                 use_tqdm=False,  # Already have outer progress bar
+             )
+
+             # Extract classifications, written back to their dataset indices
+             for idx, output in zip(valid_batch_indices, outputs):
+                 if output.outputs:
+                     all_classifications[idx] = output.outputs[0].text.strip()
+                 else:
+                     logger.warning(f"Empty output for image at index {idx}")
+
+         except Exception as e:
+             # Entries for this batch are left as None
+             logger.error(f"Error processing batch: {e}")
+
+     # Add classifications to dataset
+     logger.info("Adding classifications to dataset...")
+     dataset = dataset.add_column("label", all_classifications)
+
+     # Push to hub
+     logger.info(f"Pushing to {output_dataset}...")
+     dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
+
+     # Print summary
+     logger.info("Classification complete!")
+     logger.info(f"Processed {len(all_classifications)} images")
+     logger.info(f"Output dataset: {output_dataset}")
+
+     # Show distribution of classifications (failed/None entries are excluded)
+     label_counts = Counter(c for c in all_classifications if c is not None)
+     logger.info("Classification distribution:")
+     for label, count in sorted(label_counts.items()):
+         percentage = (count / len(all_classifications)) * 100 if all_classifications else 0
+         logger.info(f"  {label}: {count} ({percentage:.1f}%)")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(
+         description="Classify images using Vision Language Models",
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+         epilog="""
+ Examples:
+     # Basic classification
+     uv run vlm-classify.py \\
+         username/input-dataset \\
+         username/output-dataset \\
+         --classes "document,photo,diagram,other"
+
+     # With custom prompt
+     uv run vlm-classify.py \\
+         username/input-dataset \\
+         username/output-dataset \\
+         --classes "index-card,manuscript,other" \\
+         --prompt "What type of historical document is this?"
+
+     # HF Jobs execution
+     hf jobs uv run \\
+         --flavor a10g \\
+         https://huggingface.co/datasets/uv-scripts/vllm/raw/main/vlm-classify.py \\
+         username/input-dataset \\
+         username/output-dataset \\
+         --classes "title-page,content,index,other"
+ """,
+     )
+
+     parser.add_argument(
+         "input_dataset",
+         help="Input dataset ID on Hugging Face Hub",
+     )
+     parser.add_argument(
+         "output_dataset",
+         help="Output dataset ID on Hugging Face Hub",
+     )
+     parser.add_argument(
+         "--classes",
+         required=True,
+         help='Comma-separated list of classes (e.g., "cat,dog,other")',
+     )
+     parser.add_argument(
+         "--prompt",
+         default=None,
+         help="Custom classification prompt (default: auto-generated)",
+     )
+     parser.add_argument(
+         "--image-column",
+         default="image",
+         help="Column name containing images (default: image)",
+     )
+     parser.add_argument(
+         "--model",
+         default="Qwen/Qwen2-VL-7B-Instruct",
+         help="Vision Language Model to use (default: Qwen/Qwen2-VL-7B-Instruct)",
+     )
+     parser.add_argument(
+         "--batch-size",
+         type=int,
+         default=8,
+         help="Batch size for inference (default: 8)",
+     )
+     parser.add_argument(
+         "--max-samples",
+         type=int,
+         default=None,
+         help="Maximum number of samples to process (for testing)",
+     )
+     parser.add_argument(
+         "--gpu-memory-utilization",
+         type=float,
+         default=0.9,
+         help="GPU memory utilization (default: 0.9)",
+     )
+     parser.add_argument(
+         "--max-model-len",
+         type=int,
+         default=None,
+         help="Maximum model context length",
+     )
+     parser.add_argument(
+         "--tensor-parallel-size",
+         type=int,
+         default=None,
+         help="Number of GPUs for tensor parallelism (default: auto-detect)",
+     )
+     parser.add_argument(
+         "--split",
+         default="train",
+         help="Dataset split to use (default: train)",
+     )
+     parser.add_argument(
+         "--hf-token",
+         default=None,
+         help="Hugging Face API token (or set HF_TOKEN env var)",
+     )
+     parser.add_argument(
+         "--private",
+         action="store_true",
+         help="Make output dataset private",
+     )
+
+     # Show help plus an example command when run with no arguments
+     # (checked before parse_args, which would otherwise exit on the missing
+     # required arguments)
+     if len(sys.argv) == 1:
+         parser.print_help()
+         print("\n" + "=" * 60)
+         print("Example HF Jobs command:")
+         print("=" * 60)
+         print("""
+ hf jobs uv run \\
+     --flavor a10g \\
+     -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\
+     https://huggingface.co/datasets/uv-scripts/vllm/raw/main/vlm-classify.py \\
+     davanstrien/sloane-index-cards \\
+     username/classified-cards \\
+     --classes "index-card,manuscript,title-page,other" \\
+     --max-samples 100
+ """)
+         sys.exit(0)
+
+     args = parser.parse_args()
+
+     main(
+         input_dataset=args.input_dataset,
+         output_dataset=args.output_dataset,
+         classes=args.classes,
+         prompt=args.prompt,
+         image_column=args.image_column,
+         model=args.model,
+         batch_size=args.batch_size,
+         max_samples=args.max_samples,
+         gpu_memory_utilization=args.gpu_memory_utilization,
+         max_model_len=args.max_model_len,
+         tensor_parallel_size=args.tensor_parallel_size,
+         split=args.split,
+         hf_token=args.hf_token,
+         private=args.private,
+     )
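
Once the script has pushed its output, the new "label" column can be spot-checked straight from the Hub. A quick sanity check, assuming the placeholder dataset ID is replaced with the real output repo:

from collections import Counter

from datasets import load_dataset

ds = load_dataset("username/output-dataset", split="train")
print(Counter(ds["label"]))  # per-class counts, with None for any images that failed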