import torch
from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoProcessor
from PIL import Image
import gradio as gr

# Load the MAGMA model and processor
model_id = "microsoft/Magma-8B"
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, trust_remote_code=True
)
model.to("cuda" if torch.cuda.is_available() else "cpu")

def magma_inference(image: Image.Image, prompt: str) -> str:
    # Prepare the conversation. Per the Magma-8B model card, the user turn
    # should carry the image placeholder tokens so the processor knows where
    # to splice in the vision features.
    convs = [
        {"role": "system", "content": "You are an agent that can see, talk, and act."},
        {"role": "user", "content": f"<image_start><image><image_end>\n{prompt}"},
    ]

    # Render the chat template into a single prompt string
    text_prompt = processor.tokenizer.apply_chat_template(
        convs, tokenize=False, add_generation_prompt=True
    )

    # Tokenize the text and preprocess the image
    inputs = processor(images=[image], texts=text_prompt, return_tensors="pt")
    inputs = inputs.to(model.device)
    # Cast the image tensor to the model's half-precision dtype to avoid a
    # float32/float16 mismatch inside the vision tower
    inputs["pixel_values"] = inputs["pixel_values"].to(model.dtype)

    # Generate, then strip the prompt tokens from the returned ids
    with torch.inference_mode():
        generate_ids = model.generate(**inputs, max_new_tokens=50)
    generate_ids = generate_ids[:, inputs["input_ids"].shape[-1]:]
    response = processor.decode(generate_ids[0], skip_special_tokens=True).strip()
    return response

# Gradio interface
interface = gr.Interface(
    fn=magma_inference,
    inputs=[
        gr.Image(type="pil", label="Input Image"),
        gr.Textbox(label="Prompt"),
    ],
    outputs=gr.Textbox(label="MAGMA Output"),
    title="MAGMA Image + Text to Text API",
    description="Upload an image and enter a prompt. Returns MAGMA's textual response.",
)

# gr.mount_gradio_app expects a FastAPI app as its first argument; the
# Interface itself is passed as the blocks to mount, so a FastAPI instance
# is created first.
app = FastAPI()
app = gr.mount_gradio_app(app, interface, path="/")
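
# A minimal sketch of how this script could be served and called. It assumes
# the file is saved as main.py and that uvicorn and gradio_client are
# installed; the file name, port, and image path below are illustrative
# assumptions, not part of the original snippet.
#
# Serve the mounted FastAPI app:
#
#   uvicorn main:app --host 0.0.0.0 --port 7860
#
# Call it from a client process:
#
#   from gradio_client import Client, handle_file
#
#   client = Client("http://localhost:7860/")
#   result = client.predict(
#       handle_file("example.jpg"),   # hypothetical local image path
#       "What is in this image?",
#       api_name="/predict",          # default endpoint name for a gr.Interface
#   )
#   print(result)

if __name__ == "__main__":
    # Convenience entry point so `python main.py` serves the app directly,
    # assuming uvicorn is available.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)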