cygon24 committed
Commit e1f4cc8 · verified · 1 Parent(s): 3c31ec8

Create app.py

Files changed (1):
  app.py  +41 -0
app.py ADDED
@@ -0,0 +1,41 @@
+ import os
+ os.system("pip install --upgrade scipy timm torch torchvision torchaudio gradio pillow transformers==4.39.3")
+
+ import torch
+ import gradio as gr
+ from PIL import Image
+ import scipy.io.wavfile as wavfile
+
+ # Use a pipeline as a high-level helper
+ from transformers import pipeline
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # BLIP generates a text caption from an image
+ caption_image = pipeline("image-to-text",
+                          model="Salesforce/blip-image-captioning-large",
+                          device=device)
+
+ # VITS synthesizes speech from text
+ narrator = pipeline("text-to-speech",
+                     model="kakao-enterprise/vits-ljs")
+
+ # Define the function to generate audio from text
+ def generate_audio(text):
+     # Generate the narrated speech; the pipeline returns a dict with
+     # "audio" (a (1, n) float array) and "sampling_rate"
+     narrated_text = narrator(text)
+
+     # Save the audio to a WAV file
+     wavfile.write("output.wav",
+                   rate=narrated_text["sampling_rate"],
+                   data=narrated_text["audio"][0])
+
+     # Return the path to the saved audio file
+     return "output.wav"
+
+
+ def caption_my_image(pil_image):
+     # Caption the image, then narrate the caption
+     semantics = caption_image(images=pil_image)[0]["generated_text"]
+     return generate_audio(semantics)
+
+ demo = gr.Interface(fn=caption_my_image,
+                     inputs=[gr.Image(label="Select Image", type="pil")],
+                     outputs=[gr.Audio(label="Narrated Caption")],
+                     title="@cygon: Image captioning",
+                     description="Upload an image to hear an automatically generated caption narrated as audio.")
+ demo.launch()
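
Note: installing packages via os.system at import time is slow and fragile on Hugging Face Spaces; dependencies are normally declared in a requirements.txt next to app.py instead. A minimal sketch (this file is not part of the commit; the package set is inferred from the install line above, with only transformers pinned as in the original):

# requirements.txt — hypothetical companion file
scipy
timm
torch
torchvision
torchaudio
transformers==4.39.3
gradio
pillow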
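To sanity-check the two pipelines outside Gradio, a quick Python session along these lines should work; "example.jpg" is a hypothetical local image file:

# Hypothetical smoke test, run in the same environment as app.py
from PIL import Image
from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
tts = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")

caption = captioner(images=Image.open("example.jpg"))[0]["generated_text"]
speech = tts(caption)  # dict with "audio" (float array) and "sampling_rate"
print(caption)
print(speech["sampling_rate"], speech["audio"].shape)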