Update app.py
app.py
CHANGED
@@ -2,65 +2,138 @@ import os
 import torch
 import gradio as gr
 from pydub import AudioSegment
-from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
-import torchaudio
-import tempfile
-
-# ... (model, processor, and device setup)
-
-# ...
-def split_audio(audio_path):
-    # ...
-
-# Helper
-def ...
-    # ...
-    input_audio, _ = torchaudio.load(tmpfile.name)
-    input_features = processor(input_audio.squeeze(), sampling_rate=16000, return_tensors="pt").input_features
-    input_features = input_features.to(device)
-    predicted_ids = model.generate(input_features, task="transcribe", language="sv")
-    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
-    os.remove(tmpfile.name)
-    return transcription
-
-# ...
-def transcribe_with_progress(...):
-    # ...
-    total_chunks = len(chunks)
-    # ...
-
-gr.Interface(
-    fn=transcribe_with_progress,
-    inputs=gr.Audio(type="filepath", label="Upload Swedish Audio"),
-    outputs=gr.Textbox(label="Live Transcript (Swedish)"),
-    title="Live Swedish Transcriber (KB-Whisper)",
-    description="Streams transcription word-by-word with visual progress. Supports .m4a, .mp3, .wav. May be slow on CPU.",
-    live=True
-).launch()
+from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
+from pathlib import Path
+from tempfile import NamedTemporaryFile
+from datetime import timedelta
+import time
 
+# Configuration
+MODEL_ID = "KBLab/kb-whisper-large"
+CHUNK_DURATION_MS = 10000
+DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+TORCH_DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32
 
+# Initialize model and pipeline
+def initialize_pipeline():
+    model = AutoModelForSpeechSeq2Seq.from_pretrained(
+        MODEL_ID,
+        torch_dtype=TORCH_DTYPE,
+        low_cpu_mem_usage=True
+    ).to(DEVICE)
+
+    processor = AutoProcessor.from_pretrained(MODEL_ID)
+
+    return pipeline(
+        "automatic-speech-recognition",
+        model=model,
+        tokenizer=processor.tokenizer,
+        feature_extractor=processor.feature_extractor,
+        device=DEVICE,
+        torch_dtype=TORCH_DTYPE,
+        model_kwargs={"use_flash_attention_2": torch.cuda.is_available()}
+    )
 
+# Convert audio to WAV if needed
+def convert_to_wav(audio_path: str) -> str:
+    ext = str(Path(audio_path).suffix).lower()
+    if ext != ".wav":
+        audio = AudioSegment.from_file(audio_path)
+        wav_path = str(Path(audio_path).with_suffix(".converted.wav"))
+        audio.export(wav_path, format="wav")
+        return wav_path
+    return audio_path
 
+# Split audio into fixed-length chunks
+def split_audio(audio_path: str) -> list:
+    try:
+        audio = AudioSegment.from_file(audio_path)
+        if len(audio) == 0:
+            raise ValueError("Audio file is empty or invalid.")
+        return [audio[i:i + CHUNK_DURATION_MS] for i in range(0, len(audio), CHUNK_DURATION_MS)]
+    except Exception as e:
+        raise ValueError(f"Failed to process audio: {str(e)}")
 
+# Helper to compute a chunk's start time
+def get_chunk_time(index: int, chunk_duration_ms: int) -> str:
+    start_ms = index * chunk_duration_ms
+    return str(timedelta(milliseconds=start_ms))
 
+# Transcribe audio with progress and timestamps
+def transcribe(audio_path: str, include_timestamps: bool = False, progress=gr.Progress()):
+    try:
+        if not audio_path:
+            yield "No audio file provided.", None
+            return
+
+        # Convert to WAV if needed
+        wav_path = convert_to_wav(audio_path)
+
+        # Split and process
+        chunks = split_audio(wav_path)
+        total_chunks = len(chunks)
+        transcript = []
+        timestamped_transcript = []
+
+        for i, chunk in enumerate(chunks):
+            try:
+                with NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
+                    chunk.export(temp_file.name, format="wav")
+                    result = PIPELINE(temp_file.name,
+                                      generate_kwargs={"task": "transcribe", "language": "sv"})
+                text = result["text"].strip()
+                transcript.append(text)
+                if include_timestamps:
+                    timestamp = get_chunk_time(i, CHUNK_DURATION_MS)
+                    timestamped_transcript.append(f"[{timestamp}] {text}")
+            finally:
+                if os.path.exists(temp_file.name):
+                    os.remove(temp_file.name)
+
+            progress((i + 1) / total_chunks)
+            yield " ".join(transcript), None
+
+        # Clean up converted file if created
+        if wav_path != audio_path and os.path.exists(wav_path):
+            os.remove(wav_path)
+
+        # Prepare final transcript and downloadable file
+        final_transcript = " ".join(transcript)
+        download_content = "\n".join(timestamped_transcript) if include_timestamps else final_transcript
+        with NamedTemporaryFile(suffix=".txt", delete=False, mode='w', encoding='utf-8') as temp_file:
+            temp_file.write(download_content)
+            download_path = temp_file.name
+
+        yield final_transcript, download_path
+
+    except Exception as e:
+        yield f"Error during transcription: {str(e)}", None
 
+# Initialize pipeline globally
+PIPELINE = initialize_pipeline()
 
+# Gradio Interface with Blocks
+def create_interface():
+    with gr.Blocks(theme=gr.themes.Soft()) as demo:
+        gr.Markdown("# Swedish Whisper Transcriber")
+        gr.Markdown("Upload audio (.wav, .mp3, .m4a) for real-time Swedish speech transcription.")
+
+        with gr.Row():
+            with gr.Column():
+                audio_input = gr.Audio(type="filepath", label="Upload Audio")
+                timestamp_toggle = gr.Checkbox(label="Include Timestamps in Download", value=False)
+                transcribe_btn = gr.Button("Transcribe")
+
+            with gr.Column():
+                transcript_output = gr.Textbox(label="Live Transcription", lines=10)
+                download_output = gr.File(label="Download Transcript")
+
+        transcribe_btn.click(
+            fn=transcribe,
+            inputs=[audio_input, timestamp_toggle],
+            outputs=[transcript_output, download_output]
+        )
+
+    return demo
 
+if __name__ == "__main__":
+    create_interface().launch()
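For reference, the same model can also handle long files without the manual pydub chunking: the transformers ASR pipeline does its own long-form chunking via chunk_length_s and can return segment timestamps directly. A minimal sketch under those assumptions (model ID as in the commit; "audio.wav" is a hypothetical local file, and the first call downloads the model):

# Sketch: pipeline-side long-form chunking as an alternative to the
# manual 10-second pydub chunks above. "audio.wav" is a placeholder path.
from transformers import pipeline

pipe = pipeline(
    "automatic-speech-recognition",
    model="KBLab/kb-whisper-large",
    chunk_length_s=30,  # let the pipeline window long audio itself
)
result = pipe(
    "audio.wav",
    return_timestamps=True,  # segment timestamps appear in result["chunks"]
    generate_kwargs={"task": "transcribe", "language": "sv"},
)
print(result["text"])

The commit keeps its own 10-second chunks because each chunk's text is yielded to the Textbox as it arrives; a single pipeline call like the one above returns only once.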