Issamohammed committed
Commit be25d7c · verified · 1 Parent(s): fca61a5

Update app.py

Files changed (1):
  1. app.py +126 -53
app.py CHANGED
@@ -2,65 +2,138 @@ import os
  import torch
  import gradio as gr
  from pydub import AudioSegment
- from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
- import tempfile
- import math
-
- from datasets import load_dataset, Audio
- import numpy as np
- import torchaudio
-
- # Set up model
- device = "cpu"
- torch_dtype = torch.float32
-
- model_id = "KBLab/kb-whisper-large"
-
- processor = AutoProcessor.from_pretrained(model_id)
- model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype).to(device)
-
- # Helper: Split audio into chunks
- def split_audio(audio_path, chunk_duration_ms=10000):
-     audio = AudioSegment.from_file(audio_path)
-     chunks = [audio[i:i + chunk_duration_ms] for i in range(0, len(audio), chunk_duration_ms)]
-     return chunks
-
- # Helper: Transcribe a single chunk
- def transcribe_chunk(chunk):
-     with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpfile:
-         chunk.export(tmpfile.name, format="wav")
-         input_audio, _ = torchaudio.load(tmpfile.name)
-         input_features = processor(input_audio.squeeze(), sampling_rate=16000, return_tensors="pt").input_features
-         input_features = input_features.to(device)
-         predicted_ids = model.generate(input_features, task="transcribe", language="sv")
-         transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
-     os.remove(tmpfile.name)
-     return transcription
-
- # Full transcription function with progress
- def transcribe_with_progress(audio_path, progress=gr.Progress()):
-     ext = os.path.splitext(audio_path)[1].lower()
-     if ext != ".wav":
-         sound = AudioSegment.from_file(audio_path)
-         audio_path = audio_path.replace(ext, ".converted.wav")
-         sound.export(audio_path, format="wav")
-
-     chunks = split_audio(audio_path, chunk_duration_ms=8000)
-     full_transcript = ""
-     total_chunks = len(chunks)
-
-     for i, chunk in enumerate(chunks):
-         partial_text = transcribe_chunk(chunk)
-         full_transcript += partial_text + " "
-         progress(i + 1, total_chunks)  # Update progress bar
-         yield full_transcript.strip()  # Stream updated text to UI
-
- # UI
- gr.Interface(
-     fn=transcribe_with_progress,
-     inputs=gr.Audio(type="filepath", label="Upload Swedish Audio"),
-     outputs=gr.Textbox(label="Live Transcript (Swedish)"),
-     title="Live Swedish Transcriber (KB-Whisper)",
-     description="Streams transcription word-by-word with visual progress. Supports .m4a, .mp3, .wav. May be slow on CPU.",
-     live=True
- ).launch()
+ from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
+ from pathlib import Path
+ from tempfile import NamedTemporaryFile
+ from datetime import timedelta
+ import time
+
+ # Configuration
+ MODEL_ID = "KBLab/kb-whisper-large"
+ CHUNK_DURATION_MS = 10000
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ TORCH_DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32
+
+ # Initialize model and pipeline
+ def initialize_pipeline():
+     model = AutoModelForSpeechSeq2Seq.from_pretrained(
+         MODEL_ID,
+         torch_dtype=TORCH_DTYPE,
+         low_cpu_mem_usage=True
+     ).to(DEVICE)
+
+     processor = AutoProcessor.from_pretrained(MODEL_ID)
+
+     return pipeline(
+         "automatic-speech-recognition",
+         model=model,
+         tokenizer=processor.tokenizer,
+         feature_extractor=processor.feature_extractor,
+         device=DEVICE,
+         torch_dtype=TORCH_DTYPE,
+         # Request Flash Attention 2 only when CUDA is available
+         model_kwargs={"use_flash_attention_2": torch.cuda.is_available()}
+     )
+
+ # Convert audio if needed
+ def convert_to_wav(audio_path: str) -> str:
+     ext = str(Path(audio_path).suffix).lower()
+     if ext != ".wav":
+         audio = AudioSegment.from_file(audio_path)
+         wav_path = str(Path(audio_path).with_suffix(".converted.wav"))
+         audio.export(wav_path, format="wav")
+         return wav_path
+     return audio_path
+
+ # Split audio into chunks
+ def split_audio(audio_path: str) -> list:
+     try:
+         audio = AudioSegment.from_file(audio_path)
+         if len(audio) == 0:
+             raise ValueError("Audio file is empty or invalid.")
+         return [audio[i:i + CHUNK_DURATION_MS] for i in range(0, len(audio), CHUNK_DURATION_MS)]
+     except Exception as e:
+         raise ValueError(f"Failed to process audio: {str(e)}")
+
+ # Helper to compute chunk start time
+ def get_chunk_time(index: int, chunk_duration_ms: int) -> str:
+     start_ms = index * chunk_duration_ms
+     return str(timedelta(milliseconds=start_ms))
+
+ # Transcribe audio with progress and timestamps
+ def transcribe(audio_path: str, include_timestamps: bool = False, progress=gr.Progress()):
+     try:
+         if not audio_path:
+             yield "No audio file provided.", None
+             return
+
+         # Convert to WAV if needed
+         wav_path = convert_to_wav(audio_path)
+
+         # Split and process
+         chunks = split_audio(wav_path)
+         total_chunks = len(chunks)
+         transcript = []
+         timestamped_transcript = []
+
+         for i, chunk in enumerate(chunks):
+             try:
+                 with NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
+                     chunk.export(temp_file.name, format="wav")
+                     result = PIPELINE(temp_file.name,
+                                       generate_kwargs={"task": "transcribe", "language": "sv"})
+                     text = result["text"].strip()
+                     transcript.append(text)
+                     if include_timestamps:
+                         timestamp = get_chunk_time(i, CHUNK_DURATION_MS)
+                         timestamped_transcript.append(f"[{timestamp}] {text}")
+             finally:
+                 if os.path.exists(temp_file.name):
+                     os.remove(temp_file.name)
+
+             progress((i + 1) / total_chunks)
+             yield " ".join(transcript), None  # Stream partial transcript to the UI
+
+         # Clean up converted file if created
+         if wav_path != audio_path and os.path.exists(wav_path):
+             os.remove(wav_path)
+
+         # Prepare final transcript and downloadable file
+         final_transcript = " ".join(transcript)
+         download_content = "\n".join(timestamped_transcript) if include_timestamps else final_transcript
+         with NamedTemporaryFile(suffix=".txt", delete=False, mode='w', encoding='utf-8') as temp_file:
+             temp_file.write(download_content)
+             download_path = temp_file.name
+
+         # This function is a generator, so the final result must be yielded, not returned
+         yield final_transcript, download_path
+
+     except Exception as e:
+         yield f"Error during transcription: {str(e)}", None
+
+ # Initialize pipeline globally
+ PIPELINE = initialize_pipeline()
+
+ # Gradio Interface with Blocks
+ def create_interface():
+     with gr.Blocks(theme=gr.themes.Soft()) as demo:
+         gr.Markdown("# Swedish Whisper Transcriber")
+         gr.Markdown("Upload audio (.wav, .mp3, .m4a) for real-time Swedish speech transcription.")
+
+         with gr.Row():
+             with gr.Column():
+                 audio_input = gr.Audio(type="filepath", label="Upload Audio")
+                 timestamp_toggle = gr.Checkbox(label="Include Timestamps in Download", value=False)
+                 transcribe_btn = gr.Button("Transcribe")
+
+             with gr.Column():
+                 transcript_output = gr.Textbox(label="Live Transcription", lines=10)
+                 download_output = gr.File(label="Download Transcript")
+
+         transcribe_btn.click(
+             fn=transcribe,
+             inputs=[audio_input, timestamp_toggle],
+             outputs=[transcript_output, download_output]
+         )
+
+     return demo
+
+ if __name__ == "__main__":
+     create_interface().launch()
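
A quick way to sanity-check the refactored pipeline outside Gradio is to call it directly. The sketch below is not part of this commit: it assumes transformers, torch, and an ffmpeg install are available, and "sample.wav" is a placeholder for a real audio file.

import torch
from transformers import pipeline

# Mirror the commit's device/dtype selection
device = "cuda" if torch.cuda.is_available() else "cpu"
asr = pipeline(
    "automatic-speech-recognition",
    model="KBLab/kb-whisper-large",
    device=device,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)

# The pipeline decodes and resamples the file itself, so no manual preprocessing is needed
result = asr("sample.wav", generate_kwargs={"task": "transcribe", "language": "sv"})
print(result["text"])

Passing the model id directly lets the pipeline build the tokenizer and feature extractor itself, which should behave roughly the same as the explicit AutoModelForSpeechSeq2Seq/AutoProcessor wiring in initialize_pipeline().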