seanerons committed on
Commit
d5c2ba0
Β·
verified Β·
1 Parent(s): 3bfa32c

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +214 -0
app.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import tempfile
3
+ import streamlit as st
4
+ import soundfile as sf
5
+ import librosa
6
+
7
+ from yt_dlp import YoutubeDL
8
+ from moviepy.editor import VideoFileClip
9
+
10
+ import whisper
11
+ import whisper.tokenizer as tok
12
+ from speechbrain.pretrained import EncoderClassifier
13
+ import numpy as np
14
+ from audio_recorder_streamlit import audio_recorder
15
+
16
+ # ───────────────────────────────────────────────
17
+ # 1) Page config & Dark Theme Styling
18
+ # ───────────────────────────────────────────────
19
+ st.set_page_config(page_title="English & Accent Detector", page_icon="🎀", layout="wide")
20
+ st.markdown("""
21
+ <style>
22
+ body, .stApp { background-color: #121212; color: #e0e0e0; overflow-y: scroll; }
23
+ .stButton>button {
24
+ background-color: #1f77b4; color: #fff;
25
+ border-radius:8px; padding:0.6em 1.2em; font-size:1rem;
26
+ }
27
+ .stButton>button:hover { background-color: #105b88; }
28
+ .stVideo > video { max-width: 300px !important; border: 1px solid #333; }
29
+ </style>
30
+ """, unsafe_allow_html=True)
31
+
32
+ # ───────────────────────────────────────────────
33
+ # 2) Load models once
34
+ # ───────────────────────────────────────────────
35
+ wmodel = whisper.load_model("tiny")
36
+ classifier = EncoderClassifier.from_hparams(
37
+ source="Jzuluaga/accent-id-commonaccent_ecapa",
38
+ savedir="pretrained_models/accent-id-commonaccent_ecapa"
39
+ )
40
+
41
+ # ───────────────────────────────────────────────
42
+ # 3) Accent grouping map
43
+ # ───────────────────────────────────────────────
44
+ GROUP_MAP = {
45
+ "england": "British", "us": "American", "canada": "American",
46
+ "australia": "Australian", "newzealand": "Australian",
47
+ "indian": "Indian", "scotland": "Scottish", "ireland": "Irish",
48
+ "wales": "Welsh", "african": "African", "malaysia": "Malaysian",
49
+ "bermuda": "Bermudian", "philippines": "Philippine",
50
+ "hongkong": "Hong Kong", "singapore": "Singaporean",
51
+ "southatlandtic": "Other"
52
+ }
53
+ def group_accents(raw_list):
54
+ return [(GROUP_MAP.get(r, r.capitalize()), p) for r, p in raw_list]
55
+
56
+ # ───────────────────────────────────────────────
57
+ # 4) Helper functions
58
+ # ───────────────────────────────────────────────
59
+ def download_extract_audio(url, out_vid="clip.mp4", out_wav="clip.wav",
60
+ max_duration=60, sr=16000):
61
+ if os.path.exists(out_vid): os.remove(out_vid)
62
+ with YoutubeDL({"outtmpl": out_vid, "merge_output_format": "mp4"}) as ydl:
63
+ ydl.download([url])
64
+ clip = VideoFileClip(out_vid)
65
+ used = min(clip.duration, max_duration)
66
+ sub = clip.subclip(0, used)
67
+ sub.audio.write_audiofile(out_wav, fps=sr, codec="pcm_s16le")
68
+ clip.close(); sub.close()
69
+ wav, rate = librosa.load(out_wav, sr=sr, mono=True)
70
+ return wav, rate, out_wav, out_vid
71
+
72
+ def detect_language_whisper(wav_path):
73
+ audio = whisper.load_audio(wav_path, sr=16000)
74
+ audio = whisper.pad_or_trim(audio)
75
+ mel = whisper.log_mel_spectrogram(audio).to(wmodel.device)
76
+ _, probs = wmodel.detect_language(mel)
77
+ lang = max(probs, key=probs.get)
78
+ conf = probs.get("en", 0.0) * 100
79
+ return lang, conf
80
+
81
+ def classify_clip_topk(wav_path, k=3):
82
+ out_prob, _, _, _ = classifier.classify_file(wav_path)
83
+ probs = out_prob.squeeze().cpu().numpy()
84
+ idxs = probs.argsort()[-k:][::-1]
85
+ return [(classifier.hparams.label_encoder.ind2lab[i], float(probs[i]))
86
+ for i in idxs]
87
+
88
+ # ───────────────────────────────────────────────
89
+ # 5) Streamlit UI
90
+ # ───────────────────────────────────────────────
91
+ st.title("🎀 English & Accent Detector")
92
+ st.write("""
93
+ This tool helps you determine if a speaker is speaking English and identifies their accent.
94
+
95
+ 🧭 **How to use:**
96
+ - Use **URL** for public **YouTube**, **Loom**, or any **MP4-accessible video link**.
97
+ - Use **Upload** to submit local video files (MP4, MOV, WEBM, MKV).
98
+ - Use **Record** to record short audio snippets directly from your browser microphone.
99
+
100
+
101
+ """)
102
+
103
+ st.sidebar.header("πŸ“₯ Input")
104
+ method = st.sidebar.radio("Input method", ["URL", "Upload", "Record"])
105
+
106
+ url = None
107
+ uploaded = None
108
+ audio_bytes = None
109
+
110
+ if method == "URL":
111
+ url = st.sidebar.text_input("Video URL (e.g. YouTube, Loom, MP4 link)")
112
+ elif method == "Upload":
113
+ uploaded = st.sidebar.file_uploader("Upload a video file", type=["mp4", "mov", "webm", "mkv"])
114
+ elif method == "Record":
115
+ st.sidebar.write("πŸŽ™οΈ Click below to start recording (wait for microphone access prompt):")
116
+ audio_bytes = audio_recorder()
117
+ if not audio_bytes:
118
+ st.sidebar.info("Waiting for you to record your voice...")
119
+ else:
120
+ st.sidebar.success("Audio recorded successfully! You can now classify it.")
121
+
122
+ if st.sidebar.button("Classify Accent"):
123
+ with st.spinner("πŸ”Š Extracting audio..."):
124
+ if method == "URL" and url:
125
+ wav, sr, wav_path, vid_path = download_extract_audio(url)
126
+ elif method == "Upload" and uploaded:
127
+ vid_path = tempfile.NamedTemporaryFile(
128
+ suffix=os.path.splitext(uploaded.name)[1], delete=False
129
+ ).name
130
+ with open(vid_path, "wb") as f:
131
+ f.write(uploaded.read())
132
+ clip = VideoFileClip(vid_path)
133
+ wav_path = "clip.wav"
134
+ clip.audio.write_audiofile(wav_path, fps=16000, codec="pcm_s16le")
135
+ clip.close()
136
+ wav, sr = librosa.load(wav_path, sr=16000, mono=True)
137
+ elif method == "Record" and audio_bytes:
138
+ wav_path = "recorded.wav"
139
+ with open(wav_path, "wb") as f:
140
+ f.write(audio_bytes)
141
+ wav, sr = librosa.load(wav_path, sr=16000, mono=True)
142
+ vid_path = None
143
+ else:
144
+ st.error("Please supply a valid input.")
145
+ st.stop()
146
+
147
+ left, right = st.columns([1, 2])
148
+ with left:
149
+ st.subheader("πŸ“Ί Preview")
150
+ if method == "Record":
151
+ st.audio(audio_bytes, format="audio/wav")
152
+ elif vid_path:
153
+ with open(vid_path, "rb") as f:
154
+ st.video(f.read())
155
+
156
+ with right:
157
+ with st.spinner("πŸ”Ž Detecting English..."):
158
+ lang_code, eng_conf = detect_language_whisper(wav_path)
159
+
160
+ if eng_conf >= 4.0:
161
+ st.markdown(
162
+ "<div style='background-color:#1b5e20; color:#a5d6a7; padding:8px;"
163
+ " border-radius:5px;'>βœ… <strong>English detected – classifying accent...</strong></div>",
164
+ unsafe_allow_html=True
165
+ )
166
+ with st.spinner("🎯 Classifying accent..."):
167
+ raw3 = classify_clip_topk(wav_path, k=3)
168
+ grouped = group_accents(raw3)
169
+
170
+ st.subheader("πŸ—£οΈ Accent Classification")
171
+ cols = st.columns(len(grouped))
172
+ for c, (lbl, p) in zip(cols, grouped):
173
+ c.markdown(
174
+ f"""<div style=\"border:1px solid #444; border-radius:8px; padding:15px; text-align:center;\">
175
+ <div style=\"font-size:1.1em; font-weight:bold; color:#90caf9\">{lbl}</div>
176
+ <div style=\"font-size:1.8em; color:#29b6f6;\">{p*100:5.1f}%</div>
177
+ </div>""",
178
+ unsafe_allow_html=True
179
+ )
180
+ else:
181
+ st.markdown(
182
+ "<div style='background-color:#b71c1c; color:#ffcdd2; padding:8px;"
183
+ " border-radius:5px;'>❌ <strong>English not detected</strong></div>",
184
+ unsafe_allow_html=True
185
+ )
186
+ name = tok.LANGUAGES.get(lang_code, lang_code).capitalize()
187
+ st.write(f"**Top detected language:** {name} ({eng_conf:.1f}% English)")
188
+
189
+ for p in (wav_path, vid_path):
190
+ if p and os.path.exists(p):
191
+ try:
192
+ os.remove(p)
193
+ except:
194
+ pass
195
+
196
+
197
+
198
+
199
+
200
+
201
+
202
+
203
+
204
+
205
+
206
+
207
+
208
+
209
+
210
+
211
+
212
+
213
+
214
+