Update app.py
app.py (CHANGED)
@@ -19,7 +19,7 @@ from models.custom_interface import CustomEncoderWav2vec2Classifier



-st.title("
+st.title("English Accent Audio Detector")

 # Initialize session state
 initialize_session_state()
@@ -36,7 +36,7 @@ if 'whisper' not in st.session_state:
 display_memory_once()

 # Reset state for a new analysis
-if st.button("
+if st.button("Analyze new video"):
     reset_session_state_except_model()
     st.rerun()

@@ -54,7 +54,7 @@ if option == "Upload video file":
     with open(temp_video_path.name, "wb") as f:
         f.write(uploaded_video.read())
     audio_path = trim_video(temp_video_path.name)
-    st.success("
+    st.success("Video uploaded successfully.")
     st.session_state.audio_path = audio_path


@@ -65,18 +65,18 @@ elif option == "Enter Video Url":
     audio_path = download_audio_as_wav(yt_url)
     audio_path = trim_audio(audio_path)
     if audio_path:
-        st.success("
+        st.success("Video downloaded successfully.")
         st.session_state.audio_path = audio_path


 # Transcription and Accent Analysis
 if st.session_state.audio_path and not st.session_state.transcription:
-    if st.button("
+    if st.button("Extract Audio"):
         st.session_state.audio_ready = True
         st.audio(st.session_state.audio_path, format='audio/wav')

         mem = psutil.virtual_memory()
-        st.write(f"
+        st.write(f"Memory used: {mem.percent}%")
         #Detect Language AND FILTER OUT NON-ENGLISH AUDIOS FOR ANALYSIS
         segments, info = st.session_state.whisper.transcribe(st.session_state.audio_path, beam_size=1)

@@ -85,34 +85,34 @@ if st.session_state.audio_path and not st.session_state.transcription:

         if info.language != "en":

-            st.error("
+            st.error("This video does not appear to be in English. Please provide a clear English video.")
         else:
             # Show transcription for audio
             with st.spinner("Transcribing audio..."):
                 st.markdown(" Transcript Preview")
                 st.markdown(st.session_state.transcription)
-            st.success("
+            st.success("Audio extracted and ready for analysis!")
             mem = psutil.virtual_memory()
-            st.write(f"
+            st.write(f"Memory used: {mem.percent}%")



 if st.session_state.transcription:
     if st.button("🗣️ Analyze Accent"):
-        with st.spinner("
+        with st.spinner("Analyzing accent..."):
             try:
                 mem = psutil.virtual_memory()
-                st.write(f"
+                st.write(f"Memory used: {mem.percent}%")
                 waveform, sample_rate = torchaudio.load(st.session_state.audio_path)
                 readable_accent, confidence = analyze_accent(waveform, sample_rate, st.session_state.classifier)

                 if readable_accent:
-                    st.success(f"
-                    st.info(f"
+                    st.success(f"Accent Detected: **{readable_accent}**")
+                    st.info(f"Confidence: {confidence}%")

                 else:
                     st.warning("Could not determine accent.")

             except Exception as e:
-                st.error("
+                st.error("Failed to analyze accent.")
                 st.code(str(e))
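For context on the final hunk, here is a minimal sketch of what the analyze_accent helper called above might look like. It is not part of this commit: the SpeechBrain-style classify_batch() interface, the 16 kHz mono input, and the way the confidence percentage is derived are all assumptions made for illustration.

# Hypothetical sketch of the analyze_accent helper used above; not part of this
# commit. Assumes the classifier follows SpeechBrain's classify_batch()
# convention of returning (out_prob, score, index, text_lab) and that the
# model expects 16 kHz mono input.
import torchaudio

def analyze_accent(waveform, sample_rate, classifier, target_rate=16000):
    """Return (accent_label, confidence_percent), or (None, None) on failure."""
    # Mix multi-channel audio down to a single channel.
    if waveform.shape[0] > 1:
        waveform = waveform.mean(dim=0, keepdim=True)
    # Resample to the rate the model is assumed to expect.
    if sample_rate != target_rate:
        waveform = torchaudio.functional.resample(waveform, sample_rate, target_rate)

    out_prob, score, index, text_lab = classifier.classify_batch(waveform)
    if not text_lab:
        return None, None
    # Report the winning class score as a percentage (assumes score is a 0-1 probability).
    confidence = round(float(score[0]) * 100, 2)
    return text_lab[0], confidence

Under these assumptions, readable_accent is the label shown in st.success and confidence is the 0-100 value shown in st.info; the actual helper in this repository may differ.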