import gradio as gr
from transformers import pipeline

title = "Transcribe speech several languages"

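# Two ASR pipelines: a German-only wav2vec2 checkpoint and the multilingual Whisper model.
# Loading both models up front is memory-intensive; a smaller checkpoint (for example
# "openai/whisper-small", if resources are limited) could be substituted.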
pipelineGE = pipeline(task="automatic-speech-recognition", model="jonatasgrosman/wav2vec2-large-xlsr-53-german")
pipelineEN = pipeline(task="automatic-speech-recognition", model="openai/whisper-large")

def transcribeFile(audio_path: str) -> str:
    """Transcribe a German audio file with the wav2vec2 pipeline."""
    transcription = pipelineGE(audio_path)
    return transcription["text"]

def transcribeFileMulti(inputlang: str, audio_path: str) -> str:
    """Transcribe audio with the pipeline matching the selected source language."""
    if inputlang == "English":
        transcription = pipelineEN(audio_path)
    elif inputlang == "German":
        transcription = pipelineGE(audio_path)
    else:
        return f"Unsupported language: {inputlang}"
    return transcription["text"]


app1 = gr.Interface(
    fn=transcribeFile,
    inputs=gr.Audio(label="Upload audio file", type="filepath"),
    outputs="text",
    title=title
)


app2 = gr.Interface(
    fn=transcribeFileMulti,
    inputs=[
        gr.Radio(["English", "German"], value="German", label="Source Language",
                 info="Select the language of the speech you want to transcribe"),
        # Gradio 4.x expects `sources` (a list); older releases used `source="microphone"`.
        gr.Audio(sources=["microphone"], type="filepath"),
    ],
    outputs="text",
    title=title
)


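# Expose both interfaces as tabs: file upload (German only) and microphone recording (English/German).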
demo = gr.TabbedInterface([app1, app2], ["Audio File", "Microphone"])

if __name__ == "__main__":
    demo.launch()