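"""Gradio demo for SALMONN-7B (Speech Audio Language Music Open Neural Network)."""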
import argparse

import gradio as gr
import spaces

from model import SALMONN


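# Stand-in with the same generate() interface as SALMONN, e.g. for testing
# the UI without loading the checkpoints (swap in with `model = ff()`).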
class ff:
    def generate(self, wav_path, prompt, prompt_pattern=None, num_beams=4, temperature=1.0, top_p=0.9):
        print(f'wav_path: {wav_path}, prompt: {prompt}, temperature: {temperature}, num_beams: {num_beams}, top_p: {top_p}')
        # Return a one-element list so callers can index [0], like SALMONN.generate.
        return ["I'm sorry, but I cannot answer that question as it is not clear what you are asking. Can you please provide more context or clarify your question?"]


parser = argparse.ArgumentParser()
parser.add_argument("--device", type=str, default="cuda:0")
parser.add_argument("--ckpt_path", type=str, default="./salmonn_7b_v0.pth")
parser.add_argument("--whisper_path", type=str, default="./whisper_large_v2")
parser.add_argument("--beats_path", type=str, default="./beats/BEATs_iter3_plus_AS2M_finetuned_on_AS2M_cpt2.pt")
parser.add_argument("--vicuna_path", type=str, default="./vicuna-7b-v1.5")
parser.add_argument("--low_resource", action='store_true', default=False)
parser.add_argument("--port", type=int, default=9527)

args = parser.parse_args()
args.low_resource = True  # always use low-resource loading here, overriding the CLI flag

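# Build the model on CPU first, then move it to the requested device.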
model = SALMONN(
    ckpt=args.ckpt_path,
    whisper_path=args.whisper_path,
    beats_path=args.beats_path,
    vicuna_path=args.vicuna_path,
    low_resource=args.low_resource,
    lora_alpha=28,
    device='cpu',
)
model.to(args.device)
model.eval()


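# When hosted on Hugging Face Spaces (ZeroGPU), spaces.GPU runs this handler on a GPU worker.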
@spaces.GPU(enable_queue=True)
def gradio_answer(speech, text_input, num_beams, temperature, top_p):
    llm_message = model.generate(
        wav_path=speech,
        prompt=text_input,
        num_beams=num_beams,
        temperature=temperature,
        top_p=top_p,
    )
    return llm_message[0]


title = """<h1 align="center">SALMONN: Speech Audio Language Music Open Neural Network</h1>""" |
|
image_src = """<h1 align="center"><a href="https://github.com/bytedance/SALMONN"><img src="https://raw.githubusercontent.com/bytedance/SALMONN/main/resource/salmon.png", alt="SALMONN" border="0" style="margin: 0 auto; height: 200px;" /></a> </h1>""" |
|
description = """<h3>This is the demo of SALMONN-7B. To experience SALMONN-13B, you can go to <a href="https://bytedance.github.io/SALMONN">https://bytedance.github.io/SALMONN</a>.\n Upload your audio and start chatting!</h3>""" |
|
|
|
|
|
with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.Markdown(image_src)
    gr.Markdown(description)

    with gr.Row():
        with gr.Column():
            speech = gr.Audio(label="Audio", type='filepath')

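            # Decoding parameters forwarded to model.generate.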
            num_beams = gr.Slider(
                minimum=1,
                maximum=10,
                value=4,
                step=1,
                interactive=True,
                label="number of beams",
            )

            top_p = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.9,
                step=0.1,
                interactive=True,
                label="top p",
            )

            temperature = gr.Slider(
                minimum=0.8,
                maximum=2.0,
                value=1.0,
                step=0.1,
                interactive=False,  # fixed at 1.0 in this demo
                label="temperature",
            )

        with gr.Column():
            text_input = gr.Textbox(label='User', placeholder='Please upload your audio first', interactive=True)
            answer = gr.Textbox(label="SALMONN answer")

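    # Clickable examples pairing demo audio clips with task prompts.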
    with gr.Row():
        examples = gr.Examples(
            examples=[
                ["resource/audio_demo/gunshots.wav", "Recognize the speech and give me the transcription."],
                ["resource/audio_demo/gunshots.wav", "Listen to the speech and translate it into German."],
                ["resource/audio_demo/gunshots.wav", "Provide the phonetic transcription for the speech."],
                ["resource/audio_demo/gunshots.wav", "Please describe the audio."],
                ["resource/audio_demo/gunshots.wav", "Recognize what the speaker says and describe the background audio at the same time."],
                ["resource/audio_demo/gunshots.wav", "Use your strong reasoning skills to answer the speaker's question in detail based on the background sound."],
                ["resource/audio_demo/duck.wav", "Please list each event in the audio in order."],
                ["resource/audio_demo/duck.wav", "Based on the audio, write a story in detail. Your story should be highly related to the audio."],
                ["resource/audio_demo/duck.wav", "How many speakers did you hear in this audio? Who are they?"],
                ["resource/audio_demo/excitement.wav", "Describe the emotion of the speaker."],
                ["resource/audio_demo/mountain.wav", "Please answer the question in detail."],
                ["resource/audio_demo/jobs.wav", "Give me only three keywords of the text. Explain your reason."],
                ["resource/audio_demo/2_30.wav", "What is the time mentioned in the speech?"],
                ["resource/audio_demo/music.wav", "Please describe the music in detail."],
                ["resource/audio_demo/music.wav", "What is the emotion of the music? Explain the reason in detail."],
                ["resource/audio_demo/music.wav", "Can you write some lyrics of the song?"],
                ["resource/audio_demo/music.wav", "Give me a title of the music based on its rhythm and emotion."],
            ],
            inputs=[speech, text_input],
        )

    # Generate an answer when the user submits the text prompt.
    text_input.submit(
        gradio_answer, [speech, text_input, num_beams, temperature, top_p], [answer]
    )

demo.queue(max_size=20).launch(share=False)
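
# A typical local run (assuming this script is saved as app.py and the
# checkpoints are available at the default paths above):
#   python app.py --device cuda:0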