import spaces
import gradio as gr
import uuid
import torch
import torchaudio
from TangoFlux import TangoFluxInference



# Load the TangoFlux model from the Hugging Face Hub
tangoflux = TangoFluxInference(name="declare-lab/TangoFlux")



@spaces.GPU(duration=15)
def gradio_generate(prompt, duration, steps, guidance):
    # Run TangoFlux inference on the text prompt
    output = tangoflux.generate(prompt, steps=steps, guidance_scale=guidance, duration=duration)

    # Normalize to [-1, 1] and convert to 16-bit PCM
    output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()

    # TangoFlux generates audio at 44.1 kHz
    sample_rate = 44100

    unique_filename = f"output_{uuid.uuid4().hex}.wav"
    print(f"Saving audio to file: {unique_filename}")

    # Save to file
    torchaudio.save(unique_filename, output, sample_rate)
    print(f"Audio saved: {unique_filename}")

    # Return the path to the generated audio file so Gradio can play it
    return unique_filename

description_text = """
<p><a href="https://huggingface.co/spaces/declare-lab/tango2/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
Generate audio using Tango2 by providing a text prompt. Tango2 was built from Tango and was trained on <a href="https://huggingface.co/datasets/declare-lab/audio-alpaca">Audio-alpaca</a>
<br/><br/> This is the demo for Tango2 for text to audio generation: <a href="https://arxiv.org/abs/2404.09956">Read our paper.</a>
</p>
"""
# Gradio input and output components
input_text = gr.Textbox(lines=2, label="Prompt")
# output_format = gr.Radio(label="Output format", info="The file you can download", choices=["wav"], value="wav")
output_audio = gr.Audio(label="Generated Audio", type="filepath")
denoising_steps = gr.Slider(minimum=10, maximum=100, value=25, step=5, label="Steps", interactive=True)
guidance_scale = gr.Slider(minimum=1, maximum=10, value=4.5, step=0.5, label="Guidance Scale", interactive=True)
duration_scale = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Duration", interactive=True)
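# Note: the standalone components above are not passed to the Interface below, which
# declares its own inputs inline; that input order (prompt, duration, steps, guidance)
# must match the gradio_generate signature.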

interface = gr.Interface(
    fn=gradio_generate,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your text prompt here"),
        gr.Slider(0, 30, value=10, label="Duration in Seconds"),
        gr.Slider(10, 150, value=50, step=5, label="Number of Diffusion Steps"),
        gr.Slider(1, 10, value=4.5, step=0.5, label="CFG Scale")
    ],
    outputs=gr.Audio(type="filepath", label="Generated Audio"),
    title="TangoFlux Generator",
    description="Generate variable-length stereo audio at 44.1kHz from text prompts using TangoFlux.",
    examples=[
        [
            "Create a serene soundscape of a quiet beach at sunset.",  # Text prompt
            15,   # Duration in seconds
            50,   # Number of diffusion steps
            4.5,  # CFG scale
        ],
    ],
)

interface.launch()