Devakumar868 committed
Commit ee439d6 · verified · 1 Parent(s): 036f56f

Update app.py

Files changed (1): app.py (+34 -37)
app.py CHANGED
@@ -3,21 +3,23 @@ import gradio as gr
 import torch
 import numpy as np
 from transformers import pipeline
+from diffusers import DiffusionPipeline
 from pyannote.audio import Pipeline as PyannotePipeline
 from dia.model import Dia
 from dac.utils import load_model as load_dac_model
 from accelerate import init_empty_weights, load_checkpoint_and_dispatch
 
-# Environment token
+# Retrieve HF token from Secrets
 HF_TOKEN = os.environ["HF_TOKEN"]
 
-# Shard large models across 4× L4 GPUs
+# Automatic multi-GPU sharding across 4× L4 GPUs
 device_map = "auto"
 
-# 1. RVQ codec (Descript Audio Codec)
+# 1. Descript Audio Codec (RVQ)
 rvq = load_dac_model(tag="latest", model_type="44khz")
 rvq.eval()
-if torch.cuda.is_available(): rvq = rvq.to("cuda")
+if torch.cuda.is_available():
+    rvq = rvq.to("cuda")
 
 # 2. Voice Activity Detection via Pyannote
 vad_pipe = PyannotePipeline.from_pretrained(
@@ -25,7 +27,7 @@ vad_pipe = PyannotePipeline.from_pretrained(
     use_auth_token=HF_TOKEN
 )
 
-# 3. Ultravox pipeline (speech → text + LLM)
+# 3. Ultravox ASR+LLM
 ultravox_pipe = pipeline(
     model="fixie-ai/ultravox-v0_4",
     trust_remote_code=True,
@@ -33,16 +35,13 @@ ultravox_pipe = pipeline(
     torch_dtype=torch.float16
 )
 
-# 4. Diffusion-based prosody model
-diff_pipe = pipeline(
-    "audio-to-audio",
-    model="teticio/audio-diffusion-instrumental-hiphop-256",
-    trust_remote_code=True,
-    device_map=device_map,
+# 4. Audio Diffusion (direct load via Diffusers)
+diff_pipe = DiffusionPipeline.from_pretrained(
+    "teticio/audio-diffusion-instrumental-hiphop-256",
     torch_dtype=torch.float16
-)
+).to("cuda")
 
-# 5. Dia TTS loaded with multi-GPU dispatch
+# 5. Dia TTS (multi-GPU dispatch)
 with init_empty_weights():
     dia = Dia.from_pretrained(
         "nari-labs/Dia-1.6B",
@@ -56,42 +55,40 @@ dia = load_checkpoint_and_dispatch(
     dtype=torch.float16
 )
 
-# Inference function
+# 6. Inference Function
 def process_audio(audio):
     sr, array = audio
-    # Ensure numpy
-    if torch.is_tensor(array): array = array.numpy()
+    array = array.numpy() if torch.is_tensor(array) else array
 
-    # VAD: extract speech regions
-    chunks = vad_pipe(array, sampling_rate=sr)
+    # VAD
+    _ = vad_pipe(array, sampling_rate=sr)
 
     # RVQ encode/decode
-    x = torch.tensor(array).unsqueeze(0).to("cuda")
-    codes = rvq.encode(x)
+    tensor = torch.tensor(array).unsqueeze(0).to("cuda")
+    codes = rvq.encode(tensor)
     decoded = rvq.decode(codes).squeeze().cpu().numpy()
 
-    # Ultravox ASR + LLM
-    out = ultravox_pipe({"array": decoded, "sampling_rate": sr})
-    text = out.get("text", "")
+    # Ultravox inference
+    ultra_out = ultravox_pipe({"array": decoded, "sampling_rate": sr})
+    text = ultra_out.get("text", "")
 
-    # Diffusion prosody enhancement
-    pros_audio = diff_pipe({"array": decoded, "sampling_rate": sr})["array"][0]
+    # Diffusion enhancement
+    pros = diff_pipe(raw_audio=decoded)["audios"][0]
 
-    # Dia TTS synthesis
-    tts = dia.generate(f"[emotion:neutral] {text}")
-    tts_np = tts.squeeze().cpu().numpy()
-    tts_np = tts_np / np.max(np.abs(tts_np)) * 0.95
+    # Dia TTS
+    tts = dia.generate(f"[emotion:neutral] {text}").squeeze().cpu().numpy()
+    tts = tts / np.max(np.abs(tts)) * 0.95
 
-    return (sr, tts_np), text
+    return (sr, tts), text
 
-# Gradio UI
-with gr.Blocks(title="Maya AI 📈", theme=None) as demo:
+# 7. Gradio UI
+with gr.Blocks(title="Maya AI 📈") as demo:
     gr.Markdown("## Maya-AI: Supernatural Conversational Agent")
-    audio_in = gr.Audio(source="microphone", type="numpy", label="Your Voice")
-    send_btn = gr.Button("Send")
-    audio_out = gr.Audio(label="AI’s Response")
-    text_out = gr.Textbox(label="Generated Text")
-    send_btn.click(process_audio, inputs=audio_in, outputs=[audio_out, text_out])
+    audio_input = gr.Audio(source="microphone", type="numpy", label="Your Voice")
+    send_button = gr.Button("Send")
+    audio_output = gr.Audio(label="AI’s Response")
+    text_output = gr.Textbox(label="Generated Text")
+    send_button.click(process_audio, inputs=audio_input, outputs=[audio_output, text_output])
 
 if __name__ == "__main__":
     demo.launch()
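For context on the main change in this commit, below is a minimal standalone sketch of the new diffusion step, runnable on a single CUDA GPU. It assumes, as the updated app.py does, that DiffusionPipeline.from_pretrained resolves the teticio checkpoint to its audio-diffusion pipeline class, that the pipeline call accepts a raw_audio waveform, and that its output exposes an "audios" array; the enhance_prosody helper and the tone-based smoke test are illustrative additions, not part of the commit.

import numpy as np
import torch
from diffusers import DiffusionPipeline

# Same checkpoint and dtype the commit switches to, loaded onto one GPU.
diff_pipe = DiffusionPipeline.from_pretrained(
    "teticio/audio-diffusion-instrumental-hiphop-256",
    torch_dtype=torch.float16,
).to("cuda")

def enhance_prosody(decoded: np.ndarray) -> np.ndarray:
    # `raw_audio=` and the "audios" output key mirror the updated process_audio;
    # both are assumptions about this pipeline's interface, not core Diffusers API.
    out = diff_pipe(raw_audio=decoded)
    return out["audios"][0]

# Hypothetical smoke test: a 1-second 440 Hz tone in the float32 waveform
# format the rest of app.py passes around.
t = np.linspace(0.0, 1.0, 44100, endpoint=False)
tone = (0.1 * np.sin(2 * np.pi * 440.0 * t)).astype(np.float32)
print(enhance_prosody(tone).shape)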