Devakumar868 committed on
Commit c0e0942 · verified · 1 Parent(s): ee439d6

Update app.py

Files changed (1):
  app.py +20 -31
app.py CHANGED
@@ -9,25 +9,21 @@ from dia.model import Dia
 from dac.utils import load_model as load_dac_model
 from accelerate import init_empty_weights, load_checkpoint_and_dispatch
 
-# Retrieve HF token from Secrets
 HF_TOKEN = os.environ["HF_TOKEN"]
-
-# Automatic multi-GPU sharding across 4× L4 GPUs
 device_map = "auto"
 
-# 1. Descript Audio Codec (RVQ)
+# 1. RVQ Codec
 rvq = load_dac_model(tag="latest", model_type="44khz")
 rvq.eval()
-if torch.cuda.is_available():
-    rvq = rvq.to("cuda")
+if torch.cuda.is_available(): rvq = rvq.to("cuda")
 
-# 2. Voice Activity Detection via Pyannote
+# 2. VAD
 vad_pipe = PyannotePipeline.from_pretrained(
     "pyannote/voice-activity-detection",
     use_auth_token=HF_TOKEN
 )
 
-# 3. Ultravox ASR+LLM
+# 3. Ultravox
 ultravox_pipe = pipeline(
     model="fixie-ai/ultravox-v0_4",
     trust_remote_code=True,
@@ -35,17 +31,15 @@ ultravox_pipe = pipeline(
     torch_dtype=torch.float16
 )
 
-# 4. Audio Diffusion (direct load via Diffusers)
+# 4. Audio Diffusion
 diff_pipe = DiffusionPipeline.from_pretrained(
-    "teticio/audio-diffusion-instrumental-hiphop-256",
-    torch_dtype=torch.float16
+    "teticio/audio-diffusion-instrumental-hiphop-256"
 ).to("cuda")
 
-# 5. Dia TTS (multi-GPU dispatch)
+# 5. Dia TTS
 with init_empty_weights():
     dia = Dia.from_pretrained(
         "nari-labs/Dia-1.6B",
-        torch_dtype=torch.float16,
         trust_remote_code=True
     )
 dia = load_checkpoint_and_dispatch(
@@ -55,40 +49,35 @@ dia = load_checkpoint_and_dispatch(
     dtype=torch.float16
 )
 
-# 6. Inference Function
+# Inference
 def process_audio(audio):
     sr, array = audio
     array = array.numpy() if torch.is_tensor(array) else array
 
-    # VAD
     _ = vad_pipe(array, sampling_rate=sr)
-
-    # RVQ encode/decode
-    tensor = torch.tensor(array).unsqueeze(0).to("cuda")
-    codes = rvq.encode(tensor)
+    x = torch.tensor(array).unsqueeze(0).to("cuda")
+    codes = rvq.encode(x)
     decoded = rvq.decode(codes).squeeze().cpu().numpy()
 
-    # Ultravox inference
     ultra_out = ultravox_pipe({"array": decoded, "sampling_rate": sr})
     text = ultra_out.get("text", "")
 
-    # Diffusion enhancement
     pros = diff_pipe(raw_audio=decoded)["audios"][0]
 
-    # Dia TTS
-    tts = dia.generate(f"[emotion:neutral] {text}").squeeze().cpu().numpy()
-    tts = tts / np.max(np.abs(tts)) * 0.95
+    tts = dia.generate(f"[emotion:neutral] {text}")
+    tts_np = tts.squeeze().cpu().numpy()
+    tts_np = tts_np / np.max(np.abs(tts_np)) * 0.95
 
-    return (sr, tts), text
+    return (sr, tts_np), text
 
-# 7. Gradio UI
+# UI
 with gr.Blocks(title="Maya AI 📈") as demo:
     gr.Markdown("## Maya-AI: Supernatural Conversational Agent")
-    audio_input = gr.Audio(source="microphone", type="numpy", label="Your Voice")
-    send_button = gr.Button("Send")
-    audio_output = gr.Audio(label="AI’s Response")
-    text_output = gr.Textbox(label="Generated Text")
-    send_button.click(process_audio, inputs=audio_input, outputs=[audio_output, text_output])
+    audio_in = gr.Audio(source="microphone", type="numpy")
+    send_btn = gr.Button("Send")
+    audio_out = gr.Audio()
+    text_out = gr.Textbox()
+    send_btn.click(process_audio, inputs=audio_in, outputs=[audio_out, text_out])
 
 if __name__ == "__main__":
     demo.launch()
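
The diff keeps the Accelerate empty-weights / dispatch pattern for Dia, but the dispatch arguments themselves (old lines 52-54) fall outside the hunk. For reference, a minimal sketch of how init_empty_weights and load_checkpoint_and_dispatch generally fit together, assuming a hypothetical local checkpoint_dir and illustrative dispatch arguments rather than the ones used in this Space:

# Sketch only: Accelerate meta-device init followed by sharded checkpoint dispatch.
# "checkpoint_dir" and the dispatch arguments are placeholders, not values from app.py.
import torch
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("gpt2")            # any config works for the sketch
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)   # parameters live on the meta device; no weights loaded yet

model = load_checkpoint_and_dispatch(
    model,
    checkpoint="checkpoint_dir",    # placeholder: folder holding the saved weights
    device_map="auto",              # shard modules across available GPUs (and CPU if needed)
    dtype=torch.float16,
)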
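
On the VAD step: the diff passes the raw numpy array straight to vad_pipe. For reference, pyannote.audio pipelines also document an in-memory input form built from a waveform tensor and a sample rate. A minimal sketch, assuming pyannote.audio 3.x and a mono (channel, time) layout; the dummy signal and token are placeholders:

# Sketch only: calling a pyannote VAD pipeline on an in-memory waveform.
import numpy as np
import torch
from pyannote.audio import Pipeline

vad = Pipeline.from_pretrained(
    "pyannote/voice-activity-detection",
    use_auth_token="hf_..."                      # placeholder token
)

array = np.zeros(16000, dtype=np.float32)        # 1 s of dummy audio
waveform = torch.from_numpy(array).unsqueeze(0)  # shape (channel, time), mono

speech = vad({"waveform": waveform, "sample_rate": 16000})
for segment in speech.get_timeline().support():
    print(segment.start, segment.end)            # detected speech regions in seconds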