import torchaudio
import torch

from vui.inference import render
from vui.model import Vui

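# Load the pretrained checkpoint onto the GPU; compiling the decoder speeds up repeated renders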
model = Vui.from_pretrained().cuda()
model.decoder = torch.compile(model.decoder, fullgraph=True, mode="max-autotune")
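
# Render the same text several times; the first call is slow while torch.compile traces and optimises the decoder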
for _ in range(10):
    waveform = render(
        model,
        """Hey, here is some random stuff, usually something quite long as the shorter the text the less likely the model can cope!
So cool yeah makes sense, would you be able to help me with something?
Sure what is it?""",
    )
    print(waveform.shape)
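
# Save the waveform from the final render as an Opus file at a 22.05 kHz sample rate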
torchaudio.save("out.opus", waveform[0].cpu(), 22050)