nisten committed (verified)
Commit 12d1f78 · Parent(s): 3bbd16b

Update src/worker.js

Files changed (1):
  1. src/worker.js (+3 −3)
src/worker.js CHANGED
@@ -29,7 +29,7 @@ import {
 const model_id = "onnx-community/Kokoro-82M-v1.0-ONNX";
 let voice;
 const tts = await KokoroTTS.from_pretrained(model_id, {
-  dtype: "fp16",
+  dtype: "fp32",
   device: "webgpu",
 });
 
@@ -77,7 +77,7 @@ const transcriber = await pipeline(
 
 await transcriber(new Float32Array(INPUT_SAMPLE_RATE)); // Compile shaders
 
-const llm_model_id = "onnx-community/Qwen3-4B-ONNX";
+const llm_model_id = "onnx-community/Qwen3-1.7B-ONNX";
 const tokenizer = await AutoTokenizer.from_pretrained(llm_model_id);
 const llm = await AutoModelForCausalLM.from_pretrained(llm_model_id, {
   dtype: "q4f16",
@@ -87,7 +87,7 @@ const llm = await AutoModelForCausalLM.from_pretrained(llm_model_id, {
 const SYSTEM_MESSAGE = {
   role: "system",
   content:
-    "You're a helpful and conversational voice assistant for financial managers, you have a high EQ and are great at math and behavioral finance. Keep your responses short, clear, and casual. /no_think",
+    "You're a helpful and conversational voice assistant. Keep your responses short, clear, and casual.",
 };
 await llm.generate({ ...tokenizer("x"), max_new_tokens: 1 }); // Compile shaders
 
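For reference, below is a minimal usage sketch of the worker pieces this commit touches, written against kokoro-js and @huggingface/transformers (Transformers.js). The package names, the `device: "webgpu"` option on the LLM, the `af_heart` voice id, and the chat/TTS round trip are illustrative assumptions, not code from this repository.

// Hypothetical sketch (assumptions noted above), not the repository's worker code.
import { KokoroTTS } from "kokoro-js";
import { AutoTokenizer, AutoModelForCausalLM } from "@huggingface/transformers";

// TTS: fp32 weights on WebGPU, as set in this commit (previously fp16).
const tts = await KokoroTTS.from_pretrained("onnx-community/Kokoro-82M-v1.0-ONNX", {
  dtype: "fp32",
  device: "webgpu",
});

// LLM: the smaller Qwen3-1.7B checkpoint this commit switches to,
// 4-bit weights with fp16 activations ("q4f16").
const llm_model_id = "onnx-community/Qwen3-1.7B-ONNX";
const tokenizer = await AutoTokenizer.from_pretrained(llm_model_id);
const llm = await AutoModelForCausalLM.from_pretrained(llm_model_id, {
  dtype: "q4f16",
  device: "webgpu", // assumed; the device option sits outside the visible hunk
});

// One round trip: generate a short reply with the simplified system prompt, then synthesize it.
const messages = [
  {
    role: "system",
    content:
      "You're a helpful and conversational voice assistant. Keep your responses short, clear, and casual.",
  },
  { role: "user", content: "Hello there!" },
];
const inputs = tokenizer.apply_chat_template(messages, {
  add_generation_prompt: true,
  return_dict: true,
});
const output = await llm.generate({ ...inputs, max_new_tokens: 64 });
// Strip the prompt tokens and decode only the newly generated part.
const reply = tokenizer.batch_decode(
  output.slice(null, [inputs.input_ids.dims.at(-1), null]),
  { skip_special_tokens: true },
)[0];

// "af_heart" is an assumed voice id; the real worker selects `voice` at runtime.
const audio = await tts.generate(reply, { voice: "af_heart" });
// RawAudio exposes the samples and sampling rate; post them back to the main thread.
self.postMessage({ reply, audio: audio.audio, sampling_rate: audio.sampling_rate });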