split things into components + lint
- src/lib/agents/evalBuilder.ts +32 -32
- src/lib/agents/generateCode.ts +30 -27
- src/lib/agents/getInference.ts +4 -4
- src/lib/agents/promptGeneration.ts +29 -29
- src/lib/agents/tools/imageToText.ts +29 -23
- src/lib/agents/tools/index.ts +8 -10
- src/lib/agents/tools/message.ts +20 -20
- src/lib/agents/tools/speechToText.ts +26 -26
- src/lib/agents/tools/textToImage.ts +31 -25
- src/lib/agents/tools/textToSpeech.ts +31 -22
- src/lib/agents/tools/tool.ts +7 -7
- src/lib/components/CodePreview.svelte +21 -0
- src/lib/components/FileUpload.svelte +25 -0
- src/lib/components/ResultsDisplay.svelte +32 -0
- src/lib/components/ToolSelector.svelte +32 -0
- src/lib/store.ts +10 -6
- src/routes/+page.server.ts +0 -0
- src/routes/+page.svelte +18 -93
src/lib/agents/evalBuilder.ts
CHANGED
New contents:

import type { Tool } from "./tools/tool";

export type Update = {
  message: string;
  data: undefined | string | Blob;
};

// this function passes the tools & files to the context before calling eval
export async function evalBuilder(
  code: string,
  tools: Tool<any, any>[],
  files: FileList | null,
  updateCallback: (message: string, data: undefined | string | Blob) => void
) {
  async function wrapperEval() {
    if (files) {
      if (files[0].type.startsWith("image")) {
        // @ts-ignore
        globalThis["image"] = await files[0].arrayBuffer();
      } else if (files[0].type.startsWith("audio")) {
        // @ts-ignore
        globalThis["audio"] = await files[0].arrayBuffer();
      }
    }

    // add tools to context
    for (const tool of tools) {
      // @ts-ignore
      globalThis[tool.name] = tool.call;
    }

    // @ts-ignore
    globalThis["message"] = updateCallback;

    await Object.getPrototypeOf(async function () {}).constructor(`
${code}
return await generate();
`)();

    // clean up tools
    for (const tool of tools) {
      // @ts-ignore
      delete globalThis[tool.name];
    }
  }

  return wrapperEval;
}
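For orientation, a minimal usage sketch of the returned wrapper, mirroring the wiring in src/routes/+page.svelte below; the code string and the missing FileList here are placeholders, not part of the commit:

```ts
import { evalBuilder } from "$lib/agents/evalBuilder";
import { tools } from "$lib/agents/tools";

// Placeholder snippet standing in for the output of generateCode();
// it must define generate(), since evalBuilder appends "return await generate();".
const code = `async function generate() { message("hello from the agent"); }`;

const run = await evalBuilder(
  code,
  tools, // each tool is exposed on globalThis under its name while the snippet runs
  null, // no uploaded FileList in this sketch
  (message, data) => console.log(message, data) // receives every message(...) call
);
await run();
```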
src/lib/agents/generateCode.ts
CHANGED
New contents:

import type { Tool } from "./tools/tool";

import { get } from "svelte/store";
import { OPENAI_API_KEY } from "../store";
import { Configuration, OpenAIApi } from "openai";
import { generatePrompt } from "./promptGeneration";
import { messageTool } from "./tools/message";

export async function generateCode(
  prompt: string,
  tools: Tool<any, any>[],
  files: FileList | null
) {
  const fullprompt = generatePrompt(
    prompt,
    [...tools, messageTool],
    !!files && files[0].type.startsWith("image"),
    !!files && files[0].type.startsWith("audio")
  );

  const openai = new OpenAIApi(
    new Configuration({ apiKey: get(OPENAI_API_KEY) })
  );
  const textAnswer =
    (
      await openai.createCompletion({
        model: "text-davinci-003",
        prompt: fullprompt,
        max_tokens: 1000,
      })
    ).data.choices[0].text ?? "";

  const regex = /```(.*?)```/gs;
  const matches = [...textAnswer.matchAll(regex)];

  const codeBlocks = matches.map((match) => match[1]);
  return codeBlocks[0].replace("js\n", "") ?? "nothing";
}
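A brief usage sketch, mirroring the call in src/routes/+page.svelte below (the selectedTools array is a stand-in for the ToolSelector state). Note that messageTool is appended to whatever the caller selected, which is why it was dropped from the registry in src/lib/agents/tools/index.ts:

```ts
import { generateCode } from "$lib/agents/generateCode";
import { tools } from "$lib/agents/tools";

// Stand-in for the names chosen in the ToolSelector component.
const selectedTools = ["textToImage", "imageToText"];

const code = await generateCode(
  "Draw a picture of a cat wearing a top hat",
  tools.filter((el) => selectedTools.includes(el.name)),
  null // no uploaded file, so no image/audio parameter is advertised in the prompt
);
console.log(code); // first fenced code block in the completion (leading "js" label stripped)
```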
src/lib/agents/getInference.ts
CHANGED
New contents:

import { HfInference } from "@huggingface/inference";
import { get } from "svelte/store";
import { HF_ACCESS_TOKEN } from "../store";

export function getInference() {
  return new HfInference(get(HF_ACCESS_TOKEN));
}
src/lib/agents/promptGeneration.ts
CHANGED
New contents (unchanged lines 40–67 and 75–79 are not shown in the diff):

import type { Tool } from "./tools/tool";

function toolDescription(tool: Tool<any, any>) {
  let prompt = ` name: ${tool.name} \n description: ${tool.description}`;

  const examples = tool.examples.slice(0, 1).map((example) => {
    return ` prompt: ${example.prompt} \n command generated: \`${example.command}\``;
  });

  prompt += `\n` + examples.join("\n");

  return prompt;
}

export function generatePrompt(
  prompt: string,
  tools: Tool<any, any>[],
  image?: boolean,
  audio?: boolean
) {
  if (tools.length === 0) {
    throw new Error("no tools selected");
  }

  let params = "";

  if (image) {
    params += `image`;
  }
  if (audio) {
    params += params ? "," : "";
    params += `audio`;
  }

  // describe all the tools
  const fullPrompt = `
Create a function that does the following: ${prompt}.

Examples:

[… lines 40–67 unchanged, omitted from the diff …]

\`\`\`

In order to help in answering the above prompt, the function has access to the following methods to generate outputs.
${tools.map((tool) => toolDescription(tool)).join("\n\n ")}

Use the above methods and only the above methods to answer the prompt: ${prompt}.

[… lines 75–79 unchanged, omitted from the diff …]

};
\`\`\``;

  return fullPrompt;
}
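To make the prompt's shape concrete, a small sketch of calling generatePrompt with a single tool; the comment approximates what toolDescription contributes for imageToText (whitespace is approximate, not taken from the commit):

```ts
import { generatePrompt } from "$lib/agents/promptGeneration";
import { imageToTextTool } from "$lib/agents/tools/imageToText";

// toolDescription(imageToTextTool) yields roughly:
//   name: imageToText
//   description: Caption an image.
//   prompt: Describe the image
//   command generated: `imageToText(image)`
const fullPrompt = generatePrompt(
  "Describe the uploaded picture",
  [imageToTextTool],
  true, // an image was uploaded
  false
);
```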
src/lib/agents/tools/imageToText.ts
CHANGED
New contents:

import { getInference } from "$lib/agents/getInference";
import type {
  ImageToTextArgs,
  ImageToTextOutput,
} from "@huggingface/inference";
import type { Tool } from "./tool";

export const imageToTextTool: Tool<
  ImageToTextArgs["data"],
  ImageToTextOutput["generated_text"]
> = {
  name: "imageToText",
  description: "Caption an image.",
  examples: [
    {
      prompt: "Describe the image",
      command: "imageToText(image)",
    },
  ],
  call: async (input) => {
    return (
      await getInference().imageToText(
        {
          data: await input,
          model: "nlpconnect/vit-gpt2-image-captioning",
        },
        { wait_for_model: true }
      )
    ).generated_text;
  },
};
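A tool can also be called directly, outside the generated code; a small sketch assuming an image was picked in a file input (the helper name is illustrative):

```ts
import { imageToTextTool } from "$lib/agents/tools/imageToText";

// call() accepts the input value or a promise of it, per the Tool interface.
async function captionFirstFile(files: FileList): Promise<string> {
  return await imageToTextTool.call(files[0].arrayBuffer());
}
```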
src/lib/agents/tools/index.ts
CHANGED
New contents (messageTool is no longer imported or listed here):

import { textToImageTool } from "./textToImage";
import { textToSpeechTool } from "./textToSpeech";
import { speechToTextTool } from "./speechToText";
import { imageToTextTool } from "./imageToText";

export const tools = [
  textToImageTool,
  textToSpeechTool,
  speechToTextTool,
  imageToTextTool,
];
src/lib/agents/tools/message.ts
CHANGED
New contents:

import type { Tool } from "./tool";

export const messageTool: Tool<Promise<Blob | string> | string, void> = {
  name: "message",
  description: "Send data back to the user.",
  examples: [
    {
      prompt: "Display the created image",
      command: 'message("we display the image", image)',
    },
    {
      prompt: "Display the generated text",
      command: 'message("we render the text", text)',
    },
    {
      prompt: 'Display the text "hello world"',
      command: 'message("hello world")',
    },
  ],
  call: async () => {
    return;
  },
};
src/lib/agents/tools/speechToText.ts
CHANGED
New contents:

import { getInference } from "$lib/agents/getInference";
import type {
  AutomaticSpeechRecognitionArgs,
  AutomaticSpeechRecognitionOutput,
} from "@huggingface/inference";
import type { Tool } from "./tool";

export const speechToTextTool: Tool<
  AutomaticSpeechRecognitionArgs["data"],
  AutomaticSpeechRecognitionOutput["text"]
> = {
  name: "speechToText",
  description: "Caption an audio file and returns its text content.",
  examples: [
    {
      prompt: "Transcribe the sound file",
      command: "speechToText(audio)",
    },
  ],
  call: async (data) => {
    return (
      await getInference().automaticSpeechRecognition(
        {
          data: await data,
          model: "facebook/wav2vec2-large-960h-lv60-self",
        },
        { wait_for_model: true }
      )
    ).text;
  },
};
src/lib/agents/tools/textToImage.ts
CHANGED
New contents:

import { getInference } from "$lib/agents/getInference";
import type {
  TextToImageArgs,
  TextToImageOutput,
} from "@huggingface/inference";
import type { Tool } from "./tool";

export const textToImageTool: Tool<
  TextToImageArgs["inputs"],
  TextToImageOutput
> = {
  name: "textToImage",
  description: "Generate an image from a text prompt.",
  examples: [
    {
      prompt: "Generate an image of a cat wearing a top hat",
      command: "textToImage('cat wearing a top hat')",
    },
    {
      prompt: "Draw a brown dog on a beach",
      command: "textToImage('drawing of a brown dog on a beach')",
    },
  ],
  call: async (input) => {
    return await getInference().textToImage(
      {
        inputs: await input,
        model: "stabilityai/stable-diffusion-2",
      },
      { wait_for_model: true }
    );
  },
};
src/lib/agents/tools/textToSpeech.ts
CHANGED
New contents:

import { getInference } from "$lib/agents/getInference";
import type {
  TextToSpeechArgs,
  TextToSpeechOutput,
} from "@huggingface/inference";

import type { Tool } from "./tool";

export const textToSpeechTool: Tool<
  TextToSpeechArgs["inputs"],
  TextToSpeechOutput
> = {
  name: "textToSpeech",
  description: "This tool takes a text input and turns it into an audio file.",
  examples: [
    {
      prompt: 'Say the following out loud:"Hello world!"',
      command: "textToSpeech('Hello world!')",
    },
    {
      prompt: "Say the content of the string txt out loud",
      command: "textToSpeech(txt)",
    },
  ],
  call: async (input) => {
    return await getInference().textToSpeech(
      {
        inputs: await input,
        model: "espnet/kan-bayashi_ljspeech_vits",
      },
      { wait_for_model: true }
    );
  },
};
src/lib/agents/tools/tool.ts
CHANGED
New contents:

export interface Tool<Input, Output> {
  name: string;
  description: string;
  examples: Array<{
    prompt: string;
    command: string;
  }>;
  call: (input: Promise<Input> | Input) => Promise<Output>;
}
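Any object matching this interface can be passed around like the built-in tools; a minimal sketch of a hypothetical extra tool (not part of the commit):

```ts
import type { Tool } from "$lib/agents/tools/tool";

// Hypothetical tool that upper-cases text; it only illustrates the interface.
export const shoutTool: Tool<string, string> = {
  name: "shout",
  description: "Upper-case a piece of text.",
  examples: [
    {
      prompt: "Shout the caption",
      command: "shout(caption)",
    },
  ],
  call: async (input) => {
    return (await input).toUpperCase();
  },
};
```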
src/lib/components/CodePreview.svelte
ADDED
New file:

<script lang="ts">
  export let codePromise: Promise<string>
  export let onRun: (code: string) => void;
</script>

{#await codePromise}
  <div class="loading loading-lg mx-auto" />
{:then code}
  <div class="mockup-code text-sm">
    <pre class="ml-4"><code>{code}</code></pre>
  </div>

  <button
    class="btn btn-primary w-fit mx-auto"
    on:click={() => {
      onRun(code);
    }}
  >
    run code
  </button>
{/await}
src/lib/components/FileUpload.svelte
ADDED
New file:

<script lang="ts">
  export let files: FileList | null = null;
</script>

<div class="grid grid-cols-2 gap-5">
  <div class="form-control">
    <label class="label">
      <span class="label-text">
        {#if files && files.length > 0}
          {files[0].type.split('/')[0]} detected
          <button class="btn-sm btn btn-ghost" on:click={() => (files = null)}>clear </button>
        {:else}
          Upload a file (image or audio)
        {/if}
      </span>
    </label>
    <input
      type="file"
      bind:files
      accept="audio/*, image/*"
      class="mt-auto file-input file-input-bordered max-w-xs"
      class:file-input-primary={files && files.length > 0}
    />
  </div>
</div>
src/lib/components/ResultsDisplay.svelte
ADDED
New file:

<script lang="ts">
  export let messages: Array<{ message: string; data: string | Blob | undefined }>;

  const isBlob = (message: string | Blob): message is Blob => {
    return message instanceof Blob;
  };
</script>

<div class="join join-vertical w-full">
  {#each messages as message}
    <div class="collapse collapse-arrow join-item border border-base-300">
      <input type="radio" name="my-accordion-4" checked={true} />
      <div class="collapse-title text-xl font-medium">
        {message.message}
      </div>
      <div class="collapse-content">
        {#if !!message.data && isBlob(message.data)}
          {#if message.data.type.startsWith('image')}
            <img class="mx-auto" alt="generated" src={URL.createObjectURL(message.data)} />
          {:else if message.data.type.startsWith('audio')}
            <audio controls src={URL.createObjectURL(message.data)} />
          {:else}
            <p class="text-mono text-light w-full">blob type unknown</p>
          {/if}
        {:else if !!message.data}
          <p class="text-mono text-light w-full">{message.data}</p>
        {/if}
      </div>
    </div>
  {/each}
</div>
src/lib/components/ToolSelector.svelte
ADDED
New file:

<script lang="ts">
  import { tools } from '$lib/agents/tools';
  export let selectedTools: Array<string> = [];
</script>

<h3 class="text-lg w-fit mx-auto">Select your tools</h3>

<div class="w-fit mx-auto">

  <div class="join mx-auto inline-block">
    {#each tools as tool}
      <label
        class="form-switch join-item btn normal-case btn-sm"
        class:btn-info={selectedTools.includes(tool.name)}
      >
        <input
          class="hidden"
          type="checkbox"
          bind:group={selectedTools}
          name="tools"
          value={tool.name}
        />
        {tool.name}
      </label>
    {/each}
  </div>
  {#if selectedTools.length === tools.length}
    <button class='btn btn-ghost inline-block w-fit btn-sm' on:click={() => (selectedTools = [])}>clear</button>
  {:else}
    <button class='btn btn-ghost inline-block w-fit btn-sm' on:click={() => (selectedTools = tools.map((el) => el.name))}>select all</button>
  {/if}
</div>
src/lib/store.ts
CHANGED
New contents:

import { writable } from "svelte/store";
import { browser } from "$app/environment";

// Set the stored value or a sane default.
export const HF_ACCESS_TOKEN = writable(
  (browser && localStorage.HF_ACCESS_TOKEN) || ""
);

export const OPENAI_API_KEY = writable(
  (browser && localStorage.OPENAI_API_KEY) || ""
);

if (browser) {
  HF_ACCESS_TOKEN.subscribe((value) => (localStorage.content = value));
  OPENAI_API_KEY.subscribe((value) => (localStorage.content = value));
}
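Both subscribe callbacks above persist to localStorage.content, while the initial values are read from localStorage.HF_ACCESS_TOKEN and localStorage.OPENAI_API_KEY, so the saved tokens would not be restored on reload. A minimal sketch of per-key persistence, assuming that round-tripping each token through its own key is the intent (a suggested drop-in for the if (browser) block, not part of the commit):

```ts
if (browser) {
  // Write each token back to the key it is read from at startup.
  HF_ACCESS_TOKEN.subscribe((value) => (localStorage.HF_ACCESS_TOKEN = value));
  OPENAI_API_KEY.subscribe((value) => (localStorage.OPENAI_API_KEY = value));
}
```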
src/routes/+page.server.ts
DELETED
Empty file removed; no contents to show.
src/routes/+page.svelte
CHANGED
New contents of the changed regions (unchanged lines between hunks are not shown in the diff):

Lines 3–12:

  import { generateCode } from '$lib/agents/generateCode';
  import { tools } from '$lib/agents/tools';
  import { evalBuilder } from '$lib/agents/evalBuilder';
  import FileUpload from '$lib/components/FileUpload.svelte';
  import ToolSelector from '$lib/components/ToolSelector.svelte';
  import CodePreview from '$lib/components/CodePreview.svelte';
  import ResultsDisplay from '$lib/components/ResultsDisplay.svelte';

  let prompt = 'Draw a picture of a cat wearing a top hat. Then caption the picture and read it out loud.';
  let selectedTools: Array<string> = [];

Lines 20–26:

  messages = [];
  codePromise = generateCode(
    prompt,
    tools.filter((el) => selectedTools.includes(el.name)),
    files
  );
};

Lines 30–36:

  const wrapperEval = await evalBuilder(
    code,
    tools.filter((el) => selectedTools.includes(el.name)),
    files,
    (message, data) => {
      messages = [...messages, { message, data }];

Lines 42–47 (the local isBlob helper was removed; it now lives in ResultsDisplay.svelte):

  isLoading = false;
};

</script>

<div class="flex flex-col space-y-4 max-w-xl">

Lines 50–81 (the inline tool selector, file upload, code preview and results markup was replaced by the new components):

  <h1 class="text-3xl font-semibold w-fit mx-auto">Agents.js</h1>
  </div>
  <div class="divider" />
  <ToolSelector bind:selectedTools />
  <div class="divider" />
  <span class="label-text"> Input your request </span>
  <textarea
    class="textarea border-base-300 bg-base-300"
    placeholder="Ask something here"
    bind:value={prompt}
  />
  <FileUpload bind:files/>
  <button
    class="btn btn-primary mt-auto"
    on:click={onGenerate}
    on:keypress={onGenerate}
    disabled={selectedTools.length === 0}>generate</button
  >

  {#if codePromise}
    <CodePreview bind:codePromise onRun={onRun}/>
  {/if}

  <div class="divider" />
  {#if isLoading}
    <div class="loading loading-lg mx-auto" />
  {:else if messages.length > 0}
    <h3 class="text-lg w-fit mx-auto">Results</h3>
  {/if}

  <ResultsDisplay bind:messages />
</div>