// NOTE(review): this file was recovered from a collapsed/minified dump; the
// formatting below is restored, but every non-comment token is unchanged.
import { useState, useRef, useEffect, useCallback } from 'react'
import { Send, Eraser, Loader2, X } from 'lucide-react' // icons — presumably rendered in the JSX below; confirm
import { ChatMessage, TextGenerationWorkerInput, WorkerMessage } from '../../types' // NOTE(review): WorkerMessage is not referenced in the visible logic — verify it is used elsewhere in this file
import { useModel } from '../../contexts/ModelContext'
import { useTextGeneration } from '../../contexts/TextGenerationContext'

/**
 * Text-generation panel driven by a model web worker (`activeWorker` from
 * ModelContext).
 *
 * Two modes, selected by `modelInfo.hasChatTemplate`:
 *  - chat mode: the running `messages` history (from TextGenerationContext) is
 *    posted to the worker, and the assistant reply is appended when the worker
 *    posts `{ status: 'output' }`;
 *  - plain-prompt mode: a single `prompt` string is posted and the reply is
 *    rendered into local `generatedText`.
 */
function TextGeneration() {
  // Generation settings and the chat history are shared app-wide via context.
  const { config, messages, setMessages } = useTextGeneration()
  // Chat-mode input box.
  const [currentMessage, setCurrentMessage] = useState('')
  // Plain-prompt-mode input and its one-shot output.
  const [prompt, setPrompt] = useState('')
  const [generatedText, setGeneratedText] = useState('')
  const [isGenerating, setIsGenerating] = useState(false)
  // hasBeenLoaded is not referenced in the visible logic — presumably used in the JSX below; confirm.
  const { activeWorker, status, modelInfo, hasBeenLoaded, selectedQuantization } = useModel()
  // Sentinel at the end of the message list; presumably attached to an element
  // in the JSX below — confirm.
  const messagesEndRef = useRef(null)

  const scrollToBottom = () => {
    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
  }

  // Keep the newest message / generated text in view.
  useEffect(() => {
    scrollToBottom()
  }, [messages, generatedText])

  // Ask the worker to abort the in-flight generation.
  const stopGeneration = useCallback(() => {
    if (activeWorker && isGenerating) {
      activeWorker.postMessage({ type: 'stop' })
      setIsGenerating(false)
    }
  }, [activeWorker, isGenerating])

  // Chat mode: append the user turn locally, then post the full history.
  const handleSendMessage = useCallback(() => {
    // Ignore empty input and re-entrant sends while the worker is busy.
    if (!currentMessage.trim() || !modelInfo || !activeWorker || isGenerating) return
    const userMessage: ChatMessage = { role: 'user', content: currentMessage.trim() }
    const updatedMessages = [...messages, userMessage]
    setMessages(updatedMessages)
    setCurrentMessage('')
    setIsGenerating(true)
    const message: TextGenerationWorkerInput = {
      type: 'generate',
      messages: updatedMessages,
      hasChatTemplate: modelInfo.hasChatTemplate,
      model: modelInfo.id,
      // Full precision unless the user picked a quantization.
      dtype: selectedQuantization ?? 'fp32',
      config
    }
    activeWorker.postMessage(message)
  }, [
    currentMessage,
    messages,
    setMessages,
    modelInfo,
    activeWorker,
    config,
    isGenerating,
    selectedQuantization
  ])

  // Plain-prompt mode: post the raw prompt (no chat history).
  const handleGenerateText = useCallback(() => {
    if (!prompt.trim() || !modelInfo || !activeWorker || isGenerating) return
    setIsGenerating(true)
    const message: TextGenerationWorkerInput = {
      type: 'generate',
      prompt: prompt.trim(),
      hasChatTemplate: modelInfo.hasChatTemplate,
      model: modelInfo.id,
      config,
      dtype: selectedQuantization ?? 'fp32'
    }
    activeWorker.postMessage(message)
  }, [
    prompt,
    modelInfo,
    activeWorker,
    config,
    isGenerating,
    selectedQuantization
  ])

  // Route worker replies back into UI state. Re-subscribes whenever the worker
  // instance or the chat/plain mode flips.
  useEffect(() => {
    if (!activeWorker) return
    const onMessageReceived = (e: MessageEvent) => {
      // NOTE(review): this `status` shadows the model-load `status` from useModel().
      const { status, output } = e.data
      if (status === 'output' && output) {
        setIsGenerating(false)
        if (modelInfo?.hasChatTemplate) {
          const assistantMessage: ChatMessage = { role: 'assistant', content: output.content }
          setMessages((prev) => [...prev, assistantMessage])
        } else {
          setGeneratedText(output.content)
        }
      } else if (status === 'ready' || status === 'error') {
        // Worker finished (re)initializing or failed — either way stop the spinner.
        setIsGenerating(false)
      }
    }
    activeWorker.addEventListener('message', onMessageReceived)
    // Detach on re-run/unmount so stale handlers don't double-append replies.
    return () => activeWorker.removeEventListener('message', onMessageReceived)
  }, [activeWorker, modelInfo?.hasChatTemplate, setMessages])

  // Enter submits in the current mode; Shift+Enter is left to the input's
  // default behavior (newline).
  const handleKeyPress = (e: React.KeyboardEvent) => {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault()
      if (modelInfo?.hasChatTemplate) {
        handleSendMessage()
      } else {
        handleGenerateText()
      }
    }
  }

  // Reset the current mode: chat mode keeps only system messages; plain mode
  // clears both the prompt and its output.
  const clearChat = () => {
    if (modelInfo?.hasChatTemplate) {
      setMessages((prev) => prev.filter((msg) => msg.role === 'system'))
    } else {
      setPrompt('')
      setGeneratedText('')
    }
  }

  // Disabled-state for the controls rendered below.
  const busy = status !== 'ready' || isGenerating
  const hasChatTemplate = modelInfo?.hasChatTemplate

  return (

Text Generation {hasChatTemplate ? '(Chat)' : ''}

{isGenerating && ( )}
{hasChatTemplate ? ( <>
{messages .filter((msg) => msg.role !== 'system') .map((message, index) => (
{message.role === 'user' ? 'You' : 'Assistant'}
{message.content}
))} {isGenerating && (
Assistant
Loading...
)}