import { Switch } from '@headlessui/react'
import {
  useTextGeneration,
  GenerationConfigState,
} from '../../contexts/TextGenerationContext'
import { useModel } from '../../contexts/ModelContext'

function TextGenerationConfig() {
  const { config, setConfig, messages, updateSystemMessage } = useTextGeneration()
  const { modelInfo } = useModel()

  // Single handler for every control: writes one typed field of the shared
  // generation config held in TextGenerationContext.
  const handleConfigChange = (
    field: keyof GenerationConfigState,
    value: number | boolean
  ) => {
    setConfig((prev) => ({ ...prev, [field]: value }))
  }
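  // Assumed shape of GenerationConfigState, inferred from the fields this
  // component writes below; the authoritative definition lives in
  // ../../contexts/TextGenerationContext:
  //
  //   interface GenerationConfigState {
  //     temperature: number
  //     maxTokens: number
  //     topP: number
  //     topK: number
  //     doSample: boolean
  //   }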

  return (
    <div className="space-y-6">
      <h3 className="text-lg font-medium">Text Generation Settings</h3>

      {/* NOTE: the element structure, label text, and slider min/max/step
          values below are assumed; only the onChange handlers, the surviving
          className strings, and the help text are original. */}

      <div>
        <label>Temperature: {config.temperature}</label>
        <input
          type="range"
          min={0} max={2} step={0.05}
          value={config.temperature}
          onChange={(e) => handleConfigChange('temperature', parseFloat(e.target.value))}
          className="w-full h-2 bg-gray-200 rounded-lg appearance-none cursor-pointer"
        />
        <p>Controls randomness in generation (lower = more focused, higher = more creative)</p>
      </div>

      <div>
        <label>Max Tokens: {config.maxTokens}</label>
        <input
          type="range"
          min={1} max={2048} step={1}
          value={config.maxTokens}
          onChange={(e) => handleConfigChange('maxTokens', parseInt(e.target.value))}
          className="w-full h-2 bg-gray-200 rounded-lg appearance-none cursor-pointer"
        />
        <p>Maximum number of tokens to generate in the response</p>
      </div>

      <div>
        <label>Top P: {config.topP}</label>
        <input
          type="range"
          min={0} max={1} step={0.05}
          value={config.topP}
          onChange={(e) => handleConfigChange('topP', parseFloat(e.target.value))}
          className="w-full h-2 bg-gray-200 rounded-lg appearance-none cursor-pointer"
        />
        <p>Nucleus sampling - considers tokens with cumulative probability up to this value</p>
      </div>

      <div>
        <label>Top K: {config.topK}</label>
        <input
          type="range"
          min={1} max={100} step={1}
          value={config.topK}
          onChange={(e) => handleConfigChange('topK', parseInt(e.target.value))}
          className="w-full h-2 bg-gray-200 rounded-lg appearance-none cursor-pointer"
        />
        <p>Only consider the top K most likely tokens at each step</p>
      </div>

      <div>
        <div className="flex items-center justify-between">
          <label>Do Sample</label>
          <Switch
            checked={config.doSample}
            onChange={(checked) => handleConfigChange('doSample', checked)}
            className={`${config.doSample ? 'bg-blue-600' : 'bg-gray-200'} relative inline-flex h-6 w-11 items-center rounded-full`}
          >
            {/* Thumb styling is assumed; only the track classes above survived. */}
            <span
              className={`${config.doSample ? 'translate-x-6' : 'translate-x-1'} inline-block h-4 w-4 transform rounded-full bg-white transition`}
            />
          </Switch>
        </div>
        <p>Enable sampling-based generation (disable for deterministic output)</p>
      </div>
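      {/* A system prompt only makes sense for chat-tuned models, so the
          editor below is rendered only when the loaded model ships a chat
          template; the hasChatTemplate flag comes from ModelContext. */}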

      {/* System Message for Chat */}
      {modelInfo?.hasChatTemplate && (
        <div>
          <label>System Message</label>