// NOTE(review): this chunk appears whitespace-mangled — the JSX tags surrounding the
// description strings below have been stripped by extraction, and the component's
// `return (` is never closed within the visible span. Only comments are added here;
// every original token is preserved byte-for-byte. TODO: recover the full markup
// from the original file before making code changes.
//
// TextGenerationConfig — settings panel for text-generation parameters.
// - Reads `config` / `setConfig` (and `messages` / `updateSystemMessage`) from
//   the TextGeneration context, and `modelInfo` from the Model context.
// - `handleConfigChange(field, value)` performs a functional state update,
//   spreading the previous GenerationConfigState so only the one field changes.
// - The system-message section is rendered only when `modelInfo?.hasChatTemplate`
//   is truthy (optional chaining guards against modelInfo being null/undefined).
// - The visible description strings cover: temperature, max tokens, top-p, top-k,
//   and a sampling on/off toggle (greedy decoding when disabled) — presumably each
//   is attached to a Slider or Switch from the imported UI components; confirm
//   against the unmangled source.
import { Switch } from '@/components/ui/switch' import { Slider } from '@/components/ui/slider' import { useTextGeneration, GenerationConfigState } from '../../contexts/TextGenerationContext' import { useModel } from '../../contexts/ModelContext' function TextGenerationConfig() { const { config, setConfig, messages, updateSystemMessage } = useTextGeneration() const { modelInfo } = useModel() const handleConfigChange = ( field: keyof GenerationConfigState, value: number | boolean ) => { setConfig((prev) => ({ ...prev, [field]: value })) } return (
Controls randomness in generation (lower = more focused, higher = more creative)
Maximum number of tokens to generate in the response
Nucleus sampling - considers tokens with cumulative probability up to this value
Only consider the top K most likely tokens at each step
Enable sampling-based generation (disable for deterministic output)
{/* System Message for Chat */} {modelInfo?.hasChatTemplate && (Temperature: Higher values make output more random, lower values more focused
Top-p & Top-k: Control which tokens are considered during generation
Sampling: When disabled, always picks the most likely next token (greedy decoding)