import { Switch } from '@headlessui/react'
import {
useTextGeneration,
type GenerationConfigState
} from '../../contexts/TextGenerationContext'
import { useModel } from '../../contexts/ModelContext'
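// Settings panel for text generation: sliders for the sampling parameters
// (temperature, max tokens, top-p, top-k), a sampling on/off toggle, and,
// for models with a chat template, an editable system message.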
function TextGenerationConfig() {
const { config, setConfig, messages, updateSystemMessage } =
useTextGeneration()
const { modelInfo } = useModel()
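// Write a single field back into the shared generation config.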
const handleConfigChange = (
field: keyof GenerationConfigState,
value: number | boolean
) => {
setConfig((prev) => ({ ...prev, [field]: value }))
}
return (
<div className="space-y-4">
<h3 className="text-lg font-semibold text-gray-900">
Text Generation Settings
</h3>
<div className="space-y-3">
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">
Temperature: {config.temperature}
</label>
<input
type="range"
min="0.1"
max="2.0"
step="0.1"
value={config.temperature}
onChange={(e) =>
handleConfigChange('temperature', parseFloat(e.target.value))
}
className="w-full h-2 bg-gray-200 rounded-lg appearance-none cursor-pointer"
/>
<p className="text-xs text-gray-500 mt-1">
Controls randomness in generation (lower = more focused, higher =
more creative)
</p>
</div>
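{/* Max tokens slider (10-500) */}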
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">
Max Tokens: {config.maxTokens}
</label>
<input
type="range"
min="10"
max="500"
step="10"
value={config.maxTokens}
onChange={(e) =>
handleConfigChange('maxTokens', parseInt(e.target.value))
}
className="w-full h-2 bg-gray-200 rounded-lg appearance-none cursor-pointer"
/>
<p className="text-xs text-gray-500 mt-1">
Maximum number of tokens to generate in the response
</p>
</div>
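{/* Top-p (nucleus sampling) slider (0.1-1.0) */}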
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">
Top-p: {config.topP}
</label>
<input
type="range"
min="0.1"
max="1.0"
step="0.1"
value={config.topP}
onChange={(e) =>
handleConfigChange('topP', parseFloat(e.target.value))
}
className="w-full h-2 bg-gray-200 rounded-lg appearance-none cursor-pointer"
/>
<p className="text-xs text-gray-500 mt-1">
Nucleus sampling - considers tokens with cumulative probability up
to this value
</p>
</div>
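{/* Top-k slider (1-100) */}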
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">
Top-k: {config.topK}
</label>
<input
type="range"
min="1"
max="100"
step="1"
value={config.topK}
onChange={(e) =>
handleConfigChange('topK', parseInt(e.target.value))
}
className="w-full h-2 bg-gray-200 rounded-lg appearance-none cursor-pointer"
/>
<p className="text-xs text-gray-500 mt-1">
Only consider the top K most likely tokens at each step
</p>
</div>
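{/* Sampling toggle: when disabled, decoding is greedy (most likely token) */}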
<div className="flex items-center">
<Switch
checked={config.doSample}
onChange={(checked) => handleConfigChange('doSample', checked)}
className={`${config.doSample ? 'bg-blue-600' : 'bg-gray-200'}
relative inline-flex h-6 w-11 items-center rounded-full`}
>
<span
className={`${config.doSample ? 'translate-x-6' : 'translate-x-1'}
inline-block h-4 w-4 transform rounded-full bg-white transition`}
/>
</Switch>
<label className="ml-3 text-sm font-medium text-gray-700">
Do Sample
</label>
</div>
<p className="text-xs text-gray-500 mt-1">
Enable sampling-based generation (disable for deterministic output)
</p>
{/* System Message for Chat */}
{modelInfo?.hasChatTemplate && (
<div className="pt-2 border-t border-gray-200">
<h4 className="text-sm font-semibold text-gray-800 mb-2">
System Message
</h4>
<textarea
value={messages.find((m) => m.role === 'system')?.content || ''}
onChange={(e) => updateSystemMessage(e.target.value)}
className="w-full p-2 border border-gray-300 rounded-md text-sm focus:outline-none focus:ring-1 focus:ring-blue-500 focus:border-blue-500"
rows={4}
placeholder="e.g., You are a helpful assistant."
/>
<p className="text-xs text-gray-500 mt-1">
Initial instructions that guide the model's behavior throughout
the conversation
</p>
</div>
)}
</div>
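{/* Quick reference for what each parameter does */}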
<div className="pt-2 border-t border-gray-200">
<div className="text-xs text-gray-500">
<p className="mb-1">
<strong>Temperature:</strong> Higher values make output more random,
lower values more focused
</p>
<p className="mb-1">
<strong>Top-p & Top-k:</strong> Control which tokens are considered
during generation
</p>
<p>
<strong>Sampling:</strong> When disabled, always picks the most
likely next token (greedy decoding)
</p>
</div>
</div>
</div>
)
}
export default TextGenerationConfig