Luigi committed
Commit 4decc4b · 1 Parent(s): 5462ff3

increase n_ctx to 8192

Files changed (1): app.py (+1, -1)
app.py CHANGED
@@ -92,7 +92,7 @@ def update_llm(size, model_file, clip_file):
     if (model_cache['size'], model_cache['model_file'], model_cache['clip_file']) != (size, model_file, clip_file):
         mf, cf = ensure_weights(size, model_file, clip_file)
         handler = SmolVLM2ChatHandler(clip_model_path=cf, verbose=False)
-        llm = Llama(model_path=mf, chat_handler=handler, n_ctx=1024,
+        llm = Llama(model_path=mf, chat_handler=handler, n_ctx=8192,
                     verbose=False, n_threads=max(2, os.cpu_count()))
         model_cache.update({'size': size, 'model_file': mf, 'clip_file': cf, 'llm': llm})
     return None  # no UI output
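
For context, a minimal sketch of what the new construction amounts to, assuming llama-cpp-python's Llama API as used in app.py; the model path here is a placeholder (in app.py it comes from ensure_weights), and the chat handler is omitted for brevity:

    import os
    from llama_cpp import Llama

    # Placeholder GGUF path; app.py resolves this via ensure_weights().
    llm = Llama(
        model_path="model.gguf",
        n_ctx=8192,                        # context window raised from 1024 tokens
        n_threads=max(2, os.cpu_count()),  # same thread heuristic as app.py
        verbose=False,
    )
    print(llm.n_ctx())  # reports the configured context size

A larger n_ctx lets the model keep longer image-plus-chat histories in context, at the cost of more memory for the KV cache.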