Luigi committed on
Commit
be5c239
·
1 Parent(s): b56b6ec

use all cpu cores

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -93,7 +93,7 @@ def update_llm(size, model_file, clip_file):
93
  mf, cf = ensure_weights(size, model_file, clip_file)
94
  handler = SmolVLM2ChatHandler(clip_model_path=cf, verbose=False)
95
  llm = Llama(model_path=mf, chat_handler=handler, n_ctx=1024,
96
- verbose=False, n_threads=min(2, os.cpu_count()))
97
  model_cache.update({'size': size, 'model_file': mf, 'clip_file': cf, 'llm': llm})
98
  return None # no UI output
99
 
 
93
  mf, cf = ensure_weights(size, model_file, clip_file)
94
  handler = SmolVLM2ChatHandler(clip_model_path=cf, verbose=False)
95
  llm = Llama(model_path=mf, chat_handler=handler, n_ctx=1024,
96
+ verbose=False, n_threads=max(2, os.cpu_count()))
97
  model_cache.update({'size': size, 'model_file': mf, 'clip_file': cf, 'llm': llm})
98
  return None # no UI output
99