Spaces:
Running
Running
Removes cache parameter
Browse files
app.py
CHANGED
@@ -118,9 +118,12 @@ class ModelCacheManager:
|
|
118 |
try:
|
119 |
return method(*args, **kwargs)
|
120 |
except:
|
121 |
-
# Try without cache
|
122 |
kwargs_copy = kwargs.copy()
|
123 |
-
|
|
|
|
|
|
|
124 |
return method(*args, **kwargs_copy)
|
125 |
else:
|
126 |
raise e
|
@@ -128,13 +131,15 @@ class ModelCacheManager:
|
|
128 |
def direct_call(self, method_name, *args, **kwargs):
|
129 |
"""Direct call bypassing all cache mechanisms"""
|
130 |
try:
|
131 |
-
# Disable cache completely
|
132 |
-
kwargs_copy = kwargs.copy()
|
133 |
-
kwargs_copy['use_cache'] = False
|
134 |
-
|
135 |
# Clear all caches first
|
136 |
self._clear_all_caches()
|
137 |
|
|
|
|
|
|
|
|
|
|
|
|
|
138 |
# Make the call
|
139 |
method = getattr(self.model, method_name)
|
140 |
return method(*args, **kwargs_copy)
|
@@ -149,8 +154,9 @@ class ModelCacheManager:
|
|
149 |
kwargs_copy = kwargs.copy()
|
150 |
|
151 |
# Remove any cache-related parameters
|
152 |
-
|
153 |
-
|
|
|
154 |
|
155 |
# Clear caches
|
156 |
self._clear_all_caches()
|
@@ -187,7 +193,6 @@ def initialize_model_safely():
|
|
187 |
device_map=device,
|
188 |
use_safetensors=True,
|
189 |
pad_token_id=tokenizer.eos_token_id,
|
190 |
-
use_cache=True,
|
191 |
torch_dtype=torch.float16 if device == 'cuda' else torch.float32
|
192 |
)
|
193 |
|
@@ -276,20 +281,12 @@ def safe_model_chat(model, tokenizer, image_path, **kwargs):
|
|
276 |
try:
|
277 |
# Remove any cache-related parameters
|
278 |
kwargs_copy = kwargs.copy()
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
# Try with cache disabled
|
283 |
return model.chat(tokenizer, image_path, **kwargs_copy)
|
284 |
except:
|
285 |
-
|
286 |
-
try:
|
287 |
-
# Force cache clearing by setting use_cache=False
|
288 |
-
kwargs_copy = kwargs.copy()
|
289 |
-
kwargs_copy['use_cache'] = False
|
290 |
-
return model.chat(tokenizer, image_path, **kwargs_copy)
|
291 |
-
except:
|
292 |
-
raise Exception("Model compatibility issue: DynamicCache error. Please try again.")
|
293 |
else:
|
294 |
raise e
|
295 |
except Exception as e:
|
@@ -336,20 +333,12 @@ def safe_model_chat_crop(model, tokenizer, image_path, **kwargs):
|
|
336 |
try:
|
337 |
# Remove any cache-related parameters
|
338 |
kwargs_copy = kwargs.copy()
|
339 |
-
|
340 |
-
|
341 |
-
|
342 |
-
# Try with cache disabled
|
343 |
return model.chat_crop(tokenizer, image_path, **kwargs_copy)
|
344 |
except:
|
345 |
-
|
346 |
-
try:
|
347 |
-
# Force cache clearing by setting use_cache=False
|
348 |
-
kwargs_copy = kwargs.copy()
|
349 |
-
kwargs_copy['use_cache'] = False
|
350 |
-
return model.chat_crop(tokenizer, image_path, **kwargs_copy)
|
351 |
-
except:
|
352 |
-
raise Exception("Model compatibility issue: DynamicCache error. Please try again.")
|
353 |
else:
|
354 |
raise e
|
355 |
except Exception as e:
|
|
|
118 |
try:
|
119 |
return method(*args, **kwargs)
|
120 |
except:
|
121 |
+
# Try without any cache-related parameters
|
122 |
kwargs_copy = kwargs.copy()
|
123 |
+
# Remove any cache-related parameters that might cause issues
|
124 |
+
for key in list(kwargs_copy.keys()):
|
125 |
+
if 'cache' in key.lower():
|
126 |
+
del kwargs_copy[key]
|
127 |
return method(*args, **kwargs_copy)
|
128 |
else:
|
129 |
raise e
|
|
|
131 |
def direct_call(self, method_name, *args, **kwargs):
|
132 |
"""Direct call bypassing all cache mechanisms"""
|
133 |
try:
|
|
|
|
|
|
|
|
|
134 |
# Clear all caches first
|
135 |
self._clear_all_caches()
|
136 |
|
137 |
+
# Remove any cache-related parameters
|
138 |
+
kwargs_copy = kwargs.copy()
|
139 |
+
for key in list(kwargs_copy.keys()):
|
140 |
+
if 'cache' in key.lower():
|
141 |
+
del kwargs_copy[key]
|
142 |
+
|
143 |
# Make the call
|
144 |
method = getattr(self.model, method_name)
|
145 |
return method(*args, **kwargs_copy)
|
|
|
154 |
kwargs_copy = kwargs.copy()
|
155 |
|
156 |
# Remove any cache-related parameters
|
157 |
+
for key in list(kwargs_copy.keys()):
|
158 |
+
if 'cache' in key.lower():
|
159 |
+
del kwargs_copy[key]
|
160 |
|
161 |
# Clear caches
|
162 |
self._clear_all_caches()
|
|
|
193 |
device_map=device,
|
194 |
use_safetensors=True,
|
195 |
pad_token_id=tokenizer.eos_token_id,
|
|
|
196 |
torch_dtype=torch.float16 if device == 'cuda' else torch.float32
|
197 |
)
|
198 |
|
|
|
281 |
try:
|
282 |
# Remove any cache-related parameters
|
283 |
kwargs_copy = kwargs.copy()
|
284 |
+
for key in list(kwargs_copy.keys()):
|
285 |
+
if 'cache' in key.lower():
|
286 |
+
del kwargs_copy[key]
|
|
|
287 |
return model.chat(tokenizer, image_path, **kwargs_copy)
|
288 |
except:
|
289 |
+
raise Exception("Model compatibility issue: DynamicCache error. Please try again.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
290 |
else:
|
291 |
raise e
|
292 |
except Exception as e:
|
|
|
333 |
try:
|
334 |
# Remove any cache-related parameters
|
335 |
kwargs_copy = kwargs.copy()
|
336 |
+
for key in list(kwargs_copy.keys()):
|
337 |
+
if 'cache' in key.lower():
|
338 |
+
del kwargs_copy[key]
|
|
|
339 |
return model.chat_crop(tokenizer, image_path, **kwargs_copy)
|
340 |
except:
|
341 |
+
raise Exception("Model compatibility issue: DynamicCache error. Please try again.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
342 |
else:
|
343 |
raise e
|
344 |
except Exception as e:
|