Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -128,10 +128,13 @@ lang_id = {
|
|
| 128 |
def translation_text(source_lang, target_lang, user_input):
    """Translate *user_input* from *source_lang* to *target_lang*.

    Resolves the human-readable language names through the module-level
    ``lang_id`` mapping, sets the tokenizer's source language, and runs the
    module-level ``translation_model`` (an M2M100-style multilingual model,
    judging by ``tokenizer.get_lang_id`` / ``forced_bos_token_id`` usage)
    with the target language forced as the first generated token.

    Args:
        source_lang: Source language name; must be a key of ``lang_id``.
        target_lang: Target language name; must be a key of ``lang_id``.
        user_input: The text to translate.

    Returns:
        The translated text as a single string (first decoded sequence).

    Raises:
        KeyError: If either language name is not present in ``lang_id``.
    """
    src_lang = lang_id[source_lang]
    trg_lang = lang_id[target_lang]

    tokenizer.src_lang = src_lang
    # Inference only — disable autograd to save memory and time.
    with torch.no_grad():
        encoded_input = tokenizer(user_input, return_tensors="pt")
        # BUG FIX: generate() expects input_ids/attention_mask as keyword
        # arguments; passing the BatchEncoding positionally hands a dict to
        # the `inputs` tensor parameter. Unpack it with ** instead.
        generated_tokens = translation_model.generate(
            **encoded_input,
            forced_bos_token_id=tokenizer.get_lang_id(trg_lang),
        )
        translated_text = tokenizer.batch_decode(
            generated_tokens, skip_special_tokens=True)[0]
    return translated_text
|
|
|
|
| 128 |
def translation_text(source_lang, target_lang, user_input):
    """Translate *user_input* from *source_lang* to *target_lang*.

    Resolves the human-readable language names through the module-level
    ``lang_id`` mapping, sets the tokenizer's source language, and runs the
    module-level ``translation_model`` (an M2M100-style multilingual model,
    judging by ``tokenizer.get_lang_id`` / ``forced_bos_token_id`` usage)
    with the target language forced as the first generated token.

    Args:
        source_lang: Source language name; must be a key of ``lang_id``.
        target_lang: Target language name; must be a key of ``lang_id``.
        user_input: The text to translate.

    Returns:
        The translated text as a single string (first decoded sequence).

    Raises:
        KeyError: If either language name is not present in ``lang_id``.
    """
    src_lang = lang_id[source_lang]
    trg_lang = lang_id[target_lang]

    # NOTE(review): removed leftover debug prints of trg_lang,
    # encoded_input and generated_tokens — they leaked model internals
    # to stdout on every request in the deployed app.
    tokenizer.src_lang = src_lang
    # Inference only — disable autograd to save memory and time.
    with torch.no_grad():
        encoded_input = tokenizer(user_input, return_tensors="pt")
        # BUG FIX: generate() expects input_ids/attention_mask as keyword
        # arguments; passing the BatchEncoding positionally hands a dict to
        # the `inputs` tensor parameter. Unpack it with ** instead.
        generated_tokens = translation_model.generate(
            **encoded_input,
            forced_bos_token_id=tokenizer.get_lang_id(trg_lang),
        )
        translated_text = tokenizer.batch_decode(
            generated_tokens, skip_special_tokens=True)[0]
    return translated_text
|