import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer
MODEL_NAME = "sarvamai/sarvam-1"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, torch_dtype=torch.float16, device_map="auto"
)
model.eval()

# Some tokenizers ship without a pad token; fall back to EOS so generate() does not warn
if tokenizer.pad_token_id is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id


def respond(message, history, max_tokens, temperature, top_p):
    # Convert the Gradio (user, assistant) history into chat-template messages
    messages = [{"role": "system", "content": "You are a friendly AI assistant."}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Render the conversation with the tokenizer's chat template (assumes the
    # checkpoint defines one), ending with the assistant prompt so the model
    # continues as the assistant rather than the user
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # device_map="auto" already places the model, so send inputs to model.device
    input_tokens = tokenizer(prompt, return_tensors="pt").to(model.device)

    output_tokens = model.generate(
        **input_tokens,
        max_new_tokens=max_tokens,
        do_sample=True,  # temperature/top_p only take effect when sampling
        temperature=temperature,
        top_p=top_p,
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, not the echoed prompt
    new_tokens = output_tokens[0][input_tokens["input_ids"].shape[-1]:]
    response = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return response


# Define the Gradio chat interface
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max Tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
    ],
    title="Sarvam-1 Chat Interface",
    description="Chat with the Sarvam-1 language model",
)

if __name__ == "__main__":
    demo.launch()
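
# Rough dependency sketch (versions are an assumption, not pinned by the original;
# accelerate is needed because device_map="auto" is used above):
#   pip install torch transformers accelerate gradio
# Run locally with:
#   python app.py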