Spaces:
Sleeping
Sleeping
File size: 7,192 Bytes
4cfde22 6888520 4cfde22 261e4f0 6888520 4cfde22 6888520 8196568 6888520 4cfde22 6888520 4cfde22 6888520 ee5803c 6888520 ee5803c 6888520 37a12f4 595b02f 4cfde22 6888520 b25dddd 6888520 bd77084 6888520 b25dddd 6888520 b25dddd 6888520 4cfde22 6888520 37a12f4 6888520 bd77084 37a12f4 6888520 37a12f4 6888520 37a12f4 6888520 37a12f4 4cfde22 6888520 37a12f4 4cfde22 409e026 6888520 409e026 ee5803c 6888520 37a12f4 6888520 37a12f4 4cfde22 6888520 4cfde22 6888520 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 |
import os
import logging
import traceback
from typing import List, Tuple
import gradio as gr
from dotenv import load_dotenv
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
# Configure root logging so INFO-level messages from this module are visible
logging.basicConfig(level=logging.INFO)
# Module-wide logger, named after this module per the logging convention
logger = logging.getLogger(__name__)
# Load environment variables from a local .env file (e.g. OPENAI_API_KEY)
load_dotenv()
class RAGChatbot:
    """Retrieval-Augmented Generation chatbot for Beeline Uzbekistan tariffs.

    On construction it builds (or reloads) a FAISS vector index over the
    given document and wires up a RetrievalQA chain backed by OpenAI models.
    """

    def __init__(self, document_path):
        """
        Initialize RAG Chatbot with document vectorization

        :param document_path: Path to the input document
        :raises ValueError: if OPENAI_API_KEY is not set in the environment
        """
        self.openai_api_key = os.getenv('OPENAI_API_KEY')
        if not self.openai_api_key:
            raise ValueError("OpenAI API Key is not set. Please add it to environment variables.")
        self.document_path = document_path
        self.vectorstore = self._load_or_create_vector_store()
        self.qa_system = self._create_qa_system()

    def _load_or_create_vector_store(self):
        """
        Load existing FAISS index or create a new one

        :return: FAISS vector store
        :raises Exception: re-raises any loading/embedding error after logging it
        """
        try:
            embeddings = OpenAIEmbeddings(openai_api_key=self.openai_api_key)
            # Reuse a previously persisted index to avoid re-embedding the
            # document (and paying for the embedding calls) on every start.
            # NOTE(review): newer langchain releases require
            # allow_dangerous_deserialization=True for load_local — confirm
            # against the pinned langchain version before upgrading.
            if os.path.exists('faiss_index'):
                logger.info("Loading existing vector store...")
                return FAISS.load_local('faiss_index', embeddings)
            # No saved index: build one from the source document.
            logger.info("Creating new vector store...")
            loader = TextLoader(self.document_path, encoding='utf-8')
            documents = loader.load()
            # Large chunks with generous overlap so tariff descriptions are
            # unlikely to be split mid-entry across chunk boundaries.
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=3000,
                chunk_overlap=600,
                separators=["\n\n\n", "\n\n", "\n", ".", " ", ""]
            )
            texts = text_splitter.split_documents(documents)
            vectorstore = FAISS.from_documents(texts, embeddings)
            # Ensure the faiss_index directory exists before persisting
            os.makedirs('faiss_index', exist_ok=True)
            vectorstore.save_local('faiss_index')
            return vectorstore
        except Exception as e:
            logger.error(f"Vector store creation error: {e}")
            logger.error(traceback.format_exc())
            raise

    def _create_qa_system(self):
        """
        Create Question-Answering system with custom prompt

        :return: RetrievalQA chain ("stuff" chain over an MMR retriever)
        """
        custom_prompt = PromptTemplate(
            input_variables=["context", "question"],
            template="""You are an expert AI assistant for Beeline Uzbekistan tariffs.
Provide clear, precise answers based on the context.
Respond in the language of the question.
Context: {context}
Question: {question}
Comprehensive Answer:"""
        )
        # Low temperature keeps tariff answers factual rather than creative.
        llm = ChatOpenAI(
            model_name="gpt-3.5-turbo",
            openai_api_key=self.openai_api_key,
            temperature=0.1
        )
        return RetrievalQA.from_chain_type(
            llm=llm,
            chain_type="stuff",
            # MMR retrieval: fetch 10 candidates, keep the 4 most diverse.
            retriever=self.vectorstore.as_retriever(
                search_type="mmr",
                search_kwargs={"k": 4, "fetch_k": 10}
            ),
            chain_type_kwargs={"prompt": custom_prompt}
        )

    def chat(self, message: str, history: List[Tuple[str, str]]) -> str:
        """
        Main chat method with multilingual support

        :param message: User input message
        :param history: Chat history (unused; the QA chain is stateless)
        :return: Bot response
        """
        # Normalize once so trailing whitespace / capitalization can't break
        # the greeting matching below.
        normalized = message.strip().lower()
        # Handle initial greeting (sent by the UI when a new chat starts)
        if normalized in ('init', 'start', 'begin'):
            return "Assalomu alaykum! 📱 Beeline tarifları haqida qanday ma'lumot kerak? (Hello! What Beeline tariff information do you need?)"
        # Multilingual greeting handling.
        # BUG FIX: 'hi'/'hello' previously also appeared in the 'ru' list, so
        # English greetings were answered in Russian and the 'en' entry was
        # unreachable for them; each language now has a disjoint greeting set.
        greetings = {
            'uz': ['salom', 'assalomu alaykum', 'hammaga salom'],
            'ru': ['привет', 'здравствуйте'],
            'en': ['hi', 'hello', 'hey']
        }
        for lang, greeting_list in greetings.items():
            if normalized in greeting_list:
                return {
                    'uz': "Salom! Sizga qanday yordam bera olaman? 🤖",
                    'ru': "Привет! Чем могу помочь? 🤖",
                    'en': "Hello! How can I assist you today about Beeline tariffs? 🤖"
                }[lang]
        try:
            # Query the document via the RetrievalQA chain
            response = self.qa_system.run(message)
            # Add conversational touch
            response += "\n\n📞 Yana bir nima so'rashingizni xohlar edingizmi? (Would you like to ask anything else?)"
            return response
        except Exception as e:
            logger.error(f"Chat processing error: {e}")
            logger.error(traceback.format_exc())
            return "Kechirasiz, so'rovingizni qayta ishlashda xatolik yuz berdi. Iltimos, qaytadan urinib ko'ring. (Sorry, there was an error processing your request. Please try again.)"
def create_demo() -> gr.Interface:
    """
    Create Gradio interface for the chatbot

    :return: Gradio demo
    """
    # One chatbot instance is built up front and shared by every UI session.
    assistant = RAGChatbot('12.txt')
    with gr.Blocks() as demo:
        gr.Markdown("# 📱 Beeline Uzbekistan Tariff Assistant")
        chat_window = gr.Chatbot(
            height=600,
            show_copy_button=True,
            avatar_images=["🤔", "🤖"]  # User and Bot avatars
        )
        with gr.Row():
            user_box = gr.Textbox(
                show_label=False,
                placeholder="Beeline tariffları haqida so'rang... (Ask about Beeline tariffs...)",
                container=False
            )
            send_btn = gr.Button("Yuborish (Send)", variant="primary")
            reset_btn = gr.Button("Yangi suhbat (New Chat)")

        def handle_message(user_text, transcript):
            # Ask the bot, append the (question, answer) pair, clear the input.
            answer = assistant.chat(user_text, transcript)
            transcript.append((user_text, answer))
            return "", transcript

        def fresh_session():
            # Seed a new conversation with the bot's opening greeting.
            return [("", assistant.chat("init", []))]

        # Event wiring: send button, Enter key, reset button, initial page load
        send_btn.click(handle_message, [user_box, chat_window], [user_box, chat_window])
        user_box.submit(handle_message, [user_box, chat_window], [user_box, chat_window])
        reset_btn.click(fn=fresh_session, inputs=None, outputs=chat_window)
        demo.load(fresh_session, inputs=None, outputs=chat_window)
    return demo
# Main execution
# NOTE(review): `demo` is deliberately created at import time (module level) —
# this matches the Hugging Face Spaces convention, where the runtime imports
# the module and serves the top-level `demo` object; confirm before moving it
# under the __main__ guard.
demo = create_demo()
if __name__ == "__main__":
    # debug=True enables verbose Gradio error output when run locally
    demo.launch(debug=True)