import gradio as gr
from openai import OpenAI
import os
from dotenv import load_dotenv

load_dotenv()
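# Configuration is read from environment variables (or Space secrets when deployed);
# the base URL points at Featherless AI's OpenAI-compatible endpoint.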
SYSTEM_PROMPT = os.getenv("XTRNPMT", "")  # system prompt text kept in the XTRNPMT secret; empty string if unset
API_BASE_URL = "https://api.featherless.ai/v1"
FEATHERLESS_API_KEY = os.getenv("FEATHERLESS_API_KEY")
FEATHERLESS_MODEL = "darkc0de/XortronCriminalComputingConfig"

if not FEATHERLESS_API_KEY:
    print("WARNING: FEATHERLESS_API_KEY environment variable is not set.")

try:
    if not FEATHERLESS_API_KEY:
        raise ValueError("FEATHERLESS_API_KEY is not set. Please set it as an environment variable or a secret in your deployment environment.")
    client = OpenAI(
        base_url=API_BASE_URL,
        api_key=FEATHERLESS_API_KEY
    )
    print(f"OpenAI client initialized with base_url: {API_BASE_URL} for Featherless AI, model: {FEATHERLESS_MODEL}")
except Exception as e:
    print(f"Error initializing OpenAI client with base_url '{API_BASE_URL}': {e}")
    raise RuntimeError(
        "Could not initialize OpenAI client. "
        f"Please check the API base URL ('{API_BASE_URL}'), your Featherless AI API key, model ID, "
        f"and ensure the server is accessible. Original error: {e}"
    )
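
# Optional sanity check, left commented out so it does not spend tokens at startup.
# A minimal sketch assuming the endpoint accepts a standard non-streaming
# chat.completions call with this model ID:
#
# probe = client.chat.completions.create(
#     model=FEATHERLESS_MODEL,
#     messages=[{"role": "user", "content": "ping"}],
#     max_tokens=5,
# )
# print(probe.choices[0].message.content)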

def respond(message, history):
    """
    Processes the user's message and the chat history to generate a response
    from the language model using the Featherless AI API (compatible with OpenAI's API),
    including a static system prompt.

    Args:
        message (str): The latest message from the user.
        history (list of lists): A list where each inner list contains a pair of
            [user_message, ai_message].

    Yields:
        str: The accumulated response text, updated as each new token streams in.
    """
messages = [{"role": "system", "content": SYSTEM_PROMPT}] | |
for user_message, ai_message in history: | |
if user_message: | |
messages.append({"role": "user", "content": user_message}) | |
if ai_message: | |
messages.append({"role": "assistant", "content": ai_message}) | |
messages.append({"role": "user", "content": message}) | |
response_text = "" | |
try: | |
stream = client.chat.completions.create( | |
messages=messages, | |
model=FEATHERLESS_MODEL, | |
stream=True, | |
) | |
        for chunk in stream:
            # Streaming chunks carry new tokens in choices[0].delta.content; the
            # content can be None on role-only or final chunks, so guard for that.
            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
                token = chunk.choices[0].delta.content
                response_text += token
                yield response_text
    except Exception as e:
        error_message = f"An error occurred during model inference with Featherless AI: {e}"
        print(error_message)
        yield error_message
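
# Illustrative only: how respond() could be exercised outside Gradio for a quick
# local test. The history uses the [user_message, ai_message] pair format the
# function expects; kept commented out so the Space only runs the app below.
#
# for partial in respond("Hello", [["Hi", "Hello! How can I help?"]]):
#     print(partial)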
kofi_script = """ | |
<script src='https://storage.ko-fi.com/cdn/scripts/overlay-widget.js'></script> | |
<script> | |
kofiWidgetOverlay.draw('sonnydesorbo', { | |
'type': 'floating-chat', | |
'floating-chat.donateButton.text': 'Support me', | |
'floating-chat.donateButton.background-color': '#00b9fe', | |
'floating-chat.donateButton.text-color': '#fff' | |
}); | |
</script> | |
""" | |
kofi_button_html = """ | |
<div style="text-align: center; padding: 20px;"> | |
<a href='https://ko-fi.com/Z8Z51E5TIG' target='_blank'> | |
<img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi5.png?v=6' border='0' alt='Buy Me a Coffee at ko-fi.com' /> | |
</a> | |
</div> | |
""" | |

donation_solicitation_html = """
<div style="text-align: center; font-size: x-small; margin-bottom: 5px;">
  The Cybernetic Criminal Computing Corporation presents: XORTRON, free of charge, unlimited, no login, no signup, no bullshit. I'm sure even a low-life deadbeat freeloader like yourself can at least throw some spare change, right? - Support Xortron @ ko-fi.com/xortron<br>
</div>
"""
custom_css = """ | |
@import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap'); | |
body, .gradio-container { | |
font-family: 'Orbitron', sans-serif !important; | |
} | |
.gr-button { font-family: 'Orbitron', sans-serif !important; } | |
.gr-input { font-family: 'Orbitron', sans-serif !important; } | |
.gr-label { font-family: 'Orbitron', sans-serif !important; } | |
.gr-chatbot .message { font-family: 'Orbitron', sans-serif !important; } | |
""" | |

with gr.Blocks(theme="dark", head=kofi_script, css=custom_css) as demo:
    gr.ChatInterface(
        fn=respond,  # The function to call when a message is sent
        chatbot=gr.Chatbot(  # Configure the chatbot display area
            height=800,  # Set the height of the chat history display to 800px
            label="Xortron - Criminal Computing"  # Set the label
        )
    )
    gr.HTML(donation_solicitation_html)
    gr.HTML(kofi_button_html)
if __name__ == "__main__": | |
if not FEATHERLESS_API_KEY: | |
print("\nCRITICAL ERROR: FEATHERLESS_API_KEY is not set.") | |
print("Please ensure it's set as a secret in your Hugging Face Space settings or as an environment variable.\n") | |
try: | |
demo.queue(default_concurrency_limit=1) | |
demo.launch(show_api=False, share=True) | |
except NameError as ne: | |
print(f"Gradio demo could not be launched. 'client' might not have been initialized: {ne}") | |
except RuntimeError as re: | |
print(f"Gradio demo could not be launched due to an error during client initialization: {re}") | |
except Exception as e: | |
print(f"An unexpected error occurred when trying to launch Gradio demo: {e}") |