# app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import pandas as pd

# Load your synthetic profitability dataset
df = pd.read_csv('synthetic_profit.csv')

# Initialize the TAPEX small model fine-tuned on WikiSQL
MODEL_ID = "microsoft/tapex-small-finetuned-wikisql"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)

# Build a table-question-answering pipeline
table_qa = pipeline(
    "table-question-answering",
    model=model,
    tokenizer=tokenizer,
    framework="pt",
    device=-1,  # set to 0 if you enable GPU in your Space
)
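
# Usage note (sketch): the pipeline accepts the table as a pandas DataFrame
# (or a dict of columns) plus a natural-language "query"; for seq2seq models
# like TAPEX a single query returns a dict with an "answer" string, roughly:
#   table_qa(table=df.astype(str), query="...")  ->  {"answer": "..."}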
def answer_profitability(question):
    # Pass the table as a DataFrame of strings; numeric cells can otherwise
    # trip up the tokenizer's table linearization.
    table = df.astype(str)
    try:
        out = table_qa(table=table, query=question)
        return out.get("answer", "No answer found.")
    except Exception as e:
        return f"Error: {e}"
# Gradio interface
iface = gr.Interface(
    fn=answer_profitability,
    inputs=gr.Textbox(lines=2, placeholder="Ask a question about profitability…"),
    outputs="text",
    title="SAP Profitability Q&A (TAPEX-Small)",
    description=(
        "Ask free-form questions on the synthetic profitability dataset. "
        "Powered end-to-end by microsoft/tapex-small-finetuned-wikisql."
    ),
)

if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)
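
# Deployment note (sketch): the Space also needs a requirements.txt next to
# app.py covering the imports above, roughly:
#   gradio
#   transformers
#   torch
#   pandas
#
# Quick local smoke test (the example question is only illustrative; adjust it
# to the actual columns in synthetic_profit.csv):
#   python -c "from app import answer_profitability; print(answer_profitability('What is the total profit?'))"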