ehagey committed on
Commit
581e3d6
·
verified ·
1 Parent(s): 444c9f3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +162 -0
app.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# UI framework, tabular data, plotting, and numeric helpers.
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

st.set_page_config(page_title="LLM API Budget Dashboard", layout="wide")

# Page header.
st.title("LLM API Budget Dashboard")
st.markdown("This dashboard helps you budget your API calls to various LLMs based on input and output tokens.")

# Per-model pricing, in USD per one million tokens, split by direction
# (prompt/input tokens vs. completion/output tokens).
llm_data = {
    "GPT-4o": {"input_cost_per_m": 2.50, "output_cost_per_m": 10.00},
    "Claude 3.7 Sonnet": {"input_cost_per_m": 3.00, "output_cost_per_m": 15.00},
    "Gemini Flash 1.5-8b": {"input_cost_per_m": 0.038, "output_cost_per_m": 0.15},
    "o3-mini": {"input_cost_per_m": 1.10, "output_cost_per_m": 4.40},
}
# Flatten the pricing dict into one table row per model for display.
pricing_rows = []
for model_name, rates in llm_data.items():
    pricing_rows.append({
        "Model": model_name,
        "Input Cost ($/M tokens)": rates["input_cost_per_m"],
        "Output Cost ($/M tokens)": rates["output_cost_per_m"],
    })
llm_df = pd.DataFrame(pricing_rows)

# Show the rate card up front so users can sanity-check the numbers.
st.subheader("LLM Cost Information")
st.dataframe(llm_df, use_container_width=True)
# Sidebar: every user-tunable knob lives here.
st.sidebar.header("Configuration")

# Token volume of a single API call.
st.sidebar.subheader("Token Settings")
input_tokens = st.sidebar.number_input("Input Tokens", min_value=1, value=1000, step=100)
output_tokens = st.sidebar.number_input("Output Tokens", min_value=1, value=500, step=100)

# Which models to include in the budget (all by default).
st.sidebar.subheader("Select LLMs")
selected_llms = st.sidebar.multiselect("Choose LLMs", options=list(llm_data.keys()), default=list(llm_data.keys()))

# Run counts: either one shared count, or one widget per selected model.
st.sidebar.subheader("Run Count Settings")
uniform_runs = st.sidebar.checkbox("Run all LLMs the same number of times", value=True)

if uniform_runs:
    uniform_run_count = st.sidebar.number_input("Number of runs for all LLMs", min_value=1, value=1, step=1)
    run_counts = dict.fromkeys(selected_llms, uniform_run_count)
else:
    st.sidebar.write("Set individual run counts for each LLM:")
    run_counts = {
        llm: st.sidebar.number_input(f"Runs for {llm}", min_value=1, value=1, step=1)
        for llm in selected_llms
    }

# Optional stability testing: repeat a model's runs several times over.
st.sidebar.subheader("Stability Test Settings")
stability_test = st.sidebar.checkbox("Enable stability testing", value=False)

# Maps model name -> iteration count; stays empty for models (or runs)
# without stability testing enabled.
stability_iterations = {}
if stability_test:
    st.sidebar.write("Set stability iterations for selected LLMs:")
    for llm in selected_llms:
        if st.sidebar.checkbox(f"Test stability for {llm}", value=False):
            stability_iterations[llm] = st.sidebar.number_input(f"Iterations for {llm}", min_value=2, value=10, step=1)
# Compute projected token usage and cost for every selected model.
# The column order is fixed here so the results frame keeps its schema even
# when the user deselects every model (empty `results`): pd.DataFrame([])
# without explicit columns has NO columns, and the downstream lookups
# (sums, charts, CSV export) would raise KeyError.
result_columns = [
    "Model",
    "Base Runs",
    "Stability Test Iterations",
    "Total Runs",
    "Total Input Tokens",
    "Total Output Tokens",
    "Input Cost ($)",
    "Output Cost ($)",
    "Total Cost ($)",
]

results = []

for llm in selected_llms:
    base_runs = run_counts[llm]
    stability_runs = stability_iterations.get(llm, 0)
    # 0 means "stability testing disabled" for this model, i.e. the base
    # runs execute exactly once (multiplier of 1). Enabled counts are >= 2.
    total_runs = base_runs * max(stability_runs, 1)

    total_input_tokens = input_tokens * total_runs
    total_output_tokens = output_tokens * total_runs

    # Pricing rates are quoted per one million tokens.
    input_cost = (total_input_tokens / 1_000_000) * llm_data[llm]["input_cost_per_m"]
    output_cost = (total_output_tokens / 1_000_000) * llm_data[llm]["output_cost_per_m"]
    total_cost = input_cost + output_cost

    results.append({
        "Model": llm,
        "Base Runs": base_runs,
        "Stability Test Iterations": stability_runs,
        "Total Runs": total_runs,
        "Total Input Tokens": total_input_tokens,
        "Total Output Tokens": total_output_tokens,
        "Input Cost ($)": input_cost,
        "Output Cost ($)": output_cost,
        "Total Cost ($)": total_cost,
    })

# Explicit columns guarantee the expected schema even for the empty case.
results_df = pd.DataFrame(results, columns=result_columns)
# Main content: the full per-model breakdown plus headline totals.
st.header("Cost Summary")
st.dataframe(results_df, use_container_width=True)

# Aggregate spend across all selected models. Guard against a results frame
# that lacks the cost columns — pd.DataFrame([]) built from an empty results
# list has no columns at all, and indexing it would raise KeyError.
if "Total Cost ($)" in results_df.columns:
    total_input_cost = results_df["Input Cost ($)"].sum()
    total_output_cost = results_df["Output Cost ($)"].sum()
    total_cost = results_df["Total Cost ($)"].sum()
else:
    total_input_cost = total_output_cost = total_cost = 0.0

# Headline metrics side by side.
col1, col2, col3 = st.columns(3)
col1.metric("Total Input Cost", f"${total_input_cost:.2f}")
col2.metric("Total Output Cost", f"${total_output_cost:.2f}")
col3.metric("Total API Cost", f"${total_cost:.2f}")
# Data visualization
st.header("Cost Visualization")

# Grouped bar chart: input vs. output cost, one pair of bars per model.
fig_bars, ax_bars = plt.subplots(figsize=(10, 6))
model_labels = results_df["Model"]
per_model_input = results_df["Input Cost ($)"]
per_model_output = results_df["Output Cost ($)"]

positions = np.arange(len(model_labels))
bar_width = 0.35

# Offset the two series left/right of each tick so the bars sit side by side.
ax_bars.bar(positions - bar_width / 2, per_model_input, bar_width, label='Input Cost')
ax_bars.bar(positions + bar_width / 2, per_model_output, bar_width, label='Output Cost')

ax_bars.set_ylabel('Cost ($)')
ax_bars.set_title('Cost Breakdown by Model')
ax_bars.set_xticks(positions)
ax_bars.set_xticklabels(model_labels, rotation=45, ha='right')
ax_bars.legend()

fig_bars.tight_layout()
st.pyplot(fig_bars)

# Pie chart: each model's share of the overall projected spend.
fig_pie, ax_pie = plt.subplots(figsize=(8, 8))
ax_pie.pie(results_df["Total Cost ($)"], labels=results_df["Model"], autopct='%1.1f%%', startangle=90)
ax_pie.axis('equal')  # equal aspect ratio keeps the pie circular
ax_pie.set_title('Percentage of Total Cost by Model')
st.pyplot(fig_pie)
# Export: offer the summary table as a downloadable CSV file.
st.header("Export Options")
csv_payload = results_df.to_csv(index=False).encode('utf-8')
st.download_button(
    label="Download Results as CSV",
    data=csv_payload,
    file_name='llm_budget_results.csv',
    mime='text/csv',
)

# Footer
st.markdown("---")
st.markdown("*Note: All costs are estimates based on the provided rates. Actual API costs may vary.*")