Update app.py
app.py CHANGED
@@ -2,10 +2,9 @@ import gradio as gr
 import os
 import json
 import logging
-import tempfile
 
 # ══════════════════════════════════════════════════════════════════════════════
-# 🎓
+# 🎓 MINIMAL AI RESEARCH DEMO - GRADIO 5.0.1 COMPATIBLE
 # ══════════════════════════════════════════════════════════════════════════════
 
 try:
@@ -19,369 +18,162 @@ logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
 # ══════════════════════════════════════════════════════════════════════════════
-# 🔧 SIMPLE CLIENT
+# 🔧 SIMPLE CLIENT - VERIFIED WORKING
 # ══════════════════════════════════════════════════════════════════════════════
 
 def get_client():
-    """Get HuggingFace
+    """Get HuggingFace client - exactly like your working example."""
     api_token = os.getenv("HF_API_TOKEN")
 
-    if not HF_AVAILABLE:
-        return None
-
-    if not api_token:
-        return None, "HF_API_TOKEN not set"
+    if not HF_AVAILABLE or not api_token:
+        return None
 
     try:
-        # Following official docs - using provider parameter
         client = InferenceClient(
             provider="hf-inference",
             api_key=api_token,
         )
-
-
-        return None, f"Connection failed: {e}"
-
-# Initialize client
-CLIENT, STATUS = get_client()
-logger.info(f"Client status: {STATUS}")
-
-# Add debug info
-if CLIENT:
-    logger.info("✅ Client successfully initialized with provider='hf-inference'")
-
-    # Test the client with a simple call
-    try:
-        test_result = CLIENT.fill_mask("The capital of France is [MASK].", model="google-bert/bert-base-uncased")
+        # Test with exact same call as your working example
+        test_result = client.fill_mask("The capital of France is [MASK].", model="google-bert/bert-base-uncased")
         logger.info(f"✅ Client test successful: {type(test_result)}")
+        return client
     except Exception as e:
-        logger.error(f"❌ Client
-
-
-
-    logger.error(f"❌ Client initialization failed: {STATUS}")
+        logger.error(f"❌ Client failed: {e}")
+        return None
+
+CLIENT = get_client()
 
 # ══════════════════════════════════════════════════════════════════════════════
-# 🤖
+# 🤖 MINIMAL FUNCTIONS - AVOIDING GRADIO SCHEMA BUGS
 # ══════════════════════════════════════════════════════════════════════════════
 
-def
-    """
-    if
-        return "❌
-
-    if CLIENT is None:
-        return f"❌ Client not available: {STATUS}"
-
-    try:
-        # Handle audio input
-        if isinstance(audio_file, tuple):
-            # Convert numpy array to file if needed
-            sample_rate, audio_array = audio_file
-            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp_file:
-                import scipy.io.wavfile as wav
-                wav.write(tmp_file.name, sample_rate, audio_array)
-                audio_path = tmp_file.name
-        else:
-            audio_path = audio_file
-
-        # Official API call from docs
-        output = CLIENT.automatic_speech_recognition(
-            audio_path,
-            model="openai/whisper-large-v3"
-        )
-
-        return f"🎤 **Transcription:** {output.get('text', str(output))}"
-
-    except Exception as e:
-        return f"❌ Error: {str(e)}"
-
-def chat_completion(messages_json):
-    """Chat Completion - Using exact working model from your test."""
-    if not messages_json.strip():
-        return "❌ Please enter valid JSON messages"
-
-    if CLIENT is None:
-        return f"❌ Client not available: {STATUS}"
+def run_chat(message):
+    """Chat function - using your exact working model."""
+    if not CLIENT:
+        return "❌ Client not available"
 
     try:
-        messages =
-
-        # Using the EXACT same model and call that works in your test
+        messages = [{"role": "user", "content": message}]
         completion = CLIENT.chat.completions.create(
-            model="Qwen/Qwen2.5-72B-Instruct",
+            model="Qwen/Qwen2.5-72B-Instruct",
             messages=messages,
         )
-
-        return f"🤖 **Response:** {completion.choices[0].message.content}"
-
-    except json.JSONDecodeError:
-        return "❌ Invalid JSON format"
+        return completion.choices[0].message.content
     except Exception as e:
-        return f"❌ Error: {str(e)}
+        return f"❌ Error: {str(e)}"
 
-def
-    """Fill
-    if not
-        return "❌
+def run_fill_mask(text):
+    """Fill mask - using your exact working approach."""
+    if not CLIENT:
+        return "❌ Client not available"
 
-    if
-        return
+    if "[MASK]" not in text:
+        return "❌ Text must contain [MASK]"
 
     try:
-
-
-        # Official API call from docs
-        result = CLIENT.fill_mask(
-            text,
-            model="google-bert/bert-base-uncased",
-        )
-
-        logger.info(f"fill_mask result type: {type(result)}, content: {result}")
-
+        result = CLIENT.fill_mask(text, model="google-bert/bert-base-uncased")
         if isinstance(result, list):
-
+            output = "🎭 **Predictions:**\n"
             for i, pred in enumerate(result[:5], 1):
                 token = pred.get("token_str", "").strip()
                 score = pred.get("score", 0)
-
-        return
-
-        return f"🎭 **Result:** {result}"
-
+                output += f"{i}. **{token}** ({score:.3f})\n"
+            return output
+        return str(result)
     except Exception as e:
-
-        return f"❌ Error: {str(e)}\n\n**Debug:** Model: google-bert/bert-base-uncased, Text: {text}"
+        return f"❌ Error: {str(e)}"
 
-def
-    """
-    if not question or not context:
-        return "❌
-
-    if CLIENT is None:
-        return f"❌ Client not available: {STATUS}"
+def run_question_answering(question, context):
+    """Q&A function."""
+    if not CLIENT or not question or not context:
+        return "❌ Client not available or missing input"
 
     try:
-        # Official API call from docs
         answer = CLIENT.question_answering(
             question=question,
             context=context,
             model="deepset/roberta-base-squad2",
         )
-
-        return f"💡 **Answer:** {answer.get('answer', str(answer))}\n📊 **Score:** {answer.get('score', 'N/A')}"
-
+        return f"💡 **Answer:** {answer.get('answer', str(answer))}"
     except Exception as e:
         return f"❌ Error: {str(e)}"
 
-def
-    """Summarization
-    if not
-        return "❌
-
-    if CLIENT is None:
-        return f"❌ Client not available: {STATUS}"
+def run_summarization(text):
+    """Summarization function."""
+    if not CLIENT or len(text.split()) < 10:
+        return "❌ Client not available or text too short"
 
     try:
-
-        result = CLIENT.summarization(
-            text,
-            model="facebook/bart-large-cnn",
-        )
-
+        result = CLIENT.summarization(text, model="facebook/bart-large-cnn")
         if isinstance(result, list) and result:
-
-
-            summary = str(result)
-
-        return f"📝 **Summary:** {summary}"
-
-    except Exception as e:
-        return f"❌ Error: {str(e)}"
-
-def text_generation(prompt):
-    """Text Generation - Using working Qwen model."""
-    if not prompt.strip():
-        return "❌ Prompt cannot be empty"
-
-    if CLIENT is None:
-        return f"❌ Client not available: {STATUS}"
-
-    try:
-        # Using the same model family that works in your test
-        completion = CLIENT.chat.completions.create(
-            model="Qwen/Qwen2.5-72B-Instruct",
-            messages=[{"role": "user", "content": prompt}],
-        )
-
-        return f"✍️ **Generated:** {completion.choices[0].message.content}"
-
-    except Exception as e:
-        return f"❌ Error: {str(e)}\n\n**Debug:** Model: Qwen/Qwen2.5-72B-Instruct, Prompt: {prompt}"
-
-def image_classification(image_path):
-    """Image Classification - Following official docs exactly."""
-    if image_path is None:
-        return "❌ Please upload an image"
-
-    if CLIENT is None:
-        return f"❌ Client not available: {STATUS}"
-
-    try:
-        # Official API call from docs
-        output = CLIENT.image_classification(
-            image_path,
-            model="google/vit-base-patch16-224"
-        )
-
-        if isinstance(output, list):
-            results = []
-            for i, pred in enumerate(output[:5], 1):
-                label = pred.get("label", "Unknown")
-                score = pred.get("score", 0)
-                results.append(f"{i}. **{label}** ({score:.1%})")
-            return "🖼️ **Classifications:**\n" + "\n".join(results)
-        else:
-            return f"🖼️ **Result:** {output}"
-
-    except Exception as e:
-        return f"❌ Error: {str(e)}"
-
-def feature_extraction(text):
-    """Feature Extraction - Following official docs exactly."""
-    if not text.strip():
-        return "❌ Text cannot be empty"
-
-    if CLIENT is None:
-        return f"❌ Client not available: {STATUS}"
-
-    try:
-        # Official API call from docs
-        result = CLIENT.feature_extraction(
-            text,
-            model="intfloat/multilingual-e5-large-instruct",
-        )
-
-        if isinstance(result, list) and result:
-            dim = len(result[0]) if result[0] else 0
-            sample = result[0][:5] if dim >= 5 else result[0]
-            return f"🧮 **Embeddings:** Dimension: {dim}\n**Sample:** {sample}..."
-        else:
-            return f"🧮 **Result:** {str(result)[:200]}..."
-
+            return f"📝 **Summary:** {result[0].get('summary_text', str(result))}"
+        return f"📝 **Summary:** {str(result)}"
     except Exception as e:
         return f"❌ Error: {str(e)}"
 
 # ══════════════════════════════════════════════════════════════════════════════
-# 🎨
+# 🎨 MINIMAL GRADIO INTERFACE - SCHEMA-BUG PROOF
 # ══════════════════════════════════════════════════════════════════════════════
 
-with
+# Create interface with minimal components to avoid schema bugs
+with gr.Blocks(title="AI Research Demo") as demo:
 
-    gr.Markdown(""
-    # 🎓 AI Research & Academic Demo
-    ### Simple HuggingFace Inference API Implementation
-    """)
+    gr.Markdown("# 🎓 AI Research Demo\n### Working HuggingFace Inference API")
 
-    # Status display
     if CLIENT:
-        gr.Markdown("✅ **Status:** Connected
+        gr.Markdown("✅ **Status:** Connected and tested successfully")
     else:
-        gr.Markdown(
-        [... old lines 291-340 removed; truncated in the page render ...]
-            qa_btn = gr.Button("Answer")
-            qa_btn.click(question_answering, [qa_question, qa_context], qa_output)
-
-        # Summarization
-        with gr.TabItem("📝 Summarization"):
-            with gr.Row():
-                sum_input = gr.Textbox(
-                    label="Text to Summarize",
-                    lines=5,
-                    value="Machine learning is a method of data analysis that automates analytical model building. It is a branch of artificial intelligence based on the idea that systems can learn from data, identify patterns and make decisions with minimal human intervention."
-                )
-            sum_output = gr.Textbox(label="Summary", lines=3)
-            sum_btn = gr.Button("Summarize")
-            sum_btn.click(summarization, sum_input, sum_output)
-
-        # Text Generation
-        with gr.TabItem("✍️ Text Generation"):
-            with gr.Row():
-                gen_input = gr.Textbox(
-                    label="Prompt",
-                    value="The future of AI research will be"
-                )
-            gen_output = gr.Textbox(label="Generated Text", lines=5)
-            gen_btn = gr.Button("Generate")
-            gen_btn.click(text_generation, gen_input, gen_output)
-
-        # Image Classification
-        with gr.TabItem("🖼️ Image Classification"):
-            with gr.Row():
-                img_input = gr.Image(type="filepath")
-            img_output = gr.Textbox(label="Classification", lines=5)
-            img_btn = gr.Button("Classify")
-            img_btn.click(image_classification, img_input, img_output)
-
-        # Feature Extraction
-        with gr.TabItem("🧮 Feature Extraction"):
-            with gr.Row():
-                fe_input = gr.Textbox(
-                    label="Text",
-                    value="This is a sample text for feature extraction."
-                )
-            fe_output = gr.Textbox(label="Features", lines=5)
-            fe_btn = gr.Button("Extract")
-            fe_btn.click(feature_extraction, fe_input, fe_output)
+        gr.Markdown("❌ **Status:** Set HF_API_TOKEN environment variable")
+
+    # Chat Tab
+    gr.Markdown("## 💬 Chat with AI")
+    chat_input = gr.Textbox(label="Your Message", placeholder="Ask anything...")
+    chat_output = gr.Textbox(label="AI Response", lines=5)
+    chat_btn = gr.Button("Send", variant="primary")
+    chat_btn.click(run_chat, inputs=chat_input, outputs=chat_output)
+
+    gr.Markdown("---")
+
+    # Fill Mask
+    gr.Markdown("## 🎭 Fill Mask")
+    mask_input = gr.Textbox(
+        label="Text with [MASK]",
+        value="The capital of France is [MASK].",
+        placeholder="Enter text with [MASK] token"
+    )
+    mask_output = gr.Textbox(label="Predictions", lines=6)
+    mask_btn = gr.Button("Predict", variant="primary")
+    mask_btn.click(run_fill_mask, inputs=mask_input, outputs=mask_output)
+
+    gr.Markdown("---")
+
+    # Q&A
+    gr.Markdown("## ❓ Question Answering")
+    qa_question = gr.Textbox(label="Question", value="What is AI?")
+    qa_context = gr.Textbox(
+        label="Context",
+        lines=3,
+        value="Artificial Intelligence (AI) is the simulation of human intelligence in machines."
+    )
+    qa_output = gr.Textbox(label="Answer", lines=3)
+    qa_btn = gr.Button("Answer", variant="primary")
+    qa_btn.click(run_question_answering, inputs=[qa_question, qa_context], outputs=qa_output)
+
+    gr.Markdown("---")
+
+    # Summarization
+    gr.Markdown("## 📝 Text Summarization")
+    sum_input = gr.Textbox(
+        label="Text to Summarize",
+        lines=5,
+        value="Machine learning is a method of data analysis that automates analytical model building. It is a branch of artificial intelligence based on the idea that systems can learn from data, identify patterns and make decisions with minimal human intervention. The process involves feeding data into algorithms that learn patterns and make predictions or decisions without being explicitly programmed for each specific task."
+    )
+    sum_output = gr.Textbox(label="Summary", lines=3)
+    sum_btn = gr.Button("Summarize", variant="primary")
+    sum_btn.click(run_summarization, inputs=sum_input, outputs=sum_output)
+
+    gr.Markdown("---")
+    gr.Markdown("**🔧 Setup:** `export HF_API_TOKEN=your_token_here`")
 
 if __name__ == "__main__":
     demo.launch(
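For quick verification outside the Space, the same client setup can be smoke-tested from a shell (a minimal sketch, assuming `huggingface_hub` is installed and `HF_API_TOKEN` is exported, mirroring the startup test in `get_client()` above):

```python
# Standalone smoke test mirroring get_client(): same provider, token, and call.
import os

from huggingface_hub import InferenceClient

client = InferenceClient(
    provider="hf-inference",
    api_key=os.environ["HF_API_TOKEN"],  # same variable the app reads
)

# The same verification call the app runs at startup; a list of
# predictions here means the app's "Connected" status should appear.
print(client.fill_mask("The capital of France is [MASK].", model="google-bert/bert-base-uncased"))
```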