IndraneelKumar committed
Commit eea93d3 · 1 parent: 804054e

Added New UI

frontend/app.py CHANGED
@@ -42,223 +42,199 @@ feed_authors = [f["author"] for f in feeds]
42
  # Custom CSS for modern UI
43
  # -----------------------
44
  CUSTOM_CSS = """
45
- /* Modern, clean UI with subtle glass and gradients */
46
  :root {
47
- --radius-xl: 16px;
48
- --radius-lg: 14px;
49
- --radius-md: 12px;
50
- --shadow-lg: 0 18px 35px rgba(2, 6, 23, 0.10);
51
- --shadow-md: 0 10px 22px rgba(2, 6, 23, 0.08);
52
  --border: 1px solid rgba(2, 6, 23, 0.08);
53
- --primary: #6366f1; /* indigo-500 */
54
- --primary-600: #4f46e5;
55
- --primary-700: #4338ca;
56
- --slate-900: #0f172a;
57
- --slate-800: #1e293b;
58
- --slate-700: #334155;
59
- --slate-600: #475569;
60
- --slate-500: #64748b;
61
- --slate-200: #e2e8f0;
62
- --slate-100: #f1f5f9;
63
- --bg: radial-gradient(1200px 800px at 0% 0%, #f6f8ff 0%, #ffffff 40%);
64
  }
65
 
66
- .dark:root {
67
- --border: 1px solid rgba(255, 255, 255, 0.08);
68
- --bg: radial-gradient(1200px 800px at 0% 0%, #0b1220 0%, #0a0f1c 40%);
69
  }
70
 
71
- .gradio-container, body {
72
- font-family: ui-sans-serif, system-ui, -apple-system, "Segoe UI", Roboto, Inter, Helvetica, Arial, "Apple Color Emoji", "Segoe UI Emoji";
73
- background: var(--bg);
74
- color: var(--slate-900);
75
- }
76
- .dark .gradio-container, .dark body { color: #e5e7eb; }
77
-
78
- /* Header */
79
- #app-header {
80
- background: linear-gradient(135deg, #0ea5e9 0%, #6366f1 50%, #7c3aed 100%);
81
- color: white;
82
- padding: 28px 28px;
83
- border-radius: var(--radius-xl);
84
- box-shadow: var(--shadow-lg);
85
- margin-bottom: 18px;
86
- }
87
- #app-header h1 {
88
- font-size: 34px;
89
- line-height: 1.1;
90
- margin: 0 0 8px 0;
91
- letter-spacing: -0.02em;
92
- }
93
- #app-header p {
94
- margin: 0;
95
- opacity: 0.95;
96
  }
97
 
98
- /* Panels */
99
- .panel {
100
- backdrop-filter: saturate(160%) blur(8px);
101
- background: rgba(255, 255, 255, 0.75);
102
  border: var(--border);
103
- border-radius: var(--radius-xl);
104
- padding: 18px;
105
- box-shadow: var(--shadow-md);
106
  }
107
- .dark .panel {
108
- background: rgba(2, 6, 23, 0.55);
109
  }
110
 
111
- /* Segmented control (radio) */
112
- .segmented .wrap {
113
- display: grid !important;
114
- grid-auto-flow: column;
115
- grid-auto-columns: 1fr;
116
- gap: 8px;
117
- background: var(--slate-100);
118
- border: var(--border);
119
- border-radius: 999px;
120
- padding: 6px;
121
- }
122
- .dark .segmented .wrap { background: rgba(255, 255, 255, 0.06); }
123
- .segmented input[type="radio"] { display: none; }
124
- .segmented label {
125
- border-radius: 999px !important;
126
- padding: 10px 14px !important;
127
- text-align: center;
128
- border: none !important;
129
- transition: all .18s ease;
130
- color: var(--slate-700);
131
- background: transparent;
132
- }
133
- .dark .segmented label { color: #cbd5e1; }
134
- .segmented input[type="radio"]:checked + label {
135
- background: white !important;
136
- color: var(--slate-900) !important;
137
- box-shadow: 0 8px 18px rgba(2, 6, 23, 0.08);
138
- }
139
- .dark .segmented input[type="radio"]:checked + label {
140
- background: var(--slate-800) !important;
141
- color: #e5e7eb !important;
142
- }
143
-
144
- /* Form controls polish */
145
- .panel .gr-form .gr-block, .panel .gr-form { gap: 10px; }
146
- .panel .gr-textbox textarea, .panel .gr-textbox input,
147
- .panel .gr-dropdown input, .panel .gr-dropdown .wrap,
148
- .panel .gr-slider input {
149
- border-radius: 12px !important;
150
- }
151
-
152
- /* Submit button */
153
- .submit-button .gr-button {
154
- background: linear-gradient(135deg, var(--primary), var(--primary-600));
155
- border: none;
156
- color: white;
157
- border-radius: 12px;
158
- box-shadow: 0 10px 24px rgba(79, 70, 229, 0.25);
159
- padding: 12px 16px;
160
  }
161
- .submit-button .gr-button:hover {
162
- transform: translateY(-1px);
163
- box-shadow: 0 14px 28px rgba(79, 70, 229, 0.32);
164
  }
165
 
166
- /* Output area */
167
- .output-panel {
168
- padding: 0;
 
169
  }
170
- .model-info {
171
- margin-top: 8px;
172
  }
173
- .model-info .content {
174
- display: inline-flex;
175
- align-items: center;
176
- gap: 10px;
177
  font-weight: 600;
178
- background: linear-gradient(135deg, #dcfce7, #dbeafe);
179
- color: #065f46;
180
- padding: 8px 12px;
181
- border-radius: 999px;
182
- border: var(--border);
183
  }
184
- .dark .model-info .content {
185
- background: linear-gradient(135deg, rgba(22, 101, 52, 0.35), rgba(30, 58, 138, 0.35));
186
- color: #d1fae5;
187
  }
188
 
189
- /* Results grid and cards */
190
- .results-grid {
191
- display: grid;
192
- grid-template-columns: repeat(auto-fill, minmax(320px, 1fr));
193
- gap: 14px;
194
  padding: 14px;
195
  }
196
- .article-card {
197
  border: var(--border);
198
- border-radius: var(--radius-lg);
199
- background: rgba(255, 255, 255, 0.9);
200
- padding: 16px;
201
- box-shadow: var(--shadow-md);
202
  }
203
- .dark .article-card {
204
- background: rgba(2, 6, 23, 0.6);
205
  }
206
- .article-card__title {
207
- font-size: 18px;
208
- margin: 0 0 8px 0;
209
- color: var(--slate-900);
210
  }
211
- .dark .article-card__title { color: #e5e7eb; }
212
- .article-card__meta {
213
  display: flex;
214
- flex-wrap: wrap;
215
- gap: 8px;
216
- margin-bottom: 8px;
217
  }
218
- .chip {
219
- font-size: 12px;
220
- padding: 6px 10px;
221
  border-radius: 999px;
222
- background: var(--slate-100);
223
- border: var(--border);
224
- color: var(--slate-700);
225
  }
226
- .dark .chip { background: rgba(255, 255, 255, 0.06); color: #cbd5e1; }
227
- .article-card__authors {
228
- color: var(--slate-600);
229
- font-size: 14px;
230
- margin-bottom: 10px;
231
  }
232
- .dark .article-card__authors { color: #94a3b8; }
233
- .article-card__link {
234
- display: inline-flex;
235
- align-items: center;
236
  gap: 8px;
237
- color: var(--primary-600);
238
- text-decoration: none;
239
- font-weight: 600;
240
  }
241
- .article-card__link:hover { color: var(--primary-700); }
242
 
243
- /* AI answer card */
244
- .answer-card {
245
- margin: 14px;
246
- border: var(--border);
247
- border-radius: var(--radius-xl);
248
- padding: 18px;
249
- background: linear-gradient(180deg, rgba(99, 102, 241, 0.06), rgba(124, 58, 237, 0.06));
250
- box-shadow: var(--shadow-md);
251
  }
252
- .dark .answer-card {
253
- background: linear-gradient(180deg, rgba(79, 70, 229, 0.18), rgba(124, 58, 237, 0.18));
254
  }
255
- .answer-card .markdown-body table {
256
- width: 100%;
257
- border-collapse: collapse;
258
  }
259
- .answer-card .markdown-body th, .answer-card .markdown-body td {
260
- border: 1px solid rgba(0,0,0,0.05);
261
- padding: 6px 10px;
262
  }
263
  """
264
 
@@ -381,7 +357,16 @@ def handle_search_articles(query_text, feed_name, feed_author, title_keywords, l
381
  if not results:
382
  return "No results found."
383
 
384
- html_output = "<div class='results-grid'>"
385
  for item in results:
386
  title = item.get("title", "No title")
387
  feed_n = item.get("feed_name", "N/A")
@@ -389,19 +374,15 @@ def handle_search_articles(query_text, feed_name, feed_author, title_keywords, l
389
  authors = ", ".join(item.get("article_author") or ["N/A"])
390
  url = item.get("url", "#")
391
  html_output += (
392
- "<div class='article-card'>"
393
- f" <h3 class='article-card__title'>{title}</h3>"
394
- f" <div class='article-card__meta'>"
395
- f" <span class='chip'>Newsletter: {feed_n}</span>"
396
- f" <span class='chip'>Author: {feed_a}</span>"
397
- f" </div>"
398
- f" <div class='article-card__authors'><b>Article Authors:</b> {authors}</div>"
399
- f" <a class='article-card__link' href='{url}' target='_blank' rel='noopener noreferrer'>"
400
- f" Open Article →"
401
- f" </a>"
402
- "</div>"
403
  )
404
- html_output += "</div>"
405
  return html_output
406
 
407
  except Exception as e:
@@ -450,30 +431,29 @@ def handle_ai_question_streaming(
450
 
451
  try:
452
  answer_html = ""
453
- model_info = f"<div class='content'>Provider: {provider}</div>"
454
 
455
  for _, (event_type, content) in enumerate(call_ai(payload, streaming=True)):
456
  if event_type == "text":
457
- # Convert markdown to HTML
458
  html_content = markdown.markdown(content, extensions=["tables"])
459
- answer_html = f"<div class='answer-card'><div class='markdown-body'>{html_content}</div></div>"
460
  yield answer_html, model_info
461
 
462
  elif event_type == "model":
463
- model_info = f"<div class='content'>Provider: {provider} | Model: {content}</div>"
464
  yield answer_html, model_info
465
 
466
  elif event_type == "truncated":
467
- answer_html += f"<div class='answer-card'><div style='color:#ff8800; font-weight:700;'>⚠️ {content}</div></div>"
468
  yield answer_html, model_info
469
 
470
  elif event_type == "error":
471
- error_html = f"<div class='answer-card'><div style='color:#ef4444; font-weight:700;'>❌ {content}</div></div>"
472
  yield error_html, model_info
473
  break
474
 
475
  except Exception as e:
476
- error_html = "<div class='answer-card'><div style='color:#ef4444;'>Error: {}</div></div>".format(str(e))
477
  yield error_html, model_info
478
 
479
 
@@ -511,19 +491,19 @@ def handle_ai_question_non_streaming(query_text, feed_name, feed_author, limit,
511
 
512
  try:
513
  answer_html = ""
514
- model_info = f"<div class='content'>Provider: {provider}</div>"
515
 
516
  for event_type, content in call_ai(payload, streaming=False):
517
  if event_type == "text":
518
  html_content = markdown.markdown(content, extensions=["tables"])
519
- answer_html = f"<div class='answer-card'><div class='markdown-body'>{html_content}</div></div>"
520
  elif event_type == "model":
521
- model_info = f"<div class='content'>Provider: {provider} | Model: {content}</div>"
522
  elif event_type == "truncated":
523
- answer_html += f"<div class='answer-card'><div style='color:#ff8800; font-weight:700;'>⚠️ {content}</div></div>"
524
  elif event_type == "error":
525
  return (
526
- f"<div class='answer-card'><div style='color:#ef4444; font-weight:700;'>❌ {content}</div></div>",
527
  model_info,
528
  )
529
 
@@ -531,8 +511,8 @@ def handle_ai_question_non_streaming(query_text, feed_name, feed_author, limit,
531
 
532
  except Exception as e:
533
  return (
534
- f"<div class='answer-card'><div style='color:#ef4444;'>Error: {str(e)}</div></div>",
535
- f"<div class='content'>Provider: {provider}</div>",
536
  )
537
 
538
 
@@ -549,209 +529,176 @@ def update_model_choices(provider):
549
 
550
 
551
  # -----------------------
552
- # Gradio UI
553
  # -----------------------
554
- with gr.Blocks(title="AI Search Engine for Articles", theme=gr.themes.Soft(), css=CUSTOM_CSS) as demo:
555
- # Header
556
- gr.HTML(
557
- "<div id='app-header'>"
558
- " <h1>📰 AI Search Engine for Articles</h1>"
559
- " <p>Search Substack, Medium and top publications content or ask an AI across your feeds — fast and delightful.</p>"
560
- "</div>"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
561
  )
562
 
563
- with gr.Row():
564
- with gr.Column(scale=5):
565
- with gr.Group(elem_classes="panel"):
566
- gr.Markdown("#### Mode")
567
- search_type = gr.Radio(
568
- choices=["Search Articles", "Ask the AI"],
569
- value="Search Articles",
570
- label="",
571
- info="Choose between searching for articles or asking AI questions",
572
- elem_classes="segmented",
573
  )
574
-
575
- with gr.Accordion("Filters", open=True):
576
- query_text = gr.Textbox(
577
- label="Query",
578
- placeholder="Type your query here...",
579
- lines=4,
580
- )
581
- feed_author = gr.Dropdown(
582
  choices=[""] + feed_authors, label="Author (optional)", value=""
583
  )
584
- feed_name = gr.Dropdown(
585
  choices=[""] + feed_names, label="Newsletter (optional)", value=""
586
  )
587
- title_keywords = gr.Textbox(
588
- label="Title Keywords (optional)",
 
589
  placeholder="Filter by words in the title",
590
- visible=True,
591
  )
592
- limit = gr.Slider(
593
- minimum=1, maximum=20, step=1, label="Number of results", value=5, visible=True
594
  )
595
-
596
- with gr.Accordion("⚙️ LLM Settings", open=True):
597
- with gr.Group(visible=False) as llm_options:
598
- provider = gr.Dropdown(
599
- choices=["OpenRouter", "HuggingFace", "OpenAI"],
600
- label="Select LLM Provider",
601
- value="OpenRouter",
602
- )
603
- model = gr.Dropdown(
604
- choices=get_models_for_provider("OpenRouter"),
605
- label="Select Model",
606
- value="Automatic Model Selection (Model Routing)",
607
- )
608
- streaming_mode = gr.Radio(
609
- choices=["Streaming", "Non-Streaming"],
610
- value="Streaming",
611
- label="Answer Mode",
612
- info="Streaming shows results as they're generated",
613
- )
614
-
615
- submit_btn = gr.Button("🔎 Search / Ask AI", variant="primary", size="lg", elem_classes="submit-button")
616
-
617
- with gr.Column(scale=7):
618
- with gr.Group(elem_classes="panel output-panel"):
619
- output_html = gr.HTML(label="Results")
620
- model_info = gr.HTML(visible=False, elem_classes="model-info")
621
-
622
- # Event handlers
623
- def toggle_visibility(search_type):
624
- """
625
- Toggle visibility of components based on search type
626
-
627
- Args:
628
- search_type (str): The selected search type
629
- Returns:
630
- tuple: Visibility states for (llm_options, title_keywords, model_info)
631
- """
632
-
633
- show_title_keywords = search_type == "Search Articles"
634
- show_llm_options = search_type == "Ask the AI"
635
- show_model_info = search_type == "Ask the AI"
636
- show_limit_slider = search_type == "Search Articles"
637
-
638
- return (
639
- gr.Group(visible=show_llm_options), # llm_options
640
- gr.Textbox(visible=show_title_keywords), # title_keywords
641
- gr.HTML(visible=show_model_info), # model_info
642
- gr.Slider(visible=show_limit_slider), # limit
643
- )
644
-
645
- search_type.change(
646
- fn=toggle_visibility,
647
- inputs=[search_type],
648
- outputs=[llm_options, title_keywords, model_info, limit],
649
  )
650
 
651
- # Update model dropdown when provider changes
652
- provider.change(fn=update_model_choices, inputs=[provider], outputs=[model])
653
-
654
- # Unified submission handler
655
- def handle_submission(
656
- search_type,
657
- streaming_mode,
658
- query_text,
659
- feed_name,
660
- feed_author,
661
- title_keywords,
662
- limit,
663
- provider,
664
- model,
665
- ):
666
- """
667
- Handle submission based on search type and streaming mode
668
- Args:
669
- search_type (str): The selected search type
670
- streaming_mode (str): The selected streaming mode
671
- query_text (str): The query text
672
- feed_name (str): The selected feed name
673
- feed_author (str): The selected feed author
674
- title_keywords (str): The title keywords (if applicable)
675
- limit (int): The number of results to return
676
- provider (str): The selected LLM provider (if applicable)
677
- model (str): The selected model (if applicable)
678
- Returns:
679
- tuple: (HTML formatted answer string, model info string)
680
- """
681
- if search_type == "Search Articles":
682
- result = handle_search_articles(
683
- query_text, feed_name, feed_author, title_keywords, limit
684
- )
685
- return result, "" # Always return two values
686
- else: # Ask the AI
687
- if streaming_mode == "Non-Streaming":
688
- return handle_ai_question_non_streaming(
689
- query_text, feed_name, feed_author, limit, provider, model
690
- )
691
- else:
692
- # For streaming, we'll use a separate handler
693
- return "", ""
694
-
695
- # Streaming handler
696
- def handle_streaming_submission(
697
- search_type,
698
- streaming_mode,
699
- query_text,
700
- feed_name,
701
- feed_author,
702
- title_keywords,
703
- limit,
704
- provider,
705
- model,
706
- ):
707
- """
708
- Handle submission with streaming support
709
- Args:
710
- search_type (str): The selected search type
711
- streaming_mode (str): The selected streaming mode
712
- query_text (str): The query text
713
- feed_name (str): The selected feed name
714
- feed_author (str): The selected feed author
715
- title_keywords (str): The title keywords (if applicable)
716
- limit (int): The number of results to return
717
- provider (str): The selected LLM provider (if applicable)
718
- model (str): The selected model (if applicable)
719
- Yields:
720
- tuple: (HTML formatted answer string, model info string)
721
- """
722
- if search_type == "Ask the AI" and streaming_mode == "Streaming":
723
- yield from handle_ai_question_streaming(
724
- query_text, feed_name, feed_author, limit, provider, model
725
- )
726
- else:
727
- # For non-streaming cases, just return the regular result
728
- if search_type == "Search Articles":
729
- result = handle_search_articles(
730
- query_text, feed_name, feed_author, title_keywords, limit
731
- )
732
- yield result, ""
733
- else:
734
- result_html, model_info_text = handle_ai_question_non_streaming(
735
- query_text, feed_name, feed_author, limit, provider, model
736
- )
737
- yield result_html, model_info_text
738
 
739
- # Single click handler that routes based on mode
740
- submit_btn.click(
741
- fn=handle_streaming_submission,
742
  inputs=[
743
- search_type,
744
- streaming_mode,
745
- query_text,
746
- feed_name,
747
- feed_author,
748
- title_keywords,
749
- limit,
750
- provider,
751
- model,
752
  ],
753
- outputs=[output_html, model_info],
754
- show_progress=True,
  )
756
 
757
  # For local testing
 
42
  # Custom CSS for modern UI
43
  # -----------------------
44
  CUSTOM_CSS = """
45
+ /* Minimal, utility-first vibe with a neutral palette */
46
  :root {
47
  --border: 1px solid rgba(2, 6, 23, 0.08);
48
+ --surface: #ffffff;
49
+ --surface-muted: #f8fafc;
50
+ --text: #0f172a;
51
+ --muted: #475569;
52
+ --accent: #0ea5e9;
53
+ --accent-strong: #0284c7;
54
+ --radius: 12px;
55
+ --shadow: 0 8px 20px rgba(2, 6, 23, 0.06);
56
  }
57
 
58
+ .gradio-container, body {
59
+ background: var(--surface-muted);
60
+ color: var(--text);
61
  }
62
 
63
+ .dark .gradio-container, .dark body {
64
+ background: #0b1220;
65
+ color: #e5e7eb;
66
  }
67
 
68
+ .section {
69
+ background: var(--surface);
70
  border: var(--border);
71
+ border-radius: var(--radius);
72
+ box-shadow: var(--shadow);
73
+ padding: 16px;
74
  }
75
+
76
+ .dark .section {
77
+ background: #0f172a;
78
+ border: 1px solid rgba(255,255,255,0.08);
79
  }
80
 
81
+ .header {
82
+ display: flex;
83
+ align-items: baseline;
84
+ justify-content: space-between;
85
+ margin-bottom: 12px;
86
  }
87
+ .header h2 {
88
+ margin: 0;
89
+ font-size: 22px;
90
+ }
91
+ .subtle {
92
+ color: var(--muted);
93
+ font-size: 13px;
94
  }
95
 
96
+ .results-table {
97
+ width: 100%;
98
+ border-collapse: collapse;
99
+ font-size: 14px;
100
  }
101
+ .results-table th, .results-table td {
102
+ border: 1px solid #e2e8f0;
103
+ padding: 10px;
104
+ text-align: left;
105
+ vertical-align: top;
106
  }
107
+ .results-table th {
108
+ background: #f1f5f9;
109
+ }
110
+ .dark .results-table th {
111
+ background: #0b1325;
112
+ border-color: rgba(255,255,255,0.08);
113
+ color: #e5e7eb;
114
+ }
115
+ .dark .results-table td {
116
+ border-color: rgba(255,255,255,0.08);
117
+ color: #e2e8f0;
118
+ }
119
+ .results-table a {
120
+ color: var(--accent-strong);
121
+ text-decoration: none;
122
  font-weight: 600;
123
  }
124
+ .results-table a:hover {
125
+ text-decoration: underline;
126
+ }
127
+ .dark .results-table a {
128
+ color: #7dd3fc;
129
  }
130
 
131
+ .answer {
132
+ background: var(--surface);
133
+ border: var(--border);
134
+ border-radius: var(--radius);
 
135
  padding: 14px;
136
  }
137
+ .dark .answer {
138
+ background: #0f172a;
139
+ border: 1px solid rgba(255,255,255,0.08);
140
+ color: #e5e7eb;
141
+ }
142
+ .model-badge {
143
+ display: inline-block;
144
+ margin-top: 6px;
145
+ padding: 6px 10px;
146
+ border-radius: 999px;
147
  border: var(--border);
148
+ background: #eef2ff;
149
+ color: #3730a3;
150
+ font-weight: 600;
151
+ }
152
+ .dark .model-badge {
153
+ background: rgba(59,130,246,0.15);
154
+ color: #c7d2fe;
155
+ border: 1px solid rgba(255,255,255,0.08);
156
  }
157
+ .error {
158
+ border: 1px solid #fecaca;
159
+ background: #fff1f2;
160
+ color: #7f1d1d;
161
+ border-radius: var(--radius);
162
+ padding: 10px 12px;
163
  }
164
+ .dark .error {
165
+ border: 1px solid rgba(248,113,113,0.35);
166
+ background: rgba(127,29,29,0.25);
167
+ color: #fecaca;
168
  }
169
+
170
+ /* Sticky status banner with spinner */
171
+ #status-banner {
172
+ position: sticky;
173
+ top: 0;
174
+ z-index: 1000;
175
+ margin: 8px 0 12px 0;
176
+ }
177
+ #status-banner .banner {
178
  display: flex;
179
+ align-items: center;
180
+ gap: 10px;
181
+ padding: 10px 12px;
182
+ border-radius: var(--radius);
183
+ border: 1px solid #bae6fd;
184
+ background: #e0f2fe;
185
+ color: #075985;
186
+ box-shadow: var(--shadow);
187
  }
188
+ #status-banner .spinner {
189
+ width: 16px;
190
+ height: 16px;
191
  border-radius: 999px;
192
+ border: 2px solid currentColor;
193
+ border-right-color: transparent;
194
+ animation: spin 0.8s linear infinite;
195
  }
196
+ @keyframes spin {
197
+ to { transform: rotate(360deg); }
198
  }
199
+ .dark #status-banner .banner {
200
+ border-color: rgba(59,130,246,0.35);
201
+ background: rgba(2,6,23,0.55);
202
+ color: #93c5fd;
203
+ }
204
+
205
+ /* Actions row aligns buttons to the right, outside filter sections */
206
+ .actions {
207
+ display: flex;
208
+ justify-content: flex-end;
209
+ margin: 8px 0 12px 0;
210
  gap: 8px;
211
  }
 
212
 
213
+ /* Prominent CTA buttons (not full-width) */
214
+ .cta {
215
+ display: inline-flex;
216
  }
217
+ .cta .gr-button {
218
+ background: linear-gradient(180deg, var(--accent), var(--accent-strong));
219
+ color: #ffffff;
220
+ border: none;
221
+ border-radius: 14px;
222
+ padding: 12px 18px;
223
+ font-weight: 700;
224
+ font-size: 15px;
225
+ box-shadow: 0 10px 22px rgba(2,6,23,0.18);
226
+ width: auto !important;
227
  }
228
+ .cta .gr-button:hover {
229
+ transform: translateY(-1px);
230
+ filter: brightness(1.05);
231
  }
232
+ .cta .gr-button:focus-visible {
233
+ outline: 2px solid #93c5fd;
234
+ outline-offset: 2px;
235
+ }
236
+ .dark .cta .gr-button {
237
+ box-shadow: 0 12px 26px rgba(2,6,23,0.45);
238
  }
239
  """
240
 
 
357
  if not results:
358
  return "No results found."
359
 
360
+ # Render results as a compact table
361
+ html_output = (
362
+ "<div class='section'>"
363
+ " <div class='header'><h2>Results</h2><span class='subtle'>Unique titles</span></div>"
364
+ " <table class='results-table'>"
365
+ " <thead>"
366
+ " <tr><th>Title</th><th>Newsletter</th><th>Feed Author</th><th>Article Authors</th><th>Link</th></tr>"
367
+ " </thead>"
368
+ " <tbody>"
369
+ )
370
  for item in results:
371
  title = item.get("title", "No title")
372
  feed_n = item.get("feed_name", "N/A")
 
374
  authors = ", ".join(item.get("article_author") or ["N/A"])
375
  url = item.get("url", "#")
376
  html_output += (
377
+ " <tr>"
378
+ f" <td>{title}</td>"
379
+ f" <td>{feed_n}</td>"
380
+ f" <td>{feed_a}</td>"
381
+ f" <td>{authors}</td>"
382
+ f" <td><a href='{url}' target='_blank' rel='noopener noreferrer'>Open</a></td>"
383
+ " </tr>"
 
 
 
 
384
  )
385
+ html_output += " </tbody></table></div>"
386
  return html_output
387
 
388
  except Exception as e:
 
431
 
432
  try:
433
  answer_html = ""
434
+ model_info = f"<span class='model-badge'>Provider: {provider}</span>"
435
 
436
  for _, (event_type, content) in enumerate(call_ai(payload, streaming=True)):
437
  if event_type == "text":
 
438
  html_content = markdown.markdown(content, extensions=["tables"])
439
+ answer_html = f"<div class='answer'><div class='markdown-body'>{html_content}</div></div>"
440
  yield answer_html, model_info
441
 
442
  elif event_type == "model":
443
+ model_info = f"<span class='model-badge'>Provider: {provider} | Model: {content}</span>"
444
  yield answer_html, model_info
445
 
446
  elif event_type == "truncated":
447
+ answer_html += f"<div class='answer'><div style='color:#b45309; font-weight:700;'>⚠️ {content}</div></div>"
448
  yield answer_html, model_info
449
 
450
  elif event_type == "error":
451
+ error_html = f"<div class='error'><div>❌ {content}</div></div>"
452
  yield error_html, model_info
453
  break
454
 
455
  except Exception as e:
456
+ error_html = "<div class='error'>Error: {}</div>".format(str(e))
457
  yield error_html, model_info
458
 
459
 
 
491
 
492
  try:
493
  answer_html = ""
494
+ model_info = f"<span class='model-badge'>Provider: {provider}</span>"
495
 
496
  for event_type, content in call_ai(payload, streaming=False):
497
  if event_type == "text":
498
  html_content = markdown.markdown(content, extensions=["tables"])
499
+ answer_html = f"<div class='answer'><div class='markdown-body'>{html_content}</div></div>"
500
  elif event_type == "model":
501
+ model_info = f"<span class='model-badge'>Provider: {provider} | Model: {content}</span>"
502
  elif event_type == "truncated":
503
+ answer_html += f"<div class='answer'><div style='color:#b45309; font-weight:700;'>⚠️ {content}</div></div>"
504
  elif event_type == "error":
505
  return (
506
+ f"<div class='error'>❌ {content}</div>",
507
  model_info,
508
  )
509
 
 
511
 
512
  except Exception as e:
513
  return (
514
+ f"<div class='error'>Error: {str(e)}</div>",
515
+ f"<span class='model-badge'>Provider: {provider}</span>",
516
  )
517
 
518
 
 
529
 
530
 
531
  # -----------------------
532
+ # Progress/status helpers
533
+ # -----------------------
534
+ def start_search_status():
535
+ return "<div class='banner'><span class='spinner'></span><strong>Searching articles...</strong></div>"
536
+
537
+
538
+ def start_ai_status(streaming_mode):
539
+ mode = "streaming" if streaming_mode == "Streaming" else "non‑streaming"
540
+ return f"<div class='banner'><span class='spinner'></span><strong>Generating answer ({mode})...</strong></div>"
541
+
542
+
543
+ def clear_status():
544
+ return ""
545
+
546
+
547
+ # -----------------------
548
+ # Gradio UI (new layout)
549
  # -----------------------
550
+ def ask_ai_router(
551
+ streaming_mode,
552
+ query_text,
553
+ feed_name,
554
+ feed_author,
555
+ limit,
556
+ provider,
557
+ model,
558
+ ):
559
+ """
560
+ Route AI question to streaming or non-streaming handler.
561
+ Yields:
562
+ tuple: (answer_html, model_info_html)
563
+ """
564
+ if streaming_mode == "Streaming":
565
+ yield from handle_ai_question_streaming(
566
+ query_text, feed_name, feed_author, limit, provider, model
567
+ )
568
+ else:
569
+ result_html, model_info_text = handle_ai_question_non_streaming(
570
+ query_text, feed_name, feed_author, limit, provider, model
571
+ )
572
+ yield result_html, model_info_text
573
+
574
+
575
+ with gr.Blocks(title="Article Search Engine", theme=gr.themes.Base(), css=CUSTOM_CSS) as demo:
576
+ gr.Markdown(
577
+ "### Article Search Engine\n"
578
+ "Search across substack, medium and top publications articles on AI topics or ask questions with an AI assistant."
579
  )
580
 
581
+ # Sticky status banner (empty by default)
582
+ status_banner = gr.HTML(value="", elem_id="status-banner")
583
+
584
+ with gr.Tabs():
585
+ # Search Tab
586
+ with gr.Tab("Search"):
587
+ with gr.Group(elem_classes="section"):
588
+ gr.Markdown("#### Find articles on any AI topic")
589
+ search_query = gr.Textbox(
590
+ label="Query",
591
+ placeholder="What are you looking for?",
592
+ lines=3,
593
  )
594
+ with gr.Row():
595
+ search_feed_author = gr.Dropdown(
 
 
 
 
 
 
596
  choices=[""] + feed_authors, label="Author (optional)", value=""
597
  )
598
+ search_feed_name = gr.Dropdown(
599
  choices=[""] + feed_names, label="Newsletter (optional)", value=""
600
  )
601
+ with gr.Row():
602
+ search_title_keywords = gr.Textbox(
603
+ label="Title keywords (optional)",
604
  placeholder="Filter by words in the title",
 
605
  )
606
+ search_limit = gr.Slider(
607
+ minimum=1, maximum=20, step=1, label="Number of results", value=5
608
  )
609
+ with gr.Row(elem_classes="actions"):
610
+ search_btn = gr.Button("Search", variant="primary", elem_classes="cta")
611
+ search_output = gr.HTML(label="Results")
612
+
613
+ # Ask AI Tab
614
+ with gr.Tab("Ask AI"):
615
+ with gr.Group(elem_classes="section"):
616
+ gr.Markdown("#### Ask an AI assistant about any AI topic")
617
+ ai_query = gr.Textbox(
618
+ label="Your question",
619
+ placeholder="Ask a question. The AI will use the articles for context.",
620
+ lines=4,
621
+ )
622
+ with gr.Row():
623
+ ai_feed_author = gr.Dropdown(
624
+ choices=[""] + feed_authors, label="Author (optional)", value=""
625
+ )
626
+ ai_feed_name = gr.Dropdown(
627
+ choices=[""] + feed_names, label="Newsletter (optional)", value=""
628
+ )
629
+ ai_limit = gr.Slider(
630
+ minimum=1, maximum=20, step=1, label="Max articles", value=5
631
+ )
632
+ with gr.Row():
633
+ provider_dd = gr.Dropdown(
634
+ choices=["OpenRouter", "HuggingFace", "OpenAI"],
635
+ label="LLM Provider",
636
+ value="OpenRouter",
637
+ )
638
+ model_dd = gr.Dropdown(
639
+ choices=get_models_for_provider("OpenRouter"),
640
+ label="Model",
641
+ value="Automatic Model Selection (Model Routing)",
642
+ )
643
+ streaming_mode_dd = gr.Radio(
644
+ choices=["Streaming", "Non-Streaming"],
645
+ value="Streaming",
646
+ label="Answer mode",
647
+ )
648
+ with gr.Row(elem_classes="actions"):
649
+ ask_btn = gr.Button("Run", variant="primary", elem_classes="cta")
650
+ ai_answer = gr.HTML(label="Answer")
651
+ ai_model_info = gr.HTML(label="Model")
652
+
653
+ # Wire events with sticky status banner
654
+ search_btn.click(
655
+ fn=start_search_status,
656
+ inputs=[],
657
+ outputs=[status_banner],
658
+ show_progress=False,
659
+ ).then(
660
+ fn=handle_search_articles,
661
+ inputs=[
662
+ search_query,
663
+ search_feed_name,
664
+ search_feed_author,
665
+ search_title_keywords,
666
+ search_limit,
667
+ ],
668
+ outputs=[search_output],
669
+ show_progress=False,
670
+ ).then(
671
+ fn=clear_status,
672
+ inputs=[],
673
+ outputs=[status_banner],
674
+ show_progress=False,
675
  )
676
 
677
+ provider_dd.change(fn=update_model_choices, inputs=[provider_dd], outputs=[model_dd])
678
 
679
+ ask_btn.click(
680
+ fn=start_ai_status,
681
+ inputs=[streaming_mode_dd],
682
+ outputs=[status_banner],
683
+ show_progress=False,
684
+ ).then(
685
+ fn=ask_ai_router,
686
  inputs=[
687
+ streaming_mode_dd,
688
+ ai_query,
689
+ ai_feed_name,
690
+ ai_feed_author,
691
+ ai_limit,
692
+ provider_dd,
693
+ model_dd,
694
  ],
695
+ outputs=[ai_answer, ai_model_info],
696
+ show_progress=False,
697
+ ).then(
698
+ fn=clear_status,
699
+ inputs=[],
700
+ outputs=[status_banner],
701
+ show_progress=False,
702
  )
703
 
704
  # For local testing
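
For context on the wiring introduced above: both the Search and Ask AI buttons now chain three Gradio events, first writing the sticky status banner, then running the real handler, then clearing the banner. A minimal standalone sketch of that pattern (assuming a Gradio release that supports .then() event chaining; the handler names below are illustrative, not the app's own):

import gradio as gr

def show_status():
    # Step 1: populate the sticky banner while the slow call runs
    return "<div class='banner'><span class='spinner'></span>Working...</div>"

def do_work(query):
    # Step 2: stand-in for the real handler (article search or AI call)
    return f"<p>Results for: {query}</p>"

def clear_status():
    # Step 3: empty the banner once the handler returns
    return ""

with gr.Blocks() as demo:
    status = gr.HTML(elem_id="status-banner")
    query = gr.Textbox(label="Query")
    run_btn = gr.Button("Run")
    output = gr.HTML()

    # Chain the three events so the banner brackets the handler call
    run_btn.click(show_status, inputs=[], outputs=[status], show_progress=False).then(
        do_work, inputs=[query], outputs=[output], show_progress=False
    ).then(clear_status, inputs=[], outputs=[status], show_progress=False)

if __name__ == "__main__":
    demo.launch()
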
src/api/services/providers/utils/prompts.py CHANGED
@@ -6,7 +6,7 @@ from src.api.models.provider_models import ModelConfig
6
  config = ModelConfig()
7
 
8
  PROMPT = """
9
- You are a skilled research assistant specialized in analyzing Substack newsletters.
10
  Respond to the user’s query using the provided context from these articles,
11
  that is retrieved from a vector database without relying on outside knowledge or assumptions.
12
 
@@ -34,7 +34,7 @@ that is retrieved from a vector database without relying on outside knowledge or
34
 
35
  # Create a new prompt
36
  prompt = opik.Prompt(
37
- name="substack_research_assistant", prompt=PROMPT, metadata={"environment": "development"}
38
  )
39
 
40
 
 
6
  config = ModelConfig()
7
 
8
  PROMPT = """
9
+ You are a skilled research assistant specialized in analyzing Substack, Medium and other newsletters.
10
  Respond to the user’s query using the provided context from these articles,
11
  that is retrieved from a vector database without relying on outside knowledge or assumptions.
12
 
 
34
 
35
  # Create a new prompt
36
  prompt = opik.Prompt(
37
+ name="newsletter_research_assistant", prompt=PROMPT, metadata={"environment": "development"}
38
  )
39
 
40