File size: 2,449 Bytes
bdff9e1
 
 
d704d9e
bdff9e1
 
 
 
 
 
d704d9e
 
 
 
 
 
 
 
 
 
bdff9e1
d704d9e
bdff9e1
d704d9e
 
 
 
32503a2
d704d9e
 
 
 
 
 
 
 
 
32503a2
d704d9e
 
 
 
 
 
 
 
 
 
 
32503a2
d704d9e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bdff9e1
d704d9e
 
 
 
 
 
 
bdff9e1
 
d704d9e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
from omegaconf import OmegaConf
from query import VectaraQuery
import os
import gradio as gr

def isTrue(x) -> bool:
    """Coerce *x* to a boolean.

    A genuine bool is returned unchanged; anything else is treated as a
    string and counts as true only if it equals 'true' after trimming
    whitespace and lowercasing (so 'True', ' TRUE ' are true; 'yes' is not).
    """
    return x if isinstance(x, bool) else x.strip().lower() == 'true'

# Required: comma-separated list of Vectara corpus keys to query against.
corpus_keys = str(os.environ['corpus_keys']).split(',')

# Gather all app settings from environment variables up front, then wrap
# them in an OmegaConf config so the rest of the module reads cfg.<name>.
_settings = {
    'corpus_keys': corpus_keys,
    'api_key': str(os.environ['api_key']),
    'title': os.environ['title'],
    'source_data_desc': os.environ['source_data_desc'],
    # 'streaming' may be unset; the bool default short-circuits isTrue.
    'streaming': isTrue(os.environ.get('streaming', False)),
    # Optional settings fall back to None when absent.
    'prompt_name': os.environ.get('prompt_name', None),
    'examples': os.environ.get('examples', None),
}
cfg = OmegaConf.create(_settings)

# Single shared query client, reused for every chat request.
vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)

def respond(message, history):
    """Chat handler for gr.ChatInterface.

    Generator: in streaming mode it yields progressively longer prefixes
    of the answer as chunks arrive; otherwise it yields the complete
    answer exactly once. *history* is accepted per the ChatInterface
    contract but is not used here.
    """
    if not cfg.streaming:
        # Non-streaming: one round-trip, one yield with the full reply.
        yield vq.submit_query(message)
        return

    # Streaming: accumulate chunks and re-yield the growing text so the
    # UI shows the answer being typed out.
    accumulated = ""
    for chunk in vq.submit_query_streaming(message):
        accumulated += chunk
        yield accumulated

# HTML header rendered above the chat: title + Vectara logo in one row,
# a one-line description of the source data in a second row. The f-string
# interpolates cfg.title and cfg.source_data_desc from the environment.
heading_html = f'''
                <table>
                  <tr>
                      <td style="width: 80%; text-align: left; vertical-align: middle;"> <h1>Vectara AI Assistant: {cfg.title}</h1> </td>
                      <td style="width: 20%; text-align: right; vertical-align: middle;"> <img src="https://github.com/david-oplatka/chatbot-streamlit/blob/main/Vectara-logo.png?raw=true"> </td>
                  </tr>
                  <tr>
                      <td colspan="2" style="font-size: 16px;">This demo uses Retrieval Augmented Generation from <a href="https://vectara.com/">Vectara</a> to ask questions about {cfg.source_data_desc}.</td>
                  </tr>
                </table>
                '''

bot_css = """
table {
  border: none;
  width: 100%;
  table-layout: fixed;
  border-collapse: separate;
}
td {
  vertical-align: middle;
  border: none;
}
img {
  width: 75%;
}
h1 {
  font-size: 2em; /* Adjust the size as needed */
}
"""

# Optional starter prompts: the 'examples' env var is a comma-separated
# list; None disables the examples strip entirely.
app_examples = (
    [example.strip() for example in cfg.examples.split(",")]
    if cfg.examples else None
)

# Assemble the chat UI. cache_examples=False so example clicks always hit
# the live backend instead of replaying stored answers.
demo = gr.ChatInterface(respond, description = heading_html, css = bot_css,
                        chatbot = gr.Chatbot(value = [[None, "How may I help you?"]]), examples = app_examples, cache_examples = False)

if __name__ == "__main__":
    demo.launch()