Spaces:
Running
on
Zero
Running
on
Zero
RTE Build
committed on
Commit
·
a099612
0
Parent(s):
Deployment
Browse files- .gitattributes +37 -0
- .gitignore +179 -0
- .gitlint +143 -0
- .pre-commit-config.yaml +51 -0
- DEVELOPMENT.md +179 -0
- README.md +41 -0
- data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_20_0.png +3 -0
- data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_3_0.png +3 -0
- data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_7_0.png +3 -0
- data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_7_1.png +3 -0
- data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_7_2.png +3 -0
- data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_8_0.png +3 -0
- data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_8_1.png +3 -0
- data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_8_2.png +3 -0
- data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_8_3.png +3 -0
- data/final_v2_mar04/images/wihlborgs/images/image_16_0.png +3 -0
- data/final_v2_mar04/images/wihlborgs/images/image_16_1.png +3 -0
- data/final_v2_mar04/images/wihlborgs/images/image_16_2.png +3 -0
- data/final_v2_mar04/images/wihlborgs/images/image_16_3.png +3 -0
- data/final_v2_mar04/images/wihlborgs/images/image_17_0.png +3 -0
- data/final_v2_mar04/images/wihlborgs/images/image_17_1.png +3 -0
- data/final_v2_mar04/images/wihlborgs/images/image_17_2.png +3 -0
- data/final_v2_mar04/milvus/.milvus.db.lock +0 -0
- data/final_v2_mar04/milvus/milvus.db +3 -0
- data/final_v2_mar04/pdfs/IBM_Annual_Report_2007_3-20.pdf +3 -0
- data/final_v2_mar04/pdfs/wihlborgs-2-13_16-18.pdf +3 -0
- data/final_v2_mar04/preview/IBM-Z.png +3 -0
- data/final_v2_mar04/preview/IBM-financial-2010.png +3 -0
- data/final_v2_mar04/preview/Wilhlborg-financial.png +3 -0
- poetry.lock +0 -0
- pyproject.toml +62 -0
- requirements.txt +101 -0
- src/README.md +14 -0
- src/app.css +39 -0
- src/app_head.html +12 -0
- src/qa_app.py +331 -0
- src/rag_app.py +275 -0
- src/sandbox/light_rag/credits.txt +4 -0
- src/sandbox/light_rag/hf_embedding.py +38 -0
- src/sandbox/light_rag/hf_llm.py +24 -0
- src/sandbox/light_rag/light_rag.py +168 -0
- src/sandbox/light_rag/utils.py +46 -0
- src/themes/carbon.py +147 -0
- src/themes/research_monochrome.py +152 -0
.gitattributes
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
data/**/* filter=lfs diff=lfs merge=lfs -text
|
37 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Byte-compiled / optimized / DLL files
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
|
6 |
+
# C extensions
|
7 |
+
*.so
|
8 |
+
|
9 |
+
# Distribution / packaging
|
10 |
+
.Python
|
11 |
+
build/
|
12 |
+
develop-eggs/
|
13 |
+
dist/
|
14 |
+
downloads/
|
15 |
+
eggs/
|
16 |
+
.eggs/
|
17 |
+
lib/
|
18 |
+
lib64/
|
19 |
+
parts/
|
20 |
+
sdist/
|
21 |
+
var/
|
22 |
+
wheels/
|
23 |
+
share/python-wheels/
|
24 |
+
*.egg-info/
|
25 |
+
.installed.cfg
|
26 |
+
*.egg
|
27 |
+
MANIFEST
|
28 |
+
|
29 |
+
# PyInstaller
|
30 |
+
# Usually these files are written by a python script from a template
|
31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
32 |
+
*.manifest
|
33 |
+
*.spec
|
34 |
+
|
35 |
+
# Installer logs
|
36 |
+
pip-log.txt
|
37 |
+
pip-delete-this-directory.txt
|
38 |
+
|
39 |
+
# Unit test / coverage reports
|
40 |
+
htmlcov/
|
41 |
+
.tox/
|
42 |
+
.nox/
|
43 |
+
.coverage
|
44 |
+
.coverage.*
|
45 |
+
.cache
|
46 |
+
nosetests.xml
|
47 |
+
coverage.xml
|
48 |
+
*.cover
|
49 |
+
*.py,cover
|
50 |
+
.hypothesis/
|
51 |
+
.pytest_cache/
|
52 |
+
cover/
|
53 |
+
|
54 |
+
# Translations
|
55 |
+
*.mo
|
56 |
+
*.pot
|
57 |
+
|
58 |
+
# Django stuff:
|
59 |
+
*.log
|
60 |
+
local_settings.py
|
61 |
+
db.sqlite3
|
62 |
+
db.sqlite3-journal
|
63 |
+
|
64 |
+
# Flask stuff:
|
65 |
+
instance/
|
66 |
+
.webassets-cache
|
67 |
+
|
68 |
+
# Scrapy stuff:
|
69 |
+
.scrapy
|
70 |
+
|
71 |
+
# Sphinx documentation
|
72 |
+
docs/_build/
|
73 |
+
|
74 |
+
# PyBuilder
|
75 |
+
.pybuilder/
|
76 |
+
target/
|
77 |
+
|
78 |
+
# Jupyter Notebook
|
79 |
+
.ipynb_checkpoints
|
80 |
+
|
81 |
+
# IPython
|
82 |
+
profile_default/
|
83 |
+
ipython_config.py
|
84 |
+
|
85 |
+
# pyenv
|
86 |
+
# For a library or package, you might want to ignore these files since the code is
|
87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
88 |
+
# .python-version
|
89 |
+
|
90 |
+
# pipenv
|
91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
94 |
+
# install all needed dependencies.
|
95 |
+
#Pipfile.lock
|
96 |
+
|
97 |
+
# UV
|
98 |
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
100 |
+
# commonly ignored for libraries.
|
101 |
+
#uv.lock
|
102 |
+
|
103 |
+
# poetry
|
104 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
105 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
106 |
+
# commonly ignored for libraries.
|
107 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
108 |
+
#poetry.lock
|
109 |
+
|
110 |
+
# pdm
|
111 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
112 |
+
#pdm.lock
|
113 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
114 |
+
# in version control.
|
115 |
+
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
116 |
+
.pdm.toml
|
117 |
+
.pdm-python
|
118 |
+
.pdm-build/
|
119 |
+
|
120 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
121 |
+
__pypackages__/
|
122 |
+
|
123 |
+
# Celery stuff
|
124 |
+
celerybeat-schedule
|
125 |
+
celerybeat.pid
|
126 |
+
|
127 |
+
# SageMath parsed files
|
128 |
+
*.sage.py
|
129 |
+
|
130 |
+
# Environments
|
131 |
+
.env
|
132 |
+
.venv
|
133 |
+
env/
|
134 |
+
venv/
|
135 |
+
ENV/
|
136 |
+
env.bak/
|
137 |
+
venv.bak/
|
138 |
+
|
139 |
+
# Spyder project settings
|
140 |
+
.spyderproject
|
141 |
+
.spyproject
|
142 |
+
|
143 |
+
# Rope project settings
|
144 |
+
.ropeproject
|
145 |
+
|
146 |
+
# mkdocs documentation
|
147 |
+
/site
|
148 |
+
|
149 |
+
# mypy
|
150 |
+
.mypy_cache/
|
151 |
+
.dmypy.json
|
152 |
+
dmypy.json
|
153 |
+
|
154 |
+
# Pyre type checker
|
155 |
+
.pyre/
|
156 |
+
|
157 |
+
# pytype static type analyzer
|
158 |
+
.pytype/
|
159 |
+
|
160 |
+
# Cython debug symbols
|
161 |
+
cython_debug/
|
162 |
+
|
163 |
+
# PyCharm
|
164 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
165 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
166 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
167 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
168 |
+
#.idea/
|
169 |
+
|
170 |
+
|
171 |
+
# Boilerplate specific ignores
|
172 |
+
.gradio/
|
173 |
+
.ruff_cache/
|
174 |
+
|
175 |
+
|
176 |
+
## own ignores
|
177 |
+
/src/app_text.py
|
178 |
+
/.idea/
|
179 |
+
/nbs/
|
.gitlint
ADDED
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Edit this file as you like.
|
2 |
+
#
|
3 |
+
# All these sections are optional. Each section with the exception of [general] represents
|
4 |
+
# one rule and each key in it is an option for that specific rule.
|
5 |
+
#
|
6 |
+
# Rules and sections can be referenced by their full name or by id. For example
|
7 |
+
# section "[body-max-line-length]" could also be written as "[B1]". Full section names are
|
8 |
+
# used in here for clarity.
|
9 |
+
#
|
10 |
+
# [general]
|
11 |
+
# Ignore certain rules, this example uses both full name and id
|
12 |
+
# ignore=title-trailing-punctuation, T3
|
13 |
+
|
14 |
+
[general]
|
15 |
+
# You HAVE to add the rule here to enable it, only configuring (such as below)
|
16 |
+
# does NOT enable it.
|
17 |
+
contrib=contrib-title-conventional-commits
|
18 |
+
|
19 |
+
[contrib-title-conventional-commits]
|
20 |
+
# Specify allowed commit types. For details see: https://www.conventionalcommits.org/
|
21 |
+
types = build,chore,ci,docs,feat,fix,perf,refactor,revert,style,test
|
22 |
+
|
23 |
+
# verbosity should be a value between 1 and 3, the commandline -v flags take precedence over this
|
24 |
+
# verbosity = 2
|
25 |
+
|
26 |
+
# By default gitlint will ignore merge, revert, fixup and squash commits.
|
27 |
+
# ignore-merge-commits=true
|
28 |
+
# ignore-revert-commits=true
|
29 |
+
# ignore-fixup-commits=true
|
30 |
+
# ignore-squash-commits=true
|
31 |
+
|
32 |
+
# Ignore any data sent to gitlint via stdin
|
33 |
+
# ignore-stdin=true
|
34 |
+
|
35 |
+
# Fetch additional meta-data from the local repository when manually passing a
|
36 |
+
# commit message to gitlint via stdin or --commit-msg. Disabled by default.
|
37 |
+
# staged=true
|
38 |
+
|
39 |
+
# Hard fail when the target commit range is empty. Note that gitlint will
|
40 |
+
# already fail by default on invalid commit ranges. This option is specifically
|
41 |
+
# to tell gitlint to fail on *valid but empty* commit ranges.
|
42 |
+
# Disabled by default.
|
43 |
+
# fail-without-commits=true
|
44 |
+
|
45 |
+
# Enable debug mode (prints more output). Disabled by default.
|
46 |
+
# debug=true
|
47 |
+
|
48 |
+
# Enable community contributed rules
|
49 |
+
# See http://jorisroovers.github.io/gitlint/contrib_rules for details
|
50 |
+
# contrib=contrib-title-conventional-commits,CC1
|
51 |
+
|
52 |
+
# Set the extra-path where gitlint will search for user defined rules
|
53 |
+
# See http://jorisroovers.github.io/gitlint/user_defined_rules for details
|
54 |
+
# extra-path=examples/
|
55 |
+
|
56 |
+
# This is an example of how to configure the "title-max-length" rule and
|
57 |
+
# set the line-length it enforces to 50
|
58 |
+
# [title-max-length]
|
59 |
+
# line-length=50
|
60 |
+
|
61 |
+
# Conversely, you can also enforce minimal length of a title with the
|
62 |
+
# "title-min-length" rule:
|
63 |
+
# [title-min-length]
|
64 |
+
# min-length=5
|
65 |
+
|
66 |
+
# [title-must-not-contain-word]
|
67 |
+
# Comma-separated list of words that should not occur in the title. Matching is case
|
68 |
+
# insensitive. It's fine if the keyword occurs as part of a larger word (so "WIPING"
|
69 |
+
# will not cause a violation, but "WIP: my title" will).
|
70 |
+
# words=wip
|
71 |
+
|
72 |
+
# [title-match-regex]
|
73 |
+
# python-style regex that the commit-msg title must match
|
74 |
+
# Note that the regex can contradict with other rules if not used correctly
|
75 |
+
# (e.g. title-must-not-contain-word).
|
76 |
+
# regex=^US[0-9]*
|
77 |
+
|
78 |
+
# [body-max-line-length]
|
79 |
+
# line-length=72
|
80 |
+
|
81 |
+
# [body-min-length]
|
82 |
+
# min-length=5
|
83 |
+
|
84 |
+
# [body-is-missing]
|
85 |
+
# Whether to ignore this rule on merge commits (which typically only have a title)
|
86 |
+
# default = True
|
87 |
+
# ignore-merge-commits=false
|
88 |
+
|
89 |
+
# [body-changed-file-mention]
|
90 |
+
# List of files that need to be explicitly mentioned in the body when they are changed
|
91 |
+
# This is useful for when developers often erroneously edit certain files or git submodules.
|
92 |
+
# By specifying this rule, developers can only change the file when they explicitly reference
|
93 |
+
# it in the commit message.
|
94 |
+
# files=gitlint-core/gitlint/rules.py,README.md
|
95 |
+
|
96 |
+
# [body-match-regex]
|
97 |
+
# python-style regex that the commit-msg body must match.
|
98 |
+
# E.g. body must end in My-Commit-Tag: foo
|
99 |
+
# regex=My-Commit-Tag: foo$
|
100 |
+
|
101 |
+
# [author-valid-email]
|
102 |
+
# python-style regex that the commit author email address must match.
|
103 |
+
# For example, use the following regex if you only want to allow email addresses from foo.com
|
104 |
+
# regex=[^@]+@foo.com
|
105 |
+
|
106 |
+
# [ignore-by-title]
|
107 |
+
# Ignore certain rules for commits of which the title matches a regex
|
108 |
+
# E.g. Match commit titles that start with "Release"
|
109 |
+
# regex=^Release(.*)
|
110 |
+
|
111 |
+
# Ignore certain rules, you can reference them by their id or by their full name
|
112 |
+
# Use 'all' to ignore all rules
|
113 |
+
# ignore=T1,body-min-length
|
114 |
+
|
115 |
+
# [ignore-by-body]
|
116 |
+
# Ignore certain rules for commits of which the body has a line that matches a regex
|
117 |
+
# E.g. Match bodies that have a line that contains "release"
|
118 |
+
# regex=(.*)release(.*)
|
119 |
+
#
|
120 |
+
# Ignore certain rules, you can reference them by their id or by their full name
|
121 |
+
# Use 'all' to ignore all rules
|
122 |
+
# ignore=T1,body-min-length
|
123 |
+
|
124 |
+
# [ignore-body-lines]
|
125 |
+
# Ignore certain lines in a commit body that match a regex.
|
126 |
+
# E.g. Ignore all lines that start with 'Co-Authored-By'
|
127 |
+
# regex=^Co-Authored-By
|
128 |
+
|
129 |
+
# [ignore-by-author-name]
|
130 |
+
# Ignore certain rules for commits of which the author name matches a regex
|
131 |
+
# E.g. Match commits made by dependabot
|
132 |
+
# regex=(.*)dependabot(.*)
|
133 |
+
#
|
134 |
+
# Ignore certain rules, you can reference them by their id or by their full name
|
135 |
+
# Use 'all' to ignore all rules
|
136 |
+
# ignore=T1,body-min-length
|
137 |
+
|
138 |
+
# This is a contrib rule - a community contributed rule. These are disabled by default.
|
139 |
+
# You need to explicitly enable them one-by-one by adding them to the "contrib" option
|
140 |
+
# under [general] section above.
|
141 |
+
# [contrib-title-conventional-commits]
|
142 |
+
# Specify allowed commit types. For details see: https://www.conventionalcommits.org/
|
143 |
+
# types = bugfix,user-story,epic
|
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
repos:
|
2 |
+
- repo: https://github.com/pre-commit/pre-commit-hooks
|
3 |
+
rev: v5.0.0
|
4 |
+
hooks:
|
5 |
+
- id: check-added-large-files
|
6 |
+
- id: check-ast
|
7 |
+
- id: check-case-conflict
|
8 |
+
- id: check-json
|
9 |
+
- id: check-merge-conflict
|
10 |
+
- id: check-toml
|
11 |
+
- id: end-of-file-fixer
|
12 |
+
- repo: https://github.com/astral-sh/ruff-pre-commit
|
13 |
+
# Ruff version.
|
14 |
+
rev: v0.8.6
|
15 |
+
hooks:
|
16 |
+
- id: ruff
|
17 |
+
- repo: https://github.com/pycqa/isort
|
18 |
+
rev: 5.13.2
|
19 |
+
hooks:
|
20 |
+
- id: isort
|
21 |
+
args: ["--profile", "black"]
|
22 |
+
- repo: https://github.com/asottile/pyupgrade
|
23 |
+
rev: v3.19.1
|
24 |
+
hooks:
|
25 |
+
- id: pyupgrade
|
26 |
+
args: ["--py310-plus"]
|
27 |
+
- repo: https://github.com/psf/black
|
28 |
+
rev: 24.10.0
|
29 |
+
hooks:
|
30 |
+
- id: black
|
31 |
+
args:
|
32 |
+
- --line-length=120
|
33 |
+
- repo: https://github.com/jorisroovers/gitlint
|
34 |
+
rev: v0.19.1
|
35 |
+
hooks:
|
36 |
+
- id: gitlint
|
37 |
+
name: gitlint
|
38 |
+
language: python
|
39 |
+
entry: gitlint
|
40 |
+
args: [--staged, --msg-filename]
|
41 |
+
stages: [commit-msg]
|
42 |
+
- repo: https://github.com/python-poetry/poetry
|
43 |
+
rev: '1.8.0'
|
44 |
+
hooks:
|
45 |
+
- id: poetry-check
|
46 |
+
- id: poetry-lock
|
47 |
+
args: [--no-update]
|
48 |
+
language_version: "3.10"
|
49 |
+
- id: poetry-export
|
50 |
+
name: poetry export for base requirements
|
51 |
+
args: [-f, requirements.txt, -o, requirements.txt, -n, --only=main, --without-hashes]
|
DEVELOPMENT.md
ADDED
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# IBM Research Hugging Face Spaces gradio template
|
2 |
+
|
3 |
+
This template repository lets you quickly build a [gradio](https://www.gradio.app/) Hugging Face spaces demo for the [ibm-granite org](https://huggingface.co/ibm-granite). It is set up with the requirements, theming and analytics for the ibm-granite org as well as pre-commit hooks and linting configuration to maintain a consistent code standard across all demos.
|
4 |
+
|
5 |
+
## 👩💻 Introduction
|
6 |
+
|
7 |
+
To deploy demos to the ibm-granite org on Hugging Face, you will be working with the Research Design Technical Experiences (RDTE) team via this GitHub org. You will not gain access to the ibm-granite Hugging Face org as there are limited seats available. Hence, you will work via the RDTE team (who have write access) to create and deploy demos to Hugging Face.
|
8 |
+
|
9 |
+
## 🛠️ Getting started
|
10 |
+
|
11 |
+
This is the place to start when building gradio demos for IBM Granite. Complete the following steps to get a repository set up and configured for your demo as well as the deployment pipeline to validate and push it to Hugging Face spaces.
|
12 |
+
|
13 |
+
1. [Raise an onboarding request](https://github.ibm.com/ibm-huggingface-space-demos/deployment/issues/new?assignees=james-sutton,gwhite&labels=onboarding%2Ctriage&projects=&template=onboarding.yaml&title=%5BOnboarding%5D%3A+). Please fill the templated onboarding request to get a new repository set up for you in this org and to give access to anything else required.
|
14 |
+
2. Once your repository has been created, please either update it with your existing demo if you have one, or have a play with the example and modify it to create your new demo. You'll be working in the `main` branch whilst developing your demo. Your `main` branch is linked to the "QA" instance of your demo in the IBM org on Hugging Face.
|
15 |
+
3. Make sure that you follow this development guide and use the pre-configured pre-commit hooks before every commit and push.
|
16 |
+
4. Once you are happy with your demo and want to get it deployed into production on Hugging Face spaces in the ibm-granite org, open a pull request to merge the `main` branch into the `stable` branch. The RDTE team will validate the demo works well both from a technical and UX standpoint. If your demo needs any custom environment variables or secrets, let the RDTE team know and we will contact you directly to get them added to the Space configuration on Hugging Face.
|
17 |
+
5. Once the Pull request has been approved, you can merge it into the `stable` branch. A deployment will then push your changes to Hugging Face spaces where it will build and become available for use. Initially, both the "QA" and "Production" versions of your demo will be marked as private and only visible to members of the ibm-research org (QA) and ibm-granite org (production) that have logged into Hugging Face. The "QA" version will always remain private in the ibm-research org. However, when the RDTE team are happy to publish the demo to stable, they will mark the "Production" version as public in the ibm-granite org.
|
18 |
+
|
19 |
+
### Onboarding Process Summary
|
20 |
+
|
21 |
+
The following diagram explains the onboarding process. Actions that you, the developer, take are shown in darker blue. Actions that we, the RDTE team, take are shown in lighter blue. The lighter blue steps that have darker borders are automations maintained by the RDTE team, these steps require no manual intervention.
|
22 |
+
|
23 |
+
```mermaid
|
24 |
+
flowchart TD
|
25 |
+
1[Developer opens onboarding ticket in GHE Org]:::developer-->2
|
26 |
+
2{RDTE team review the request}:::rdte--Request returned with comments-->1
|
27 |
+
2--Approved-->3
|
28 |
+
3[Developer is invited to join the ibm-research org on HF]:::rdte-->4
|
29 |
+
4[New git repository created from template and configured]:::rdte-->5
|
30 |
+
5[QA HF space created in IBM org]:::rdte-->6
|
31 |
+
6[Developers push commits to main branch]:::developer-->7
|
32 |
+
7-->6
|
33 |
+
7[Main branch deployed to QA space in ibm-granite org on HF]:::rdteauto-->8
|
34 |
+
8[Developers open/update PR to merge main branch to stable branch]:::developer-->9
|
35 |
+
9{PR review}:::rdte--PR Approved-->10
|
36 |
+
9--Changes requested-->6
|
37 |
+
10{Prod space exists?}:::rdte-- Yes -->12
|
38 |
+
10-- No -->11
|
39 |
+
11[Prod HF space created in ibm-granite org]:::rdte-->12
|
40 |
+
12[Merge PR]:::developer-->13
|
41 |
+
13[Stable branch deployed to prod space in ibm-granite org on HF]:::rdteauto
|
42 |
+
classDef rdte fill:#EDF5FF,stroke:#D0E2FF,color:#000;
|
43 |
+
classDef rdteauto fill:#EDF5FF,stroke:#78A9FF,color:#000;
|
44 |
+
classDef developer fill:#A6C8FF,stroke:#78A9FF,color:#000;
|
45 |
+
```
|
46 |
+
|
47 |
+
## 🛠️ Development guide
|
48 |
+
|
49 |
+
Further information on developing the code in this repository is provided below.
|
50 |
+
|
51 |
+
### Clone your code repository
|
52 |
+
|
53 |
+
Once you have been notified that your code repository has been created in this org, you can clone it to your local machine and start work.
|
54 |
+
|
55 |
+
If you just want to play with our template, you're welcome to [use it](https://github.ibm.com/new?template_name=gradio-template&template_owner=ibm-huggingface-space-demos) to create a new code repository in another org. Later, for deployment, you wil need to move your code to the repository created in this org.
|
56 |
+
|
57 |
+
### Prerequisites
|
58 |
+
|
59 |
+
Some things you will need to do on your machine before developing.
|
60 |
+
|
61 |
+
#### Precommit
|
62 |
+
|
63 |
+
[Precommit](https://pre-commit.com) is a tool that adds git commit hooks. You will need to [install](https://pre-commit.com/#install) it on your machine and then run within your code repository:
|
64 |
+
|
65 |
+
```shell
|
66 |
+
pre-commit install
|
67 |
+
```
|
68 |
+
|
69 |
+
You can manually run pre-commit using the following command:
|
70 |
+
|
71 |
+
```shell
|
72 |
+
# To run against staged files:
|
73 |
+
pre-commit run
|
74 |
+
|
75 |
+
# If you want to run against staged and unstaged files:
|
76 |
+
pre-commit run --all-files
|
77 |
+
```
|
78 |
+
|
79 |
+
It is important to run the pre-commit hooks and fix any files that fail before you commit and push to the repository as the pull request build will fail any PR that does not adhere to them i.e. the RDTE team will only accept your code for deployment to Hugging Face once it has passed all of the pre-commit checks.
|
80 |
+
|
81 |
+
#### Poetry
|
82 |
+
|
83 |
+
[Poetry](https://python-poetry.org/) is a tool for Python packaging, dependency and virtual environment management that is used to manage the development of this project. You will need to install Poetry locally. There are several ways to install it including through the package manager of your operating system, however, the easiest way to install is likely using their installer, as follows:
|
84 |
+
|
85 |
+
```shell
|
86 |
+
curl -sSL https://install.python-poetry.org | python3 -
|
87 |
+
```
|
88 |
+
|
89 |
+
You can also use `pip` and `pipx` to install poetry, the details of which are at https://python-poetry.org/docs/
|
90 |
+
|
91 |
+
Once installed, the project is configured and controlled via the `pyproject.toml` file with the current dependency tree stored in `poetry.lock`. You may also [configure poetry](https://python-poetry.org/docs/configuration/) further if you wish but there is no need to do so as the default options are sufficient. You may, however, wish to change some of the options set in this template:
|
92 |
+
|
93 |
+
| Setting | Notes |
|
94 |
+
| ------- | ----- |
|
95 |
+
| name | **Update this**, to reflect the name of your demo |
|
96 |
+
| version | **Update this**, to reflect the current version of your demo |
|
97 |
+
| description | **Update this**, to a short description of your demo |
|
98 |
+
| authors | **Update this**, to the list of authors of your demo |
|
99 |
+
|
100 |
+
## 🛠️ Install and run locally
|
101 |
+
|
102 |
+
To get set up ready to run the code in development mode:
|
103 |
+
|
104 |
+
```shell
|
105 |
+
# add the poetry shell and export plugins (you only need to do this once on your machine)
|
106 |
+
poetry self add poetry-plugin-shell
|
107 |
+
poetry self add poetry-plugin-export
|
108 |
+
|
109 |
+
# create and activate a python virtual environment
|
110 |
+
poetry shell
|
111 |
+
poetry install
|
112 |
+
|
113 |
+
# run the demo locally (for development with automatic reload)
|
114 |
+
gradio src/app.py
|
115 |
+
```
|
116 |
+
|
117 |
+
## 📝 Documenting your demo
|
118 |
+
|
119 |
+
If you would like to write some information/documentation about your demo that is intended for developers or other people that might want to run the demo from scratch, please use the [README.md](README.md) file, leaving the Hugging Face Spaces configuration header in place at the top of the file.
|
120 |
+
|
121 |
+
### Hugging face spaces configuration settings
|
122 |
+
|
123 |
+
Hugging Face allow the configuration of spaces demonstrations via the [README.md](README.md) file in the root of the project. There is a [Spaces Configuration Reference](https://huggingface.co/docs/hub/en/spaces-config-reference) guide that you can use to gain an understanding of the configuration options that can be specified here.
|
124 |
+
|
125 |
+
The template has a set of initial defaults, similar to these:
|
126 |
+
|
127 |
+
```
|
128 |
+
---
|
129 |
+
title: Granite 3.0 Chat
|
130 |
+
colorFrom: blue
|
131 |
+
colorTo: indigo
|
132 |
+
sdk: gradio
|
133 |
+
sdk_version: 5.9.1
|
134 |
+
app_file: src/app.py
|
135 |
+
pinned: false
|
136 |
+
license: apache-2.0
|
137 |
+
short_description: Chat with IBM Granite 3.0
|
138 |
+
---
|
139 |
+
```
|
140 |
+
|
141 |
+
#### Options
|
142 |
+
|
143 |
+
The default options specified above:
|
144 |
+
|
145 |
+
| Setting | Notes |
|
146 |
+
| ------- | ----- |
|
147 |
+
| title | **Update this**, keep this short (recommend max 24 chars), this information is displayed in the centre of the demo description card |
|
148 |
+
| emoji | Do not update this, our demos will use a consistent emoji character |
|
149 |
+
| colorFrom | Do not update this, used in combination with colorTo to colourize the demo description card |
|
150 |
+
| colorTo | see colorFrom |
|
151 |
+
| sdk | Do not update this, our Gradio demos will always use the "gradio" setting |
|
152 |
+
| sdk_version | Update this if necessary for your demo to function, ideally should be set to the latest gradio version |
|
153 |
+
| app_file | Update this if necessary for your demo to function, should be set to the path of the main entry point to the demo |
|
154 |
+
| license | Do not update this, our demos are to always be apache-2.0 licensed |
|
155 |
+
| short_description | **Update this**, should be set to a few words that describe the demo in a little more detail than the title, this information is displayed in the bottom-right of the demo description card |
|
156 |
+
|
157 |
+
Other available options:
|
158 |
+
|
159 |
+
| Setting | Notes |
|
160 |
+
| ------- | ----- |
|
161 |
+
| python_version | You may optionally set this, best advice is to use the default Python version if possible (current default is Python 3.10) |
|
162 |
+
| suggested_hardware | Do not use this, unlikely to be required as demos run on ZeroGPU |
|
163 |
+
| suggested_storage | Do not use this, our demos do not require storage |
|
164 |
+
| app_port | Do not use this, not relevant for gradio demos |
|
165 |
+
| base_path | Do not use this, use the app_file setting |
|
166 |
+
| fullWidth | Do not use this, our demos will use a consistent default width |
|
167 |
+
| header | Do not use this, our demos will use a consistent header |
|
168 |
+
| models | Do not use this, let their parsing discover these from our code |
|
169 |
+
| datasets | Do not use this, let their parsing discover these from our code |
|
170 |
+
| tags | Do not use this, we are not tagging our demos |
|
171 |
+
| thumbnail | Do not use this, provides a thumbnail for social sharing of demos |
|
172 |
+
| pinned | Do not use this, the RDTE team will change this setting if it's deemed necessary |
|
173 |
+
| hf_oauth | Do not use this, we are not using OAuth |
|
174 |
+
| hf_oauth_scopes | Do not use this, we are not using OAuth |
|
175 |
+
| hf_oauth_expiration_minutes | Do not use this, we are not using OAuth |
|
176 |
+
| disable_embedding | Do not use this, leave at the default that allows embedding to take place |
|
177 |
+
| startup_duration_timeout | Do not use this, leave at the default 30 minutes |
|
178 |
+
| custom_headers | Do not use this, we do not need to add any custom HTTP headers |
|
179 |
+
| preload_from_hub | Do not use this, specifying this builds the models and data sets into the container image with the goal of making start up times faster due to not needing to download them each time. However, RDTE testing indicates this setting significantly increases the start up time for our relatively small Granite models |
|
README.md
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Multimodal RAG with Granite Vision
|
3 |
+
short_description: RAG example using Granite [vision, embedding, instruct]
|
4 |
+
colorFrom: blue
|
5 |
+
colorTo: indigo
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 5.16.1
|
8 |
+
app_file: src/rag_app.py
|
9 |
+
pinned: False
|
10 |
+
license: apache-2.0
|
11 |
+
---
|
12 |
+
|
13 |
+
# Granite Vision Demos
|
14 |
+
|
15 |
+
## Install
|
16 |
+
|
17 |
+
(copied from DEVELOP.md)
|
18 |
+
```shell
|
19 |
+
# add the poetry shell and export plugins (you only need to do this once on your machine)
|
20 |
+
poetry self add poetry-plugin-shell
|
21 |
+
poetry self add poetry-plugin-export
|
22 |
+
|
23 |
+
# create and activate a python virtual environment
|
24 |
+
poetry shell
|
25 |
+
poetry install
|
26 |
+
```
|
27 |
+
|
28 |
+
|
29 |
+
## Run Demos
|
30 |
+
|
31 |
+
### RAG
|
32 |
+
Run with lazy loading models:
|
33 |
+
```
|
34 |
+
LAZY_LOADING=true gradio src/rag_app.py
|
35 |
+
```
|
36 |
+
|
37 |
+
### QA
|
38 |
+
|
39 |
+
```
|
40 |
+
gradio src/qa_app.py
|
41 |
+
```
|
data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_20_0.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_3_0.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_7_0.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_7_1.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_7_2.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_8_0.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_8_1.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_8_2.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/ibm_report_2007/images/IBM_Annual_Report_2007_0-20_im_image_8_3.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/wihlborgs/images/image_16_0.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/wihlborgs/images/image_16_1.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/wihlborgs/images/image_16_2.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/wihlborgs/images/image_16_3.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/wihlborgs/images/image_17_0.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/wihlborgs/images/image_17_1.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/images/wihlborgs/images/image_17_2.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/milvus/.milvus.db.lock
ADDED
File without changes
|
data/final_v2_mar04/milvus/milvus.db
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e08ba08b5adbee90a02d7e02961d4b9f124cc5cc0281eaa93e594f3f033391f9
|
3 |
+
size 475136
|
data/final_v2_mar04/pdfs/IBM_Annual_Report_2007_3-20.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:496db869d12b0a31f00c872c1d9f6840500a6677221c51a362e9b1943062e7f9
|
3 |
+
size 988484
|
data/final_v2_mar04/pdfs/wihlborgs-2-13_16-18.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:487233ff87278df209c8bb4400048f43ba85ab10ccabd75905c362e55b797ddf
|
3 |
+
size 800249
|
data/final_v2_mar04/preview/IBM-Z.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/preview/IBM-financial-2010.png
ADDED
![]() |
Git LFS Details
|
data/final_v2_mar04/preview/Wilhlborg-financial.png
ADDED
![]() |
Git LFS Details
|
poetry.lock
ADDED
The diff for this file is too large to render.
See raw diff
|
|
pyproject.toml
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[tool.poetry]
|
2 |
+
name = "granite-vision-demos"
|
3 |
+
version = "0.1.0"
|
4 |
+
description = "A collection of gradio demos for granite vision models."
|
5 |
+
authors = ["Hendrik Strobelt <hendrik.strobelt@ibm.com>"]
|
6 |
+
license = "Apache-2.0"
|
7 |
+
readme = "README.md"
|
8 |
+
package-mode = false
|
9 |
+
|
10 |
+
[tool.poetry.dependencies]
|
11 |
+
python = ">=3.10,<3.11"
|
12 |
+
gradio = "5.16.1"
|
13 |
+
torch = "2.4.0"
|
14 |
+
spaces = "0.30.4"
|
15 |
+
transformers = { git = "https://github.com/huggingface/transformers" }
|
16 |
+
accelerate = "^1.2.1"
|
17 |
+
langchain-milvus = "^0.1.8"
|
18 |
+
langchain-core = "^0.3.36"
|
19 |
+
langchain-huggingface = "^0.1.2"
|
20 |
+
gradio-pdf = "^0.0.22"
|
21 |
+
|
22 |
+
|
23 |
+
[tool.poetry.group.dev.dependencies]
|
24 |
+
pre-commit = "^4.0.1"
|
25 |
+
git-lint = "^0.1.2"
|
26 |
+
#ruff = "^0.8.3"
|
27 |
+
ruff = "^0.9.3"
|
28 |
+
pytest = "^8.3.4"
|
29 |
+
|
30 |
+
|
31 |
+
[build-system]
|
32 |
+
requires = ["poetry-core"]
|
33 |
+
build-backend = "poetry.core.masonry.api"
|
34 |
+
|
35 |
+
|
36 |
+
[tool.ruff]
|
37 |
+
select = [
|
38 |
+
"E", # pycodestyle
|
39 |
+
"F", # pyflakes
|
40 |
+
"UP", # pyupgrade
|
41 |
+
"D", # pydocstyle
|
42 |
+
"I", # isort
|
43 |
+
"B", # bugbear
|
44 |
+
"ANN", # annotations
|
45 |
+
"N", # pep8-naming
|
46 |
+
"C4", # Comprehensions
|
47 |
+
"DTZ", # DatetimeZ
|
48 |
+
"Q", # Quotes
|
49 |
+
"SIM", # Simplify
|
50 |
+
"RUF", # Ruff
|
51 |
+
]
|
52 |
+
ignore = ["D203", "D213"]
|
53 |
+
fixable = ["ALL"]
|
54 |
+
unfixable = []
|
55 |
+
line-length = 120
|
56 |
+
|
57 |
+
|
58 |
+
[tool.black]
|
59 |
+
line-length = 120
|
60 |
+
|
61 |
+
[tool.ruff.lint.pydocstyle]
|
62 |
+
convention = "google"
|
requirements.txt
ADDED
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
accelerate==1.2.1 ; python_version >= "3.10" and python_version < "3.11"
|
2 |
+
aiofiles==23.2.1 ; python_version >= "3.10" and python_version < "3.11"
|
3 |
+
annotated-types==0.7.0 ; python_version >= "3.10" and python_version < "3.11"
|
4 |
+
anyio==4.8.0 ; python_version >= "3.10" and python_version < "3.11"
|
5 |
+
certifi==2024.12.14 ; python_version >= "3.10" and python_version < "3.11"
|
6 |
+
cffi==1.17.1 ; python_version >= "3.10" and python_version < "3.11" and platform_python_implementation == "PyPy"
|
7 |
+
charset-normalizer==3.4.1 ; python_version >= "3.10" and python_version < "3.11"
|
8 |
+
click==8.1.8 ; python_version >= "3.10" and python_version < "3.11" and sys_platform != "emscripten"
|
9 |
+
colorama==0.4.6 ; python_version >= "3.10" and python_version < "3.11" and platform_system == "Windows"
|
10 |
+
exceptiongroup==1.2.2 ; python_version >= "3.10" and python_version < "3.11"
|
11 |
+
fastapi==0.115.6 ; python_version >= "3.10" and python_version < "3.11"
|
12 |
+
ffmpy==0.5.0 ; python_version >= "3.10" and python_version < "3.11"
|
13 |
+
filelock==3.16.1 ; python_version >= "3.10" and python_version < "3.11"
|
14 |
+
fsspec==2024.12.0 ; python_version >= "3.10" and python_version < "3.11"
|
15 |
+
gradio-client==1.7.0 ; python_version >= "3.10" and python_version < "3.11"
|
16 |
+
gradio-pdf==0.0.22 ; python_version >= "3.10" and python_version < "3.11"
|
17 |
+
gradio==5.16.1 ; python_version >= "3.10" and python_version < "3.11"
|
18 |
+
grpcio==1.67.1 ; python_version >= "3.10" and python_version < "3.11"
|
19 |
+
h11==0.14.0 ; python_version >= "3.10" and python_version < "3.11"
|
20 |
+
httpcore==1.0.7 ; python_version >= "3.10" and python_version < "3.11"
|
21 |
+
httpx==0.28.1 ; python_version >= "3.10" and python_version < "3.11"
|
22 |
+
huggingface-hub==0.28.1 ; python_version >= "3.10" and python_version < "3.11"
|
23 |
+
idna==3.10 ; python_version >= "3.10" and python_version < "3.11"
|
24 |
+
jinja2==3.1.5 ; python_version >= "3.10" and python_version < "3.11"
|
25 |
+
joblib==1.4.2 ; python_version >= "3.10" and python_version < "3.11"
|
26 |
+
jsonpatch==1.33 ; python_version >= "3.10" and python_version < "3.11"
|
27 |
+
jsonpointer==3.0.0 ; python_version >= "3.10" and python_version < "3.11"
|
28 |
+
langchain-core==0.3.36 ; python_version >= "3.10" and python_version < "3.11"
|
29 |
+
langchain-huggingface==0.1.2 ; python_version >= "3.10" and python_version < "3.11"
|
30 |
+
langchain-milvus==0.1.8 ; python_version >= "3.10" and python_version < "3.11"
|
31 |
+
langsmith==0.3.8 ; python_version >= "3.10" and python_version < "3.11"
|
32 |
+
markdown-it-py==3.0.0 ; python_version >= "3.10" and python_version < "3.11" and sys_platform != "emscripten"
|
33 |
+
markupsafe==2.1.5 ; python_version >= "3.10" and python_version < "3.11"
|
34 |
+
mdurl==0.1.2 ; python_version >= "3.10" and python_version < "3.11" and sys_platform != "emscripten"
|
35 |
+
milvus-lite==2.4.11 ; python_version >= "3.10" and python_version < "3.11" and sys_platform != "win32"
|
36 |
+
mpmath==1.3.0 ; python_version >= "3.10" and python_version < "3.11"
|
37 |
+
networkx==3.4.2 ; python_version >= "3.10" and python_version < "3.11"
|
38 |
+
numpy==2.2.1 ; python_version >= "3.10" and python_version < "3.11"
|
39 |
+
nvidia-cublas-cu12==12.1.3.1 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
40 |
+
nvidia-cuda-cupti-cu12==12.1.105 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
41 |
+
nvidia-cuda-nvrtc-cu12==12.1.105 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
42 |
+
nvidia-cuda-runtime-cu12==12.1.105 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
43 |
+
nvidia-cudnn-cu12==9.1.0.70 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
44 |
+
nvidia-cufft-cu12==11.0.2.54 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
45 |
+
nvidia-curand-cu12==10.3.2.106 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
46 |
+
nvidia-cusolver-cu12==11.4.5.107 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
47 |
+
nvidia-cusparse-cu12==12.1.0.106 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
48 |
+
nvidia-nccl-cu12==2.20.5 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
49 |
+
nvidia-nvjitlink-cu12==12.6.85 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
50 |
+
nvidia-nvtx-cu12==12.1.105 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "3.11"
|
51 |
+
orjson==3.10.13 ; python_version >= "3.10" and python_version < "3.11"
|
52 |
+
packaging==24.2 ; python_version >= "3.10" and python_version < "3.11"
|
53 |
+
pandas==2.2.3 ; python_version >= "3.10" and python_version < "3.11"
|
54 |
+
pillow==11.1.0 ; python_version >= "3.10" and python_version < "3.11"
|
55 |
+
protobuf==5.29.3 ; python_version >= "3.10" and python_version < "3.11"
|
56 |
+
psutil==5.9.8 ; python_version >= "3.10" and python_version < "3.11"
|
57 |
+
pycparser==2.22 ; python_version >= "3.10" and python_version < "3.11" and platform_python_implementation == "PyPy"
|
58 |
+
pydantic-core==2.27.2 ; python_version >= "3.10" and python_version < "3.11"
|
59 |
+
pydantic==2.10.4 ; python_version >= "3.10" and python_version < "3.11"
|
60 |
+
pydub==0.25.1 ; python_version >= "3.10" and python_version < "3.11"
|
61 |
+
pygments==2.19.1 ; python_version >= "3.10" and python_version < "3.11" and sys_platform != "emscripten"
|
62 |
+
pymilvus==2.5.4 ; python_version >= "3.10" and python_version < "3.11"
|
63 |
+
python-dateutil==2.9.0.post0 ; python_version >= "3.10" and python_version < "3.11"
|
64 |
+
python-dotenv==1.0.1 ; python_version >= "3.10" and python_version < "3.11"
|
65 |
+
python-multipart==0.0.20 ; python_version >= "3.10" and python_version < "3.11"
|
66 |
+
pytz==2024.2 ; python_version >= "3.10" and python_version < "3.11"
|
67 |
+
pyyaml==6.0.2 ; python_version >= "3.10" and python_version < "3.11"
|
68 |
+
regex==2024.11.6 ; python_version >= "3.10" and python_version < "3.11"
|
69 |
+
requests-toolbelt==1.0.0 ; python_version >= "3.10" and python_version < "3.11"
|
70 |
+
requests==2.32.3 ; python_version >= "3.10" and python_version < "3.11"
|
71 |
+
rich==13.9.4 ; python_version >= "3.10" and python_version < "3.11" and sys_platform != "emscripten"
|
72 |
+
ruff==0.9.6 ; python_version >= "3.10" and python_version < "3.11" and sys_platform != "emscripten"
|
73 |
+
safehttpx==0.1.6 ; python_version >= "3.10" and python_version < "3.11"
|
74 |
+
safetensors==0.5.1 ; python_version >= "3.10" and python_version < "3.11"
|
75 |
+
scikit-learn==1.6.1 ; python_version >= "3.10" and python_version < "3.11"
|
76 |
+
scipy==1.15.2 ; python_version >= "3.10" and python_version < "3.11"
|
77 |
+
semantic-version==2.10.0 ; python_version >= "3.10" and python_version < "3.11"
|
78 |
+
sentence-transformers==3.4.1 ; python_version >= "3.10" and python_version < "3.11"
|
79 |
+
setuptools==75.8.0 ; python_version >= "3.10" and python_version < "3.11"
|
80 |
+
shellingham==1.5.4 ; python_version >= "3.10" and python_version < "3.11" and sys_platform != "emscripten"
|
81 |
+
six==1.17.0 ; python_version >= "3.10" and python_version < "3.11"
|
82 |
+
sniffio==1.3.1 ; python_version >= "3.10" and python_version < "3.11"
|
83 |
+
spaces==0.30.4 ; python_version >= "3.10" and python_version < "3.11"
|
84 |
+
starlette==0.41.3 ; python_version >= "3.10" and python_version < "3.11"
|
85 |
+
sympy==1.13.3 ; python_version >= "3.10" and python_version < "3.11"
|
86 |
+
tenacity==9.0.0 ; python_version >= "3.10" and python_version < "3.11"
|
87 |
+
threadpoolctl==3.5.0 ; python_version >= "3.10" and python_version < "3.11"
|
88 |
+
tokenizers==0.21.0 ; python_version >= "3.10" and python_version < "3.11"
|
89 |
+
tomlkit==0.13.2 ; python_version >= "3.10" and python_version < "3.11"
|
90 |
+
torch==2.4.0 ; python_version >= "3.10" and python_version < "3.11"
|
91 |
+
tqdm==4.67.1 ; python_version >= "3.10" and python_version < "3.11"
|
92 |
+
transformers @ git+https://github.com/huggingface/transformers@dd16acb8a3e93b643aa374c9fb80749f5235c1a6 ; python_version >= "3.10" and python_version < "3.11"
|
93 |
+
triton==3.0.0 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version < "3.11" and python_version >= "3.10"
|
94 |
+
typer==0.15.1 ; python_version >= "3.10" and python_version < "3.11" and sys_platform != "emscripten"
|
95 |
+
typing-extensions==4.12.2 ; python_version >= "3.10" and python_version < "3.11"
|
96 |
+
tzdata==2024.2 ; python_version >= "3.10" and python_version < "3.11"
|
97 |
+
ujson==5.10.0 ; python_version >= "3.10" and python_version < "3.11"
|
98 |
+
urllib3==2.3.0 ; python_version >= "3.10" and python_version < "3.11"
|
99 |
+
uvicorn==0.34.0 ; python_version >= "3.10" and python_version < "3.11" and sys_platform != "emscripten"
|
100 |
+
websockets==14.1 ; python_version >= "3.10" and python_version < "3.11"
|
101 |
+
zstandard==0.23.0 ; python_version >= "3.10" and python_version < "3.11"
|
src/README.md
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Granite Vision Demos
|
2 |
+
|
3 |
+
### RAG
|
4 |
+
Run with lazy loading models:
|
5 |
+
```
|
6 |
+
LAZY_LOADING=true gradio src/rag_app.py
|
7 |
+
```
|
8 |
+
|
9 |
+
## QA
|
10 |
+
|
11 |
+
```
|
12 |
+
gradio src/qa_app.py
|
13 |
+
```
|
14 |
+
|
src/app.css
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
footer {
|
2 |
+
display: none !important;
|
3 |
+
}
|
4 |
+
.pdf_viewer .top-panel{
|
5 |
+
display: none !important;
|
6 |
+
}
|
7 |
+
.chatbot_view .top-panel {
|
8 |
+
display: none !important;
|
9 |
+
}
|
10 |
+
|
11 |
+
.jumping-dots span {
|
12 |
+
position: relative;
|
13 |
+
bottom: 0px;
|
14 |
+
animation: jump 2s infinite;
|
15 |
+
}
|
16 |
+
.jumping-dots .dot-1{
|
17 |
+
animation-delay: 200ms;
|
18 |
+
}
|
19 |
+
.jumping-dots .dot-2{
|
20 |
+
animation-delay: 400ms;
|
21 |
+
}
|
22 |
+
.jumping-dots .dot-3{
|
23 |
+
animation-delay: 600ms;
|
24 |
+
}
|
25 |
+
|
26 |
+
@keyframes jump {
|
27 |
+
0% {bottom: 0px;}
|
28 |
+
20% {bottom: 5px;}
|
29 |
+
40% {bottom: 0px;}
|
30 |
+
}
|
31 |
+
|
32 |
+
.inactive_div {
|
33 |
+
pointer-events: none;
|
34 |
+
opacity: .5;
|
35 |
+
}
|
36 |
+
|
37 |
+
/*.gallery-container .grid-container {*/
|
38 |
+
/* width: 50px;*/
|
39 |
+
/*}*/
|
src/app_head.html
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<script
|
2 |
+
async
|
3 |
+
src="https://www.googletagmanager.com/gtag/js?id=G-C6LFT227RC"
|
4 |
+
></script>
|
5 |
+
<script>
|
6 |
+
window.dataLayer = window.dataLayer || [];
|
7 |
+
function gtag() {
|
8 |
+
dataLayer.push(arguments);
|
9 |
+
}
|
10 |
+
gtag("js", new Date());
|
11 |
+
gtag("config", "G-C6LFT227RC");
|
12 |
+
</script>
|
src/qa_app.py
ADDED
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Template Demo for IBM Granite Hugging Face spaces."""
|
2 |
+
|
3 |
+
from collections.abc import Iterator
|
4 |
+
from datetime import datetime
|
5 |
+
from pathlib import Path
|
6 |
+
from threading import Thread
|
7 |
+
|
8 |
+
import gradio as gr
|
9 |
+
import PIL
|
10 |
+
import spaces
|
11 |
+
import torch
|
12 |
+
from PIL.Image import Image as PILImage
|
13 |
+
from PIL.Image import Resampling
|
14 |
+
from transformers import (
|
15 |
+
AutoModelForCausalLM,
|
16 |
+
AutoModelForVision2Seq,
|
17 |
+
AutoProcessor,
|
18 |
+
AutoTokenizer,
|
19 |
+
LlavaNextForConditionalGeneration,
|
20 |
+
LlavaNextProcessor,
|
21 |
+
TextIteratorStreamer,
|
22 |
+
)
|
23 |
+
|
24 |
+
from themes.research_monochrome import theme
|
25 |
+
|
26 |
+
dir_ = Path(__file__).parent.parent
|
27 |
+
today_date = datetime.today().strftime("%B %-d, %Y") # noqa: DTZ002
|
28 |
+
|
29 |
+
MODEL_ID = "ibm-granite/granite-vision-3.2-2b"
|
30 |
+
MODEL_ID_PREVIEW = "ibm-granite/granite-vision-3.1-2b-preview"
|
31 |
+
# SYS_PROMPT = f"""Knowledge Cutoff Date: April 2024.
|
32 |
+
# Today's Date: {today_date}.
|
33 |
+
# You are Granite, developed by IBM. You are a helpful AI assistant"""
|
34 |
+
TITLE = "IBM Granite VISION 3.1 2b preview"
|
35 |
+
|
36 |
+
DESCRIPTION = "Try one of the sample prompts below or write your own. Remember, \
|
37 |
+
AI models can make mistakes."
|
38 |
+
MAX_INPUT_TOKEN_LENGTH = 4096
|
39 |
+
MAX_NEW_TOKENS = 1024
|
40 |
+
TEMPERATURE = 0.7
|
41 |
+
TOP_P = 0.85
|
42 |
+
TOP_K = 50
|
43 |
+
REPETITION_PENALTY = 1.05
|
44 |
+
|
45 |
+
sample_data = [
|
46 |
+
[
|
47 |
+
"https://www.ibm.com/design/language/static/159e89b3d8d6efcb5db43f543df36b23/a5df1/rebusgallery_tshirt.png",
|
48 |
+
["What are the three symbols on the tshirt?"],
|
49 |
+
],
|
50 |
+
[
|
51 |
+
str(dir_ / "data" / "p2-report.png"),
|
52 |
+
[
|
53 |
+
"What's the difference in rental income between 2020 and 2019?",
|
54 |
+
"Which table entries are less in 2020 than 2019?",
|
55 |
+
],
|
56 |
+
],
|
57 |
+
[
|
58 |
+
"https://www.ibm.com/design/language/static/159e89b3d8d6efcb5db43f543df36b23/a5df1/rebusgallery_tshirt.png",
|
59 |
+
["What's this?"],
|
60 |
+
],
|
61 |
+
]
|
62 |
+
|
63 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu")
|
64 |
+
|
65 |
+
processor: LlavaNextProcessor = None
|
66 |
+
model: LlavaNextForConditionalGeneration = None
|
67 |
+
|
68 |
+
selected_image: PILImage = None
|
69 |
+
|
70 |
+
|
71 |
+
def image_changed(im: PILImage):
|
72 |
+
global selected_image
|
73 |
+
if im is None:
|
74 |
+
selected_image = None
|
75 |
+
else:
|
76 |
+
selected_image = im.copy()
|
77 |
+
selected_image.thumbnail((800, 800))
|
78 |
+
# return selected_image
|
79 |
+
|
80 |
+
|
81 |
+
def create_single_turn(image: PILImage, text: str) -> dict:
|
82 |
+
if image is None:
|
83 |
+
return {
|
84 |
+
"role": "user",
|
85 |
+
"content": [
|
86 |
+
{"type": "text", "text": text},
|
87 |
+
],
|
88 |
+
}
|
89 |
+
else:
|
90 |
+
return {
|
91 |
+
"role": "user",
|
92 |
+
"content": [
|
93 |
+
{"type": "image", "image": image},
|
94 |
+
{"type": "text", "text": text},
|
95 |
+
],
|
96 |
+
}
|
97 |
+
|
98 |
+
|
99 |
+
@spaces.GPU
|
100 |
+
def generate(
|
101 |
+
image: PILImage,
|
102 |
+
message: str,
|
103 |
+
chat_history: list[dict],
|
104 |
+
temperature: float = TEMPERATURE,
|
105 |
+
repetition_penalty: float = REPETITION_PENALTY,
|
106 |
+
top_p: float = TOP_P,
|
107 |
+
top_k: float = TOP_K,
|
108 |
+
max_new_tokens: int = MAX_NEW_TOKENS,
|
109 |
+
):
|
110 |
+
"""Generate function for chat demo.
|
111 |
+
|
112 |
+
Args:
|
113 |
+
max_new_tokens:
|
114 |
+
top_k:
|
115 |
+
top_p:
|
116 |
+
repetition_penalty:
|
117 |
+
temperature:
|
118 |
+
image: the image to be talked about...
|
119 |
+
message (str): The latest input message from the user.
|
120 |
+
chat_history (list[dict]): A list of dictionaries representing previous chat history, where each dictionary
|
121 |
+
contains 'role' and 'content'.
|
122 |
+
|
123 |
+
Yields:
|
124 |
+
str: The generated response, broken down into smaller chunks.
|
125 |
+
"""
|
126 |
+
|
127 |
+
print(top_p)
|
128 |
+
# Build messages
|
129 |
+
conversation = []
|
130 |
+
# TODO: maybe add back custom sys prompt
|
131 |
+
# conversation.append({"role": "system", "content": SYS_PROMPT})
|
132 |
+
conversation += chat_history
|
133 |
+
conversation.append(create_single_turn(image, message))
|
134 |
+
|
135 |
+
# Convert messages to prompt format
|
136 |
+
inputs = processor.apply_chat_template(
|
137 |
+
conversation, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
|
138 |
+
).to(device)
|
139 |
+
|
140 |
+
# TODO: This might cut out the image tokens -- find better strategy
|
141 |
+
# if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
|
142 |
+
# input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
|
143 |
+
# gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
|
144 |
+
|
145 |
+
generate_kwargs = dict(
|
146 |
+
max_new_tokens=max_new_tokens,
|
147 |
+
do_sample=True,
|
148 |
+
top_p=top_p,
|
149 |
+
top_k=top_k,
|
150 |
+
temperature=temperature,
|
151 |
+
num_beams=1,
|
152 |
+
repetition_penalty=repetition_penalty,
|
153 |
+
)
|
154 |
+
output = model.generate(**inputs, **generate_kwargs)
|
155 |
+
out = processor.decode(output[0], skip_special_tokens=True)
|
156 |
+
out_s = out.strip().split("<|assistant|>")
|
157 |
+
return [gr.ChatMessage(role="user", content=message), gr.ChatMessage(role="assistant", content=out_s[-1])]
|
158 |
+
|
159 |
+
|
160 |
+
def multimodal_generate_v2(
|
161 |
+
msg: str,
|
162 |
+
temperature: float = TEMPERATURE,
|
163 |
+
repetition_penalty: float = REPETITION_PENALTY,
|
164 |
+
top_p: float = TOP_P,
|
165 |
+
top_k: float = TOP_K,
|
166 |
+
max_new_tokens: int = MAX_NEW_TOKENS,
|
167 |
+
):
|
168 |
+
global model, processor
|
169 |
+
|
170 |
+
# lazy loading and adding image
|
171 |
+
if model is None:
|
172 |
+
processor = AutoProcessor.from_pretrained(MODEL_ID)
|
173 |
+
model = AutoModelForVision2Seq.from_pretrained(MODEL_ID, device_map="auto").to(device)
|
174 |
+
|
175 |
+
return generate(
|
176 |
+
selected_image,
|
177 |
+
msg,
|
178 |
+
[],
|
179 |
+
temperature=temperature,
|
180 |
+
repetition_penalty=repetition_penalty,
|
181 |
+
top_p=top_p,
|
182 |
+
top_k=top_k,
|
183 |
+
max_new_tokens=max_new_tokens,
|
184 |
+
)
|
185 |
+
|
186 |
+
|
187 |
+
tb = gr.Textbox(submit_btn=True)
|
188 |
+
# advanced settings (displayed in Accordion)
|
189 |
+
temperature_slider = gr.Slider(
|
190 |
+
minimum=0,
|
191 |
+
maximum=1.0,
|
192 |
+
value=TEMPERATURE,
|
193 |
+
step=0.1,
|
194 |
+
label="Temperature",
|
195 |
+
elem_classes=["gr_accordion_element"],
|
196 |
+
interactive=True,
|
197 |
+
)
|
198 |
+
top_p_slider = gr.Slider(
|
199 |
+
minimum=0,
|
200 |
+
maximum=1.0,
|
201 |
+
value=TOP_P,
|
202 |
+
step=0.05,
|
203 |
+
label="Top P",
|
204 |
+
elem_classes=["gr_accordion_element"],
|
205 |
+
interactive=True,
|
206 |
+
)
|
207 |
+
top_k_slider = gr.Slider(
|
208 |
+
minimum=0, maximum=100, value=TOP_K, step=1, label="Top K", elem_classes=["gr_accordion_element"], interactive=True
|
209 |
+
)
|
210 |
+
repetition_penalty_slider = gr.Slider(
|
211 |
+
minimum=0,
|
212 |
+
maximum=2.0,
|
213 |
+
value=REPETITION_PENALTY,
|
214 |
+
step=0.05,
|
215 |
+
label="Repetition Penalty",
|
216 |
+
elem_classes=["gr_accordion_element"],
|
217 |
+
interactive=True,
|
218 |
+
)
|
219 |
+
max_new_tokens_slider = gr.Slider(
|
220 |
+
minimum=1,
|
221 |
+
maximum=2000,
|
222 |
+
value=MAX_NEW_TOKENS,
|
223 |
+
step=1,
|
224 |
+
label="Max New Tokens",
|
225 |
+
elem_classes=["gr_accordion_element"],
|
226 |
+
interactive=True,
|
227 |
+
)
|
228 |
+
|
229 |
+
chatbot = gr.Chatbot(examples=[{"text": "Hello World!"}], type="messages", label="Q&A about selected document")
|
230 |
+
|
231 |
+
css_file_path = Path(Path(__file__).parent / "app.css")
|
232 |
+
head_file_path = Path(Path(__file__).parent / "app_head.html")
|
233 |
+
|
234 |
+
with gr.Blocks(fill_height=True, css_paths=css_file_path, head_paths=head_file_path, theme=theme, title=TITLE) as demo:
|
235 |
+
is_in_edit_mode = gr.State(True) # in block to be reactive
|
236 |
+
|
237 |
+
gr.Markdown(f"# {TITLE}")
|
238 |
+
gr.Markdown(DESCRIPTION)
|
239 |
+
with gr.Row():
|
240 |
+
with gr.Column():
|
241 |
+
# create sample image object for reference, render later
|
242 |
+
image_x = gr.Image(
|
243 |
+
type="pil",
|
244 |
+
label="Example image",
|
245 |
+
render=False,
|
246 |
+
interactive=False,
|
247 |
+
show_label=False,
|
248 |
+
show_fullscreen_button=False,
|
249 |
+
height=800,
|
250 |
+
)
|
251 |
+
image_x.change(fn=image_changed, inputs=image_x)
|
252 |
+
|
253 |
+
# Create Dataset object and render it
|
254 |
+
ds = gr.Dataset(label="Select one document", samples=sample_data, components=[gr.Image(render=False)])
|
255 |
+
|
256 |
+
def sample_image_selected(d: gr.SelectData, dx):
|
257 |
+
return gr.Image(dx[0]), gr.update(examples=[{"text": x} for x in dx[1]])
|
258 |
+
|
259 |
+
ds.select(lambda: [], outputs=[chatbot])
|
260 |
+
ds.select(sample_image_selected, inputs=[ds], outputs=[image_x, chatbot])
|
261 |
+
|
262 |
+
# Render image object after DS
|
263 |
+
image_x.render()
|
264 |
+
with gr.Column():
|
265 |
+
# Render ChatBot
|
266 |
+
chatbot.render()
|
267 |
+
|
268 |
+
# Define behavior for example selection
|
269 |
+
def update_user_chat_x(x: gr.SelectData):
|
270 |
+
return [gr.ChatMessage(role="user", content=x.value["text"])]
|
271 |
+
|
272 |
+
def send_generate_x(x: gr.SelectData, temperature, repetition_penalty, top_p, top_k, max_new_tokens):
|
273 |
+
txt = x.value["text"]
|
274 |
+
return multimodal_generate_v2(txt, temperature, repetition_penalty, top_p, top_k, max_new_tokens)
|
275 |
+
|
276 |
+
chatbot.example_select(lambda: False, outputs=is_in_edit_mode)
|
277 |
+
chatbot.example_select(update_user_chat_x, outputs=[chatbot])
|
278 |
+
chatbot.example_select(
|
279 |
+
send_generate_x,
|
280 |
+
inputs=[
|
281 |
+
temperature_slider,
|
282 |
+
repetition_penalty_slider,
|
283 |
+
top_p_slider,
|
284 |
+
top_k_slider,
|
285 |
+
max_new_tokens_slider,
|
286 |
+
],
|
287 |
+
outputs=[chatbot],
|
288 |
+
)
|
289 |
+
|
290 |
+
# Create User Chat Textbox and Reset Button
|
291 |
+
tbb = gr.Textbox(submit_btn=True, show_label=False)
|
292 |
+
fb = gr.Button("Reset Chat", visible=False)
|
293 |
+
fb.click(lambda: [], outputs=[chatbot])
|
294 |
+
|
295 |
+
# Handle toggling betwwen edit and non-edit mode
|
296 |
+
def textbox_switch(emode):
|
297 |
+
# if t.visible:
|
298 |
+
if not emode:
|
299 |
+
return [gr.update(visible=False), gr.update(visible=True)]
|
300 |
+
else:
|
301 |
+
return [gr.update(visible=True), gr.update(visible=False)]
|
302 |
+
|
303 |
+
tbb.submit(lambda: False, outputs=[is_in_edit_mode])
|
304 |
+
fb.click(lambda: True, outputs=[is_in_edit_mode])
|
305 |
+
is_in_edit_mode.change(textbox_switch, inputs=[is_in_edit_mode], outputs=[tbb, fb])
|
306 |
+
|
307 |
+
# submit user question
|
308 |
+
tbb.submit(lambda x: [gr.ChatMessage(role="user", content=x)], inputs=tbb, outputs=chatbot)
|
309 |
+
tbb.submit(
|
310 |
+
multimodal_generate_v2,
|
311 |
+
inputs=[
|
312 |
+
tbb,
|
313 |
+
temperature_slider,
|
314 |
+
repetition_penalty_slider,
|
315 |
+
top_p_slider,
|
316 |
+
top_k_slider,
|
317 |
+
max_new_tokens_slider,
|
318 |
+
],
|
319 |
+
outputs=[chatbot],
|
320 |
+
)
|
321 |
+
|
322 |
+
# extra model parameters
|
323 |
+
with gr.Accordion("Advanced Settings", open=False):
|
324 |
+
max_new_tokens_slider.render()
|
325 |
+
temperature_slider.render()
|
326 |
+
top_k_slider.render()
|
327 |
+
top_p_slider.render()
|
328 |
+
repetition_penalty_slider.render()
|
329 |
+
|
330 |
+
if __name__ == "__main__":
|
331 |
+
demo.queue(max_size=20).launch()
|
src/rag_app.py
ADDED
@@ -0,0 +1,275 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Template Demo for IBM Granite Hugging Face spaces."""
|
2 |
+
import os
|
3 |
+
import time
|
4 |
+
from pathlib import Path
|
5 |
+
import re
|
6 |
+
|
7 |
+
import gradio as gr
|
8 |
+
import spaces
|
9 |
+
import torch
|
10 |
+
from gradio_pdf import PDF
|
11 |
+
|
12 |
+
from sandbox.light_rag.light_rag import LightRAG
|
13 |
+
from themes.research_monochrome import theme
|
14 |
+
|
15 |
+
dir_ = Path(__file__).parent.parent
|
16 |
+
|
17 |
+
TITLE = "Multimodal RAG with Granite Vision 3.2"
|
18 |
+
|
19 |
+
DESCRIPTION = """
|
20 |
+
<p>This experimental demo highlights granite-vision-3.2-2b capabilities within a multimodal retrieval-augmented generation (RAG) pipeline, demonstrating Granite's document understanding in real-world applications. Explore the sample document excerpts and try the sample prompts or enter your own. Keep in mind that AI can occasionally make mistakes.
|
21 |
+
<span class="gr_docs_link">
|
22 |
+
<a href="https://www.ibm.com/granite/docs/models/vision/">View Documentation <i class="fa fa-external-link"></i></a>
|
23 |
+
</span>
|
24 |
+
</p>
|
25 |
+
"""
|
26 |
+
|
27 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu")
|
28 |
+
|
29 |
+
BASE_PATH = dir_ / "data" / "final_v2_mar04"
|
30 |
+
PDFS_PATH = BASE_PATH / "pdfs"
|
31 |
+
MILVUS_PATH = BASE_PATH / "milvus"
|
32 |
+
IMAGES_PATH = BASE_PATH / "images"
|
33 |
+
PREVIEWS_PATH = BASE_PATH / "preview"
|
34 |
+
|
35 |
+
sample_data = [
|
36 |
+
{
|
37 |
+
"preview_image": str(PREVIEWS_PATH / "IBM-financial-2010.png"),
|
38 |
+
"prompts": """Where geographically was the greatest growth in revenue in 2007?
|
39 |
+
Which year had the highest income in billion?
|
40 |
+
Did the net income decrease in 2007 compared to 2006?
|
41 |
+
Net cash from operations on 2005?
|
42 |
+
What does it mean to be Globally Integrated Enterprise?
|
43 |
+
What are the segments for pretax income?""".split("\n"),
|
44 |
+
"pdf": str(PDFS_PATH / "IBM_Annual_Report_2007_3-20.pdf"),
|
45 |
+
"index": "ibm_report_2007_short_text_milvus_lite_2048_128_slate_278m_cosine",
|
46 |
+
"db": str(MILVUS_PATH / "milvus.db"),
|
47 |
+
"name": "IBM annual report 2007",
|
48 |
+
"origin": "https://www.ibm.com/investor/att/pdf/IBM_Annual_Report_2007.pdf",
|
49 |
+
"image_paths": {"prefix": str(IMAGES_PATH / "ibm_report_2007") + "/", "use_last": 2},
|
50 |
+
},
|
51 |
+
{
|
52 |
+
"preview_image": str(PREVIEWS_PATH / "Wilhlborg-financial.png"),
|
53 |
+
"prompts": """Where does Wihlborgs mainly operate?
|
54 |
+
Which year had the second lowest Equity/assets ratio?
|
55 |
+
Which year had the highest Project investments value?
|
56 |
+
What is the trend of equity/assets ratio?
|
57 |
+
What was the Growth percentage in income from property management in 2020?
|
58 |
+
Has the company’s interest coverage ratio increased or decreased in recent years?""".split("\n")
|
59 |
+
,
|
60 |
+
"pdf": str(PDFS_PATH / "wihlborgs-2-13_16-18.pdf"),
|
61 |
+
"index": "wihlborgs_short_text_milvus_lite_2048_128_slate_278m_cosine",
|
62 |
+
"db": str(MILVUS_PATH / "milvus.db"),
|
63 |
+
"name": "Wihlborgs Report 2020",
|
64 |
+
"origin": "https://www.wihlborgs.se/globalassets/investor-relations/rapporter/2021/20210401-wihlborgs-annual-report-and-sustainability-report-2020-c24a6b51-c124-44fc-a4af-4237a33a29fb.pdf",
|
65 |
+
"image_paths": {"prefix": str(IMAGES_PATH / "wihlborgs") + "/", "use_last": 2},
|
66 |
+
},
|
67 |
+
]
|
68 |
+
|
69 |
+
config = {
|
70 |
+
"embedding_model_id": "ibm-granite/granite-embedding-278m-multilingual",
|
71 |
+
"generation_model_id": "ibm-granite/granite-3.1-8b-instruct",
|
72 |
+
"milvus_collection_name": "granite_vision_tech_report_text_milvus_lite_512_128_slate_125m_cosine",
|
73 |
+
"milvus_db_path": str(dir_ / "data" / MILVUS_PATH / "milvus_text_sample.db"),
|
74 |
+
}
|
75 |
+
|
76 |
+
if gr.NO_RELOAD:
|
77 |
+
light_rag: LightRAG = LightRAG(config)
|
78 |
+
if not os.environ.get("LAZY_LOADING") == "true":
|
79 |
+
for sample in sample_data:
|
80 |
+
light_rag.precache_milvus(sample["index"], sample["db"])
|
81 |
+
|
82 |
+
|
83 |
+
def lower_md_headers(md: str) -> str:
    """Demote markdown H1/H2 headings in *md* to H3 so retrieved snippets render with smaller titles."""
    def _demote(match: "re.Match") -> str:
        return "\n### " + match.group(1)

    return re.sub(r"(?:^|\n)##?\s(.+)", _demote, md)
|
85 |
+
|
86 |
+
|
87 |
+
# Parser for retrieval results
def format_retrieval_result(i, d, cb, selected_sample):
    """Append chat messages rendering one retrieved document into the chat history.

    i: zero-based rank of the retrieved document (displayed 1-based).
    d: retrieved document; d.metadata["type"] is "text" or "image_description".
    cb: chatbot message list, mutated in place.
    selected_sample: index into the module-level sample_data list.

    Returns True for "text" documents and implicitly None otherwise.
    NOTE(review): the caller yields a UI update only when this returns truthy,
    so image contexts are batched with the next update — confirm intentional.
    """
    image_paths = sample_data[selected_sample]["image_paths"]

    if d.metadata["type"] == "text":
        context_string = f"---\n## Context {i + 1}\n#### (text extracted from document)\n{lower_md_headers(d.page_content)}\n"
        cb.append(gr.ChatMessage(role="assistant", content=context_string))
        return True
    elif d.metadata["type"] == "image_description":
        context_string = f"---\n## Context {i + 1}\n#### (image description generated by Granite Vision)"
        cb.append(gr.ChatMessage(role="assistant", content=context_string))

        # Rebuild a local image path: keep the last `use_last` components of the
        # indexed absolute path and graft them onto this sample's local prefix.
        image_path_parts = d.metadata["image_fullpath"].split("/")
        image_path = image_paths["prefix"] + ("/".join(image_path_parts[-image_paths["use_last"]:]))
        cb.append(gr.ChatMessage(role="assistant", content=gr.Image(image_path)))

        cb.append(gr.ChatMessage(role="assistant", content=f"\n{lower_md_headers(d.metadata['image_description'])}\n"))
|
106 |
+
|
107 |
+
|
108 |
+
chatbot = gr.Chatbot(
|
109 |
+
examples=[{"text": x} for x in sample_data[0]["prompts"]],
|
110 |
+
type="messages",
|
111 |
+
label=f"Q&A about {sample_data[0]['name']}",
|
112 |
+
height=685,
|
113 |
+
group_consecutive_messages=True,
|
114 |
+
autoscroll=False,
|
115 |
+
elem_classes=["chatbot_view"],
|
116 |
+
)
|
117 |
+
|
118 |
+
|
119 |
+
@spaces.GPU()
def generate_with_llm(query, context):
    """Run RAG generation on GPU; returns (answer, prompt) like LightRAG.generate.

    When the NO_LLM env var is set (any non-empty value), skip model inference
    and return a canned answer after a short delay — useful for UI testing.
    """
    if os.environ.get("NO_LLM"):
        time.sleep(2)
        return "Now answer, just a string", query
    return light_rag.generate(query=query, context=context)
|
125 |
+
|
126 |
+
|
127 |
+
# TODO: maybe add GPU back ?
def retrieval(collection, db, q):
    """Return the top-3 documents for query q from the given Milvus collection/db."""
    return light_rag.search(q, top_n=3, collection=collection, db=db)
|
130 |
+
|
131 |
+
|
132 |
+
# ################
|
133 |
+
# User Interface
|
134 |
+
# ################
|
135 |
+
css_file_path = Path(Path(__file__).parent / "app.css")
|
136 |
+
head_file_path = Path(Path(__file__).parent / "app_head.html")
|
137 |
+
|
138 |
+
with gr.Blocks(fill_height=True, css_paths=css_file_path, head_paths=head_file_path, theme=theme, title=TITLE) as demo:
|
139 |
+
is_in_edit_mode = gr.State(True) # in block to be reactive
|
140 |
+
selected_doc = gr.State(0)
|
141 |
+
current_question = gr.State("")
|
142 |
+
|
143 |
+
gr.Markdown(f"# {TITLE}")
|
144 |
+
gr.Markdown(DESCRIPTION)
|
145 |
+
with gr.Row():
|
146 |
+
# LEFT COLUMN: Sample selection, download, and PDF viewer
|
147 |
+
with gr.Column():
|
148 |
+
# Show preview images
|
149 |
+
images_only = [sd["preview_image"] for sd in sample_data]
|
150 |
+
document_gallery = gr.Gallery(
|
151 |
+
images_only,
|
152 |
+
label="Select a document",
|
153 |
+
rows=1,
|
154 |
+
columns=3,
|
155 |
+
height="125px",
|
156 |
+
# width="125px",
|
157 |
+
allow_preview=False,
|
158 |
+
selected_index=0,
|
159 |
+
elem_classes=["preview_im_element"],
|
160 |
+
)
|
161 |
+
with gr.Group():
|
162 |
+
pdf_display = PDF(
|
163 |
+
sample_data[0]["pdf"],
|
164 |
+
label=f"Preview for {sample_data[0]['name']}",
|
165 |
+
height=460,
|
166 |
+
interactive=False,
|
167 |
+
elem_classes=["pdf_viewer"],
|
168 |
+
)
|
169 |
+
dl_btn = gr.DownloadButton(
|
170 |
+
label=f"Download PDF ({sample_data[0]['name']})", value=sample_data[0]["pdf"], visible=True
|
171 |
+
)
|
172 |
+
|
173 |
+
|
174 |
+
def sample_image_selected(d: gr.SelectData):
    """Switch the UI to the sample document selected in the gallery.

    Returns updates for, in order: the chatbot (fresh example prompts and
    label), the PDF preview, the download button, and the selected_doc state.
    """
    sample = sample_data[d.index]
    name = sample["name"]
    prompt_examples = [{"text": prompt} for prompt in sample["prompts"]]
    chatbot_update = gr.update(examples=prompt_examples, label=f"Q&A about {name}")
    pdf_update = gr.update(value=sample["pdf"], label=f"Preview for {name}")
    download_update = gr.DownloadButton(value=sample["pdf"], label=f"Download PDF ({name})")
    return chatbot_update, pdf_update, download_update, d.index
|
183 |
+
|
184 |
+
|
185 |
+
document_gallery.select(lambda: [], outputs=[chatbot])
|
186 |
+
document_gallery.select(sample_image_selected, inputs=[],
|
187 |
+
outputs=[chatbot, pdf_display, dl_btn, selected_doc])
|
188 |
+
|
189 |
+
# Right Column: Chat interface
|
190 |
+
with gr.Column():
|
191 |
+
# Render ChatBot
|
192 |
+
chatbot.render()
|
193 |
+
|
194 |
+
|
195 |
+
# Define behavior for example selection
def update_user_chat_x(x: gr.SelectData):
    """Turn a clicked example prompt into a one-message user chat history."""
    message = gr.ChatMessage(role="user", content=x.value["text"])
    return [message]
|
198 |
+
|
199 |
+
|
200 |
+
def question_from_selection(x: gr.SelectData):
    """Extract the raw question text from a selected example prompt."""
    selection = x.value
    return selection["text"]
|
202 |
+
|
203 |
+
|
204 |
+
def _decorate_yield_result(cb, fb_status=False, gallery_status=False):
    """Bundle a chatbot update with matching reset-button and gallery states.

    While generation runs (both flags False) the reset button is disabled and
    the gallery is greyed out via the "inactive_div" CSS class.
    """
    if gallery_status:
        gallery_classes = ["preview_im_element"]
    else:
        gallery_classes = ["preview_im_element", "inactive_div"]
    return cb, gr.Button(interactive=fb_status), gr.Gallery(elem_classes=gallery_classes)
|
207 |
+
|
208 |
+
|
209 |
+
def send_generate(msg, cb, selected_sample):
    """Generator: answer `msg` over the selected sample document via RAG.

    Mutates the chat history `cb` in place and yields (chatbot, reset-button,
    gallery) updates at each stage: a placeholder while the index is queried,
    one update per retrieved context document, a "waiting for LLM"
    placeholder, and finally the generated answer.

    NOTE: the placeholder at cb[1] is overwritten with the final answer, so
    this assumes cb was empty on entry — the UI resets the chat before each
    question.

    Fixes: removed unused local `context_string` and dead debug comments.
    """
    collection = sample_data[selected_sample]["index"]
    db = sample_data[selected_sample]["db"]

    # Echo the user question, then show an animated "querying" placeholder.
    cb.append(gr.ChatMessage(role="user", content=msg))
    cb.append(gr.ChatMessage(role="assistant",
                             content='## Answer\n*Querying Index*<span class="jumping-dots"><span class="dot-1">.</span> <span class="dot-2">.</span> <span class="dot-3">.</span></span>'))
    yield _decorate_yield_result(cb)

    q = msg.strip()
    results = retrieval(collection, db, q)

    # Stream each retrieved context document into the chat as it is formatted.
    for i, d in enumerate(results):
        if format_retrieval_result(i, d, cb, selected_sample):
            yield _decorate_yield_result(cb)

    # Replace the placeholder at index 1 while the LLM generates.
    cb[1] = gr.ChatMessage(role="assistant",
                           content='## Answer\n *Waiting for LLM* <span class="jumping-dots"><span class="dot-1">.</span> <span class="dot-2">.</span> <span class="dot-3">.</span></span> ')
    yield _decorate_yield_result(cb)

    answer, _prompt = generate_with_llm(q, results)
    cb[1] = gr.ChatMessage(role="assistant", content=f"## Answer\n<b>{answer.strip()}</b>")
    # Re-enable the reset button and gallery now that generation finished.
    yield _decorate_yield_result(cb, fb_status=True, gallery_status=True)
|
241 |
+
|
242 |
+
|
243 |
+
# Create User Chat Textbox and Reset Button
|
244 |
+
tbb = gr.Textbox(submit_btn=True, show_label=False, placeholder="Type a message...")
|
245 |
+
fb = gr.Button("Ask new question", visible=False)
|
246 |
+
fb.click(lambda: [], outputs=[chatbot])
|
247 |
+
|
248 |
+
chatbot.example_select(lambda: False, outputs=is_in_edit_mode)
|
249 |
+
# chatbot.example_select(update_user_chat_x, outputs=[chatbot])
|
250 |
+
chatbot.example_select(question_from_selection, inputs=[], outputs=[current_question]
|
251 |
+
).then(send_generate, inputs=[current_question, chatbot, selected_doc],
|
252 |
+
outputs=[chatbot, fb, document_gallery])
|
253 |
+
|
254 |
+
|
255 |
+
def textbox_switch(e_mode):
    """Toggle between edit mode (textbox visible) and answer mode (reset button visible)."""
    show_textbox = bool(e_mode)
    return [gr.update(visible=show_textbox), gr.update(visible=not show_textbox)]
|
260 |
+
|
261 |
+
|
262 |
+
tbb.submit(lambda: False, outputs=[is_in_edit_mode])
|
263 |
+
fb.click(lambda: True, outputs=[is_in_edit_mode])
|
264 |
+
is_in_edit_mode.change(textbox_switch, inputs=[is_in_edit_mode], outputs=[tbb, fb])
|
265 |
+
|
266 |
+
# submit user question
|
267 |
+
# tbb.submit(lambda x: [gr.ChatMessage(role="user", content=x)], inputs=tbb, outputs=chatbot)
|
268 |
+
tbb.submit(lambda x: x, inputs=[tbb], outputs=[current_question]
|
269 |
+
).then(send_generate,
|
270 |
+
inputs=[current_question, chatbot, selected_doc],
|
271 |
+
outputs=[chatbot, fb, document_gallery])
|
272 |
+
|
273 |
+
if __name__ == "__main__":
|
274 |
+
# demo.queue(max_size=20).launch()
|
275 |
+
demo.launch()
|
src/sandbox/light_rag/credits.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Light RAG code is adapted from work by:
|
2 |
+
Idan Friedmann (IBM)
|
3 |
+
Roi Pony (IBM)
|
4 |
+
Adi Raz Goldfarb (IBM)
|
src/sandbox/light_rag/hf_embedding.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List
|
2 |
+
|
3 |
+
from langchain_huggingface import HuggingFaceEmbeddings
|
4 |
+
|
5 |
+
from sandbox.light_rag.utils import get_device
|
6 |
+
|
7 |
+
|
8 |
+
class HFEmbedding:
    """Thin wrapper around langchain's HuggingFaceEmbeddings for document and query embedding."""

    def __init__(
        self,
        model_id: str,
    ):
        device = get_device()
        # TODO: hack for zeroGPU — force CPU regardless of the detected device
        device = "cpu"

        print(f"Using device: {device}")
        if device == "cpu":
            print("Using CPU might be too slow")

        self.model_name = model_id

        print(f"Loading embeddings model from: {self.model_name}")
        self.embeddings_service = HuggingFaceEmbeddings(
            model_name=self.model_name,
            model_kwargs={"device": device},
        )

    def embed_batch(self, batch: list[str]):
        """Embed a batch of texts; behaves like embed_documents."""
        return self.embeddings_service.embed_documents(batch)

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Return one embedding vector per input text."""
        return self.embeddings_service.embed_documents(texts)

    def embed_query(self, text: str) -> list[float]:
        """Return the embedding vector for a single query string."""
        return self.embed_documents([text])[0]
|
src/sandbox/light_rag/hf_llm.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
2 |
+
|
3 |
+
from sandbox.light_rag.utils import get_device
|
4 |
+
|
5 |
+
|
6 |
+
class HFLLM:
    """Minimal Hugging Face causal-LM wrapper: load once, then generate from a prompt string."""

    def __init__(self, model_name: str):
        self.device = get_device()
        self.model_name = model_name
        print("Loading HF model...")
        # Load the tokenizer and model from Hugging Face
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name).to(self.device)

    def generate(self, prompt: str, max_new_tokens: int = 1024) -> list:
        """Generate a completion for `prompt`.

        Returns a single-element list of {"answer": str}, where the answer is
        the text between the final "<|end_of_role|>" marker and the first
        following "<|end_of_text|>" marker (Granite chat-template delimiters).

        max_new_tokens: generation budget; previously hard-coded to 1024 and
        now a backward-compatible keyword parameter.
        """
        model_inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        generated_ids = self.model.generate(**model_inputs, max_new_tokens=max_new_tokens)
        generated_texts = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=False)

        answer = generated_texts[0].split("<|end_of_role|>")[-1].split("<|end_of_text|>")[0]
        return [{"answer": answer}]
|
src/sandbox/light_rag/light_rag.py
ADDED
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import logging
|
2 |
+
import os
|
3 |
+
from typing import List
|
4 |
+
|
5 |
+
from langchain_core.documents import Document
|
6 |
+
from langchain_core.vectorstores import VectorStore
|
7 |
+
from langchain_milvus import Milvus
|
8 |
+
|
9 |
+
from sandbox.light_rag.hf_embedding import HFEmbedding
|
10 |
+
from sandbox.light_rag.hf_llm import HFLLM
|
11 |
+
|
12 |
+
context_template = "Document:\n{document}\n"
|
13 |
+
token_limit = 4096
|
14 |
+
logger = logging.getLogger()
|
15 |
+
|
16 |
+
|
17 |
+
class LightRAG:
    """Lightweight RAG pipeline: Milvus retrieval + Hugging Face generation.

    config keys: embedding_model_id, generation_model_id,
    milvus_collection_name (default collection), milvus_db_path (default db).
    Models are loaded lazily when the LAZY_LOADING env var is set (any value).
    """

    def __init__(self, config: dict):
        self.config = config
        lazy_loading = os.environ.get("LAZY_LOADING")
        self.gen_model = None if lazy_loading else HFLLM(config['generation_model_id'])
        self._embedding_model = None if lazy_loading else HFEmbedding(config['embedding_model_id'])
        # Milvus handles keyed by (collection, db); filled by precache_milvus.
        self._pre_cached_indices = {}

    def _get_embedding_model(self):
        """Lazily construct and memoize the embedding model."""
        if self._embedding_model is None:
            self._embedding_model = HFEmbedding(self.config['embedding_model_id'])
        return self._embedding_model

    def _make_milvus_index(self, collection, db):
        """Build a Milvus vector-store handle for the given collection/db file.

        Extracted from precache_milvus/_get_milvus_index, which previously
        duplicated this construction.
        """
        return Milvus(
            embedding_function=self._get_embedding_model(),
            collection_name=collection.replace("-", "_"),
            # Fixed typo: key was "metric_ttpe", which Milvus would not
            # recognize as the metric setting. NOTE(review): verify existing
            # indices were built with COSINE before relying on this.
            index_params={"metric_type": "COSINE"},
            connection_args={"uri": db},
        )

    def precache_milvus(self, collection, db):
        """Eagerly open and cache the Milvus index so the first query is fast."""
        self._pre_cached_indices[self._cache_key(collection, db)] = self._make_milvus_index(collection, db)

    def _get_milvus_index(self, collection, db):
        """Return a cached Milvus handle if available, else open a fresh (uncached) one."""
        key = self._cache_key(collection, db)
        if key in self._pre_cached_indices:
            print(f"cache hit: {key}")
            return self._pre_cached_indices[key]
        return self._make_milvus_index(collection, db)

    def search(self, query: str, top_n: int = 5, collection=None, db=None) -> list[Document]:
        """Retrieve up to top_n documents for `query`.

        Falls back to the config's default collection/db when not given.
        Text chunks are always kept; an image description is kept only when no
        earlier result came from the same document_id.
        """
        col_name = self.config["milvus_collection_name"] if collection is None else collection
        db = self.config["milvus_db_path"] if db is None else db
        vs = self._get_milvus_index(col_name, db)

        # Over-fetch so enough candidates survive the type filtering below.
        context = vs.similarity_search(
            query=query,
            k=100,
        )

        results = []
        for d in context:
            if d.metadata.get("type") == "text":
                results.append(d)
            elif d.metadata.get("type") == "image_description":
                if not any(r.metadata["document_id"] == d.metadata.get("document_id") for r in results):
                    results.append(d)

        # Slicing handles top_n > len(results) safely.
        return results[:top_n]

    def _build_prompt(self, question: str, context: List[Document]):
        """Render the Granite chat template with `context` supplied as documents."""
        text_documents = []
        for doc in context:
            if doc.metadata['type'] == 'text':
                text_documents.append(doc.page_content.strip())
            elif doc.metadata['type'] == 'image_description':
                text_documents.append(doc.metadata['image_description'].strip())
            else:
                logger.warning('Should not get here!')

        documents = [{"text": x} for x in text_documents]
        prompt = self.gen_model.tokenizer.apply_chat_template(
            conversation=[
                {
                    "role": "user",
                    "content": question,
                }
            ],
            documents=documents,  # This uses the documents support in the Granite chat template
            add_generation_prompt=True,
            tokenize=False,
        )
        return prompt

    def generate(self, query, context=None):
        """Answer `query` over `context` documents; returns (answer, prompt)."""
        if self.gen_model is None:  # lazy-load the generator on first use
            self.gen_model = HFLLM(self.config["generation_model_id"])

        prompt = self._build_prompt(query, context)
        results = self.gen_model.generate(prompt)
        return results[0]["answer"], prompt

    def _cache_key(self, collection, db):
        """Stable dict key for a (collection, db-path) pair."""
        return collection + "___" + db
|
143 |
+
|
144 |
+
# if __name__ == '__main__':
|
145 |
+
# from dotenv import load_dotenv
|
146 |
+
# load_dotenv()
|
147 |
+
#
|
148 |
+
# config = {
|
149 |
+
# "embedding_model_id": "ibm-granite/granite-embedding-125m-english",
|
150 |
+
# "generation_model_id": "ibm-granite/granite-3.1-8b-instruct",
|
151 |
+
# "milvus_collection_name": "granite_vision_tech_report_text_milvus_lite_512_128_slate_125m_cosine",
|
152 |
+
# "milvus_db_path": "/dccstor/mm-rag/adi/code/RAGEval/milvus/text/milvus.db"
|
153 |
+
# }
|
154 |
+
#
|
155 |
+
# rag_app = LightRAG(config)
|
156 |
+
#
|
157 |
+
# query = "What models are available in Watsonx?"
|
158 |
+
#
|
159 |
+
# # run retrieval
|
160 |
+
# context = rag_app.search(query=query, top_n=5)
|
161 |
+
# # generate answers
|
162 |
+
# answer, prompt = rag_app.generate(query=query, context=context)
|
163 |
+
#
|
164 |
+
# print(f"Answer:\n{answer}")
|
165 |
+
# print(f"Used prompt:\n{prompt}")
|
166 |
+
|
167 |
+
|
168 |
+
# python -m debugpy --connect cccxl009.pok.ibm.com:3002 ./sandbox/light_rag/light_rag.py
|
src/sandbox/light_rag/utils.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import logging
|
2 |
+
import os
|
3 |
+
import re
|
4 |
+
from collections.abc import Sequence
|
5 |
+
from datetime import datetime, timezone
|
6 |
+
from pathlib import Path
|
7 |
+
|
8 |
+
import torch
|
9 |
+
import yaml
|
10 |
+
|
11 |
+
|
12 |
+
def batchify(seq: Sequence, batch_size: int):
    """Yield successive slices of `seq` of length `batch_size` (last slice may be shorter)."""
    start = 0
    total = len(seq)
    while start < total:
        yield seq[start : start + batch_size]
        start += batch_size
|
15 |
+
|
16 |
+
|
17 |
+
def get_device():
    """Pick the best available torch device string: Apple MPS, then CUDA, then CPU."""
    if torch.backends.mps.is_available():
        return "mps"  # mac GPU
    if torch.cuda.is_available():
        return "cuda"
    return "cpu"
|
24 |
+
|
25 |
+
|
26 |
+
def init_logger():
    """Configure root logging at INFO level with a timestamped name/level/message format."""
    log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_format)
|
31 |
+
|
32 |
+
|
33 |
+
def get_timestamp():
    """Return the current UTC time formatted as YYYY_MM_DD-HH_MM_SS."""
    return f"{datetime.now(timezone.utc):%Y_%m_%d-%H_%M_%S}"
|
35 |
+
|
36 |
+
|
37 |
+
# Matches names produced by get_timestamp: YYYY_MM_DD-HH_MM_SS
TIMESTAMP_PATTERN = re.compile(r"^\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2}$")


def get_last_timestamp(path: Path):
    """Return the latest timestamp-named entry in directory `path`, or None.

    Timestamp names (YYYY_MM_DD-HH_MM_SS) sort chronologically as strings, so
    max() over the names yields the most recent. Returns None when the
    directory does not exist or contains no timestamp-named entries.

    Improvements: uses pathlib (matching the annotation) instead of os.path/
    os.listdir, and max() instead of sorted(...)[-1].
    """
    p = Path(path)
    if p.exists():
        timestamps = [entry.name for entry in p.iterdir() if TIMESTAMP_PATTERN.match(entry.name)]
        if timestamps:
            return max(timestamps)
    return None
|
src/themes/carbon.py
ADDED
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""IBM Carbon theme for gradio demos.
|
2 |
+
|
3 |
+
This version builds on top of the Carbon theme to make it more playful with rounded corners, a larger font family to
|
4 |
+
enhance readability, and the IBM Cool Gray color palette for better consistency with other IBM Research demos, such as
|
5 |
+
Bee.
|
6 |
+
"""
|
7 |
+
|
8 |
+
import gradio as gr
|
9 |
+
from gradio.themes.utils import sizes
|
10 |
+
|
11 |
+
theme = gr.themes.Base(
|
12 |
+
primary_hue=gr.themes.Color(
|
13 |
+
c100="#EDF5FF",
|
14 |
+
c200="#D0E2FF",
|
15 |
+
c300="#A6C8FF",
|
16 |
+
c400="#78A9FF",
|
17 |
+
c50="#F9F9FB",
|
18 |
+
c500="#4589FF",
|
19 |
+
c600="#0F62FE",
|
20 |
+
c700="#0043CE",
|
21 |
+
c800="#002D9C",
|
22 |
+
c900="#001D6C",
|
23 |
+
c950="#001141",
|
24 |
+
),
|
25 |
+
secondary_hue=gr.themes.Color(
|
26 |
+
c100="#EDF5FF",
|
27 |
+
c200="#D0E2FF",
|
28 |
+
c300="#A6C8FF",
|
29 |
+
c400="#78A9FF",
|
30 |
+
c50="#F9F9FB",
|
31 |
+
c500="#4589FF",
|
32 |
+
c600="#0F62FE",
|
33 |
+
c700="#0043CE",
|
34 |
+
c800="#002D9C",
|
35 |
+
c900="#001D6C",
|
36 |
+
c950="#001141",
|
37 |
+
),
|
38 |
+
neutral_hue=gr.themes.Color(
|
39 |
+
c100="#F2F4F8",
|
40 |
+
c200="#DDE1E6",
|
41 |
+
c300="#C1C7CD",
|
42 |
+
c400="#A2A9B0",
|
43 |
+
c50="#F9F9FB",
|
44 |
+
c500="#878D96",
|
45 |
+
c600="#697077",
|
46 |
+
c700="#4D5358",
|
47 |
+
c800="#393939",
|
48 |
+
c900="#21272A",
|
49 |
+
c950="#121619",
|
50 |
+
),
|
51 |
+
spacing_size=sizes.spacing_md, # change spacing to default size
|
52 |
+
radius_size=sizes.radius_md, # change spacing to default size and Keep Radius to make demo feel more playful
|
53 |
+
text_size=sizes.text_lg, # change fontsize to default size
|
54 |
+
# spacing_size: sizes.Size | str = sizes.spacing_md, #change spacing to default size
|
55 |
+
# radius_size: sizes.Size | str = sizes.radius_md, #change spacing to default size and Keep Radius to make
|
56 |
+
# demo feel more playful
|
57 |
+
# text_size: sizes.Size | str = sizes.text_lg, #change fontsize to default size
|
58 |
+
font=["IBM Plex Sans", "ui-sans-serif", "system-ui", "sans-serif"], # update font
|
59 |
+
font_mono=["IBM Plex Mono", "ui-monospace", "Consolas", "monospace"], # update font
|
60 |
+
).set(
|
61 |
+
# Colors
|
62 |
+
background_fill_primary="*neutral_100", # Coolgray10 background
|
63 |
+
background_fill_primary_dark="*neutral_950", # Coolgray95 background for dark mode
|
64 |
+
slider_color="*primary_600", # Blue60
|
65 |
+
slider_color_dark="*primary_500", # Blue50
|
66 |
+
# Shadows
|
67 |
+
shadow_drop="0 1px 4px 0 rgb(0 0 0 / 0.1)",
|
68 |
+
shadow_drop_lg="0 2px 5px 0 rgb(0 0 0 / 0.1)",
|
69 |
+
# Block Labels
|
70 |
+
block_background_fill="white",
|
71 |
+
block_label_background_fill="white", # same color as blockback gound fill
|
72 |
+
block_label_radius="*radius_md",
|
73 |
+
block_label_text_size="*text_md",
|
74 |
+
block_label_text_weight="600",
|
75 |
+
block_label_text_color="black",
|
76 |
+
block_label_text_color_dark="white",
|
77 |
+
block_title_radius="*block_label_radius",
|
78 |
+
block_title_background_fill="*block_label_background_fill",
|
79 |
+
block_title_text_weight="600",
|
80 |
+
block_title_text_color="black",
|
81 |
+
block_title_text_color_dark="white",
|
82 |
+
block_label_margin="*spacing_md",
|
83 |
+
# Inputs
|
84 |
+
input_background_fill="white",
|
85 |
+
input_background_fill_dark="*block-background-fill",
|
86 |
+
input_border_color="*neutral_100",
|
87 |
+
input_shadow="*shadow_drop",
|
88 |
+
input_shadow_focus="*shadow_drop_lg",
|
89 |
+
checkbox_shadow="none",
|
90 |
+
# Buttons
|
91 |
+
shadow_spread="6px",
|
92 |
+
button_primary_shadow="*shadow_drop_lg",
|
93 |
+
button_primary_shadow_hover="*shadow_drop_lg",
|
94 |
+
button_primary_shadow_active="*shadow_inset",
|
95 |
+
button_secondary_shadow="*shadow_drop_lg",
|
96 |
+
button_secondary_shadow_hover="*shadow_drop_lg",
|
97 |
+
button_secondary_shadow_active="*shadow_inset",
|
98 |
+
checkbox_label_shadow="*shadow_drop_lg",
|
99 |
+
button_primary_background_fill="*primary_600",
|
100 |
+
button_primary_background_fill_hover="*primary_500",
|
101 |
+
button_primary_background_fill_hover_dark="*primary_500",
|
102 |
+
button_primary_text_color="white",
|
103 |
+
button_secondary_background_fill="white",
|
104 |
+
button_secondary_background_fill_hover="*neutral_100",
|
105 |
+
button_secondary_background_fill_dark="*neutral_800", # Secondary cool gray 80
|
106 |
+
button_secondary_background_fill_hover_dark="*primary_500",
|
107 |
+
button_secondary_text_color="*neutral_800",
|
108 |
+
button_cancel_background_fill="*button_secondary_background_fill",
|
109 |
+
button_cancel_background_fill_hover="*button_secondary_background_fill_hover",
|
110 |
+
button_cancel_background_fill_hover_dark="*button_secondary_background_fill_hover",
|
111 |
+
button_cancel_text_color="*button_secondary_text_color",
|
112 |
+
checkbox_label_background_fill_selected="*primary_200",
|
113 |
+
checkbox_label_background_fill_selected_dark="*primary_500",
|
114 |
+
checkbox_border_width="1px",
|
115 |
+
checkbox_border_color="*neutral_200",
|
116 |
+
checkbox_background_color_dark="*neutral_700", # Jan 18 test to fix checkbox, radio button background color
|
117 |
+
checkbox_background_color_selected="*primary_600",
|
118 |
+
checkbox_background_color_selected_dark="*primary_500",
|
119 |
+
checkbox_border_color_focus="*primary_600",
|
120 |
+
checkbox_border_color_focus_dark="*primary_500",
|
121 |
+
checkbox_border_color_selected="*primary_600",
|
122 |
+
checkbox_border_color_selected_dark="*primary_500",
|
123 |
+
checkbox_label_text_color_selected="black",
|
124 |
+
# Borders
|
125 |
+
block_border_width="1px", # test example border
|
126 |
+
panel_border_width="1px",
|
127 |
+
# Chatbubble related colors
|
128 |
+
# light
|
129 |
+
# color_accent = "*secondary_400",
|
130 |
+
border_color_accent_subdued="*color_accent_soft", # chatbubble human border color, use Blue 20 as an accent color
|
131 |
+
color_accent_soft="*secondary_200", # chatbubble human color
|
132 |
+
# darkmode
|
133 |
+
# chatbubble human border color in darkmode, use Blue 20 as an accent color
|
134 |
+
border_color_accent_subdued_dark="*secondary_500",
|
135 |
+
color_accent_soft_dark="*secondary_500", # chatbubble human color in dark mode
|
136 |
+
# Chatbot related font
|
137 |
+
chatbot_text_size="*text_md", # make it larger
|
138 |
+
# additional dark mode related tweaks:
|
139 |
+
# block_background_fill_dark="*neutral_950", # Jan 18 test coolgray95 background for dark mode
|
140 |
+
block_label_background_fill_dark="*neutral_800", # same color as blockback gound fill
|
141 |
+
block_title_background_fill_dark="*block_label_background_fill",
|
142 |
+
# input_background_fill_dark="*neutral_800", #This attribute help match fill color cool gray 80 to match background
|
143 |
+
# however cause the problem for the general theme.
|
144 |
+
# input_shadow_dark="*shadow_drop", #Test if it could make the border without the color
|
145 |
+
# input_border_color_dark="*neutral_200",#add attribute for border Jan 18
|
146 |
+
checkbox_border_color_dark="*neutral_600", # Jan 18 test to fix border
|
147 |
+
)
|
src/themes/research_monochrome.py
ADDED
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""IBM Research Monochrome theme for gradio demos.
|
2 |
+
|
3 |
+
This version is a variation of CarbonSoft style, where the primary button is dark gray to create monochrome style. This
|
4 |
+
version uses the style from Research demos such as Bee to make it more playful with rounded corners, a larger font
|
5 |
+
family to enhance readability, and the IBM Cool Gray color palette for better consistency with other IBM Research demos,
|
6 |
+
such as Bee.
|
7 |
+
"""
|
8 |
+
|
9 |
+
import gradio as gr
|
10 |
+
from gradio.themes.utils import sizes
|
11 |
+
|
12 |
+
theme = gr.themes.Base(
|
13 |
+
primary_hue=gr.themes.Color(
|
14 |
+
c100="#EDF5FF",
|
15 |
+
c200="#D0E2FF",
|
16 |
+
c300="#A6C8FF",
|
17 |
+
c400="#78A9FF",
|
18 |
+
c50="#F9F9FB",
|
19 |
+
c500="#4589FF",
|
20 |
+
c600="#0F62FE",
|
21 |
+
c700="#0043CE",
|
22 |
+
c800="#002D9C",
|
23 |
+
c900="#001D6C",
|
24 |
+
c950="#001141",
|
25 |
+
),
|
26 |
+
secondary_hue=gr.themes.Color(
|
27 |
+
c100="#EDF5FF",
|
28 |
+
c200="#D0E2FF",
|
29 |
+
c300="#A6C8FF",
|
30 |
+
c400="#78A9FF",
|
31 |
+
c50="#F9F9FB",
|
32 |
+
c500="#4589FF",
|
33 |
+
c600="#0F62FE",
|
34 |
+
c700="#0043CE",
|
35 |
+
c800="#002D9C",
|
36 |
+
c900="#001D6C",
|
37 |
+
c950="#001141",
|
38 |
+
),
|
39 |
+
neutral_hue=gr.themes.Color(
|
40 |
+
c100="#F2F4F8",
|
41 |
+
c200="#DDE1E6",
|
42 |
+
c300="#C1C7CD",
|
43 |
+
c400="#A2A9B0",
|
44 |
+
c50="#F9F9FB",
|
45 |
+
c500="#878D96",
|
46 |
+
c600="#697077",
|
47 |
+
c700="#4D5358",
|
48 |
+
c800="#393939",
|
49 |
+
c900="#21272A",
|
50 |
+
c950="#121619",
|
51 |
+
),
|
52 |
+
spacing_size=sizes.spacing_md, # change spacing to default size
|
53 |
+
radius_size=sizes.radius_md, # change spacing to default size and Keep Radius to make demo feel more playful
|
54 |
+
text_size=sizes.text_md, # change fontsize to default size
|
55 |
+
# spacing_size: sizes.Size | str = sizes.spacing_md, #change spacing to default size
|
56 |
+
# radius_size: sizes.Size | str = sizes.radius_md, #change spacing to default size and Keep Radius to make
|
57 |
+
# demo feel more playful
|
58 |
+
# text_size: sizes.Size | str = sizes.text_lg, #change fontsize to default size
|
59 |
+
font=["IBM Plex Sans", "ui-sans-serif", "system-ui", "sans-serif"], # update font
|
60 |
+
font_mono=["IBM Plex Mono", "ui-monospace", "Consolas", "monospace"], # update font
|
61 |
+
).set(
|
62 |
+
# Colors
|
63 |
+
background_fill_primary="*neutral_100", # Coolgray10 background
|
64 |
+
background_fill_primary_dark="*neutral_950", # Coolgray95 background for dark mode
|
65 |
+
# Change blue to black to create monochrome style
|
66 |
+
slider_color="*neutral_900",
|
67 |
+
slider_color_dark="*primary_500",
|
68 |
+
# Shadows
|
69 |
+
shadow_drop="0 1px 4px 0 rgb(0 0 0 / 0.1)",
|
70 |
+
shadow_drop_lg="0 2px 5px 0 rgb(0 0 0 / 0.1)",
|
71 |
+
# Block Labels
|
72 |
+
block_background_fill="white",
|
73 |
+
block_label_background_fill="white", # same color as blockback gound fill
|
74 |
+
block_label_radius="*radius_md",
|
75 |
+
block_label_text_size="*text_md",
|
76 |
+
block_label_text_weight="600",
|
77 |
+
block_label_text_color="black",
|
78 |
+
block_label_text_color_dark="white",
|
79 |
+
block_title_radius="*block_label_radius",
|
80 |
+
block_title_background_fill="*block_label_background_fill",
|
81 |
+
block_title_text_weight="400",
|
82 |
+
block_title_text_color="black",
|
83 |
+
block_title_text_color_dark="white",
|
84 |
+
block_label_margin="*spacing_md",
|
85 |
+
# Inputs
|
86 |
+
input_background_fill="white",
|
87 |
+
input_background_fill_dark="*block-background-fill",
|
88 |
+
input_border_color="*neutral_100",
|
89 |
+
input_shadow="*shadow_drop",
|
90 |
+
input_shadow_dark="0 1px 4px #000",
|
91 |
+
input_shadow_focus="*shadow_drop_lg",
|
92 |
+
checkbox_shadow="none",
|
93 |
+
# Buttons
|
94 |
+
shadow_spread="6px",
|
95 |
+
button_primary_shadow="*shadow_drop_lg",
|
96 |
+
button_primary_shadow_hover="*shadow_drop_lg",
|
97 |
+
button_primary_shadow_active="*shadow_inset",
|
98 |
+
button_secondary_shadow="*shadow_drop_lg",
|
99 |
+
button_secondary_shadow_hover="*shadow_drop_lg",
|
100 |
+
button_secondary_shadow_active="*shadow_inset",
|
101 |
+
checkbox_label_shadow="*shadow_drop_lg",
|
102 |
+
# Change blue to black to create monochrome style
|
103 |
+
button_primary_background_fill="*neutral_900",
|
104 |
+
button_primary_background_fill_dark="*neutral_600",
|
105 |
+
button_primary_background_fill_hover="*neutral_700",
|
106 |
+
button_primary_background_fill_hover_dark="*primary_500", # hover to be blue
|
107 |
+
button_primary_text_color="white",
|
108 |
+
button_secondary_background_fill="white",
|
109 |
+
button_secondary_background_fill_hover="*neutral_100",
|
110 |
+
button_secondary_background_fill_dark="*neutral_800", # Secondary cool gray 80
|
111 |
+
button_secondary_background_fill_hover_dark="*primary_500",
|
112 |
+
button_secondary_text_color="*neutral_800",
|
113 |
+
button_cancel_background_fill="*button_secondary_background_fill",
|
114 |
+
button_cancel_background_fill_hover="*button_secondary_background_fill_hover",
|
115 |
+
button_cancel_background_fill_hover_dark="*button_secondary_background_fill_hover",
|
116 |
+
button_cancel_text_color="*button_secondary_text_color",
|
117 |
+
checkbox_label_background_fill_selected="*primary_200",
|
118 |
+
checkbox_label_background_fill_selected_dark="*primary_500",
|
119 |
+
checkbox_border_width="1px",
|
120 |
+
checkbox_border_color="*neutral_200",
|
121 |
+
checkbox_background_color_dark="*neutral_700", # Jan 18 test to fix checkbox, radio button background color
|
122 |
+
checkbox_background_color_selected="*primary_600",
|
123 |
+
checkbox_background_color_selected_dark="*primary_500",
|
124 |
+
checkbox_border_color_focus="*primary_600",
|
125 |
+
checkbox_border_color_focus_dark="*primary_500",
|
126 |
+
checkbox_border_color_selected="*primary_600",
|
127 |
+
checkbox_border_color_selected_dark="*primary_500",
|
128 |
+
checkbox_label_text_color_selected="black",
|
129 |
+
# Borders
|
130 |
+
block_border_width="1px", # test example border
|
131 |
+
panel_border_width="1px",
|
132 |
+
# Chatbubble related colors
|
133 |
+
# light
|
134 |
+
# color_accent = "*secondary_400",
|
135 |
+
border_color_accent_subdued="*color_accent_soft", # chatbubble human border color, use Blue 20 as an accent color
|
136 |
+
color_accent_soft="*secondary_200", # chatbubble human color
|
137 |
+
# darkmode
|
138 |
+
# chatbubble human border color in darkmode, use Blue 20 as an accent color
|
139 |
+
border_color_accent_subdued_dark="*secondary_500",
|
140 |
+
color_accent_soft_dark="*secondary_500", # chatbubble human color in dark mode
|
141 |
+
# Chatbot related font
|
142 |
+
chatbot_text_size="*text_md", # make it larger
|
143 |
+
# additional dark mode related tweaks:
|
144 |
+
# block_background_fill_dark="*neutral_950", # Jan 18 test coolgray95 background for dark mode
|
145 |
+
block_label_background_fill_dark="*neutral_800", # same color as blockback gound fill
|
146 |
+
block_title_background_fill_dark="*block_label_background_fill",
|
147 |
+
# input_background_fill_dark="*neutral_800", #This attribute help match fill color cool gray 80 to match background
|
148 |
+
# however cause the problem for the general theme.
|
149 |
+
# input_shadow_dark="*shadow_drop", #Test if it could make the border without the color
|
150 |
+
# input_border_color_dark="*neutral_200",#add attribute for border Jan 18
|
151 |
+
checkbox_border_color_dark="*neutral_600", # Jan 18 test to fix border
|
152 |
+
)
|