asofter committed
Commit 6131bcb · verified · 1 Parent(s): 05bf37a

upgrades to dependencies and fixing the docs

Files changed (6):
  1. Dockerfile +1 -1
  2. README.md +1 -1
  3. app.py +3 -3
  4. output.py +5 -5
  5. prompt.py +5 -5
  6. requirements.txt +4 -4
Dockerfile CHANGED
@@ -1,4 +1,4 @@
-FROM python:3.11-slim
+FROM python:3.12-slim
 
 RUN apt-get update && apt-get install -y \
     build-essential \
README.md CHANGED
@@ -20,7 +20,7 @@ A live version can be found here: https://huggingface.co/spaces/ProtectAI/llm-gu
 2. Install dependencies (preferably in a virtual environment)
 
    ```sh
-   pip install -r requirements.txt
+   pip install -r requirements.txt -U
    ```
 
 3. Start the app:
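
The added `-U` (`--upgrade`) flag makes pip upgrade packages that are already installed in the environment to the versions pinned in `requirements.txt`, so rerunning this step after a dependency bump such as this commit actually picks up the new pins instead of leaving the old versions in place.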
app.py CHANGED
@@ -20,7 +20,7 @@ st.set_page_config(
     layout="wide",
     initial_sidebar_state="expanded",
     menu_items={
-        "About": "https://llm-guard.com/",
+        "About": "https://protectai.github.io/llm-guard/",
     },
 )
 
@@ -30,7 +30,7 @@ logger.setLevel(logging.INFO)
 # Sidebar
 st.sidebar.header(
     """
-    Scanning prompt and output using [LLM Guard](https://llm-guard.com/)
+    Scanning prompt and output using [LLM Guard](https://protectai.github.io/llm-guard/)
     """
 )
 
@@ -53,7 +53,7 @@ with st.expander("About", expanded=False):
     st.info(
         """LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs).
         \n\n[Code](https://github.com/protectai/llm-guard) |
-        [Documentation](https://llm-guard.com/)"""
+        [Documentation](https://protectai.github.io/llm-guard/)"""
     )
 
 analyzer_load_state = st.info("Starting LLM Guard...")
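
All three hunks above swap the same documentation URL, and the same URL recurs in output.py and prompt.py below. One optional follow-up (not part of this commit) would be to keep the docs base URL in a single module-level constant so the next docs move only touches one line; a minimal sketch, where the module name, constant, and helper are hypothetical:

```python
# hypothetical docs.py helper -- not part of this commit, names are illustrative
LLM_GUARD_DOCS_URL = "https://protectai.github.io/llm-guard/"


def docs_link(path: str = "") -> str:
    """Return a link into the LLM Guard docs, e.g. docs_link("input_scanners/anonymize/")."""
    return LLM_GUARD_DOCS_URL + path.lstrip("/")
```

The `menu_items={"About": ...}` entry, the sidebar header, and the various `help=` / `st.caption()` strings could then all be built from `docs_link()`, making a future URL change a one-line edit.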
output.py CHANGED
@@ -4,7 +4,7 @@ from datetime import timedelta
 from typing import Dict, List
 
 import streamlit as st
-from llm_guard.input_scanners.anonymize import default_entity_types
+from llm_guard.input_scanners.anonymize import DEFAULT_ENTITY_TYPES
 from llm_guard.input_scanners.code import SUPPORTED_LANGUAGES as SUPPORTED_CODE_LANGUAGES
 from llm_guard.output_scanners import get_scanner_by_name
 from llm_guard.output_scanners.bias import MatchType as BiasMatchType
@@ -47,7 +47,7 @@ def init_settings() -> (List, Dict):
         "Select scanners",
         options=all_scanners,
         default=all_scanners,
-        help="The list can be found here: https://llm-guard.com/output_scanners/bias/",
+        help="The list can be found here: https://protectai.github.io/llm-guard/output_scanners/bias/",
     )
 
     settings = {}
@@ -424,14 +424,14 @@ def init_settings() -> (List, Dict):
     st_sens_entity_types = st_tags(
         label="Sensitive entities",
         text="Type and press enter",
-        value=default_entity_types,
-        suggestions=default_entity_types
+        value=DEFAULT_ENTITY_TYPES,
+        suggestions=DEFAULT_ENTITY_TYPES
         + ["DATE_TIME", "NRP", "LOCATION", "MEDICAL_LICENSE", "US_PASSPORT"],
         maxtags=30,
         key="sensitive_entity_types",
     )
     st.caption(
-        "Check all supported entities: https://llm-guard.com/input_scanners/anonymize/"
+        "Check all supported entities: https://protectai.github.io/llm-guard/input_scanners/anonymize/"
     )
     st_sens_redact = st.checkbox("Redact", value=False, key="sens_redact")
     st_sens_threshold = st.slider(
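
The import change tracks the renamed constant in the newer llm-guard release, so output.py now only imports cleanly against the 0.3.16 pin below. If the playground ever had to run against both the old and new pins, a guarded import could bridge the rename; a sketch, assuming the older releases keep exporting the lowercase name (as the removed line suggests):

```python
# Compatibility sketch (not in this commit): prefer the new constant name,
# fall back to the lowercase name used by older llm-guard releases.
try:
    from llm_guard.input_scanners.anonymize import DEFAULT_ENTITY_TYPES
except ImportError:
    from llm_guard.input_scanners.anonymize import (
        default_entity_types as DEFAULT_ENTITY_TYPES,
    )
```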
prompt.py CHANGED
@@ -5,7 +5,7 @@ from typing import Dict, List
 
 import streamlit as st
 from llm_guard.input_scanners import get_scanner_by_name
-from llm_guard.input_scanners.anonymize import default_entity_types
+from llm_guard.input_scanners.anonymize import DEFAULT_ENTITY_TYPES
 from llm_guard.input_scanners.code import SUPPORTED_LANGUAGES as SUPPORTED_CODE_LANGUAGES
 from llm_guard.input_scanners.gibberish import MatchType as GibberishMatchType
 from llm_guard.input_scanners.language import MatchType as LanguageMatchType
@@ -39,7 +39,7 @@ def init_settings() -> (List, Dict):
         "Select scanners",
         options=all_scanners,
         default=all_scanners,
-        help="The list can be found here: https://llm-guard.com/input_scanners/anonymize/",
+        help="The list can be found here: https://protectai.github.io/llm-guard/input_scanners/anonymize/",
     )
 
     settings = {}
@@ -54,14 +54,14 @@ def init_settings() -> (List, Dict):
     st_anon_entity_types = st_tags(
         label="Anonymize entities",
         text="Type and press enter",
-        value=default_entity_types,
-        suggestions=default_entity_types
+        value=DEFAULT_ENTITY_TYPES,
+        suggestions=DEFAULT_ENTITY_TYPES
         + ["DATE_TIME", "NRP", "LOCATION", "MEDICAL_LICENSE", "US_PASSPORT"],
         maxtags=30,
         key="anon_entity_types",
     )
     st.caption(
-        "Check all supported entities: https://llm-guard.com/input_scanners/anonymize/"
+        "Check all supported entities: https://protectai.github.io/llm-guard/input_scanners/anonymize/"
     )
     st_anon_hidden_names = st_tags(
         label="Hidden names to be anonymized",
requirements.txt CHANGED
@@ -1,5 +1,5 @@
-llm-guard==0.3.12
-llm-guard[onnxruntime]==0.3.12
-pandas==2.2.2
-streamlit==1.33.0
+llm-guard==0.3.16
+llm-guard[onnxruntime]==0.3.16
+pandas==2.3.1
+streamlit==1.46.1
 streamlit-tags==1.2.8
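
Of these pins, the llm-guard bump from 0.3.12 to 0.3.16 is presumably the one that required the `default_entity_types` → `DEFAULT_ENTITY_TYPES` import change in output.py and prompt.py; the pandas and streamlit bumps read as routine pin refreshes alongside the Dockerfile's move to `python:3.12-slim`.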