Maria Tsilimos committed on
Commit d2e00e4 · unverified · 1 Parent(s): 75df685

Create app.py

Files changed (1)
  1. app.py +408 -0
app.py ADDED
@@ -0,0 +1,408 @@
+ import time
+ import streamlit as st
+ import pandas as pd
+ import io
+ from transformers import pipeline
+ from streamlit_extras.stylable_container import stylable_container
+ import plotly.express as px
+ import zipfile
+ import os
+ from comet_ml import Experiment
+ import re
+ import numpy as np
+ import json
+ from cryptography.fernet import Fernet
+
+ st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
+
+
+ COMET_API_KEY = os.environ.get("COMET_API_KEY")
+ COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
+ COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
+
+ comet_initialized = False
+ if COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME:
+     comet_initialized = True
+
+
+ if 'file_upload_attempts' not in st.session_state:
+     st.session_state['file_upload_attempts'] = 0
+
+
+ if 'encrypted_extracted_text' not in st.session_state:
+     st.session_state['encrypted_extracted_text'] = None
+
+
+ if 'json_dataframe' not in st.session_state:
+     st.session_state['json_dataframe'] = None
+
+ max_attempts = 10
+
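+ # Expected environment configuration (summarizing the checks above and below;
+ # set e.g. via Hugging Face Spaces secrets or a local .env file):
+ #   COMET_API_KEY, COMET_WORKSPACE, COMET_PROJECT_NAME -> optional Comet ML logging
+ #   FERNET_KEY -> required by load_encryption_key() below; the app stops without it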
+
+ @st.cache_resource
+ def load_ner_model():
+     try:
+         return pipeline("token-classification",
+                         model="saattrupdan/nbailab-base-ner-scandi",
+                         aggregation_strategy="max",
+                         stride=128)
+     except Exception as e:
+         st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
+         st.stop()
+
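+ # A sketch of the output shape the pipeline returns (illustrative values only):
+ #   load_ner_model()("Anna bor i Oslo") ->
+ #       [{'entity_group': 'PER', 'score': 0.99, 'word': 'Anna', 'start': 0, 'end': 4},
+ #        {'entity_group': 'LOC', 'score': 0.99, 'word': 'Oslo', 'start': 11, 'end': 15}]
+ # aggregation_strategy="max" merges sub-word tokens into whole-word entities.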
+
+ @st.cache_resource
+ def load_encryption_key():
+     """
+     Loads the Fernet encryption key from environment variables.
+     This key is crucial for encrypting/decrypting sensitive data.
+     It is cached as a resource so it is loaded only once.
+     """
+     try:
+         # Get the key string from environment variables
+         key_str = os.environ.get("FERNET_KEY")
+         if not key_str:
+             raise ValueError("FERNET_KEY environment variable not set. Cannot perform encryption/decryption.")
+
+         # Fernet keys must be bytes, so encode the string
+         key_bytes = key_str.encode('utf-8')
+         return Fernet(key_bytes)
+     except ValueError as ve:
+         st.error(f"Configuration Error: {ve}. Please ensure the 'FERNET_KEY' environment variable is set securely in your deployment environment (e.g., Hugging Face Spaces secrets, Render environment variables) or in a local .env file for development.")
+         st.stop()  # Stop the app if the key is not found, as security is compromised
+     except Exception as e:
+         st.error(f"An unexpected error occurred while loading the encryption key: {e}. Please check your key format and environment settings.")
+         st.stop()
+
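+ # A valid FERNET_KEY is a url-safe base64-encoded 32-byte key. One way to
+ # generate one for your secrets store (run once, outside this app):
+ #   python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"
+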
+ # Initialize the Fernet cipher instance
+ fernet = load_encryption_key()
+
+ def encrypt_text(text_content: str) -> bytes:
+     """
+     Encrypts a string using the loaded Fernet cipher.
+     The input string is first encoded to UTF-8 bytes.
+     """
+     return fernet.encrypt(text_content.encode('utf-8'))
+
+ def decrypt_text(encrypted_bytes: bytes) -> str | None:
+     """
+     Decrypts bytes using the loaded Fernet cipher.
+     Returns the decrypted string, or None if decryption fails (e.g., tampering).
+     """
+     try:
+         return fernet.decrypt(encrypted_bytes).decode('utf-8')
+     except Exception as e:
+         st.error(f"Decryption failed. This might indicate data tampering or an incorrect encryption key. Error: {e}")
+         return None
+
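+ # Round-trip sketch: decrypt_text(encrypt_text("hej")) == "hej". Fernet tokens
+ # are authenticated (AES-128-CBC + HMAC-SHA256), so tampered ciphertext raises
+ # InvalidToken inside decrypt_text(), which then returns None.
+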
+ # --- UI Elements ---
+ st.subheader("Scandinavian JSON Entity Finder", divider="orange")
+ st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")
+
+ expander = st.expander("**Important notes on the Scandinavian JSON Entity Finder**")
+ expander.write('''
+ **Named Entities:** This Scandinavian JSON Entity Finder predicts four (4)
+ labels (“PER: person”, “LOC: location”, “ORG: organization”, “MISC:
+ miscellaneous”). Results are presented in an easy-to-read table, visualized in
+ an interactive tree map, pie chart, and bar chart, and are available for
+ download along with a Glossary of tags.
+
+ **How to Use:** Upload your JSON file, then click the 'Results' button to
+ extract and tag the entities in your text data.
+
+ **Usage Limits:** You can request results up to 10 times.
+
+ **Language settings:** Please check and adjust the language settings on your
+ computer so that Danish, Swedish, Norwegian, Icelandic, and Faroese characters
+ are handled properly in your downloaded file.
+
+ **Customization:** To change the app's background color to white or black,
+ click the three-dot menu on the right-hand side of the app, go to Settings,
+ and then choose the app theme, colors, and fonts.
+
+ **Technical issues:** If your connection times out, please refresh the page
+ or reopen the app's URL.
+
+ For any errors or inquiries, please contact us at info@nlpblogs.com
+ ''')
+
+ with st.sidebar:
+     container = st.container(border=True)
+     container.write("**Named Entity Recognition (NER)** is the task of "
+                     "extracting and tagging entities in text data. Entities can be persons, "
+                     "organizations, locations, countries, products, events, etc.")
+     st.subheader("Related NLP Web Apps", divider="orange")
+     st.link_button("Italian URL & TXT Entity Finder",
+                    "https://nlpblogs.com/shop/named-entity-recognition-ner/monolingual-ner-web-apps/italian-url-txt-entity-finder/",
+                    type="primary")
+
+
+ uploaded_file = st.file_uploader("Choose a JSON file", type=["json"])
+
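+ # Illustrative JSON shapes the extraction below handles:
+ #   [{"title": "...", "body": "..."}, ...]   # list of objects -> DataFrame path
+ #   {"title": "...", "body": "..."}          # dict of scalars -> fallback branch
+ # In the fallback, string values are collected one level deep (plus lists of
+ # strings) and joined into a single text for NER.
+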
+ # Text for the current run, initialized outside the upload block.
+ # It is populated when a file is uploaded; otherwise it remains None.
+ current_run_text = None
+
+ if uploaded_file is not None:
+     try:
+         # Read the raw bytes, then rewind so json.load can parse from the beginning
+         file_contents_bytes = uploaded_file.read()
+         uploaded_file.seek(0)
+         dados = json.load(uploaded_file)
+
+         # Attempt to convert the JSON to a DataFrame and extract text
+         try:
+             st.session_state['json_dataframe'] = pd.DataFrame(dados)
+
+             # Concatenate all content into a single string for NER
+             df_string_representation = st.session_state['json_dataframe'].to_string(index=False, header=False)
+             # Strip everything except word characters, whitespace, and periods
+             text_content = re.sub(r'[^\w\s.]', '', df_string_representation)
+             # Drop the "Empty DataFrame Columns" artifact pandas emits for empty frames
+             text_content = text_content.replace("Empty DataFrame Columns", "").strip()
+             current_run_text = text_content  # Text for the current run
+
+             if not current_run_text.strip():  # Is the text effectively empty?
+                 st.warning("No meaningful text could be extracted from the JSON DataFrame for analysis.")
+                 current_run_text = None
+
+         except ValueError:
+             # If direct conversion to a DataFrame fails, extract strings from the JSON structure itself
+             st.info("JSON data could not be directly converted to a simple DataFrame for display. Attempting to extract text directly.")
+             extracted_texts_list = []
+             if isinstance(dados, list):
+                 for item in dados:
+                     if isinstance(item, str):
+                         extracted_texts_list.append(item)
+                     elif isinstance(item, dict):
+                         # Collect string values from dicts in the list (one level deep)
+                         for val in item.values():
+                             if isinstance(val, str):
+                                 extracted_texts_list.append(val)
+                             elif isinstance(val, list):
+                                 for sub_val in val:
+                                     if isinstance(sub_val, str):
+                                         extracted_texts_list.append(sub_val)
+             elif isinstance(dados, dict):
+                 # Collect string values from the dictionary (one level deep)
+                 for value in dados.values():
+                     if isinstance(value, str):
+                         extracted_texts_list.append(value)
+                     elif isinstance(value, list):
+                         for sub_val in value:
+                             if isinstance(sub_val, str):
+                                 extracted_texts_list.append(sub_val)
+
+             if extracted_texts_list:
+                 current_run_text = " ".join(extracted_texts_list).strip()
+             else:
+                 st.warning("No string text could be extracted from the JSON for analysis.")
+                 current_run_text = None
+
+         if current_run_text:
+             # --- Encrypt the extracted text before storing it in session state ---
+             encrypted_text_bytes = encrypt_text(current_run_text)
+             st.session_state['encrypted_extracted_text'] = encrypted_text_bytes
+
+             st.success("JSON file uploaded successfully. File content encrypted and secured. Due to security protocols, the file content is hidden.")
+             st.divider()
+         else:
+             st.session_state['encrypted_extracted_text'] = None
+             st.error("Could not extract meaningful text from the uploaded JSON file.")
+
+     except json.JSONDecodeError as e:
+         st.error(f"JSON Decode Error: {e}")
+         st.error("Please ensure the uploaded file contains valid JSON data.")
+         st.session_state['encrypted_extracted_text'] = None
+         st.session_state['json_dataframe'] = None
+     except Exception as e:
+         st.error(f"An unexpected error occurred during file processing: {e}")
+         st.session_state['encrypted_extracted_text'] = None
+         st.session_state['json_dataframe'] = None
+
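+ # Note: st.session_state persists across Streamlit reruns, so the encrypted
+ # bytes stored above survive the "Results" click below (each click reruns the
+ # whole script) and are only decrypted at analysis time.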
+
+ # --- Results Button and Processing Logic ---
+ if st.button("Results"):
+     start_time = time.time()
+     if not comet_initialized:
+         st.warning("Comet ML is not initialized. Check the environment variables if you wish to log data.")
+
+     if st.session_state['file_upload_attempts'] >= max_attempts:
+         st.error(f"You have requested results {max_attempts} times. You have reached your daily request limit.")
+         st.stop()
+
+     # --- Decrypt the text before passing it to the NER model ---
+     text_for_ner = None
+     if st.session_state['encrypted_extracted_text'] is not None:
+         text_for_ner = decrypt_text(st.session_state['encrypted_extracted_text'])
+
+     if text_for_ner is None or not text_for_ner.strip():
+         st.warning("No extractable text content available for analysis. Please upload a valid JSON file.")
+         st.stop()
+
+     st.session_state['file_upload_attempts'] += 1
+
+     with st.spinner("Analyzing text...", show_time=True):
+         model = load_ner_model()
+         text_entities = model(text_for_ner)  # Run NER on the decrypted text
+         df = pd.DataFrame(text_entities)
+
+         if 'word' in df.columns:
+             # Ensure the 'word' column is string-typed before applying the regex
+             if df['word'].dtype == 'object':
+                 pattern = r'[^\w\s]'  # Strip everything except word characters and whitespace
+                 df['word'] = df['word'].astype(str).replace(pattern, '', regex=True)
+             else:
+                 st.warning("The 'word' column is not of string type; skipping character cleaning.")
+         else:
+             st.error("The 'word' column does not exist in the DataFrame. Cannot perform cleaning.")
+             st.stop()  # Stop execution if the column is missing
+
+         # Replace empty strings with 'Unknown' and drop rows with NaN after cleaning
+         df = df.replace('', 'Unknown').dropna()
+
+         if df.empty:
+             st.warning("No entities were extracted from the uploaded text.")
+             st.stop()
+
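+         # Illustrative effect of the cleaning above: 'Oslo!' becomes 'Oslo',
+         # while a token stripped to '' (e.g., a lone '§') becomes 'Unknown'.
+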
+         if comet_initialized:
+             experiment = Experiment(
+                 api_key=COMET_API_KEY,
+                 workspace=COMET_WORKSPACE,
+                 project_name=COMET_PROJECT_NAME,
+             )
+             experiment.log_parameter("input_text_length", len(text_for_ner))
+             # Comet's log_table expects a filename with an extension
+             experiment.log_table("predicted_entities.csv", df)
+
+         # --- Display Results ---
+         properties = {"border": "2px solid gray", "color": "blue", "font-size": "16px"}
+         df_styled = df.style.set_properties(**properties)
+         st.dataframe(df_styled, use_container_width=True)
+
+         with st.expander("See Glossary of tags"):
+             st.write('''
+ **word**: entity extracted from your text data
+
+ **score**: confidence score; how confidently a tag has been assigned to a given entity
+
+ **entity_group**: label (tag) assigned to a given extracted entity
+
+ **start**: index of the start of the corresponding entity
+
+ **end**: index of the end of the corresponding entity
+ ''')
+
+         entity_groups = {"PER": "person",
+                          "LOC": "location",
+                          "ORG": "organization",
+                          "MISC": "miscellaneous",
+                          }
+
+         st.subheader("Grouped entities", divider="orange")
+
+         # Convert the entity_groups dictionary to a list of (key, title) tuples
+         entity_items = list(entity_groups.items())
+         # Define how many tabs per row
+         tabs_per_row = 5
+         # Loop through the entity items in chunks
+         for i in range(0, len(entity_items), tabs_per_row):
+             current_row_entities = entity_items[i : i + tabs_per_row]
+             tab_titles = [item[1] for item in current_row_entities]
+
+             tabs = st.tabs(tab_titles)
+             for j, (entity_group_key, tab_title) in enumerate(current_row_entities):
+                 with tabs[j]:
+                     if entity_group_key in df["entity_group"].unique():
+                         df_filtered = df[df["entity_group"] == entity_group_key]
+                         st.dataframe(df_filtered, use_container_width=True)
+                     else:
+                         st.info(f"No '{tab_title}' entities found in the text.")
+                         # Display a placeholder DataFrame for consistency
+                         st.dataframe(pd.DataFrame({
+                             'entity_group': [entity_group_key],
+                             'score': [np.nan],
+                             'word': [np.nan],
+                             'start': [np.nan],
+                             'end': [np.nan]
+                         }), hide_index=True)
+
+         st.divider()
+
+         # --- Visualizations ---
+         st.subheader("Tree map", divider="orange")
+         fig_treemap = px.treemap(df, path=[px.Constant("all"), 'word', 'entity_group'],
+                                  values='score', color='entity_group')
+         fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25))
+         st.plotly_chart(fig_treemap)
+         if comet_initialized:
+             experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap")
+
+         # pandas >= 2.0 returns value counts in a 'count' column, so the rename
+         # below is a no-op there and only matters for older index layouts
+         value_counts1 = df['entity_group'].value_counts()
+         final_df_counts = value_counts1.reset_index().rename(columns={"index": "entity_group"})
+
+         col1, col2 = st.columns(2)
+         with col1:
+             st.subheader("Pie Chart", divider="orange")
+             fig_pie = px.pie(final_df_counts, values='count', names='entity_group',
+                              hover_data=['count'], labels={'count': 'count'},
+                              title='Percentage of predicted labels')
+             fig_pie.update_traces(textposition='inside', textinfo='percent+label')
+             st.plotly_chart(fig_pie)
+             if comet_initialized:
+                 experiment.log_figure(figure=fig_pie, figure_name="label_pie_chart")
+
+         with col2:
+             st.subheader("Bar Chart", divider="orange")
+             fig_bar = px.bar(final_df_counts, x="count", y="entity_group",
+                              color="entity_group", text_auto=True,
+                              title='Occurrences of predicted labels')
+             st.plotly_chart(fig_bar)
+             if comet_initialized:
+                 experiment.log_figure(figure=fig_bar, figure_name="label_bar_chart")
+
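+         # Note: treemap box sizes are the summed confidence scores along each
+         # path, so frequent, high-confidence entities occupy more area; the pie
+         # and bar charts use plain label counts.
+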
+         # --- Downloadable Content ---
+         dfa = pd.DataFrame(
+             data={
+                 'Column Name': ['word', 'entity_group', 'score', 'start', 'end'],
+                 'Description': [
+                     'entity extracted from your text data',
+                     'label (tag) assigned to a given extracted entity',
+                     'confidence score; how confidently a tag has been assigned to a given entity',
+                     'index of the start of the corresponding entity',
+                     'index of the end of the corresponding entity',
+                 ]
+             }
+         )
+
+         buf = io.BytesIO()
+         with zipfile.ZipFile(buf, "w") as myzip:
+             myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
+             myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
+
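+         # The archive is assembled entirely in memory via io.BytesIO; nothing
+         # is written to the server's disk before download.
+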
+         with stylable_container(
+             key="download_button",
+             css_styles="""button { background-color: yellow; border: 1px solid black; padding: 5px; color: black; }""",
+         ):
+             st.download_button(
+                 label="Download zip file",
+                 data=buf.getvalue(),
+                 file_name="nlpblogs_ner_results.zip",
+                 mime="application/zip",
+             )
+         if comet_initialized:
+             # log_asset_data accepts in-memory bytes; log_asset expects a file path or file-like object
+             experiment.log_asset_data(buf.getvalue(), name="downloadable_results.zip")
+
+         st.divider()
+         if comet_initialized:
+             experiment.end()
+     end_time = time.time()
+     elapsed_time = end_time - start_time
+     st.info(f"Results processed in **{elapsed_time:.2f} seconds**.")
+     st.write(f"Number of times you requested results: **{st.session_state['file_upload_attempts']}/{max_attempts}**")