Maria Tsilimos committed on
Commit
f3126c4
·
unverified ·
1 Parent(s): c0465f3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +283 -0
app.py ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import streamlit as st
3
+ import pandas as pd
4
+ import io
5
+ from transformers import pipeline
6
+ from streamlit_extras.stylable_container import stylable_container
7
+ import plotly.express as px
8
+ import zipfile
9
+ import os
10
+ from comet_ml import Experiment
11
+ import re
12
+ import numpy as np
13
+ from bs4 import BeautifulSoup
14
+
15
st.set_page_config(layout="wide", page_title="Named Entity Recognition App")

# --- Configuration ---
# Comet ML credentials come from the environment; experiment logging is
# optional and only switched on when all three variables are present.
COMET_API_KEY = os.environ.get("COMET_API_KEY")
COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")

comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)

# --- Initialize session state ---
# Per-session counter of how many times the user has requested results.
if 'file_upload_attempts' not in st.session_state:
    st.session_state['file_upload_attempts'] = 0

# Maximum number of "Results" requests allowed in one session.
max_attempts = 10
33
+
34
# --- Helper function for model loading ---
@st.cache_resource
def load_ner_model():
    """Load the dslim/bert-base-NER token-classification pipeline.

    Cached with st.cache_resource so the model is downloaded and
    instantiated only once per server process.
    """
    ner_pipeline = pipeline(
        "token-classification",
        model="dslim/bert-base-NER",
        aggregation_strategy="max",
    )
    return ner_pipeline
39
+
40
# --- UI Elements ---
# Page header, attribution link, and an expandable usage-notes panel.
st.subheader("English HTML Entity Finder", divider="rainbow")
st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")

notes_expander = st.expander("**Important notes on the English HTML Entity Finder**")
notes_expander.write('''
**Named Entities:** This English HTML Entity Finder predicts four (4) labels (“PER: person”, “LOC: location”, “ORG: organization”, “MISC: miscellaneous”). Results are presented in an easy-to-read table, visualized in an interactive tree map, pie chart, and bar chart, and are available for download along with a Glossary of tags.

**How to Use:** Upload your html file. Then, click the 'Results' button to extract and tag entities in your text data.

**Usage Limits:** You can request results up to 10 times.

**Customization:** To change the app's background color to white or black, click the three-dot menu on the right-hand side of your app, go to Settings and then Choose app theme, colors and fonts.

**Technical issues:** If your connection times out, please refresh the page or reopen the app's URL.

For any errors or inquiries, please contact us at info@nlpblogs.com
''')
58
+
59
with st.sidebar:
    # Short NER explainer inside a bordered box, followed by related apps.
    info_box = st.container(border=True)
    info_box.write("**Named Entity Recognition (NER)** is the task of extracting and tagging entities in text data. Entities can be persons, organizations, locations, countries, products, events etc.")
    st.subheader("Related NLP Web Apps", divider="rainbow")
    st.link_button("Italian URL & TXT Entity Finder", "https://nlpblogs.com/shop/named-entity-recognition-ner/monolingual-ner-web-apps/italian-url-txt-entity-finder/", type="primary")
64
+
65
+
66
+
67
+
68
+
69
+
70
uploaded_file = st.file_uploader("Choose an HTML file", type="html")
text = None   # plain text extracted from the uploaded HTML; None until a file arrives
df = None     # entity DataFrame, filled in by the Results handler

if uploaded_file is not None:
    # Read the uploaded HTML. FIX: errors="replace" so a file that is not
    # valid UTF-8 no longer crashes the app with UnicodeDecodeError;
    # undecodable bytes become U+FFFD replacement characters instead.
    html_content = uploaded_file.read().decode("utf-8", errors="replace")

    # The raw HTML is deliberately not rendered back to the user.
    st.success("File uploaded successfully. Due to security protocols, the file content is hidden.")

    # Extract the visible plain text from the HTML for the NER pipeline.
    soup = BeautifulSoup(html_content, 'html.parser')
    text = soup.get_text()
86
+
87
+
88
+
89
+
90
+
91
st.divider()

# --- Results Button and Processing Logic ---
if st.button("Results"):
    start_time = time.time()

    if not comet_initialized:
        st.warning("Comet ML not initialized. Check environment variables if you wish to log data.")

    # Enforce the per-session usage limit before doing any work.
    if st.session_state['file_upload_attempts'] >= max_attempts:
        st.error(f"You have requested results {max_attempts} times. You have reached your daily request limit.")
        st.stop()

    # FIX: the original message referred to ".pdf or .docx", but this app
    # only accepts HTML uploads (st.file_uploader above, type="html").
    if text is None:
        st.warning("Please upload a supported file (.html) before requesting results.")
        st.stop()

    st.session_state['file_upload_attempts'] += 1

    with st.spinner("Analyzing text...", show_time=True):
        # Run the cached NER pipeline over the extracted plain text.
        model = load_ner_model()
        text_entities = model(text)
        df = pd.DataFrame(text_entities)

        # --- Clean the extracted entities ---
        # FIX: consolidated two separate `'word' in df.columns` checks and
        # removed a leftover debug st.write of the column dtype.
        if 'word' not in df.columns:
            st.error("The 'word' column does not exist in the DataFrame. Cannot perform cleaning.")
            st.stop()

        # Strip punctuation from entity words, then normalize empties and drop NaNs.
        pattern = r'[^\w\s]'
        df['word'] = df['word'].replace(pattern, '', regex=True)
        df = df.replace('', 'Unknown').dropna()

        if df.empty:
            st.warning("No entities were extracted from the uploaded text.")
            st.stop()

        # --- Optional Comet ML experiment logging ---
        if comet_initialized:
            experiment = Experiment(
                api_key=COMET_API_KEY,
                workspace=COMET_WORKSPACE,
                project_name=COMET_PROJECT_NAME,
            )
            experiment.log_parameter("input_text_length", len(text))
            experiment.log_table("predicted_entities", df)

        # --- Display Results ---
        properties = {"border": "2px solid gray", "color": "blue", "font-size": "16px"}
        df_styled = df.style.set_properties(**properties)
        st.dataframe(df_styled, use_container_width=True)

        with st.expander("See Glossary of tags"):
            st.write('''
            '**word**': ['entity extracted from your text data']

            '**score**': ['accuracy score; how accurately a tag has been assigned to a given entity']

            '**entity_group**': ['label (tag) assigned to a given extracted entity']

            '**start**': ['index of the start of the corresponding entity']

            '**end**': ['index of the end of the corresponding entity']
            ''')

        # Model label -> human-readable tab title.
        entity_groups = {
            "PER": "person",
            "LOC": "location",
            "ORG": "organization",
            "MISC": "miscellaneous",
        }

        st.subheader("Grouped entities", divider="blue")

        # Render the entity groups as rows of tabs, `tabs_per_row` per row.
        entity_items = list(entity_groups.items())
        tabs_per_row = 5
        for i in range(0, len(entity_items), tabs_per_row):
            current_row_entities = entity_items[i: i + tabs_per_row]
            tab_titles = [item[1] for item in current_row_entities]

            tabs = st.tabs(tab_titles)
            for j, (entity_group_key, tab_title) in enumerate(current_row_entities):
                with tabs[j]:
                    if entity_group_key in df["entity_group"].unique():
                        df_filtered = df[df["entity_group"] == entity_group_key]
                        st.dataframe(df_filtered, use_container_width=True)
                    else:
                        # No entities for this label: show a placeholder row.
                        st.info(f"No '{tab_title}' entities found in the text.")
                        st.dataframe(pd.DataFrame({
                            'entity_group': [entity_group_key],
                            'score': [np.nan],
                            'word': [np.nan],
                            'start': [np.nan],
                            'end': [np.nan]
                        }), hide_index=True)

        st.divider()

        # --- Visualizations ---
        st.subheader("Tree map", divider="rainbow")
        fig_treemap = px.treemap(df, path=[px.Constant("all"), 'word', 'entity_group'],
                                 values='score', color='entity_group')
        fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25))
        st.plotly_chart(fig_treemap)
        if comet_initialized:
            experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap")

        # FIX: build the label counts in a pandas-version-robust way. The
        # original `value_counts().reset_index().rename(columns={"index": ...})`
        # only yields ['entity_group', 'count'] on pandas >= 2.0; on 1.x it
        # produces duplicate column names and breaks the charts below.
        final_df_counts = (
            df['entity_group']
            .value_counts()
            .rename_axis('entity_group')
            .reset_index(name='count')
        )

        col1, col2 = st.columns(2)
        with col1:
            st.subheader("Pie Chart", divider="rainbow")
            fig_pie = px.pie(final_df_counts, values='count', names='entity_group', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted labels')
            fig_pie.update_traces(textposition='inside', textinfo='percent+label')
            st.plotly_chart(fig_pie)
            if comet_initialized:
                experiment.log_figure(figure=fig_pie, figure_name="label_pie_chart")

        with col2:
            st.subheader("Bar Chart", divider="rainbow")
            fig_bar = px.bar(final_df_counts, x="count", y="entity_group", color="entity_group", text_auto=True, title='Occurrences of predicted labels')
            st.plotly_chart(fig_bar)
            if comet_initialized:
                experiment.log_figure(figure=fig_bar, figure_name="label_bar_chart")

        # --- Downloadable Content ---
        # Glossary table shipped alongside the results in the zip download.
        dfa = pd.DataFrame(
            data={
                'Column Name': ['word', 'entity_group', 'score', 'start', 'end'],
                'Description': [
                    'entity extracted from your text data',
                    'label (tag) assigned to a given extracted entity',
                    'accuracy score; how accurately a tag has been assigned to a given entity',
                    'index of the start of the corresponding entity',
                    'index of the end of the corresponding entity',
                ]
            }
        )

        # Bundle results and glossary into a single in-memory zip archive.
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, "w") as myzip:
            myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
            myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))

        with stylable_container(
            key="download_button",
            css_styles="""button { background-color: yellow; border: 1px solid black; padding: 5px; color: black; }""",
        ):
            st.download_button(
                label="Download zip file",
                data=buf.getvalue(),
                file_name="nlpblogs_ner_results.zip",
                mime="application/zip",
            )
        if comet_initialized:
            # NOTE(review): log_asset is handed raw bytes here; confirm the
            # installed comet_ml version accepts bytes (it also takes
            # file-like objects / paths).
            experiment.log_asset(buf.getvalue(), file_name="downloadable_results.zip")

        st.divider()
        if comet_initialized:
            experiment.end()

    end_time = time.time()
    elapsed_time = end_time - start_time
    st.info(f"Results processed in **{elapsed_time:.2f} seconds**.")
    st.write(f"Number of times you requested results: **{st.session_state['file_upload_attempts']}/{max_attempts}**")