AIEcosystem committed on
Commit
7032547
·
verified ·
1 Parent(s): 8bd89e7

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +169 -121
src/streamlit_app.py CHANGED
@@ -12,6 +12,7 @@ from streamlit_extras.stylable_container import stylable_container
12
  from typing import Optional
13
  from gliner import GLiNER
14
  from comet_ml import Experiment
 
15
  st.markdown(
16
  """
17
  <style>
@@ -51,7 +52,9 @@ st.markdown(
51
  }
52
  </style>
53
  """,
54
- unsafe_allow_html=True)
 
 
55
  # --- Page Configuration and UI Elements ---
56
  st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
57
  st.subheader("MediaTagger", divider="violet")
@@ -59,13 +62,13 @@ st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")
59
  expander = st.expander("**Important notes**")
60
  expander.write("""**Named Entities:** This MediaTagger web app predicts eighteen (18) labels: 'person', 'organization', 'location', 'date', 'time', 'event', 'title', 'product', 'law', 'policy', 'work of art', 'geopolitical entity', 'number', 'cause of death','weapon', 'vehicle', 'facility', 'temporal expression'
61
 
62
- Results are presented in easy-to-read tables, visualized in an interactive tree map, pie chart and bar chart, and are available for download along with a Glossary of tags.
63
 
64
- **How to Use:** Type or paste your text into the text area below, then press Ctrl + Enter. Click the 'Results' button to extract and tag entities in your text data.
65
 
66
- **Usage Limits:** You can request results unlimited times for one (1) month.
67
 
68
- **Supported Languages:** English
69
 
70
  **Technical issues:** If your connection times out, please refresh the page or reopen the app's URL.
71
 
@@ -88,6 +91,7 @@ with st.sidebar:
88
  st.divider()
89
  st.subheader("🚀 Ready to build your own AI Web App?", divider="violet")
90
  st.link_button("AI Web App Builder", "https://nlpblogs.com/build-your-named-entity-recognition-app/", type="primary")
 
91
  # --- Comet ML Setup ---
92
  COMET_API_KEY = os.environ.get("COMET_API_KEY")
93
  COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
@@ -95,6 +99,7 @@ COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
95
  comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)
96
  if not comet_initialized:
97
  st.warning("Comet ML not initialized. Check environment variables.")
 
98
  # --- Label Definitions ---
99
  labels = [
100
  'person',
@@ -116,150 +121,193 @@ labels = [
116
  'facility',
117
  'temporal expression',
118
  ]
119
- # Corrected mapping dictionary
120
  # Create a mapping dictionary for labels to categories
121
  category_mapping = {
122
  "People & Groups": ["person", "organization", "title"],
123
  "Topics & Objects": ["event", "product", "law", "policy", "work of art", "weapon", "vehicle"],
124
  "Temporal": ["date", "time", "temporal expression"],
125
  "Locations": ["location", "geopolitical entity", "facility"],
126
- "Quantitative & Contextual": ["number", "cause of death"]}
 
 
127
  # --- Model Loading ---
128
- @st.cache_resourcedef load_ner_model():
 
129
  """Loads the GLiNER model and caches it."""
130
  try:
131
- return GLiNER.from_pretrained("EmergentMethods/gliner_large_news-v2.1", nested_ner=True, num_gen_sequences=2, gen_constraints= labels)
132
  except Exception as e:
133
  st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
134
  st.stop()
135
  model = load_ner_model()
136
  # Flatten the mapping to a single dictionary
137
  reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list}
 
 
 
 
 
 
 
 
 
 
 
138
  # --- Text Input and Clear Button ---
139
  word_limit = 200
140
  text = st.text_area(f"Type or paste your text below (max {word_limit} words), and then press Ctrl + Enter", height=250, key='my_text_area')
141
  word_count = len(text.split())
142
  st.markdown(f"**Word count:** {word_count}/{word_limit}")
 
143
  def clear_text():
144
- """Clears the text area."""
145
  st.session_state['my_text_area'] = ""
 
 
 
 
146
  st.button("Clear text", on_click=clear_text)
 
147
  # --- Results Section ---
148
  if st.button("Results"):
149
- start_time = time.time()
150
  if not text.strip():
151
  st.warning("Please enter some text to extract entities.")
 
152
  elif word_count > word_limit:
153
  st.warning(f"Your text exceeds the {word_limit} word limit. Please shorten it to continue.")
 
154
  else:
155
- with st.spinner("Extracting entities...", show_time=True):
156
- entities = model.predict_entities(text, labels)
157
- df = pd.DataFrame(entities)
158
- if not df.empty:
159
- df['category'] = df['label'].map(reverse_category_mapping)
160
- if comet_initialized:
161
- experiment = Experiment(
162
- api_key=COMET_API_KEY,
163
- workspace=COMET_WORKSPACE,
164
- project_name=COMET_PROJECT_NAME,
165
- )
166
- experiment.log_parameter("input_text", text)
167
- experiment.log_table("predicted_entities", df)
168
- st.subheader("Grouped Entities by Category", divider = "violet")
169
- # Create tabs for each category
170
- category_names = sorted(list(category_mapping.keys()))
171
- category_tabs = st.tabs(category_names)
172
- for i, category_name in enumerate(category_names):
173
- with category_tabs[i]:
174
- df_category_filtered = df[df['category'] == category_name]
175
- if not df_category_filtered.empty:
176
- st.dataframe(df_category_filtered.drop(columns=['category']), use_container_width=True)
177
- else:
178
- st.info(f"No entities found for the '{category_name}' category.")
179
- with st.expander("See Glossary of tags"):
180
- st.write('''
181
- - **text**: ['entity extracted from your text data']
182
- - **score**: ['accuracy score; how accurately a tag has been assigned to a given entity']
183
- - **label**: ['label (tag) assigned to a given extracted entity']
184
- - **start**: ['index of the start of the corresponding entity']
185
- - **end**: ['index of the end of the corresponding entity']
186
- ''')
187
- st.divider()
188
- # Tree map
189
- st.subheader("Tree map", divider = "violet")
190
- fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
191
- fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25), paper_bgcolor='#F3E5F5', plot_bgcolor='#F3E5F5')
192
- st.plotly_chart(fig_treemap)
193
- # Pie and Bar charts
194
- grouped_counts = df['category'].value_counts().reset_index()
195
- grouped_counts.columns = ['category', 'count']
196
- col1, col2 = st.columns(2)
197
- with col1:
198
- st.subheader("Pie chart", divider = "violet")
199
- fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
200
- fig_pie.update_traces(textposition='inside', textinfo='percent+label')
201
- fig_pie.update_layout(
202
- paper_bgcolor='#F3E5F5',
203
- plot_bgcolor='#F3E5F5'
204
- )
205
- st.plotly_chart(fig_pie)
206
- with col2:
207
- st.subheader("Bar chart", divider = "violet")
208
- fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories')
209
- fig_bar.update_layout( # Changed from fig_pie to fig_bar
210
- paper_bgcolor='#F3E5F5',
211
- plot_bgcolor='#F3E5F5'
212
- )
213
- st.plotly_chart(fig_bar)
214
- # Most Frequent Entities
215
- st.subheader("Most Frequent Entities", divider="violet")
216
- word_counts = df['text'].value_counts().reset_index()
217
- word_counts.columns = ['Entity', 'Count']
218
- repeating_entities = word_counts[word_counts['Count'] > 1]
219
- if not repeating_entities.empty:
220
- st.dataframe(repeating_entities, use_container_width=True)
221
- fig_repeating_bar = px.bar(repeating_entities, x='Entity', y='Count', color='Entity')
222
- fig_repeating_bar.update_layout(xaxis={'categoryorder': 'total descending'},
223
- paper_bgcolor='#F3E5F5',
224
- plot_bgcolor='#F3E5F5')
225
- st.plotly_chart(fig_repeating_bar)
226
  else:
227
- st.warning("No entities were found that occur more than once.")
228
- # Download Section
229
- st.divider()
230
- dfa = pd.DataFrame(
231
- data={
232
- 'Column Name': ['text', 'label', 'score', 'start', 'end'],
233
- 'Description': [
234
- 'entity extracted from your text data',
235
- 'label (tag) assigned to a given extracted entity',
236
- 'accuracy score; how accurately a tag has been assigned to a given entity',
237
- 'index of the start of the corresponding entity',
238
- 'index of the end of the corresponding entity',
239
- ]
240
- }
241
- )
242
- buf = io.BytesIO()
243
- with zipfile.ZipFile(buf, "w") as myzip:
244
- myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
245
- myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
246
- with stylable_container(
247
- key="download_button",
248
- css_styles="""button { background-color: red; border: 1px solid black; padding: 5px; color: white; }""",
249
- ):
250
- st.download_button(
251
- label="Download results and glossary (zip)",
252
- data=buf.getvalue(),
253
- file_name="nlpblogs_results.zip",
254
- mime="application/zip",
255
- )
256
- if comet_initialized:
257
- experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories")
258
- experiment.end()
259
- else: # If df is empty
260
- st.warning("No entities were found in the provided text.")
261
- end_time = time.time()
262
- elapsed_time = end_time - start_time
263
- st.text("")
264
- st.text("")
265
- st.info(f"Results processed in **{elapsed_time:.2f} seconds**.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  from typing import Optional
13
  from gliner import GLiNER
14
  from comet_ml import Experiment
15
+
16
  st.markdown(
17
  """
18
  <style>
 
52
  }
53
  </style>
54
  """,
55
+ unsafe_allow_html=True
56
+ )
57
+
58
  # --- Page Configuration and UI Elements ---
59
  st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
60
  st.subheader("MediaTagger", divider="violet")
 
62
  expander = st.expander("**Important notes**")
63
  expander.write("""**Named Entities:** This MediaTagger web app predicts eighteen (18) labels: 'person', 'organization', 'location', 'date', 'time', 'event', 'title', 'product', 'law', 'policy', 'work of art', 'geopolitical entity', 'number', 'cause of death','weapon', 'vehicle', 'facility', 'temporal expression'
64
 
65
+ Results are presented in easy-to-read tables, visualized in an interactive tree map, pie chart and bar chart, and are available for download along with a Glossary of tags.
66
 
67
+ **How to Use:** Type or paste your text into the text area below, then press Ctrl + Enter. Click the 'Results' button to extract and tag entities in your text data.
68
 
69
+ **Usage Limits:** You can request results unlimited times for one (1) month.
70
 
71
+ **Supported Languages:** English
72
 
73
  **Technical issues:** If your connection times out, please refresh the page or reopen the app's URL.
74
 
 
91
  st.divider()
92
  st.subheader("🚀 Ready to build your own AI Web App?", divider="violet")
93
  st.link_button("AI Web App Builder", "https://nlpblogs.com/build-your-named-entity-recognition-app/", type="primary")
94
+
95
  # --- Comet ML Setup ---
96
  COMET_API_KEY = os.environ.get("COMET_API_KEY")
97
  COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
 
99
  comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)
100
  if not comet_initialized:
101
  st.warning("Comet ML not initialized. Check environment variables.")
102
+
103
  # --- Label Definitions ---
104
  labels = [
105
  'person',
 
121
  'facility',
122
  'temporal expression',
123
  ]
 
124
  # Create a mapping dictionary for labels to categories
125
  category_mapping = {
126
  "People & Groups": ["person", "organization", "title"],
127
  "Topics & Objects": ["event", "product", "law", "policy", "work of art", "weapon", "vehicle"],
128
  "Temporal": ["date", "time", "temporal expression"],
129
  "Locations": ["location", "geopolitical entity", "facility"],
130
+ "Quantitative & Contextual": ["number", "cause of death"]
131
+ }
132
+
133
  # --- Model Loading ---
134
@st.cache_resource
def load_ner_model():
    """Load the GLiNER news NER model once and cache it for the app's lifetime.

    Returns the pretrained model on success; on any failure it surfaces the
    error in the UI and halts the script run via st.stop().
    """
    try:
        return GLiNER.from_pretrained(
            "EmergentMethods/gliner_large_news-v2.1",
            nested_ner=True,
            num_gen_sequences=2,
            gen_constraints=labels,
        )
    except Exception as e:
        # Model download/load can fail offline or if the hub is unreachable.
        st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
        st.stop()
142
model = load_ner_model()
# Flatten the mapping to a single dictionary
# (label -> category), inverting category_mapping so each predicted label
# can be resolved to its display category with a single dict lookup.
reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list}
145
+
146
# --- Session State Initialization ---
# Seed defaults once per browser session so later code can read these keys
# unconditionally across Streamlit reruns.
if 'show_results' not in st.session_state:
    st.session_state.show_results = False  # whether the results panel should render
if 'last_text' not in st.session_state:
    st.session_state.last_text = ""  # input text used for the most recent extraction
if 'results_df' not in st.session_state:
    st.session_state.results_df = pd.DataFrame()  # cached entity predictions
if 'elapsed_time' not in st.session_state:
    st.session_state.elapsed_time = 0.0  # seconds taken by the last extraction
155
+
156
# --- Text Input and Clear Button ---
word_limit = 200  # maximum number of whitespace-separated words accepted
text = st.text_area(f"Type or paste your text below (max {word_limit} words), and then press Ctrl + Enter", height=250, key='my_text_area')
# Simple whitespace split; shown live so users can stay under the limit
# before pressing Results.
word_count = len(text.split())
st.markdown(f"**Word count:** {word_count}/{word_limit}")
161
+
162
def clear_text():
    """Reset the input box and discard all cached results and run state."""
    # Item-style assignment on st.session_state is equivalent to attribute
    # access; a single loop keeps the reset list in one place.
    for state_key, blank_value in (
        ('my_text_area', ""),
        ('show_results', False),
        ('last_text', ""),
        ('results_df', pd.DataFrame()),
        ('elapsed_time', 0.0),
    ):
        st.session_state[state_key] = blank_value

st.button("Clear text", on_click=clear_text)
170
+
171
# --- Results Section ---
# Clicking "Results" validates the input and (re)runs entity extraction.
# Extraction output is cached in st.session_state.results_df so the results
# keep rendering across Streamlit reruns triggered by widget interactions.
if st.button("Results"):
    if not text.strip():
        st.warning("Please enter some text to extract entities.")
        st.session_state.show_results = False
    elif word_count > word_limit:
        st.warning(f"Your text exceeds the {word_limit} word limit. Please shorten it to continue.")
        st.session_state.show_results = False
    else:
        # Check if the text is different from the last time
        if text != st.session_state.last_text:
            st.session_state.show_results = True
            st.session_state.last_text = text
            start_time = time.time()
            with st.spinner("Extracting entities...", show_time=True):
                entities = model.predict_entities(text, labels)
                df = pd.DataFrame(entities)
                # Cache raw predictions (without the 'category' column) for
                # redisplay on later reruns.
                st.session_state.results_df = df
                if not df.empty:
                    df['category'] = df['label'].map(reverse_category_mapping)
                    if comet_initialized:
                        # Log this run's input and predictions to Comet ML,
                        # then close the experiment immediately.
                        experiment = Experiment(api_key=COMET_API_KEY, workspace=COMET_WORKSPACE, project_name=COMET_PROJECT_NAME)
                        experiment.log_parameter("input_text", text)
                        experiment.log_table("predicted_entities", df)
                        experiment.end()
            end_time = time.time()
            st.session_state.elapsed_time = end_time - start_time
        else:
            # If the text is the same, simply display the results from cache
            st.session_state.show_results = True

# Display results if the state variable is True
# NOTE(review): this runs on every rerun, independent of the button click,
# so results persist while show_results stays True.
if st.session_state.show_results:
    df = st.session_state.results_df
    if not df.empty:
        # Re-map categories for display (results_df was cached before the
        # 'category' column was added).
        df['category'] = df['label'].map(reverse_category_mapping)
        st.subheader("Grouped Entities by Category", divider="violet")

        # Create tabs for each category
        category_names = sorted(list(category_mapping.keys()))
        category_tabs = st.tabs(category_names)

        for i, category_name in enumerate(category_names):
            with category_tabs[i]:
                df_category_filtered = df[df['category'] == category_name]
                if not df_category_filtered.empty:
                    st.dataframe(df_category_filtered.drop(columns=['category']), use_container_width=True)
                else:
                    st.info(f"No entities found for the '{category_name}' category.")

        with st.expander("See Glossary of tags"):
            st.write('''
            - **text**: ['entity extracted from your text data']
            - **score**: ['accuracy score; how accurately a tag has been assigned to a given entity']
            - **label**: ['label (tag) assigned to a given extracted entity']
            - **start**: ['index of the start of the corresponding entity']
            - **end**: ['index of the end of the corresponding entity']
            ''')
        st.divider()

        # Tree map
        st.subheader("Tree map", divider="violet")
        fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
        fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25), paper_bgcolor='#F3E5F5', plot_bgcolor='#F3E5F5')
        st.plotly_chart(fig_treemap)

        # Pie and Bar charts
        grouped_counts = df['category'].value_counts().reset_index()
        grouped_counts.columns = ['category', 'count']
        col1, col2 = st.columns(2)

        with col1:
            st.subheader("Pie chart", divider="violet")
            fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
            fig_pie.update_traces(textposition='inside', textinfo='percent+label')
            fig_pie.update_layout(
                paper_bgcolor='#F3E5F5',
                plot_bgcolor='#F3E5F5'
            )
            st.plotly_chart(fig_pie)

        with col2:
            st.subheader("Bar chart", divider="violet")
            fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories')
            fig_bar.update_layout(
                paper_bgcolor='#F3E5F5',
                plot_bgcolor='#F3E5F5'
            )
            st.plotly_chart(fig_bar)

        # Most Frequent Entities
        # Entities appearing more than once, ranked by occurrence count.
        st.subheader("Most Frequent Entities", divider="violet")
        word_counts = df['text'].value_counts().reset_index()
        word_counts.columns = ['Entity', 'Count']
        repeating_entities = word_counts[word_counts['Count'] > 1]

        if not repeating_entities.empty:
            st.dataframe(repeating_entities, use_container_width=True)
            fig_repeating_bar = px.bar(repeating_entities, x='Entity', y='Count', color='Entity')
            fig_repeating_bar.update_layout(xaxis={'categoryorder': 'total descending'},
                                            paper_bgcolor='#F3E5F5',
                                            plot_bgcolor='#F3E5F5')
            st.plotly_chart(fig_repeating_bar)
        else:
            st.warning("No entities were found that occur more than once.")

        # Download Section
        # Bundle the results table and a glossary CSV into an in-memory zip.
        st.divider()
        dfa = pd.DataFrame(
            data={
                'Column Name': ['text', 'label', 'score', 'start', 'end'],
                'Description': [
                    'entity extracted from your text data',
                    'label (tag) assigned to a given extracted entity',
                    'accuracy score; how accurately a tag has been assigned to a given entity',
                    'index of the start of the corresponding entity',
                    'index of the end of the corresponding entity',
                ]
            }
        )
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, "w") as myzip:
            myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
            myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))

        with stylable_container(
            key="download_button",
            css_styles="""button { background-color: #4A148C; border: 1px solid black; padding: 5px; color: white; }""",
        ):
            st.download_button(
                label="Download results and glossary (zip)",
                data=buf.getvalue(),
                file_name="nlpblogs_results.zip",
                mime="application/zip",
            )

        st.text("")
        st.text("")
        st.info(f"Results processed in **{st.session_state.elapsed_time:.2f} seconds**.")

    else:  # If df is empty after the button click
        st.warning("No entities were found in the provided text.")