Disable summary generation at the end
app.py CHANGED
@@ -470,84 +470,85 @@ if summarize_button:
     for current_drawing_list in total_unmatched_deps:
         render_dependency_parsing(current_drawing_list)
 
-    # OUTRO/CONCLUSION
-    st.header("π€ Bringing it together")
-    st.markdown("We have presented 2 methods that try to detect errors in summaries via post-processing steps. Entity "
-                "matching can be used to solve hallucinations, while dependency comparison can be used to filter out "
-                "some bad sentences (and thus worse summaries). These methods highlight the possibilities of "
-                "post-processing AI-made summaries, but are only a first introduction. As the methods were "
-                "empirically tested they are definitely not sufficiently robust for general use-cases.")
-    st.markdown("####")
-    st.markdown(
-        "Below we generate 3 different kind of summaries, and based on the two discussed methods, their errors are "
-        "detected to estimate a factualness score. Based on this basic approach, "
-        "the best summary (read: the one that a human would prefer or indicate as the best one) "
-        "will hopefully be at the top. Summaries with the same scores will get the same rank displayed.")
-    st.markdown("####")
-
-    with st.spinner("Calculating more summaries and scoring them, this might take a minute or two..."):
-        summaries_list = []
-        deduction_points = []
-        # ENTITIES
-        _, amount_unmatched = get_and_compare_entities(False)
-
-        # DEPS
-        summary_deps = check_dependency(False)
-        article_deps = check_dependency(True)
-        total_unmatched_deps = []
-        for summ_dep in summary_deps:
-            if not any(summ_dep['identifier'] in art_dep['identifier'] for art_dep in article_deps):
-                total_unmatched_deps.append(summ_dep)
-
-        summaries_list.append(st.session_state.summary_output)
-        deduction_points.append(len(amount_unmatched) + len(total_unmatched_deps))
-
-        # FOR NEW GENERATED SUMMARY
-        st.session_state.summary_output = generate_abstractive_summary(st.session_state.article_text,
-                                                                        type="beam",
-                                                                        do_sample=True, num_beams=15,
-                                                                        no_repeat_ngram_size=5)
-        _, amount_unmatched = get_and_compare_entities(False)
-
-        summary_deps = check_dependency(False)
-        article_deps = check_dependency(True)
-        total_unmatched_deps = []
-        for summ_dep in summary_deps:
-            if not any(summ_dep['identifier'] in art_dep['identifier'] for art_dep in article_deps):
-                total_unmatched_deps.append(summ_dep)
-
-        summaries_list.append(st.session_state.summary_output)
-        deduction_points.append(len(amount_unmatched) + len(total_unmatched_deps))
-
-        # FOR NEW GENERATED SUMMARY
-        st.session_state.summary_output = generate_abstractive_summary(st.session_state.article_text,
-                                                                        type="top_p",
-                                                                        do_sample=True,
-                                                                        no_repeat_ngram_size=5)
-        _, amount_unmatched = get_and_compare_entities(False)
-
-        summary_deps = check_dependency(False)
-        article_deps = check_dependency(True)
-        total_unmatched_deps = []
-        for summ_dep in summary_deps:
-            if not any(summ_dep['identifier'] in art_dep['identifier'] for art_dep in article_deps):
-                total_unmatched_deps.append(summ_dep)
-
-        summaries_list.append(st.session_state.summary_output)
-        deduction_points.append(len(amount_unmatched) + len(total_unmatched_deps))
-
-        # RANKING AND SHOWING THE SUMMARIES
-        deduction_points, summaries_list = (list(t) for t in zip(*sorted(zip(deduction_points, summaries_list))))
-
-        cur_rank = 1
-        rank_downgrade = 0
-        for i in range(len(deduction_points)):
-            st.write(f'π Rank {cur_rank} summary: π', display_summary(summaries_list[i]), unsafe_allow_html=True)
-            if i < len(deduction_points) - 1:
-                rank_downgrade += 1
-                if not deduction_points[i + 1] == deduction_points[i]:
-                    cur_rank += rank_downgrade
-                    rank_downgrade = 0
+    # CURRENTLY DISABLED
+    # # OUTRO/CONCLUSION
+    # st.header("π€ Bringing it together")
+    # st.markdown("We have presented 2 methods that try to detect errors in summaries via post-processing steps. Entity "
+    #             "matching can be used to solve hallucinations, while dependency comparison can be used to filter out "
+    #             "some bad sentences (and thus worse summaries). These methods highlight the possibilities of "
+    #             "post-processing AI-made summaries, but are only a first introduction. As the methods were "
+    #             "empirically tested they are definitely not sufficiently robust for general use-cases.")
+    # st.markdown("####")
+    # st.markdown(
+    #     "Below we generate 3 different kind of summaries, and based on the two discussed methods, their errors are "
+    #     "detected to estimate a factualness score. Based on this basic approach, "
+    #     "the best summary (read: the one that a human would prefer or indicate as the best one) "
+    #     "will hopefully be at the top. Summaries with the same scores will get the same rank displayed.")
+    # st.markdown("####")
+    #
+    # with st.spinner("Calculating more summaries and scoring them, this might take a minute or two..."):
+    #     summaries_list = []
+    #     deduction_points = []
+    #     # ENTITIES
+    #     _, amount_unmatched = get_and_compare_entities(False)
+    #
+    #     # DEPS
+    #     summary_deps = check_dependency(False)
+    #     article_deps = check_dependency(True)
+    #     total_unmatched_deps = []
+    #     for summ_dep in summary_deps:
+    #         if not any(summ_dep['identifier'] in art_dep['identifier'] for art_dep in article_deps):
+    #             total_unmatched_deps.append(summ_dep)
+    #
+    #     summaries_list.append(st.session_state.summary_output)
+    #     deduction_points.append(len(amount_unmatched) + len(total_unmatched_deps))
+    #
+    #     # FOR NEW GENERATED SUMMARY
+    #     st.session_state.summary_output = generate_abstractive_summary(st.session_state.article_text,
+    #                                                                     type="beam",
+    #                                                                     do_sample=True, num_beams=15,
+    #                                                                     no_repeat_ngram_size=5)
+    #     _, amount_unmatched = get_and_compare_entities(False)
+    #
+    #     summary_deps = check_dependency(False)
+    #     article_deps = check_dependency(True)
+    #     total_unmatched_deps = []
+    #     for summ_dep in summary_deps:
+    #         if not any(summ_dep['identifier'] in art_dep['identifier'] for art_dep in article_deps):
+    #             total_unmatched_deps.append(summ_dep)
+    #
+    #     summaries_list.append(st.session_state.summary_output)
+    #     deduction_points.append(len(amount_unmatched) + len(total_unmatched_deps))
+    #
+    #     # FOR NEW GENERATED SUMMARY
+    #     st.session_state.summary_output = generate_abstractive_summary(st.session_state.article_text,
+    #                                                                     type="top_p",
+    #                                                                     do_sample=True,
+    #                                                                     no_repeat_ngram_size=5)
+    #     _, amount_unmatched = get_and_compare_entities(False)
+    #
+    #     summary_deps = check_dependency(False)
+    #     article_deps = check_dependency(True)
+    #     total_unmatched_deps = []
+    #     for summ_dep in summary_deps:
+    #         if not any(summ_dep['identifier'] in art_dep['identifier'] for art_dep in article_deps):
+    #             total_unmatched_deps.append(summ_dep)
+    #
+    #     summaries_list.append(st.session_state.summary_output)
+    #     deduction_points.append(len(amount_unmatched) + len(total_unmatched_deps))
+    #
+    #     # RANKING AND SHOWING THE SUMMARIES
+    #     deduction_points, summaries_list = (list(t) for t in zip(*sorted(zip(deduction_points, summaries_list))))
+    #
+    #     cur_rank = 1
+    #     rank_downgrade = 0
+    #     for i in range(len(deduction_points)):
+    #         st.write(f'π Rank {cur_rank} summary: π', display_summary(summaries_list[i]), unsafe_allow_html=True)
+    #         if i < len(deduction_points) - 1:
+    #             rank_downgrade += 1
+    #             if not deduction_points[i + 1] == deduction_points[i]:
+    #                 cur_rank += rank_downgrade
+    #                 rank_downgrade = 0
 
     # session = SessionState.get(code=print("TEST"))
     # a = st.radio("Edit or show", ['Edit', 'Show'], 1)
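
For reference, the block disabled above scores each candidate summary by the number of unmatched entities plus unmatched dependencies (fewer deductions is better) and then ranks the summaries so that equal scores share a rank. Below is a minimal standalone sketch of that ranking step in plain Python, without Streamlit; the function name and the sample scores are illustrative and are not part of app.py:

def rank_by_deductions(deduction_points, summaries):
    """Sort summaries by ascending deduction score and give equal scores the same rank."""
    ranked = sorted(zip(deduction_points, summaries), key=lambda pair: pair[0])
    results = []
    cur_rank = 1
    for i, (score, summary) in enumerate(ranked):
        # Competition ranking: when a higher score appears, the rank jumps to i + 1,
        # so tied summaries keep their shared rank and the skipped ranks are not reused.
        if i > 0 and score != ranked[i - 1][0]:
            cur_rank = i + 1
        results.append((cur_rank, score, summary))
    return results

# Illustrative input: two summaries tie with 1 deduction each, one has 3.
print(rank_by_deductions([3, 1, 1], ["summary A", "summary B", "summary C"]))
# [(1, 1, 'summary B'), (1, 1, 'summary C'), (3, 3, 'summary A')]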