GabrielSalem committed on
Commit
0a73e83
·
verified ·
1 Parent(s): 4edc9b1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +100 -243
app.py CHANGED
@@ -1,17 +1,11 @@
1
  """
2
- AURA Chat — Gradio Space
3
- Single-file Gradio app that:
4
- - Accepts newline-separated prompts (data queries) from the user.
5
- - On "Analyze" scrapes those queries, sends the aggregated text to a locked LLM,
6
- and returns a polished analysis with a ranked list of best stocks and an
7
- "Investment Duration" (when to enter / when to exit) for each stock.
8
- - Seeds a chat component with the generated analysis; user can then chat about it.
9
-
10
- Notes:
11
- - Model, max tokens, and delay between scrapes are fixed and cannot be changed via UI.
12
- - Set OPENAI_API_KEY in environment (Space Secrets).
13
  """
14
-
15
  import os
16
  import time
17
  import sys
@@ -19,12 +13,12 @@ import asyncio
19
  import requests
20
  import atexit
21
  import traceback
22
- from datetime import datetime
23
  from typing import List
24
  import gradio as gr
25
 
26
-
27
- # Defensive: ensure a fresh event loop early to avoid fd race on shutdown.
 
28
  if sys.platform != "win32":
29
  try:
30
  loop = asyncio.new_event_loop()
@@ -32,214 +26,104 @@ if sys.platform != "win32":
32
  except Exception:
33
  traceback.print_exc()
34
 
35
-
36
  # =============================================================================
37
- # CONFIGURATION (fixed)
38
  # =============================================================================
39
  SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
40
- SCRAPER_HEADERS = {
41
- "User-Agent": "Mozilla/5.0",
42
- "Content-Type": "application/json"
43
- }
44
-
45
- # FIXED model & tokens (cannot be changed from UI)
46
  LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free")
47
  MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000"))
48
  SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
49
-
50
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
51
  OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")
52
 
53
-
54
- # =============================================================================
55
- # PROMPT ENGINEERING (fixed)
56
- # =============================================================================
57
  PROMPT_TEMPLATE = f"""You are AURA, a concise, professional hedge-fund research assistant.
58
-
59
  Task:
60
- - Given scraped data below, produce a clear, readable analysis that:
61
- 1) Lists the top 5 stock picks (or fewer if not enough data).
62
- 2) For each stock provide: Ticker / Company name, short rationale (2-3 bullets),
63
- and an explicit **Investment Duration** entry: a one-line "When to Invest"
64
- and a one-line "When to Sell" instruction (these two lines are mandatory
65
- for each stock).
66
- 3) Keep each stock entry short and scannable. Use a bullet list or numbered list.
67
- 4) At the top, provide a 2-3 sentence summary conclusion (market context +
68
- highest conviction pick).
69
- 5) Output in plain text, clean formatting, easy for humans to read. No JSON.
70
- 6) After the list, include a concise "Assumptions & Risks" section (2-3 bullet points).
71
-
72
- Important: Be decisive. If data is insufficient, state that clearly and provide
73
- the best-available picks with lower confidence.
74
-
75
- Max tokens for the LLM response: {MAX_TOKENS}
76
- Model: {LLM_MODEL}"""
77
-
78
 
79
  # =============================================================================
80
- # SCRAPING HELPERS
81
  # =============================================================================
82
  def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
83
- """Post a query to SCRAPER_API_URL and return a readable aggregation (or an error string)."""
84
- payload = {"query": query}
85
  last_err = None
86
-
87
- for attempt in range(1, retries + 1):
88
  try:
89
- resp = requests.post(
90
- SCRAPER_API_URL,
91
- headers=SCRAPER_HEADERS,
92
- json=payload,
93
- timeout=timeout
94
- )
95
  resp.raise_for_status()
96
  data = resp.json()
97
-
98
- # Format into readable text
99
  if isinstance(data, dict):
100
- parts = [f"{k.upper()}:\n{v}\n" for k, v in data.items()]
101
- return "\n".join(parts)
102
- else:
103
- return str(data)
104
-
105
  except Exception as e:
106
  last_err = e
107
- if attempt < retries:
108
- time.sleep(1.0)
109
-
110
  return f"ERROR: Scraper failed: {last_err}"
111
 
112
-
113
  def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
114
- """Scrape multiple queries and join results into one large string."""
115
  aggregated = []
116
  for q in queries:
117
- q = q.strip()
118
- if not q:
119
- continue
120
- aggregated.append(f"\n=== QUERY: {q} ===\n")
121
- scraped = deep_scrape(q)
122
- aggregated.append(scraped)
123
  time.sleep(delay)
124
  return "\n".join(aggregated)
125
 
126
-
127
  # =============================================================================
128
- # LLM INTERACTION
129
  # =============================================================================
130
  try:
131
  from openai import OpenAI
132
  except Exception:
133
  OpenAI = None
134
 
135
-
136
- def run_llm_system_and_user(
137
- system_prompt: str,
138
- user_text: str,
139
- model: str = LLM_MODEL,
140
- max_tokens: int = MAX_TOKENS
141
- ) -> str:
142
- """Create the OpenAI client lazily, call the chat completions endpoint, then close."""
143
- if OpenAI is None:
144
- return "ERROR: openai package not installed or available. See requirements."
145
-
146
- if not OPENAI_API_KEY:
147
- return "ERROR: OPENAI_API_KEY not set in environment. Please add it to Space Secrets."
148
-
149
- client = None
150
  try:
151
- client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
152
  completion = client.chat.completions.create(
153
- model=model,
154
- messages=[
155
- {"role": "system", "content": system_prompt},
156
- {"role": "user", "content": user_text},
157
- ],
158
- max_tokens=max_tokens,
159
  )
160
-
161
- # Extract content robustly
162
- if hasattr(completion, "choices") and len(completion.choices) > 0:
163
- try:
164
- return completion.choices[0].message.content
165
- except Exception:
166
- return str(completion.choices[0])
167
- return str(completion)
168
-
169
  except Exception as e:
170
  return f"ERROR: LLM call failed: {e}"
171
  finally:
172
- # Try to close client transport
173
- try:
174
- if client is not None:
175
- try:
176
- client.close()
177
- except Exception:
178
- try:
179
- asyncio.get_event_loop().run_until_complete(client.aclose())
180
- except Exception:
181
- pass
182
- except Exception:
183
- pass
184
-
185
 
186
  # =============================================================================
187
- # MAIN PIPELINE
188
  # =============================================================================
189
  def analyze_and_seed_chat(prompts_text: str):
190
- """Called when user clicks Analyze. Returns: (analysis_text, initial_chat_messages_list)"""
191
  if not prompts_text.strip():
192
- return "Please enter at least one prompt (query) describing what data to gather.", []
193
-
194
  queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
195
- scraped = multi_scrape(queries, delay=SCRAPE_DELAY)
196
-
197
- if scraped.startswith("ERROR"):
198
- return scraped, []
199
-
200
- # Compose user payload for LLM
201
- user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease follow the system instructions and output the analysis."
202
  analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
203
-
204
- if analysis.startswith("ERROR"):
205
- return analysis, []
206
-
207
- # Seed chat with user request and assistant analysis
208
- initial_chat = [
209
- {"role": "user", "content": f"Analyze the data I provided (prompts: {', '.join(queries)})"},
210
  {"role": "assistant", "content": analysis}
211
  ]
212
- return analysis, initial_chat
213
-
214
 
215
  def continue_chat(chat_messages, user_message: str, analysis_text: str):
216
- """Handle chat follow-ups. Returns updated list of message dicts."""
217
- if chat_messages is None:
218
- chat_messages = []
219
- if not user_message or not user_message.strip():
220
- return chat_messages
221
-
222
- # Append user's new message
223
  chat_messages.append({"role": "user", "content": user_message})
224
-
225
- # Build LLM input using analysis as reference context
226
- followup_system = (
227
- "You are AURA, a helpful analyst. The conversation context includes a recently "
228
- "generated analysis from scraped data. Use that analysis as ground truth context; "
229
- "answer follow-up questions, explain rationale, and provide clarifications. "
230
- "Be concise and actionable."
231
- )
232
- user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}\n\nRespond concisely and reference lines from the analysis where appropriate."
233
-
234
- assistant_reply = run_llm_system_and_user(followup_system, user_payload)
235
- if assistant_reply.startswith("ERROR"):
236
- assistant_reply = assistant_reply
237
-
238
- # Append assistant reply
239
  chat_messages.append({"role": "assistant", "content": assistant_reply})
240
  return chat_messages
241
 
242
-
243
  # =============================================================================
244
  # GRADIO UI
245
  # =============================================================================
@@ -248,90 +132,73 @@ def build_demo():
248
  # Custom CSS
249
  gr.HTML("""
250
  <style>
251
- .gradio-container { max-width: 1100px; margin: 18px auto; }
252
- .header { text-align: left; margin-bottom: 6px; }
253
- .muted { color: #7d8590; font-size: 14px; }
254
- .analysis-box { background: #ffffff; border-radius: 8px; padding: 12px; box-shadow: 0 4px 14px rgba(0,0,0,0.06); }
 
 
255
  </style>
256
  """)
257
-
258
- gr.Markdown("# AURA Chat — Hedge Fund Picks")
259
- gr.Markdown(
260
- "**Enter one or more data prompts (one per line)** — e.g. SEC insider transactions october 2025 company XYZ.\n\n"
261
- "Only input prompts; model, tokens and timing are fixed. Press **Analyze** to fetch & generate the picks. "
262
- "After analysis you can chat with the assistant about the results."
263
- )
264
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
265
  with gr.Row():
266
  with gr.Column(scale=1):
267
- prompts = gr.Textbox(
268
- lines=6,
269
- label="Data Prompts (one per line)",
270
- placeholder="SEC insider transactions october 2025\n13F filings Q3 2025\ncompany: ACME corp insider buys"
271
- )
272
  analyze_btn = gr.Button("Analyze", variant="primary")
273
  error_box = gr.Markdown("", visible=False)
274
  gr.Markdown(f"**Fixed settings:** Model = {LLM_MODEL} • Max tokens = {MAX_TOKENS} • Scrape delay = {SCRAPE_DELAY}s")
275
- gr.Markdown("**Important:** Add your OPENAI_API_KEY to Space Secrets before running.")
276
-
277
  with gr.Column(scale=1):
278
- analysis_out = gr.Textbox(
279
- label="Generated Analysis (Top picks with Investment Duration)",
280
- lines=18,
281
- interactive=False
282
- )
283
  gr.Markdown("**Chat with AURA about this analysis**")
284
- chatbot = gr.Chatbot(label="AURA Chat", height=420)
285
- user_input = gr.Textbox(
286
- placeholder="Ask a follow-up question about the analysis...",
287
- label="Your question"
288
- )
289
  send_btn = gr.Button("Send")
290
 
291
- # States
292
  analysis_state = gr.State("")
293
  chat_state = gr.State([])
294
-
295
- # Handler functions
296
  def on_analyze(prompts_text):
297
  analysis_text, initial_chat = analyze_and_seed_chat(prompts_text)
298
  if analysis_text.startswith("ERROR"):
299
  return "", f"**Error:** {analysis_text}", "", []
300
  return analysis_text, "", analysis_text, initial_chat
301
-
302
  def on_send(chat_state_list, user_msg, analysis_text):
303
- if not user_msg or not user_msg.strip():
304
- return chat_state_list or [], ""
305
  updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
306
  return updated_history, ""
307
-
308
- def render_chat(chat_messages):
309
- return chat_messages or []
310
-
311
- # Wire handlers
312
- analyze_btn.click(
313
- fn=on_analyze,
314
- inputs=[prompts],
315
- outputs=[analysis_out, error_box, analysis_state, chat_state]
316
- )
317
- send_btn.click(
318
- fn=on_send,
319
- inputs=[chat_state, user_input, analysis_state],
320
- outputs=[chat_state, user_input]
321
- )
322
- user_input.submit(
323
- fn=on_send,
324
- inputs=[chat_state, user_input, analysis_state],
325
- outputs=[chat_state, user_input]
326
- )
327
- chat_state.change(
328
- fn=render_chat,
329
- inputs=[chat_state],
330
- outputs=[chatbot]
331
- )
332
-
333
- return demo
334
 
 
 
 
 
 
 
335
 
336
  # =============================================================================
337
  # CLEAN SHUTDOWN
@@ -340,26 +207,16 @@ def _cleanup_on_exit():
340
  try:
341
  loop = asyncio.get_event_loop()
342
  if loop and not loop.is_closed():
343
- try:
344
- loop.stop()
345
- except Exception:
346
- pass
347
- try:
348
- loop.close()
349
- except Exception:
350
- pass
351
- except Exception:
352
- pass
353
-
354
  atexit.register(_cleanup_on_exit)
355
 
356
-
357
  # =============================================================================
358
  # RUN
359
  # =============================================================================
360
  if __name__ == "__main__":
361
  demo = build_demo()
362
- demo.launch(
363
- server_name="0.0.0.0",
364
- server_port=int(os.environ.get("PORT", 7860))
365
- )
 
1
  """
2
+ AURA Chat — Hedge Fund Picks (Enhanced UI + Info + Video)
3
+ Single-file Gradio app with:
4
+ - YouTube explainer video
5
+ - Info container (what it does, accuracy, example prompts)
6
+ - Modern two-column layout: prompts/input on left, analysis/chat on right
7
+ - Chat component for follow-up questions
 
 
 
 
 
8
  """
 
9
  import os
10
  import time
11
  import sys
 
13
  import requests
14
  import atexit
15
  import traceback
 
16
  from typing import List
17
  import gradio as gr
18
 
19
+ # =============================================================================
20
+ # EVENT LOOP FOR NON-WINDOWS
21
+ # =============================================================================
22
  if sys.platform != "win32":
23
  try:
24
  loop = asyncio.new_event_loop()
 
26
  except Exception:
27
  traceback.print_exc()
28
 
 
29
# =============================================================================
# CONFIGURATION
# =============================================================================
# Endpoint of the external deep-scrape service; overridable via env var.
SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
# Headers sent with every scrape request (JSON body, browser-like UA).
SCRAPER_HEADERS = {"User-Agent": "Mozilla/5.0", "Content-Type": "application/json"}
# Fixed LLM settings — not exposed in the UI, only via environment.
LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free")
MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000"))
# Pause (seconds) between consecutive scrape calls.
SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
# OpenAI-compatible credentials and endpoint (defaults to OpenRouter).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")

# System prompt for the analysis call; interpolates the fixed model settings
# at module-import time.
PROMPT_TEMPLATE = f"""You are AURA, a concise, professional hedge-fund research assistant.
Task:
- List top 5 stock picks (or fewer if data limited), with short rationale and Investment Duration (entry/exit).
- Include a summary (2-3 sentences) and Assumptions & Risks (2-3 bullet points).
- Keep entries short, scannable, plain text, no JSON.
Max tokens: {MAX_TOKENS}, Model: {LLM_MODEL}"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  # =============================================================================
48
+ # SCRAPER
49
  # =============================================================================
50
def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
    """POST *query* to the scraper API and return a readable text aggregation.

    Retries up to *retries* times on any failure. Never raises: on final
    failure an "ERROR: ..." string is returned so callers can branch on the
    prefix (see analyze_and_seed_chat).
    """
    last_err = None
    for attempt in range(retries):
        try:
            resp = requests.post(
                SCRAPER_API_URL,
                headers=SCRAPER_HEADERS,
                json={"query": query},
                timeout=timeout,
            )
            resp.raise_for_status()
            data = resp.json()
            # Dict responses are flattened to "KEY:\n<value>" sections.
            if isinstance(data, dict):
                return "\n".join(f"{k.upper()}:\n{v}" for k, v in data.items())
            return str(data)
        except Exception as e:
            last_err = e
            # Fix: back off only *between* attempts — the original slept one
            # extra second after the final failure for no benefit.
            if attempt < retries - 1:
                time.sleep(1)
    return f"ERROR: Scraper failed: {last_err}"
64
 
 
65
def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
    """Scrape every non-blank query and join the results into one string.

    Each result is preceded by a "=== QUERY: ... ===" banner; *delay* seconds
    are slept after each scrape to stay polite to the upstream service.
    """
    chunks = []
    for raw in queries:
        q = raw.strip()  # strip once instead of on every use
        if not q:
            continue
        chunks.append(f"\n=== QUERY: {q} ===\n")
        chunks.append(deep_scrape(q))
        time.sleep(delay)
    return "\n".join(chunks)
73
 
 
74
  # =============================================================================
75
+ # LLM
76
  # =============================================================================
77
  try:
78
  from openai import OpenAI
79
  except Exception:
80
  OpenAI = None
81
 
82
def run_llm_system_and_user(system_prompt: str, user_text: str) -> str:
    """Send a system+user message pair to the fixed LLM and return the reply.

    Never raises: when the client library is missing, the API key is unset,
    or the API call fails, an "ERROR: ..." string is returned so callers can
    branch on that prefix.
    """
    if OpenAI is None:
        return "ERROR: openai package not installed."
    if not OPENAI_API_KEY:
        return "ERROR: OPENAI_API_KEY not set."
    client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
    try:
        completion = client.chat.completions.create(
            model=LLM_MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_text},
            ],
            max_tokens=MAX_TOKENS,
        )
        # Fix: guard against an *empty* choices list too — the original only
        # checked hasattr and would have indexed into [].
        if getattr(completion, "choices", None):
            return completion.choices[0].message.content
        return str(completion)
    except Exception as e:
        return f"ERROR: LLM call failed: {e}"
    finally:
        # Best-effort transport cleanup; fix: narrow the bare `except:` so
        # SystemExit/KeyboardInterrupt are not swallowed here.
        try:
            client.close()
        except Exception:
            pass
 
 
 
 
 
 
 
 
 
 
 
99
 
100
  # =============================================================================
101
+ # ANALYSIS PIPELINE
102
  # =============================================================================
103
def analyze_and_seed_chat(prompts_text: str):
    """Run the scrape→LLM pipeline and build the initial chat transcript.

    Returns (analysis_text, seed_messages). seed_messages is empty whenever
    the pipeline produced a usage or "ERROR: ..." message instead of a real
    analysis.
    """
    if not prompts_text.strip():
        return "Please enter at least one prompt.", []

    queries = [ln.strip() for ln in prompts_text.splitlines() if ln.strip()]

    scraped = multi_scrape(queries)
    if scraped.startswith("ERROR"):
        return scraped, []

    user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nFollow instructions and output analysis."
    analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
    if analysis.startswith("ERROR"):
        return analysis, []

    seed_messages = [
        {"role": "user", "content": f"Analyze the data (prompts: {', '.join(queries)})"},
        {"role": "assistant", "content": analysis},
    ]
    return analysis, seed_messages
 
 
116
 
117
def continue_chat(chat_messages, user_message: str, analysis_text: str):
    """Append the user's follow-up and the LLM's reply to the chat history.

    Returns the (possibly freshly created) message list. A missing or blank
    question leaves the history unchanged. The previous analysis is passed to
    the LLM as reference context.
    """
    # Fix: also guard against None — the original called user_message.strip()
    # unconditionally and crashed with AttributeError on a None message
    # (the pre-refactor version had this guard).
    if not user_message or not user_message.strip():
        return chat_messages or []
    chat_messages = chat_messages or []
    chat_messages.append({"role": "user", "content": user_message})
    system_prompt = "You are AURA. Use previous analysis as reference and answer concisely."
    user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}"
    assistant_reply = run_llm_system_and_user(system_prompt, user_payload)
    chat_messages.append({"role": "assistant", "content": assistant_reply})
    return chat_messages
126
 
 
127
  # =============================================================================
128
  # GRADIO UI
129
  # =============================================================================
 
132
  # Custom CSS
133
  gr.HTML("""
134
  <style>
135
+ .gradio-container { max-width: 1200px; margin: 20px auto; font-family: Arial, sans-serif; }
136
+ .info-box { background:#f0f4f8; border-radius:10px; padding:20px; margin-bottom:20px; }
137
+ .analysis-box { background:#ffffff; border-radius:10px; padding:15px; box-shadow:0 4px 14px rgba(0,0,0,0.06);}
138
+ .section-title { font-size:20px; color:#333; margin-bottom:8px; }
139
+ .example { background:#e6f0ff; padding:8px; border-radius:5px; font-family:monospace; }
140
+ .header-title { color:#0a3d62; font-size:32px; font-weight:bold; margin-bottom:12px; text-align:center; }
141
  </style>
142
  """)
143
+ # YouTube video
144
+ gr.HTML("""
145
+ <div style="text-align:center; margin-bottom:20px;">
146
+ <iframe width="800" height="450" src="https://www.youtube.com/embed/56zpjyHd3d4"
147
+ title="AURA Chat Explainer" frameborder="0" allowfullscreen></iframe>
148
+ </div>
149
+ """)
150
+ # Info container
151
+ gr.HTML("""
152
+ <div class="info-box">
153
+ <div class="section-title">What this app does:</div>
154
+ Fetches latest public data on insider trading and top stock market insights based on your prompts.
155
+ Provides a ranked list of the best stocks to invest in with entry/exit alerts.
156
+ <br><br>
157
+ <div class="section-title">Example prompts:</div>
158
+ <div class="example">
159
+ SEC insider transactions october 2025<br>
160
+ 13F filings Q3 2025<br>
161
+ company: ACME corp insider buys
162
+ </div>
163
+ <br>
164
+ <div class="section-title">Output:</div>
165
+ Ranked top stock picks with short rationale, investment duration, and actionable insights.
166
+ </div>
167
+ """)
168
+ # Main columns
169
  with gr.Row():
170
  with gr.Column(scale=1):
171
+ prompts = gr.Textbox(lines=6, label="Data Prompts (one per line)", placeholder="Enter prompts here")
 
 
 
 
172
  analyze_btn = gr.Button("Analyze", variant="primary")
173
  error_box = gr.Markdown("", visible=False)
174
  gr.Markdown(f"**Fixed settings:** Model = {LLM_MODEL} • Max tokens = {MAX_TOKENS} • Scrape delay = {SCRAPE_DELAY}s")
 
 
175
  with gr.Column(scale=1):
176
+ analysis_out = gr.Textbox(label="Generated Analysis", lines=18, interactive=False)
 
 
 
 
177
  gr.Markdown("**Chat with AURA about this analysis**")
178
+ chatbot = gr.Chatbot(height=420)
179
+ user_input = gr.Textbox(placeholder="Ask follow-up question...", label="Your question")
 
 
 
180
  send_btn = gr.Button("Send")
181
 
 
182
  analysis_state = gr.State("")
183
  chat_state = gr.State([])
184
+
 
185
  def on_analyze(prompts_text):
186
  analysis_text, initial_chat = analyze_and_seed_chat(prompts_text)
187
  if analysis_text.startswith("ERROR"):
188
  return "", f"**Error:** {analysis_text}", "", []
189
  return analysis_text, "", analysis_text, initial_chat
190
+
191
  def on_send(chat_state_list, user_msg, analysis_text):
192
+ if not user_msg.strip(): return chat_state_list or [], ""
 
193
  updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
194
  return updated_history, ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
195
 
196
+ analyze_btn.click(fn=on_analyze, inputs=[prompts], outputs=[analysis_out, error_box, analysis_state, chat_state])
197
+ send_btn.click(fn=on_send, inputs=[chat_state, user_input, analysis_state], outputs=[chat_state, user_input])
198
+ user_input.submit(fn=on_send, inputs=[chat_state, user_input, analysis_state], outputs=[chat_state, user_input])
199
+ chat_state.change(fn=lambda x: x or [], inputs=[chat_state], outputs=[chatbot])
200
+
201
+ return demo
202
 
203
  # =============================================================================
204
  # CLEAN SHUTDOWN
 
207
  try:
208
  loop = asyncio.get_event_loop()
209
  if loop and not loop.is_closed():
210
+ try: loop.stop()
211
+ except: pass
212
+ try: loop.close()
213
+ except: pass
214
+ except: pass
 
 
 
 
 
 
215
  atexit.register(_cleanup_on_exit)
216
 
 
217
  # =============================================================================
218
  # RUN
219
  # =============================================================================
220
if __name__ == "__main__":
    # Bind to all interfaces; Hugging Face Spaces supplies the port via $PORT
    # (falls back to Gradio's default 7860 locally).
    demo = build_demo()
    demo.launch(
        server_name="0.0.0.0",
        server_port=int(os.environ.get("PORT", 7860)),
    )