# PublicAlpha / app.py — Hugging Face Space by GabrielSalem (commit d4f0271, verified)
"""
AURA Chat β€” Hedge Fund Picks (Dark Mode + Enhanced UI)
Single-file Gradio app with:
- YouTube explainer video
- Info container (what it does, accuracy, example prompts)
- Dark theme (green, gray, black)
- Two-column layout: inputs left, analysis/chat right
- Interactive chat component
"""
import os
import time
import sys
import asyncio
import requests
import atexit
import traceback
from typing import List
import gradio as gr
# =============================================================================
# EVENT LOOP FOR NON-WINDOWS
# =============================================================================
# Pre-create an asyncio event loop on non-Windows platforms so async-capable
# libraries find one at import time.  The loop created here is the one torn
# down by _cleanup_on_exit() at interpreter exit.
if sys.platform != "win32":
    try:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    except Exception:
        # Non-fatal: log and continue with whatever loop policy provides.
        traceback.print_exc()
# =============================================================================
# CONFIGURATION
# =============================================================================
# Deep-scrape backend endpoint; override via env for a self-hosted instance.
SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
SCRAPER_HEADERS = {"User-Agent": "Mozilla/5.0", "Content-Type": "application/json"}
# LLM is reached through an OpenAI-compatible API (OpenRouter by default).
LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free")
MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000"))
# Seconds to pause between successive scraper calls (simple rate limiting).
SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # required for LLM calls; checked at call time
OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")
# System prompt for the analysis pass.  NOTE: f-string, so MAX_TOKENS and
# LLM_MODEL are baked in at import time; later env changes won't be reflected.
PROMPT_TEMPLATE = f"""You are AURA, a concise, professional hedge-fund research assistant.
Task:
- List top 5 stock picks (or fewer if data limited), with short rationale and Investment Duration (entry/exit).
- Include a summary (2-3 sentences) and Assumptions & Risks (2-3 bullet points).
- Keep entries short, scannable, plain text, no JSON.
Max tokens: {MAX_TOKENS}, Model: {LLM_MODEL}"""
# =============================================================================
# SCRAPER
# =============================================================================
def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
    """POST *query* to the deep-scrape API and return its payload as plain text.

    Retries up to *retries* times on any request/parse failure, waiting 1s
    between attempts.  A dict response is flattened into "KEY:\\nvalue"
    sections; any other payload is stringified.  On total failure returns an
    "ERROR: ..." string instead of raising (callers check the prefix).
    """
    last_err = None
    for attempt in range(retries):
        try:
            resp = requests.post(
                SCRAPER_API_URL,
                headers=SCRAPER_HEADERS,
                json={"query": query},
                timeout=timeout,
            )
            resp.raise_for_status()
            data = resp.json()
        except Exception as e:  # network error, HTTP error, or invalid JSON
            last_err = e
            # Back off only when another attempt remains; the old code also
            # slept after the final failure, which just delayed the error.
            if attempt < retries - 1:
                time.sleep(1)
            continue
        if isinstance(data, dict):
            return "\n".join(f"{k.upper()}:\n{v}" for k, v in data.items())
        return str(data)
    return f"ERROR: Scraper failed: {last_err}"
def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
    """Scrape each non-blank query in order and concatenate the results.

    Pauses *delay* seconds between consecutive scraper calls (rate limiting).
    The old version also slept after the final query, delaying the response
    for no benefit.  Returns one text blob with "=== QUERY: ... ===" headers.
    """
    aggregated = []
    cleaned = [q.strip() for q in queries if q.strip()]
    for i, q in enumerate(cleaned):
        if i:  # no pause before the first call or after the last
            time.sleep(delay)
        aggregated.append(f"\n=== QUERY: {q} ===\n")
        aggregated.append(deep_scrape(q))
    return "\n".join(aggregated)
# =============================================================================
# LLM
# =============================================================================
# Optional dependency: resolve OpenAI to None when the `openai` package is
# missing so run_llm_system_and_user can return a readable error string
# instead of the whole app crashing at import time.
try:
    from openai import OpenAI
except Exception:
    OpenAI = None
def run_llm_system_and_user(system_prompt: str, user_text: str) -> str:
    """Run one system+user chat completion and return the reply text.

    Never raises: returns an "ERROR: ..." string when the client library is
    missing, the API key is unset, or the API call fails — callers check the
    prefix.  The client is always closed, even on failure.
    """
    if OpenAI is None:
        return "ERROR: openai package not installed."
    if not OPENAI_API_KEY:
        return "ERROR: OPENAI_API_KEY not set."
    client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
    try:
        completion = client.chat.completions.create(
            model=LLM_MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_text},
            ],
            max_tokens=MAX_TOKENS,
        )
        # Defensive: some OpenAI-compatible providers return odd payloads.
        if hasattr(completion, "choices"):
            return completion.choices[0].message.content
        return str(completion)
    except Exception as e:
        return f"ERROR: LLM call failed: {e}"
    finally:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        try:
            client.close()
        except Exception:
            pass
# =============================================================================
# ANALYSIS PIPELINE
# =============================================================================
def analyze_and_seed_chat(prompts_text: str):
    """Scrape the user's prompts, run the LLM analysis, and seed the chat.

    Returns a (analysis_or_error_text, chat_messages) pair; the message list
    is empty whenever scraping or the LLM call fails.
    """
    if not prompts_text.strip():
        return "Please enter at least one prompt.", []

    queries = [ln.strip() for ln in prompts_text.splitlines() if ln.strip()]

    scraped = multi_scrape(queries)
    if scraped.startswith("ERROR"):
        return scraped, []

    payload = f"SCRAPED DATA:\n\n{scraped}\n\nFollow instructions and output analysis."
    analysis = run_llm_system_and_user(PROMPT_TEMPLATE, payload)
    if analysis.startswith("ERROR"):
        return analysis, []

    seed_history = [
        {"role": "user", "content": f"Analyze the data (prompts: {', '.join(queries)})"},
        {"role": "assistant", "content": analysis},
    ]
    return analysis, seed_history
def continue_chat(chat_messages, user_message: str, analysis_text: str):
    """Append the user's follow-up question and AURA's reply to the history.

    Mutates and returns *chat_messages* (a list of role/content dicts).
    A blank question is a no-op that returns the existing history.
    """
    if not user_message.strip():
        return chat_messages or []

    history = chat_messages or []
    history.append({"role": "user", "content": user_message})

    reference = (
        f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\n"
        f"USER QUESTION: {user_message}"
    )
    reply = run_llm_system_and_user(
        "You are AURA. Use previous analysis as reference and answer concisely.",
        reference,
    )
    history.append({"role": "assistant", "content": reply})
    return history
# =============================================================================
# GRADIO UI
# =============================================================================
def build_demo():
    """Assemble and return the Gradio Blocks app.

    Layout: dark-theme CSS + header + explainer video + info box, then a
    two-column row (prompt input on the left, analysis and chat on the right).

    Fixes in this revision:
      - `error_box` is toggled with gr.update(visible=...).  It is created
        with visible=False, and returning a plain string never revealed it,
        so error messages were silently dropped.
      - The Chatbot is created with type="messages" to match the
        {"role", "content"} dicts stored in chat_state by
        analyze_and_seed_chat / continue_chat.
    """
    with gr.Blocks(title="AURA Chat β€” Hedge Fund Picks") as demo:
        # Dark theme CSS (green / gray / black)
        gr.HTML("""
        <style>
        body { background-color: #121212; color: #cfd8dc; font-family: Arial, sans-serif; }
        .gradio-container { max-width: 1200px; margin: 20px auto; }
        .info-box { background:#1e1e1e; border-left:4px solid #00ff66; border-radius:10px; padding:20px; margin-bottom:20px; }
        .analysis-box { background:#1e1e1e; border-radius:10px; padding:15px; box-shadow:0 4px 14px rgba(0,255,102,0.2); color:#cfd8dc; overflow-y:auto; }
        .section-title { font-size:20px; color:#00ff66; margin-bottom:8px; }
        .example { background:#263238; padding:8px; border-radius:5px; font-family:monospace; color:#00ff66; }
        .header-title { color:#00ff66; font-size:32px; font-weight:bold; margin-bottom:12px; text-align:center; }
        .gr-button { background-color:#00ff66; color:#121212; border:none; }
        .gr-button:hover { background-color:#00cc52; }
        .chat-message-user { background:#263238; color:#cfd8dc; border-radius:8px; padding:8px; }
        .chat-message-assistant { background:#00ff66; color:#121212; border-radius:8px; padding:8px; }
        .textbox input, .textbox textarea { background:#1e1e1e; color:#cfd8dc; border:1px solid #00ff66; border-radius:5px; }
        </style>
        """)
        gr.HTML('<div class="header-title">AURA Chat β€” Hedge Fund Picks</div>')
        # YouTube explainer
        gr.HTML("""
        <div style="text-align:center; margin-bottom:20px;">
        <iframe width="800" height="450" src="https://www.youtube.com/embed/56zpjyHd3d4"
        title="AURA Chat Explainer" frameborder="0" allowfullscreen></iframe>
        </div>
        """)
        # Info container
        gr.HTML("""
        <div class="info-box">
        <div class="section-title">What this app does:</div>
        Fetches latest public data on insider trading and top stock market insights based on your prompts.
        Provides a ranked list of the best stocks to invest in with entry/exit alerts.
        <br><br>
        <div class="section-title">Example prompts:</div>
        <div class="example">
        SEC insider transactions october 2025<br>
        13F filings Q3 2025<br>
        company: ACME corp insider buys
        </div>
        <br>
        <div class="section-title">Output:</div>
        Ranked top stock picks with short rationale, investment duration, and actionable insights.
        </div>
        """)
        # Two-column layout: inputs left, analysis/chat right
        with gr.Row():
            with gr.Column(scale=1):
                prompts = gr.Textbox(lines=6, label="Data Prompts (one per line)", placeholder="Enter prompts here")
                analyze_btn = gr.Button("Analyze")
                error_box = gr.Markdown("", visible=False)
                gr.Markdown(f"**Fixed settings:** Model = {LLM_MODEL} β€’ Max tokens = {MAX_TOKENS} β€’ Scrape delay = {SCRAPE_DELAY}s", elem_classes="gr-mb-2")
            with gr.Column(scale=1):
                analysis_out = gr.Textbox(label="Generated Analysis", lines=18, interactive=False)
                gr.Markdown("**Chat with AURA about this analysis**")
                # type="messages" matches the role/content dicts kept in chat_state.
                chatbot = gr.Chatbot(height=420, type="messages")
                user_input = gr.Textbox(placeholder="Ask follow-up question...", label="Your question")
                send_btn = gr.Button("Send")

        analysis_state = gr.State("")  # last analysis text, used as chat reference
        chat_state = gr.State([])      # canonical chat history (role/content dicts)

        def on_analyze(prompts_text):
            # Run the full scrape+LLM pipeline; reveal error_box on failure,
            # hide it again on success.
            analysis_text, initial_chat = analyze_and_seed_chat(prompts_text)
            if analysis_text.startswith("ERROR"):
                return "", gr.update(value=f"**Error:** {analysis_text}", visible=True), "", []
            return analysis_text, gr.update(value="", visible=False), analysis_text, initial_chat

        def on_send(chat_state_list, user_msg, analysis_text):
            # Ignore blank submissions; otherwise append Q&A and clear the input box.
            if not user_msg.strip():
                return chat_state_list or [], ""
            return continue_chat(chat_state_list or [], user_msg, analysis_text), ""

        analyze_btn.click(fn=on_analyze, inputs=[prompts],
                          outputs=[analysis_out, error_box, analysis_state, chat_state])
        send_btn.click(fn=on_send, inputs=[chat_state, user_input, analysis_state],
                       outputs=[chat_state, user_input])
        user_input.submit(fn=on_send, inputs=[chat_state, user_input, analysis_state],
                          outputs=[chat_state, user_input])
        # Mirror chat_state into the visible Chatbot whenever it changes.
        chat_state.change(fn=lambda x: x or [], inputs=[chat_state], outputs=[chatbot])
    return demo
# =============================================================================
# CLEAN SHUTDOWN
# =============================================================================
def _cleanup_on_exit():
try:
loop = asyncio.get_event_loop()
if loop and not loop.is_closed():
try: loop.stop()
except: pass
try: loop.close()
except: pass
except: pass
atexit.register(_cleanup_on_exit)
# =============================================================================
# RUN
# =============================================================================
# Entry point: build the UI and serve it on all interfaces.
if __name__ == "__main__":
    port = int(os.environ.get("PORT", 7860))
    demo = build_demo()
    demo.launch(server_name="0.0.0.0", server_port=port)