File size: 10,953 Bytes
2d5d541
d4f0271
0a73e83
 
 
d4f0271
 
 
2d5d541
 
6e526ac
e5a4541
2bd3c6f
ccf5a94
2d5d541
ccf5a94
2bd3c6f
2d5d541
 
0a73e83
 
 
ccf5a94
 
 
 
 
 
 
4edc9b1
0a73e83
4edc9b1
2d5d541
0a73e83
6e526ac
 
 
2d5d541
 
 
e5a4541
2d5d541
0a73e83
 
 
 
e5a4541
4edc9b1
0a73e83
4edc9b1
2d5d541
 
0a73e83
2d5d541
0a73e83
2d5d541
 
 
0a73e83
 
2d5d541
 
0a73e83
e5a4541
 
2d5d541
 
 
0a73e83
 
 
2d5d541
 
 
4edc9b1
0a73e83
4edc9b1
e5a4541
 
 
 
 
0a73e83
 
 
 
2d5d541
 
0a73e83
 
 
 
2d5d541
0a73e83
2d5d541
 
 
0a73e83
 
4edc9b1
 
0a73e83
4edc9b1
2bd3c6f
e5a4541
0a73e83
2d5d541
0a73e83
 
 
2bd3c6f
0a73e83
 
 
e5a4541
6fafd23
4edc9b1
e5a4541
0a73e83
 
2bd3c6f
0a73e83
 
 
2bd3c6f
 
 
4edc9b1
 
 
9dad169
 
d4f0271
9dad169
 
d4f0271
 
 
 
 
 
 
 
 
 
 
 
9dad169
 
d4f0271
 
 
0a73e83
 
 
 
 
 
d4f0271
0a73e83
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d4f0271
 
9dad169
 
0a73e83
d4f0271
9dad169
d4f0271
9dad169
0a73e83
9dad169
0a73e83
 
9dad169
d4f0271
9dad169
 
0a73e83
9dad169
 
 
 
 
0a73e83
9dad169
0a73e83
9dad169
 
ccf5a94
0a73e83
 
 
 
 
 
4edc9b1
 
 
 
e5a4541
 
 
 
0a73e83
 
 
 
 
e5a4541
2d5d541
4edc9b1
 
 
2d5d541
9dad169
0a73e83
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
"""
AURA Chat — Hedge Fund Picks (Dark Mode + Enhanced UI)
Single-file Gradio app with:
- YouTube explainer video
- Info container (what it does, accuracy, example prompts)
- Dark theme (green, gray, black)
- Two-column layout: inputs left, analysis/chat right
- Interactive chat component
"""
import os
import time
import sys
import asyncio
import requests
import atexit
import traceback
from typing import List
import gradio as gr

# =============================================================================
# EVENT LOOP FOR NON-WINDOWS
# =============================================================================
# Eagerly create and install an asyncio event loop on POSIX platforms so
# later async machinery finds one in the main thread (presumably for
# Gradio's async internals — NOTE(review): confirm this is still needed
# with the deployed Gradio version).  Failure is non-fatal: log and go on.
if sys.platform != "win32":
    try:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    except Exception:
        traceback.print_exc()

# =============================================================================
# CONFIGURATION
# =============================================================================
# Every setting is environment-overridable so deployments need no code edits.
# Endpoint of the external deep-scrape service (expects POST JSON {"query": ...}).
SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
SCRAPER_HEADERS = {"User-Agent": "Mozilla/5.0", "Content-Type": "application/json"}
# Model id sent to the OpenAI-compatible endpoint (default: an OpenRouter free model).
LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free")
# Upper bound on completion tokens per LLM call.
MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000"))
# Pause in seconds between consecutive scraper requests (crude rate limiting).
SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
# Credentials/endpoint for the LLM; OPENAI_API_KEY must be set or analysis
# calls return an error string (see run_llm_system_and_user).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")

# System prompt for the analysis call.  Note: MAX_TOKENS and LLM_MODEL are
# baked in once at import time via the f-string.
PROMPT_TEMPLATE = f"""You are AURA, a concise, professional hedge-fund research assistant.
Task:
- List top 5 stock picks (or fewer if data limited), with short rationale and Investment Duration (entry/exit).
- Include a summary (2-3 sentences) and Assumptions & Risks (2-3 bullet points).
- Keep entries short, scannable, plain text, no JSON.
Max tokens: {MAX_TOKENS}, Model: {LLM_MODEL}"""

# =============================================================================
# SCRAPER
# =============================================================================
def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
    """POST *query* to the deep-scrape API and return its payload as text.

    Retries up to *retries* times on transport/HTTP/JSON errors, waiting one
    second between attempts (but not after the final failure).  A dict
    response is flattened to "KEY:\\nvalue" sections; any other JSON value is
    stringified.  Never raises: failures come back as an "ERROR: ..." string,
    matching the pipeline's string-based error convention.
    """
    last_err = None
    for attempt in range(retries):
        try:
            resp = requests.post(
                SCRAPER_API_URL,
                headers=SCRAPER_HEADERS,
                json={"query": query},
                timeout=timeout,
            )
            resp.raise_for_status()
            data = resp.json()
        # RequestException covers connection/timeout/HTTP errors;
        # ValueError covers malformed JSON bodies (resp.json() raises it).
        except (requests.RequestException, ValueError) as e:
            last_err = e
            if attempt < retries - 1:  # don't waste a sleep after the last try
                time.sleep(1)
            continue
        if isinstance(data, dict):
            return "\n".join(f"{k.upper()}:\n{v}" for k, v in data.items())
        return str(data)
    return f"ERROR: Scraper failed: {last_err}"

def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
    """Run deep_scrape over every non-blank query and join the results.

    Each result is preceded by a "=== QUERY: ... ===" banner.  Waits *delay*
    seconds between requests but not after the final one, so a single-query
    analysis pays no rate-limit penalty.
    """
    # Strip once, up front, instead of calling q.strip() per use.
    cleaned = [q.strip() for q in queries if q.strip()]
    aggregated = []
    for i, q in enumerate(cleaned):
        aggregated.append(f"\n=== QUERY: {q} ===\n")
        aggregated.append(deep_scrape(q))
        if i < len(cleaned) - 1:  # no pause after the last query
            time.sleep(delay)
    return "\n".join(aggregated)

# =============================================================================
# LLM
# =============================================================================
try:
    from openai import OpenAI
except Exception:
    # Optional dependency: when the package is missing, the sentinel None
    # makes run_llm_system_and_user return an error string instead of the
    # whole app crashing at import time.
    OpenAI = None

def run_llm_system_and_user(system_prompt: str, user_text: str) -> str:
    """Send one system+user exchange to the configured LLM, return the reply.

    Never raises: a missing openai package, an unset API key, or any API
    failure is reported as an "ERROR: ..." string, matching the pipeline's
    string-based error convention.  The client is closed best-effort.
    """
    if OpenAI is None:
        return "ERROR: openai package not installed."
    if not OPENAI_API_KEY:
        return "ERROR: OPENAI_API_KEY not set."
    client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
    try:
        completion = client.chat.completions.create(
            model=LLM_MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_text},
            ],
            max_tokens=MAX_TOKENS,
        )
        # Defensive: fall back to str() if the response lacks .choices.
        if hasattr(completion, "choices"):
            return completion.choices[0].message.content
        return str(completion)
    except Exception as e:
        return f"ERROR: LLM call failed: {e}"
    finally:
        try:
            client.close()
        except Exception:  # was a bare except; never mask the return value
            pass

# =============================================================================
# ANALYSIS PIPELINE
# =============================================================================
def analyze_and_seed_chat(prompts_text: str):
    """Scrape data for each prompt line, run the LLM, and seed the chat.

    Returns (analysis_text, seed_messages).  On any failure the first element
    is an error/notice string and the seed list is empty.
    """
    if not prompts_text.strip():
        return "Please enter at least one prompt.", []

    queries = [ln.strip() for ln in prompts_text.splitlines() if ln.strip()]

    scraped = multi_scrape(queries)
    if scraped.startswith("ERROR"):
        return scraped, []

    payload = f"SCRAPED DATA:\n\n{scraped}\n\nFollow instructions and output analysis."
    analysis = run_llm_system_and_user(PROMPT_TEMPLATE, payload)
    if analysis.startswith("ERROR"):
        return analysis, []

    seed = [
        {"role": "user", "content": f"Analyze the data (prompts: {', '.join(queries)})"},
        {"role": "assistant", "content": analysis},
    ]
    return analysis, seed

def continue_chat(chat_messages, user_message: str, analysis_text: str):
    """Append the user's follow-up and AURA's reply to the chat history.

    Mutates and returns *chat_messages* (a fresh list when it is falsy).
    A blank message is a no-op.  The prior analysis is passed to the LLM as
    reference context.
    """
    if not user_message.strip():
        return chat_messages or []
    history = chat_messages or []
    history.append({"role": "user", "content": user_message})
    reply = run_llm_system_and_user(
        "You are AURA. Use previous analysis as reference and answer concisely.",
        f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\n" + f"USER QUESTION: {user_message}",
    )
    history.append({"role": "assistant", "content": reply})
    return history

# =============================================================================
# GRADIO UI
# =============================================================================
def build_demo():
    """Construct and return the Gradio Blocks UI.

    Layout: header, YouTube explainer, info panel, then a two-column row
    (prompt inputs left; analysis output and chat right).  Two gr.State
    components hold the raw analysis text and the chat history, which is a
    list of {"role": ..., "content": ...} dicts.
    """
    with gr.Blocks(title="AURA Chat β€” Hedge Fund Picks") as demo:
        # Dark theme CSS
        gr.HTML("""
        <style>
        body { background-color: #121212; color: #cfd8dc; font-family: Arial, sans-serif; }
        .gradio-container { max-width: 1200px; margin: 20px auto; }
        .info-box { background:#1e1e1e; border-left:4px solid #00ff66; border-radius:10px; padding:20px; margin-bottom:20px; }
        .analysis-box { background:#1e1e1e; border-radius:10px; padding:15px; box-shadow:0 4px 14px rgba(0,255,102,0.2); color:#cfd8dc; overflow-y:auto; }
        .section-title { font-size:20px; color:#00ff66; margin-bottom:8px; }
        .example { background:#263238; padding:8px; border-radius:5px; font-family:monospace; color:#00ff66; }
        .header-title { color:#00ff66; font-size:32px; font-weight:bold; margin-bottom:12px; text-align:center; }
        .gr-button { background-color:#00ff66; color:#121212; border:none; }
        .gr-button:hover { background-color:#00cc52; }
        .chat-message-user { background:#263238; color:#cfd8dc; border-radius:8px; padding:8px; }
        .chat-message-assistant { background:#00ff66; color:#121212; border-radius:8px; padding:8px; }
        .textbox input, .textbox textarea { background:#1e1e1e; color:#cfd8dc; border:1px solid #00ff66; border-radius:5px; }
        </style>
        """)
        gr.HTML('<div class="header-title">AURA Chat β€” Hedge Fund Picks</div>')

        # YouTube explainer
        gr.HTML("""
        <div style="text-align:center; margin-bottom:20px;">
            <iframe width="800" height="450" src="https://www.youtube.com/embed/56zpjyHd3d4"
            title="AURA Chat Explainer" frameborder="0" allowfullscreen></iframe>
        </div>
        """)

        # Info container
        gr.HTML("""
        <div class="info-box">
            <div class="section-title">What this app does:</div>
            Fetches latest public data on insider trading and top stock market insights based on your prompts. 
            Provides a ranked list of the best stocks to invest in with entry/exit alerts.
            <br><br>
            <div class="section-title">Example prompts:</div>
            <div class="example">
            SEC insider transactions october 2025<br>
            13F filings Q3 2025<br>
            company: ACME corp insider buys
            </div>
            <br>
            <div class="section-title">Output:</div>
            Ranked top stock picks with short rationale, investment duration, and actionable insights.
        </div>
        """)

        # Two-column layout
        with gr.Row():
            with gr.Column(scale=1):
                prompts = gr.Textbox(lines=6, label="Data Prompts (one per line)", placeholder="Enter prompts here")
                analyze_btn = gr.Button("Analyze")
                error_box = gr.Markdown("", visible=False)
                gr.Markdown(f"**Fixed settings:** Model = {LLM_MODEL} β€’ Max tokens = {MAX_TOKENS} β€’ Scrape delay = {SCRAPE_DELAY}s", elem_classes="gr-mb-2")
            with gr.Column(scale=1):
                analysis_out = gr.Textbox(label="Generated Analysis", lines=18, interactive=False)
                gr.Markdown("**Chat with AURA about this analysis**")
                # type="messages" matches the {"role", "content"} dicts the
                # pipeline produces; the legacy tuple format would not render
                # them.
                chatbot = gr.Chatbot(height=420, type="messages")
                user_input = gr.Textbox(placeholder="Ask follow-up question...", label="Your question")
                send_btn = gr.Button("Send")

        analysis_state = gr.State("")
        chat_state = gr.State([])

        def on_analyze(prompts_text):
            # Run the full scrape+LLM pipeline and route output/error/state.
            analysis_text, initial_chat = analyze_and_seed_chat(prompts_text)
            if analysis_text.startswith("ERROR"):
                # Make the (initially hidden) error box visible so the
                # message actually reaches the user.
                return "", gr.update(value=f"**Error:** {analysis_text}", visible=True), "", []
            return analysis_text, gr.update(value="", visible=False), analysis_text, initial_chat

        def on_send(chat_state_list, user_msg, analysis_text):
            # Append a chat turn; clear the input box on return.
            if not user_msg.strip(): return chat_state_list or [], ""
            updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
            return updated_history, ""

        analyze_btn.click(fn=on_analyze, inputs=[prompts], outputs=[analysis_out, error_box, analysis_state, chat_state])
        send_btn.click(fn=on_send, inputs=[chat_state, user_input, analysis_state], outputs=[chat_state, user_input])
        user_input.submit(fn=on_send, inputs=[chat_state, user_input, analysis_state], outputs=[chat_state, user_input])
        # Mirror the chat state into the Chatbot component whenever it changes.
        chat_state.change(fn=lambda x: x or [], inputs=[chat_state], outputs=[chatbot])

    return demo

# =============================================================================
# CLEAN SHUTDOWN
# =============================================================================
def _cleanup_on_exit():
    try:
        loop = asyncio.get_event_loop()
        if loop and not loop.is_closed():
            try: loop.stop()
            except: pass
            try: loop.close()
            except: pass
    except: pass
atexit.register(_cleanup_on_exit)

# =============================================================================
# RUN
# =============================================================================
if __name__ == "__main__":
    # Bind on all interfaces; honor the platform-provided PORT when present.
    server_port = int(os.environ.get("PORT", 7860))
    demo = build_demo()
    demo.launch(server_name="0.0.0.0", server_port=server_port)