""" |
|
|
SuperCoder - Hugging Face Spaces Frontend |
|
|
Connects to your local API server via tunnel |
|
|
""" |
|
|
import gradio as gr |
|
|
import requests |
|
|
from typing import List, Tuple |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
API_URL = "https://inge-chalcographic-helene.ngrok-free.dev" |
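# The hard-coded URL above must be updated whenever the tunnel address changes.
# As a small convenience, allow an environment-variable override (the variable
# name SUPERCODER_API_URL is an assumption invented here, not an existing
# convention of this project):
import os

API_URL = os.environ.get("SUPERCODER_API_URL", API_URL)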
|
|
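# A sketch of the assumed /api/chat contract (inferred from the handling in
# call_api below, not from server documentation): the endpoint accepts an
# OpenAI-style message list and returns JSON shaped like
#     {"response": "<generated text>"}
# so a hypothetical exchange could look like:
#     >>> call_api("Write a one-line hello world in Python")
#     'print("Hello, world!")'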
def call_api(message: str, temperature: float = 0.1, max_tokens: int = 512) -> str:
    """Call the SuperCoder API running on your local machine."""
    try:
        response = requests.post(
            f"{API_URL}/api/chat",
            json={
                "messages": [{"role": "user", "content": message}],
                "temperature": temperature,
                "max_tokens": max_tokens,
                "stream": False,
            },
            timeout=60,
        )

        if response.status_code == 200:
            result = response.json()
            return result.get("response", "No response from API")
        else:
            return f"❌ API Error ({response.status_code}): {response.text}"

    except requests.exceptions.Timeout:
        return "⏱️ Request timed out. The model might be processing a complex request."
    except requests.exceptions.ConnectionError:
        return "🔌 Connection failed. Please ensure your local API server is running."
    except Exception as e:
        return f"⚠️ Error: {e}"
|
|
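# The /health endpoint is assumed to return JSON along the lines of
# {"status": "ok", "model_loaded": true}; only the "model_loaded" flag is
# inspected here.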
def check_api_status() -> str:
    """Check if the API is reachable."""
    try:
        response = requests.get(f"{API_URL}/health", timeout=5)
        if response.status_code == 200:
            data = response.json()
            if data.get("model_loaded"):
                return "✅ Connected - Model Ready"
            else:
                return "⚠️ Connected but model not loaded"
        else:
            return f"❌ API returned status {response.status_code}"
    except Exception:
        return "🔴 Not connected to API"
|
|
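# History is a list of (user, assistant) tuples, the classic tuple format that
# older versions of gr.Chatbot accept by default.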
def chat_interface(message: str, history: List[Tuple[str, str]],
                   temperature: float, max_tokens: int) -> Tuple[List[Tuple[str, str]], str]:
    """Handle a single chat turn."""
    if not message.strip():
        return history, ""

    # Append the user message with an empty slot for the assistant's reply...
    history = history + [(message, None)]

    response = call_api(message, temperature, max_tokens)

    # ...then fill that slot with the model's response.
    history[-1] = (message, response)

    return history, ""
|
|
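# Prompt templates surfaced in the sidebar; the "# PASTE YOUR CODE HERE"
# placeholder is meant to be replaced by the user before sending.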
QUICK_ACTIONS = {
    "Explain Code": "Explain the following code:\n\n```python\n# PASTE YOUR CODE HERE\n```",
    "Debug Code": "Help me debug this code:\n\n```python\n# PASTE YOUR CODE HERE\n```",
    "Write Function": "Write a Python function that:",
    "Optimize Code": "Optimize this code for better performance:\n\n```python\n# PASTE YOUR CODE HERE\n```",
    "Add Comments": "Add detailed comments to this code:\n\n```python\n# PASTE YOUR CODE HERE\n```",
}
|
|
def use_template(template_name: str) -> str:
    """Return the selected template text."""
    return QUICK_ACTIONS.get(template_name, "")
|
|
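# UI definition. Note that the .status-* CSS classes below are defined but, as
# written, no component attaches them via elem_classes; they are available for
# future styling of the status box.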
with gr.Blocks(
    title="SuperCoder Pro",
    theme=gr.themes.Soft(primary_hue="indigo"),
    css="""
    .container { max-width: 1200px; margin: auto; }
    .status-box { padding: 10px; border-radius: 5px; margin: 10px 0; }
    .status-connected { background-color: #d4edda; }
    .status-disconnected { background-color: #f8d7da; }
    """
) as demo:
|
|
gr.Markdown(""" |
|
|
# π€ SuperCoder Pro |
|
|
### AI-Powered Coding Assistant |
|
|
|
|
|
Your personal AI coding assistant powered by local hardware. Ask me to write, |
|
|
explain, debug, or optimize code! |
|
|
|
|
|
--- |
|
|
""") |
|
|
    with gr.Row():
        status_text = gr.Textbox(
            value=check_api_status(),
            label="📊 API Status",
            interactive=False,
            show_label=True,
        )
        refresh_btn = gr.Button("🔄 Refresh Status", size="sm")

    refresh_btn.click(
        fn=check_api_status,
        outputs=status_text,
    )
|
|
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                label="💬 Conversation",
                height=500,
                show_copy_button=True,
                avatar_images=(None, "🤖"),
            )

            with gr.Row():
                msg_input = gr.Textbox(
                    placeholder="Ask me to write, explain, debug, or review code...",
                    show_label=False,
                    scale=5,
                    lines=2,
                )
                send_btn = gr.Button("Send 🚀", scale=1, variant="primary")

        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Settings")

            temperature = gr.Slider(
                0.0, 1.0,
                value=0.1,
                step=0.05,
                label="🌡️ Temperature",
                info="Lower = precise, Higher = creative",
            )

            max_tokens = gr.Slider(
                128, 2048,
                value=512,
                step=128,
                label="📏 Max Tokens",
                info="Response length limit",
            )

            gr.Markdown("### 🎯 Quick Actions")

            template_dropdown = gr.Dropdown(
                choices=list(QUICK_ACTIONS.keys()),
                label="Select Template",
                value=None,
            )

            use_template_btn = gr.Button("Use Template", size="sm")

            clear_btn = gr.Button("🗑️ Clear Chat", variant="stop", size="sm")
|
|
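    # Event wiring: pressing Enter in the textbox and clicking Send both route
    # through chat_interface, which appends the turn and clears the input box.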
    msg_input.submit(
        fn=chat_interface,
        inputs=[msg_input, chatbot, temperature, max_tokens],
        outputs=[chatbot, msg_input],
    )

    send_btn.click(
        fn=chat_interface,
        inputs=[msg_input, chatbot, temperature, max_tokens],
        outputs=[chatbot, msg_input],
    )

    use_template_btn.click(
        fn=use_template,
        inputs=[template_dropdown],
        outputs=[msg_input],
    )

    clear_btn.click(
        fn=lambda: ([], ""),
        outputs=[chatbot, msg_input],
    )
|
|
gr.Markdown(""" |
|
|
--- |
|
|
|
|
|
### π‘ Tips |
|
|
- **Be specific** in your requests for better results |
|
|
- **Paste code** directly in your messages |
|
|
- Use **templates** for common tasks |
|
|
- Adjust **temperature** for more creative or precise outputs |
|
|
|
|
|
### β οΈ Important |
|
|
This Space connects to a **locally-running** AI model via tunnel. |
|
|
If you see connection errors, the local server may be offline. |
|
|
|
|
|
### π Privacy |
|
|
- All processing happens on the owner's local machine |
|
|
- No data is stored by Hugging Face |
|
|
- Each chat session is independent |
|
|
|
|
|
--- |
|
|
|
|
|
**Built with β€οΈ using llama.cpp and Gradio** |
|
|
""") |
|
|
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True,
    )
|