"""
SuperCoder - Hugging Face Spaces Frontend
Connects to your local API server through a tunnel (ngrok or Cloudflare Tunnel)
"""
import gradio as gr
import requests
from typing import List, Tuple
# ============================================================================
# Configuration - EDIT THIS WITH YOUR TUNNEL URL
# ============================================================================
API_URL = "https://inge-chalcographic-helene.ngrok-free.dev"
# Example URLs:
# ngrok: https://abc123.ngrok-free.app
# cloudflare: https://abc123.trycloudflare.com
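# Optional hardening (a sketch, not part of the original setup): read the
# tunnel URL from an environment variable so it can live in a Spaces secret
# instead of the source. The variable name SUPERCODER_API_URL is an
# illustrative choice, not something the backend requires.
import os
API_URL = os.environ.get("SUPERCODER_API_URL", API_URL)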
# ============================================================================
# API Client Functions
# ============================================================================
def call_api(message: str, temperature: float = 0.1, max_tokens: int = 512) -> str:
"""
Call the SuperCoder API running on your local machine.
"""
try:
response = requests.post(
f"{API_URL}/api/chat",
json={
"messages": [{"role": "user", "content": message}],
"temperature": temperature,
"max_tokens": max_tokens,
"stream": False
},
timeout=60
)
if response.status_code == 200:
result = response.json()
return result.get("response", "No response from API")
else:
return f"❌ API Error ({response.status_code}): {response.text}"
except requests.exceptions.Timeout:
return "⏱️ Request timed out. The model might be processing a complex request."
except requests.exceptions.ConnectionError:
return "πŸ”Œ Connection failed. Please ensure your local API server is running."
except Exception as e:
return f"⚠️ Error: {str(e)}"
def check_api_status() -> str:
"""Check if the API is reachable."""
try:
response = requests.get(f"{API_URL}/health", timeout=5)
if response.status_code == 200:
data = response.json()
if data.get("model_loaded"):
return "βœ… Connected - Model Ready"
else:
return "⚠️ Connected but model not loaded"
else:
return f"❌ API returned status {response.status_code}"
    except requests.exceptions.RequestException:
        return "🔴 Not connected to API"
# ============================================================================
# Gradio Interface
# ============================================================================
def chat_interface(message: str, history: List[Tuple[str, str]],
temperature: float, max_tokens: int) -> Tuple[List[Tuple[str, str]], str]:
"""Handle chat interaction."""
if not message.strip():
return history, ""
    # Get the AI response, then append the (user, assistant) pair to history
    response = call_api(message, temperature, max_tokens)
    history = history + [(message, response)]
return history, ""
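# A streaming counterpart to chat_interface(), usable only if the
# call_api_stream() sketch above matches your server. Gradio runs generator
# event handlers incrementally, so yielding partial history makes the reply
# appear as it is generated; swap it in for the non-streaming handler via
# msg_input.submit(fn=chat_interface_stream, ...) if you adopt it.
def chat_interface_stream(message: str, history: List[Tuple[str, str]],
                          temperature: float, max_tokens: int):
    """Stream the assistant reply into the chat window chunk by chunk."""
    if not message.strip():
        yield history, ""
        return
    history = history + [(message, "")]
    for chunk in call_api_stream(message, temperature, max_tokens):
        # Grow the in-progress assistant message with each chunk
        history[-1] = (message, history[-1][1] + chunk)
        yield history, ""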
# ============================================================================
# Quick Action Templates
# ============================================================================
QUICK_ACTIONS = {
"Explain Code": "Explain the following code:\n\n```python\n# PASTE YOUR CODE HERE\n```",
"Debug Code": "Help me debug this code:\n\n```python\n# PASTE YOUR CODE HERE\n```",
"Write Function": "Write a Python function that:",
"Optimize Code": "Optimize this code for better performance:\n\n```python\n# PASTE YOUR CODE HERE\n```",
"Add Comments": "Add detailed comments to this code:\n\n```python\n# PASTE YOUR CODE HERE\n```",
}
def use_template(template_name: str) -> str:
"""Return the selected template."""
return QUICK_ACTIONS.get(template_name, "")
# ============================================================================
# Build Gradio UI
# ============================================================================
with gr.Blocks(
title="SuperCoder Pro",
theme=gr.themes.Soft(primary_hue="indigo"),
css="""
.container { max-width: 1200px; margin: auto; }
.status-box { padding: 10px; border-radius: 5px; margin: 10px 0; }
.status-connected { background-color: #d4edda; }
.status-disconnected { background-color: #f8d7da; }
"""
) as demo:
# Header
gr.Markdown("""
    # 🤖 SuperCoder Pro
### AI-Powered Coding Assistant
Your personal AI coding assistant powered by local hardware. Ask me to write,
explain, debug, or optimize code!
---
""")
# API Status
with gr.Row():
status_text = gr.Textbox(
value=check_api_status(),
label="πŸ”Œ API Status",
interactive=False,
show_label=True
)
        refresh_btn = gr.Button("🔄 Refresh Status", size="sm")
refresh_btn.click(
fn=check_api_status,
outputs=status_text
)
# Main Interface
with gr.Row():
# Left Column: Chat
with gr.Column(scale=3):
            # avatar_images expects image file paths or URLs; the emoji string
            # was dropped because gr.Chatbot would treat it as a missing file
            chatbot = gr.Chatbot(
                label="💬 Conversation",
                height=500,
                show_copy_button=True
            )
with gr.Row():
msg_input = gr.Textbox(
placeholder="Ask me to write, explain, debug, or review code...",
show_label=False,
scale=5,
lines=2
)
                send_btn = gr.Button("Send 🚀", scale=1, variant="primary")
# Right Column: Settings & Actions
with gr.Column(scale=1):
gr.Markdown("### βš™οΈ Settings")
temperature = gr.Slider(
0.0, 1.0,
value=0.1,
step=0.05,
label="🌑️ Temperature",
info="Lower = precise, Higher = creative"
)
max_tokens = gr.Slider(
128, 2048,
value=512,
step=128,
label="πŸ“ Max Tokens",
info="Response length limit"
)
gr.Markdown("### 🎯 Quick Actions")
template_dropdown = gr.Dropdown(
choices=list(QUICK_ACTIONS.keys()),
label="Select Template",
value=None
)
use_template_btn = gr.Button("Use Template", size="sm")
            clear_btn = gr.Button("🗑️ Clear Chat", variant="stop", size="sm")
# Event Handlers
msg_input.submit(
fn=chat_interface,
inputs=[msg_input, chatbot, temperature, max_tokens],
outputs=[chatbot, msg_input]
)
send_btn.click(
fn=chat_interface,
inputs=[msg_input, chatbot, temperature, max_tokens],
outputs=[chatbot, msg_input]
)
use_template_btn.click(
fn=use_template,
inputs=[template_dropdown],
outputs=[msg_input]
)
clear_btn.click(
fn=lambda: ([], ""),
outputs=[chatbot, msg_input]
)
# Footer
gr.Markdown("""
---
    ### 💡 Tips
- **Be specific** in your requests for better results
- **Paste code** directly in your messages
- Use **templates** for common tasks
- Adjust **temperature** for more creative or precise outputs
### ⚠️ Important
This Space connects to a **locally-running** AI model via tunnel.
If you see connection errors, the local server may be offline.
    ### 🔒 Privacy
- All processing happens on the owner's local machine
- No data is stored by Hugging Face
- Each chat session is independent
---
**Built with ❀️ using llama.cpp and Gradio**
""")
# Launch the app
if __name__ == "__main__":
demo.launch(
server_name="0.0.0.0",
server_port=7860,
show_error=True
)