Update app.py
app.py CHANGED
@@ -13,12 +13,12 @@ from fastapi import FastAPI, HTTPException
 from fastapi.responses import StreamingResponse
 from fastapi.middleware.cors import CORSMiddleware
 from pydantic import BaseModel
-from typing import List, Optional, AsyncGenerator
+from typing import List, Optional, AsyncGenerator
 import asyncio
 import gradio as gr
 
 # ==============================================================================
-# Model Architecture
+# Model Architecture (Same as before)
 # ==============================================================================
 
 @keras.saving.register_keras_serializable()
@@ -274,7 +274,7 @@ class KerasBackend(ModelBackend):
 CONFIG_TOKENIZER_REPO_ID = "Smilyai-labs/Sam-1-large-it-0002"
 
 print("="*60)
-print("π SAM-X-1
+print("π SAM-X-1 API Server Loading...".center(60))
 print("="*60)
 
 # Download config/tokenizer
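The hunk above shows the tokenizer/config repo id and the `# Download config/tokenizer` marker, but the download call itself falls outside the diff. For orientation only, a minimal sketch of how such files are commonly fetched from the Hub; the filenames and the use of `hf_hub_download` are assumptions, not something this commit shows:

```python
# Sketch only: app.py's real download code is not part of this diff.
# Filenames are assumptions; only CONFIG_TOKENIZER_REPO_ID appears in the hunk above.
from huggingface_hub import hf_hub_download

CONFIG_TOKENIZER_REPO_ID = "Smilyai-labs/Sam-1-large-it-0002"

config_path = hf_hub_download(repo_id=CONFIG_TOKENIZER_REPO_ID, filename="config.json")
tokenizer_path = hf_hub_download(repo_id=CONFIG_TOKENIZER_REPO_ID, filename="tokenizer.json")
print(f"config: {config_path}\ntokenizer: {tokenizer_path}")
```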
@@ -394,19 +394,6 @@ async def generate_stream(prompt: str, backend, temperature: float) -> AsyncGene
     yield new_chunk
 
 
-# ==============================================================================
-# Gradio Chat Function
-# ==============================================================================
-
-def chat_fn(message, history, model_choice="SAM-X-1-Large", temperature=0.7):
-    backend = available_models[model_choice]
-    prompt = f"User: {message}\nSam: <think>"
-    response = ""
-    for chunk in generate_stream(prompt, backend, temperature):
-        response += chunk
-        yield response
-
-
 # ==============================================================================
 # FastAPI Endpoints (OpenAI-style)
 # ==============================================================================
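Two details of the removed `chat_fn` are worth keeping in mind when reading the rest of the diff: the prompt template `User: {message}\nSam: <think>`, and the accumulate-then-yield streaming loop. Because the hunk header declares `generate_stream` as an async generator, a direct caller inside app.py would consume it with `async for`; a minimal sketch under that assumption (`available_models` and `generate_stream` are the objects defined in app.py, not redefined here):

```python
import asyncio

async def demo_direct_stream(message: str = "Hello!") -> str:
    """Mirrors the removed chat_fn, but drives the async generator directly."""
    backend = available_models["SAM-X-1-Large"]    # model registry used by the old chat_fn
    prompt = f"User: {message}\nSam: <think>"      # prompt template from the removed code
    response = ""
    async for chunk in generate_stream(prompt, backend, temperature=0.7):
        response += chunk                          # accumulate streamed pieces
    return response

# e.g. print(asyncio.run(demo_direct_stream()))
```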
@@ -484,29 +471,33 @@ async def list_models():
 
 
 # ==============================================================================
-# Gradio UI
+# Simple Gradio API Info Page (Replaces Chat UI)
 # ==============================================================================
 
-[23 removed lines of the old Gradio UI block follow here; their content is not captured in this view]
+def get_api_info():
+    model_info = "\n".join([f"- {name}" for name in available_models.keys()])
+    return f"""
+# π€ SAM-X-1 AI API Server
+
+This is a production-grade API server for the SAM-X-1 family of models.
+
+## π Available Models:
+{model_info}
+
+## π API Endpoints:
+- `POST /v1/chat/completions` - Chat completions (OpenAI-style)
+- `GET /v1/models` - List available models
+
+## π Streaming:
+Set `"stream": true` in your request to receive real-time token-by-token responses.
+
+## π§ͺ Example Request:
+```json
+{{
+  "model": "SAM-X-1-Large",
+  "messages": [
+    {{"role": "user", "content": "Hello!"}}
+  ],
+  "stream": true,
+  "temperature": 0.7
+}}