Bc-AI committed on
Commit
121fa0a
·
verified ·
1 Parent(s): e8fe16d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -40
app.py CHANGED
@@ -13,12 +13,12 @@ from fastapi import FastAPI, HTTPException
13
  from fastapi.responses import StreamingResponse
14
  from fastapi.middleware.cors import CORSMiddleware
15
  from pydantic import BaseModel
16
- from typing import List, Optional, AsyncGenerator # ✅ AsyncGenerator import
17
  import asyncio
18
  import gradio as gr
19
 
20
  # ==============================================================================
21
- # Model Architecture
22
  # ==============================================================================
23
 
24
  @keras.saving.register_keras_serializable()
@@ -274,7 +274,7 @@ class KerasBackend(ModelBackend):
274
  CONFIG_TOKENIZER_REPO_ID = "Smilyai-labs/Sam-1-large-it-0002"
275
 
276
  print("="*60)
277
- print("🚀 SAM-X-1 Hybrid API + UI Loading...".center(60))
278
  print("="*60)
279
 
280
  # Download config/tokenizer
@@ -394,19 +394,6 @@ async def generate_stream(prompt: str, backend, temperature: float) -> AsyncGene
394
  yield new_chunk
395
 
396
 
397
- # ==============================================================================
398
- # Gradio Chat Function
399
- # ==============================================================================
400
-
401
- def chat_fn(message, history, model_choice="SAM-X-1-Large", temperature=0.7):
402
- backend = available_models[model_choice]
403
- prompt = f"User: {message}\nSam: <think>"
404
- response = ""
405
- for chunk in generate_stream(prompt, backend, temperature):
406
- response += chunk
407
- yield response
408
-
409
-
410
  # ==============================================================================
411
  # FastAPI Endpoints (OpenAI-style)
412
  # ==============================================================================
@@ -484,29 +471,33 @@ async def list_models():
484
 
485
 
486
  # ==============================================================================
487
- # Gradio UI
488
  # ==============================================================================
489
 
490
- with gr.Blocks(title="SAM-X-1 Chat", theme=gr.themes.Soft()) as demo:
491
- gr.Markdown("# 🤖 SAM-X-1 Multi-Model Chat")
492
-
493
- with gr.Row():
494
- with gr.Column(scale=4):
495
- chat = gr.ChatInterface(
496
- fn=chat_fn,
497
- additional_inputs=[
498
- gr.Dropdown(
499
- choices=list(available_models.keys()),
500
- value=list(available_models.keys())[0],
501
- label="Model"
502
- ),
503
- gr.Slider(0.0, 2.0, value=0.7, label="Temperature")
504
- ],
505
- examples=[
506
- "Explain quantum computing like I'm 5.",
507
- "Write a haiku about a robot learning to dream."
508
- ]
509
- )
510
-
511
- # Mount Gradio app on root path
512
- app = gr.mount_gradio_app(app, demo, path="/")
 
 
 
 
 
13
  from fastapi.responses import StreamingResponse
14
  from fastapi.middleware.cors import CORSMiddleware
15
  from pydantic import BaseModel
16
+ from typing import List, Optional, AsyncGenerator
17
  import asyncio
18
  import gradio as gr
19
 
20
  # ==============================================================================
21
+ # Model Architecture (Same as before)
22
  # ==============================================================================
23
 
24
  @keras.saving.register_keras_serializable()
 
274
  CONFIG_TOKENIZER_REPO_ID = "Smilyai-labs/Sam-1-large-it-0002"
275
 
276
  print("="*60)
277
+ print("🚀 SAM-X-1 API Server Loading...".center(60))
278
  print("="*60)
279
 
280
  # Download config/tokenizer
 
394
  yield new_chunk
395
 
396
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
  # ==============================================================================
398
  # FastAPI Endpoints (OpenAI-style)
399
  # ==============================================================================
 
471
 
472
 
473
  # ==============================================================================
474
+ # Simple Gradio API Info Page (Replaces Chat UI)
475
  # ==============================================================================
476
 
477
+ def get_api_info():
478
+ model_info = "\n".join([f"- {name}" for name in available_models.keys()])
479
+ return f"""
480
+ # 🤖 SAM-X-1 AI API Server
481
+
482
+ This is a production-grade API server for the SAM-X-1 family of models.
483
+
484
+ ## 🚀 Available Models:
485
+ {model_info}
486
+
487
+ ## 🔌 API Endpoints:
488
+ - `POST /v1/chat/completions` - Chat completions (OpenAI-style)
489
+ - `GET /v1/models` - List available models
490
+
491
+ ## 🌊 Streaming:
492
+ Set `"stream": true` in your request to receive real-time token-by-token responses.
493
+
494
+ ## 🧪 Example Request:
495
+ ```json
496
+ {{
497
+ "model": "SAM-X-1-Large",
498
+ "messages": [
499
+ {{"role": "user", "content": "Hello!"}}
500
+ ],
501
+ "stream": true,
502
+ "temperature": 0.7
503
+ }}