"""
GraphLLM - Modal Deployment

Serverless ML deployment with auto-scaling.
"""

import modal

# Modal application that the remote functions below are registered on
app = modal.App("graphllm")

# Container image: Debian slim with system packages for OCR (tesseract),
# PDF tooling (ghostscript), and C/C++ build tools, plus the project's
# Python dependencies from requirements.txt
image = (
    modal.Image.debian_slim(python_version="3.12")
    .apt_install("tesseract-ocr", "ghostscript", "gcc", "g++")
    .pip_install_from_requirements("requirements.txt")
)
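
# Note: fastapi_app() below imports the project's main.py inside the
# container. Older Modal releases auto-mounted local Python sources; newer
# ones may require adding the source to the image explicitly, e.g. with
# something like .add_local_python_source("main"). Treat that call as an
# assumption and check it against the image API of your Modal version.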

# Persistent volume for application data; created on first use if missing
volume = modal.Volume.from_name("graphllm-data", create_if_missing=True)


@app.function(
    image=image,
    gpu=None,          # CPU-only; set a GPU type here if inference needs one
    memory=4096,       # MiB
    timeout=600,       # seconds
    volumes={"/app/data": volume},
    secrets=[modal.Secret.from_name("graphllm-secrets")],
)
@modal.asgi_app()
def fastapi_app():
    """Mount the FastAPI application."""
    import sys

    # Make the project source (main.py) importable inside the container
    sys.path.insert(0, "/root")

    from main import app as fastapi_app

    return fastapi_app
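
# The import above assumes the project ships a main.py exposing a FastAPI
# instance named `app`. A minimal, hypothetical sketch of such a module
# (illustration only, not part of this deployment script):
#
#     from fastapi import FastAPI
#
#     app = FastAPI(title="GraphLLM")
#
#     @app.get("/health")
#     def health() -> dict:
#         return {"status": "ok"}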


@app.local_entrypoint()
def main():
    """Test the deployment locally."""
    print("GraphLLM deployed to Modal!")
    print("Access your app at: https://YOUR_USERNAME--graphllm-fastapi-app.modal.run")