Update backend.py
backend.py (CHANGED: +132 -89)
@@ -1,3 +1,4 @@
+# backend.py — ENHANCED WITH AGENT CONVERSATION LOGS + RAM/VRAM MONITORING
 import sqlite3
 import os
 import json
@@ -10,6 +11,7 @@ import concurrent.futures
 from typing import List, Dict, Any
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
+import psutil
 
 DB_PATH = "code_agents_pro.db"
 PROJECT_ROOT = "./projects"
@@ -31,8 +33,9 @@ def init_db():
             user_id INTEGER NOT NULL,
             title TEXT,
             description TEXT,
             status TEXT DEFAULT 'queued',
             zip_path TEXT,
+            logs TEXT DEFAULT '',
             created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
             FOREIGN KEY (user_id) REFERENCES users(id)
         );
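
Note: `CREATE TABLE IF NOT EXISTS` only applies this schema to a fresh database; an existing code_agents_pro.db will not pick up the new `logs` column and every query below that selects it will fail. A minimal migration sketch (the `ensure_logs_column` helper is hypothetical, not part of this commit):

    import sqlite3

    def ensure_logs_column(db_path="code_agents_pro.db"):
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        # PRAGMA table_info lists existing columns; add `logs` only if missing.
        cols = [row[1] for row in cursor.execute("PRAGMA table_info(projects)")]
        if "logs" not in cols:
            cursor.execute("ALTER TABLE projects ADD COLUMN logs TEXT DEFAULT ''")
            conn.commit()
        conn.close()
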
@@ -44,7 +47,6 @@ def init_db():
 init_db()
 
 def hash_password(password):
-    # In production, use bcrypt or argon2. For now, simple hash for demo.
     import hashlib
     return hashlib.sha256(password.encode()).hexdigest()
 
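
Note: the commit deletes the comment pointing at bcrypt/argon2, but the caveat it carried still holds: a single unsalted SHA-256 is not a password hash. If adding a dependency is unwanted, the stdlib has a salted, deliberately slow alternative; a sketch with hypothetical `hash_password`/`verify_password` replacements (iteration count is illustrative):

    import hashlib
    import hmac
    import os

    def hash_password(password, salt=None, iterations=600_000):
        # PBKDF2-HMAC-SHA256: salted and tunable-cost, unlike a bare sha256().
        salt = salt or os.urandom(16)
        digest = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, iterations)
        return salt.hex() + ":" + digest.hex()

    def verify_password(password, stored):
        salt_hex, digest_hex = stored.split(":")
        salt = bytes.fromhex(salt_hex)
        candidate = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, 600_000)
        # Constant-time comparison avoids timing side channels.
        return hmac.compare_digest(candidate.hex(), digest_hex)
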
@@ -77,7 +79,7 @@ def get_user_projects(user_id, limit=3):
     conn = sqlite3.connect(DB_PATH)
     cursor = conn.cursor()
     cursor.execute("""
-        SELECT id, title, description, status, zip_path, created_at
+        SELECT id, title, description, status, zip_path, created_at, logs
        FROM projects WHERE user_id = ? ORDER BY created_at DESC LIMIT ?
     """, (user_id, limit))
     projects = cursor.fetchall()
@@ -88,25 +90,25 @@ def create_project(user_id, title, description):
     conn = sqlite3.connect(DB_PATH)
     cursor = conn.cursor()
     cursor.execute("""
-        INSERT INTO projects (user_id, title, description, status)
-        VALUES (?, ?, ?, 'queued')
+        INSERT INTO projects (user_id, title, description, status, logs)
+        VALUES (?, ?, ?, 'queued', '')
     """, (user_id, title, description))
     project_id = cursor.lastrowid
     conn.commit()
     conn.close()
     return project_id
 
-def update_project_status(project_id, status, zip_path=None):
+def update_project_status(project_id, status, logs=None, zip_path=None):
     conn = sqlite3.connect(DB_PATH)
     cursor = conn.cursor()
     if zip_path:
         cursor.execute("""
-            UPDATE projects SET status = ?, zip_path = ? WHERE id = ?
-        """, (status, zip_path, project_id))
+            UPDATE projects SET status = ?, zip_path = ?, logs = ? WHERE id = ?
+        """, (status, zip_path, logs, project_id))
     else:
         cursor.execute("""
-            UPDATE projects SET status = ? WHERE id = ?
-        """, (status, project_id))
+            UPDATE projects SET status = ?, logs = ? WHERE id = ?
+        """, (status, logs, project_id))
     conn.commit()
     conn.close()
 
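
Note: both UPDATE branches now write `logs` unconditionally, so any caller that still uses the old two-argument form `update_project_status(pid, status)` silently overwrites the stored transcript with NULL. If that is not intended, SQLite's COALESCE keeps the old value when the bound parameter is NULL, and it also collapses the if/else; a sketch of that variant:

    def update_project_status(project_id, status, logs=None, zip_path=None):
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        # COALESCE(?, col) keeps the stored value whenever the parameter is NULL.
        cursor.execute("""
            UPDATE projects
            SET status = ?,
                logs = COALESCE(?, logs),
                zip_path = COALESCE(?, zip_path)
            WHERE id = ?
        """, (status, logs, zip_path, project_id))
        conn.commit()
        conn.close()
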
@@ -114,7 +116,7 @@ def get_project(project_id):
     conn = sqlite3.connect(DB_PATH)
     cursor = conn.cursor()
     cursor.execute("""
-        SELECT id, user_id, title, description, status, zip_path
+        SELECT id, user_id, title, description, status, zip_path, logs
         FROM projects WHERE id = ?
     """, (project_id,))
     row = cursor.fetchone()
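
Note: with `logs` appended, every positional consumer of this row must now know that index 6 is the log text. `sqlite3.Row` makes access self-describing at no cost; a sketch of the same query with named columns (stdlib only):

    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row  # rows now support row["logs"] as well as row[6]
    cursor = conn.cursor()
    cursor.execute("""
        SELECT id, user_id, title, description, status, zip_path, logs
        FROM projects WHERE id = ?
    """, (project_id,))
    row = cursor.fetchone()
    project = dict(row) if row else None
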
@@ -133,9 +135,24 @@ def search_projects(user_id, query):
     conn.close()
     return results
 
-# ------------------------------
-
+# ------------------------------ SYSTEM MONITORING ------------------------------
+def get_ram_usage_gb():
+    return round(psutil.virtual_memory().used / (1024**3), 2)
+
+def get_vram_usage_gb():
+    """Try to get GPU VRAM usage. Returns 0 if no GPU or error."""
+    try:
+        if torch.cuda.is_available():
+            torch.cuda.synchronize()
+            allocated = torch.cuda.memory_allocated() / (1024**3)
+            reserved = torch.cuda.memory_reserved() / (1024**3)
+            return round(max(allocated, reserved), 2)
+        else:
+            return 0.0
+    except:
+        return 0.0
 
+# ------------------------------ MODEL LOADING ------------------------------
 MODEL_REGISTRY = {
     "understander": "Qwen/Qwen2.5-Coder-0.6B-Instruct",
     "architect": "Qwen/Qwen2.5-Coder-0.6B-Instruct",
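
Note: `torch.cuda.memory_allocated()` and `memory_reserved()` report only this process's caching-allocator usage, not how full the device actually is. If the installed torch build exposes `torch.cuda.mem_get_info()` (present in recent releases), a fuller snapshot could look like this hypothetical helper:

    def memory_snapshot():
        # One-line status string like the one written into the project logs.
        snap = f"RAM: {get_ram_usage_gb()}GB | VRAM: {get_vram_usage_gb()}GB"
        if torch.cuda.is_available():
            free, total = torch.cuda.mem_get_info()  # device-wide bytes, all processes
            snap += (f" | GPU free: {round(free / (1024**3), 2)}GB"
                     f" of {round(total / (1024**3), 2)}GB")
        return snap
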
@@ -146,9 +163,9 @@ MODEL_REGISTRY = {
     "editor": "Qwen/Qwen2.5-Coder-0.6B-Instruct",
     "tester": "microsoft/Phi-3-mini-4k-instruct",
     "publisher": "Qwen/Qwen2.5-Coder-0.6B-Instruct",
+    "final_ceo": "google/gemma-2-2b-it",
 }
 
-# Load models once at startup
 _MODEL_CACHE = {}
 
 def load_model(model_name):
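
Note: the body of `load_model` is outside this diff. Judging from the `tokenizer, model = load_model(model_name)` call in the generator below, a minimal cached loader consistent with `_MODEL_CACHE` might look like this sketch (`device_map="auto"` assumes accelerate is installed; the dtype choice is illustrative):

    def load_model(model_name):
        # Roles that share a checkpoint in MODEL_REGISTRY reuse one loaded copy.
        if model_name in _MODEL_CACHE:
            return _MODEL_CACHE[model_name]
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device_map="auto",
        )
        _MODEL_CACHE[model_name] = (tokenizer, model)
        return tokenizer, model
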
@@ -298,7 +315,7 @@ If no: list missing items.
 """
 }
 
-# ------------------------------ TOOLS (
+# ------------------------------ TOOLS (UNCHANGED) ------------------------------
 def get_user_project_dir(user_id):
     user_dir = os.path.join(PROJECT_ROOT, str(user_id))
     os.makedirs(user_dir, exist_ok=True)
@@ -401,81 +418,95 @@ def zip_project(user_id, project_name: str):
             zf.write(full_path, arcname)
     return zip_path
 
-# ------------------------------ LLM GENERATOR (
+# ------------------------------ LLM GENERATOR (WITH LOGGING) ------------------------------
 def generate_with_model(role: str, prompt: str, context: dict = {}) -> str:
-    # [previous body removed; its content is not captured in this diff view]
+    try:
+        model_name = MODEL_REGISTRY[role]
+        tokenizer, model = load_model(model_name)
+
+        # Format message based on role
+        if role == "understander":
+            messages = [{"role": "system", "content": ROLE_PROMPTS["understander"]},
+                        {"role": "user", "content": prompt}]
+        elif role == "architect":
+            messages = [{"role": "system", "content": ROLE_PROMPTS["architect"]},
+                        {"role": "user", "content": prompt}]
+        elif role == "ceo":
+            messages = [{"role": "system", "content": ROLE_PROMPTS["ceo"]},
+                        {"role": "user", "content": prompt}]
+        elif role == "manager":
+            messages = [{"role": "system", "content": ROLE_PROMPTS["manager"]},
+                        {"role": "user", "content": prompt}]
+        elif role == "worker":
+            messages = [{"role": "system", "content": ROLE_PROMPTS["worker"].format(file=context.get("file"), instructions=context.get("instructions"))},
+                        {"role": "user", "content": ""}]
+        elif role == "reviewer":
+            messages = [{"role": "system", "content": ROLE_PROMPTS["reviewer"]},
+                        {"role": "user", "content": prompt}]
+        elif role == "editor":
+            messages = [{"role": "system", "content": ROLE_PROMPTS["editor"]},
+                        {"role": "user", "content": prompt}]
+        elif role == "tester":
+            messages = [{"role": "system", "content": ROLE_PROMPTS["tester"]},
+                        {"role": "user", "content": prompt}]
+        elif role == "publisher":
+            messages = [{"role": "system", "content": ROLE_PROMPTS["publisher"]},
+                        {"role": "user", "content": prompt}]
+        elif role == "final_ceo":
+            messages = [{"role": "system", "content": ROLE_PROMPTS["final_ceo"]},
+                        {"role": "user", "content": prompt}]
+        else:
+            messages = [{"role": "user", "content": prompt}]
+
+        text = tokenizer.apply_chat_template(
+            messages,
+            tokenize=False,
+            add_generation_prompt=True
+        )
+
+        inputs = tokenizer(text, return_tensors="pt").to(model.device)
+
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=512,
+            temperature=0.2,
+            top_p=0.9,
+            do_sample=True,
+            pad_token_id=tokenizer.eos_token_id,
+            eos_token_id=tokenizer.eos_token_id
+        )
+
+        response = tokenizer.decode(outputs[0][len(inputs["input_ids"][0]):], skip_special_tokens=True)
+        return response.strip()
+
+    except Exception as e:
+        return f"[ERROR IN {role.upper()}]: {str(e)}"
 
-# ------------------------------ ASYNC JOB EXECUTOR ------------------------------
+# ------------------------------ ASYNC JOB EXECUTOR (WITH FULL LOGGING) ------------------------------
 def run_agent_chain(project_id, user_id, initial_prompt):
     try:
-        update_project_status(project_id, "running")
+        update_project_status(project_id, "running", f"Starting agent chain...\nRAM: {get_ram_usage_gb()}GB | VRAM: {get_vram_usage_gb()}GB")
 
         user_dir = get_user_project_dir(user_id)
         clear_user_project_dir(user_id)
 
+        log_entries = []
+
+        def log_step(agent, action, output):
+            entry = f"[{agent}] {action}\n→ {output[:500]}{'...' if len(output) > 500 else ''}\n\n"
+            log_entries.append(entry)
+            current_logs = ''.join(log_entries)
+            update_project_status(project_id, "running", current_logs)
+
         # Step 1: Understander
+        log_step("Understander", "Analyzing prompt...", "Started...")
         understand_result = generate_with_model("understander", initial_prompt)
-
+        log_step("Understander", "Analysis complete", understand_result)
 
         # Step 2: Architect
+        log_step("Architect", "Designing structure...", "Started...")
         architect_result = generate_with_model("architect", understand_result)
-
+        log_step("Architect", "Structure designed", architect_result)
 
         # Parse architect JSON
         try:
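
Note: every branch of the ten-way if/elif in `generate_with_model` except "worker" builds the same two-message shape, so the chain reduces to a dictionary lookup. Also, `context: dict = {}` is a mutable default argument; it is only read here, so it stays benign, but `context=None` with a fallback is the safer idiom. A sketch of the equivalent lookup (hypothetical `build_messages` helper):

    def build_messages(role: str, prompt: str, context: dict = None) -> list:
        context = context or {}
        if role == "worker":
            # The worker prompt interpolates its task into the system message.
            system = ROLE_PROMPTS["worker"].format(
                file=context.get("file"), instructions=context.get("instructions"))
            return [{"role": "system", "content": system},
                    {"role": "user", "content": ""}]
        if role in ROLE_PROMPTS:
            return [{"role": "system", "content": ROLE_PROMPTS[role]},
                    {"role": "user", "content": prompt}]
        return [{"role": "user", "content": prompt}]
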
@@ -483,16 +514,18 @@ def run_agent_chain(project_id, user_id, initial_prompt):
             arch_data = json.loads(architect_result)
             for file in arch_data.get("structure", []):
                 create_file(user_id, file["path"], file["content"])
-        except:
-
+        except Exception as e:
+            log_step("Architect", "JSON parse failed", str(e))
 
         # Step 3: CEO
+        log_step("CEO", "Reviewing architecture...", "Started...")
         ceo_response = generate_with_model("ceo", "Review architecture and approve.")
-
+        log_step("CEO", "Response", ceo_response)
 
         # Step 4: Manager assigns tasks
+        log_step("Manager", "Assigning tasks...", "Started...")
         manager_response = generate_with_model("manager", "Assign tasks based on architecture.")
-
+        log_step("Manager", "Tasks assigned", manager_response)
 
         try:
             import json
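
Note: `json.loads(architect_result)` assumes the model emits bare JSON; small instruct models routinely wrap it in Markdown code fences or surrounding prose, which makes this branch fall straight into the except. A tolerant parser sketch (hypothetical `parse_model_json`; the regex grabs the outermost brace-delimited span):

    import json
    import re

    def parse_model_json(raw: str):
        # Strip fences/prose: take everything from the first '{' to the last '}'.
        match = re.search(r"\{.*\}", raw, re.DOTALL)
        if not match:
            raise ValueError("no JSON object found in model output")
        return json.loads(match.group(0))
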
@@ -503,38 +536,44 @@ def run_agent_chain(project_id, user_id, initial_prompt):
                     "instructions": task["instructions"]
                 })
                 create_file(user_id, task["file"], worker_response)
-
-
+                log_step("Worker", f"Generated {task['file']}", worker_response[:300])
+        except Exception as e:
+            log_step("Manager", "Task parsing failed", str(e))
 
         # Step 5: Reviewer
         all_files = "\n".join(list_files(user_id))
         reviewer_input = f"Review these files:\n\n{all_files}"
+        log_step("Reviewer", "Auditing code...", "Started...")
         reviewer_result = generate_with_model("reviewer", reviewer_input)
-
+        log_step("Reviewer", "Audit complete", reviewer_result)
 
         # Step 6: Editor applies fixes
         editor_input = f"Apply these changes:\n{reviewer_result}\n\nFiles:\n{all_files}"
+        log_step("Editor", "Applying fixes...", "Started...")
         editor_response = generate_with_model("editor", editor_input)
-
+        log_step("Editor", "Fixes applied", "Applied corrections to files.")
 
         # Step 7: Testers generate tests
         for filename in [f for f in list_files(user_id) if f.endswith(".py") and not f.startswith("test_")]:
             test_prompt = f"Write a test for this file: {filename}"
+            log_step("Tester", f"Generating test for {filename}...", "Started...")
             test_content = generate_with_model("tester", test_prompt)
             test_path = f"tests/test_{os.path.basename(filename)}"
             create_file(user_id, test_path, test_content)
-
+            log_step("Tester", f"Test saved to {test_path}", test_content[:200])
 
         # Step 8: Editor reviews tests
         test_files = "\n".join([f for f in list_files(user_id) if f.startswith("tests/")])
         editor_test_input = f"Review these test files:\n\n{test_files}\n\nMake sure they are complete and correct."
+        log_step("Editor", "Reviewing tests...", "Started...")
         editor_test_response = generate_with_model("editor", editor_test_input)
-
+        log_step("Editor", "Test review complete", "Reviewed and corrected test files.")
 
         # Step 9: Publisher creates final assets
         publisher_input = "Generate final project assets: README.md, Dockerfile, requirements.txt, .gitignore"
+        log_step("Publisher", "Generating final files...", "Started...")
         publisher_response = generate_with_model("publisher", publisher_input)
-
+        log_step("Publisher", "Final assets generated", publisher_response)
 
         try:
             import json
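
Note: because `log_step` persists the accumulated transcript on every call, the `logs` column is live while status is still "running", so a frontend can stream progress by polling. A hypothetical UI-side poller built on `get_project` (column order per the SELECT above: status at index 4, logs at index 6):

    import time

    def tail_project_logs(project_id, poll_seconds=2):
        # Print only the newly appended log text; stop on a terminal status.
        seen = 0
        while True:
            row = get_project(project_id)
            status, logs = row[4], (row[6] or "")
            print(logs[seen:], end="", flush=True)
            seen = len(logs)
            if status in ("completed", "failed"):
                break
            time.sleep(poll_seconds)
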
@@ -548,25 +587,29 @@ def run_agent_chain(project_id, user_id, initial_prompt):
                     write_requirements(user_id, content)
                 elif fname == ".gitignore":
                     write_gitignore(user_id, content)
-        except:
-
+        except Exception as e:
+            log_step("Publisher", "JSON parsing failed", str(e))
 
         # Step 10: Final CEO
         final_input = "All files generated. Verify completeness."
+        log_step("Final CEO", "Final verification...", "Started...")
         final_ceo_response = generate_with_model("final_ceo", final_input)
-
+        log_step("Final CEO", "Verification result", final_ceo_response)
 
         if "Ready to package" not in final_ceo_response:
             raise Exception(f"Final CEO rejected: {final_ceo_response}")
 
         # Step 11: ZIP & Save
+        log_step("System", "Creating ZIP archive...", "Started...")
         zip_path = zip_project(user_id, f"project_{project_id}")
-
+        log_step("System", "ZIP created", f"Saved to {zip_path}")
 
+        update_project_status(project_id, "completed", ''.join(log_entries), zip_path)
         return "Completed successfully"
 
     except Exception as e:
-
+        error_log = ''.join(log_entries) + f"\n\n❌ FINAL ERROR: {str(e)}"
+        update_project_status(project_id, "failed", error_log)
         return f"Failed: {str(e)}"
 
 # ------------------------------ JOB QUEUE ------------------------------
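
Note: the JOB QUEUE section itself is unchanged and not shown in this diff. Given the file's existing `concurrent.futures` import and the fact that `run_agent_chain` reports all progress through the database, a single-worker executor would be sufficient; a sketch under those assumptions (names are illustrative):

    import concurrent.futures

    # One worker: the cached models are shared state, so chains run one at a time.
    _EXECUTOR = concurrent.futures.ThreadPoolExecutor(max_workers=1)

    def submit_project(project_id, user_id, prompt):
        # Fire-and-forget: callers poll the projects table for status/logs/zip_path.
        return _EXECUTOR.submit(run_agent_chain, project_id, user_id, prompt)
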