import gradio as gr
from ultralytics import YOLO
from PIL import Image
import numpy as np
from typing import List, Tuple, Dict, Optional
from huggingface_hub import InferenceClient

# Load the trained model
model = YOLO('best.pt')
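# NOTE: this assumes the fine-tuned weights file `best.pt` sits next to app.py
# (the working directory of the Space); adjust the path if the weights live elsewhere.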

# Initialize state structure
def init_user_state() -> Dict:
    """Initialize the user state dictionary."""
    return {
        'name': '',
        'age': None,
        'weight_lbs': None,
        'height_cm': None,
        'gender': '',
        'activity_level': '',
        'goal': '',
        'calorie_target': None,
        'cuisine_preference': '',
        'detected_ingredients': [],
        'ingredient_list_text': ''
    }

# BMR & CALORIE CALCULATION
def convert_height_to_cm(height_ft: Optional[float], height_in: Optional[float]) -> Optional[float]:
    """Convert feet and inches to centimeters."""
    if height_ft is None or height_in is None:
        return None
    total_inches = (height_ft * 12) + height_in
    return total_inches * 2.54
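# Quick sanity check (illustrative, not executed by the app):
#   convert_height_to_cm(5, 10) -> (5 * 12 + 10) * 2.54 = 177.8 cm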

def calculate_bmr(weight_kg: float, height_cm: float, age: int, gender: str) -> float:
    """
    Calculate Basal Metabolic Rate using the Mifflin-St Jeor equation.
    BMR (men)   = 10 × weight(kg) + 6.25 × height(cm) - 5 × age(years) + 5
    BMR (women) = 10 × weight(kg) + 6.25 × height(cm) - 5 × age(years) - 161
    """
    base_bmr = (10 * weight_kg) + (6.25 * height_cm) - (5 * age)
    if gender.lower() == 'male':
        bmr = base_bmr + 5
    else:  # female
        bmr = base_bmr - 161
    return bmr
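# Worked example (illustrative): a 30-year-old male, 70 kg, 175 cm:
#   calculate_bmr(70, 175, 30, 'male')
#   = 10*70 + 6.25*175 - 5*30 + 5 = 1648.75 kcal/day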

def get_activity_multiplier(activity_level: str) -> float:
    """Get activity multiplier based on activity level."""
    multipliers = {
        'Sedentary': 1.2,
        'Light': 1.375,
        'Moderate': 1.55,
        'Active': 1.725,
        'Very Active': 1.9
    }
    return multipliers.get(activity_level, 1.2)

def get_goal_adjustment(goal: str) -> int:
    """Get calorie adjustment based on goal."""
    adjustments = {
        'Cutting': -500,
        'Maintain': 0,
        'Bulking': +500,
        'Custom': 0  # Will be handled separately
    }
    return adjustments.get(goal, 0)
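# Continuing the example above (illustrative): with 'Moderate' activity and a
# 'Cutting' goal, the daily target would be 1648.75 * 1.55 - 500 ≈ 2056 kcal/day.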

def calculate_calorie_target(
    weight_lbs: Optional[float],
    height_ft: Optional[float],
    height_in: Optional[float],
    age: Optional[int],
    gender: Optional[str],
    activity_level: Optional[str],
    goal: Optional[str],
    custom_calories: Optional[float],
    state: Dict
) -> Tuple[Dict, str]:
    """
    Calculate daily calorie target based on user inputs.
    Updates state and returns formatted result.
    """
    # Validate inputs
    if not all([weight_lbs, height_ft is not None, height_in is not None, age, gender, activity_level, goal]):
        return state, "**Please fill in all required fields.**"

    # Convert weight to kg
    weight_kg = weight_lbs * 0.453592

    # Convert height to cm
    height_cm = convert_height_to_cm(height_ft, height_in)
    if height_cm is None:
        return state, "**Please enter valid height values.**"

    # Calculate BMR
    bmr = calculate_bmr(weight_kg, height_cm, age, gender)

    # Get activity multiplier
    activity_mult = get_activity_multiplier(activity_level)

    # Calculate TDEE (Total Daily Energy Expenditure)
    tdee = bmr * activity_mult

    # Apply goal adjustment
    if goal == 'Custom' and custom_calories is not None:
        calorie_target = custom_calories
    else:
        goal_adj = get_goal_adjustment(goal)
        calorie_target = tdee + goal_adj

    # Update state
    state['weight_lbs'] = weight_lbs
    state['height_cm'] = height_cm
    state['age'] = age
    state['gender'] = gender
    state['activity_level'] = activity_level
    state['goal'] = goal
    state['calorie_target'] = calorie_target

    # Format output
    result_text = f"""
## 📊 Your Daily Calorie Target

**BMR (Basal Metabolic Rate):** {bmr:.0f} calories/day

**Activity Level:** {activity_level} (×{activity_mult:.2f})

**TDEE (Total Daily Energy Expenditure):** {tdee:.0f} calories/day

**Goal Adjustment:** {get_goal_adjustment(goal):+.0f} calories

### 🎯 **Daily Calorie Target: {calorie_target:.0f} calories**

*This target is based on your profile and has been saved for recipe generation.*
"""
    return state, result_text
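# End-to-end example (illustrative values; result is approximate):
#   state, summary = calculate_calorie_target(
#       180, 5, 10, 30, 'Male', 'Moderate', 'Cutting', None, init_user_state())
#   # -> state['calorie_target'] ≈ 2263 kcal/day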

# INGREDIENT DETECTION
def detect_ingredients(images: List, state: Dict) -> Tuple[Dict, List, str]:
    """
    Process multiple images and return detected ingredients.
    Also updates the state with detected ingredients.

    Args:
        images: List of uploaded images (file paths)
        state: User state dictionary

    Returns:
        Tuple of (updated_state, processed_images, ingredient_list_text)
    """
    if not images or len(images) == 0:
        return state, [], "**No images uploaded.**"

    processed_images = []
    all_detected_items = set()

    # Process each uploaded image
    for image_file in images:
        if image_file is None:
            continue

        # Get the file path (remember that Gradio returns file objects)
        image_path = image_file.name if hasattr(image_file, 'name') else image_file

        # Run prediction with the local settings
        results = model.predict(source=image_path, conf=0.7, iou=0.3, verbose=False)

        # Get the image with bounding boxes drawn
        result_image = results[0].plot()

        # Extract detected ingredients from this image
        for box in results[0].boxes:
            class_id = int(box.cls)
            class_name = model.names[class_id]
            all_detected_items.add(class_name)

        # Convert numpy array to PIL Image for display
        # YOLO returns BGR, convert to RGB
        if len(result_image.shape) == 3:
            result_image_rgb = result_image[..., ::-1]  # BGR to RGB
            processed_images.append(Image.fromarray(result_image_rgb))
        else:
            processed_images.append(Image.fromarray(result_image))

    # Build the formatted ingredient list
    if all_detected_items:
        ingredient_list = sorted(list(all_detected_items))
        ingredient_list_text = "**Detected Ingredients:**\n\n"
        ingredient_list_text += "\n".join([f"• {item.capitalize()}" for item in ingredient_list])
        ingredient_list_text += f"\n\n**Total unique items:** {len(ingredient_list)}"

        # Update state with detected ingredients for later use
        state['detected_ingredients'] = ingredient_list
        state['ingredient_list_text'] = ingredient_list_text
    else:
        ingredient_list_text = "**No ingredients detected.**\n\nTry adjusting the image quality or lighting."
        state['detected_ingredients'] = []
        state['ingredient_list_text'] = ingredient_list_text

    return state, processed_images, ingredient_list_text
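# Illustrative standalone use (outside Gradio) with image paths on disk:
#   state, previews, summary = detect_ingredients(['fridge_1.jpg', 'fridge_2.jpg'], init_user_state())
# conf=0.7 keeps only high-confidence detections; lowering it (e.g. to 0.4) will
# surface more ingredients at the cost of more false positives.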

# RECIPE GENERATION
def generate_recipes(cuisine_preference: Optional[str], state: Dict) -> Tuple[Dict, str]:
    """
    Generate recipes using an LLM. All inputs are optional with smart defaults.
    """
    # Make everything optional - use defaults if not provided
    cuisine_preference = cuisine_preference or "International"

    # Get user data with defaults
    calorie_target = state.get('calorie_target')
    if calorie_target:
        calorie_target = int(calorie_target)
    else:
        calorie_target = 2000  # Default calorie target

    goal = state.get('goal', 'Maintain')
    ingredients = state.get('detected_ingredients', [])

    # Build ingredient list or use default
    if ingredients:
        ingredient_list = ", ".join([item.capitalize() for item in ingredients])
        ingredient_context = f"Use these available ingredients: {ingredient_list}. "
    else:
        ingredient_list = "common pantry items"
        ingredient_context = "Use common, readily available ingredients. "

    # Update state
    state['cuisine_preference'] = cuisine_preference

    # Map goal to dietary focus
    goal_descriptions = {
        'Cutting': 'weight loss and calorie deficit',
        'Maintain': 'maintaining current weight',
        'Bulking': 'muscle gain with high protein',
        'Custom': 'your custom calorie target'
    }
    goal_desc = goal_descriptions.get(goal, 'general health and nutrition')

    # Build a flexible prompt based on the available data
    prompt = f"""You are a professional nutritionist and chef. Create 3 distinct, detailed recipes that:
1. {ingredient_context}
2. Fit within a daily calorie target of approximately {calorie_target} calories per day
3. Match {cuisine_preference} cuisine style
4. Align with the goal of {goal_desc}

For each recipe, provide:
- Recipe name
- Serving size
- Estimated calories per serving
- Complete ingredient list (you may suggest additional common pantry items if needed)
- Step-by-step cooking instructions
- Nutritional highlights relevant to the goal

Format each recipe clearly with headers. Make the recipes practical, delicious, and suitable for home cooking."""

    try:
        # Use the Hugging Face Inference API
        import os

        # Try multiple ways to get the token
        hf_token = None

        # Method 1: Check HF_TOKEN environment variable (Hugging Face Spaces secret)
        hf_token = os.getenv("HF_TOKEN", None)

        # Method 2: Check HUGGING_FACE_HUB_TOKEN (alternative name)
        if not hf_token:
            hf_token = os.getenv("HUGGING_FACE_HUB_TOKEN", None)

        # Method 3: Try to get it from the Hugging Face cache (for Spaces or logged-in users)
        if not hf_token:
            try:
                from huggingface_hub import HfFolder
                hf_token = HfFolder.get_token()
            except Exception:
                pass

        # Initialize the client with the token if available, otherwise try without one
        if hf_token:
            client = InferenceClient(token=hf_token)
        else:
            # Try without a token
            client = InferenceClient()

        # Try multiple models that work on the free tier.
        # Try chat_completion first, then text_generation as a fallback.
        response = None
        last_error = None
        successful_model = None
        errors_log = []

        # List of models to try - simpler models that work on the free tier
        models_to_try = [
            "microsoft/Phi-3-mini-4k-instruct",
            "HuggingFaceH4/zephyr-7b-beta",
            "mistralai/Mistral-7B-Instruct-v0.2",
            "meta-llama/Llama-3.2-3B-Instruct",
            "google/flan-t5-xxl",  # Simple text generation model
        ]
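        # Availability of these models on the free Inference API changes over time;
        # any chat-capable model your token can access could be swapped in here.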

        for model_name in models_to_try:
            # Try chat_completion first
            try:
                messages = [
                    {"role": "system", "content": "You are a professional nutritionist and chef. Create detailed, practical recipes with clear formatting."},
                    {"role": "user", "content": prompt}
                ]
                response_obj = client.chat_completion(
                    messages=messages,
                    model=model_name,
                    max_tokens=1500,
                    temperature=0.7,
                )

                # Extract response
                if hasattr(response_obj, 'choices') and len(response_obj.choices) > 0:
                    if hasattr(response_obj.choices[0].message, 'content'):
                        response = response_obj.choices[0].message.content
                    else:
                        response = str(response_obj.choices[0].message)
                elif isinstance(response_obj, dict) and 'choices' in response_obj:
                    response = response_obj['choices'][0]['message']['content']
                elif isinstance(response_obj, str):
                    response = response_obj
                else:
                    response = str(response_obj)

                if response and len(response.strip()) > 50:  # Make sure we got a real response
                    successful_model = f"{model_name} (chat_completion)"
                    break
            except Exception as chat_error:
                errors_log.append(f"{model_name} (chat): {str(chat_error)[:80]}")

                # Try text_generation as a fallback for this model
                try:
                    response = client.text_generation(
                        prompt,
                        model=model_name,
                        max_new_tokens=1500,
                        temperature=0.7,
                    )
                    if response and len(str(response).strip()) > 50:
                        successful_model = f"{model_name} (text_generation)"
                        break
                except Exception as text_error:
                    errors_log.append(f"{model_name} (text): {str(text_error)[:80]}")
                    last_error = text_error
                    continue

        # If there is still no response, try one more fallback
        if not response:
            try:
                # Try a very simple model as a last resort
                response = client.text_generation(
                    prompt,
                    model="gpt2",  # Should always be available
                    max_new_tokens=500,
                    temperature=0.7,
                )
                successful_model = "gpt2 (fallback)"
            except Exception:
                pass

        # If all models failed, provide an error message
        if response is None:
            error_msg = "**Failed to generate recipes.**\n\n"

            # Show the last error details
            if last_error:
                error_msg += f"**Last error:** {str(last_error)[:200]}\n\n"

            # Show which models were tried
            if errors_log:
                error_msg += "**Models tried:**\n"
                for err in errors_log[:3]:  # Show the first 3 errors
                    error_msg += f"- {err}\n"
                error_msg += "\n"

            if not hf_token:
                error_msg += """**💡 Setup required (for example, if you duplicated this Space):**

**For Hugging Face Spaces:**
1. Go to your Space Settings
2. Scroll to "Repository secrets" and click **"New secret"**
3. Name: `HF_TOKEN`, Value: your Hugging Face token
4. Click **"Add secret"** and your Space will rebuild automatically

**For Local Development:**
Set the `HF_TOKEN` environment variable with your Hugging Face token.

Once the token is set, try generating recipes again!"""
            else:
                error_msg += """**Possible issues:**
- The models may require special access (some models need approval on Hugging Face)
- Your token may not have access to these models (the free tier has limitations)
- Models might be routed to external providers that aren't available
- Network connectivity issues
"""
            return state, error_msg

        # Extract text if the response is a structured object
        if hasattr(response, 'generated_text'):
            response_text = response.generated_text
        elif isinstance(response, str):
            response_text = response
        else:
            response_text = str(response)

        # Build profile summary (only show if data exists)
        profile_parts = []
        if state.get('calorie_target'):
            profile_parts.append(f"- Daily Calorie Target: {calorie_target} calories")
        if state.get('goal'):
            profile_parts.append(f"- Goal: {goal}")
        if ingredients:
            profile_parts.append(f"- Available Ingredients: {ingredient_list}")

        profile_summary = "\n".join(profile_parts) if profile_parts else "- Using default settings (2000 calories, general recipes)"

        recipes_text = f"""## 🍳 Recipe Suggestions for {cuisine_preference} Cuisine

**Settings Used:**
{profile_summary}

---

{response_text}

---

*Recipes generated using AI. {"Based on your profile and ingredients." if (state.get('calorie_target') or ingredients) else "Feel free to customize your profile and scan ingredients for more personalized results!"}*"""

        return state, recipes_text

    except Exception as e:
        error_msg = f"""**Error generating recipes.**

Please try again. If the issue persists, you may need to:
1. Check your internet connection
2. Ensure you have a Hugging Face API token set (if required)
3. Try a different cuisine preference

Error details: {str(e)}"""
        return state, error_msg
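# Illustrative standalone call (requires HF_TOKEN, a cached login, or public API access):
#   _, recipes_md = generate_recipes('Italian', init_user_state())
#   print(recipes_md)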

# GRADIO INTERFACE
# Custom CSS. NOTE: emojis are used in the responses and buttons to add color without overwhelming the design.
custom_css = """
.gradio-container {
    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
.main-header {
    text-align: center;
    padding: 20px;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    border-radius: 10px;
    margin-bottom: 20px;
}
.description-box {
    background: #f8f9fa;
    padding: 15px;
    border-radius: 8px;
    border-left: 4px solid #667eea;
    margin-bottom: 20px;
    color: #000000 !important;
}
.description-box * {
    color: #000000 !important;
}
.ingredient-list {
    background: #ffffff;
    padding: 20px;
    border-radius: 8px;
    box-shadow: 0 2px 8px rgba(0,0,0,0.1);
    min-height: 200px;
    color: #000000 !important;
}
.ingredient-list * {
    color: #000000 !important;
}
.calorie-result {
    background: #e8f5e9;
    padding: 20px;
    border-radius: 8px;
    border-left: 4px solid #4caf50;
    margin-top: 20px;
    color: #000000 !important;
}
.calorie-result * {
    color: #000000 !important;
}
"""

# Gradio interface
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
    # Header - changed again for the CTP showcase
    gr.Markdown(
        """
        # 🥗 Forked Nutrition
        Your AI-powered kitchen companion: Scan ingredients, calculate calories, and generate personalized recipes!
        """,
        elem_classes=["main-header"]
    )

    # Initialize state
    user_state = gr.State(value=init_user_state)

    # Tab structure
    with gr.Tabs() as tabs:

        # TAB 1: USER PROFILE & GOALS
        with gr.Tab("👤 User Profile & Goals"):
            gr.Markdown(
                """
                <div class="description-box">
                <strong>📝 Set up your profile:</strong><br>
                Enter your personal information and fitness goals to calculate your daily calorie target.
                This will be used to generate personalized recipes.
                </div>
                """
            )

            with gr.Row():
                with gr.Column(scale=1):
                    name_input = gr.Textbox(
                        label="Name",
                        placeholder="Enter your name",
                        value=""
                    )

                    with gr.Row():
                        age_input = gr.Number(
                            label="Age",
                            minimum=1,
                            maximum=120,
                            value=None,
                            precision=0
                        )
                        gender_input = gr.Dropdown(
                            label="Gender",
                            choices=["Male", "Female"],
                            value=None
                        )

                    with gr.Row():
                        weight_input = gr.Number(
                            label="Weight (lbs)",
                            minimum=1,
                            maximum=1000,
                            value=None,
                            precision=1
                        )

                    with gr.Row():
                        height_ft_input = gr.Number(
                            label="Height (feet)",
                            minimum=1,
                            maximum=8,
                            value=None,
                            precision=0
                        )
                        height_in_input = gr.Number(
                            label="Height (inches)",
                            minimum=0,
                            maximum=11,
                            value=None,
                            precision=0
                        )

                    activity_input = gr.Dropdown(
                        label="Activity Level",
                        choices=["Sedentary", "Light", "Moderate", "Active", "Very Active"],
                        value=None,
                        info="Sedentary: Little/no exercise | Light: Light exercise 1-3 days/week | Moderate: Moderate exercise 3-5 days/week | Active: Hard exercise 6-7 days/week | Very Active: Very hard exercise, physical job"
                    )
                    goal_input = gr.Radio(
                        label="Goal",
                        choices=["Cutting", "Maintain", "Bulking", "Custom"],
                        value=None
                    )
                    custom_calories_input = gr.Number(
                        label="Custom Calorie Target",
                        minimum=800,
                        maximum=5000,
                        value=None,
                        precision=0,
                        visible=False,
                        info="Enter your desired daily calorie target"
                    )
                    calculate_btn = gr.Button(
                        "📊 Calculate Calorie Target",
                        variant="primary",
                        size="lg"
                    )

                with gr.Column(scale=1):
                    calorie_output = gr.Markdown(
                        label="Calorie Calculation Result",
                        elem_classes=["calorie-result"]
                    )

            # Show/hide custom calories input based on goal selection
            def toggle_custom_calories(goal):
                if goal == "Custom":
                    return gr.update(visible=True)
                else:
                    # Reset value to None when hiding to prevent validation errors
                    return gr.update(visible=False, value=None)

            goal_input.change(
                fn=toggle_custom_calories,
                inputs=goal_input,
                outputs=custom_calories_input
            )

            # Calculate calories
            calculate_btn.click(
                fn=calculate_calorie_target,
                inputs=[
                    weight_input,
                    height_ft_input,
                    height_in_input,
                    age_input,
                    gender_input,
                    activity_input,
                    goal_input,
                    custom_calories_input,
                    user_state
                ],
                outputs=[user_state, calorie_output]
            )

            # Update name in state when changed
            name_input.change(
                fn=lambda name, state: {**state, 'name': name},
                inputs=[name_input, user_state],
                outputs=user_state
            )

        # TAB 2: INGREDIENT SCANNER
        with gr.Tab("📸 Ingredient Scanner"):
            gr.Markdown(
                """
                <div class="description-box">
                <strong>📸 How to use:</strong><br>
                1. Click "Upload Images" or drag and drop multiple photos<br>
                2. Wait for the AI to analyze your ingredients<br>
                3. View all processed images with detection boxes and the complete ingredient list<br>
                4. Detected ingredients will be saved for recipe generation
                </div>
                """
            )

            with gr.Row():
                with gr.Column(scale=1):
                    image_input = gr.File(
                        file_count="multiple",
                        file_types=["image"],
                        label="📁 Upload Images",
                        height=200
                    )
                    process_btn = gr.Button(
                        "🔍 Detect Ingredients",
                        variant="primary",
                        size="lg"
                    )
                    gr.Markdown("---")
                    ingredient_output = gr.Markdown(
                        label="📋 Detected Ingredients",
                        elem_classes=["ingredient-list"]
                    )

                with gr.Column(scale=2):
                    gallery_output = gr.Gallery(
                        label="🖼️ Processed Images with Detections",
                        show_label=True,
                        elem_id="gallery",
                        columns=2,
                        rows=2,
                        height="auto",
                        allow_preview=True,
                        preview=True
                    )

            # Process images when the button is clicked
            process_btn.click(
                fn=detect_ingredients,
                inputs=[image_input, user_state],
                outputs=[user_state, gallery_output, ingredient_output]
            )

            # Also process when images are uploaded (auto-detect)
            image_input.upload(
                fn=detect_ingredients,
                inputs=[image_input, user_state],
                outputs=[user_state, gallery_output, ingredient_output]
            )

        # TAB 3: RECIPE GENERATOR
        with gr.Tab("🍳 Recipe Generator"):
            gr.Markdown(
                """
                <div class="description-box">
                <strong>🍳 Generate personalized recipes:</strong><br>
                Generate AI-powered recipes! You can customize with your calorie target, fitness goals, and detected ingredients,
                or simply select a cuisine preference to get started right away. Everything is optional!
                </div>
                """
            )

            with gr.Row():
                with gr.Column(scale=1):
                    cuisine_input = gr.Dropdown(
                        label="Cuisine Preference",
                        choices=["International", "Mexican", "Chinese", "American", "Italian", "Indian", "Japanese", "Mediterranean", "Thai", "French"],
                        value="International",
                        info="Select your preferred cuisine style (optional, defaults to International)"
                    )
                    generate_btn = gr.Button(
                        "✨ Generate Recipes",
                        variant="primary",
                        size="lg"
                    )
                    gr.Markdown("---")

                with gr.Column(scale=2):
                    recipe_output = gr.Markdown(
                        label="Generated Recipes",
                        elem_classes=["ingredient-list"]
                    )

            # Generate recipes
            generate_btn.click(
                fn=generate_recipes,
                inputs=[cuisine_input, user_state],
                outputs=[user_state, recipe_output]
            )

    gr.Markdown(
        """
        ---
        <div style="text-align: center; color: #666; padding: 20px;">
        <small>Powered by YOLOv11 & AI Recipe Generation | Your smart kitchen assistant!</small>
        </div>
        """
    )

# Launch the app
if __name__ == "__main__":
    demo.launch()
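    # For local debugging, other launch options could be used instead, e.g.:
    #   demo.launch(share=True)        # temporary public link
    #   demo.launch(server_port=7860)  # the port Hugging Face Spaces expects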