AymanFahim committed on
Commit
ced12a1
·
verified ·
1 Parent(s): 5c1cc7a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +170 -80
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import gradio as gr
2
  from ultralytics import YOLO
3
- import PIL.Image
4
  import numpy as np
5
  from typing import List, Tuple, Dict, Optional
6
  from huggingface_hub import InferenceClient
@@ -183,9 +183,9 @@ def detect_ingredients(images: List, state: Dict) -> Tuple[Dict, List, str]:
183
  # YOLO returns BGR, convert to RGB
184
  if len(result_image.shape) == 3:
185
  result_image_rgb = result_image[..., ::-1] # BGR to RGB
186
- processed_images.append(PIL.Image.fromarray(result_image_rgb))
187
  else:
188
- processed_images.append(PIL.Image.fromarray(result_image))
189
 
190
  # Create formatted ingredient list
191
  if all_detected_items:
@@ -208,27 +208,32 @@ def detect_ingredients(images: List, state: Dict) -> Tuple[Dict, List, str]:
208
 
209
  def generate_recipes(cuisine_preference: Optional[str], state: Dict) -> Tuple[Dict, str]:
210
  """
211
- Generate recipes using LLM based on user profile and detected ingredients.
212
  """
213
- # Validate that we have the necessary data
214
- if not state.get('calorie_target'):
215
- return state, "**⚠️ Please complete your User Profile & Goals first to set your calorie target.**"
216
 
217
- if not state.get('detected_ingredients'):
218
- return state, "**⚠️ Please scan ingredients in the Ingredient Scanner tab first.**"
 
 
 
 
 
 
 
219
 
220
- if not cuisine_preference:
221
- return state, "**⚠️ Please select a cuisine preference.**"
 
 
 
 
 
222
 
223
  # Update state
224
  state['cuisine_preference'] = cuisine_preference
225
 
226
- # Get user data
227
- calorie_target = int(state['calorie_target'])
228
- goal = state.get('goal', 'Maintain')
229
- ingredients = state['detected_ingredients']
230
- ingredient_list = ", ".join([item.capitalize() for item in ingredients])
231
-
232
  # Map goal to dietary focus
233
  goal_descriptions = {
234
  'Cutting': 'weight loss and calorie deficit',
@@ -236,12 +241,12 @@ def generate_recipes(cuisine_preference: Optional[str], state: Dict) -> Tuple[Di
236
  'Bulking': 'muscle gain with high protein',
237
  'Custom': 'your custom calorie target'
238
  }
239
- goal_desc = goal_descriptions.get(goal, 'your goals')
240
 
241
- # Construct prompt
242
  prompt = f"""You are a professional nutritionist and chef. Create 3 distinct, detailed recipes that:
243
 
244
- 1. Use these available ingredients: {ingredient_list}
245
  2. Fit within a daily calorie target of approximately {calorie_target} calories per day
246
  3. Match {cuisine_preference} cuisine style
247
  4. Align with the goal of {goal_desc}
@@ -262,7 +267,7 @@ Format each recipe clearly with headers. Make the recipes practical, delicious,
262
  # Try multiple ways to get the token
263
  hf_token = None
264
 
265
- # Method 1: Check HF_TOKEN environment variable
266
  hf_token = os.getenv("HF_TOKEN", None)
267
 
268
  # Method 2: Check HUGGING_FACE_HUB_TOKEN (alternative name)
@@ -277,65 +282,138 @@ Format each recipe clearly with headers. Make the recipes practical, delicious,
277
  except:
278
  pass
279
 
280
- if not hf_token:
281
- return state, """**⚠️ Hugging Face Token Required**
282
-
283
- Please set your HF_TOKEN environment variable to use recipe generation.
284
-
285
- **For Hugging Face Spaces:**
286
- 1. Go to your Space Settings (gear icon)
287
- 2. Scroll to "Repository secrets"
288
- 3. Click "New secret"
289
- 4. Name: `HF_TOKEN`
290
- 5. Value: Your Hugging Face token
291
- 6. Click "Add secret" and restart your Space
292
-
293
- **For Local Development (Windows):**
294
- 1. Press Win+R, type `sysdm.cpl`, press Enter
295
- 2. Go to "Advanced" tab β†’ "Environment Variables"
296
- 3. Under "User variables", click "New"
297
- 4. Variable name: `HF_TOKEN`
298
- 5. Variable value: Your Hugging Face token
299
- 6. Click OK and restart your application
300
-
301
- Get your token at: https://huggingface.co/settings/tokens"""
302
-
303
- client = InferenceClient(token=hf_token)
304
-
305
- # Try using models that support text-generation
306
- # List of models to try in order of preference (all verified to work with text-generation)
307
- models_to_try = [
308
- "meta-llama/Llama-3.2-3B-Instruct", # Fast and reliable
309
- "meta-llama/Llama-3.1-8B-Instruct", # Better quality
310
- "mistralai/Mistral-7B-Instruct-v0.3", # Alternative option
311
- "microsoft/Phi-3-mini-4k-instruct", # Lightweight fallback
312
- "google/gemma-2-2b-it", # Additional reliable option
313
- ]
314
 
 
 
315
  response = None
316
  last_error = None
317
  successful_model = None
 
 
 
 
 
 
 
 
 
 
318
 
319
  for model_name in models_to_try:
 
320
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
321
  response = client.text_generation(
322
  prompt,
323
- model=model_name,
324
- max_new_tokens=1500,
325
  temperature=0.7,
326
  )
327
- successful_model = model_name
328
- break # Success, exit the loop
329
- except Exception as model_error:
330
- last_error = model_error
331
- continue # Try next model
332
 
333
- # If all models failed, raise error with details
334
  if response is None:
335
- error_msg = f"All models failed. Last error: {str(last_error)}"
 
 
 
 
 
 
 
 
 
 
 
 
336
  if not hf_token:
337
- error_msg += "\n\n💡 TIP: Make sure you have set your HF_TOKEN environment variable."
338
- raise Exception(error_msg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
339
 
340
  # Extract text if response is a formatted object
341
  if hasattr(response, 'generated_text'):
@@ -345,12 +423,21 @@ Get your token at: https://huggingface.co/settings/tokens"""
345
  else:
346
  response_text = str(response)
347
 
 
 
 
 
 
 
 
 
 
 
 
348
  recipes_text = f"""## 🍳 Recipe Suggestions for {cuisine_preference} Cuisine
349
 
350
- **Your Profile:**
351
- - Daily Calorie Target: {calorie_target} calories
352
- - Goal: {goal}
353
- - Available Ingredients: {ingredient_list}
354
 
355
  ---
356
 
@@ -358,7 +445,7 @@ Get your token at: https://huggingface.co/settings/tokens"""
358
 
359
  ---
360
 
361
- *Recipes generated based on your profile and available ingredients.*"""
362
 
363
  return state, recipes_text
364
 
@@ -642,8 +729,8 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
642
  """
643
  <div class="description-box">
644
  <strong>🍳 Generate personalized recipes:</strong><br>
645
- Based on your calorie target, fitness goals, and detected ingredients,
646
- we'll generate 3 custom recipes tailored to your preferences.
647
  </div>
648
  """
649
  )
@@ -652,9 +739,9 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
652
  with gr.Column(scale=1):
653
  cuisine_input = gr.Dropdown(
654
  label="Cuisine Preference",
655
- choices=["Mexican", "Chinese", "American", "Italian", "Indian", "Japanese", "Mediterranean", "Thai", "French"],
656
- value=None,
657
- info="Select your preferred cuisine style"
658
  )
659
 
660
  generate_btn = gr.Button(
@@ -666,10 +753,12 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
666
  gr.Markdown("---")
667
  gr.Markdown(
668
  """
669
- **📝 Requirements:**
670
- - Complete User Profile & Goals tab
671
- - Scan ingredients in Ingredient Scanner tab
672
- - Select a cuisine preference
 
 
673
  """
674
  )
675
 
@@ -698,3 +787,4 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
698
  # Launch the app
699
  if __name__ == "__main__":
700
  demo.launch()
 
 
1
  import gradio as gr
2
  from ultralytics import YOLO
3
+ from PIL import Image
4
  import numpy as np
5
  from typing import List, Tuple, Dict, Optional
6
  from huggingface_hub import InferenceClient
 
183
  # YOLO returns BGR, convert to RGB
184
  if len(result_image.shape) == 3:
185
  result_image_rgb = result_image[..., ::-1] # BGR to RGB
186
+ processed_images.append(Image.fromarray(result_image_rgb))
187
  else:
188
+ processed_images.append(Image.fromarray(result_image))
189
 
190
  # Create formatted ingredient list
191
  if all_detected_items:
 
208
 
209
  def generate_recipes(cuisine_preference: Optional[str], state: Dict) -> Tuple[Dict, str]:
210
  """
211
+ Generate recipes using LLM. All inputs are optional with smart defaults.
212
  """
213
+ # Make everything optional - use defaults if not provided
214
+ cuisine_preference = cuisine_preference or "International"
 
215
 
216
+ # Get user data with defaults
217
+ calorie_target = state.get('calorie_target')
218
+ if calorie_target:
219
+ calorie_target = int(calorie_target)
220
+ else:
221
+ calorie_target = 2000 # Default calorie target
222
+
223
+ goal = state.get('goal', 'Maintain')
224
+ ingredients = state.get('detected_ingredients', [])
225
 
226
+ # Build ingredient list or use default
227
+ if ingredients:
228
+ ingredient_list = ", ".join([item.capitalize() for item in ingredients])
229
+ ingredient_context = f"Use these available ingredients: {ingredient_list}. "
230
+ else:
231
+ ingredient_list = "common pantry items"
232
+ ingredient_context = "Use common, readily available ingredients. "
233
 
234
  # Update state
235
  state['cuisine_preference'] = cuisine_preference
236
 
 
 
 
 
 
 
237
  # Map goal to dietary focus
238
  goal_descriptions = {
239
  'Cutting': 'weight loss and calorie deficit',
 
241
  'Bulking': 'muscle gain with high protein',
242
  'Custom': 'your custom calorie target'
243
  }
244
+ goal_desc = goal_descriptions.get(goal, 'general health and nutrition')
245
 
246
+ # Construct prompt - flexible based on available data
247
  prompt = f"""You are a professional nutritionist and chef. Create 3 distinct, detailed recipes that:
248
 
249
+ 1. {ingredient_context}
250
  2. Fit within a daily calorie target of approximately {calorie_target} calories per day
251
  3. Match {cuisine_preference} cuisine style
252
  4. Align with the goal of {goal_desc}
 
267
  # Try multiple ways to get the token
268
  hf_token = None
269
 
270
+ # Method 1: Check HF_TOKEN environment variable (Hugging Face Spaces secret)
271
  hf_token = os.getenv("HF_TOKEN", None)
272
 
273
  # Method 2: Check HUGGING_FACE_HUB_TOKEN (alternative name)
 
282
  except:
283
  pass
284
 
285
+ # Initialize client with token if available, otherwise try without
286
+ # Most models require a token, but we'll try both approaches
287
+ if hf_token:
288
+ client = InferenceClient(token=hf_token)
289
+ else:
290
+ # Try without token (may not work for most models)
291
+ client = InferenceClient()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
292
 
293
+ # Try multiple approaches and models that work on free tier
294
+ # Strategy: Try chat_completion first (more reliable), then text_generation as fallback
295
  response = None
296
  last_error = None
297
  successful_model = None
298
+ errors_log = []
299
+
300
+ # List of models to try - simpler models that work on free tier
301
+ models_to_try = [
302
+ "microsoft/Phi-3-mini-4k-instruct",
303
+ "HuggingFaceH4/zephyr-7b-beta",
304
+ "mistralai/Mistral-7B-Instruct-v0.2",
305
+ "meta-llama/Llama-3.2-3B-Instruct",
306
+ "google/flan-t5-xxl", # Simple text generation model
307
+ ]
308
 
309
  for model_name in models_to_try:
310
+ # Try chat_completion first
311
  try:
312
+ messages = [
313
+ {"role": "system", "content": "You are a professional nutritionist and chef. Create detailed, practical recipes with clear formatting."},
314
+ {"role": "user", "content": prompt}
315
+ ]
316
+ response_obj = client.chat_completion(
317
+ messages=messages,
318
+ model=model_name,
319
+ max_tokens=1500,
320
+ temperature=0.7,
321
+ )
322
+ # Extract response
323
+ if hasattr(response_obj, 'choices') and len(response_obj.choices) > 0:
324
+ if hasattr(response_obj.choices[0].message, 'content'):
325
+ response = response_obj.choices[0].message.content
326
+ else:
327
+ response = str(response_obj.choices[0].message)
328
+ elif isinstance(response_obj, dict) and 'choices' in response_obj:
329
+ response = response_obj['choices'][0]['message']['content']
330
+ elif isinstance(response_obj, str):
331
+ response = response_obj
332
+ else:
333
+ response = str(response_obj)
334
+
335
+ if response and len(response.strip()) > 50: # Make sure we got a real response
336
+ successful_model = f"{model_name} (chat_completion)"
337
+ break
338
+ except Exception as chat_error:
339
+ errors_log.append(f"{model_name} (chat): {str(chat_error)[:80]}")
340
+
341
+ # Try text_generation as fallback for this model
342
+ try:
343
+ response = client.text_generation(
344
+ prompt,
345
+ model=model_name,
346
+ max_new_tokens=1500,
347
+ temperature=0.7,
348
+ )
349
+ if response and len(str(response).strip()) > 50:
350
+ successful_model = f"{model_name} (text_generation)"
351
+ break
352
+ except Exception as text_error:
353
+ errors_log.append(f"{model_name} (text): {str(text_error)[:80]}")
354
+ last_error = text_error
355
+ continue
356
+
357
+ # If still no response, try a simple fallback
358
+ if not response:
359
+ try:
360
+ # Try a very simple model as last resort
361
  response = client.text_generation(
362
  prompt,
363
+ model="gpt2", # Always available
364
+ max_new_tokens=500,
365
  temperature=0.7,
366
  )
367
+ successful_model = "gpt2 (fallback)"
368
+ except:
369
+ pass
 
 
370
 
371
+ # If all models failed, provide helpful error message
372
  if response is None:
373
+ error_msg = f"**⚠️ Failed to generate recipes.**\n\n"
374
+
375
+ # Show last error details
376
+ if last_error:
377
+ error_msg += f"**Last error:** {str(last_error)[:200]}\n\n"
378
+
379
+ # Show which models were tried
380
+ if errors_log:
381
+ error_msg += "**Models tried:**\n"
382
+ for err in errors_log[:3]: # Show first 3 errors
383
+ error_msg += f"- {err}\n"
384
+ error_msg += "\n"
385
+
386
  if not hf_token:
387
+ error_msg += """**💡 Setup Required:**
388
+
389
+ Most recipe generation models require a Hugging Face token.
390
+
391
+ **For Hugging Face Spaces:**
392
+ 1. Go to your Space Settings (⚙️ gear icon)
393
+ 2. Scroll to **"Repository secrets"**
394
+ 3. Click **"New secret"**
395
+ 4. Name: `HF_TOKEN`
396
+ 5. Value: Your Hugging Face token (get one at https://huggingface.co/settings/tokens)
397
+ 6. Click **"Add secret"** and your Space will rebuild automatically
398
+
399
+ **For Local Development:**
400
+ Set the `HF_TOKEN` environment variable with your Hugging Face token.
401
+
402
+ Once the token is set, try generating recipes again!"""
403
+ else:
404
+ error_msg += """**Possible issues:**
405
+ - The models may require special access (some models need approval on Hugging Face)
406
+ - Your token may not have access to these models (free tier has limitations)
407
+ - Models might be routed to external providers that aren't available
408
+ - Network connectivity issues
409
+
410
+ **💡 Solutions:**
411
+ 1. Make sure your HF_TOKEN has "Inference" permission enabled
412
+ 2. Some models require accepting terms on their Hugging Face page first
413
+ 3. Try again - models may be temporarily unavailable
414
+ 4. Check https://huggingface.co/settings/tokens to verify token permissions"""
415
+
416
+ return state, error_msg
417
 
418
  # Extract text if response is a formatted object
419
  if hasattr(response, 'generated_text'):
 
423
  else:
424
  response_text = str(response)
425
 
426
+ # Build profile summary (only show if data exists)
427
+ profile_parts = []
428
+ if state.get('calorie_target'):
429
+ profile_parts.append(f"- Daily Calorie Target: {calorie_target} calories")
430
+ if state.get('goal'):
431
+ profile_parts.append(f"- Goal: {goal}")
432
+ if ingredients:
433
+ profile_parts.append(f"- Available Ingredients: {ingredient_list}")
434
+
435
+ profile_summary = "\n".join(profile_parts) if profile_parts else "- Using default settings (2000 calories, general recipes)"
436
+
437
  recipes_text = f"""## 🍳 Recipe Suggestions for {cuisine_preference} Cuisine
438
 
439
+ **Settings Used:**
440
+ {profile_summary}
 
 
441
 
442
  ---
443
 
 
445
 
446
  ---
447
 
448
+ *Recipes generated using AI. {"Based on your profile and ingredients." if (state.get('calorie_target') or ingredients) else "Feel free to customize your profile and scan ingredients for more personalized results!"}*"""
449
 
450
  return state, recipes_text
451
 
 
729
  """
730
  <div class="description-box">
731
  <strong>🍳 Generate personalized recipes:</strong><br>
732
+ Generate AI-powered recipes! You can customize with your calorie target, fitness goals, and detected ingredients,
733
+ or simply select a cuisine preference to get started right away. Everything is optional!
734
  </div>
735
  """
736
  )
 
739
  with gr.Column(scale=1):
740
  cuisine_input = gr.Dropdown(
741
  label="Cuisine Preference",
742
+ choices=["International", "Mexican", "Chinese", "American", "Italian", "Indian", "Japanese", "Mediterranean", "Thai", "French"],
743
+ value="International",
744
+ info="Select your preferred cuisine style (optional, defaults to International)"
745
  )
746
 
747
  generate_btn = gr.Button(
 
753
  gr.Markdown("---")
754
  gr.Markdown(
755
  """
756
+ **📝 Optional Enhancements:**
757
+ - 💡 Complete User Profile & Goals tab for personalized calorie targets
758
+ - 📸 Scan ingredients in Ingredient Scanner tab for custom recipes
759
+ - 🎨 Select a cuisine preference above (defaults to International)
760
+
761
+ **✨ You can generate recipes right now with just a cuisine selection!**
762
  """
763
  )
764
 
 
787
  # Launch the app
788
  if __name__ == "__main__":
789
  demo.launch()
790
+