JustNikunj committed on
Commit
c1f8ab9
·
1 Parent(s): ca88810

api update

Browse files
Files changed (1) hide show
  1. app.py +160 -87
app.py CHANGED
@@ -12,8 +12,9 @@ import time
12
  from enum import Enum
13
  import gradio as gr
14
  from huggingface_hub import InferenceClient
 
15
 
16
- # Environment variables
17
  HF_TOKEN = os.getenv("HF_TOKEN") # Required for Llama model access
18
  MODEL_NAME = "meta-llama/Llama-3.1-8B-Instruct"
19
  MAX_PROMPT_LENGTH = int(os.getenv("MAX_PROMPT_LENGTH", "2000"))
@@ -21,7 +22,7 @@ RECOMMENDATION_TIMEOUT = int(os.getenv("RECOMMENDATION_TIMEOUT", "60"))
21
  MAX_RETRIES = int(os.getenv("MAX_RETRIES", "2"))
22
  ENABLE_CACHING = os.getenv("ENABLE_CACHING", "true").lower() == "true"
23
 
24
- # Logging setup
25
  logging.basicConfig(
26
  level=logging.INFO,
27
  format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
@@ -29,7 +30,7 @@ logging.basicConfig(
29
  )
30
  logger = logging.getLogger("hindi_emotion_recommendation")
31
 
32
- # Initialize HuggingFace Inference Client
33
  try:
34
  client = InferenceClient(token=HF_TOKEN)
35
  logger.info(f"✓ HuggingFace client initialized with model: {MODEL_NAME}")
@@ -37,11 +38,24 @@ except Exception as e:
37
  logger.error(f"Failed to initialize HuggingFace client: {str(e)}")
38
  client = None
39
 
40
- # Cache for recommendations
 
 
 
 
 
 
 
 
 
41
  recommendation_cache = {}
42
  CACHE_TTL_SECONDS = 3600
43
 
44
- # Response validation enums
 
 
 
 
45
  class ValidationStatus(str, Enum):
46
  VALID = "valid"
47
  WARNING = "warning"
@@ -162,6 +176,7 @@ class ResponseValidator:
162
 
163
  return recommendation
164
 
 
165
  def get_cache_key(emotion_result: dict) -> str:
166
  """Generate cache key from emotion result"""
167
  cache_data = {
@@ -190,6 +205,7 @@ def save_to_cache(cache_key: str, data: Dict[str, Any]):
190
  if ENABLE_CACHING:
191
  recommendation_cache[cache_key] = (data, time.time())
192
 
 
193
  @lru_cache(maxsize=1)
194
  def load_few_shot_examples() -> str:
195
  """Load few-shot examples"""
@@ -345,6 +361,15 @@ def get_fallback_recommendation(emotion_result: dict) -> str:
345
 
346
  return "यदि आपको सहायता चाहिए तो किसी विश्वसनीय व्यक्ति से संपर्क करें। आपकी भावनाएं महत्वपूर्ण हैं।"
347
 
 
 
 
 
 
 
 
 
 
348
  def process_emotion_analysis(
349
  transcript: str,
350
  sentiment: str,
@@ -355,7 +380,7 @@ def process_emotion_analysis(
355
  is_mental_health: bool,
356
  is_grief_loss: bool,
357
  is_relationship: bool
358
- ) -> Tuple[str, str, str, str, str]:
359
  """Process emotion analysis and generate recommendation"""
360
 
361
  start_time = time.time()
@@ -377,7 +402,7 @@ def process_emotion_analysis(
377
  'is_relationship_distress': is_relationship
378
  }
379
  },
380
- 'prosodic_features': {}
381
  }
382
 
383
  # Check cache
@@ -392,6 +417,7 @@ def process_emotion_analysis(
392
  cached = True
393
  else:
394
  # Generate new recommendation
 
395
  action = get_llama_recommendation(emotion_result)
396
 
397
  # Validate the recommendation
@@ -407,6 +433,7 @@ def process_emotion_analysis(
407
  if action != original_action:
408
  enhanced = True
409
  logger.info("Recommendation auto-enhanced")
 
410
  validation_result = ResponseValidator.validate_recommendation(action, emotion_result)
411
 
412
  # Cache the result
@@ -441,12 +468,95 @@ def process_emotion_analysis(
441
  **Processing Time:** {processing_time}ms
442
  **Cached:** {'Yes ♻️' if cached else 'No 🆕'}
443
  **Enhanced:** {'Yes 🔧' if enhanced else 'No'}
444
- **Confidence:** {confidence:.2%}
445
  """
446
 
447
- return action, risk_level, validation_info, metadata
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
448
 
449
- # Gradio Interface
450
  def create_interface():
451
  """Create Gradio interface"""
452
 
@@ -461,58 +571,33 @@ def create_interface():
461
  AI-powered emotional analysis and action recommendations for Indian women's support.
462
  Powered by **Meta Llama 3.1 8B Instruct** with intelligent validation and enhancement.
463
 
464
- ### Important Helplines:
465
- - 🚨 **Emergency/Police:** 112
466
- - 👩 **Women's Helpline:** 181, 1091
467
- - 🧠 **Mental Health:** 9152987821 (Vandrevala), 08046110007 (NIMHANS)
468
- - 💙 **Suicide Prevention:** 9820466726 (AASRA)
469
  """)
470
 
471
  with gr.Row():
472
  with gr.Column(scale=1):
473
- gr.Markdown("### 📝 Input Emotion Analysis")
474
-
475
- transcript = gr.Textbox(
476
- label="Transcript (Hindi/English)",
477
- placeholder="मुझे बहुत डर लग रहा है...",
478
- lines=3
479
- )
480
 
481
- sentiment = gr.Dropdown(
482
- label="Sentiment",
483
- choices=["Positive", "Negative", "Neutral"],
484
- value="Negative"
 
485
  )
486
 
487
- with gr.Row():
488
- primary_emotion = gr.Dropdown(
489
- label="Primary Emotion",
490
- choices=["fear", "sadness", "anger", "joy", "surprise", "disgust", "neutral", "despair", "anxiety"],
491
- value="sadness"
492
- )
493
-
494
- secondary_emotion = gr.Dropdown(
495
- label="Secondary Emotion",
496
- choices=["", "distress", "frustration", "hopelessness", "worry", "relief"],
497
- value=""
498
- )
499
-
500
- confidence = gr.Slider(
501
- label="Confidence Score",
502
- minimum=0.0,
503
- maximum=1.0,
504
- value=0.8,
505
- step=0.05
506
- )
507
-
508
- gr.Markdown("### 🎯 Situation Flags")
509
 
510
- is_crisis = gr.Checkbox(label="🚨 Crisis Situation", value=False)
511
- is_mental_health = gr.Checkbox(label="🧠 Mental Health Distress", value=False)
512
- is_grief_loss = gr.Checkbox(label="💔 Grief/Loss", value=False)
513
- is_relationship = gr.Checkbox(label="👥 Relationship Distress", value=False)
 
 
 
 
514
 
515
- submit_btn = gr.Button("Generate Recommendation 🚀", variant="primary", size="lg")
 
516
 
517
  with gr.Column(scale=1):
518
  gr.Markdown("### 💡 Recommendation Output")
@@ -535,54 +620,42 @@ def create_interface():
535
  metadata = gr.Markdown(
536
  label="Metadata"
537
  )
 
 
 
 
 
538
 
539
- # Example inputs
540
- gr.Markdown("### 📚 Example Inputs")
541
- gr.Examples(
542
- examples=[
543
- ["मुझे बचाओ! कोई मुझे मार रहा है।", "Negative", "fear", "distress", 0.95, True, False, False, False],
544
- ["मैं बहुत अकेला और उदास महसूस कर रहा हूँ।", "Negative", "sadness", "neutral", 0.78, False, True, False, False],
545
- ["मेरी पत्नी ने मुझे छोड़ दिया है।", "Negative", "sadness", "distress", 0.82, False, False, False, True],
546
- ["मैं अपने जीवन को खत्म करना चाहती हूं।", "Negative", "despair", "hopelessness", 0.92, True, True, False, False],
547
- ["आज मौसम बहुत अच्छा है।", "Positive", "joy", "", 0.85, False, False, False, False],
548
- ],
549
- inputs=[transcript, sentiment, primary_emotion, secondary_emotion, confidence, is_crisis, is_mental_health, is_grief_loss, is_relationship],
550
- )
551
-
552
- # Connect button
553
  submit_btn.click(
554
- fn=process_emotion_analysis,
555
- inputs=[
556
- transcript, sentiment, primary_emotion, secondary_emotion,
557
- confidence, is_crisis, is_mental_health, is_grief_loss, is_relationship
558
- ],
559
- outputs=[recommendation, risk_level, validation, metadata]
560
  )
561
 
 
562
  gr.Markdown("""
563
- ---
564
- ### ℹ️ About
565
-
566
- This system uses:
567
- - **Meta Llama 3.1 8B Instruct** for generating empathetic recommendations
568
- - **Multi-criteria validation** to ensure quality and safety
569
- - **Auto-enhancement** to add missing critical information
570
- - **Caching** for faster repeated queries
571
-
572
- **Note:** This is an AI assistant. In emergencies, always call emergency services immediately.
573
  """)
574
 
575
  return demo
576
 
577
- # Launch the app
578
  if __name__ == "__main__":
 
 
 
 
 
579
  logger.info("Starting Gradio interface...")
580
- logger.info(f"Model: {MODEL_NAME}")
581
- logger.info(f"HF Token Available: {'Yes' if HF_TOKEN else 'No'}")
582
 
583
  demo = create_interface()
584
  demo.launch(
585
  server_name="0.0.0.0",
586
  server_port=7860,
587
- share=False
588
  )
 
12
  from enum import Enum
13
  import gradio as gr
14
  from huggingface_hub import InferenceClient
15
+ from gradio_client import Client, handle_file # <-- REQUIRED IMPORT
16
 
17
+ # --- Environment variables ---
18
  HF_TOKEN = os.getenv("HF_TOKEN") # Required for Llama model access
19
  MODEL_NAME = "meta-llama/Llama-3.1-8B-Instruct"
20
  MAX_PROMPT_LENGTH = int(os.getenv("MAX_PROMPT_LENGTH", "2000"))
 
22
  MAX_RETRIES = int(os.getenv("MAX_RETRIES", "2"))
23
  ENABLE_CACHING = os.getenv("ENABLE_CACHING", "true").lower() == "true"
24
 
25
+ # --- Logging setup ---
26
  logging.basicConfig(
27
  level=logging.INFO,
28
  format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
 
30
  )
31
  logger = logging.getLogger("hindi_emotion_recommendation")
32
 
33
+ # --- Initialize HuggingFace Inference Client (for Llama 3.1) ---
34
  try:
35
  client = InferenceClient(token=HF_TOKEN)
36
  logger.info(f"✓ HuggingFace client initialized with model: {MODEL_NAME}")
 
38
  logger.error(f"Failed to initialize HuggingFace client: {str(e)}")
39
  client = None
40
 
# --- Initialize Audio Analysis Client (for your Emotion API) ---
# This client points to the companion Hugging Face Space that performs
# the audio-to-emotion analysis step of the pipeline.
audio_analysis_client = None  # remains None if the Space is unreachable
try:
    audio_analysis_client = Client("JustNikunj/Sentimental_Analysis")
except Exception as e:
    logger.error(f"Failed to initialize Audio Analysis API client: {str(e)}")
else:
    logger.info("✓ Audio Analysis API client initialized for 'JustNikunj/Sentimental_Analysis'")
  CACHE_TTL_SECONDS = 3600
53
 
54
+ # =========================================================================
55
+ # === START: ORIGINAL HELPER FUNCTIONS (FROM YOUR MAIN CODE) ===
56
+ # =========================================================================
57
+
58
+ # --- Response validation enums ---
59
  class ValidationStatus(str, Enum):
60
  VALID = "valid"
61
  WARNING = "warning"
 
176
 
177
  return recommendation
178
 
179
+ # --- Caching functions ---
180
  def get_cache_key(emotion_result: dict) -> str:
181
  """Generate cache key from emotion result"""
182
  cache_data = {
 
205
  if ENABLE_CACHING:
206
  recommendation_cache[cache_key] = (data, time.time())
207
 
208
+ # --- LLM Prompting functions ---
209
  @lru_cache(maxsize=1)
210
  def load_few_shot_examples() -> str:
211
  """Load few-shot examples"""
 
361
 
362
  return "यदि आपको सहायता चाहिए तो किसी विश्वसनीय व्यक्ति से संपर्क करें। आपकी भावनाएं महत्वपूर्ण हैं।"
363
 
364
+ # =========================================================================
365
+ # === END: ORIGINAL HELPER FUNCTIONS ===
366
+ # =========================================================================
367
+
368
+
369
+ # --- CORE LOGIC: Function to process emotion data and get LLM response ---
370
+ # This is your original 'process_emotion_analysis' function.
371
+ # It is now called by the new 'get_recommendation_from_audio' function.
372
+
373
  def process_emotion_analysis(
374
  transcript: str,
375
  sentiment: str,
 
380
  is_mental_health: bool,
381
  is_grief_loss: bool,
382
  is_relationship: bool
383
+ ) -> Tuple[str, str, str, str, str]: # Returns 5 values for the 5 Gradio outputs
384
  """Process emotion analysis and generate recommendation"""
385
 
386
  start_time = time.time()
 
402
  'is_relationship_distress': is_relationship
403
  }
404
  },
405
+ 'prosodic_features': {} # Prosodic features are not used in the prompt
406
  }
407
 
408
  # Check cache
 
417
  cached = True
418
  else:
419
  # Generate new recommendation
420
+ logger.info("Generating new LLM recommendation...")
421
  action = get_llama_recommendation(emotion_result)
422
 
423
  # Validate the recommendation
 
433
  if action != original_action:
434
  enhanced = True
435
  logger.info("Recommendation auto-enhanced")
436
+ # Re-validate after enhancement
437
  validation_result = ResponseValidator.validate_recommendation(action, emotion_result)
438
 
439
  # Cache the result
 
468
  **Processing Time:** {processing_time}ms
469
  **Cached:** {'Yes ♻️' if cached else 'No 🆕'}
470
  **Enhanced:** {'Yes 🔧' if enhanced else 'No'}
 
471
  """
472
 
473
+ # This string will show the raw API data in the UI
474
+ api_debug_info = f"""
475
+ **Transcript:** {transcript}
476
+ **Emotion:** {primary_emotion} (Conf: {confidence:.1%})
477
+ **Sentiment:** {sentiment}
478
+ **Crisis:** {is_crisis}
479
+ **Mental Health:** {is_mental_health}
480
+ """
481
+
482
+ # Return 5 values to match the Gradio output components
483
+ return action, risk_level, validation_info, metadata, api_debug_info
484
+
485
+
486
+ # --- NEW: MASTER FUNCTION TO CONNECT API AND LLM ---
487
+ def get_recommendation_from_audio(audio_filepath: str) -> Tuple[str, str, str, str, str]:
488
+ """
489
+ This is the new master function for the Gradio interface.
490
+ 1. Takes audio input from Gradio.
491
+ 2. Calls the 'JustNikunj/Sentimental_Analysis' API.
492
+ 3. Parses the result.
493
+ 4. Calls 'process_emotion_analysis' to get the LLM recommendation.
494
+ """
495
+ if not audio_analysis_client:
496
+ error_msg = "Audio Analysis API client ('JustNikunj/Sentimental_Analysis') is not initialized."
497
+ logger.error(error_msg)
498
+ return "Error: " + error_msg, "🔴 ERROR", error_msg, "", ""
499
+
500
+ if not audio_filepath:
501
+ logger.warning("No audio file provided.")
502
+ return "No audio provided. Please record or upload.", "⚪️ N/A", "No input", "", ""
503
+
504
+ logger.info(f"Processing audio file: {audio_filepath}")
505
+
506
+ try:
507
+ # --- STEP 1: Call Audio Analysis API ---
508
+ logger.info("Calling audio analysis API...")
509
+ api_result = audio_analysis_client.predict(
510
+ audio_filepath=handle_file(audio_filepath),
511
+ api_name="/predict"
512
+ )
513
+ logger.info("✓ Audio analysis complete.")
514
+
515
+ # --- STEP 2: Check for API errors ---
516
+ if not isinstance(api_result, dict) or api_result.get('status') != 'success':
517
+ error_msg = api_result.get('message', 'Unknown API error')
518
+ logger.error(f"Audio API failed: {error_msg}")
519
+ # Show the raw error in the "API Debug" box
520
+ return f"Error from Audio API: {error_msg}", "🔴 ERROR", error_msg, "", json.dumps(api_result, indent=2, ensure_ascii=False)
521
+
522
+ # --- STEP 3: Parse the successful API result ---
523
+ transcript = api_result['transcription']
524
+ sentiment = api_result['sentiment']['dominant']
525
+ primary_emotion = api_result['emotion']['primary']
526
+ secondary_emotion = api_result['emotion'].get('secondary')
527
+ confidence = float(api_result['emotion']['confidence'])
528
+
529
+ situations = api_result['analysis']['situations']
530
+ is_crisis = situations['is_crisis']
531
+ is_mental_health = situations['is_mental_health_distress']
532
+ is_grief_loss = situations['is_grief_loss']
533
+ is_relationship = situations['is_relationship_distress']
534
+
535
+ logger.info(f"✓ API Success. Transcript: '{transcript}', Emotion: {primary_emotion}")
536
+
537
+ # --- STEP 4: Call the LLM Recommendation Logic ---
538
+ # This calls your original function with the data we just got
539
+ return process_emotion_analysis(
540
+ transcript=transcript,
541
+ sentiment=sentiment,
542
+ primary_emotion=primary_emotion,
543
+ secondary_emotion=secondary_emotion,
544
+ confidence=confidence,
545
+ is_crisis=is_crisis,
546
+ is_mental_health=is_mental_health,
547
+ is_grief_loss=is_grief_loss,
548
+ is_relationship=is_relationship
549
+ )
550
+
551
+ except Exception as e:
552
+ logger.error(f"Error in 'get_recommendation_from_audio': {str(e)}")
553
+ import traceback
554
+ tb_str = traceback.format_exc()
555
+ logger.error(tb_str)
556
+ return f"A system error occurred: {str(e)}", "🔴 ERROR", str(e), "", tb_str
557
+
558
 
559
+ # --- MODIFIED: Gradio Interface ---
560
  def create_interface():
561
  """Create Gradio interface"""
562
 
 
571
  AI-powered emotional analysis and action recommendations for Indian women's support.
572
  Powered by **Meta Llama 3.1 8B Instruct** with intelligent validation and enhancement.
573
 
574
+ This app **calls the 'JustNikunj/Sentimental_Analysis' API** to get emotion data from your audio.
 
 
 
 
575
  """)
576
 
577
  with gr.Row():
578
  with gr.Column(scale=1):
579
+ gr.Markdown("### 🎙️ Input Audio")
 
 
 
 
 
 
580
 
581
+ # --- THIS IS THE NEW INPUT ---
582
+ audio_input = gr.Audio(
583
+ label="Record or Upload Audio",
584
+ sources=["microphone", "upload"],
585
+ type="filepath" # Send the file path to the function
586
  )
587
 
588
+ submit_btn = gr.Button("Generate Recommendation 🚀", variant="primary", size="lg")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
589
 
590
+ gr.Markdown("### ℹ️ About This App")
591
+ gr.Markdown("""
592
+ This system uses:
593
+ - **'JustNikunj/Sentimental_Analysis'** for audio-to-emotion analysis.
594
+ - **Meta Llama 3.1 8B Instruct** for generating empathetic recommendations.
595
+ - **Multi-criteria validation** to ensure quality and safety.
596
+ - **Auto-enhancement** to add missing critical information.
597
+ - **Caching** for faster repeated queries.
598
 
599
+ **Note:** This is an AI assistant. In emergencies, always call emergency services immediately.
600
+ """)
601
 
602
  with gr.Column(scale=1):
603
  gr.Markdown("### 💡 Recommendation Output")
 
620
  metadata = gr.Markdown(
621
  label="Metadata"
622
  )
623
+
624
+ # Added an output to show the data from the audio API
625
+ api_debug = gr.Markdown(
626
+ label="Raw Analysis from Audio API"
627
+ )
628
 
629
+ # Connect button to the new master function
 
 
 
 
 
 
 
 
 
 
 
 
 
630
  submit_btn.click(
631
+ fn=get_recommendation_from_audio, # <--- NEW FUNCTION
632
+ inputs=[audio_input], # <--- NEW INPUT
633
+ outputs=[recommendation, risk_level, validation, metadata, api_debug] # <--- 5 OUTPUTS
 
 
 
634
  )
635
 
636
+ gr.Markdown("### Important Helplines")
637
  gr.Markdown("""
638
+ - 🚨 **Emergency/Police:** 112
639
+ - 👩 **Women's Helpline:** 181, 1091
640
+ - 🧠 **Mental Health:** 9152987821 (Vandrevala), 08046110007 (NIMHANS)
641
+ - 💙 **Suicide Prevention:** 9820466726 (AASRA)
 
 
 
 
 
 
642
  """)
643
 
644
  return demo
645
 
646
+ # --- Launch the app ---
647
  if __name__ == "__main__":
648
+ if not HF_TOKEN:
649
+ logger.warning("HF_TOKEN environment variable is not set. Llama 3.1 calls may fail.")
650
+ if not audio_analysis_client:
651
+ logger.warning("Could not connect to 'JustNikunj/Sentimental_Analysis'. The app will not work.")
652
+
653
  logger.info("Starting Gradio interface...")
654
+ logger.info(f"Recommendation Model: {MODEL_NAME}")
 
655
 
656
  demo = create_interface()
657
  demo.launch(
658
  server_name="0.0.0.0",
659
  server_port=7860,
660
+ share=False # Set to True if you want a public link from your local machine
661
  )