from fastai.vision.all import *
import gradio as gr
import pathlib
import platform
import cv2
import dlib
import numpy as np
import mediapipe as mp
from imutils import face_utils
from PIL import Image
import time
from dotenv import load_dotenv
import os

# Load configuration from .env: 'color-dict' maps shade names to (R, G, B) tuples,
# 'makeup_recommendations' maps skin tones to their recommended shades and brands.
# NOTE: eval() executes the .env values as Python, so the .env file must be trusted.
load_dotenv()
colorDict = eval(os.getenv('color-dict'))
makeup_recommendations = eval(os.getenv('makeup_recommendations'))

# fastai learners exported on Linux store PosixPath objects; remap them on Windows
if platform.system() == 'Windows':
    pathlib.PosixPath = pathlib.WindowsPath


def rgb_to_bgr(rgb_color):
    r, g, b = rgb_color
    return (b, g, r)


image_path = None
lipstick_shade = None
foundation_color = None
powder_color = None


# Function to get makeup recommendations
def get_makeup_recommendation(skin_tone):
    global lipstick_shade, foundation_color, powder_color
    if skin_tone in makeup_recommendations:
        lipstick_shade = makeup_recommendations[skin_tone]['Lipstick Shade']
        foundation_color = makeup_recommendations[skin_tone]['Foundation Shade']
        powder_color = makeup_recommendations[skin_tone]['Powder Shade']
        return makeup_recommendations[skin_tone]
    return {'Foundation Shade': '', 'Lipstick Shade': '', 'Powder Shade': '', 'Brands': {}}


# Function to save the uploaded image
def save_uploaded_image(img):
    global image_path
    image_path = "uploaded_image_" + str(round(time.time() * 1000)) + ".png"
    img.save(image_path)
    return image_path


# Load the trained model
learn = load_learner('export.pkl')


# Prediction and recommendation function
def classify_and_recommend(img):
    # Save the uploaded image
    img_path = save_uploaded_image(img)
    print(f"Image saved to {img_path}")
    # Perform classification and get recommendations
    pred, _, probs = learn.predict(img)
    result = {learn.dls.vocab[i]: float(probs[i]) for i in range(len(probs))}
    recommendation = get_makeup_recommendation(pred)
    return result, recommendation


# Load the pre-trained facial landmark detector model
predictor_path = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)

# Indices of the lip landmarks in dlib's 68-point model
LIP_POINTS = list(range(48, 61))


def apply_lipstick(image):
    # Look up the recommended shade and convert it to OpenCV's BGR ordering
    lipstick_color = rgb_to_bgr(colorDict[lipstick_shade])
    # Convert PIL image to OpenCV format
    image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    # Detect faces in the image
    faces = detector(image_cv, 1)
    if len(faces) == 0:
        return image  # No faces detected, return the original image
    for face in faces:
        # Get the facial landmarks
        shape = predictor(image_cv, face)
        shape = face_utils.shape_to_np(shape)
        # Get the lip region points
        lips = shape[LIP_POINTS]
        # Create a mask for the lips
        mask = np.zeros(image_cv.shape[:2], dtype=np.uint8)
        cv2.fillPoly(mask, [lips], 255)
        # Create a colored overlay
        overlay = np.zeros_like(image_cv, dtype=np.uint8)
        overlay[:] = lipstick_color
        # Isolate the lip region from the colored overlay
        lip_area = cv2.bitwise_and(overlay, overlay, mask=mask)
        # Invert the lip mask
        inv_mask = cv2.bitwise_not(mask)
        # Apply the inverted mask to the original image to remove the lip region
        no_lip_area = cv2.bitwise_and(image_cv, image_cv, mask=inv_mask)
        # Combine the lip area with the rest of the image
        image_cv = cv2.addWeighted(no_lip_area, 1, lip_area, 0.6, 0)
    # Convert back to PIL format
    return Image.fromarray(cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB))
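
# Illustrative helper, not wired into the Gradio app: a minimal sketch of how the
# lipstick overlay above can be previewed on a local file. `sample_path` and
# `skin_tone` are hypothetical inputs; `skin_tone` must be a key present in
# `makeup_recommendations` so that get_makeup_recommendation() sets lipstick_shade.
def preview_lipstick(sample_path, skin_tone):
    """Return a PIL image with the recommended lipstick shade applied."""
    get_makeup_recommendation(skin_tone)  # sets the global lipstick_shade
    return apply_lipstick(Image.open(sample_path))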
mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh


def apply_color(image_pil, color, alpha):
    """Applies a specified color to the face in the image."""
    color = rgb_to_bgr(colorDict[color])
    # Convert PIL image to OpenCV format
    image_cv = cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2BGR)
    # Convert the image to RGB for MediaPipe
    image_rgb = cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB)
    # Initialize face detection and face mesh
    with mp_face_detection.FaceDetection(min_detection_confidence=0.5) as face_detection, \
         mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1,
                               min_detection_confidence=0.5) as face_mesh:
        # Detect faces in the image
        detection_results = face_detection.process(image_rgb)
        if detection_results.detections:
            for detection in detection_results.detections:
                # Get face landmarks
                mesh_results = face_mesh.process(image_rgb)
                if mesh_results.multi_face_landmarks:
                    for face_landmarks in mesh_results.multi_face_landmarks:
                        # Create a mask covering the face region
                        mask = np.zeros_like(image_cv, dtype=np.uint8)
                        ih, iw, _ = image_cv.shape
                        for landmark in face_landmarks.landmark:
                            x = int(landmark.x * iw)
                            y = int(landmark.y * ih)
                            cv2.circle(mask, (x, y), 1, (255, 255, 255), -1)
                        # Fill the convex hull of all landmarks so the whole face is masked
                        hull = cv2.convexHull(np.array([(int(landmark.x * iw), int(landmark.y * ih))
                                                        for landmark in face_landmarks.landmark]))
                        cv2.fillConvexPoly(mask, hull, (255, 255, 255))
                        # Create a solid image of the specified color
                        color_image = np.full_like(image_cv, color, dtype=np.uint8)
                        # Blend the color with the whole image
                        blended = cv2.addWeighted(image_cv, 1 - alpha, color_image, alpha, 0)
                        # Keep the blended pixels only inside the face mask
                        image_cv = np.where(mask == np.array([255, 255, 255]), blended, image_cv)
    # Convert the result back to PIL format
    return Image.fromarray(cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB))


def apply_makeup():
    # Load the most recently uploaded image
    print(image_path)
    input_image = Image.open(image_path)
    foundation_alpha = 0.05  # Transparency factor for foundation
    powder_alpha = 0.05      # Transparency factor for powder
    output_image_path = "output_image_" + str(round(time.time() * 1000)) + ".jpg"
    # Apply lipstick, foundation, and powder to the input image
    output_image = apply_lipstick(input_image)
    output_image = apply_color(output_image, foundation_color, foundation_alpha)
    output_image = apply_color(output_image, powder_color, powder_alpha)
    # Save the output image and return its path for Gradio to display
    output_image.save(output_image_path)
    return output_image_path


# Logo and welcome message (defined here but not currently used in the UI below)
logo = "https://i.pinimg.com/736x/f8/34/cc/f834ccc788207ae147ab37d2085f6903.jpg"  # Replace with your logo URL
welcome_message = """
# Skin Tone Classification and Makeup Recommendations
Upload an image to classify your skin tone and receive personalized makeup recommendations.
Find the perfect foundation, lipstick, and powder shades from your favorite brands!
"""

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("""
    # Welcome to the Makeup Recommendation and Application Tool
    Upload an image to receive personalized makeup recommendations and see how the makeup looks on you!
    """)
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Upload and Classify")
            upload_image = gr.Image(type="pil", label="Upload Image")
            classify_btn = gr.Button("Submit")
            result_label = gr.Label(num_top_classes=3, label='Classification Results')
            recommendation_json = gr.JSON(label='Makeup Recommendations')
            classify_btn.click(fn=classify_and_recommend, inputs=upload_image,
                               outputs=[result_label, recommendation_json])
        with gr.Column():
            gr.Markdown("## View Edited Image")
            img = gr.Image(label="Edited Image")
            show_image_btn = gr.Button("Show Edited Image")
            show_image_btn.click(fn=apply_makeup, inputs=[], outputs=img)
    gr.Markdown("""
    ### Instructions:
    1. **Upload an Image:** Choose a clear image of your face.
    2. **Submit for Classification:** Click 'Submit' to receive makeup recommendations.
    3. **View Edited Image:** Click 'Show Edited Image' to see the applied makeup.
    """)

demo.launch(debug=True, share=True)