ishanprogs committed on
Commit a65d531 · verified · 1 Parent(s): 6377553

Update app.py

Files changed (1)
  1. app.py +33 -9
app.py CHANGED
@@ -1,4 +1,4 @@
-# app.py (Complete Final Version)
+# app.py (Complete Final Version - Fixed resize_masks SyntaxError)
 
 import gradio as gr
 import torch
@@ -40,7 +40,7 @@ NUM_CAR_PART_CLASSES = len(CAR_PART_CLASSES)
 
 # Paths within the Hugging Face Space repository (relative to app.py)
 CLIP_TEXT_FEATURES_PATH = "./clip_text_features.pt"
-DAMAGE_MODEL_WEIGHTS_PATH = "./best.pt" # Your YOLOv8 damage model weights
+DAMAGE_MODEL_WEIGHTS_PATH = "./model_best.pt" # Your YOLOv8 damage model weights
 PART_MODEL_WEIGHTS_PATH = "./partdetection_yolobest.pt" # Your YOLOv8 part model weights
 
 # Default Prediction Thresholds (can be overridden by sliders)
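Note: this hunk only renames the damage-weights file; the code that actually loads the weights is outside the diff. For context, a minimal loading sketch with the ultralytics package might look like the following (the constant names mirror app.py, but the loading code itself is an assumption, not something shown in this commit):

```python
# Hypothetical loading sketch; the actual loading code in app.py is not part of this diff.
import torch
from ultralytics import YOLO  # assumes the ultralytics package is installed

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DAMAGE_MODEL_WEIGHTS_PATH = "./model_best.pt"            # renamed in this commit
PART_MODEL_WEIGHTS_PATH = "./partdetection_yolobest.pt"

damage_model = YOLO(DAMAGE_MODEL_WEIGHTS_PATH)  # YOLOv8 segmentation weights
part_model = YOLO(PART_MODEL_WEIGHTS_PATH)
damage_model.to(DEVICE)
part_model.to(DEVICE)
```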
@@ -167,7 +167,6 @@ def classify_image_clip(image_pil):
         traceback.print_exc()
         return "Error during CLIP processing", {"Error": 1.0}
 
-# --- CORRECTED process_car_image Function ---
 def process_car_image(image_np_bgr, damage_threshold, part_threshold):
     """
     Runs damage and part segmentation (YOLOv8), calculates overlap, visualizes.
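The docstring above says process_car_image "calculates overlap" between damage and part masks, but that logic lies outside the changed hunks. As a rough illustration only (this is not the code from app.py), one way to assign each damage mask to the most-overlapped part, assuming both mask sets are already boolean arrays at the same resolution, is:

```python
# Hypothetical overlap/assignment sketch; app.py's actual logic is not shown in this diff.
import numpy as np

def assign_damage_to_parts(damage_masks, part_masks, damage_labels, part_labels, min_overlap=0.10):
    """For each boolean damage mask in [N, H, W], pick the part mask in [M, H, W]
    with the largest fractional overlap (intersection area / damage area)."""
    assignments = []
    for d_idx, d_mask in enumerate(damage_masks):
        d_area = d_mask.sum()
        if d_area == 0:
            continue  # skip empty masks
        best_label, best_frac = None, 0.0
        for p_idx, p_mask in enumerate(part_masks):
            frac = np.logical_and(d_mask, p_mask).sum() / d_area
            if frac > best_frac:
                best_label, best_frac = part_labels[p_idx], frac
        if best_label is not None and best_frac >= min_overlap:
            assignments.append(f"{damage_labels[d_idx]} on {best_label} ({best_frac:.0%} overlap)")
        else:
            assignments.append(f"{damage_labels[d_idx]} (no clear part overlap)")
    return assignments
```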
@@ -186,7 +185,6 @@ def process_car_image(image_np_bgr, damage_threshold, part_threshold):
 
     try:
         # --- Create the image tensor ONCE for the annotator ---
-        # Needs to be HWC format on the correct device
         try:
             im_tensor_gpu_for_annotator = torch.from_numpy(image_np_bgr).to(DEVICE) # Keep HWC
             if not isinstance(im_tensor_gpu_for_annotator, torch.Tensor) or im_tensor_gpu_for_annotator.ndim != 3:
@@ -217,9 +215,38 @@ def process_car_image(image_np_bgr, damage_threshold, part_threshold):
         yolo_end_time = time.time()
         logger.info(f" YOLO predictions took {yolo_end_time - yolo_start_time:.2f}s")
 
-        # --- 3. Resize Masks ---
+        # --- 3. Resize Masks (Corrected Function Definition) ---
         def resize_masks(masks_tensor, target_h, target_w):
-            masks_np_bool = masks_tensor.cpu().numpy().astype(bool); if masks_np_bool.shape[0] == 0 or (masks_np_bool.shape[1] == target_h and masks_np_bool.shape[2] == target_w): return masks_np_bool; resized_masks_list = []; for i in range(masks_np_bool.shape[0]): mask = masks_np_bool[i]; mask_resized = cv2.resize(mask.astype(np.uint8), (target_w, target_h), interpolation=cv2.INTER_NEAREST); resized_masks_list.append(mask_resized.astype(bool)); return np.array(resized_masks_list)
+            """Resizes masks tensor to target H, W using CPU numpy and OpenCV."""
+            masks_np_bool = masks_tensor.cpu().numpy().astype(bool) # Move to CPU *before* resizing
+
+            # Handle empty tensor or already correct size
+            if masks_np_bool.shape[0] == 0:
+                return np.array([]) # Return empty numpy array
+            if masks_np_bool.ndim == 3 and masks_np_bool.shape[1] == target_h and masks_np_bool.shape[2] == target_w:
+                return masks_np_bool # Return if already correct size
+
+            # Ensure masks_np_bool is 3D [N, H, W] even if only one mask
+            if masks_np_bool.ndim == 2: # Handle case of single mask output [H, W]
+                masks_np_bool = np.expand_dims(masks_np_bool, axis=0)
+                logger.warning("Detected 2D mask input, expanding to 3D for resize loop.")
+
+            # Check dimensions *before* logging resize message
+            if masks_np_bool.ndim != 3:
+                logger.error(f"Unexpected mask dimension: {masks_np_bool.ndim}. Expected 3D [N, H, W]. Cannot resize.")
+                return np.array([]) # Return empty if shape is wrong
+
+            # Proceed with resizing if necessary
+            # logger.info(f"Resizing {masks_np_bool.shape[0]} masks from {masks_np_bool.shape[1:]} to {(target_h, target_w)}") # Optional verbose log
+            resized_masks_list = []
+            for i in range(masks_np_bool.shape[0]):
+                mask = masks_np_bool[i] # Get the single [H, W] mask
+                # Resize needs uint8
+                mask_resized = cv2.resize(mask.astype(np.uint8), (target_w, target_h), interpolation=cv2.INTER_NEAREST)
+                resized_masks_list.append(mask_resized.astype(bool)) # Append boolean mask
+            return np.array(resized_masks_list) # Return numpy array [N, target_h, target_w]
+
+        # --- Perform resizing ---
         resize_start_time = time.time()
         damage_masks_np = resize_masks(damage_masks_raw, img_h, img_w)
         part_masks_np = resize_masks(part_masks_raw, img_h, img_w)
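Because the SyntaxError this commit fixes was inside resize_masks, the corrected logic is easy to sanity-check outside the Space. The sketch below condenses the function from the hunk above (logger calls dropped, so it is not byte-for-byte the app.py version) and runs it on a dummy mask tensor:

```python
# Standalone sanity check, condensed from the corrected resize_masks above.
import cv2
import numpy as np
import torch

def resize_masks(masks_tensor, target_h, target_w):
    """Resize a [N, H, W] (or single [H, W]) boolean mask tensor to (target_h, target_w)."""
    masks_np_bool = masks_tensor.cpu().numpy().astype(bool)  # move to CPU before resizing
    if masks_np_bool.shape[0] == 0:
        return np.array([])
    if masks_np_bool.ndim == 3 and masks_np_bool.shape[1:] == (target_h, target_w):
        return masks_np_bool  # already the right size
    if masks_np_bool.ndim == 2:  # single mask [H, W] -> [1, H, W]
        masks_np_bool = np.expand_dims(masks_np_bool, axis=0)
    if masks_np_bool.ndim != 3:
        return np.array([])  # unexpected shape
    resized = [
        cv2.resize(m.astype(np.uint8), (target_w, target_h),
                   interpolation=cv2.INTER_NEAREST).astype(bool)
        for m in masks_np_bool
    ]
    return np.array(resized)

# Two dummy 160x160 masks stand in for raw YOLOv8 output; resize them to a 480x640 frame.
dummy = torch.zeros((2, 160, 160), dtype=torch.bool)
dummy[0, 10:60, 20:80] = True
out = resize_masks(dummy, 480, 640)
print(out.shape, out.dtype)  # expected: (2, 480, 640) bool
```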
@@ -251,7 +278,6 @@ def process_car_image(image_np_bgr, damage_threshold, part_threshold):
         if part_result.masks is not None and im_tensor_gpu_for_annotator is not None:
             try:
                 colors_part = [(0, random.randint(100, 200), 0) for _ in part_classes_ids_cpu]
-                # Pass CORRECT image tensor (HWC on GPU) to annotator.masks
                 annotator.masks(part_masks_raw, colors=colors_part, im_gpu=im_tensor_gpu_for_annotator, alpha=0.3)
                 for box, cls_id in zip(part_boxes_xyxy_cpu, part_classes_ids_cpu):
                     try: label = f"{CAR_PART_CLASSES[cls_id]}"; annotator.box_label(box, label=label, color=(0, 200, 0))
@@ -264,7 +290,6 @@ def process_car_image(image_np_bgr, damage_threshold, part_threshold):
         if damage_result.masks is not None and im_tensor_gpu_for_annotator is not None:
             try:
                 colors_dmg = [(random.randint(100, 200), 0, 0) for _ in damage_classes_ids_cpu]
-                # Pass CORRECT image tensor (HWC on GPU) to annotator.masks
                 annotator.masks(damage_masks_raw, colors=colors_dmg, im_gpu=im_tensor_gpu_for_annotator, alpha=0.4)
                 for box, cls_id in zip(damage_boxes_xyxy_cpu, damage_classes_ids_cpu):
                     try: label = f"{DAMAGE_CLASSES[cls_id]}"; annotator.box_label(box, label=label, color=(200, 0, 0))
@@ -281,7 +306,6 @@ def process_car_image(image_np_bgr, damage_threshold, part_threshold):
         logger.error(f"Error during combined processing: {e}", exc_info=True)
         traceback.print_exc()
         final_assignments.append("Error during segmentation/processing.")
-        # annotated_image_bgr remains the original copy in case of error
 
     # --- Prepare output ---
     assignment_text = "\n".join(final_assignments) if final_assignments else "No damage assignments generated."
 