update gradio theme
- app.py +9 -4
- example_images/MP5,_high_quality,_ultra_realistic.webp +0 -0
- example_images/a_cat_dressed_as_the_pope.webp +0 -0
- example_images/a_cute_little_frog_comicbook_style.webp +0 -0
- example_images/a_purple_winter_jacket.webp +0 -0
- example_images/retro_pc_photorealistic_high_detailed.webp +0 -0
- example_images/stratocaster_guitar_pixar_style.webp +0 -0
- utils.py +0 -23
app.py
CHANGED
@@ -114,8 +114,7 @@ def run_video(image_path: str,
 
 def launch(port):
     with gr.Blocks(
-        title="TGS - Demo",
-        theme=gr.themes.Monochrome()
+        title="TGS - Demo"
     ) as demo:
         with gr.Row(variant='panel'):
             gr.Markdown(HEADER)
@@ -134,7 +133,13 @@ def launch(port):
                     "example_images/a_pikachu_with_smily_face.webp",
                     "example_images/an_otter_wearing_sunglasses.webp",
                     "example_images/lumberjack_axe.webp",
-                    "example_images/medieval_shield.webp"
+                    "example_images/medieval_shield.webp",
+                    "example_images/a_cat_dressed_as_the_pope.webp",
+                    "example_images/a_cute_little_frog_comicbook_style.webp",
+                    "example_images/a_purple_winter_jacket.webp",
+                    "example_images/MP5,_high_quality,_ultra_realistic.webp",
+                    "example_images/retro_pc_photorealistic_high_detailed.webp",
+                    "example_images/stratocaster_guitar_pixar_style.webp"
                 ],
                 inputs=[input_image],
                 cache_examples=False,
@@ -145,7 +150,7 @@ def launch(port):
         with gr.Column(scale=1):
             with gr.Row(variant='panel'):
                 seg_image = gr.Image(value=None, type="filepath", height=256, width=256, image_mode="RGBA", label="Segmented Image", interactive=False)
-                output_video = gr.Video(value=None, label="Video", height=256, autoplay=True)
+                output_video = gr.Video(value=None, label="Rendered Video", height=256, autoplay=True)
                 output_3dgs = Model3DGS(value=None, label="3DGS")
 
         img_run_btn.click(
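For readers skimming the diff, the sketch below shows in outline (not verbatim from this Space) how the pieces touched above fit together in a Gradio Blocks app: the Blocks title with the Monochrome theme dropped, the expanded Examples list, and the segmented-image / video output row. The run_segmentation handler, the Run button, and the port are placeholders; the Space's custom Model3DGS component is omitted.

# Hypothetical outline of the layout this commit touches; only the gr.* calls
# shown are standard Gradio API, all names marked as placeholders are assumptions.
import gradio as gr

def run_segmentation(image_path):
    # Placeholder for the Space's real segmentation step.
    return image_path

def launch(port):
    with gr.Blocks(
        title="TGS - Demo"  # theme=gr.themes.Monochrome() is removed by this commit
    ) as demo:
        input_image = gr.Image(type="filepath", label="Input Image")
        gr.Examples(
            examples=[
                "example_images/medieval_shield.webp",
                "example_images/a_cat_dressed_as_the_pope.webp",
            ],
            inputs=[input_image],
            cache_examples=False,
        )
        with gr.Row(variant='panel'):
            seg_image = gr.Image(type="filepath", height=256, width=256,
                                 image_mode="RGBA", label="Segmented Image",
                                 interactive=False)
            output_video = gr.Video(label="Rendered Video", height=256, autoplay=True)
        img_run_btn = gr.Button("Run")  # placeholder wiring
        img_run_btn.click(fn=run_segmentation, inputs=[input_image], outputs=[seg_image])
    demo.launch(server_port=port)

if __name__ == "__main__":
    launch(7860)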
example_images/MP5,_high_quality,_ultra_realistic.webp
ADDED

example_images/a_cat_dressed_as_the_pope.webp
ADDED

example_images/a_cute_little_frog_comicbook_style.webp
ADDED

example_images/a_purple_winter_jacket.webp
ADDED

example_images/retro_pc_photorealistic_high_detailed.webp
ADDED

example_images/stratocaster_guitar_pixar_style.webp
ADDED
utils.py
CHANGED
@@ -12,7 +12,6 @@ from tqdm import tqdm
 
 
 def sam_init(sam_checkpoint, device_id=0):
-    # sam_checkpoint = os.path.join(os.path.dirname(__file__), "./sam_vit_h_4b8939.pth")
     model_type = "vit_h"
 
     device = "cuda:{}".format(device_id) if torch.cuda.is_available() else "cpu"
@@ -26,7 +25,6 @@ def sam_out_nosave(predictor, input_image, *bbox_sliders):
     bbox = np.array(bbox_sliders)
     image = np.asarray(input_image)
 
-    start_time = time.time()
     predictor.set_image(image)
 
     masks_bbox, scores_bbox, logits_bbox = predictor.predict(
@@ -73,11 +71,6 @@ def image_preprocess(input_image, save_path, lower_contrast=True, rescale=True):
     rgba = Image.fromarray(padded_image).resize((256, 256), Image.LANCZOS)
     rgba.save(save_path)
 
-    # rgba_arr = np.array(rgba) / 255.0
-    # rgb = rgba_arr[...,:3] * rgba_arr[...,-1:] + (1 - rgba_arr[...,-1:])
-    # return Image.fromarray((rgb * 255).astype(np.uint8))
-
-
 def pred_bbox(image):
     image_nobg = remove(image.convert("RGBA"), alpha_matting=True)
     alpha = np.asarray(image_nobg)[:, :, -1]
@@ -117,19 +110,3 @@ def todevice(vars, device="cuda"):
         return vars
     else:
         raise NotImplementedError("invalid input type {} for tensor2numpy".format(type(vars)))
-
-
-def download_checkpoint(url, save_path):
-    try:
-        with urllib.request.urlopen(url) as response, open(save_path, 'wb') as file:
-            file_size = int(response.info().get('Content-Length', -1))
-            chunk_size = 8192
-            num_chunks = file_size // chunk_size if file_size > chunk_size else 1
-
-            with tqdm(total=file_size, unit='B', unit_scale=True, desc='Downloading', ncols=100) as pbar:
-                for chunk in iter(lambda: response.read(chunk_size), b''):
-                    file.write(chunk)
-                    pbar.update(len(chunk))
-
-            print(f"Checkpoint downloaded and saved to: {save_path}")
-    except Exception as e:
-        print(f"Error downloading checkpoint: {e}")
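The utils.py changes above only delete dead code around sam_init and sam_out_nosave. As a reference for how those helpers are typically used with the segment_anything package, here is a hedged, self-contained sketch; the checkpoint filename comes from the comment removed at the old line 15, while the image path and box coordinates are illustrative assumptions.

# Illustrative use of SAM mirroring utils.py's sam_init / sam_out_nosave flow.
# The checkpoint location and box coordinates are assumptions, not from this commit.
import numpy as np
import torch
from PIL import Image
from segment_anything import sam_model_registry, SamPredictor

def sam_init(sam_checkpoint, device_id=0):
    # Build a ViT-H SAM model and wrap it in a predictor, on GPU if available.
    model_type = "vit_h"
    device = "cuda:{}".format(device_id) if torch.cuda.is_available() else "cpu"
    sam = sam_model_registry[model_type](checkpoint=sam_checkpoint).to(device)
    return SamPredictor(sam)

predictor = sam_init("sam_vit_h_4b8939.pth")  # filename from the removed comment
image = np.asarray(Image.open("example_images/lumberjack_axe.webp").convert("RGB"))
predictor.set_image(image)

# x0, y0, x1, y1 box prompt, as sam_out_nosave builds from its bbox sliders.
bbox = np.array([50, 50, 400, 400])
masks_bbox, scores_bbox, logits_bbox = predictor.predict(box=bbox, multimask_output=True)
print(masks_bbox.shape, scores_bbox)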