# coding=utf-8
# Copyright 2025 AIDAS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

os.environ["TOKENIZERS_PARALLELISM"] = "true"

import cv2
import numpy as np
import torch
import wandb
from PIL import Image
from tqdm import tqdm

from models import MAGVITv2, MMadaConfig, MMadaModelLM
from training.prompting_utils import UniversalPrompting
from training.utils import get_config, flatten_omega_conf, image_transform
from transformers import AutoTokenizer, AutoConfig


def resize_vocab(model, config):
    print(f"Resizing token embeddings to {config.new_vocab_size}")
    model.resize_token_embeddings(config.new_vocab_size)


def get_vq_model_class(model_type):
    if model_type == "magvitv2":
        return MAGVITv2
    else:
        raise ValueError(f"model_type {model_type} not supported.")


def inference_video():
    pass


def load_video(
    video_path,
    config,
    uni_prompting,
    vq_model=None,
    device='cuda',
    sample='uniform',
    num_frames=8,
):
    """
    Args:
        video_path: path to the video file.
    Returns:
        (sampled_frames, video_tokens): the sampled frames as a list of
        [1, C, H, W] tensors, and the concatenated VQ token ids of shape
        [1, num_frames * seq_len].
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise IOError(f"Could not open video file {video_path}")
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV decodes to BGR; convert to RGB for PIL.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frames.append(Image.fromarray(frame))
    cap.release()

    total_frames = len(frames)
    if total_frames < num_frames:
        raise ValueError(f"Video {video_path} has fewer than {num_frames} frames, got {total_frames}.")
    if sample == 'uniform':
        # Uniformly spaced indices over the whole clip, e.g. 100 frames with
        # num_frames=8 gives [0, 14, 28, 42, 56, 70, 84, 99].
        indices = np.linspace(0, total_frames - 1, num_frames).astype(int)
    elif sample == 'random':
        raise NotImplementedError("Random sampling not implemented yet.")
    else:
        raise ValueError(f"Sampling method {sample} not supported.")
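    # Encode each sampled frame with the frozen VQ model and offset the codes by the
    # text-tokenizer vocabulary size, so image token ids occupy a range disjoint from text ids.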
    sampled_frames = []
    sampled_frames_tokens = []
    for idx in indices:
        frame = frames[idx]
        frame = image_transform(frame, resolution=config.dataset.params.resolution).to(device)
        sampled_frames.append(frame.unsqueeze(0))
        sampled_frames_tokens.append(
            vq_model.get_code(frame.unsqueeze(0)) + len(uni_prompting.text_tokenizer)
        )
    # num_frames * [1, seq_len] -> [1, num_frames * seq_len]
    video_tokens = torch.cat(sampled_frames_tokens, dim=1)
    return sampled_frames, video_tokens


def main():
    config = get_config()
    resume_wandb_run = config.wandb.resume
    run_id = config.wandb.get("run_id", None)
    if run_id is None:
        resume_wandb_run = False
        run_id = wandb.util.generate_id()
        config.wandb.run_id = run_id
    wandb_config = {k: v for k, v in flatten_omega_conf(config, resolve=True)}
    wandb.init(
        project="demo",
        name=config.experiment.name + '_video',
        config=wandb_config,
    )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer = AutoTokenizer.from_pretrained(config.model.mmada.pretrained_model_path, padding_side="left")
    uni_prompting = UniversalPrompting(
        tokenizer,
        max_text_len=config.dataset.preprocessing.max_seq_length,
        special_tokens=("<|soi|>", "<|eoi|>", "<|sov|>", "<|eov|>", "<|t2i|>", "<|mmu|>", "<|t2v|>", "<|v2v|>", "<|lvg|>", "<|v2t|>"),
        ignore_id=-100,
        cond_dropout_prob=config.training.cond_dropout_prob,
        use_reserved_token=True,
    )

    vq_model = get_vq_model_class(config.model.vq_model.type)
    vq_model = vq_model.from_pretrained(config.model.vq_model.vq_model_name).to(device)
    vq_model.requires_grad_(False)
    vq_model.eval()
    train_step = config.step
    trained_checkpoint_path = f"/home/work/AIDAS/ckpts/omada/omada-training-stage1/checkpoint-{train_step}/unwrapped_model"
    model = MMadaModelLM.from_pretrained(
        trained_checkpoint_path,
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
        config="/home/work/AIDAS/ckpts/omada/omada-training-stage1/config.json",
    )
    # model = MMadaModelLM.from_pretrained("Gen-Verse/MMaDA-8B-MixCoT", trust_remote_code=True, torch_dtype=torch.bfloat16)
    model.to(device)
    mask_token_id = model.config.mask_token_id
    # Sampling knobs (currently not forwarded to mmu_generate below).
    temperature = 0.8  # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
    top_k = 1  # retain only the top_k most likely tokens, clamp others to have 0 probability

    file_list = os.listdir(config.video_image_root)
    file_list = [f for f in file_list if f.lower().endswith('.mp4')]
    responses = ['' for _ in range(len(file_list))]
    videos = []
    # Multiple questions can be passed in a single config string, separated by ' *** '.
    config.question = config.question.split(' *** ')
    for i, file_name in enumerate(tqdm(file_list)):
        video_path = os.path.join(config.video_image_root, file_name)
        print("current video path:", video_path)
        video_frames, video_tokens = load_video(
            video_path,
            config,
            uni_prompting,
            vq_model=vq_model,
            device=device,
            sample='uniform',
            num_frames=8,
        )
        print("video tokens shape:", video_tokens.shape)
        batch_size = 1
        for question in config.question:
            input_ids = uni_prompting.text_tokenizer(
                ['<|start_header_id|>user<|end_header_id|>\n' + question + '<eot_id><|start_header_id|>assistant<|end_header_id|>\n']
            )['input_ids']
            input_ids = torch.tensor(input_ids).to(device)
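            # Prompt layout: <|v2t|> <|soi|> [video VQ tokens] <|eoi|> <|sot|> [chat-formatted question].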
            input_ids = torch.cat([
                (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|v2t|>']).to(device),
                (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|soi|>']).to(device),
                video_tokens,
                (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|eoi|>']).to(device),
                (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|sot|>']).to(device),
                input_ids,
            ], dim=1).long()
| print(f"input_ids shape: {input_ids.shape}") | |
            output_ids = model.mmu_generate(input_ids, max_new_tokens=128, steps=128, block_length=128)
            text = uni_prompting.text_tokenizer.batch_decode(output_ids[:, input_ids.shape[1]:], skip_special_tokens=True)
            print(text)
            responses[i] += 'User: ' + question + '\nAnswer: ' + text[0] + '\n'
        # images = torch.cat(images, dim=0)
        # images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
        # images *= 255.0
        # images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
        # pil_images = [Image.fromarray(image) for image in images]
        # wandb_images = [wandb.Image(image, caption=responses[i]) for i, image in enumerate(pil_images)]
        # wandb.log({"multimodal understanding": wandb_images}, step=0)


if __name__ == '__main__':
    main()
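# Invocation sketch (an assumption, not taken from this file): get_config() in MMaDA-style
# repos typically reads an OmegaConf YAML plus key=value CLI overrides, so a run might look like
#
#   python inference_video.py config=configs/demo.yaml step=100000 \
#       video_image_root=/path/to/videos \
#       question="What is happening in this video? *** Describe the main object."
#
# The script name, config file, step value, and paths above are placeholders.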