# vlmfinegrained/create_hard_imagenetv2.py
"""Create a hard multiple choice subset of the ImageNet validation split based on human accuracy annotation data."""
from typing import Dict, Tuple
import argparse
import glob
import json
import pickle
import numpy as np
def get_imagenet_labels() -> Tuple[Dict[str, str], Dict[str, str]]:
    """Return mappings from ImageNetV2 image ID to ground-truth wnid and to relative image path."""
images = glob.glob("*/*.jpeg", root_dir="/gscratch/efml/djghosh/temp_data/imagenetv2/inv2-all")
with open("imagenet_wnids.txt") as fp:
wnids = [line.strip() for line in fp]
imagenet_labels = {}
imagenet_paths = {}
for imgpath in images:
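        # Paths look like "<class index>/<image id>.jpeg", where <class index>
        # is a 0-based line number into imagenet_wnids.txt.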
label_id, image_id = imgpath.split(".jpeg")[0].split("/")
imagenet_labels[image_id] = wnids[int(label_id)]
imagenet_paths[image_id] = imgpath
return imagenet_labels, imagenet_paths
def get_hard_examples(num_choices: int = 4, seed: int | np.random.Generator | None = None):
    """Construct hard MCQA examples where all wrong choices come from human 'wrong' annotations."""
rng = np.random.default_rng(seed)
gt_labels, gt_paths = get_imagenet_labels()
with open("human_accuracy_annotations.pkl", "rb") as fp:
annotation_data = pickle.load(fp)
examples = []
for imgname, annot in annotation_data['initial_annots'].items():
if imgname in gt_labels and len(annot.get('wrong', [])) >= num_choices - 1:
if gt_labels[imgname] not in annot['wrong']:
wrong_choices = rng.choice(annot['wrong'], size=num_choices - 1, replace=False)
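                # Note: the correct answer is always listed first in 'choices';
                # downstream consumers are presumably expected to shuffle.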
examples.append({
'image': f"imagesv2/{gt_paths[imgname]}",
'choices': [gt_labels[imgname]] + list(wrong_choices),
'correct_answer': gt_labels[imgname]
})
return examples
def construct_ancestor_map(wnids: list):
"""Construct map of deepest common ancestor for all pairs of ImageNet classes."""
ancestor_map = np.zeros((len(wnids), len(wnids)), dtype=np.uint8)
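    # ancestor_map[i, j] will hold the depth of the deepest common ancestor of
    # classes i and j; the diagonal holds each leaf's own depth in the tree.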
def iterate_postorder(node: dict, depth: int):
# Leaf node
if node['children'] is None:
idx = wnids.index(node['wnid'])
ancestor_map[idx, idx] = depth
return [idx]
# Iterate over children
all_leaves = [
iterate_postorder(child, depth + 1)
for child in node['children'].values()
]
# Connect branches together
for branch in range(len(all_leaves)):
for other in range(len(all_leaves)):
if other == branch:
continue
for leaf_a in all_leaves[branch]:
for leaf_b in all_leaves[other]:
ancestor_map[leaf_a, leaf_b] = max(ancestor_map[leaf_a, leaf_b], depth)
return sum(all_leaves, [])
with open("imagenet_hierarchy.json") as fp:
root = json.load(fp)['tree']
iterate_postorder(root, 0)
return ancestor_map
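# Example: for a tiny hierarchy root -> {animal -> {dog, cat}, plant -> {fern}}
# (hypothetical classes), dog and cat share 'animal' at depth 1, so
# ancestor_map[dog, cat] == 1, while ancestor_map[dog, fern] == 0 (root only).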
def get_filled_hard_examples(num_choices: int = 4, min_hard: int = 1, seed: int | np.random.Generator | None = None):
"""
Construct hard MCQA examples using human annotation data.
    Examples with fewer than the required number of 'wrong' annotations have their remaining
    answer choices filled in from the class hierarchy, preferring classes close to the ground truth.
"""
rng = np.random.default_rng(seed)
with open("imagenet_wnids.txt") as fp:
wnids = [line.strip() for line in fp]
gt_labels, gt_paths = get_imagenet_labels()
ancestor_map = construct_ancestor_map(wnids)
with open("human_accuracy_annotations.pkl", "rb") as fp:
annotation_data = pickle.load(fp)
examples = []
for imgname, annot in annotation_data['initial_annots'].items():
if imgname in gt_labels and len(annot.get('wrong', [])) >= min_hard:
if gt_labels[imgname] not in annot['wrong']:
# Get hard wrong answer choices from human annotation data
if num_choices - 1 < len(annot['wrong']):
wrong_choices = list(rng.choice(annot['wrong'], size=num_choices - 1, replace=False))
else:
wrong_choices = list(annot['wrong'])
# Fill in remainder from hierarchy
gt_idx = wnids.index(gt_labels[imgname])
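                # Walk outward from the deepest possible shared ancestor: classes
                # whose common ancestor with the ground truth is deeper are
                # semantically closer, hence harder distractors, so they are
                # preferred when filling in the remaining choices.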
                for depth in range(int(ancestor_map[gt_idx, gt_idx]) - 1, -1, -1):
if len(wrong_choices) >= num_choices - 1:
break
neighbors = [
wnids[idx]
for idx in np.nonzero(ancestor_map[gt_idx] == depth)[0]
if wnids[idx] not in annot['wrong'] and wnids[idx] not in annot.get('correct', [])
]
if num_choices - 1 - len(wrong_choices) < len(neighbors):
wrong_choices += list(rng.choice(neighbors, size=num_choices - 1 - len(wrong_choices), replace=False))
else:
wrong_choices += neighbors
examples.append({
'image': f"imagesv2/{gt_paths[imgname]}",
'choices': [gt_labels[imgname]] + list(wrong_choices),
'correct_answer': gt_labels[imgname]
})
return examples
def get_close_examples(num_choices: int = 4, seed: int | np.random.Generator | None = None):
"""
Construct close MCQA examples using only ImageNet/WordNet hierarchy data.
"""
rng = np.random.default_rng(seed)
with open("imagenet_wnids.txt") as fp:
wnids = [line.strip() for line in fp]
gt_labels, gt_paths = get_imagenet_labels()
ancestor_map = construct_ancestor_map(wnids)
with open("human_accuracy_annotations.pkl", "rb") as fp:
annotation_data = pickle.load(fp)
examples = []
for imgname, annot in annotation_data['initial_annots'].items():
if imgname in gt_labels:
if gt_labels[imgname] not in annot.get('wrong', []):
wrong_choices = []
# Fill in remainder from hierarchy
gt_idx = wnids.index(gt_labels[imgname])
                for depth in range(int(ancestor_map[gt_idx, gt_idx]) - 1, -1, -1):
if len(wrong_choices) >= num_choices - 1:
break
neighbors = [
wnids[idx]
for idx in np.nonzero(ancestor_map[gt_idx] == depth)[0]
if wnids[idx] not in annot.get('correct', [])
]
if num_choices - 1 - len(wrong_choices) < len(neighbors):
wrong_choices += list(rng.choice(neighbors, size=num_choices - 1 - len(wrong_choices), replace=False))
else:
wrong_choices += neighbors
examples.append({
'image': f"imagesv2/{gt_paths[imgname]}",
'choices': [gt_labels[imgname]] + list(wrong_choices),
'correct_answer': gt_labels[imgname]
})
return examples
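# Example usage (hypothetical output path):
#   python create_hard_imagenetv2.py -n 4 --fill -m 2 -o hard_imagenetv2_n4.json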
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--n-choices", "-n", type=int, required=True, help="Total number of answer choices per question")
    parser.add_argument("--output", "-o", type=str, default=None, help="Path to write the dataset as JSON")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--fill", "-f", action="store_true", help="Fill in missing wrong choices from the class hierarchy")
    group.add_argument("--no-hard", action="store_true", help="Use only hierarchy-based (close) choices, no human annotations")
    parser.add_argument("--min-hard", "-m", type=int, default=1, help="Minimum number of annotated wrong choices required (with --fill)")
    parser.add_argument("--seed", type=int, default=42, help="Random seed for choice sampling")
args = parser.parse_args()
if args.fill:
dataset = get_filled_hard_examples(args.n_choices, args.min_hard, seed=args.seed)
elif args.no_hard:
dataset = get_close_examples(args.n_choices, seed=args.seed)
else:
dataset = get_hard_examples(args.n_choices, seed=args.seed)
print(f"No. of examples: {len(dataset)}")
if args.output:
with open(args.output, "w") as fp:
json.dump(dataset, fp, indent=2)
print(f"Saved to '{args.output}'")