"""Create a hard multiple choice subset of the ImageNet validation split based on human accuracy annotation data."""

from typing import Dict
import argparse
import json
import pickle
import numpy as np
import scipy.io


def get_imagenet_labels() -> Dict[str, str]:
    """Return ground truth wnids."""
    with open("ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt") as fp:
        ilsvrc_idxs = [int(line.strip()) for line in fp]
    ilsvrc_metadata = scipy.io.loadmat("ILSVRC2012_devkit_t12/data/meta.mat", simplify_cells=True)
    ilsvrc_idx2wnid = {
        synset['ILSVRC2012_ID']: synset['WNID']
        for synset in ilsvrc_metadata['synsets']
    }
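    # Validation filenames follow the pattern ILSVRC2012_val_NNNNNNNN.JPEG, with
    # 1-based, zero-padded indices matching the order of the ground-truth file.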
    return {
        f"ILSVRC2012_val_{img_id:0>8}.JPEG": ilsvrc_idx2wnid[idx]
        for img_id, idx in enumerate(ilsvrc_idxs, start=1)
    }


def construct_ancestor_map(wnids: list[str]) -> np.ndarray:
    """Construct a matrix of deepest-common-ancestor depths for all pairs of ImageNet classes."""
    # The ImageNet hierarchy is shallow, so depths fit comfortably in uint8.
    ancestor_map = np.zeros((len(wnids), len(wnids)), dtype=np.uint8)
    def iterate_postorder(node: dict, depth: int) -> list[int]:
        """Return the leaf indices under `node`, filling in ancestor depths along the way."""
        # Leaf node: the diagonal stores the leaf's own depth.
        if node['children'] is None:
            idx = wnids.index(node['wnid'])
            ancestor_map[idx, idx] = depth
            return [idx]
        # Iterate over children
        all_leaves = [
            iterate_postorder(child, depth + 1)
            for child in node['children'].values()
        ]
        # Leaves in different child branches have this node as their deepest
        # common ancestor, so record its depth for every such pair of leaves.
        for branch in range(len(all_leaves)):
            for other in range(len(all_leaves)):
                if other == branch:
                    continue
                for leaf_a in all_leaves[branch]:
                    for leaf_b in all_leaves[other]:
                        ancestor_map[leaf_a, leaf_b] = max(ancestor_map[leaf_a, leaf_b], depth)
        return sum(all_leaves, [])
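
    # Assumed shape of imagenet_hierarchy.json, inferred from the traversal above
    # rather than a documented spec: {"tree": <node>}, where every <node> has a
    # "children" mapping of sub-nodes (null for leaves) and each leaf carries a "wnid".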
    with open("imagenet_hierarchy.json") as fp:
        root = json.load(fp)['tree']
    iterate_postorder(root, 0)
    return ancestor_map
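
# construct_ancestor_map returns larger values for semantically closer class pairs:
# e.g. if classes A and B first share an ancestor at depth 10 while A and C only
# share one at depth 2, then ancestor_map[A, B] == 10 and ancestor_map[A, C] == 2
# (both the classes and depths here are illustrative).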


def get_close_examples(num_choices: int = 4, seed: int | np.random.Generator | None = None) -> list[dict]:
    """Construct semi-hard MCQA examples using the ImageNet hierarchy.

    Wrong choices are drawn from the classes closest to the ground-truth class in
    the hierarchy (deepest common ancestor first), skipping any class annotators
    marked as correct for the image. The correct answer is always the first
    element of 'choices'.
    """
    rng = np.random.default_rng(seed)
    with open("imagenet_wnids.txt") as fp:
        wnids = [line.strip() for line in fp]
    gt_labels = get_imagenet_labels()
    ancestor_map = construct_ancestor_map(wnids)

    # Per-image human annotations: which labels annotators marked correct or wrong.
    with open("human_accuracy_annotations.pkl", "rb") as fp:
        annotation_data = pickle.load(fp)
    examples = []
    for imgname, annot in annotation_data['initial_annots'].items():
        # Keep only ILSVRC validation images whose ground-truth label was not
        # marked as wrong by the human annotators.
        if not imgname.startswith("ILSVRC2012"):
            continue
        gt_wnid = gt_labels[imgname]
        if gt_wnid in annot.get('wrong', []):
            continue
        # Fill in wrong answer choices from the hierarchy, closest classes
        # (deepest common ancestor) first.
        gt_idx = wnids.index(gt_wnid)
        wrong_choices = []
        for depth in range(int(ancestor_map[gt_idx, gt_idx]) - 1, -1, -1):
            if len(wrong_choices) >= num_choices - 1:
                break
            # Candidate classes whose deepest common ancestor with the ground
            # truth sits at this depth, excluding labels annotators marked correct.
            neighbors = [
                wnids[idx]
                for idx in np.nonzero(ancestor_map[gt_idx] == depth)[0]
                if wnids[idx] not in annot.get('correct', [])
            ]
            needed = num_choices - 1 - len(wrong_choices)
            if needed < len(neighbors):
                wrong_choices += list(rng.choice(neighbors, size=needed, replace=False))
            else:
                wrong_choices += neighbors
        examples.append({
            'image': imgname,
            'choices': [gt_wnid] + list(wrong_choices),
            'correct_answer': gt_wnid,
        })
    return examples


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--n-choices", "-n", type=int, required=True,
                        help="Total number of answer choices per example, including the correct one.")
    parser.add_argument("--output", "-o", type=str, default=None,
                        help="Path of the output JSON file; if omitted, only the example count is printed.")
    parser.add_argument("--seed", type=int, default=42,
                        help="Random seed used when sampling wrong answer choices.")
    args = parser.parse_args()
    dataset = get_close_examples(args.n_choices, seed=args.seed)
    print(f"No. of examples: {len(dataset)}")
    if args.output:
        with open(args.output, "w") as fp:
            json.dump(dataset, fp, indent=2)
        print(f"Saved to '{args.output}'")