File size: 3,035 Bytes
21fc01c
 
 
ff2289d
0a474d0
ff2289d
 
 
21fc01c
 
 
 
 
ff2289d
21fc01c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ff2289d
 
21fc01c
 
 
 
 
 
 
ff2289d
21fc01c
 
 
 
ff2289d
21fc01c
 
 
 
ff2289d
 
21fc01c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ff2289d
21fc01c
 
 
 
ff2289d
 
21fc01c
 
 
ff2289d
21fc01c
 
 
 
 
ff2289d
 
 
21fc01c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
#!/usr/bin/env python3
"""Process raw banking data into three task-specific subsets."""

import json
import re
from collections import Counter
from pathlib import Path


def extract_labels_and_text(line):
    """Pull aspect labels, their sentiments, and the cleaned text from a line.

    Lines carry markers of the form ``__label__ASPECT#sentiment``; every
    marker is collected, then stripped out to leave the bare text.

    Returns:
        ``(aspects, sentiments, text)`` — two parallel lists plus the cleaned
        text — or ``None`` when the line has no markers or no remaining text.
    """
    label_re = r"__label__([A-Z_]+)#(positive|negative|neutral)"
    found = re.findall(label_re, line)
    cleaned = re.sub(r"__label__[A-Z_]+#(positive|negative|neutral)\s*", "", line).strip()

    if not found or not cleaned:
        return None

    return [pair[0] for pair in found], [pair[1] for pair in found], cleaned


def get_overall_sentiment(sentiments):
    """Collapse per-aspect sentiments into one overall sentiment.

    Args:
        sentiments: Non-empty list of sentiment strings
            ("positive" / "negative" / "neutral").

    Returns:
        The unanimous sentiment when all entries agree; otherwise the most
        frequent one. Ties resolve to the sentiment seen first, because
        ``Counter.most_common`` sorts stably over insertion order — the same
        tie-break the previous hand-rolled dict + ``max`` produced.
    """
    if len(set(sentiments)) == 1:
        return sentiments[0]

    # Counter replaces the manual tally loop (idiomatic stdlib frequency count).
    return Counter(sentiments).most_common(1)[0][0]


def save_subset(data, output_path):
    """Write *data* as JSON Lines (one object per line) to *output_path*.

    Parent directories are created on demand; the file is overwritten.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    serialized = [json.dumps(record, ensure_ascii=False) for record in data]
    with open(output_path, "w", encoding="utf-8") as sink:
        sink.writelines(line + "\n" for line in serialized)


def process_file(input_file, output_dir):
    """Convert one labeled raw file into the three task-specific subsets.

    Each kept line contributes one record to each subset; results land in
    ``<output_dir>/<subset>/<split>.jsonl``.
    """
    subsets = {"classification": [], "sentiment": [], "aspect_sentiment": []}
    # The split is inferred from the path: any path containing "train"
    # is treated as the training split, everything else as test.
    split_name = "train" if "train" in str(input_file) else "test"

    with open(input_file, encoding="utf-8") as handle:
        for raw_line in handle:
            parsed = extract_labels_and_text(raw_line.strip())
            if parsed is None:
                continue
            aspects, sentiments, text = parsed

            # Single-label classification keeps only the first aspect.
            subsets["classification"].append({"text": text, "label": aspects[0]})

            subsets["sentiment"].append({
                "text": text,
                "sentiment": get_overall_sentiment(sentiments),
            })

            subsets["aspect_sentiment"].append({
                "text": text,
                "aspects": [
                    {"aspect": aspect, "sentiment": sentiment}
                    for aspect, sentiment in zip(aspects, sentiments, strict=False)
                ],
            })

    base = Path(output_dir)
    for subset_name, subset_data in subsets.items():
        destination = base / subset_name / f"{split_name}.jsonl"
        save_subset(subset_data, destination)
        print(f"✅ {subset_name}/{split_name}.jsonl: {len(subset_data)} examples")


def main():
    """Entry point: turn both raw splits into the task-specific subsets."""
    print("🔄 Processing banking data...")

    for split_file in ("raw_data/train.txt", "raw_data/test.txt"):
        process_file(split_file, "data")

    print("\n🎉 Processing complete!")
    print("💡 Run 'python validate.py' to test the dataset")


# Allow importing this module (e.g. for reuse of the parsers) without
# triggering the full processing pipeline.
if __name__ == "__main__":
    main()