Datasets v2: filter out link texts that are dictionary words

Files changed:
- .gitignore +1 -0
- 2018thresh10dev.csv +2 -2
- 2018thresh10test.csv +2 -2
- 2018thresh10train.csv +2 -2
- 2018thresh20dev.csv +2 -2
- 2018thresh20test.csv +2 -2
- 2018thresh20train.csv +2 -2
- 2018thresh5dev.csv +2 -2
- 2018thresh5test.csv +2 -2
- 2018thresh5train.csv +2 -2
- generate_wes_data.py +16 -6
.gitignore (ADDED)

```diff
@@ -0,0 +1 @@
+.venv/
```
2018thresh10dev.csv (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6690985884c8b4a58d0377ef2152bb1fbb233355df68d662539da856e697a414
+size 51406267
```
2018thresh10test.csv (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:87ba47d64b3223a9ceda6c4c414312c4c8fa412c023d63ba8495909206f908fd
+size 34147126
```
2018thresh10train.csv (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6a4c19706eb30b58641b06fa0e5efe53cf545f6bf49e3b5a30e62d96243517c8
+size 255547715
```
2018thresh20dev.csv (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e80d37c92672e2b2041d47035c737db5c7dc0a9c5938ede28c46cdc25321be5a
+size 37218821
```
2018thresh20test.csv (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:7204244b6dc46350616835b741f925b98f50f6e9b4d5585d7815f226d5afde95
+size 24867451
```
2018thresh20train.csv (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9fcda4ee523a653d09f34c72e2134d56466d145c56a95596468ca7a1f4f9bf9c
+size 185091962
```
2018thresh5dev.csv (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:abf0f45844c3aa91747be22502d9d91570365aa0d085a1faaec17a04a84028c5
+size 67296490
```
2018thresh5test.csv (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:7d9c39063394851308a52c71de4cae38386bfe7e16f73a355221dafe46a74b0f
+size 44759191
```
2018thresh5train.csv (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e7b71f64c53d1aea33ec8bd6067b99c766625897904fd7cccdd82d60eac17b5c
+size 334315073
```
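The nine data files above are Git LFS pointer files, so only the pointer (oid and size) changes in this commit; the regenerated CSVs themselves live in LFS. As a minimal sketch of inspecting one regenerated split (assumes the repo is cloned and `git lfs pull` has been run; the column names come from generate_wes_data.py below):

```python
import pandas as pd

# After `git lfs pull`, the pointer file is replaced by the real CSV.
df = pd.read_csv('2018thresh10dev.csv')

# Columns written by generate_wes_data.py: article, link_text, is_same
print(df.columns.tolist())
print(df.sample(5))
```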
generate_wes_data.py (CHANGED)

```diff
@@ -1,14 +1,19 @@
 from datasets import load_dataset
 import pandas as pd
+from nltk.corpus import words
+from nltk import WordNetLemmatizer
 import numpy as np
 from tqdm import tqdm
 
 from collections import defaultdict
 from operator import itemgetter as ig
 from itertools import islice, chain, repeat
-from random import sample, choice, shuffle
+from random import seed, sample, choice, shuffle
 from gc import collect
 
+filter_dict = set(words.words())
+ltize = WordNetLemmatizer().lemmatize
+
 def generate_splits(subset, split=[0.75, 0.15, 0.1]):
     assert abs(sum(split) - 1.0) < 0.0001
     # get the data in dictionary form
@@ -16,6 +21,9 @@ def generate_splits(subset, split=[0.75, 0.15, 0.1]):
     ds = load_dataset('Exr0n/wiki-entity-similarity', subset, split='train')
     ds = list(tqdm(ds, total=len(ds)))
     for article, link in tqdm(map(ig('article', 'link_text'), ds), total=len(ds)):
+        if (ltize(article.lower()) not in filter_dict) and (ltize(link.lower()) in filter_dict):
+            # print(article, link, 'not quite right!')
+            continue # remove if link text is a dictionary word but article is not
         groups[article].append(link)
     del ds
 
@@ -38,10 +46,10 @@
     for i, keys in enumerate(splits):
         for key in keys:
             try:
-                got = sample(keys, len(groups[key])+1)
+                got = sample(keys, len(groups[key])+1) # sample n+1 keys
                 ret[i].append(
-                    [(key, choice(groups[k])) for k in got if k != key]
-                    [:len(groups[key])]
+                    [(key, choice(groups[k])) for k in got if k != key] # get a random link title from that key, if it's not the current key
+                    [:len(groups[key])] # ensure we don't have too many
                 )
             except ValueError:
                 raise ValueError("well frick one group is bigger than all the others combined. try sampling one at a time")
@@ -51,14 +59,16 @@
 
 
 if __name__ == '__main__':
+    seed(0x326ccc)
+    year = 2018
     for size in [5, 10, 20]:
-        x = generate_splits(subset='2018thresh' + str(size) + 'corpus')
+        x = generate_splits(subset=f'{year}thresh' + str(size) + 'corpus')
 
         for (data, labels), split in zip(x, ['train', 'dev', 'test']):
             articles, lts = list(zip(*data))
             df = pd.DataFrame({ 'article': articles, 'link_text': lts, 'is_same': list(labels) })
             df = df.sample(frac=1).reset_index(drop=True)
-            df.to_csv('2018thresh' + str(size) + split + '.csv', index=False)
+            df.to_csv(f'{year}thresh' + str(size) + split + '.csv', index=False)
             # print(df.head(30), df.tail(30))
 
 # tests
```