rfernand committed
Commit c7edabd · 1 Parent(s): 416f455

Upload basic_sentence_transforms.py

Files changed (1)
  1. basic_sentence_transforms.py +205 -0
basic_sentence_transforms.py ADDED
@@ -0,0 +1,205 @@
# basic_sentence_transforms.py: the HF datasets "loading script" for the NC_PAT dataset (defines configurations/tasks, columns, etc.)
import os
import json
import datasets
from datasets import Split, SplitGenerator

no_extra = {
    "source": datasets.Value("string"),
    "target": datasets.Value("string"),
}

samp_class = {
    "source": datasets.Value("string"),
    "target": datasets.Value("string"),
    "class": datasets.Value("string"),
}

count_class = {
    "source": datasets.Value("string"),
    "target": datasets.Value("string"),
    "count": datasets.Value("string"),
    "class": datasets.Value("string"),
}

dir_only = {
    "source": datasets.Value("string"),
    "target": datasets.Value("string"),
    "direction": datasets.Value("string"),
}

configs = [
    {"name": "car_cdr_cons",
     "desc": "small phrase translation tasks that require only: CAR, CDR, or CAR+CDR+CONS operations",
     "features": samp_class},

    {"name": "car_cdr_cons_tuc",
     "desc": "same task as car_cdr_cons, but requires mapping lowercase fillers to their uppercase tokens",
     "features": samp_class},

    {"name": "car_cdr_rcons",
     "desc": "same task as car_cdr_cons, but the CONS samples have their left/right children swapped",
     "features": samp_class},

    {"name": "car_cdr_rcons_tuc",
     "desc": "same task as car_cdr_rcons, but requires mapping lowercase fillers to their uppercase tokens",
     "features": samp_class},

    {"name": "car_cdr_seq",
     "desc": "each sample requires 1-4 combinations of CAR and CDR, as identified by the root filler token",
     "features": count_class},

    {"name": "car_cdr_seq_40k",
     "desc": "same task as car_cdr_seq, but train samples increased from 10K to 40K",
     "features": count_class},

    {"name": "car_cdr_seq_tuc",
     "desc": "same task as car_cdr_seq, but requires mapping lowercase fillers to their uppercase tokens",
     "features": count_class},

    {"name": "car_cdr_seq_40k_tuc",
     "desc": "same task as car_cdr_seq_tuc, but train samples increased from 10K to 40K",
     "features": count_class},

    {"name": "car_cdr_seq_path",
     "desc": "similar to car_cdr_seq, but each needed operation is represented as a node in the left child of the root",
     "features": count_class},

    {"name": "car_cdr_seq_path_40k",
     "desc": "same task as car_cdr_seq_path, but train samples increased from 10K to 40K",
     "features": count_class},

    {"name": "car_cdr_seq_path_40k_tuc",
     "desc": "same task as car_cdr_seq_path_40k, but requires mapping lowercase fillers to their uppercase tokens",
     "features": count_class},

    {"name": "car_cdr_seq_path_tuc",
     "desc": "same task as car_cdr_seq_path, but requires mapping lowercase fillers to their uppercase tokens",
     "features": count_class},

    {"name": "active_active_stb",
     "desc": "active sentence translation, from sentence to parenthesized tree form, both directions",
     "features": dir_only},

    {"name": "active_active_stb_40k",
     "desc": "same task as active_active_stb, but train samples increased from 10K to 40K",
     "features": dir_only},

    {"name": "active_logical_ttb",
     "desc": "active to logical tree translation, in both directions",
     "features": dir_only},

    {"name": "active_logical_ttb_40k",
     "desc": "same task as active_logical_ttb, but train samples increased from 10K to 40K",
     "features": dir_only},

    {"name": "active_passive_ssb",
     "desc": "active to passive sentence translation, in both directions",
     "features": dir_only},

    {"name": "active_passive_ssb_40k",
     "desc": "same task as active_passive_ssb, but train samples increased from 10K to 40K",
     "features": dir_only},

    {"name": "active_passive_ttb",
     "desc": "active to passive tree translation, in both directions",
     "features": dir_only},

    {"name": "active_passive_ttb_40k",
     "desc": "same task as active_passive_ttb, but train samples increased from 10K to 40K",
     "features": dir_only},

    {"name": "actpass_logical_tt",
     "desc": "mixture of active to logical and passive to logical tree translations, single direction",
     "features": no_extra},

    {"name": "actpass_logical_tt_40k",
     "desc": "same task as actpass_logical_tt, but train samples increased from 10K to 40K",
     "features": no_extra},

    {"name": "passive_logical_ttb",
     "desc": "passive to logical tree translation, in both directions",
     "features": dir_only},

    {"name": "passive_logical_ttb_40k",
     "desc": "same task as passive_logical_ttb, but train samples increased from 10K to 40K",
     "features": dir_only},

    {"name": "passive_passive_stb",
     "desc": "passive sentence translation, from sentence to parenthesized tree form, both directions",
     "features": dir_only},

    {"name": "passive_passive_stb_40k",
     "desc": "same task as passive_passive_stb, but train samples increased from 10K to 40K",
     "features": dir_only},
]

class NcPatConfig(datasets.BuilderConfig):
    """BuilderConfig for NC_PAT dataset."""

    def __init__(self, features=None, **kwargs):
        # Version history:
        # 0.0.17: Initial version released to HF datasets
        super().__init__(version=datasets.Version("0.0.17"), **kwargs)

        self.features = features
        self.label_classes = None
        self.data_url = "./{}.zip".format(kwargs["name"])
        self.citation = None
        self.homepage = None

    def _info(self):
        return datasets.DatasetInfo(
            description=self.description,
            features=self.features,
            # no default supervised_keys (the source/target columns are
            # defined per-task in the features above)
            supervised_keys=None,
            homepage=self.homepage,
            citation=self.citation,
        )

class NcPat(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [NcPatConfig(name=c["name"], description=c["desc"], features=c["features"]) for c in configs]
    VERSION = datasets.Version("0.0.17")

    def _info(self):
        return datasets.DatasetInfo(
            description="The dataset consists of diagnostic/warm-up tasks and core tasks. " +
            "The core tasks represent the translation of English sentences between the active, passive, and logical forms.",
            supervised_keys=None,
            homepage=None,
            citation=None,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        url = self.config.data_url
        dl_dir = dl_manager.download_and_extract(url)
        task = self.config_id

        splits = [
            SplitGenerator(name=Split.TRAIN, gen_kwargs={"data_file": os.path.join(dl_dir, "train.jsonl")}),
            SplitGenerator(name=Split.VALIDATION, gen_kwargs={"data_file": os.path.join(dl_dir, "dev.jsonl")}),
            SplitGenerator(name=Split.TEST, gen_kwargs={"data_file": os.path.join(dl_dir, "test.jsonl")}),
        ]

        if not task.startswith("car_cdr_cons") and not task.startswith("car_cdr_rcons"):
            splits += [
                SplitGenerator(name="ood_new", gen_kwargs={"data_file": os.path.join(dl_dir, "ood_new_adj.jsonl")}),
                SplitGenerator(name="ood_long", gen_kwargs={"data_file": os.path.join(dl_dir, "ood_long_adj.jsonl")}),
            ]

        return splits

    def _generate_examples(self, data_file):
        with open(data_file, encoding="utf-8") as f:
            for i, line in enumerate(f):
                key = str(i)
                row = json.loads(line)
                yield key, row

if __name__ == "__main__":
    # short test
    config = NcPat.BUILDER_CONFIGS[0]
    print("name: {}, desc: {}".format(config.name, config.description))
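
For reference, a minimal usage sketch of this loading script. The repo id below (rfernand/basic_sentence_transforms) is inferred from the committer name and the file name, and the trust_remote_code note applies only to recent versions of the datasets library; both are assumptions, not part of this commit.

# usage_sketch.py: load one configuration of the NC_PAT dataset via the loading script above.
# NOTE: the repo id is an assumption inferred from the committer and file name.
# NOTE: recent datasets versions may require passing trust_remote_code=True for script-based datasets.
from datasets import load_dataset

dataset = load_dataset("rfernand/basic_sentence_transforms", "car_cdr_cons")

print(dataset)              # DatasetDict with train / validation / test splits
print(dataset["train"][0])  # one example: {"source": ..., "target": ..., "class": ...}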