| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | """Dataset of task-like and hopefully-not-task-like examples.""" |
| |
|
| |
|
| | import json |
| | import datasets |
| |
|
# Long-form dataset description surfaced through `DatasetInfo` (see
# `TaskyOrNot._info` below, which passes it to `datasets.DatasetInfo`).
_DESCRIPTION = """\
This dataset is a collection of prompted examples from P3 and examples from C4.
The C4 examples are labeled "not-task-like" and the P3 examples are
"task-like". Examples were sampled from C4 so that the distribution of example
lengths is similar for C4 and P3 examples. Some datasets from P3 were ignored
because their examples were too long. Some datasets from P3 are held out for
validation. Non-tasky validation data was gathered from C4 without
intentionally matching the length distribution. Tasky data was gathered from
the validation set of certain held-out datasets from P3.
"""
| |
|
| |
|
class TaskyOrNot(datasets.GeneratorBasedBuilder):
    """Dataset of tasky and non-tasky text data.

    Tasky examples come from a P3 JSON file keyed by dataset and prompt;
    non-tasky examples come from a flat C4 JSON list.
    """

    # Relative paths of the JSON data files resolved by the download manager.
    _DATA_URLS = {
        "p3_train": "p3_examples_train.json",
        "p3_dev": "p3_examples_dev.json",
        "c4_train": "c4_examples_train.json",
        "c4_dev": "c4_examples_dev.json",
    }

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="10xp3_10xc4",
            version=datasets.Version("1.0.0", ""),
            description=(
                "10 tasky examples per prompt/dataset combination; "
                "10 non-tasky examples per tasky example"
            ),
        )
    ]

    def _info(self):
        """Return the `DatasetInfo` describing features and metadata.

        Label 0 is "not tasky" (C4) and label 1 is "tasky" (P3), matching
        the integer labels emitted by `_generate_examples`.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "dataset": datasets.Value("string"),
                    "prompt": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(
                        names=["not tasky", "tasky"]
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/craffel/tasky-data",
            citation="",
        )

    def _split_generators(self, dl_manager):
        """Download the data files and define the train/validation splits.

        Args:
            dl_manager: `datasets.DownloadManager` used to fetch
                `_DATA_URLS`.

        Returns:
            A list with the TRAIN split (P3 train + C4 train) and the
            VALIDATION split (P3 dev + C4 dev).
        """
        files = dl_manager.download(self._DATA_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "tasky_file": files["p3_train"],
                    "non_tasky_file": files["c4_train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "tasky_file": files["p3_dev"],
                    "non_tasky_file": files["c4_dev"],
                },
            ),
        ]

    def _generate_examples(self, tasky_file, non_tasky_file):
        """Yield ``(id, example)`` pairs: all tasky examples, then non-tasky.

        Args:
            tasky_file: Path to a JSON file mapping dataset name ->
                prompt name -> list of example texts (P3 data).
            non_tasky_file: Path to a JSON file holding a flat list of
                example texts (C4 data).

        Yields:
            ``(idx, example_dict)`` where ``example_dict`` has the keys
            declared in `_info` and ``label`` is 1 for tasky, 0 otherwise.
        """
        # JSON is UTF-8 by specification; pass the encoding explicitly so
        # decoding does not depend on the platform default (e.g. cp1252 on
        # Windows), which would corrupt or reject non-ASCII text.
        with open(tasky_file, encoding="utf-8") as f:
            tasky_examples = json.load(f)
        idx = 0
        for dataset, prompts in tasky_examples.items():
            for prompt, examples in prompts.items():
                for text in examples:
                    yield idx, {
                        "text": text,
                        "dataset": dataset,
                        "prompt": prompt,
                        "label": 1,
                    }
                    idx += 1

        with open(non_tasky_file, encoding="utf-8") as f:
            non_tasky_examples = json.load(f)
        # Keep the running index so example ids stay unique within a split.
        for text in non_tasky_examples:
            yield idx, {
                "text": text, "dataset": "c4", "prompt": "N/A", "label": 0
            }
            idx += 1
| |
|