import json

import datasets
from datasets.tasks import QuestionAnsweringExtractive

_DESCRIPTION = """\
Finnish SQuAD v2.0. SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000
unanswerable questions written adversarially by crowdworkers to look similar to answerable ones.
To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine
when no answer is supported by the paragraph and abstain from answering.
"""

_URLS = {
    "train": "https://huggingface.co/datasets/TurkuNLP/squad_v2_fi/resolve/main/train-v2.0.json.gz",
    "dev": "https://huggingface.co/datasets/TurkuNLP/squad_v2_fi/resolve/main/dev-v2.0.json.gz",
}
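
# Both splits are distributed as gzip-compressed SQuAD-format JSON; the
# download manager's download_and_extract() call in _split_generators() below
# is expected to decompress them, so _generate_examples() reads plain JSON.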


class SquadV2Config(datasets.BuilderConfig):
    """BuilderConfig for SQuAD v2.0 (Finnish)."""

    def __init__(self, **kwargs):
        """BuilderConfig for SQuAD v2.0 (Finnish).

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class SquadV2(datasets.GeneratorBasedBuilder):
    """Finnish SQuAD v2.0, an extractive question answering dataset."""

    BUILDER_CONFIGS = [
        SquadV2Config(
            name="squad_v2_fi",
            version=datasets.Version("1.0.0"),
            description="Finnish SQuAD v2.0",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://turkunlp.org/",
            task_templates=[
                QuestionAnsweringExtractive(
                    question_column="question", context_column="context", answers_column="answers"
                )
            ],
        )
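
    # Note: task templates (the datasets.tasks import used above) are
    # deprecated in newer releases of the `datasets` library and removed in
    # recent major versions; on such versions the import and the
    # task_templates argument may need to be dropped.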

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = _URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        # The upstream "dev" file is exposed as the standard VALIDATION split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            squad = json.load(f)
            # SQuAD-format JSON nests article -> paragraphs -> qas -> answers.
            for example in squad["data"]:
                title = example.get("title", "")
                for paragraph in example["paragraphs"]:
                    context = paragraph["context"]
                    for qa in paragraph["qas"]:
                        question = qa["question"]
                        id_ = qa["id"]
                        # Trim stray punctuation and whitespace around each
                        # answer span, shifting answer_start by the number of
                        # leading characters removed so the offset still
                        # points at the answer text inside `context`.
                        # Unanswerable questions have empty answer lists and
                        # pass through unchanged.
                        answer_starts = []
                        answers = []
                        for answer in qa["answers"]:
                            text = answer["text"].lstrip(" .,-:")
                            answer_starts.append(
                                answer["answer_start"] + len(answer["text"]) - len(text)
                            )
                            answers.append(text.rstrip(" .,-:"))
                        yield id_, {
                            "title": title,
                            "context": context,
                            "question": question,
                            "id": id_,
                            "answers": {
                                "answer_start": answer_starts,
                                "text": answers,
                            },
                        }
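

# A minimal usage sketch (assumes this script is hosted on the Hugging Face Hub
# as TurkuNLP/squad_v2_fi and that the installed `datasets` version still
# supports loading datasets from scripts):
#
#     from datasets import load_dataset
#
#     squad_fi = load_dataset("TurkuNLP/squad_v2_fi")
#     print(squad_fi["validation"][0])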