Source code for torchtext.datasets.squad2
import os
from functools import partial
from typing import Tuple, Union

from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
    _create_dataset_directory,
    _wrap_split_argument,
)
URL = {
    "train": "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json",
    "dev": "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json",
}

MD5 = {
    "train": "62108c273c268d70893182d5cf8df740",
    "dev": "246adae8b7002f8679c027697b0b7cf8",
}

NUM_LINES = {
    "train": 130319,
    "dev": 11873,
}
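# Note: NUM_LINES is not referenced elsewhere in this module; it records the
# expected number of examples per split and mirrors the counts in the
# docstring below.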
DATASET_NAME = "SQuAD2"


def _filepath_fn(root, split, _=None):
    return os.path.join(root, os.path.basename(URL[split]))
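# For example (with a hypothetical cache directory),
# _filepath_fn("/home/user/.torchtext/cache", "train") returns
# "/home/user/.torchtext/cache/train-v2.0.json". The unused third parameter
# lets on_disk_cache pass the URL through without affecting the target path.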
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev"))
def SQuAD2(root: str, split: Union[Tuple[str], str]):
"""SQuAD2 Dataset
.. warning::
using datapipes is still currently subject to a few caveats. if you wish
to use this dataset with shuffling, multi-processing, or distributed
learning, please see :ref:`this note <datapipes_warnings>` for further
instructions.
For additional details refer to https://rajpurkar.github.io/SQuAD-explorer/
Number of lines per split:
- train: 130319
- dev: 11873
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `dev`)
:returns: DataPipe that yields data points from SQuaAD1 dataset which consist of context, question, list of answers and corresponding index in context
:rtype: (str, str, list(str), list(int))
"""
    if not is_module_available("torchdata"):
        raise ModuleNotFoundError(
            "Package `torchdata` not found. Please install following instructions at https://github.com/pytorch/data"
        )

    from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
    url_dp = IterableWrapper([URL[split]])
    # Cache the raw JSON on disk, validating the download against its MD5 checksum
    cache_dp = url_dp.on_disk_cache(
        filepath_fn=partial(_filepath_fn, root, split),
        hash_dict={_filepath_fn(root, split): MD5[split]},
        hash_type="md5",
    )
    cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
    cache_dp = FileOpener(cache_dp, encoding="utf-8")
    # Parse the JSON and flatten it into (context, question, answers, answer_starts)
    # tuples; the shuffle/sharding wrappers are added for DataLoader compatibility,
    # with shuffling disabled by default
    return cache_dp.parse_json_files().read_squad().shuffle().set_shuffle(False).sharding_filter()
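
# A minimal usage sketch, assuming `torchdata` is installed (the variable
# names below are illustrative only):
#
#     from torchtext.datasets import SQuAD2
#
#     train_dp = SQuAD2(split="train")
#     context, question, answers, answer_starts = next(iter(train_dp))
#     # `answers` lists the reference answer strings and `answer_starts` their
#     # character offsets into `context`, matching the docstring's return type.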