Source code for torchtext.datasets.wikitext103

import os
from functools import partial
from typing import Union, Tuple

from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
    _wrap_split_argument,
    _create_dataset_directory,
)

URL = "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip"

MD5 = "9ddaacaf6af0710eda8c456decff7832"

NUM_LINES = {
    "train": 1801350,
    "valid": 3760,
    "test": 4358,
}

DATASET_NAME = "WikiText103"

_EXTRACTED_FILES = {
    "train": os.path.join("wikitext-103", "wiki.train.tokens"),
    "test": os.path.join("wikitext-103", "wiki.test.tokens"),
    "valid": os.path.join("wikitext-103", "wiki.valid.tokens"),
}


def _filepath_fn(root, _=None):
    return os.path.join(root, os.path.basename(URL))


def _extracted_filepath_fn(root, split, _=None):
    return os.path.join(root, _EXTRACTED_FILES[split])


def _filter_fn(split, x):
    return _EXTRACTED_FILES[split] in x[0]

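# For illustration only (an editorial addition, not part of the original
# module): with root="/data" and split="valid", the helpers above resolve
# as follows on a POSIX system:
#   _filepath_fn("/data")                     -> "/data/wikitext-103-v1.zip"
#   _extracted_filepath_fn("/data", "valid")  -> "/data/wikitext-103/wiki.valid.tokens"
#   _filter_fn("valid", ("/data/wikitext-103/wiki.valid.tokens", stream)) -> True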

@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "valid", "test"))
def WikiText103(root: str, split: Union[Tuple[str], str]):
    """WikiText103 Dataset

    .. warning::

        Using datapipes is still currently subject to a few caveats. If you wish
        to use this dataset with shuffling, multi-processing, or distributed
        learning, please see :ref:`this note <datapipes_warnings>` for further
        instructions.

    For additional details refer to https://blog.salesforceairesearch.com/the-wikitext-long-term-dependency-language-modeling-dataset/

    Number of lines per split:
        - train: 1801350
        - valid: 3760
        - test: 4358

    Args:
        root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
        split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `valid`, `test`)

    :returns: DataPipe that yields text from Wikipedia articles
    :rtype: str
    """
    if not is_module_available("torchdata"):
        raise ModuleNotFoundError(
            "Package `torchdata` not found. Please install following instructions at https://github.com/pytorch/data"
        )

    from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper

    url_dp = IterableWrapper([URL])
    # Cache the downloaded archive on disk, verifying it against the known MD5
    cache_compressed_dp = url_dp.on_disk_cache(
        filepath_fn=partial(_filepath_fn, root),
        hash_dict={_filepath_fn(root): MD5},
        hash_type="md5",
    )
    cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)

    cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=partial(_extracted_filepath_fn, root, split))
    # Extract the zip and filter out everything but the requested split file
    cache_decompressed_dp = (
        FileOpener(cache_decompressed_dp, mode="b").load_from_zip().filter(partial(_filter_fn, split))
    )
    cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)

    data_dp = FileOpener(cache_decompressed_dp, encoding="utf-8")
    # Attach shuffle (disabled by default) and sharding hooks so a DataLoader
    # can later enable shuffling and split the work across workers
    return data_dp.readlines(strip_newline=False, return_path=False).shuffle().set_shuffle(False).sharding_filter()
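A minimal usage sketch (an editorial addition, not part of the original module): assuming torchtext and torchdata are installed, it prints the first few lines of the validation split. The first call downloads and extracts the dataset; later calls reuse the on-disk cache.

if __name__ == "__main__":
    # `split` may be "train", "valid", "test", or a tuple of these;
    # `root` defaults to the torchtext cache directory.
    valid_dp = WikiText103(split="valid")
    for i, line in enumerate(valid_dp):
        print(repr(line))
        if i >= 4:  # show only the first five lines
            break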
