penn_treebank

load_data

Load and return the Penn TreeBank dataset.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `root_dir` | `Optional[str]` | The path to store the downloaded data. When `root_dir` is not provided, the data will be saved into `fastestimator_data` under the user's home directory. | `None` |
| `seq_length` | `int` | Length of each data sequence, in tokens. | `64` |
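For intuition on how `seq_length` shapes the output, here is a standalone sketch of the same truncate-and-reshape step the loader performs internally (the token values are made up for illustration):

```python
import numpy as np

tokens = list(range(1000))                          # stand-in for word indices
seq_length = 64
usable = len(tokens) - len(tokens) % seq_length     # 960: drop the incomplete tail
x = np.array(tokens[:usable]).reshape(-1, seq_length)
print(x.shape)                                      # (15, 64)
```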

Returns:

| Type | Description |
| --- | --- |
| `Tuple[NumpyDataset, NumpyDataset, NumpyDataset, List[str]]` | `(train_data, eval_data, test_data, vocab)` |
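A minimal usage sketch (the import path follows FastEstimator's public layout; the printed vocabulary size depends on the downloaded data):

```python
from fastestimator.dataset.data.penn_treebank import load_data

# Downloads the splits on first use, then loads them as NumpyDatasets.
train_data, eval_data, test_data, vocab = load_data(seq_length=64)

print(len(vocab))                # vocabulary size, built from the training split
print(train_data[0]["x"].shape)  # (64,) -- one sequence of word indices
```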

Source code in fastestimator/fastestimator/dataset/data/penn_treebank.py
# Module-level imports from penn_treebank.py, included so the snippet is self-contained:
import os
from pathlib import Path
from typing import List, Optional, Tuple

import numpy as np
import wget

from fastestimator.dataset.numpy_dataset import NumpyDataset
from fastestimator.util.wget_util import bar_custom


def load_data(root_dir: Optional[str] = None,
              seq_length: int = 64) -> Tuple[NumpyDataset, NumpyDataset, NumpyDataset, List[str]]:
    """Load and return the Penn TreeBank dataset.

    Args:
        root_dir: The path to store the downloaded data. When `root_dir` is not provided, the data will be saved into
            `fastestimator_data` under the user's home directory.
        seq_length: Length of data sequence.

    Returns:
        (train_data, eval_data, test_data, vocab)
    """
    home = str(Path.home())

    if root_dir is None:
        root_dir = os.path.join(home, 'fastestimator_data', 'PennTreeBank')
    else:
        root_dir = os.path.join(os.path.abspath(root_dir), 'PennTreeBank')
    os.makedirs(root_dir, exist_ok=True)

    train_data_path = os.path.join(root_dir, 'ptb.train.txt')
    eval_data_path = os.path.join(root_dir, 'ptb.valid.txt')
    test_data_path = os.path.join(root_dir, 'ptb.test.txt')

    files = [(train_data_path, 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt'),
             (eval_data_path, 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt'),
             (test_data_path, 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt')]

    texts = []
    for data_path, download_link in files:
        if not os.path.exists(data_path):
            # Download
            print("Downloading data: {}".format(data_path))
            wget.download(download_link, data_path, bar=bar_custom)

        text = []
        with open(data_path, 'r') as f:
            for line in f:
                text.extend(line.split() + ['<eos>'])
        texts.append(text)

    # Build dictionary from training data
    vocab = sorted(set(texts[0]))
    word2idx = {u: i for i, u in enumerate(vocab)}

    # Convert each word to its index and truncate every text to a multiple of
    # seq_length, discarding the trailing incomplete sequence. The cutoff is
    # computed explicitly because `text[:-(len(text) % seq_length)]` would
    # return an empty list whenever the length is already an exact multiple.
    data = [[word2idx[word] for word in text[:len(text) - len(text) % seq_length]] for text in texts]
    x_train, x_eval, x_test = [np.array(d).reshape(-1, seq_length) for d in data]

    train_data = NumpyDataset(data={"x": x_train})
    eval_data = NumpyDataset(data={"x": x_eval})
    test_data = NumpyDataset(data={"x": x_test})
    return train_data, eval_data, test_data, vocab
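Because `vocab` is sorted before `word2idx` is built, list positions in `vocab` invert the mapping, so a sequence of indices can be decoded back to words directly. An illustrative sketch:

```python
# Decode the first ten tokens of the first training sequence;
# vocab[i] inverts the word2idx mapping built above.
train_data, _, _, vocab = load_data(seq_length=64)
sample = train_data[0]["x"]
print(" ".join(vocab[int(i)] for i in sample[:10]))
```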