|
| 1 | +--- |
| 2 | +title: Quickstart |
| 3 | +--- |
| 4 | + |
| 5 | +# Quickstart |
| 6 | + |
| 7 | +This page shows short examples of loading datasets and iterating over samples. |
| 8 | + |
| 9 | +## LibriBrain Speech (public) |
| 10 | + |
| 11 | +```python |
| 12 | +from pnpl.datasets.libribrain2025 import constants |
| 13 | +from pnpl.datasets import LibriBrainSpeech |
| 14 | + |
| 15 | +# pick one run to keep it quick |
| 16 | +include_run_keys = [constants.RUN_KEYS[0]] # e.g. ('0','1','Sherlock1','1') |
| 17 | + |
| 18 | +ds = LibriBrainSpeech( |
| 19 | + data_path="./data/LibriBrain", |
| 20 | + preprocessing_str="bads+headpos+sss+notch+bp+ds", |
| 21 | + include_run_keys=include_run_keys, |
| 22 | + tmin=0.0, |
| 23 | + tmax=0.2, |
| 24 | + standardize=True, |
| 25 | + include_info=True, |
| 26 | +) |
| 27 | + |
| 28 | +print(len(ds), "samples") |
| 29 | +x, y, info = ds[0] |
| 30 | +print(x.shape, y.shape, info["dataset"]) # (channels,time), (time,), "libribrain2025" |
| 31 | +``` |
| 32 | + |
| 33 | +## LibriBrain Phoneme (public) |
| 34 | + |
| 35 | +```python |
| 36 | +from pnpl.datasets.libribrain2025 import constants |
| 37 | +from pnpl.datasets import LibriBrainPhoneme |
| 38 | + |
| 39 | +include_run_keys = [constants.RUN_KEYS[0]] |
| 40 | + |
| 41 | +ds = LibriBrainPhoneme( |
| 42 | + data_path="./data/LibriBrain", |
| 43 | + preprocessing_str="bads+headpos+sss+notch+bp+ds", |
| 44 | + include_run_keys=include_run_keys, |
| 45 | + tmin=-0.2, |
| 46 | + tmax=0.6, |
| 47 | + standardize=True, |
| 48 | +) |
| 49 | + |
| 50 | +print(len(ds), "samples") |
| 51 | +x, y = ds[0] |
| 52 | +print(x.shape, y.item()) |
| 53 | +``` |
| 54 | + |
| 55 | +```{note} |
| 56 | +The first time you instantiate a dataset with `download=True` (default), required files are downloaded from Hugging Face and cached under `data_path`. |
| 57 | +``` |
| 58 | + |
0 commit comments