import os

import datasets
from datasets import DatasetInfo, DownloadManager

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.},
year={2022}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This dataset, from Xinjiang University, is intended for low-resource Uyghur ASR (automatic speech recognition).
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = "http://www.openslr.org/22"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URL = "https://huggingface.co/datasets/sunlixu/Uyghur/"

# Relative paths are resolved against the dataset repository on the Hub.
_DL_URLS = {
    "train": "train.tar.gz",
    "test": "test.tar.gz",
    "cv": "cv.tar.gz",
    "all": {
        "train": "train.tar.gz",
        "test": "test.tar.gz",
        "cv": "cv.tar.gz",
    },
}


class UyghurASRConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        """BuilderConfig for UyghurASR.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(UyghurASRConfig, self).__init__(**kwargs)


class UyghurASR(datasets.GeneratorBasedBuilder):
    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        UyghurASRConfig(name="train", description="'train' speech."),
        UyghurASRConfig(name="test", description="'test' speech."),
        UyghurASRConfig(name="cv", description="'cv' speech."),
        UyghurASRConfig(name="all", description="'train' + 'test' + 'cv' speech."),
    ]

    def _info(self) -> DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/sunlixu/Uyghur/",
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        urls = _DL_URLS[self.config.name]
        # Single-split configs ("train", "test", "cv") map to a plain path;
        # wrap it in a dict so one code path handles every config.
        if not isinstance(urls, dict):
            urls = {self.config.name: urls}
        archive_path = dl_manager.download(urls)
        # Extract the archives for random access; in streaming mode the audio
        # is read directly out of the tar files instead.
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
        split_names = {
            "train": datasets.Split.TRAIN,
            "test": datasets.Split.TEST,
            "cv": datasets.Split.VALIDATION,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[split],
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get(split),
                    "files": dl_manager.iter_archive(archive_path[split]),
                },
            )
            for split in archive_path
        ]

    def _generate_examples(self, files, local_extracted_archive):
        """Generate examples from a Uyghur archive."""
        key = 0
        audio_data = {}
        transcripts = []
        for path, f in files:
            if path.endswith(".wav"):
                # Buffer the raw audio bytes until the matching transcript
                # file has been seen.
                id_ = path.split("/")[-1][: -len(".wav")]
                audio_data[id_] = f.read()
            elif path.endswith(".txt"):
                for line in f:
                    if line:
                        # Each line is "<utterance id> <transcript>".
                        line = line.decode("utf-8").strip()
                        id_, transcript = line.split(" ", 1)
                        audio_file = f"{id_}.wav"
                        # Utterance ids look like "<speaker>_<utterance>".
                        speaker_id = id_.split("_")[0]
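                        # In non-streaming mode, point "file" at the extracted
                        # copy on disk so the audio can be re-read later; when
                        # streaming, only the in-archive relative path exists.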
                        audio_file = (
                            os.path.join(local_extracted_archive, audio_file)
                            if local_extracted_archive
                            else audio_file
                        )
                        transcripts.append(
                            {
                                "id": id_,
                                "speaker_id": speaker_id,
                                "file": audio_file,
                                "text": transcript,
                            }
                        )
            # Once every buffered wav has a transcript (and vice versa),
            # emit the completed examples and reset the buffers.
            if audio_data and len(audio_data) == len(transcripts):
                for transcript in transcripts:
                    audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
                    yield key, {"audio": audio, **transcript}
                    key += 1
                audio_data = {}
                transcripts = []
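

# A minimal usage sketch, not part of the loading script proper. It assumes
# the tar archives are reachable in the "sunlixu/Uyghur" Hub repo referenced
# by _URL above; recent versions of `datasets` may additionally require
# trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    # Stream the default "all" config so nothing is downloaded up front.
    uyghur = load_dataset("sunlixu/Uyghur", "all", streaming=True)
    sample = next(iter(uyghur["validation"]))
    print(sample["id"], sample["speaker_id"], sample["text"])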