from dataclasses import dataclass
from typing import Any, Dict, Tuple

import torch
from torchaudio._internal import load_state_dict_from_url
from torchaudio.models import wav2vec2_model, Wav2Vec2Model

from . import utils


__all__ = []


@dataclass
class Wav2Vec2Bundle:
    """torchaudio.pipelines.Wav2Vec2Bundle()

    Data class that bundles the information needed to use a pretrained Wav2Vec2Model.

    This class provides interfaces for instantiating the pretrained model along with
    the information necessary to retrieve pretrained weights and additional data
    to be used with the model.

    The torchaudio library instantiates objects of this class, each of which represents
    a different pretrained model. Client code should access pretrained models via these
    instances.

    Please see below for the usage and the available values.

    Example - Feature Extraction
        >>> import torchaudio
        >>>
        >>> bundle = torchaudio.pipelines.HUBERT_BASE
        >>>
        >>> # Build the model and load pretrained weight.
        >>> model = bundle.get_model()
        Downloading:
        100%|███████████████████████████████| 360M/360M [00:06<00:00, 60.6MB/s]
        >>>
        >>> # Resample audio to the expected sampling rate
        >>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate)
        >>>
        >>> # Extract acoustic features
        >>> features, _ = model.extract_features(waveform)
    """

    _path: str
    _params: Dict[str, Any]
    _sample_rate: float

    @property
    def sample_rate(self) -> float:
        """Sample rate of the audio that the model is trained on.

        :type: float
        """
        return self._sample_rate

    def _get_state_dict(self, dl_kwargs):
        url = f"https://download.pytorch.org/torchaudio/models/{self._path}"
        dl_kwargs = {} if dl_kwargs is None else dl_kwargs
        state_dict = load_state_dict_from_url(url, **dl_kwargs)
        return state_dict

    def get_model(self, *, dl_kwargs=None) -> Wav2Vec2Model:
        """get_model(self, *, dl_kwargs=None) -> torchaudio.models.Wav2Vec2Model

        Construct the model and load the pretrained weight.

        The weight file is downloaded from the internet and cached with
        :func:`torch.hub.load_state_dict_from_url`

        Args:
            dl_kwargs (dictionary of keyword arguments): Passed to :func:`torch.hub.load_state_dict_from_url`.
        """
        model = wav2vec2_model(**self._params)
        model.load_state_dict(self._get_state_dict(dl_kwargs))
        model.eval()
        return model


@dataclass
class Wav2Vec2ASRBundle(Wav2Vec2Bundle):
    """torchaudio.pipelines.Wav2Vec2ASRBundle()

    Data class that bundles the information needed to use a pretrained Wav2Vec2Model.

    This class provides interfaces for instantiating the pretrained model along with
    the information necessary to retrieve pretrained weights and additional data
    to be used with the model.

    The torchaudio library instantiates objects of this class, each of which represents
    a different pretrained model. Client code should access pretrained models via these
    instances.

    Please see below for the usage and the available values.

    Example - ASR
        >>> import torchaudio
        >>>
        >>> bundle = torchaudio.pipelines.HUBERT_ASR_LARGE
        >>>
        >>> # Build the model and load pretrained weight.
        >>> model = bundle.get_model()
        Downloading:
        100%|███████████████████████████████| 1.18G/1.18G [00:17<00:00, 73.8MB/s]
        >>>
        >>> # Check the corresponding labels of the output.
        >>> labels = bundle.get_labels()
        >>> print(labels)
        ('-', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z')
        >>>
        >>> # Resample audio to the expected sampling rate
        >>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate)
        >>>
        >>> # Infer the label probability distribution
        >>> emissions, _ = model(waveform)
        >>>
        >>> # Pass emission to decoder
        >>> # `ctc_decode` is for illustration purposes only
        >>> transcripts = ctc_decode(emissions, labels)
    """

    _labels: Tuple[str]
    _remove_aux_axis: Tuple[int] = (1, 2, 3)

    def get_labels(self, *, blank: str = "-") -> Tuple[str]:
        """The output class labels (only applicable to fine-tuned bundles)

        The first one is the blank token, and it is customizable.

        Args:
            blank (str, optional): Blank token. (default: ``'-'``)

        Returns:
            Tuple[str]:
            For models fine-tuned on ASR, returns the tuple of strings representing
            the output class labels.

        Example
            >>> import torchaudio
            >>> torchaudio.pipelines.HUBERT_ASR_LARGE.get_labels()
            ('-', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z')
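            >>> # The blank token is customizable and is always placed first.
            >>> torchaudio.pipelines.HUBERT_ASR_LARGE.get_labels(blank='<pad>')[:3]
            ('<pad>', '|', 'E')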
        """
        return (blank, *self._labels)

    def _get_state_dict(self, dl_kwargs):
        state_dict = super()._get_state_dict(dl_kwargs)
        if self._remove_aux_axis:
            # Remove the seemingly unnecessary axes of the auxiliary (output) layer.
            # The fine-tuned weights originating from fairseq carry extra dimensions
            # inherited from fairseq's Dictionary (intended for NLP tasks) that are
            # never used during ASR training, and some VoxPopuli checkpoints contain
            # an additional, practically unused label.
            for key in ["aux.weight", "aux.bias"]:
                t = state_dict[key]
                state_dict[key] = torch.stack([t[i] for i in range(t.size(0)) if i not in self._remove_aux_axis])
        return state_dict


# ---------------------------------------------------------------------------
# Shared configuration.
#
# Every bundle below uses the same seven-layer convolutional feature extractor;
# `_base_params` returns the "base" architecture configuration, on top of which
# per-architecture override dictionaries and keyword overrides are applied.
_CONV_LAYER_CONFIG = [
    (512, 10, 5),
    (512, 3, 2),
    (512, 3, 2),
    (512, 3, 2),
    (512, 3, 2),
    (512, 2, 2),
    (512, 2, 2),
]


def _base_params(*override_dicts, **overrides):
    """Return the "base" configuration, updated with the given overrides."""
    params = {
        "extractor_mode": "group_norm",
        "extractor_conv_layer_config": _CONV_LAYER_CONFIG,
        "extractor_conv_bias": False,
        "encoder_embed_dim": 768,
        "encoder_projection_dropout": 0.1,
        "encoder_pos_conv_kernel": 128,
        "encoder_pos_conv_groups": 16,
        "encoder_num_layers": 12,
        "encoder_num_heads": 12,
        "encoder_attention_dropout": 0.1,
        "encoder_ff_interm_features": 3072,
        "encoder_ff_interm_dropout": 0.0,
        "encoder_dropout": 0.1,
        "encoder_layer_norm_first": False,
        "encoder_layer_drop": 0.05,
        "aux_num_out": None,
    }
    for override in override_dicts:
        params.update(override)
    params.update(overrides)
    return params


_LARGE = {
    "encoder_embed_dim": 1024,
    "encoder_num_layers": 24,
    "encoder_num_heads": 16,
    "encoder_ff_interm_features": 4096,
    "encoder_ff_interm_dropout": 0.1,
    "encoder_layer_drop": 0.2,
}

_LARGE_LV60K = {
    "extractor_mode": "layer_norm",
    "extractor_conv_bias": True,
    "encoder_embed_dim": 1024,
    "encoder_num_layers": 24,
    "encoder_num_heads": 16,
    "encoder_attention_dropout": 0.0,
    "encoder_ff_interm_features": 4096,
    "encoder_ff_interm_dropout": 0.1,
    "encoder_dropout": 0.0,
    "encoder_layer_norm_first": True,
    "encoder_layer_drop": 0.1,
}

_HUBERT_LARGE = {
    "extractor_mode": "layer_norm",
    "encoder_embed_dim": 1024,
    "encoder_projection_dropout": 0.0,
    "encoder_num_layers": 24,
    "encoder_num_heads": 16,
    "encoder_attention_dropout": 0.0,
    "encoder_ff_interm_features": 4096,
    "encoder_ff_interm_dropout": 0.0,
    "encoder_dropout": 0.0,
    "encoder_layer_norm_first": True,
    "encoder_layer_drop": 0.0,
}

_HUBERT_XLARGE = dict(
    _HUBERT_LARGE,
    encoder_embed_dim=1280,
    encoder_num_layers=48,
    encoder_ff_interm_features=5120,
)

_VOXPOPULI = {
    "encoder_projection_dropout": 0.0,
    "encoder_attention_dropout": 0.0,
    "encoder_ff_interm_dropout": 0.1,
    "encoder_dropout": 0.0,
    "encoder_layer_drop": 0.1,
}


WAV2VEC2_BASE = Wav2Vec2Bundle(
    "wav2vec2_fairseq_base_ls960.pth",
    _base_params(),
    _sample_rate=16000,
)
WAV2VEC2_BASE.__doc__ = """wav2vec 2.0 model with "Base" configuration.

Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").
Not fine-tuned.

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
"""

WAV2VEC2_ASR_BASE_10M = Wav2Vec2ASRBundle(
    "wav2vec2_fairseq_base_ls960_asr_ll10m.pth",
    _base_params(aux_num_out=29),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
)
WAV2VEC2_ASR_BASE_10M.__doc__ = """Build "base" wav2vec2 model with an extra linear module

Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on 10 minutes of transcribed audio from *Libri-Light* dataset
[:footcite:`librilight`] ("train-10min" subset).

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

WAV2VEC2_ASR_BASE_100H = Wav2Vec2ASRBundle(
    "wav2vec2_fairseq_base_ls960_asr_ls100.pth",
    _base_params(aux_num_out=29),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
)
WAV2VEC2_ASR_BASE_100H.__doc__ = """Build "base" wav2vec2 model with an extra linear module

Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on 100 hours of transcribed audio from "train-clean-100" subset.

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

WAV2VEC2_ASR_BASE_960H = Wav2Vec2ASRBundle(
    "wav2vec2_fairseq_base_ls960_asr_ls960.pth",
    _base_params(aux_num_out=29),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
)
WAV2VEC2_ASR_BASE_960H.__doc__ = """Build "base" wav2vec2 model with an extra linear module

Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on the same audio with the corresponding transcripts.

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

WAV2VEC2_LARGE = Wav2Vec2Bundle(
    "wav2vec2_fairseq_large_ls960.pth",
    _base_params(_LARGE),
    _sample_rate=16000,
)
WAV2VEC2_LARGE.__doc__ = """Build "large" wav2vec2 model.

Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").
Not fine-tuned.

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
"""

WAV2VEC2_ASR_LARGE_10M = Wav2Vec2ASRBundle(
    "wav2vec2_fairseq_large_ls960_asr_ll10m.pth",
    _base_params(_LARGE, aux_num_out=29),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_10M.__doc__ = """Build "large" wav2vec2 model with an extra linear module

Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on 10 minutes of transcribed audio from *Libri-Light* dataset
[:footcite:`librilight`] ("train-10min" subset).

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

WAV2VEC2_ASR_LARGE_100H = Wav2Vec2ASRBundle(
    "wav2vec2_fairseq_large_ls960_asr_ls100.pth",
    _base_params(_LARGE, aux_num_out=29),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_100H.__doc__ = """Build "large" wav2vec2 model with an extra linear module

Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on 100 hours of transcribed audio from
the same dataset ("train-clean-100" subset).

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

WAV2VEC2_ASR_LARGE_960H = Wav2Vec2ASRBundle(
    "wav2vec2_fairseq_large_ls960_asr_ls960.pth",
    _base_params(_LARGE, aux_num_out=29),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_960H.__doc__ = """Build "large" wav2vec2 model with an extra linear module

Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on the same audio with the corresponding transcripts.

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

WAV2VEC2_LARGE_LV60K = Wav2Vec2Bundle(
    "wav2vec2_fairseq_large_lv60k.pth",
    _base_params(_LARGE_LV60K),
    _sample_rate=16000,
)
WAV2VEC2_LARGE_LV60K.__doc__ = """Build "large-lv60k" wav2vec2 model.

Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`].
Not fine-tuned.

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
"""

WAV2VEC2_ASR_LARGE_LV60K_10M = Wav2Vec2ASRBundle(
    "wav2vec2_fairseq_large_lv60k_asr_ll10m.pth",
    _base_params(_LARGE_LV60K, aux_num_out=29),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_LV60K_10M.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module

Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`], and
fine-tuned for ASR on 10 minutes of transcribed audio from
the same dataset ("train-10min" subset).

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

WAV2VEC2_ASR_LARGE_LV60K_100H = Wav2Vec2ASRBundle(
    "wav2vec2_fairseq_large_lv60k_asr_ls100.pth",
    _base_params(_LARGE_LV60K, aux_num_out=29),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_LV60K_100H.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module

Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`], and
fine-tuned for ASR on 100 hours of transcribed audio from
*LibriSpeech* dataset [:footcite:`7178964`] ("train-clean-100" subset).

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

WAV2VEC2_ASR_LARGE_LV60K_960H = Wav2Vec2ASRBundle(
    "wav2vec2_fairseq_large_lv60k_asr_ls960.pth",
    _base_params(_LARGE_LV60K, aux_num_out=29),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_LV60K_960H.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module

Pre-trained on 60,000 hours of unlabeled audio from *Libri-Light*
[:footcite:`librilight`] dataset, and
fine-tuned for ASR on 960 hours of transcribed audio from
*LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").

Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

WAV2VEC2_XLSR53 = Wav2Vec2Bundle(
    "wav2vec2_fairseq_large_xlsr53.pth",
    _base_params(
        _LARGE_LV60K,
        encoder_projection_dropout=0.0,
        encoder_ff_interm_dropout=0.0,
        encoder_layer_drop=0.0,
    ),
    _sample_rate=16000,
)
WAV2VEC2_XLSR53.__doc__ = """wav2vec 2.0 model with "Large" configuration.

Trained on 56,000 hours of unlabeled audio from multiple datasets (
*Multilingual LibriSpeech* [:footcite:`Pratap_2020`],
*CommonVoice* [:footcite:`ardila2020common`] and
*BABEL* [:footcite:`Gales2014SpeechRA`]).
Not fine-tuned.

Originally published by the authors of
*Unsupervised Cross-lingual Representation Learning for Speech Recognition*
[:footcite:`conneau2020unsupervised`] under MIT License and redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
zhubert_fairseq_base_ls960.ptha  HuBERT model with "Base" configuration.

Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").
Not fine-tuned.

Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
zhubert_fairseq_large_ll60k.pthap  HuBERT model with "Large" configuration.

Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`].
Not fine-tuned.

Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
zhubert_fairseq_xlarge_ll60k.pthi   0   i   av  HuBERT model with "Extra Large" configuration.

Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`].
Not fine-tuned.

Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
"""

HUBERT_ASR_LARGE = Wav2Vec2ASRBundle(
    "hubert_fairseq_large_ll60k_asr_ls960.pth",
    _base_params(
        _HUBERT_LARGE,
        encoder_ff_interm_dropout=0.1,
        encoder_layer_drop=0.1,
        aux_num_out=29,
    ),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
)
HUBERT_ASR_LARGE.__doc__ = """HuBERT model with "Large" configuration.

Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`], and
fine-tuned for ASR on 960 hours of transcribed audio from
*LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").

Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

HUBERT_ASR_XLARGE = Wav2Vec2ASRBundle(
    "hubert_fairseq_xlarge_ll60k_asr_ls960.pth",
    _base_params(
        _HUBERT_XLARGE,
        encoder_ff_interm_dropout=0.1,
        encoder_layer_drop=0.1,
        aux_num_out=29,
    ),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
)
HUBERT_ASR_XLARGE.__doc__ = """HuBERT model with "Extra Large" configuration.

Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`], and
fine-tuned for ASR on 960 hours of transcribed audio from
*LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").

Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

VOXPOPULI_ASR_BASE_10K_DE = Wav2Vec2ASRBundle(
    "wav2vec2_voxpopuli_base_10k_asr_de.pt",
    _base_params(_VOXPOPULI, aux_num_out=32),
    _labels=utils._get_de_labels(),
    _sample_rate=16000,
    _remove_aux_axis=(1, 2, 3, 35),
)
VOXPOPULI_ASR_BASE_10K_DE.__doc__ = """wav2vec 2.0 model with "Base" configuration.

Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`]
("10k" subset, consisting of 23 languages).
Fine-tuned for ASR on 282 hours of transcribed audio from "de" subset.

Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and
redistributed with the same license.
[`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__,
`Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

VOXPOPULI_ASR_BASE_10K_EN = Wav2Vec2ASRBundle(
    "wav2vec2_voxpopuli_base_10k_asr_en.pt",
    _base_params(_VOXPOPULI, aux_num_out=28),
    _labels=utils._get_vp_en_labels(),
    _sample_rate=16000,
    _remove_aux_axis=(1, 2, 3, 21),
)
VOXPOPULI_ASR_BASE_10K_EN.__doc__ = """wav2vec 2.0 model with "Base" configuration.

Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`]
("10k" subset, consisting of 23 languages).
Fine-tuned for ASR on 543 hours of transcribed audio from "en" subset.

Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and
redistributed with the same license.
[`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__,
`Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

VOXPOPULI_ASR_BASE_10K_ES = Wav2Vec2ASRBundle(
    "wav2vec2_voxpopuli_base_10k_asr_es.pt",
    _base_params(_VOXPOPULI, aux_num_out=35),
    _labels=utils._get_es_labels(),
    _sample_rate=16000,
    _remove_aux_axis=(1, 2, 3, 35),
)
VOXPOPULI_ASR_BASE_10K_ES.__doc__ = """wav2vec 2.0 model with "Base" configuration.

Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`]
("10k" subset, consisting of 23 languages).
Fine-tuned for ASR on 166 hours of transcribed audio from "es" subset.

Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and
redistributed with the same license.
[`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__,
`Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

VOXPOPULI_ASR_BASE_10K_FR = Wav2Vec2ASRBundle(
    "wav2vec2_voxpopuli_base_10k_asr_fr.pt",
    _base_params(_VOXPOPULI, aux_num_out=43),
    _labels=utils._get_fr_labels(),
    _sample_rate=16000,
)
VOXPOPULI_ASR_BASE_10K_FR.__doc__ = """wav2vec 2.0 model with "Base" configuration.

Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`]
("10k" subset, consisting of 23 languages).
Fine-tuned for ASR on 211 hours of transcribed audio from "fr" subset.

Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and
redistributed with the same license.
[`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__,
`Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""

VOXPOPULI_ASR_BASE_10K_IT = Wav2Vec2ASRBundle(
    "wav2vec2_voxpopuli_base_10k_asr_it.pt",
    _base_params(_VOXPOPULI, aux_num_out=37),
    _labels=utils._get_it_labels(),
    _sample_rate=16000,
)
VOXPOPULI_ASR_BASE_10K_IT.__doc__ = """wav2vec 2.0 model with "Base" configuration.

Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`]
("10k" subset, consisting of 23 languages).
Fine-tuned for ASR on 91 hours of transcribed audio from "it" subset.

Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and
redistributed with the same license.
[`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__,
`Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__]

Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
)/Zdataclassesr   typingr   r   r   r3   Ztorchaudio._internalr   Ztorchaudio.modelsr   r    r