from typing import List, Optional, Tuple

import torch
from torch import Tensor
from torch.nn import Module

from . import components


class Wav2Vec2Model(Module):
    """torchaudio.models.Wav2Vec2Model(feature_extractor: torch.nn.Module, encoder: torch.nn.Module, aux: Optional[torch.nn.Module] = None)

    Encoder model used in *wav2vec 2.0* [:footcite:`baevski2020wav2vec`].

    Note:
        To build the model, please use one of the factory functions.

    Args:
        feature_extractor (torch.nn.Module):
            Feature extractor that extracts feature vectors from raw audio Tensor.

        encoder (torch.nn.Module):
            Encoder that converts the audio features into the sequence of probability
            distribution (in negative log-likelihood) over labels.

        aux (torch.nn.Module or None, optional):
            Auxiliary module. If provided, the output from encoder is passed to this module.
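
    Example:
        Illustrative sketch, not part of the original documentation; it assumes the
        factory functions defined later in this module and randomly initialized weights::

            >>> model = wav2vec2_base(aux_num_out=29)
            >>> # The model is annotated for TorchScript; scripting is a sketch of intended use.
            >>> scripted = torch.jit.script(model)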
    Nfeature_extractorencoderauxc                    s    t    || _|| _|| _d S )N)super__init__r   r   r   )selfr   r   r   	__class__ D/tmp/pip-unpacked-wheel-lbdmvq91/torchaudio/models/wav2vec2/model.pyr      s    
zWav2Vec2Model.__init__)	waveformslengths
num_layersreturnc                 C   s(   |  ||\}}| j|||}||fS )a%  Extract feature vectors from raw waveforms

        This returns the list of outputs from the intermediate layers of
        transformer block in encoder.

        Args:
            waveforms (Tensor): Audio tensor of shape `(batch, frames)`.
            lengths (Tensor or None, optional):
                Indicates the valid length of each audio in the batch.
                Shape: `(batch, )`.
                When the ``waveforms`` contains audios with different durations,
                by providing the ``lengths`` argument, the model will compute
                the corresponding valid output lengths and apply proper mask in
                transformer attention layer.
                If ``None``, it is assumed that the entire audio waveform
                length is valid.
            num_layers (int or None, optional):
                If given, limit the number of intermediate layers to go through.
                Providing `1` will stop the computation after going through one
                intermediate layer. If not given, the outputs from all the
                intermediate layers are returned.

        Returns:
            (List[Tensor], Optional[Tensor]):
            List of Tensors
                Features from requested layers.
                Each Tensor is of shape: `(batch, time frame, feature dimension)`
            Tensor or None
                If ``lengths`` argument was provided, a Tensor of shape `(batch, )`
                is returned.
                It indicates the valid length in time axis of each feature Tensor.
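
        Example:
            Illustrative sketch, not part of the original documentation; it assumes
            16 kHz mono input and a randomly initialized "base" model::

                >>> model = wav2vec2_base(aux_num_out=None)
                >>> waveforms = torch.randn(2, 16000)            # (batch, frames)
                >>> lengths = torch.tensor([16000, 12000])
                >>> feats, out_lens = model.extract_features(waveforms, lengths, num_layers=6)
                >>> # feats is a list of 6 Tensors, one per requested transformer layer,
                >>> # each of shape (batch, time frame, 768) for the "base" architecture.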
        """
        x, lengths = self.feature_extractor(waveforms, lengths)
        x = self.encoder.extract_features(x, lengths, num_layers)
        return x, lengths

    def forward(
        self,
        waveforms: Tensor,
        lengths: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Compute the sequence of probability distribution over labels.

        Args:
            waveforms (Tensor): Audio tensor of shape `(batch, frames)`.
            lengths (Tensor or None, optional):
                Indicates the valid length of each audio in the batch.
                Shape: `(batch, )`.
                When the ``waveforms`` contains audios with different durations,
                by providing the ``lengths`` argument, the model will compute
                the corresponding valid output lengths and apply proper mask in
                transformer attention layer.
                If ``None``, it is assumed that all the audio in ``waveforms``
                have valid length. Default: ``None``.

        Returns:
            (Tensor, Optional[Tensor]):
            Tensor
                The sequences of probability distribution (in logit) over labels.
                Shape: `(batch, frames, num labels)`.
            Tensor or None
                If ``lengths`` argument was provided, a Tensor of shape `(batch, )`
                is returned.
                It indicates the valid length in time axis of the output Tensor.
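
        Example:
            Illustrative sketch, not part of the original documentation; ``aux_num_out``
            is set so the auxiliary linear layer produces per-frame label logits::

                >>> model = wav2vec2_base(aux_num_out=29)
                >>> waveforms = torch.randn(2, 16000)
                >>> lengths = torch.tensor([16000, 12000])
                >>> emissions, out_lens = model(waveforms, lengths)
                >>> # emissions has shape (batch, time frame, 29); out_lens gives the
                >>> # number of valid output frames per utterance.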
        """
        x, lengths = self.feature_extractor(waveforms, lengths)
        x = self.encoder(x, lengths)
        if self.aux is not None:
            x = self.aux(x)
        return x, lengths


class HuBERTPretrainModel(Module):
    """HuBERT pre-train model for training from scratch.

    Note:
        To build the model, please use one of the factory functions in
            `[hubert_pretrain_base, hubert_pretrain_large, hubert_pretrain_xlarge]`.

    Args:
        wav2vec2 (Wav2Vec2Model):
            Wav2Vec2 encoder model (feature extractor and transformer encoder) whose
            output is fed to the mask generator and the logit generator.

        mask_generator (torch.nn.Module):
            Mask generator that generates the mask for masked prediction during the training.

        logit_generator (torch.nn.Module):
            Logit generator that predicts the logits of the masked and unmasked inputs.

        feature_grad_mult (float or None):
            The factor to scale the convolutional feature extraction layer gradients by.
            If ``None``, the gradients of feature extraction layers are not affected.
            The scale factor will not affect the forward pass.
    wav2vec2mask_generatorlogit_generatorfeature_grad_multc                    sR   t    || _|| _|| _|d ksHd|  k r8dk sHn td| || _d S )N              ?zKThe value of `feature_grad_mult` must be ``None`` or between (0, 1). Found )r   r   r&   r'   r(   AssertionErrorr)   )r   r&   r'   r(   r)   r   r   r   r      s    
 
zHuBERTPretrainModel.__init__N)r   labelsaudio_lengthsr   c                 C   s  | j ||\}}| jdk	r6| jdk r6tj|| j}| d }|dk	r^t	||}nd}| j j
||\}}| ||\}}	| j j
j||d}|jd |jd kstd|dk	rt| |	}
t| |
 }n
|	}
|
 }| |||
|\}}|||fS )a  Compute the sequence of probability distribution over labels.

        Args:
            waveforms (Tensor): Audio tensor of dimension `[batch, frames]`.
            labels (Tensor): Label for pre-training. A Tensor of dimension `[batch, frames]`.
            audio_lengths (Tensor or None, optional):
                Indicates the valid length of each audio in the batch.
                Shape: `[batch, ]`.
                When the ``waveforms`` contains audios with different durations,
                by providing the ``audio_lengths`` argument, the model will compute
                the corresponding valid output lengths and apply proper mask in
                transformer attention layer.
                If ``None``, it is assumed that all the audio in ``waveforms``
                have valid length. Default: ``None``.

        Returns:
            (Tensor, Tensor, Tensor):
            Tensor
                The masked sequences of probability distribution (in logit).
                Shape: `(masked_frames, num labels)`.
            Tensor
                The unmasked sequence of probability distribution (in logit).
                Shape: `(unmasked_frames, num labels)`.
            Tensor
                The feature mean value for additional penalty loss.
                Shape: `(1,)`.
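
        Example:
            Illustrative sketch, not part of the original documentation; the label tensor
            must be aligned to the ~50 Hz frame rate of the feature extractor output::

                >>> model = hubert_pretrain_base(num_classes=100)
                >>> waveforms = torch.randn(2, 16000)
                >>> audio_lengths = torch.tensor([16000, 12000])
                >>> labels = torch.randint(0, 100, (2, 49))      # 49 frames for 16000 samples
                >>> logit_m, logit_u, feature_pen = model(waveforms, labels, audio_lengths)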
        """
        x, lengths = self.wav2vec2.feature_extractor(waveforms, audio_lengths)
        if self.feature_grad_mult is not None and self.feature_grad_mult < 1.0:
            x = components.GradMultiply.apply(x, self.feature_grad_mult)
        features_pen = x.float().pow(2).mean()
        if lengths is not None:
            padding_mask = components._get_padding_mask(x, lengths)
        else:
            padding_mask = None
        x, attention_mask = self.wav2vec2.encoder._preprocess(x, lengths)
        x, mask = self.mask_generator(x, padding_mask)
        x = self.wav2vec2.encoder.transformer(x, attention_mask=attention_mask)
        assert x.shape[1] == labels.shape[1], "The length of label must match that of HuBERT model output"
        if padding_mask is not None:
            mask_m = torch.logical_and(~padding_mask, mask)
            mask_u = torch.logical_and(~padding_mask, ~mask_m)
        else:
            mask_m = mask
            mask_u = ~mask_m
        logit_m, logit_u = self.logit_generator(x, labels, mask_m, mask_u)
        return logit_m, logit_u, features_pen


def wav2vec2_model(
    extractor_mode: str,
    extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]],
    extractor_conv_bias: bool,
    encoder_embed_dim: int,
    encoder_projection_dropout: float,
    encoder_pos_conv_kernel: int,
    encoder_pos_conv_groups: int,
    encoder_num_layers: int,
    encoder_num_heads: int,
    encoder_attention_dropout: float,
    encoder_ff_interm_features: int,
    encoder_ff_interm_dropout: float,
    encoder_dropout: float,
    encoder_layer_norm_first: bool,
    encoder_layer_drop: float,
    aux_num_out: Optional[int],
) -> Wav2Vec2Model:
    """wav2vec2_model(extractor_mode: str, extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], extractor_conv_bias: bool, encoder_embed_dim: int, encoder_projection_dropout: float, encoder_pos_conv_kernel: int, encoder_pos_conv_groups: int, encoder_num_layers: int, encoder_num_heads: int, encoder_attention_dropout: float, encoder_ff_interm_features: int, encoder_ff_interm_dropout: float, encoder_dropout: float, encoder_layer_norm_first: bool, encoder_layer_drop: float, aux_num_out: Optional[int]) -> torchaudio.models.Wav2Vec2Model

    Build a custom Wav2Vec2Model

    Note:
        The "feature extractor" below corresponds to
        `ConvFeatureExtractionModel <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L736>`__
        in the original ``fairseq`` implementation.
        This is referred to as "(convolutional) feature encoder" in the *wav2vec 2.0*
        [:footcite:`baevski2020wav2vec`] paper.

        The "encoder" below corresponds to `TransformerEncoder <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L817>`__,
        and this is referred to as "Transformer" in the paper.

    Args:
        extractor_mode (str): Operation mode of feature extractor.
            Valid values are ``"group_norm"`` or ``"layer_norm"``.
            If ``"group_norm"``, then a single normalization is applied
            in the first convolution block. Otherwise, all the convolution
            blocks will have layer normalization.

            This option corresponds to ``extractor_mode`` from ``fairseq``.
        extractor_conv_layer_config (list of integer tuples or None):
            Configuration of convolution layers in feature extractor.
            List of convolution configuration,
            i.e. ``[(output_channel, kernel_size, stride), ...]``

            If ``None`` is provided, then the following default value is used.

            .. code-block:: python

               [
                 (512, 10, 5),
                 (512, 3, 2),
                 (512, 3, 2),
                 (512, 3, 2),
                 (512, 3, 2),
                 (512, 2, 2),
                 (512, 2, 2),
               ]

            This option corresponds to ``conv_feature_layers`` from ``fairseq``.

        extractor_conv_bias (bool):
            Whether to include a bias term in each convolution operation.

            This option corresponds to ``conv_bias`` from ``fairseq``.

        encoder_embed_dim (int):
            The dimension of embedding in encoder.

            This option corresponds to ``encoder_embed_dim`` from ``fairseq``.

        encoder_projection_dropout (float):
            The dropout probability applied after the input feature is projected
            to ``encoder_embed_dim``.

            This option corresponds to ``dropout_input`` from ``fairseq``.

        encoder_pos_conv_kernel (int):
            The kernel size of convolutional positional embeddings.

            This option corresponds to ``conv_pos`` from ``fairseq``.

        encoder_pos_conv_groups (int):
            The number of groups of convolutional positional embeddings.

            This option corresponds to ``conv_pos_groups`` from ``fairseq``.

        encoder_num_layers (int):
            The number of self attention layers in transformer block.

            This option corresponds to ``encoder_layers`` from ``fairseq``.

        encoder_num_heads (int):
            The number of heads in self attention layers.

            This option corresponds to ``encoder_attention_heads`` from ``fairseq``.

        encoder_attention_dropout (float):
            The dropout probability applied after softmax in self-attention layer.

            This option corresponds to ``attention_dropout`` from ``fairseq``.

        encoder_ff_interm_features (int):
            The dimension of hidden features in feed forward layer.

            This option corresponds to ``encoder_ffn_embed_dim`` from ``fairseq``.

        encoder_ff_interm_dropout (float):
            The dropout probability applied in feedforward layer.

            This option corresponds to ``activation_dropout`` from ``fairseq``.

        encoder_dropout (float):
            The dropout probability applied at the end of feed forward layer.

            This option corresponds to ``dropout`` from ``fairseq``.

        encoder_layer_norm_first (bool):
            Control the order of layer norm in transformer layer and each encoder layer.
            If True, in transformer layer, layer norm is applied before features are fed
            to encoder layers. In encoder layer, two layer norms are applied before and after
            self attention.
            If False, in transformer layer, layer norm is applied after features are fed
            to encoder layers. In encoder layer, two layer norms are applied after self
            attention, before and after feed forward.

            This option corresponds to ``layer_norm_first`` from ``fairseq``.

        encoder_layer_drop (float):
            Probability to drop each encoder layer during training.

            This option corresponds to ``layerdrop`` from ``fairseq``.

        aux_num_out (int or None):
            When provided, attach an extra linear layer on top of encoder, which can be
            used for fine-tuning.

    Returns:
        Wav2Vec2Model:
            The resulting model.
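
    Example:
        Illustrative sketch, not part of the original documentation, reproducing the
        "base" configuration explicitly; :py:func:`wav2vec2_base` wraps the same values::

            >>> model = wav2vec2_model(
            ...     extractor_mode="group_norm",
            ...     extractor_conv_layer_config=None,
            ...     extractor_conv_bias=False,
            ...     encoder_embed_dim=768,
            ...     encoder_projection_dropout=0.1,
            ...     encoder_pos_conv_kernel=128,
            ...     encoder_pos_conv_groups=16,
            ...     encoder_num_layers=12,
            ...     encoder_num_heads=12,
            ...     encoder_attention_dropout=0.1,
            ...     encoder_ff_interm_features=3072,
            ...     encoder_ff_interm_dropout=0.1,
            ...     encoder_dropout=0.1,
            ...     encoder_layer_norm_first=False,
            ...     encoder_layer_drop=0.1,
            ...     aux_num_out=None,
            ... )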
    N   
      rH      r/      rH   r/   r/   r/   r   in_featuresZ	embed_dimZdropout_inputZpos_conv_kernelZpos_conv_groupsr   Z	num_headsZattention_dropoutZff_interm_featuresZff_interm_dropoutZdropoutZlayer_norm_firstZ
layer_drop)rQ   Zout_features)r   _get_feature_extractor_get_encoderr    nnZLinearr	   )r7   r8   r9   r:   r;   r<   r=   r>   r?   r@   rA   rB   rC   rD   rE   rF   r   r   r   r   r   r   wav2vec2_model   s6       
rU   皙?)r;   r@   rB   rC   rE   rF   r   c                 C   s(   t dddd| dddd|d||d||d	S )
a  wav2vec2_base(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model

    Build Wav2Vec2Model with "base" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
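
    Example:
        Illustrative sketch, not part of the original documentation; the checkpoint path
        is hypothetical and only indicates where fine-tuned weights would be loaded::

            >>> model = wav2vec2_base(aux_num_out=32)
            >>> # model.load_state_dict(torch.load("wav2vec2_base_asr.pt"))  # hypothetical file
            >>> waveforms = torch.randn(1, 16000)
            >>> emissions, _ = model(waveforms)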
    
    """
    return wav2vec2_model(
        extractor_mode="group_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=768,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=12,
        encoder_num_heads=12,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=3072,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=False,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
    )


def wav2vec2_large(
    encoder_projection_dropout: float = 0.1,
    encoder_attention_dropout: float = 0.1,
    encoder_ff_interm_dropout: float = 0.1,
    encoder_dropout: float = 0.1,
    encoder_layer_drop: float = 0.1,
    aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
    """wav2vec2_large(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model

    Build Wav2Vec2Model with "large" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """
    return wav2vec2_model(
        extractor_mode="group_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=1024,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=24,
        encoder_num_heads=16,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=4096,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=False,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
    )


def wav2vec2_large_lv60k(
    encoder_projection_dropout: float = 0.1,
    encoder_attention_dropout: float = 0.0,
    encoder_ff_interm_dropout: float = 0.1,
    encoder_dropout: float = 0.0,
    encoder_layer_drop: float = 0.1,
    aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
    """wav2vec2_large_lv60k(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model

    Build Wav2Vec2Model with "large lv-60k" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    
    """
    return wav2vec2_model(
        extractor_mode="layer_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=True,
        encoder_embed_dim=1024,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=24,
        encoder_num_heads=16,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=4096,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=True,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
    )


def hubert_base(
    encoder_projection_dropout: float = 0.1,
    encoder_attention_dropout: float = 0.1,
    encoder_ff_interm_dropout: float = 0.0,
    encoder_dropout: float = 0.1,
    encoder_layer_drop: float = 0.05,
    aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
    """hubert_base(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.05, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model

    Build HuBERT model with "base" architecture from *HuBERT* [:footcite:`hsu2021hubert`]

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """
    return wav2vec2_model(
        extractor_mode="group_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=768,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=12,
        encoder_num_heads=12,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=3072,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=False,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
    )


def hubert_large(
    encoder_projection_dropout: float = 0.0,
    encoder_attention_dropout: float = 0.0,
    encoder_ff_interm_dropout: float = 0.0,
    encoder_dropout: float = 0.0,
    encoder_layer_drop: float = 0.0,
    aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
    """hubert_large(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model

    Build HuBERT model with "large" architecture from *HuBERT* [:footcite:`hsu2021hubert`]

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """
    return wav2vec2_model(
        extractor_mode="layer_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=1024,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=24,
        encoder_num_heads=16,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=4096,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=True,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
    )


def hubert_xlarge(
    encoder_projection_dropout: float = 0.0,
    encoder_attention_dropout: float = 0.0,
    encoder_ff_interm_dropout: float = 0.0,
    encoder_dropout: float = 0.0,
    encoder_layer_drop: float = 0.0,
    aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
    """hubert_xlarge(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model

    Build HuBERT model with "extra large" architecture from *HuBERT* [:footcite:`hsu2021hubert`]

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """
    return wav2vec2_model(
        extractor_mode="layer_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=1280,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=48,
        encoder_num_heads=16,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=5120,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=True,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
    )


def hubert_pretrain_model(
    extractor_mode: str,
    extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]],
    extractor_conv_bias: bool,
    encoder_embed_dim: int,
    encoder_projection_dropout: float,
    encoder_pos_conv_kernel: int,
    encoder_pos_conv_groups: int,
    encoder_num_layers: int,
    encoder_num_heads: int,
    encoder_attention_dropout: float,
    encoder_ff_interm_features: int,
    encoder_ff_interm_dropout: float,
    encoder_dropout: float,
    encoder_layer_norm_first: bool,
    encoder_layer_drop: float,
    mask_prob: float,
    mask_selection: str,
    mask_other: float,
    mask_length: int,
    no_mask_overlap: bool,
    mask_min_space: int,
    mask_channel_prob: float,
    mask_channel_selection: str,
    mask_channel_other: float,
    mask_channel_length: int,
    no_mask_channel_overlap: bool,
    mask_channel_min_space: int,
    skip_masked: bool,
    skip_nomask: bool,
    num_classes: int,
    final_dim: int,
    feature_grad_mult: Optional[float],
) -> HuBERTPretrainModel:
    """hubert_pretrain_model(extractor_mode: str, extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], extractor_conv_bias: bool, encoder_embed_dim: int, encoder_projection_dropout: float, encoder_pos_conv_kernel: int, encoder_pos_conv_groups: int, encoder_num_layers: int, encoder_num_heads: int, encoder_attention_dropout: float, encoder_ff_interm_features: int, encoder_ff_interm_dropout: float, encoder_dropout: float, encoder_layer_norm_first: bool, encoder_layer_drop: float, mask_prob: float, mask_selection: str, mask_other: float, mask_length: int, no_mask_overlap: bool, mask_min_space: int, mask_channel_prob: float, mask_channel_selection: str, mask_channel_other: float, mask_channel_length: int, no_mask_channel_overlap: bool, mask_channel_min_space: int, skip_masked: bool, skip_nomask: bool, num_classes: int, final_dim: int) -> torchaudio.models.HuBERTPretrainModel

    Build a custom HuBERTPretrainModel for training from scratch

    Note:
        The "feature extractor" below corresponds to
        `ConvFeatureExtractionModel <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L736>`__
        in the original ``fairseq`` implementation.
        This is referred to as "(convolutional) feature encoder" in the *wav2vec 2.0*
        [:footcite:`baevski2020wav2vec`] paper.

        The "encoder" below corresponds to `TransformerEncoder <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L817>`__,
        and this is referred to as "Transformer" in the paper.

    Args:
        extractor_mode (str): Operation mode of feature extractor.
            Valid values are ``"group_norm"`` or ``"layer_norm"``.
            If ``"group_norm"``, then a single normalization is applied
            in the first convolution block. Otherwise, all the convolution
            blocks will have layer normalization.

            This option corresponds to ``extractor_mode`` from ``fairseq``.

        extractor_conv_layer_config (list of integer tuples or None):
            Configuration of convolution layers in feature extractor.
            List of convolution configuration,
            i.e. ``[(output_channel, kernel_size, stride), ...]``

            If ``None`` is provided, then the following default value is used.

            .. code-block:: python

               [
                 (512, 10, 5),
                 (512, 3, 2),
                 (512, 3, 2),
                 (512, 3, 2),
                 (512, 3, 2),
                 (512, 2, 2),
                 (512, 2, 2),
               ]

            This option corresponds to ``conv_feature_layers`` from ``fairseq``.

        extractor_conv_bias (bool):
            Whether to include a bias term in each convolution operation.

            This option corresponds to ``conv_bias`` from ``fairseq``.

        encoder_embed_dim (int):
            The dimension of embedding in encoder.

            This option corresponds to ``encoder_embed_dim`` from ``fairseq``.

        encoder_projection_dropout (float):
            The dropout probability applied after the input feature is projected
            to ``encoder_embed_dim``.

            This option corresponds to ``dropout_input`` from ``fairseq``.

        encoder_pos_conv_kernel (int):
            The kernel size of convolutional positional embeddings.

            This option corresponds to ``conv_pos`` from ``fairseq``.

        encoder_pos_conv_groups (int):
            The number of groups of convolutional positional embeddings.

            This option corresponds to ``conv_pos_groups`` from ``fairseq``.

        encoder_num_layers (int):
            The number of self attention layers in transformer block.

            This option corresponds to ``encoder_layers`` from ``fairseq``.

        encoder_num_heads (int):
            The number of heads in self attention layers.

            This option corresponds to ``encoder_attention_heads`` from ``fairseq``.

        encoder_attention_dropout (float):
            The dropout probability applied after softmax in self-attention layer.

            This option corresponds to ``attention_dropout`` from ``fairseq``.

        encoder_ff_interm_features (int):
            The dimension of hidden features in feed forward layer.

            This option corresponds to ``encoder_ffn_embed_dim`` from ``fairseq``.

        encoder_ff_interm_dropout (float):
            The dropout probability applied in feedforward layer.

            This option corresponds to ``activation_dropout`` from ``fairseq``.

        encoder_dropout (float):
            The dropout probability applied at the end of feed forward layer.

            This option corresponds to ``dropout`` from ``fairseq``.

        encoder_layer_norm_first (bool):
            Control the order of layer norm in transformer layer and each encoder layer.
            If True, in transformer layer, layer norm is applied before features are fed
            to encoder layers. In encoder layer, two layer norms are applied before and after
            self attention.
            If False, in transformer layer, layer norm is applied after features are fed
            to encoder layers. In encoder layer, two layer norms are applied after self
            attention, before and after feed forward.

            This option corresponds to ``layer_norm_first`` from ``fairseq``.

        encoder_layer_drop (float):
            Probability to drop each encoder layer during training.

            This option corresponds to ``layerdrop`` from ``fairseq``.

        mask_prob (float):
            Probability for each token to be chosen as start of the span to be masked. This will be multiplied by
            number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
            However, due to overlaps, the actual number will be smaller (unless no_overlap is True).

            This option corresponds to ``mask_prob`` from ``fairseq``.

        mask_selection (str):
            How to choose the mask length. Options: [``static``, ``uniform``, ``normal``, ``poisson``].

            This option corresponds to ``mask_selection`` from ``fairseq``.

        mask_other (float):
            Secondary mask argument (used for more complex distributions).

            This option corresponds to ``mask_other`` from ``fairseq``.

        mask_length (int):
            The lengths of the mask.

            This option corresponds to ``mask_length`` from ``fairseq``.

        no_mask_overlap (bool):
            Whether to allow masks to overlap.

            This option corresponds to ``no_mask_overlap`` from ``fairseq``.

        mask_min_space (int):
            Minimum space between spans (if no overlap is enabled).

            This option corresponds to ``mask_min_space`` from ``fairseq``.

        mask_channel_prob (float):
            The probability of replacing a feature with 0.

            This option corresponds to ``mask_channel_prob`` from ``fairseq``.

        mask_channel_selection (str):
            How to choose the mask length for channel masking. Options: [``static``, ``uniform``, ``normal``, ``poisson``].

            This option corresponds to ``mask_channel_selection`` from ``fairseq``.

        mask_channel_other (float):
            Secondary mask argument for channel masking (used for more complex distributions).

            This option corresponds to ``mask_channel_other`` from ``fairseq``.

        mask_channel_length (int):
            The lengths of the mask for channel masking.

            This option corresponds to ``mask_channel_length`` from ``fairseq``.

        no_mask_channel_overlap (bool):
            Whether to allow channel masks to overlap.

            This option corresponds to ``no_mask_channel_overlap`` from ``fairseq``.

        mask_channel_min_space (int):
            Minimum space between spans for channel masking (if no overlap is enabled).

            This option corresponds to ``mask_channel_min_space`` from ``fairseq``.

        skip_masked (bool):
            If True, skip computing losses over masked frames.

            This option corresponds to ``skip_masked`` from ``fairseq``.

        skip_nomask (bool):
            If True, skip computing losses over unmasked frames.

            This option corresponds to ``skip_nomask`` from ``fairseq``.

        num_classes (int):
            The number of classes in the labels.

        final_dim (int):
            Project final representations and targets to `final_dim`.

            This option corresponds to ``final_dim`` from ``fairseq``.

        feature_grad_mult (float or None):
            The factor to scale the convolutional feature extraction layer gradients by.
            The scale factor will not affect the forward pass.

            This option corresponds to ``feature_grad_mult`` from ``fairseq``.

    Returns:
        HuBERTPretrainModel:
            The resulting model.
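
    Example:
        Illustrative sketch, not part of the original documentation; it uses
        :py:func:`hubert_pretrain_base`, which forwards fixed "base" values to this builder::

            >>> model = hubert_pretrain_base(num_classes=100)
            >>> waveforms = torch.randn(2, 32000)
            >>> labels = torch.randint(0, 100, (2, 99))          # ~50 Hz frame-level labels
            >>> logit_m, logit_u, feature_pen = model(waveforms, labels)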
    """
    if extractor_conv_layer_config is None:
        extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2

    feature_extractor = components._get_feature_extractor(
        extractor_mode, extractor_conv_layer_config, extractor_conv_bias
    )
    encoder = components._get_encoder(
        in_features=extractor_conv_layer_config[-1][0],
        embed_dim=encoder_embed_dim,
        dropout_input=encoder_projection_dropout,
        pos_conv_kernel=encoder_pos_conv_kernel,
        pos_conv_groups=encoder_pos_conv_groups,
        num_layers=encoder_num_layers,
        num_heads=encoder_num_heads,
        attention_dropout=encoder_attention_dropout,
        ff_interm_features=encoder_ff_interm_features,
        ff_interm_dropout=encoder_ff_interm_dropout,
        dropout=encoder_dropout,
        layer_norm_first=encoder_layer_norm_first,
        layer_drop=encoder_layer_drop,
    )
    wav2vec2 = Wav2Vec2Model(feature_extractor, encoder)
    mask_generator = components.MaskGenerator(
        encoder_embed_dim,
        mask_prob,
        mask_selection,
        mask_other,
        mask_length,
        no_mask_overlap,
        mask_min_space,
        mask_channel_prob,
        mask_channel_selection,
        mask_channel_other,
        mask_channel_length,
        no_mask_channel_overlap,
        mask_channel_min_space,
    )
    logit_generator = components.LogitGenerator(
        encoder_embed_dim,
        num_classes,
        final_dim,
        skip_masked,
        skip_nomask,
    )
    return HuBERTPretrainModel(wav2vec2, mask_generator, logit_generator, feature_grad_mult)


def hubert_pretrain_base(
    encoder_projection_dropout: float = 0.1,
    encoder_attention_dropout: float = 0.1,
    encoder_ff_interm_dropout: float = 0.0,
    encoder_dropout: float = 0.1,
    encoder_layer_drop: float = 0.05,
    mask_prob: float = 0.8,
    mask_channel_prob: float = 0.0,
    mask_channel_length: int = 10,
    feature_grad_mult: Optional[float] = 0.1,
    num_classes: int = 100,
) -> HuBERTPretrainModel:
    """hubert_pretrain_base(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.05, mask_prob: float = 0.8, mask_channel_prob: float = 0.0, mask_channel_length: int = 10, feature_grad_mult: Optional[float] = 0.1, num_classes: int = 100) -> torchaudio.models.HuBERTPretrainModel

    Build HuBERTPretrainModel model with "base" architecture from *HuBERT* [:footcite:`hsu2021hubert`]

    Args:
        encoder_projection_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_attention_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_layer_drop (float):
            See :py:func:`hubert_pretrain_model`.
        mask_prob (float):
            See :py:func:`hubert_pretrain_model`.
        mask_channel_prob (float):
            See :py:func:`hubert_pretrain_model`.
        mask_channel_length (int):
            See :py:func:`hubert_pretrain_model`.
        feature_grad_mult (float or None):
            See :py:func:`hubert_pretrain_model`.
        num_classes (int, optional):
            See :py:func:`hubert_pretrain_model`.

    Returns:
        HuBERTPretrainModel:
            The resulting model.
    """
    return hubert_pretrain_model(
        extractor_mode="group_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=768,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=12,
        encoder_num_heads=12,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=3072,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=False,
        encoder_layer_drop=encoder_layer_drop,
        mask_prob=mask_prob,
        mask_selection="static",
        mask_other=0.0,
        mask_length=10,
        no_mask_overlap=False,
        mask_min_space=1,
        mask_channel_prob=mask_channel_prob,
        mask_channel_selection="static",
        mask_channel_other=0.0,
        mask_channel_length=mask_channel_length,
        no_mask_channel_overlap=False,
        mask_channel_min_space=1,
        skip_masked=False,
        skip_nomask=False,
        num_classes=num_classes,
        final_dim=256,
        feature_grad_mult=feature_grad_mult,
    )


def hubert_pretrain_large(
    encoder_projection_dropout: float = 0.0,
    encoder_attention_dropout: float = 0.0,
    encoder_ff_interm_dropout: float = 0.0,
    encoder_dropout: float = 0.0,
    encoder_layer_drop: float = 0.0,
    mask_prob: float = 0.8,
    mask_channel_prob: float = 0.0,
    mask_channel_length: int = 10,
    feature_grad_mult: Optional[float] = None,
) -> HuBERTPretrainModel:
    """hubert_pretrain_large(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, mask_prob: float = 0.8, mask_channel_prob: float = 0.0, mask_channel_length: int = 10, feature_grad_mult: Optional[float] = None) -> torchaudio.models.HuBERTPretrainModel
d|dddddd|d S )ac  hubert_pretrain_large(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, mask_prob: float = 0.8, mask_channel_prob: float = 0.0, mask_channel_length: int = 10, feature_grad_mult: Optional[float] = None) -> torchaudio.models.HuBERTPretrainModel

    Build HuBERTPretrainModel model for pre-training with "large" architecture from *HuBERT* [:footcite:`hsu2021hubert`]

    Args:
        encoder_projection_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_attention_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_layer_drop (float):
            See :py:func:`hubert_pretrain_model`.
        mask_prob (float):
            See :py:func:`hubert_pretrain_model`.
        mask_channel_prob (float):
            See :py:func:`hubert_pretrain_model`.
        mask_channel_length (int):
            See :py:func:`hubert_pretrain_model`.
        feature_grad_mult (float or None):
            See :py:func:`hubert_pretrain_model`.

    Returns:
        HuBERTPretrainModel:
            The resulting model.
    re   NFra   rY   rZ   rb   rc   Tr   r*   rI   r     rX   r   r   	r;   r@   rB   rC   rE   rn   rt   rw   r)   r   r   r   hubert_pretrain_large+  sD    (r   c	           	   "   C   sH   t dddd| dddd|d||d	||d
dddd|d
d|dddddd|d S )aj  hubert_pretrain_xlarge(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, mask_prob: float = 0.8, mask_channel_prob: float = 0.0, mask_channel_length: int = 10, feature_grad_mult: Optional[float] = None) -> torchaudio.models.HuBERTPretrainModel

    Build HuBERTPretrainModel model for pre-training with "extra large" architecture from *HuBERT* [:footcite:`hsu2021hubert`]

    Args:
        encoder_projection_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_attention_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_layer_drop (float):
            See :py:func:`hubert_pretrain_model`.
        mask_prob (float):
            See :py:func:`hubert_pretrain_model`.
        mask_channel_prob (float):
            See :py:func:`hubert_pretrain_model`.
        mask_channel_length (int):
            See :py:func:`hubert_pretrain_model`.
        feature_grad_mult (float or None):
            See :py:func:`hubert_pretrain_model`.

    Returns:
        HuBERTPretrainModel:
            The resulting model.
    """
    return hubert_pretrain_model(
        extractor_mode="layer_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=1280,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=48,
        encoder_num_heads=16,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=5120,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=True,
        encoder_layer_drop=encoder_layer_drop,
        mask_prob=mask_prob,
        mask_selection="static",
        mask_other=0.0,
        mask_length=10,
        no_mask_overlap=False,
        mask_min_space=1,
        mask_channel_prob=mask_channel_prob,
        mask_channel_selection="static",
        mask_channel_other=0.0,
        mask_channel_length=mask_channel_length,
        no_mask_channel_overlap=False,
        mask_channel_min_space=1,
        skip_masked=False,
        skip_nomask=False,
        num_classes=500,
        final_dim=1024,
        feature_grad_mult=feature_grad_mult,
    )