import math
from typing import List, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F


def transform_wb_pesq_range(x: float) -> float:
    """The metric defined by ITU-T P.862 is often called 'PESQ score', which is defined
    for narrow-band signals and has a value range of [-0.5, 4.5] exactly. Here, we use the metric
    defined by ITU-T P.862.2, commonly known as 'wide-band PESQ' and will be referred to as "PESQ score".

    Args:
        x (float): Narrow-band PESQ score.

    Returns:
        (float): Wide-band PESQ score.
    """
    return 0.999 + (4.999 - 0.999) / (1 + math.exp(-1.3669 * x + 3.8224))


PESQRange: Tuple[float, float] = (
    1.0,  # practical lower bound used in place of the narrow-band floor of -0.5
    transform_wb_pesq_range(4.5),
)


class RangeSigmoid(nn.Module):
    def __init__(self, val_range: Tuple[float, float] = (0.0, 1.0)) -> None:
        super(RangeSigmoid, self).__init__()
        assert isinstance(val_range, tuple) and len(val_range) == 2
        self.val_range = val_range
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Squash to (0, 1) with a sigmoid, then rescale to the requested value range.
        out = self.sigmoid(x) * (self.val_range[1] - self.val_range[0]) + self.val_range[0]
        return out


class Encoder(nn.Module):
    """Encoder module that transforms a 1D waveform into a 2D representation.

    Args:
        feat_dim (int, optional): The feature dimension after Encoder module. (Default: 512)
        win_len (int, optional): Kernel size in the Conv1D layer. (Default: 32)
    """

    def __init__(self, feat_dim: int = 512, win_len: int = 32) -> None:
        super(Encoder, self).__init__()

        self.conv1d = nn.Conv1d(1, feat_dim, win_len, stride=win_len // 2, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply waveforms to convolutional layer and ReLU layer.

        Args:
            x (torch.Tensor): Input waveforms. Tensor with dimensions `(batch, time)`.

        Returns:
            (torch.Tensor): Feature Tensor with dimensions `(batch, channel, frame)`.
        """
        out = x.unsqueeze(dim=1)
        out = F.relu(self.conv1d(out))
        return out
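
# Shape note (a sketch, assuming the Conv1d configuration reconstructed above): for an input of
# shape `(batch, time)`, Encoder returns `(batch, feat_dim, frame)` with
# frame = (time - win_len) // (win_len // 2) + 1, i.e. roughly two frames per window, because
# consecutive windows overlap by half a kernel.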


class SingleRNN(nn.Module):
    def __init__(self, rnn_type: str, input_size: int, hidden_size: int, dropout: float = 0.0) -> None:
        super(SingleRNN, self).__init__()

        self.rnn_type = rnn_type
        self.input_size = input_size
        self.hidden_size = hidden_size

        # Bidirectional single-layer RNN (RNN/LSTM/GRU selected by name), followed by a
        # projection back to the input feature size.
        self.rnn = getattr(nn, rnn_type)(
            input_size, hidden_size, 1, dropout=dropout, batch_first=True, bidirectional=True
        )
        self.proj = nn.Linear(hidden_size * 2, input_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Input shape: (batch, seq, dim).
        out, _ = self.rnn(x)
        out = self.proj(out)
        return out


class DPRNN(nn.Module):
    """*Dual-path recurrent neural networks (DPRNN)* :cite:`luo2020dual`.

    Args:
        feat_dim (int, optional): The feature dimension after Encoder module. (Default: 64)
        hidden_dim (int, optional): Hidden dimension in the RNN layer of DPRNN. (Default: 128)
        num_blocks (int, optional): Number of DPRNN layers. (Default: 6)
        rnn_type (str, optional): Type of RNN in DPRNN. Valid options are ["RNN", "LSTM", "GRU"]. (Default: "LSTM")
        d_model (int, optional): The number of expected features in the input. (Default: 256)
        chunk_size (int, optional): Chunk size of input for DPRNN. (Default: 100)
        chunk_stride (int, optional): Stride of chunk input for DPRNN. (Default: 50)
    """

    def __init__(
        self,
        feat_dim: int = 64,
        hidden_dim: int = 128,
        num_blocks: int = 6,
        rnn_type: str = "LSTM",
        d_model: int = 256,
        chunk_size: int = 100,
        chunk_stride: int = 50,
    ) -> None:
        super(DPRNN, self).__init__()

        self.num_blocks = num_blocks

        self.row_rnn = nn.ModuleList([])
        self.col_rnn = nn.ModuleList([])
        self.row_norm = nn.ModuleList([])
        self.col_norm = nn.ModuleList([])
        for _ in range(num_blocks):
            self.row_rnn.append(SingleRNN(rnn_type, feat_dim, hidden_dim))
            self.col_rnn.append(SingleRNN(rnn_type, feat_dim, hidden_dim))
            self.row_norm.append(nn.GroupNorm(1, feat_dim, eps=1e-8))
            self.col_norm.append(nn.GroupNorm(1, feat_dim, eps=1e-8))
        self.conv = nn.Sequential(
            nn.Conv2d(feat_dim, d_model, 1),
            nn.PReLU(),
        )
        self.chunk_size = chunk_size
        self.chunk_stride = chunk_stride

    def pad_chunk(self, x: torch.Tensor) -> Tuple[torch.Tensor, int]:
        # Pad the frame axis so the sequence splits evenly into overlapping chunks.
        seq_len = x.shape[-1]
        rest = self.chunk_size - (self.chunk_stride + seq_len % self.chunk_size) % self.chunk_size
        out = F.pad(x, [self.chunk_stride, rest + self.chunk_stride])
        return out, rest

    def chunking(self, x: torch.Tensor) -> Tuple[torch.Tensor, int]:
        # Split the padded sequence into two half-overlapping sets of chunks and stack them.
        out, rest = self.pad_chunk(x)
        batch_size, feat_dim, seq_len = out.shape

        segments1 = out[:, :, : -self.chunk_stride].contiguous().view(batch_size, feat_dim, -1, self.chunk_size)
        segments2 = out[:, :, self.chunk_stride :].contiguous().view(batch_size, feat_dim, -1, self.chunk_size)
        out = torch.cat([segments1, segments2], dim=3)
        out = out.view(batch_size, feat_dim, -1, self.chunk_size).transpose(2, 3).contiguous()
        return out, rest

    def merging(self, x: torch.Tensor, rest: int) -> torch.Tensor:
        # Overlap-add the chunks back into a single sequence and drop the padding.
        batch_size, dim, _, _ = x.shape
        out = x.transpose(2, 3).contiguous().view(batch_size, dim, -1, self.chunk_size * 2)
        out1 = out[:, :, :, : self.chunk_size].contiguous().view(batch_size, dim, -1)[:, :, self.chunk_stride :]
        out2 = out[:, :, :, self.chunk_size :].contiguous().view(batch_size, dim, -1)[:, :, : -self.chunk_stride]
        out = out1 + out2
        if rest > 0:
            out = out[:, :, :-rest]
        out = out.contiguous()
        return out

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x, rest = self.chunking(x)
        batch_size, _, dim1, dim2 = x.shape
        out = x
        for row_rnn, row_norm, col_rnn, col_norm in zip(self.row_rnn, self.row_norm, self.col_rnn, self.col_norm):
            # Intra-chunk ("row") pass with a residual connection.
            row_in = out.permute(0, 3, 2, 1).contiguous().view(batch_size * dim2, dim1, -1).contiguous()
            row_out = row_rnn(row_in)
            row_out = row_out.view(batch_size, dim2, dim1, -1).permute(0, 3, 2, 1).contiguous()
            row_out = row_norm(row_out)
            out = out + row_out
            # Inter-chunk ("col") pass with a residual connection.
            col_in = out.permute(0, 2, 3, 1).contiguous().view(batch_size * dim1, dim2, -1).contiguous()
            col_out = col_rnn(col_in)
            col_out = col_out.view(batch_size, dim1, dim2, -1).permute(0, 3, 1, 2).contiguous()
            col_out = col_norm(col_out)
            out = out + col_out
        out = self.conv(out)
        out = self.merging(out, rest)
        out = out.transpose(1, 2).contiguous()
        return out


class AutoPool(nn.Module):
    def __init__(self, pool_dim: int = 1) -> None:
        super(AutoPool, self).__init__()
        self.pool_dim: int = pool_dim
        self.softmax = nn.Softmax(dim=pool_dim)
        self.register_parameter("alpha", nn.Parameter(torch.ones(1)))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Learnable softmax pooling: `alpha` interpolates between mean-like and max-like pooling.
        weight = self.softmax(torch.mul(x, self.alpha))
        out = torch.sum(torch.mul(x, weight), dim=self.pool_dim)
        return out


class SquimObjective(nn.Module):
    """Speech Quality and Intelligibility Measures (SQUIM) model that predicts **objective** metric scores
    for speech enhancement (e.g., STOI, PESQ, and SI-SDR).

    Args:
        encoder (torch.nn.Module): Encoder module to transform 1D waveform to 2D feature representation.
        dprnn (torch.nn.Module): DPRNN module to model sequential feature.
        branches (torch.nn.ModuleList): Transformer branches in which each branch estimates one objective metric score.
    """

    def __init__(
        self,
        encoder: nn.Module,
        dprnn: nn.Module,
        branches: nn.ModuleList,
    ):
        super(SquimObjective, self).__init__()
        self.encoder = encoder
        self.dprnn = dprnn
        self.branches = branches

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        """
        Args:
            x (torch.Tensor): Input waveforms. Tensor with dimensions `(batch, time)`.

        Returns:
            List(torch.Tensor): List of score Tensors. Each Tensor is with dimension `(batch,)`.
        """
        if x.ndim != 2:
            raise ValueError(f"The input must be a 2D Tensor. Found dimension {x.ndim}.")
        # Normalize each waveform by its RMS level before feature extraction.
        x = x / (torch.mean(x**2, dim=1, keepdim=True) ** 0.5 * 20)
        out = self.encoder(x)
        out = self.dprnn(out)
        scores = []
        for branch in self.branches:
            scores.append(branch(out).squeeze(dim=1))
        return scores


def _create_branch(d_model: int, nhead: int, metric: str) -> nn.modules.Module:
    """Create branch module after DPRNN model for predicting metric score.

    Args:
        d_model (int): The number of expected features in the input.
        nhead (int): Number of heads in the multi-head attention model.
        metric (str): The metric name to predict.

    Returns:
        (nn.Module): Returned module to predict corresponding metric score.
    """
    layer1 = nn.TransformerEncoderLayer(d_model, nhead, d_model * 4, dropout=0.0, batch_first=True)
    layer2 = AutoPool()
    if metric == "stoi":
        # STOI is bounded in [0, 1], so the head ends with a plain RangeSigmoid.
        layer3 = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.PReLU(),
            nn.Linear(d_model, 1),
            RangeSigmoid(),
        )
    elif metric == "pesq":
        # Wide-band PESQ is bounded by PESQRange defined at the top of this module.
        layer3 = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.PReLU(),
            nn.Linear(d_model, 1),
            RangeSigmoid(val_range=PESQRange),
        )
    else:
        # SI-SDR (and any other unbounded metric) uses an unconstrained regression head.
        layer3 = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.PReLU(),
            nn.Linear(d_model, 1),
        )
    return nn.Sequential(layer1, layer2, layer3)


def squim_objective_model(
    feat_dim: int,
    win_len: int,
    d_model: int,
    nhead: int,
    hidden_dim: int,
    num_blocks: int,
    rnn_type: str,
    chunk_size: int,
    chunk_stride: Optional[int] = None,
) -> SquimObjective:
    """Build a custom :class:`torchaudio.prototype.models.SquimObjective` model.

    Args:
        feat_dim (int): The feature dimension after Encoder module.
        win_len (int): Kernel size in the Encoder module.
        d_model (int): The number of expected features in the input.
        nhead (int): Number of heads in the multi-head attention model.
        hidden_dim (int): Hidden dimension in the RNN layer of DPRNN.
        num_blocks (int): Number of DPRNN layers.
        rnn_type (str): Type of RNN in DPRNN. Valid options are ["RNN", "LSTM", "GRU"].
        chunk_size (int): Chunk size of input for DPRNN.
        chunk_stride (int or None, optional): Stride of chunk input for DPRNN. If ``None``,
            it defaults to ``chunk_size // 2``.
    """
    if chunk_stride is None:
        chunk_stride = chunk_size // 2
    encoder = Encoder(feat_dim, win_len)
    dprnn = DPRNN(feat_dim, hidden_dim, num_blocks, rnn_type, d_model, chunk_size, chunk_stride)
    branches = nn.ModuleList(
        [
            _create_branch(d_model, nhead, "stoi"),
            _create_branch(d_model, nhead, "pesq"),
            _create_branch(d_model, nhead, "sisdr"),
        ]
    )
    return SquimObjective(encoder, dprnn, branches)


def squim_objective_base() -> SquimObjective:
    """Build :class:`torchaudio.prototype.models.SquimObjective` model with default arguments."""
    return squim_objective_model(
        feat_dim=256,
        win_len=64,
        d_model=256,
        nhead=4,
        hidden_dim=256,
        num_blocks=2,
        rnn_type="LSTM",
        chunk_size=71,
    )
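

# Minimal usage sketch (an illustration added for readability, not part of the original
# torchaudio module). It assumes a batch of 16 kHz mono waveforms shaped `(batch, time)`;
# the returned scores follow the branch order built in `squim_objective_model`:
# STOI, PESQ (wide-band), and SI-SDR, each shaped `(batch,)`.
if __name__ == "__main__":
    model = squim_objective_base()
    model.eval()
    waveforms = torch.randn(2, 16000)  # two dummy 1-second clips at an assumed 16 kHz rate
    with torch.no_grad():
        stoi_hyp, pesq_hyp, si_sdr_hyp = model(waveforms)
    print(stoi_hyp.shape, pesq_hyp.shape, si_sdr_hyp.shape)  # each prints torch.Size([2])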