"""This file contains utilities for initializing neural network parameters."""

import math
import warnings
from typing import Optional as _Optional

import torch
from torch import Tensor


def _no_grad_uniform_(tensor, a, b, generator=None):
    with torch.no_grad():
        return tensor.uniform_(a, b, generator=generator)


def _no_grad_normal_(tensor, mean, std, generator=None):
    with torch.no_grad():
        return tensor.normal_(mean, std, generator=generator)


def _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=None):
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
            "The distribution of values may be incorrect.",
            stacklevel=2,
        )

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1, generator=generator)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.0))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor

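# Illustrative note (an editorial addition, not in the upstream file): the
# inverse-CDF transform above means every draw already lies inside [a, b],
# so no rejection loop is needed. A quick REPL check:
#
#     >>> t = torch.empty(10000)
#     >>> _ = _no_grad_trunc_normal_(t, mean=0.0, std=1.0, a=-2.0, b=2.0)
#     >>> bool(t.min() >= -2.0) and bool(t.max() <= 2.0)
#     True
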
def _no_grad_fill_(tensor, val):
    with torch.no_grad():
        return tensor.fill_(val)


def _no_grad_zero_(tensor):
    with torch.no_grad():
        return tensor.zero_()


def calculate_gain(nonlinearity, param=None):
    r"""Return the recommended gain value for the given nonlinearity function.

    The values are as follows:

    ================= ====================================================
    nonlinearity      gain
    ================= ====================================================
    Linear / Identity :math:`1`
    Conv{1,2,3}D      :math:`1`
    Sigmoid           :math:`1`
    Tanh              :math:`\frac{5}{3}`
    ReLU              :math:`\sqrt{2}`
    Leaky Relu        :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
    SELU              :math:`\frac{3}{4}`
    ================= ====================================================

    .. warning::
        In order to implement `Self-Normalizing Neural Networks`_ ,
        you should use ``nonlinearity='linear'`` instead of ``nonlinearity='selu'``.
        This gives the initial weights a variance of ``1 / N``,
        which is necessary to induce a stable fixed point in the forward pass.
        In contrast, the default gain for ``SELU`` sacrifices the normalization
        effect for more stable gradient flow in rectangular layers.

    Args:
        nonlinearity: the non-linear function (`nn.functional` name)
        param: optional parameter for the non-linear function

    Examples:
        >>> gain = nn.init.calculate_gain('leaky_relu', 0.2)  # leaky_relu with negative_slope=0.2

    .. _Self-Normalizing Neural Networks: https://papers.nips.cc/paper/2017/hash/5d44ee6f2c3f71b73125876103c8f6c4-Abstract.html
    """
    linear_fns = [
        "linear",
        "conv1d",
        "conv2d",
        "conv3d",
        "conv_transpose1d",
        "conv_transpose2d",
        "conv_transpose3d",
    ]
    if nonlinearity in linear_fns or nonlinearity == "sigmoid":
        return 1
    elif nonlinearity == "tanh":
        return 5.0 / 3
    elif nonlinearity == "relu":
        return math.sqrt(2.0)
    elif nonlinearity == "leaky_relu":
        if param is None:
            negative_slope = 0.01
        elif (
            not isinstance(param, bool)
            and isinstance(param, int)
            or isinstance(param, float)
        ):
            # True/False are instances of int, hence check above
            negative_slope = param
        else:
            raise ValueError(f"negative_slope {param} not a valid number")
        return math.sqrt(2.0 / (1 + negative_slope**2))
    elif nonlinearity == "selu":
        # Value found empirically
        return 3.0 / 4
    else:
        raise ValueError(f"Unsupported nonlinearity {nonlinearity}")

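# Illustrative usage (an editorial addition, not in the upstream file): the
# table in the docstring can be reproduced directly in a REPL:
#
#     >>> calculate_gain("relu")              # sqrt(2) ~= 1.4142
#     >>> calculate_gain("tanh")              # 5/3 ~= 1.6667
#     >>> calculate_gain("leaky_relu", 0.2)   # sqrt(2 / (1 + 0.2**2)) ~= 1.3868
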
def uniform_(
    tensor: Tensor,
    a: float = 0.0,
    b: float = 1.0,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input Tensor with values drawn from the uniform distribution.

    :math:`\mathcal{U}(a, b)`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        a: the lower bound of the uniform distribution
        b: the upper bound of the uniform distribution
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.uniform_(w)
    """
    if torch.overrides.has_torch_function_variadic(tensor):
        return torch.overrides.handle_torch_function(
            uniform_, (tensor,), tensor=tensor, a=a, b=b, generator=generator
        )
    return _no_grad_uniform_(tensor, a, b, generator)


def normal_(
    tensor: Tensor,
    mean: float = 0.0,
    std: float = 1.0,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input Tensor with values drawn from the normal distribution.

    :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.normal_(w)
    """
    if torch.overrides.has_torch_function_variadic(tensor):
        return torch.overrides.handle_torch_function(
            normal_, (tensor,), tensor=tensor, mean=mean, std=std, generator=generator
        )
    return _no_grad_normal_(tensor, mean, std, generator)


def trunc_normal_(
    tensor: Tensor,
    mean: float = 0.0,
    std: float = 1.0,
    a: float = -2.0,
    b: float = 2.0,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input Tensor with values drawn from a truncated normal distribution.

    The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=generator)


def constant_(tensor: Tensor, val: float) -> Tensor:
    r"""Fill the input Tensor with the value :math:`\text{val}`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        val: the value to fill the tensor with

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.constant_(w, 0.3)
    """
    if torch.overrides.has_torch_function_variadic(tensor):
        return torch.overrides.handle_torch_function(
            constant_, (tensor,), tensor=tensor, val=val
        )
    return _no_grad_fill_(tensor, val)


def ones_(tensor: Tensor) -> Tensor:
    r"""Fill the input Tensor with the scalar value `1`.

    Args:
        tensor: an n-dimensional `torch.Tensor`

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.ones_(w)
    """
    return _no_grad_fill_(tensor, 1.0)


def zeros_(tensor: Tensor) -> Tensor:
    r"""Fill the input Tensor with the scalar value `0`.

    Args:
        tensor: an n-dimensional `torch.Tensor`

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.zeros_(w)
    """
    return _no_grad_zero_(tensor)


def eye_(tensor):
    r"""Fill the 2-dimensional input `Tensor` with the identity matrix.

    Preserves the identity of the inputs in `Linear` layers, where as
    many inputs are preserved as possible.

    Args:
        tensor: a 2-dimensional `torch.Tensor`

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.eye_(w)
    """
    if tensor.ndimension() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    with torch.no_grad():
        torch.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad)
    return tensor


def dirac_(tensor, groups=1):
    r"""Fill the {3, 4, 5}-dimensional input `Tensor` with the Dirac delta function.

    Preserves the identity of the inputs in `Convolutional`
    layers, where as many input channels are preserved as possible. In case
    of groups>1, each group of channels preserves identity.

    Args:
        tensor: a {3, 4, 5}-dimensional `torch.Tensor`
        groups (int, optional): number of groups in the conv layer (default: 1)
    Examples:
        >>> w = torch.empty(3, 16, 5, 5)
        >>> nn.init.dirac_(w)
        >>> w = torch.empty(3, 24, 5, 5)
        >>> nn.init.dirac_(w, 3)
    )         z5Only tensors with 3, 4, or 5 dimensions are supportedr   z!dim 0 must be divisible by groupsr"   r`   r   ra   N)r\   rG   sizer#   r   r	   r3   range)r   groups
dimensionssizesout_chans_per_grpmin_dimgdr   r   r   dirac_$  sL   
"


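# Illustrative check (an editorial addition, not in the upstream file): with
# Dirac-initialized weights, a padding-preserving stride-1 convolution passes
# its first min(out_channels, in_channels) input channels through unchanged:
#
#     >>> import torch.nn.functional as F
#     >>> w = torch.empty(3, 3, 3, 3)
#     >>> _ = dirac_(w)
#     >>> x = torch.randn(1, 3, 8, 8)
#     >>> torch.allclose(F.conv2d(x, w, padding=1), x)
#     True
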
def _calculate_fan_in_and_fan_out(tensor):
    dimensions = tensor.dim()
    if dimensions < 2:
        raise ValueError(
            "Fan in and fan out can not be computed for tensor with fewer than 2 dimensions"
        )

    num_input_fmaps = tensor.size(1)
    num_output_fmaps = tensor.size(0)
    receptive_field_size = 1
    if tensor.dim() > 2:
        # Accumulate the product of the kernel dimensions manually
        for s in tensor.shape[2:]:
            receptive_field_size *= s
    fan_in = num_input_fmaps * receptive_field_size
    fan_out = num_output_fmaps * receptive_field_size

    return fan_in, fan_out

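# Worked example (an editorial addition, not in the upstream file): for a conv
# weight of shape (out_channels=16, in_channels=8, kH=3, kW=3) the receptive
# field size is 3 * 3 = 9, so fan_in = 8 * 9 = 72 and fan_out = 16 * 9 = 144:
#
#     >>> w = torch.empty(16, 8, 3, 3)
#     >>> _calculate_fan_in_and_fan_out(w)
#     (72, 144)
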
def xavier_uniform_(
    tensor: Tensor,
    gain: float = 1.0,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input `Tensor` with values using a Xavier uniform distribution.

    The method is described in `Understanding the difficulty of training
    deep feedforward neural networks` - Glorot, X. & Bengio, Y. (2010).
    The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-a, a)` where

    .. math::
        a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}

    Also known as Glorot initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        gain: an optional scaling factor
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))

    Note:
        Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
        that the weight matrix is used in a transposed manner,
        (i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
        This is important for correct initialization.
        If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
        pass in a transposed weight matrix, i.e. ``nn.init.xavier_uniform_(w.T, ...)``.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    a = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation

    return _no_grad_uniform_(tensor, -a, a, generator)

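# Worked example (an editorial addition, not in the upstream file): for a 2D
# weight of shape (fan_out=30, fan_in=20) and gain=1, the formula gives
# std = sqrt(2 / (20 + 30)) = 0.2 and a uniform bound a = sqrt(3) * 0.2:
#
#     >>> w = torch.empty(30, 20)
#     >>> _ = xavier_uniform_(w)
#     >>> bool(w.abs().max() <= math.sqrt(3.0) * 0.2)
#     True
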
def xavier_normal_(
    tensor: Tensor,
    gain: float = 1.0,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input `Tensor` with values using a Xavier normal distribution.

    The method is described in `Understanding the difficulty of training deep feedforward
    neural networks` - Glorot, X. & Bengio, Y. (2010). The resulting tensor
    will have values sampled from :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::
        \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}

    Also known as Glorot initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        gain: an optional scaling factor
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.xavier_normal_(w)

    Note:
        Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
        that the weight matrix is used in a transposed manner,
        (i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
        This is important for correct initialization.
        If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
        pass in a transposed weight matrix, i.e. ``nn.init.xavier_normal_(w.T, ...)``.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))

    return _no_grad_normal_(tensor, 0.0, std, generator)


def _calculate_correct_fan(tensor, mode):
    mode = mode.lower()
    valid_modes = ["fan_in", "fan_out"]
    if mode not in valid_modes:
        raise ValueError(f"Mode {mode} not supported, please use one of {valid_modes}")

    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    return fan_in if mode == "fan_in" else fan_out


def kaiming_uniform_(
    tensor: Tensor,
    a: float = 0,
    mode: str = "fan_in",
    nonlinearity: str = "leaky_relu",
    generator: _Optional[torch.Generator] = None,
):
    r"""Fill the input `Tensor` with values using a Kaiming uniform distribution.

    The method is described in `Delving deep into rectifiers: Surpassing
    human-level performance on ImageNet classification` - He, K. et al. (2015).
    The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-\text{bound}, \text{bound})` where

    .. math::
        \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}

    Also known as He initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        a: the negative slope of the rectifier used after this layer (only
            used with ``'leaky_relu'``)
        mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
            preserves the magnitude of the variance of the weights in the
            forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
            backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')

    Note:
        Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
        that the weight matrix is used in a transposed manner,
        (i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
        This is important for correct initialization.
        If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
        pass in a transposed weight matrix, i.e. ``nn.init.kaiming_uniform_(w.T, ...)``.
    """
    if torch.overrides.has_torch_function_variadic(tensor):
        return torch.overrides.handle_torch_function(
            kaiming_uniform_,
            (tensor,),
            tensor=tensor,
            a=a,
            mode=mode,
            nonlinearity=nonlinearity,
            generator=generator,
        )

    if 0 in tensor.shape:
        warnings.warn("Initializing zero-element tensors is a no-op")
        return tensor
    fan = _calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    bound = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
    with torch.no_grad():
        return tensor.uniform_(-bound, bound, generator=generator)

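# Worked example (an editorial addition, not in the upstream file): with the
# defaults (a=0, mode='fan_in', nonlinearity='leaky_relu'), the gain is
# sqrt(2 / (1 + 0**2)) = sqrt(2), so for fan_in=50 the uniform bound is
# sqrt(3) * sqrt(2) / sqrt(50) ~= 0.3464:
#
#     >>> w = torch.empty(40, 50)
#     >>> _ = kaiming_uniform_(w)
#     >>> bool(w.abs().max() <= math.sqrt(6.0 / 50))
#     True
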
def kaiming_normal_(
    tensor: Tensor,
    a: float = 0,
    mode: str = "fan_in",
    nonlinearity: str = "leaky_relu",
    generator: _Optional[torch.Generator] = None,
):
    r"""Fill the input `Tensor` with values using a Kaiming normal distribution.

    The method is described in `Delving deep into rectifiers: Surpassing
    human-level performance on ImageNet classification` - He, K. et al. (2015).
    The resulting tensor will have values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::
        \text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}}

    Also known as He initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        a: the negative slope of the rectifier used after this layer (only
            used with ``'leaky_relu'``)
        mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
            preserves the magnitude of the variance of the weights in the
            forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
            backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')

    Note:
        Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
        that the weight matrix is used in a transposed manner,
        (i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
        This is important for correct initialization.
        If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
        pass in a transposed weight matrix, i.e. ``nn.init.kaiming_normal_(w.T, ...)``.
    """
    if 0 in tensor.shape:
        warnings.warn("Initializing zero-element tensors is a no-op")
        return tensor
    fan = _calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    with torch.no_grad():
        return tensor.normal_(0, std, generator=generator)


def orthogonal_(
    tensor,
    gain=1,
    generator: _Optional[torch.Generator] = None,
):
    r"""Fill the input `Tensor` with a (semi) orthogonal matrix.

    Described in `Exact solutions to the nonlinear dynamics of learning in deep
    linear neural networks` - Saxe, A. et al. (2013). The input tensor must have
    at least 2 dimensions, and for tensors with more than 2 dimensions the
    trailing dimensions are flattened.

    Args:
        tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2`
        gain: optional scaling factor
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
        >>> w = torch.empty(3, 5)
        >>> nn.init.orthogonal_(w)
    """
    if tensor.ndimension() < 2:
        raise ValueError("Only tensors with 2 or more dimensions are supported")

    if tensor.numel() == 0:
        # no-op
        return tensor
    rows = tensor.size(0)
    cols = tensor.numel() // rows
    flattened = tensor.new_empty((rows, cols)).normal_(0, 1, generator=generator)

    if rows < cols:
        flattened.t_()

    # Compute the qr factorization
    q, r = torch.linalg.qr(flattened)
    # Make Q uniform: flip signs so the diagonal of R is non-negative
    d = torch.diag(r, 0)
    ph = d.sign()
    q *= ph

    if rows < cols:
        q.t_()

    with torch.no_grad():
        tensor.view_as(q).copy_(q)
        tensor.mul_(gain)
    return tensor

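# Illustrative check (an editorial addition, not in the upstream file): the
# sign fix via the diagonal of R makes the Q factor unique, and the result has
# orthonormal rows (or columns), up to `gain`:
#
#     >>> w = torch.empty(3, 5)
#     >>> _ = orthogonal_(w)
#     >>> torch.allclose(w @ w.T, torch.eye(3), atol=1e-5)
#     True
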
def sparse_(
    tensor,
    sparsity,
    std=0.01,
    generator: _Optional[torch.Generator] = None,
):
    r"""Fill the 2D input `Tensor` as a sparse matrix.

    The non-zero elements will be drawn from the normal distribution
    :math:`\mathcal{N}(0, 0.01)`, as described in `Deep learning via
    Hessian-free optimization` - Martens, J. (2010).

    Args:
        tensor: an n-dimensional `torch.Tensor`
        sparsity: The fraction of elements in each column to be set to zero
        std: the standard deviation of the normal distribution used to generate
            the non-zero values
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.sparse_(w, sparsity=0.1)
    """
    if tensor.ndimension() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    rows, cols = tensor.shape
    num_zeros = int(math.ceil(sparsity * rows))

    with torch.no_grad():
        tensor.normal_(0, std, generator=generator)
        for col_idx in range(cols):
            row_indices = torch.randperm(rows)
            zero_indices = row_indices[:num_zeros]
            tensor[zero_indices, col_idx] = 0
    return tensor

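# Illustrative check (an editorial addition, not in the upstream file): each
# column ends up with ceil(sparsity * rows) zeroed entries (with probability
# one, since the normal draws are almost surely non-zero):
#
#     >>> w = torch.empty(10, 4)
#     >>> _ = sparse_(w, sparsity=0.3)
#     >>> [int((w[:, j] == 0).sum()) for j in range(4)]
#     [3, 3, 3, 3]
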
# for backward compatibility
def _make_deprecate(meth):
    new_name = meth.__name__
    old_name = new_name[:-1]

    def deprecated_init(*args, **kwargs):
        warnings.warn(
            f"`nn.init.{old_name}` is now deprecated in favor of `nn.init.{new_name}`.",
            FutureWarning,
            stacklevel=2,
        )
        return meth(*args, **kwargs)

    deprecated_init.__doc__ = rf"""
    {old_name}(...)

    .. warning::
        This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.

    See :func:`~torch.nn.init.{new_name}` for details."""
    deprecated_init.__name__ = old_name
    return deprecated_init


uniform = _make_deprecate(uniform_)
normal = _make_deprecate(normal_)
constant = _make_deprecate(constant_)
eye = _make_deprecate(eye_)
dirac = _make_deprecate(dirac_)
xavier_uniform = _make_deprecate(xavier_uniform_)
xavier_normal = _make_deprecate(xavier_normal_)
kaiming_uniform = _make_deprecate(kaiming_uniform_)
kaiming_normal = _make_deprecate(kaiming_normal_)
orthogonal = _make_deprecate(orthogonal_)
sparse = _make_deprecate(sparse_)