import torch
from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional


class Adagrad(Optimizer):
    r"""Implements Adagrad algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta)
                \text{ (objective)}, \: \lambda \text{ (weight decay)},                          \\
            &\hspace{12mm}    \tau \text{ (initial accumulator value)}, \: \eta\text{ (lr decay)}\\
            &\textbf{initialize} :  state\_sum_0 \leftarrow \tau                          \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \tilde{\gamma}    \leftarrow \gamma / (1 +(t-1) \eta)                  \\
            &\hspace{5mm} \textbf{if} \: \lambda \neq 0                                          \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                             \\
            &\hspace{5mm}state\_sum_t  \leftarrow  state\_sum_{t-1} + g^2_t                      \\
            &\hspace{5mm}\theta_t \leftarrow
                \theta_{t-1}- \tilde{\gamma} \frac{g_t}{\sqrt{state\_sum_t}+\epsilon}            \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adaptive Subgradient Methods for Online Learning
    and Stochastic Optimization`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        initial_accumulator_value (float, optional): value with which the running
            sum of squared gradients is initialized (default: 0)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-10)
        foreach (bool, optional): whether the foreach (multi-tensor)
            implementation of the optimizer is used (default: None)
        maximize (bool, optional): maximize the objective with respect to the
            params, instead of minimizing (default: False)
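
    A minimal usage sketch (``model``, ``input``, ``target`` and ``loss_fn`` are
    assumed to exist and are not defined in this module):

    Example::
        >>> optimizer = torch.optim.Adagrad(model.parameters(), lr=1e-2)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()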

    .. _Adaptive Subgradient Methods for Online Learning and Stochastic
        Optimization: http://jmlr.org/papers/v12/duchi11a.html
    """

    def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0,
                 initial_accumulator_value=0, eps=1e-10,
                 foreach: Optional[bool] = None, *, maximize: bool = False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= lr_decay:
            raise ValueError("Invalid lr_decay value: {}".format(lr_decay))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= initial_accumulator_value:
            raise ValueError("Invalid initial_accumulator_value value: {}".format(initial_accumulator_value))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))

        defaults = dict(lr=lr, lr_decay=lr_decay, eps=eps, weight_decay=weight_decay,
                        initial_accumulator_value=initial_accumulator_value,
                        foreach=foreach, maximize=maximize)
        super(Adagrad, self).__init__(params, defaults)

        # Eagerly initialize per-parameter state: a scalar step counter and the
        # running sum of squared gradients, filled with initial_accumulator_value.
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = torch.tensor(0.0)
                init_value = (
                    complex(initial_accumulator_value, initial_accumulator_value)
                    if torch.is_complex(p)
                    else initial_accumulator_value
                )
                state['sum'] = torch.full_like(p, init_value, memory_format=torch.preserve_format)
  zAdagrad.__init__c                    s   t  | | jD ]}|dd  |dd qt| j }t|dkoZt	|d d }|s|D ]}t
t|d |d< qdd S )Nr   r
   Fr   r   )r   __setstate__r   
setdefaultlistr   valueslenr   Z	is_tensorr   float)r   r   r!   Zstate_valuesZstep_is_tensorsr#   r%   r&   r'   f   s    

zAdagrad.__setstate__c                 C   s4   | j D ](}|d D ]}| j| }|d   qqd S )Nr   r   )r   r   Zshare_memory_)r   r!   r"   r   r%   r%   r&   share_memoryt   s    

zAdagrad.share_memoryc                 C   s   d}|dk	r&t   | }W 5 Q R X | jD ]}g }g }g }g }d}|d D ]V}	|	jdk	rL|	jjrfd}||	 ||	j | j|	 }
||
d  ||
d  qLt|||||d |d |d	 |d
 ||d |d d q,|S )zPerforms a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        NFr   Tr   r   r   r   r   r   r   r
   )r   r   r   r   has_sparse_gradr   r
   )r   Zenable_gradr   grad	is_sparseappendr   adagrad)r   closureZlossr!   Zparams_with_gradgrads
state_sumsstate_stepsr/   r"   r   r%   r%   r&   r   z   sB    




zAdagrad.step)r   r   r   r   r	   N)N)__name__
__module____qualname____doc__r   boolr   r'   r.   r   Zno_gradr   __classcell__r%   r%   r#   r&   r      s"   -      


def adagrad(params: List[Tensor],
            grads: List[Tensor],
            state_sums: List[Tensor],
            state_steps: List[Tensor],
            # has_sparse_grad and foreach are kept as positional args with
            # defaults (rather than keyword-only) for torchscript compatibility
            has_sparse_grad: bool = None,
            foreach: Optional[bool] = None,
            *,
            lr: float,
            weight_decay: float,
            lr_decay: float,
            eps: float,
            maximize: bool):
    r"""Functional API that performs Adagrad algorithm computation.

    See :class:`~torch.optim.Adagrad` for details.
    """
    if not all([isinstance(t, torch.Tensor) for t in state_steps]):
        raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")

    if foreach is None:
        # fall back to the single-tensor path when no preference is given
        foreach = False

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adagrad
    else:
        func = _single_tensor_adagrad

    func(params,
         grads,
         state_sums,
         state_steps,
         lr=lr,
         weight_decay=weight_decay,
         lr_decay=lr_decay,
         eps=eps,
         has_sparse_grad=has_sparse_grad,
         maximize=maximize)


def _make_sparse(grad, grad_indices, values):
    size = grad.size()
    if grad_indices.numel() == 0 or values.numel() == 0:
        return torch.empty_like(grad)
    return torch.sparse_coo_tensor(grad_indices, values, size)


def _single_tensor_adagrad(params: List[Tensor],
                           grads: List[Tensor],
                           state_sums: List[Tensor],
                           state_steps: List[Tensor],
                           *,
                           lr: float,
                           weight_decay: float,
                           lr_decay: float,
                           eps: float,
                           has_sparse_grad: bool,
                           maximize: bool):

    for (param, grad, state_sum, step_t) in zip(params, grads, state_sums, state_steps):
        # update step
        step_t += 1
        step = step_t.item()
        grad = grad if not maximize else -grad

        if weight_decay != 0:
            if grad.is_sparse:
                raise RuntimeError("weight_decay option is not compatible with sparse gradients")
            grad = grad.add(param, alpha=weight_decay)

        clr = lr / (1 + (step - 1) * lr_decay)

        if grad.is_sparse:
            grad = grad.coalesce()  # the update is non-linear so indices must be unique
            grad_indices = grad._indices()
            grad_values = grad._values()

            state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2)))
            std = state_sum.sparse_mask(grad)
            std_values = std._values().sqrt_().add_(eps)
            param.add_(_make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr)
        else:
            is_complex = torch.is_complex(param)
            if is_complex:
                grad = torch.view_as_real(grad)
                state_sum = torch.view_as_real(state_sum)
                param = torch.view_as_real(param)
            state_sum.addcmul_(grad, grad, value=1)
            std = state_sum.sqrt().add_(eps)
            param.addcdiv_(grad, std, value=-clr)
            if is_complex:
                param = torch.view_as_complex(param)
                state_sum = torch.view_as_complex(state_sum)
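

# Worked illustration (a sketch, not used by the optimizer itself): one dense
# Adagrad update traced through _single_tensor_adagrad. With lr=0.1 and a first
# gradient of 2.0 on a zero-initialized state, state_sum becomes 4.0 and the
# parameter moves by -0.1 * 2.0 / (sqrt(4.0) + eps), i.e. roughly 1.0 -> 0.9.
# The helper name and the concrete numbers are assumptions for demonstration.
def _single_tensor_adagrad_sketch():
    param = torch.tensor([1.0])
    grad = torch.tensor([2.0])
    state_sum = torch.zeros(1)
    step = torch.tensor(0.0)
    _single_tensor_adagrad([param], [grad], [state_sum], [step],
                           lr=0.1, weight_decay=0.0, lr_decay=0.0, eps=1e-10,
                           has_sparse_grad=False, maximize=False)
    return param  # expected to be close to tensor([0.9000])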


def _multi_tensor_adagrad(params: List[Tensor],
                          grads: List[Tensor],
                          state_sums: List[Tensor],
                          state_steps: List[Tensor],
                          *,
                          lr: float,
                          weight_decay: float,
                          lr_decay: float,
                          eps: float,
                          has_sparse_grad: bool,
                          maximize: bool):

    # Foreach functions will throw errors if given empty lists
    if len(params) == 0:
        return

    if maximize:
        grads = torch._foreach_neg(grads)

    if has_sparse_grad is None:
        has_sparse_grad = any([grad.is_sparse for grad in grads])

    if has_sparse_grad:
        # Sparse gradients are not supported by the foreach ops; fall back to the
        # per-tensor path. maximize is passed as False because grads were already
        # negated above when maximize was requested.
        return _single_tensor_adagrad(params, grads, state_sums, state_steps,
                                      lr=lr, weight_decay=weight_decay, lr_decay=lr_decay,
                                      eps=eps, has_sparse_grad=has_sparse_grad, maximize=False)

    # Update steps
    torch._foreach_add_(state_steps, 1)

    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    minus_clr = [-lr / (1 + (step.item() - 1) * lr_decay) for step in state_steps]

    grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads]
    state_sums = [torch.view_as_real(x) if torch.is_complex(x) else x for x in state_sums]
    torch._foreach_addcmul_(state_sums, grads, grads, value=1)
    std = torch._foreach_add(torch._foreach_sqrt(state_sums), eps)
    toAdd = torch._foreach_div(torch._foreach_mul(grads, minus_clr), std)
    toAdd = [torch.view_as_complex(x) if torch.is_complex(params[i]) else x
             for i, x in enumerate(toAdd)]
    torch._foreach_add_(params, toAdd)
    state_sums = [torch.view_as_complex(x) if torch.is_complex(params[i]) else x
                  for i, x in enumerate(state_sums)]
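

# Consistency sketch (illustrative only, not a test shipped with this module):
# the foreach path is meant to produce the same dense update as the per-tensor
# loop, so running both on identical copies of the inputs should give matching
# parameters. The helper name and inputs are assumptions for demonstration.
def _compare_adagrad_paths_sketch():
    def make_inputs():
        return ([torch.tensor([1.0, -2.0])], [torch.tensor([0.3, 0.7])],
                [torch.zeros(2)], [torch.tensor(0.0)])

    params_a, grads_a, sums_a, steps_a = make_inputs()
    _single_tensor_adagrad(params_a, grads_a, sums_a, steps_a,
                           lr=0.05, weight_decay=0.0, lr_decay=0.0, eps=1e-10,
                           has_sparse_grad=False, maximize=False)

    params_b, grads_b, sums_b, steps_b = make_inputs()
    _multi_tensor_adagrad(params_b, grads_b, sums_b, steps_b,
                          lr=0.05, weight_decay=0.0, lr_decay=0.0, eps=1e-10,
                          has_sparse_grad=False, maximize=False)

    return torch.allclose(params_a[0], params_b[0])  # expected: True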