"""Testing for Gaussian process regression."""

import re
import sys
import warnings

import numpy as np
import pytest
from scipy.optimize import approx_fprime

from sklearn.exceptions import ConvergenceWarning
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (
    RBF,
    ConstantKernel as C,
    DotProduct,
    ExpSineSquared,
    WhiteKernel,
)
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
from sklearn.utils._testing import (
    assert_allclose,
    assert_almost_equal,
    assert_array_almost_equal,
    assert_array_less,
)


def f(x):
    return x * np.sin(x)


X = np.atleast_2d([1.0, 3.0, 5.0, 6.0, 7.0, 8.0]).T
X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T
y = f(X).ravel()


fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [
    RBF(length_scale=1.0),
    fixed_kernel,
    RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
    + C(1e-5, (1e-5, 1e2)),
    C(0.1, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
    + C(1e-5, (1e-5, 1e2)),
]
non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel]


@pytest.mark.parametrize("kernel", kernels)
def test_gpr_interpolation(kernel):
    if sys.maxsize <= 2**32:
        pytest.xfail("This test may fail on 32 bit Python")

    # Test the interpolating property for different kernels.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_pred, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_pred, y)
    assert_almost_equal(np.diag(y_cov), 0.0)


def test_gpr_interpolation_structured():
    # Test the interpolating property for structured (non-vectorial) data.
    kernel = MiniSeqKernel(baseline_similarity_bounds="fixed")
    X = ["A", "B", "C"]
    y = np.array([1, 2, 3])
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_pred, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(
        kernel(X, eval_gradient=True)[1].ravel(), (1 - np.eye(len(X))).ravel()
    )
    assert_almost_equal(y_pred, y)
    assert_almost_equal(np.diag(y_cov), 0.0)


@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_lml_improving(kernel):
    if sys.maxsize <= 2**32:
        pytest.xfail("This test may fail on 32 bit Python")

    # Test that hyperparameter-tuning improves log-marginal likelihood.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood(
        kernel.theta
    )


@pytest.mark.parametrize("kernel", kernels)
def test_lml_precomputed(kernel):
    # Test that lml of optimized kernel is stored correctly.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert gpr.log_marginal_likelihood(gpr.kernel_.theta) == pytest.approx(
        gpr.log_marginal_likelihood()
    )


@pytest.mark.parametrize("kernel", kernels)
def test_lml_without_cloning_kernel(kernel):
    # Test that clone_kernel=False mutates the kernel's theta in place.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    input_theta = np.ones(gpr.kernel_.theta.shape, dtype=np.float64)

    gpr.log_marginal_likelihood(input_theta, clone_kernel=False)
    assert_almost_equal(gpr.kernel_.theta, input_theta, 7)


@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
    # Test that we are in a local maximum after hyperparameter-optimization.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpr.log_marginal_likelihood(gpr.kernel_.theta, True)

    assert np.all(
        (np.abs(lml_gradient) < 1e-4)
        | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0])
        | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])
    )


@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_solution_inside_bounds(kernel):
    # Test that hyperparameter-optimization remains in bounds.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    bounds = gpr.kernel_.bounds
    max_ = np.finfo(gpr.kernel_.theta.dtype).max
    tiny = 1e-10
    bounds[~np.isfinite(bounds[:, 1]), 1] = max_

    assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
    assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)


@pytest.mark.parametrize("kernel", kernels)
def test_lml_gradient(kernel):
    # Compare analytic and numeric gradient of log marginal likelihood.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
    lml_gradient_approx = approx_fprime(
        kernel.theta, lambda theta: gpr.log_marginal_likelihood(theta, False), 1e-10
    )

    assert_almost_equal(lml_gradient, lml_gradient_approx, 3)


@pytest.mark.parametrize("kernel", kernels)
def test_prior(kernel):
    # Test that GP prior has mean 0 and identical variances.
    gpr = GaussianProcessRegressor(kernel=kernel)

    y_mean, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_mean, 0, 5)
    if len(gpr.kernel.theta) > 1:
        # XXX: quite hacky, works only for the current set of kernels
        assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
    else:
        assert_almost_equal(np.diag(y_cov), 1, 5)


@pytest.mark.parametrize("kernel", kernels)
def test_sample_statistics(kernel):
    # Test that statistics of samples drawn from GP are correct.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    y_mean, y_cov = gpr.predict(X2, return_cov=True)

    samples = gpr.sample_y(X2, 300000)

    # More digits of accuracy would require many more samples
    assert_almost_equal(y_mean, np.mean(samples, 1), 1)
    assert_almost_equal(
        np.diag(y_cov) / np.diag(y_cov).max(),
        np.var(samples, 1) / np.diag(y_cov).max(),
        1,
    )


def test_no_optimizer():
    # Test that kernel parameters are unmodified when optimizer is None.
    kernel = RBF(1.0)
    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
    assert np.exp(gpr.kernel_.theta) == 1.0


@pytest.mark.parametrize("kernel", kernels)
@pytest.mark.parametrize("target", [y, np.ones(X.shape[0], dtype=np.float64)])
def test_predict_cov_vs_std(kernel, target):
    if sys.maxsize <= 2**32:
        pytest.xfail("This test may fail on 32 bit Python")

    # Test that the predicted std.-dev. is consistent with cov's diagonal.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, target)
    y_mean, y_cov = gpr.predict(X2, return_cov=True)
    y_mean, y_std = gpr.predict(X2, return_std=True)
    assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)


def test_anisotropic_kernel():
    # Test that GPR can identify meaningful anisotropic length-scales.
    # We learn a function which varies in one dimension ten times slower
    # than in the other. The corresponding length-scales should differ by
    # at least a factor of 5.
    rng = np.random.RandomState(0)
    X = rng.uniform(-1, 1, (50, 2))
    y = X[:, 0] + 0.1 * X[:, 1]

    kernel = RBF([1.0, 1.0])
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert np.exp(gpr.kernel_.theta[1]) > np.exp(gpr.kernel_.theta[0]) * 5


def test_random_starts():
    # Test that an increasing number of random-starts of GP fitting only
    # increases the log marginal likelihood of the chosen theta.
    n_samples, n_features = 25, 2
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) + rng.normal(
        scale=0.1, size=n_samples
    )

    kernel = C(1.0, (1e-2, 1e2)) * RBF(
        length_scale=[1.0] * n_features,
        length_scale_bounds=[(1e-4, 1e2)] * n_features,
    ) + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
    last_lml = -np.inf
    for n_restarts_optimizer in range(5):
        gp = GaussianProcessRegressor(
            kernel=kernel,
            n_restarts_optimizer=n_restarts_optimizer,
            random_state=0,
        ).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        assert lml > last_lml - np.finfo(np.float32).eps
        last_lml = lml


@pytest.mark.parametrize("kernel", kernels)
def test_y_normalization(kernel):
    """
    Test normalization of the target values in GP

    Fitting non-normalizing GP on normalized y and fitting normalizing GP
    on unnormalized y should yield identical results. Note that, here,
    'normalized y' refers to y that has been made zero mean and unit
    variance.

    """
    y_mean = np.mean(y)
    y_std = np.std(y)
    y_norm = (y - y_mean) / y_std

    # Fit non-normalizing GP on normalized y
    gpr = GaussianProcessRegressor(kernel=kernel)
    gpr.fit(X, y_norm)

    # Fit normalizing GP on unnormalized y
    gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
    gpr_norm.fit(X, y)

    # Compare predicted means, std.-devs. and covariances
    y_pred, y_pred_std = gpr.predict(X2, return_std=True)
    y_pred = y_pred * y_std + y_mean
    y_pred_std = y_pred_std * y_std

    y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)

    assert_almost_equal(y_pred, y_pred_norm)
    assert_almost_equal(y_pred_std, y_pred_std_norm)

    _, y_cov = gpr.predict(X2, return_cov=True)
    y_cov = y_cov * y_std**2

    _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)

    assert_almost_equal(y_cov, y_cov_norm)


def test_large_variance_y():
    """
    Here we test that, when normalize_y=True, our GP can produce a
    sensible fit to training data whose variance is significantly
    larger than unity. This test was made in response to issue #15612.

    GP predictions are verified against predictions that were made
    using GPy which, here, is treated as the 'gold standard'. Note that we
    only investigate the RBF kernel here, as that is what was used in the
    GPy implementation.

    The following code can be used to recreate the GPy data:

    --------------------------------------------------------------------------
    import GPy

    kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.)
    gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy)
    gpy.optimize()
    y_pred_gpy, y_var_gpy = gpy.predict(X2)
    y_pred_std_gpy = np.sqrt(y_var_gpy)
    --------------------------------------------------------------------------
    """
    # Here we utilise a larger-variance version of the training data
    y_large = 10 * y

    # Standard GP with normalize_y=True
    RBF_params = {"length_scale": 1.0}
    kernel = RBF(**RBF_params)
    gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
    gpr.fit(X, y_large)
    y_pred, y_pred_std = gpr.predict(X2, return_std=True)

    # 'Gold standard' mean predictions from GPy
    y_pred_gpy = np.array(
        [15.16918303, -27.98707845, -39.31636019, 14.52605515, 69.18503589]
    )

    # 'Gold standard' std predictions from GPy
    y_pred_std_gpy = np.array(
        [7.78860962, 3.83179178, 0.63149951, 0.52745188, 0.86170042]
    )

    # Based on numerical experiments, it's reasonable to expect our
    # GP's mean predictions to get within 7% of those made by GPy.
    assert_allclose(y_pred, y_pred_gpy, rtol=0.07, atol=0)

    # Based on numerical experiments, it's reasonable to expect our
    # GP's std predictions to get within 15% of those made by GPy.
    assert_allclose(y_pred_std, y_pred_std_gpy, rtol=0.15, atol=0)


def test_y_multioutput():
    # Test that GPR can deal with multi-dimensional target values.
    y_2d = np.vstack((y, y * 2)).T

    # Test for fixed kernel that first dimension of 2d GP equals the output
    # of 1d GP and that second dimension is twice as large
    kernel = RBF(length_scale=1.0)

    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False)
    gpr.fit(X, y)

    gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False)
    gpr_2d.fit(X, y_2d)

    y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
    y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
    _, y_cov_1d = gpr.predict(X2, return_cov=True)
    _, y_cov_2d = gpr_2d.predict(X2, return_cov=True)

    assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)

    # Standard deviation and covariance do not depend on output
    for target in range(y_2d.shape[1]):
        assert_almost_equal(y_std_1d, y_std_2d[..., target])
        assert_almost_equal(y_cov_1d, y_cov_2d[..., target])

    y_sample_1d = gpr.sample_y(X2, n_samples=10)
    y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)

    assert y_sample_1d.shape == (5, 10)
    assert y_sample_2d.shape == (5, 2, 10)

    # Only the first target will be equal
    assert_almost_equal(y_sample_1d, y_sample_2d[:, 0, :])

    # Test hyperparameter optimization
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr.fit(X, y)

        gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_2d.fit(X, np.vstack((y, y)).T)

        assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)


@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_custom_optimizer(kernel):
    # Test that GPR can use externally defined optimizers.
    # Define a dummy optimizer that simply tests 50 random hyperparameters
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        theta_opt, func_min = initial_theta, obj_func(
            initial_theta, eval_gradient=False
        )
        for _ in range(50):
            theta = np.atleast_1d(
                rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1]))
            )
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min

    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
    gpr.fit(X, y)

    # Check that the optimizer improved the marginal likelihood
    assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood(
        gpr.kernel.theta
    )


def test_gpr_correct_error_message():
    X = np.arange(12).reshape(6, -1)
    y = np.ones(6)
    kernel = DotProduct()
    gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
    message = (
        "The kernel, %s, is not returning a positive "
        "definite matrix. Try gradually increasing "
        "the 'alpha' parameter of your "
        "GaussianProcessRegressor estimator." % kernel
    )
    with pytest.raises(np.linalg.LinAlgError, match=re.escape(message)):
        gpr.fit(X, y)


@pytest.mark.parametrize("kernel", kernels)
def test_duplicate_input(kernel):
    # Test GPR can handle two different output-values for the same input.
    gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
    gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)

    X_ = np.vstack((X, X[0]))
    y_ = np.hstack((y, y[0] + 1))
    gpr_equal_inputs.fit(X_, y_)

    X_ = np.vstack((X, X[0] + 1e-15))
    y_ = np.hstack((y, y[0] + 1))
    gpr_similar_inputs.fit(X_, y_)

    X_test = np.linspace(0, 10, 100)[:, None]
    y_pred_equal, y_std_equal = gpr_equal_inputs.predict(X_test, return_std=True)
    y_pred_similar, y_std_similar = gpr_similar_inputs.predict(X_test, return_std=True)

    assert_almost_equal(y_pred_equal, y_pred_similar)
    assert_almost_equal(y_std_equal, y_std_similar)


def test_no_fit_default_predict():
    # Test that GPR predictions without fit do not break by default.
    default_kernel = C(constant_value=1.0, constant_value_bounds="fixed") * RBF(
        length_scale=1.0, length_scale_bounds="fixed"
    )
    gpr1 = GaussianProcessRegressor()
    _, y_std1 = gpr1.predict(X, return_std=True)
    _, y_cov1 = gpr1.predict(X, return_cov=True)

    gpr2 = GaussianProcessRegressor(kernel=default_kernel)
    _, y_std2 = gpr2.predict(X, return_std=True)
    _, y_cov2 = gpr2.predict(X, return_cov=True)

    assert_array_almost_equal(y_std1, y_std2)
    assert_array_almost_equal(y_cov1, y_cov2)


def test_warning_bounds():
    kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
    gpr = GaussianProcessRegressor(kernel=kernel)
    warning_message = (
        "The optimal value found for dimension 0 of parameter "
        "length_scale is close to the specified upper bound "
        "0.001. Increasing the bound and calling fit again may "
        "find a better value."
    )
    with pytest.warns(ConvergenceWarning, match=warning_message):
        gpr.fit(X, y)

    kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF(
        length_scale_bounds=[1e3, 1e5]
    )
    gpr_sum = GaussianProcessRegressor(kernel=kernel_sum)
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        gpr_sum.fit(X, y)

        assert len(record) == 2

        assert issubclass(record[0].category, ConvergenceWarning)
        assert record[0].message.args[0] == (
            "The optimal value found for dimension 0 of parameter "
            "k1__noise_level is close to the specified upper bound "
            "0.001. Increasing the bound and calling fit again may "
            "find a better value."
        )

        assert issubclass(record[1].category, ConvergenceWarning)
        assert record[1].message.args[0] == (
            "The optimal value found for dimension 0 of parameter "
            "k2__length_scale is close to the specified lower bound "
            "1000.0. Decreasing the bound and calling fit again may "
            "find a better value."
        )

    X_tile = np.tile(X, 2)
    kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2])
    gpr_dims = GaussianProcessRegressor(kernel=kernel_dims)

    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        gpr_dims.fit(X_tile, y)

        assert len(record) == 2

        assert issubclass(record[0].category, ConvergenceWarning)
        assert record[0].message.args[0] == (
            "The optimal value found for dimension 0 of parameter "
            "length_scale is close to the specified lower bound "
            "10.0. Decreasing the bound and calling fit again may "
            "find a better value."
        )

        assert issubclass(record[1].category, ConvergenceWarning)
        assert record[1].message.args[0] == (
            "The optimal value found for dimension 1 of parameter "
            "length_scale is close to the specified lower bound "
            "10.0. Decreasing the bound and calling fit again may "
            "find a better value."
        )


def test_bound_check_fixed_hyperparameter():
    # Regression test for issue #17943: having a hyperparameter with fixed
    # bounds should not cause an error.
    k1 = 50.0**2 * RBF(length_scale=50.0)  # long-term smooth rising trend
    k2 = ExpSineSquared(
        length_scale=1.0, periodicity=1.0, periodicity_bounds="fixed"
    )  # seasonal component
    kernel = k1 + k2
    GaussianProcessRegressor(kernel=kernel).fit(X, y)


@pytest.mark.parametrize("kernel", kernels)
def test_constant_target(kernel):
    """Check that the std. dev. is affected to 1 when normalizing a constant
    feature.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/18318
    NaN values were assigned to the target when scaling, due to the null
    std. dev. of a constant target.
    """
    y_constant = np.ones(X.shape[0], dtype=np.float64)

    gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
    gpr.fit(X, y_constant)
    assert gpr._y_train_std == pytest.approx(1.0)

    y_pred, y_cov = gpr.predict(X, return_cov=True)
    assert_allclose(y_pred, y_constant)
    # set atol because we compare to zero
    assert_allclose(np.diag(y_cov), 0.0, atol=1e-9)

    # Test multi-target data
    n_samples, n_targets = X.shape[0], 2
    rng = np.random.RandomState(0)
    y = np.concatenate(
        [
            rng.normal(size=(n_samples, 1)),  # non-constant target
            np.full(shape=(n_samples, 1), fill_value=2),  # constant target
        ],
        axis=1,
    )

    gpr.fit(X, y)
    Y_pred, Y_cov = gpr.predict(X, return_cov=True)

    assert_allclose(Y_pred[:, 1], 2)
    assert_allclose(np.diag(Y_cov[..., 1]), 0, atol=1e-9)

    assert Y_pred.shape == (n_samples, n_targets)
    assert Y_cov.shape == (n_samples, n_samples, n_targets)


def test_gpr_consistency_std_cov_non_invertible_kernel():
    """Check the consistency between the returned std. dev. and the covariance.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/19936
    Inconsistencies were observed when the kernel cannot be inverted (or
    numerically stable).
    """
    kernel = C(8.98576054e05, (1e-12, 1e12)) * RBF(9.89842464e-01, (1e-12, 1e12))
    gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, optimizer=None)
    X_train = np.array(
        [
            [0.0, 0.0],
            [1.54919334, -0.77459667],
            [-1.54919334, 0.0],
            [0.0, -1.54919334],
            [0.77459667, 0.77459667],
            [-0.77459667, 1.54919334],
        ]
    )
    y_train = np.array(
        [
            [-2.14882017e-10],
            [-4.66975823e00],
            [4.66975823e00],
            [4.66975823e00],
            [-1.13631039e-08],
            [-4.66975823e00],
        ]
    )
    gpr.fit(X_train, y_train)

    X_test = np.array(
        [
            [-1.93649167, -1.93649167],
            [1.93649167, -1.93649167],
            [-1.93649167, 1.93649167],
            [1.93649167, 1.93649167],
        ]
    )
    pred1, std = gpr.predict(X_test, return_std=True)
    pred2, cov = gpr.predict(X_test, return_cov=True)
    assert_allclose(std, np.sqrt(np.diagonal(cov)), rtol=1e-5)

@pytest.mark.parametrize(
    "params, error_type, err_msg",
    [
        (
            {"alpha": np.zeros(100)},
            ValueError,
            "alpha must be a scalar or an array with same number of entries as y",
        ),
        (
            {
                "kernel": WhiteKernel(noise_level_bounds=[-np.inf, np.inf]),
                "n_restarts_optimizer": 2,
            },
            ValueError,
            "requires that all bounds are finite",
        ),
    ],
)
def test_gpr_fit_error(params, error_type, err_msg):
    """Check that the expected errors are raised during fit."""
    gpr = GaussianProcessRegressor(**params)
    with pytest.raises(error_type, match=err_msg):
        gpr.fit(X, y)


def test_gpr_lml_error():
    """Check that we raise the proper error in the LML method."""
    gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y)

    err_msg = "Gradient can only be evaluated for theta!=None"
    with pytest.raises(ValueError, match=err_msg):
        gpr.log_marginal_likelihood(eval_gradient=True)


def test_gpr_predict_error():
    """Check that we raise the proper error during predict."""
    gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y)

    err_msg = "At most one of return_std or return_cov can be requested."
    with pytest.raises(RuntimeError, match=err_msg):
        gpr.predict(X, return_cov=True, return_std=True)


@pytest.mark.parametrize("normalize_y", [True, False])
@pytest.mark.parametrize("n_targets", [None, 1, 10])
def test_predict_shapes(normalize_y, n_targets):
    """Check the shapes of y_mean, y_std, and y_cov in single-output
    (n_targets=None) and multi-output settings, including the edge case when
    n_targets=1, where the sklearn convention is to squeeze the predictions.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/17394
    https://github.com/scikit-learn/scikit-learn/issues/18065
    https://github.com/scikit-learn/scikit-learn/issues/22174
    """
    rng = np.random.RandomState(1024)

    n_features, n_samples_train, n_samples_test = 6, 9, 7

    y_train_shape = (n_samples_train,)
    if n_targets is not None:
        y_train_shape = y_train_shape + (n_targets,)

    # By convention single-output data is squeezed upon prediction
    y_test_shape = (n_samples_test,)
    if n_targets is not None and n_targets > 1:
        y_test_shape = y_test_shape + (n_targets,)

    X_train = rng.randn(n_samples_train, n_features)
    X_test = rng.randn(n_samples_test, n_features)
    y_train = rng.randn(*y_train_shape)

    model = GaussianProcessRegressor(normalize_y=normalize_y)
    model.fit(X_train, y_train)

    y_pred, y_std = model.predict(X_test, return_std=True)
    _, y_cov = model.predict(X_test, return_cov=True)

    assert y_pred.shape == y_test_shape
    assert y_std.shape == y_test_shape
    assert y_cov.shape == (n_samples_test,) + y_test_shape


@pytest.mark.parametrize("normalize_y", [True, False])
@pytest.mark.parametrize("n_targets", [None, 1, 10])
def test_sample_y_shapes(normalize_y, n_targets):
    """Check the shapes of y_samples in single-output (n_targets=None) and
    multi-output settings, including the edge case when n_targets=1, where the
    sklearn convention is to squeeze the predictions.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/22175
    """
    rng = np.random.RandomState(1024)

    n_features, n_samples_train = 6, 9
    # Number of spatial locations to predict at
    n_samples_X_test = 7
    # Number of sample predictions per test point
    n_samples_y_test = 5

    y_train_shape = (n_samples_train,)
    if n_targets is not None:
        y_train_shape = y_train_shape + (n_targets,)

    # By convention single-output data is squeezed upon prediction
    if n_targets is not None and n_targets > 1:
        y_test_shape = (n_samples_X_test, n_targets, n_samples_y_test)
    else:
        y_test_shape = (n_samples_X_test, n_samples_y_test)

    X_train = rng.randn(n_samples_train, n_features)
    X_test = rng.randn(n_samples_X_test, n_features)
    y_train = rng.randn(*y_train_shape)

    model = GaussianProcessRegressor(normalize_y=normalize_y)
    model.fit(X_train, y_train)

    y_samples = model.sample_y(X_test, n_samples=n_samples_y_test)
    assert y_samples.shape == y_test_shape


class CustomKernel(C):
    """
    A custom kernel that has a diag method that returns the first column of the
    input matrix X. This is a helper for the test to check that the input
    matrix X is not mutated.
    """

    def diag(self, X):
        return X[:, 0]


def test_gpr_predict_input_not_modified():
    """
    Check that the input X is not modified by the predict method of the
    GaussianProcessRegressor when setting return_std=True.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/24340
    """
    gpr = GaussianProcessRegressor(kernel=CustomKernel()).fit(X, y)

    X2_copy = np.copy(X2)
    _, _ = gpr.predict(X2, return_std=True)

    assert_allclose(X2, X2_copy)
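
# A minimal usage sketch of the public API exercised throughout this file.
# This block is illustrative only and is not part of the scikit-learn test
# suite; it reuses the module-level fixtures X, X2 and y defined above.
def test_fit_predict_api_sketch():
    # Fit on the noise-free toy data and predict at the held-out inputs.
    gpr = GaussianProcessRegressor(kernel=RBF(length_scale=1.0)).fit(X, y)
    y_pred, y_std = gpr.predict(X2, return_std=True)

    # Single-output predictions come back squeezed to shape (n_samples,)...
    assert y_pred.shape == (X2.shape[0],)
    # ...and the returned standard deviations are non-negative.
    assert np.all(y_std >= 0.0)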