U
    2dp                     @   s  d Z ddlmZ ddlZddlmZ ddlmZ ddlm	Z	 ddl
ZddlmZ ddlmZ dd	lmZ d
dlmZmZmZmZmZmZ d
dlmZmZ d
dlmZmZmZ d
dlm Z  d
dl!m"Z"m#Z# d
dl$m%Z%m&Z&m'Z' d
dl(m)Z)m*Z*m+Z+m,Z,m-Z- d
dlm.Z. d
dl/m0Z0 d
dl1m2Z2 d
dl3m4Z4m5Z5 d
dl6m7Z7 d
dl8m9Z9 G dd deeeZ:d0ddZ;dd Z<dd Z=d1d d!Z>G d"d# d#Z?d2d$d%Z@G d&d' d'eeZAdd(d)d*d+d,d-ZBG d.d/ d/ZCdS )3z'Calibration of predicted probabilities.    )IntegralN)	signature)partial)log)expit)xlogy)	fmin_bfgs   )BaseEstimatorClassifierMixinRegressorMixincloneMetaEstimatorMixinis_classifier)label_binarizeLabelEncoder)column_or_1d	indexablecheck_matplotlib_support)check_classification_targets)delayedParallel)
StrOptions
HasMethodsHidden)_check_fit_params_check_sample_weight_num_samplescheck_consistent_lengthcheck_is_fitted)_safe_indexing)IsotonicRegression)	LinearSVC)check_cvcross_val_predict)_check_pos_label_consistency)_get_responsec                   @   s   e Zd ZU dZeddgeddgdgeddhgded	hgedgd
geddgeddgdeedhgdZe	e
d< dddddddddZdddZdd Zdd Zdd ZdS )CalibratedClassifierCVa7  Probability calibration with isotonic regression or logistic regression.

    This class uses cross-validation to both estimate the parameters of a
    classifier and subsequently calibrate a classifier. With default
    `ensemble=True`, for each cv split it
    fits a copy of the base estimator to the training subset, and calibrates it
    using the testing subset. For prediction, predicted probabilities are
    averaged across these individual calibrated classifiers. When
    `ensemble=False`, cross-validation is used to obtain unbiased predictions,
    via :func:`~sklearn.model_selection.cross_val_predict`, which are then
    used for calibration. For prediction, the base estimator, trained using all
    the data, is used. This is the method implemented when `probabilities=True`
    for :mod:`sklearn.svm` estimators.

    Already fitted classifiers can be calibrated via the parameter
    `cv="prefit"`. In this case, no cross-validation is used and all provided
    data is used for calibration. The user has to take care manually that data
    for model fitting and calibration are disjoint.

    The calibration is based on the :term:`decision_function` method of the
    `estimator` if it exists, else on :term:`predict_proba`.

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    estimator : estimator instance, default=None
        The classifier whose output need to be calibrated to provide more
        accurate `predict_proba` outputs. The default classifier is
        a :class:`~sklearn.svm.LinearSVC`.

        .. versionadded:: 1.2

    method : {'sigmoid', 'isotonic'}, default='sigmoid'
        The method to use for calibration. Can be 'sigmoid' which
        corresponds to Platt's method (i.e. a logistic regression model) or
        'isotonic' which is a non-parametric approach. It is not advised to
        use isotonic calibration with too few calibration samples
        ``(<<1000)`` since it tends to overfit.

    cv : int, cross-validation generator, iterable or "prefit",             default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is
        neither binary nor multiclass, :class:`~sklearn.model_selection.KFold`
        is used.

        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        If "prefit" is passed, it is assumed that `estimator` has been
        fitted already and all data is used for calibration.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

    n_jobs : int, default=None
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors.

        Base estimator clones are fitted in parallel across cross-validation
        iterations. Therefore parallelism happens only when `cv != "prefit"`.

        See :term:`Glossary <n_jobs>` for more details.

        .. versionadded:: 0.24

    ensemble : bool, default=True
        Determines how the calibrator is fitted when `cv` is not `'prefit'`.
        Ignored if `cv='prefit'`.

        If `True`, the `estimator` is fitted using training data, and
        calibrated using testing data, for each `cv` fold. The final estimator
        is an ensemble of `n_cv` fitted classifier and calibrator pairs, where
        `n_cv` is the number of cross-validation folds. The output is the
        average predicted probabilities of all pairs.

        If `False`, `cv` is used to compute unbiased predictions, via
        :func:`~sklearn.model_selection.cross_val_predict`, which are then
        used for calibration. At prediction time, the classifier used is the
        `estimator` trained on all the data.
        Note that this method is also internally implemented  in
        :mod:`sklearn.svm` estimators with the `probabilities=True` parameter.

        .. versionadded:: 0.24

    base_estimator : estimator instance
        This parameter is deprecated. Use `estimator` instead.

        .. deprecated:: 1.2
           The parameter `base_estimator` is deprecated in 1.2 and will be
           removed in 1.4. Use `estimator` instead.

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        The class labels.

    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying estimator exposes such an attribute when fit.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Only defined if the
        underlying estimator exposes such an attribute when fit.

        .. versionadded:: 1.0

    calibrated_classifiers_ : list (len() equal to cv or 1 if `cv="prefit"`             or `ensemble=False`)
        The list of classifier and calibrator pairs.

        - When `cv="prefit"`, the fitted `estimator` and fitted
          calibrator.
        - When `cv` is not "prefit" and `ensemble=True`, `n_cv` fitted
          `estimator` and calibrator pairs. `n_cv` is the number of
          cross-validation folds.
        - When `cv` is not "prefit" and `ensemble=False`, the `estimator`,
          fitted on all the data, and fitted calibrator.

        .. versionchanged:: 0.24
            Single calibrated classifier case when `ensemble=False`.

    See Also
    --------
    calibration_curve : Compute true and predicted probabilities
        for a calibration curve.

    References
    ----------
    .. [1] Obtaining calibrated probability estimates from decision trees
           and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001

    .. [2] Transforming Classifier Scores into Accurate Multiclass
           Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)

    .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
           Regularized Likelihood Methods, J. Platt, (1999)

    .. [4] Predicting Good Probabilities with Supervised Learning,
           A. Niculescu-Mizil & R. Caruana, ICML 2005

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.calibration import CalibratedClassifierCV
    >>> X, y = make_classification(n_samples=100, n_features=2,
    ...                            n_redundant=0, random_state=42)
    >>> base_clf = GaussianNB()
    >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv=3)
    >>> calibrated_clf.fit(X, y)
    CalibratedClassifierCV(...)
    >>> len(calibrated_clf.calibrated_classifiers_)
    3
    >>> calibrated_clf.predict_proba(X)[:5, :]
    array([[0.110..., 0.889...],
           [0.072..., 0.927...],
           [0.928..., 0.071...],
           [0.928..., 0.071...],
           [0.071..., 0.928...]])
    >>> from sklearn.model_selection import train_test_split
    >>> X, y = make_classification(n_samples=100, n_features=2,
    ...                            n_redundant=0, random_state=42)
    >>> X_train, X_calib, y_train, y_calib = train_test_split(
    ...        X, y, random_state=42
    ... )
    >>> base_clf = GaussianNB()
    >>> base_clf.fit(X_train, y_train)
    GaussianNB()
    >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv="prefit")
    >>> calibrated_clf.fit(X_calib, y_calib)
    CalibratedClassifierCV(...)
    >>> len(calibrated_clf.calibrated_classifiers_)
    1
    >>> calibrated_clf.predict_proba([[-0.5, 0.5]])
    array([[0.936..., 0.063...]])
    fitpredict_probadecision_functionNisotonicsigmoidZ	cv_objectprefitboolean
deprecated	estimatormethodcvn_jobsensemblebase_estimator_parameter_constraintsT)r2   r3   r4   r5   r6   c                C   s(   || _ || _|| _|| _|| _|| _d S Nr0   )selfr1   r2   r3   r4   r5   r6    r:   7/tmp/pip-unpacked-wheel-zrfo1fqw/sklearn/calibration.py__init__  s    
zCalibratedClassifierCV.__init__c              
      s     t t \ dk	r0t  D ]}t| q8jdkrxjdk	rdtdt	
dt jnjdkrtddg _jdkrtjdgd	 jj_t\}}tj}t|| |}	t|	jj}
j|
 nt }|j_tj}tjj}d
|kdk	rdsdtj}t	
d| d tjtrzjntjdrjj ndrt!"fddjD rtd d dt#jdd}j$r"t%j&d}| fdd|' D _nt(}t|\}}dk	rNrNd
indt)t*| ||j&d}t|| |}	dk	rr|j d n|  t||	jj}
j|
 jd j}t|dr|j+_+t|dr|j,_,S )aO  Fit the calibrated model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.

        **fit_params : dict
            Parameters to pass to the `fit` method of the underlying
            classifier.

        Returns
        -------
        self : object
            Returns an instance of self.
        Nr/   ziBoth `base_estimator` and `estimator` are set. Only set `estimator` since `base_estimator` is deprecated.zV`base_estimator` was renamed to `estimator` in version 1.2 and will be removed in 1.4.r   )Zrandom_stater-   classes_)
attributessample_weightzSince aW   does not appear to accept sample_weight, sample weights will only be used for the calibration itself. This can be caused by a limitation of the current scikit-learn API. See the following issue for more details: https://github.com/scikit-learn/scikit-learn/issues/21134. Be warned that the result of the calibration is likely to be incorrect.n_splitsc                    s   g | ]}t |k k qS r:   )npsum).0class_)n_foldsyr:   r;   
<listcomp>  s     z.CalibratedClassifierCV.fit.<locals>.<listcomp>zRequesting z.-fold cross-validation but provided less than z! examples for at least one class.T)
classifier)r4   c              
   3   s@   | ]8\}}t tt f||jjd V  qdS ))traintestr2   classessupports_swr?   N)r   _fit_classifier_calibrator_pairr   r2   r=   )rC   rI   rJ   )Xr1   
fit_paramsr?   r9   rL   rF   r:   r;   	<genexpr>  s   
z-CalibratedClassifierCV.fit.<locals>.<genexpr>)r1   rN   rF   r3   r2   r4   rO   r?   n_features_in_feature_names_in_)-Z_validate_paramsr   r   r   valuesr   r6   r1   
ValueErrorwarningswarnFutureWarningr"   calibrated_classifiers_r3   r   r=   _get_prediction_methodlen_compute_predictions_fit_calibratorr2   appendr   r(   r   
parameterstype__name__
isinstanceinthasattrr@   rA   anyr#   r5   r   r4   splitr   r   r$   rR   rS   )r9   rN   rF   r?   rO   Zsample_aligned_paramspred_methodmethod_name	n_classespredictionscalibrated_classifierZlabel_encoder_Zfit_parametersestimator_namer3   parallelZthis_estimator_Z	first_clfr:   )rN   r1   rO   rE   r?   r9   rL   rF   r;   r(     s    












   zCalibratedClassifierCV.fitc                 C   sP   t |  tt|t| jf}| jD ]}||}||7 }q&|t| j }|S )a  Calibrated probabilities of classification.

        This function returns calibrated probabilities of classification
        according to each class on an array of test vectors X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The samples, as accepted by `estimator.predict_proba`.

        Returns
        -------
        C : ndarray of shape (n_samples, n_classes)
            The predicted probas.
        )r   rA   zerosr   r[   r=   rY   r)   )r9   rN   Z
mean_probark   probar:   r:   r;   r)     s    


z$CalibratedClassifierCV.predict_probac                 C   s"   t |  | jtj| |dd S )a  Predict the target of new samples.

        The predicted class is the class that has the highest probability,
        and can thus be different from the prediction of the uncalibrated classifier.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The samples, as accepted by `estimator.predict`.

        Returns
        -------
        C : ndarray of shape (n_samples,)
            The predicted class.
        r	   Zaxis)r   r=   rA   Zargmaxr)   )r9   rN   r:   r:   r;   predict  s    zCalibratedClassifierCV.predictc                 C   s   dddiiS )NZ_xfail_checksZcheck_sample_weights_invariancezDue to the cross-validation and sample ordering, removing a sample is not strictly equal to putting is weight to zero. Specific unit tests are added for CalibratedClassifierCV specifically.r:   )r9   r:   r:   r;   
_more_tags  s
    z!CalibratedClassifierCV._more_tags)N)N)ra   
__module____qualname____doc__r   r   r   r   r7   dict__annotations__r<   r(   r)   rr   rs   r:   r:   r:   r;   r'   7   s:   
 A



 
 /r'   c	                 K   s   t ||	|}
t||t|| }}t||t|| }}|dk	rh|rht||}| j||fd|i|
 n| j||f|
 t|}t| \}}t||||}|dkrdnt||}t| |||||d}|S )ay  Fit a classifier/calibration pair on a given train/test split.

    Fit the classifier on the train set, compute its predictions on the test
    set and use the predictions as input to fit the calibrator along with the
    test labels.

    Parameters
    ----------
    estimator : estimator instance
        Cloned base estimator.

    X : array-like, shape (n_samples, n_features)
        Sample data.

    y : array-like, shape (n_samples,)
        Targets.

    train : ndarray, shape (n_train_indices,)
        Indices of the training subset.

    test : ndarray, shape (n_test_indices,)
        Indices of the testing subset.

    supports_sw : bool
        Whether or not the `estimator` supports sample weights.

    method : {'sigmoid', 'isotonic'}
        Method to use for calibration.

    classes : ndarray, shape (n_classes,)
        The target classes.

    sample_weight : array-like, default=None
        Sample weights for `X`.

    **fit_params : dict
        Parameters to pass to the `fit` method of the underlying
        classifier.

    Returns
    -------
    calibrated_classifier : _CalibratedClassifier instance
    Nr?   rQ   )r   r    r(   r[   rZ   r\   r]   )r1   rN   rF   rI   rJ   rL   r2   rK   r?   rO   Zfit_params_trainZX_trainZy_trainZX_testZy_testZsw_trainri   rg   rh   rj   Zsw_testrk   r:   r:   r;   rM     s(    7
     rM   c                 C   s<   t | drt| d}|dfS t | dr8t| d}|dfS dS )a  Return prediction method.

    `decision_function` method of `clf` returned, if it
    exists, otherwise `predict_proba` method returned.

    Parameters
    ----------
    clf : Estimator instance
        Fitted classifier to obtain the prediction method from.

    Returns
    -------
    prediction_method : callable
        The prediction method.
    method_name : str
        The name of the prediction method.
    r*   r)   N)rd   getattr)clfr2   r:   r:   r;   rZ   N  s    



rZ   c                 C   sh   | |d}|dkr0|j dkrd|ddtjf }n4|dkrV|dkrd|ddddf }ntd| |S )a#  Return predictions for `X` and reshape binary outputs to shape
    (n_samples, 1).

    Parameters
    ----------
    pred_method : callable
        Prediction method.

    method_name: str
        Name of the prediction method

    X : array-like or None
        Data used to obtain predictions.

    n_classes : int
        Number of classes present.

    Returns
    -------
    predictions : array-like, shape (X.shape[0], len(clf.classes_))
        The predictions. Note if there are 2 classes, array is of shape
        (X.shape[0], 1).
    )rN   r*   r	   Nr)      zInvalid prediction method: )ndimrA   newaxisrU   )rg   rh   rN   ri   rj   r:   r:   r;   r\   i  s    

r\   c                 C   s   t ||d}t |}|| j}g }	t||jD ]F\}
}|dkrPtdd}nt }|||dd|
f | |		| q4t
| |	||d}|S )a  Fit calibrator(s) and return a `_CalibratedClassifier`
    instance.

    `n_classes` (i.e. `len(clf.classes_)`) calibrators are fitted.
    However, if `n_classes` equals 2, one calibrator is fitted.

    Parameters
    ----------
    clf : estimator instance
        Fitted classifier.

    predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1)                     when binary.
        Raw predictions returned by the un-calibrated base classifier.

    y : array-like, shape (n_samples,)
        The targets.

    classes : ndarray, shape (n_classes,)
        All the prediction classes.

    method : {'sigmoid', 'isotonic'}
        The method to use for calibration.

    sample_weight : ndarray, shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    Returns
    -------
    pipeline : _CalibratedClassifier instance
    )rK   r+   Zclip)Zout_of_boundsN)r2   rK   )r   r   r(   	transformr=   zipTr!   _SigmoidCalibrationr^   _CalibratedClassifier)rz   rj   rF   rK   r2   r?   Ylabel_encoderpos_class_indicescalibrators	class_idx	this_pred
calibratorZpipeliner:   r:   r;   r]     s     r]   c                   @   s&   e Zd ZdZddddZdd ZdS )	r   a$  Pipeline-like chaining a fitted classifier and its fitted calibrators.

    Parameters
    ----------
    estimator : estimator instance
        Fitted classifier.

    calibrators : list of fitted estimator instances
        List of fitted calibrators (either 'IsotonicRegression' or
        '_SigmoidCalibration'). The number of calibrators equals the number of
        classes. However, if there are 2 classes, the list contains only one
        fitted calibrator.

    classes : array-like of shape (n_classes,)
        All the prediction classes.

    method : {'sigmoid', 'isotonic'}, default='sigmoid'
        The method to use for calibration. Can be 'sigmoid' which
        corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
    r,   )r2   c                C   s   || _ || _|| _|| _d S r8   )r1   r   rK   r2   )r9   r1   r   rK   r2   r:   r:   r;   r<     s    z_CalibratedClassifier.__init__c                 C   s  t | j}t| j\}}t||||}t | j}|| jj}t	
t||f}t||j| jD ]0\}	}
}|dkr~|	d7 }	||
|dd|	f< qd|dkrd|dddf  |dddf< nBt	j|ddddt	jf }t	|d| }t	j||||dkd}d|d|k |dk@ < |S )	a  Calculate calibrated probabilities.

        Calculates classification calibrated probabilities
        for each class, in a one-vs-all manner, for `X`.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            The sample data.

        Returns
        -------
        proba : array, shape (n_samples, n_classes)
            The predicted probabilities. Can be exact zeros.
        r{   r	   N      ?r   rq   )outwheregrZ|
 ?)r[   rK   rZ   r1   r\   r   r(   r~   r=   rA   ro   r   r   r   r   rr   rB   r}   Z	full_likedivide)r9   rN   ri   rg   rh   rj   r   r   rp   r   r   r   denominatorZuniform_probar:   r:   r;   r)     s4    
  "   z#_CalibratedClassifier.predict_probaN)ra   rt   ru   rv   r<   r)   r:   r:   r:   r;   r     s   r   c           
         s   t | } t |}|  |dk}dk	r@|  }|   }ntt|}|jd | }tj|tjd|d |d  |dk< d|d  |dk< d  fdd} fdd	}td
t|d |d  g}t	|||dd}	|	d |	d fS )aN  Probability Calibration with sigmoid method (Platt 2000)

    Parameters
    ----------
    predictions : ndarray of shape (n_samples,)
        The decision function or predict proba for the samples.

    y : ndarray of shape (n_samples,)
        The targets.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    Returns
    -------
    a : float
        The slope.

    b : float
        The intercept.

    References
    ----------
    Platt, "Probabilistic Outputs for Support Vector Machines"
    r   N)Zdtyper   g       @c                    sT   t | d   | d   }t|td|   }d k	rH|  S | S d S )Nr   r	   r   )r   r   rB   )ABPZlossFr   ZT1r?   r:   r;   	objective>  s
    z'_sigmoid_calibration.<locals>.objectivec                    sV   t | d   | d   }| }d k	r2|9 }t| }t|}t||gS )Nr   r	   )r   rA   dotrB   array)r   r   ZTEP_minus_T1PZdAZdB)r   r   r?   r:   r;   gradG  s    
z"_sigmoid_calibration.<locals>.grad        F)Zfprimedispr	   )
r   rB   floatrA   shapeZ
zeros_likeZfloat64r   r   r   )
rj   rF   r?   Zmask_negative_samplesZprior0Zprior1r   r   ZAB0ZAB_r:   r   r;   _sigmoid_calibration  s$    	
r   c                   @   s"   e Zd ZdZdddZdd ZdS )r   zSigmoid regression model.

    Attributes
    ----------
    a_ : float
        The slope.

    b_ : float
        The intercept.
    Nc                 C   s6   t |}t |}t||\}}t|||\| _| _| S )a  Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like of shape (n_samples,)
            Training data.

        y : array-like of shape (n_samples,)
            Training target.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.

        Returns
        -------
        self : object
            Returns an instance of self.
        )r   r   r   a_b_)r9   rN   rF   r?   r:   r:   r;   r(   b  s
    z_SigmoidCalibration.fitc                 C   s   t |}t| j| | j  S )a  Predict new data by linear interpolation.

        Parameters
        ----------
        T : array-like of shape (n_samples,)
            Data to predict from.

        Returns
        -------
        T_ : ndarray of shape (n_samples,)
            The predicted data.
        )r   r   r   r   )r9   r   r:   r:   r;   rr   |  s    z_SigmoidCalibration.predict)N)ra   rt   ru   rv   r(   rr   r:   r:   r:   r;   r   V  s   
r   r/      uniform)	pos_label	normalizen_binsstrategyc                C   st  t | } t |}t| | t|| }|dkrXtdt |rX||  | |   }| dk sp| dkrxtdt	
| }t|dkrtd| d| |k} |d	krt	dd|d }t	||d
 }n$|dkrt	dd|d }ntdt	|dd |}	t	j|	|t|d}
t	j|	| t|d}t	j|	t|d}|dk}|| ||  }|
| ||  }||fS )a
  Compute true and predicted probabilities for a calibration curve.

    The method assumes the inputs come from a binary classifier, and
    discretize the [0, 1] interval into bins.

    Calibration curves may also be referred to as reliability diagrams.

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        True targets.

    y_prob : array-like of shape (n_samples,)
        Probabilities of the positive class.

    pos_label : int or str, default=None
        The label of the positive class.

        .. versionadded:: 1.1

    normalize : bool, default="deprecated"
        Whether y_prob needs to be normalized into the [0, 1] interval, i.e.
        is not a proper probability. If True, the smallest value in y_prob
        is linearly mapped onto 0 and the largest one onto 1.

        .. deprecated:: 1.1
            The normalize argument is deprecated in v1.1 and will be removed in v1.3.
            Explicitly normalizing `y_prob` will reproduce this behavior, but it is
            recommended that a proper probability is used (i.e. a classifier's
            `predict_proba` positive class).

    n_bins : int, default=5
        Number of bins to discretize the [0, 1] interval. A bigger number
        requires more data. Bins with no samples (i.e. without
        corresponding values in `y_prob`) will not be returned, thus the
        returned arrays may have less than `n_bins` values.

    strategy : {'uniform', 'quantile'}, default='uniform'
        Strategy used to define the widths of the bins.

        uniform
            The bins have identical widths.
        quantile
            The bins have the same number of samples and depend on `y_prob`.

    Returns
    -------
    prob_true : ndarray of shape (n_bins,) or smaller
        The proportion of samples whose class is the positive class, in each
        bin (fraction of positives).

    prob_pred : ndarray of shape (n_bins,) or smaller
        The mean predicted probability in each bin.

    References
    ----------
    Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
    Probabilities With Supervised Learning, in Proceedings of the 22nd
    International Conference on Machine Learning (ICML).
    See section 4 (Qualitative Analysis of Predictions).

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.calibration import calibration_curve
    >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
    >>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9,  1.])
    >>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3)
    >>> prob_true
    array([0. , 0.5, 1. ])
    >>> prob_pred
    array([0.2  , 0.525, 0.85 ])
    r/   a:  The normalize argument is deprecated in v1.1 and will be removed in v1.3. Explicitly normalizing y_prob will reproduce this behavior, but it is recommended that a proper probability is used (i.e. a classifier's `predict_proba` positive class or `decision_function` output calibrated with `CalibratedClassifierCV`).r   r	   z!y_prob has values outside [0, 1].r{   z9Only binary classification is supported. Provided labels .Zquantiled   r   r   r   zSInvalid entry to 'strategy' input. Strategy must be either 'quantile' or 'uniform'.)weights	minlength)r   )r   r   r%   rV   rW   rX   minmaxrU   rA   uniquer[   ZlinspaceZ
percentileZsearchsortedZbincount)y_truey_probr   r   r   r   labelsZ	quantilesZbinsZbinidsZbin_sumsZbin_trueZ	bin_totalZnonzero	prob_true	prob_predr:   r:   r;   calibration_curve  sF    T



r   c                   @   sj   e Zd ZdZdddddZdddddd	Zed
ddddddddZed
ddddddddZdS )CalibrationDisplaya	  Calibration curve (also known as reliability diagram) visualization.

    It is recommended to use
    :func:`~sklearn.calibration.CalibrationDisplay.from_estimator` or
    :func:`~sklearn.calibration.CalibrationDisplay.from_predictions`
    to create a `CalibrationDisplay`. All parameters are stored as attributes.

    Read more about calibration in the :ref:`User Guide <calibration>` and
    more about the scikit-learn visualization API in :ref:`visualizations`.

    .. versionadded:: 1.0

    Parameters
    ----------
    prob_true : ndarray of shape (n_bins,)
        The proportion of samples whose class is the positive class (fraction
        of positives), in each bin.

    prob_pred : ndarray of shape (n_bins,)
        The mean predicted probability in each bin.

    y_prob : ndarray of shape (n_samples,)
        Probability estimates for the positive class, for each sample.

    estimator_name : str, default=None
        Name of estimator. If None, the estimator name is not shown.

    pos_label : str or int, default=None
        The positive class when computing the calibration curve.
        By default, `estimators.classes_[1]` is considered as the
        positive class.

        .. versionadded:: 1.1

    Attributes
    ----------
    line_ : matplotlib Artist
        Calibration curve.

    ax_ : matplotlib Axes
        Axes with calibration curve.

    figure_ : matplotlib Figure
        Figure containing the curve.

    See Also
    --------
    calibration_curve : Compute true and predicted probabilities for a
        calibration curve.
    CalibrationDisplay.from_predictions : Plot calibration curve using true
        and predicted labels.
    CalibrationDisplay.from_estimator : Plot calibration curve using an
        estimator and data.

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.calibration import calibration_curve, CalibrationDisplay
    >>> X, y = make_classification(random_state=0)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, random_state=0)
    >>> clf = LogisticRegression(random_state=0)
    >>> clf.fit(X_train, y_train)
    LogisticRegression(random_state=0)
    >>> y_prob = clf.predict_proba(X_test)[:, 1]
    >>> prob_true, prob_pred = calibration_curve(y_test, y_prob, n_bins=10)
    >>> disp = CalibrationDisplay(prob_true, prob_pred, y_prob)
    >>> disp.plot()
    <...>
    N)rl   r   c                C   s"   || _ || _|| _|| _|| _d S r8   r   r   r   rl   r   )r9   r   r   r   rl   r   r:   r:   r;   r<   _  s
    zCalibrationDisplay.__init__T)axnameref_linec                K   s  t d ddlm} |dkr(| \}}|dkr6| jn|}| jdk	rRd| j dnd}i }|dk	rj||d< |jf | d}	|	| d	 k}
|r|
s|jdd	gdd	gd
|	d |j| j	| j
df|d | _|jdd d| }d| }|j||d || _|j| _| S )aT  Plot visualization.

        Extra keyword arguments will be passed to
        :func:`matplotlib.pyplot.plot`.

        Parameters
        ----------
        ax : Matplotlib Axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        name : str, default=None
            Name for labeling curve. If `None`, use `estimator_name` if
            not `None`, otherwise no labeling is shown.

        ref_line : bool, default=True
            If `True`, plots a reference line representing a perfectly
            calibrated classifier.

        **kwargs : dict
            Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.

        Returns
        -------
        display : :class:`~sklearn.calibration.CalibrationDisplay`
            Object that stores computed values.
        CalibrationDisplay.plotr   Nz(Positive class: ) labelzPerfectly calibratedr	   zk:)r   zs-zlower right)loczMean predicted probability zFraction of positives )xlabelylabel)r   Zmatplotlib.pyplotZpyplotZsubplotsrl   r   updateZget_legend_handles_labelsplotr   r   Zline_ZlegendsetZax_figureZfigure_)r9   r   r   r   kwargsZpltZfigZinfo_pos_labelZline_kwargsZref_line_labelZexisting_ref_liner   r   r:   r:   r;   r   h  s.    

r   r   r   r   r   r   r   r   r   c             	   K   sn   | j  d}t| t|s$tdt||d|d\}}|dk	rD|n|jj }| j||f||||||	d|
S )a
  Plot calibration curve using a binary classifier and data.

        A calibration curve, also known as a reliability diagram, uses inputs
        from a binary classifier and plots the average predicted probability
        for each bin against the fraction of positive classes, on the
        y-axis.

        Extra keyword arguments will be passed to
        :func:`matplotlib.pyplot.plot`.

        Read more about calibration in the :ref:`User Guide <calibration>` and
        more about the scikit-learn visualization API in :ref:`visualizations`.

        .. versionadded:: 1.0

        Parameters
        ----------
        estimator : estimator instance
            Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
            in which the last estimator is a classifier. The classifier must
            have a :term:`predict_proba` method.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input values.

        y : array-like of shape (n_samples,)
            Binary target values.

        n_bins : int, default=5
            Number of bins to discretize the [0, 1] interval into when
            calculating the calibration curve. A bigger number requires more
            data.

        strategy : {'uniform', 'quantile'}, default='uniform'
            Strategy used to define the widths of the bins.

            - `'uniform'`: The bins have identical widths.
            - `'quantile'`: The bins have the same number of samples and depend
              on predicted probabilities.

        pos_label : str or int, default=None
            The positive class when computing the calibration curve.
            By default, `estimators.classes_[1]` is considered as the
            positive class.

            .. versionadded:: 1.1

        name : str, default=None
            Name for labeling curve. If `None`, the name of the estimator is
            used.

        ref_line : bool, default=True
            If `True`, plots a reference line representing a perfectly
            calibrated classifier.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        **kwargs : dict
            Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.

        Returns
        -------
        display : :class:`~sklearn.calibration.CalibrationDisplay`.
            Object that stores computed values.

        See Also
        --------
        CalibrationDisplay.from_predictions : Plot calibration curve using true
            and predicted labels.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.model_selection import train_test_split
        >>> from sklearn.linear_model import LogisticRegression
        >>> from sklearn.calibration import CalibrationDisplay
        >>> X, y = make_classification(random_state=0)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X, y, random_state=0)
        >>> clf = LogisticRegression(random_state=0)
        >>> clf.fit(X_train, y_train)
        LogisticRegression(random_state=0)
        >>> disp = CalibrationDisplay.from_estimator(clf, X_test, y_test)
        >>> plt.show()
        .from_estimatorz*'estimator' should be a fitted classifier.r)   )Zresponse_methodr   Nr   )ra   r   r   rU   r&   	__class__from_predictions)clsr1   rN   rF   r   r   r   r   r   r   r   rh   r   r:   r:   r;   from_estimator  s0    g   
	z!CalibrationDisplay.from_estimatorc                K   sl   | j  d}
t|
 t|||||d\}}|dkr6dn|}t||}| |||||d}|jf ||d|	S )a  Plot calibration curve using true labels and predicted probabilities.

        Calibration curve, also known as reliability diagram, uses inputs
        from a binary classifier and plots the average predicted probability
        for each bin against the fraction of positive classes, on the
        y-axis.

        Extra keyword arguments will be passed to
        :func:`matplotlib.pyplot.plot`.

        Read more about calibration in the :ref:`User Guide <calibration>` and
        more about the scikit-learn visualization API in :ref:`visualizations`.

        .. versionadded:: 1.0

        Parameters
        ----------
        y_true : array-like of shape (n_samples,)
            True labels.

        y_prob : array-like of shape (n_samples,)
            The predicted probabilities of the positive class.

        n_bins : int, default=5
            Number of bins to discretize the [0, 1] interval into when
            calculating the calibration curve. A bigger number requires more
            data.

        strategy : {'uniform', 'quantile'}, default='uniform'
            Strategy used to define the widths of the bins.

            - `'uniform'`: The bins have identical widths.
            - `'quantile'`: The bins have the same number of samples and depend
              on predicted probabilities.

        pos_label : str or int, default=None
            The positive class when computing the calibration curve.
            By default, `estimators.classes_[1]` is considered as the
            positive class.

            .. versionadded:: 1.1

        name : str, default=None
            Name for labeling curve.

        ref_line : bool, default=True
            If `True`, plots a reference line representing a perfectly
            calibrated classifier.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        **kwargs : dict
            Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.

        Returns
        -------
        display : :class:`~sklearn.calibration.CalibrationDisplay`.
            Object that stores computed values.

        See Also
        --------
        CalibrationDisplay.from_estimator : Plot calibration curve using an
            estimator and data.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.model_selection import train_test_split
        >>> from sklearn.linear_model import LogisticRegression
        >>> from sklearn.calibration import CalibrationDisplay
        >>> X, y = make_classification(random_state=0)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X, y, random_state=0)
        >>> clf = LogisticRegression(random_state=0)
        >>> clf.fit(X_train, y_train)
        LogisticRegression(random_state=0)
        >>> y_prob = clf.predict_proba(X_test)[:, 1]
        >>> disp = CalibrationDisplay.from_predictions(y_test, y_prob)
        >>> plt.show()
        r   )r   r   r   N
Classifierr   )r   r   )ra   r   r   r%   r   )r   r   r   r   r   r   r   r   r   r   rh   r   r   r   r:   r:   r;   r   #  s&    a    

z#CalibrationDisplay.from_predictions)	ra   rt   ru   rv   r<   r   classmethodr   r   r:   r:   r:   r;   r     s*   J 	=}r   )N)N)N)Drv   Znumbersr   rV   inspectr   	functoolsr   mathr   ZnumpyrA   Zscipy.specialr   r   Zscipy.optimizer   baser
   r   r   r   r   r   Zpreprocessingr   r   utilsr   r   r   Zutils.multiclassr   Zutils.parallelr   r   Zutils._param_validationr   r   r   Zutils.validationr   r   r   r   r   r    r+   r!   Zsvmr"   Zmodel_selectionr#   r$   Zmetrics._baser%   Zmetrics._plot.baser&   r'   rM   rZ   r\   r]   r   r   r   r   r   r:   r:   r:   r;   <module>   sR   	    W 
L&
0Q
F; 	