
"""Scikit-Learn Wrapper interface for XGBoost."""
import copy
import json
import os
import warnings
from concurrent.futures import ThreadPoolExecutor
from inspect import signature
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Optional,
    Protocol,
    Sequence,
    Tuple,
    Type,
    TypeVar,
    Union,
    cast,
)

import numpy as np
from scipy.special import softmax

from ._typing import ArrayLike, FeatureNames, FeatureTypes, IterationRange, ModelIn
from .callback import TrainingCallback
from .compat import (
    SKLEARN_INSTALLED,
    XGBClassifierBase,
    XGBModelBase,
    XGBRegressorBase,
)
from .config import config_context
from .core import (
    Booster,
    DMatrix,
    Metric,
    Objective,
    QuantileDMatrix,
    XGBoostError,
    _deprecate_positional_args,
    _parse_eval_str,
)
from .data import _is_cudf_df, _is_cudf_ser, _is_cupy_alike, _is_pandas_df
from .training import train


class XGBRankerMixIn:
    """MixIn for ranking, defines the _estimator_type usually defined in scikit-learn
    base classes."""

    _estimator_type = "ranker"


def _check_rf_callback(
    early_stopping_rounds: Optional[int],
    callbacks: Optional[Sequence[TrainingCallback]],
) -> None:
    if early_stopping_rounds is not None or callbacks is not None:
        raise NotImplementedError(
            "`early_stopping_rounds` and `callbacks` are not implemented for"
            " random forest."
        )


def _can_use_qdm(tree_method: Optional[str]) -> bool:
    return tree_method in ("hist", "gpu_hist", None, "auto")


class _SklObjWProto(Protocol):  # pylint: disable=too-few-public-methods
    def __call__(
        self,
        y_true: ArrayLike,
        y_pred: ArrayLike,
        sample_weight: Optional[ArrayLike],
    ) -> Tuple[ArrayLike, ArrayLike]:
        ...


_SklObjProto = Callable[[ArrayLike, ArrayLike], Tuple[np.ndarray, np.ndarray]]
SklObjective = Optional[Union[str, _SklObjWProto, _SklObjProto]]


def _objective_decorator(func: Union[_SklObjProto, _SklObjWProto]) -> Objective:
    """Decorate an objective function

    Converts an objective function using the typical sklearn metrics
    signature so that it is usable with ``xgboost.training.train``

    Parameters
    ----------
    func:
        Expects a callable with signature ``func(y_true, y_pred)``:

        y_true: array_like of shape [n_samples]
            The target values
        y_pred: array_like of shape [n_samples]
            The predicted values
        sample_weight :
            Optional sample weight, None or an ndarray.

    Returns
    -------
    new_func:
        The new objective function as expected by ``xgboost.training.train``.
        The signature is ``new_func(preds, dmatrix)``:

        preds: array_like, shape [n_samples]
            The predicted values
        dmatrix: ``DMatrix``
            The training set from which the labels will be extracted using
            ``dmatrix.get_label()``
    """
    parameters = signature(func).parameters
    supports_sw = "sample_weight" in parameters

    def inner(preds: np.ndarray, dmatrix: DMatrix) -> Tuple[np.ndarray, np.ndarray]:
        """Internal function."""
        sample_weight = dmatrix.get_weight()
        labels = dmatrix.get_label()
        if sample_weight.size > 0:
            if not supports_sw:
                raise ValueError(
                    "Custom objective doesn't have the `sample_weight` parameter while"
                    " sample_weight is used."
                )
            fn = cast(_SklObjWProto, func)
            return fn(labels, preds, sample_weight=sample_weight)
        wfn = cast(_SklObjProto, func)
        return wfn(labels, preds)

    return inner


def _metric_decorator(func: Callable) -> Metric:
    """Decorate a metric function from sklearn.

    Converts a metric function that uses the typical sklearn metric signature so that
    it is compatible with :py:func:`train`.

    """

    def inner(y_score: np.ndarray, dmatrix: DMatrix) -> Tuple[str, float]:
        y_true = dmatrix.get_label()
        weight = dmatrix.get_weight()
        if weight.size == 0:
            return func.__name__, func(y_true, y_score)
        return func.__name__, func(y_true, y_score, sample_weight=weight)

    return inner


def ltr_metric_decorator(func: Callable, n_jobs: Optional[int]) -> Metric:
    """Decorate a learning to rank metric."""

    def inner(y_score: np.ndarray, dmatrix: DMatrix) -> Tuple[str, float]:
        y_true = dmatrix.get_label()
        group_ptr = dmatrix.get_uint_info("group_ptr")
        if group_ptr.size < 2:
            raise ValueError(
                "Invalid `group_ptr`. Likely caused by invalid qid or group."
            )
        scores = np.empty(group_ptr.size - 1)
        futures = []
        weight = dmatrix.get_weight()
        no_weight = weight.size == 0

        def task(i: int) -> float:
            begin = group_ptr[i - 1]
            end = group_ptr[i]
            gy = y_true[begin:end]
            gp = y_score[begin:end]
            if gy.size == 1:
                # Maximum score for a single-sample query group.
                return 1.0
            return func(gy, gp)

        workers = n_jobs if n_jobs is not None else os.cpu_count()
        with ThreadPoolExecutor(max_workers=workers) as executor:
            for i in range(1, group_ptr.size):
                f = executor.submit(task, i)
                futures.append(f)

            for i, f in enumerate(futures):
                scores[i] = f.result()

        if no_weight:
            return func.__name__, scores.mean()

        return func.__name__, np.average(scores, weights=weight)

    return inner
__estimator_doc = f"""
    n_estimators : {Optional[int]}
        Number of gradient boosted trees.  Equivalent to number of boosting
        rounds.
"""

__model_doc = f"""
    max_depth : {Optional[int]}

        Maximum tree depth for base learners.

    max_leaves : {Optional[int]}

        Maximum number of leaves; 0 indicates no limit.

    max_bin : {Optional[int]}

        If using histogram-based algorithm, maximum number of bins per feature.

    grow_policy : {Optional[str]}

        Tree growing policy.

        - depthwise: Favors splitting at nodes closest to the root.
        - lossguide: Favors splitting at nodes with highest loss change.

    learning_rate : {Optional[float]}

        Boosting learning rate (xgb's "eta").

    verbosity : {Optional[int]}

        The degree of verbosity. Valid values are 0 (silent) - 3 (debug).

    objective : {SklObjective}

        Specify the learning task and the corresponding learning objective or a custom
        objective function to be used.

        For custom objective, see :doc:`/tutorials/custom_metric_obj` and
        :ref:`custom-obj-metric` for more information, along with the end note for
        function signatures.

    booster : {Optional[str]}

        Specify which booster to use: ``gbtree``, ``gblinear`` or ``dart``.

    tree_method : {Optional[str]}

        Specify which tree method to use.  Default to auto.  If this parameter is set to
        default, XGBoost will choose the most conservative option available.  It's
        recommended to study this option from the parameters document :doc:`tree method
        </treemethod>`

    n_jobs : {Optional[int]}

        Number of parallel threads used to run xgboost.  When used with other
        Scikit-Learn algorithms like grid search, you may choose which algorithm to
        parallelize and balance the threads.  Creating thread contention will
        significantly slow down both algorithms.

    gamma : {Optional[float]}

        (min_split_loss) Minimum loss reduction required to make a further partition on
        a leaf node of the tree.

    min_child_weight : {Optional[float]}

        Minimum sum of instance weight(hessian) needed in a child.

    max_delta_step : {Optional[float]}

        Maximum delta step we allow each tree's weight estimation to be.

    subsample : {Optional[float]}

        Subsample ratio of the training instance.

    sampling_method : {Optional[str]}

        Sampling method. Used only by the GPU version of ``hist`` tree method.

        - ``uniform``: Select random training instances uniformly.
        - ``gradient_based``: Select random training instances with higher probability
            when the gradient and hessian are larger. (cf. CatBoost)

    colsample_bytree : {Optional[float]}

        Subsample ratio of columns when constructing each tree.

    colsample_bylevel : {Optional[float]}

        Subsample ratio of columns for each level.

    colsample_bynode : {Optional[float]}

        Subsample ratio of columns for each split.

    reg_alpha : {Optional[float]}

        L1 regularization term on weights (xgb's alpha).

    reg_lambda : {Optional[float]}

        L2 regularization term on weights (xgb's lambda).

    scale_pos_weight : {Optional[float]}

        Balancing of positive and negative weights.

    base_score : {Optional[float]}

        The initial prediction score of all instances, global bias.

    random_state : {Optional[Union[np.random.RandomState, np.random.Generator, int]]}

        Random number seed.

        .. note::

           Using gblinear booster with shotgun updater is nondeterministic as
           it uses Hogwild algorithm.

    missing : float

        Value in the data which needs to be present as a missing value. Default to
        :py:data:`numpy.nan`.
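
        For example (a sketch):

        .. code-block:: python

            # Treat the sentinel -999.0 in the input as missing.
            reg = xgb.XGBRegressor(missing=-999.0)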

    num_parallel_tree : {Optional[int]}

        Used for boosting random forest.

    monotone_constraints : {Optional[Union[Dict[str, int], str]]}

        Constraint of variable monotonicity.  See :doc:`tutorial </tutorials/monotonic>`
        for more information.
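
        For example (a sketch for a two-feature model; the tuple is ordered by
        feature index):

        .. code-block:: python

            # +1 enforces an increasing relation, -1 a decreasing one.
            reg = xgb.XGBRegressor(monotone_constraints="(1,-1)")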

    interaction_constraints : {Optional[Union[str, Sequence[Sequence[str]]]]}

        Constraints for interaction representing permitted interactions.  The
        constraints must be specified in the form of a nested list, e.g. ``[[0, 1], [2,
        3, 4]]``, where each inner list is a group of indices of features that are
        allowed to interact with each other.  See :doc:`tutorial
        </tutorials/feature_interaction_constraint>` for more information.
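
        For example (a sketch):

        .. code-block:: python

            # Features 0 and 1 may interact with each other; 2, 3 and 4 form a
            # second group.
            reg = xgb.XGBRegressor(interaction_constraints=[[0, 1], [2, 3, 4]])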

    importance_type : {Optional[str]}

        The feature importance type for the feature_importances\_ property:

        * For tree model, it's either "gain", "weight", "cover", "total_gain" or
          "total_cover".
        * For linear model, only "weight" is defined and it's the normalized
          coefficients without bias.

    device : {Optional[str]}

        .. versionadded:: 2.0.0

        Device ordinal, available options are `cpu`, `cuda`, and `gpu`.

    validate_parameters : {Optional[bool]}

        Give warnings for unknown parameter.

    enable_categorical : bool

        See the same parameter of :py:class:`DMatrix` for details.

    feature_types : {Optional[FeatureTypes]}

        .. versionadded:: 1.7.0

        Used for specifying feature types without constructing a dataframe. See
        :py:class:`DMatrix` for details.
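
        For example (a sketch with one quantitative and one categorical
        feature):

        .. code-block:: python

            reg = xgb.XGBRegressor(
                feature_types=["q", "c"], enable_categorical=True
            )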

    max_cat_to_onehot : {Optional[int]}

        .. versionadded:: 1.6.0

        .. note:: This parameter is experimental

        A threshold for deciding whether XGBoost should use one-hot encoding based split
        for categorical data.  When the number of categories is less than the threshold
        then one-hot encoding is chosen, otherwise the categories will be partitioned
        into children nodes. Also, `enable_categorical` needs to be set to have
        categorical feature support. See :doc:`Categorical Data
        </tutorials/categorical>` and :ref:`cat-param` for details.

    max_cat_threshold : {Optional[int]}

        .. versionadded:: 1.7.0

        .. note:: This parameter is experimental

        Maximum number of categories considered for each split. Used only by
        partition-based splits for preventing over-fitting. Also, `enable_categorical`
        needs to be set to have categorical feature support. See :doc:`Categorical Data
        </tutorials/categorical>` and :ref:`cat-param` for details.

    multi_strategy : {Optional[str]}

        .. versionadded:: 2.0.0

        .. note:: This parameter is a work in progress.

        The strategy used for training multi-target models, including multi-target
        regression and multi-class classification. See :doc:`/tutorials/multioutput` for
        more information.

        - ``one_output_per_tree``: One model for each target.
        - ``multi_output_tree``:  Use multi-target trees.
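
        For example (a sketch):

        .. code-block:: python

            # One multi-target tree per boosting round instead of one model
            # per target.
            reg = xgb.XGBRegressor(multi_strategy="multi_output_tree")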

    eval_metric : {Optional[Union[str, List[str], Callable]]}

        .. versionadded:: 1.6.0

        Metric used for monitoring the training result and early stopping.  It can be a
        string or list of strings as names of predefined metric in XGBoost (See
        doc/parameter.rst), one of the metrics in :py:mod:`sklearn.metrics`, or any
        other user defined metric that looks like `sklearn.metrics`.

        If custom objective is also provided, then custom metric should implement the
        corresponding reverse link function.

        Unlike the `scoring` parameter commonly used in scikit-learn, when a callable
        object is provided, it's assumed to be a cost function and by default XGBoost
        will minimize the result during early stopping.

        For advanced usage of early stopping, such as directly choosing to maximize instead
        of minimize, see :py:obj:`xgboost.callback.EarlyStopping`.

        See :doc:`/tutorials/custom_metric_obj` and :ref:`custom-obj-metric` for more
        information.

        .. code-block:: python

            from sklearn.datasets import load_diabetes
            from sklearn.metrics import mean_absolute_error
            X, y = load_diabetes(return_X_y=True)
            reg = xgb.XGBRegressor(
                tree_method="hist",
                eval_metric=mean_absolute_error,
            )
            reg.fit(X, y, eval_set=[(X, y)])

    early_stopping_rounds : {Optional[int]}

        .. versionadded:: 1.6.0

        - Activates early stopping. Validation metric needs to improve at least once in
          every **early_stopping_rounds** round(s) to continue training.  Requires at
          least one item in **eval_set** in :py:meth:`fit`.

        - If early stopping occurs, the model will have two additional attributes:
          :py:attr:`best_score` and :py:attr:`best_iteration`. These are used by the
          :py:meth:`predict` and :py:meth:`apply` methods to determine the optimal
          number of trees during inference. If users want to access the full model
          (including trees built after early stopping), they can specify the
          `iteration_range` in these inference methods. In addition, other utilities
          like model plotting can also use the entire model.

        - If you prefer to discard the trees after `best_iteration`, consider using the
          callback function :py:class:`xgboost.callback.EarlyStopping`.

        - If there's more than one item in **eval_set**, the last entry will be used for
          early stopping.  If there's more than one metric in **eval_metric**, the last
          metric will be used for early stopping.
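
        For instance (a sketch assuming pre-split training and validation
        data):

        .. code-block:: python

            reg = xgb.XGBRegressor(n_estimators=1000, early_stopping_rounds=10)
            reg.fit(X_train, y_train, eval_set=[(X_valid, y_valid)])
            # Inference methods use trees up to ``best_iteration`` by default.
            reg.predict(X_valid)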

    callbacks : {Optional[List[TrainingCallback]]}

        List of callback functions that are applied at end of each iteration.
        It is possible to use predefined callbacks by using
        :ref:`Callback API <callback_api>`.

        .. note::

           States in callback are not preserved during training, which means callback
           objects can not be reused for multiple training sessions without
           reinitialization or deepcopy.

        .. code-block:: python

            for params in parameters_grid:
                # be sure to (re)initialize the callbacks before each run
                callbacks = [xgb.callback.LearningRateScheduler(custom_rates)]
                reg = xgboost.XGBRegressor(**params, callbacks=callbacks)
                reg.fit(X, y)

    kwargs : {Optional[Any]}

        Keyword arguments for XGBoost Booster object.  Full documentation of parameters
        can be found :doc:`here </parameter>`.
        Attempting to set a parameter via the constructor args and \*\*kwargs
        dict simultaneously will result in a TypeError.

        .. note:: \*\*kwargs unsupported by scikit-learn

            \*\*kwargs is unsupported by scikit-learn.  We do not guarantee
            that parameters passed via this argument will interact properly
            with scikit-learn.
"""

__custom_obj_note = """
        .. note::  Custom objective function

            A custom objective function can be provided for the ``objective``
            parameter. In this case, it should have the signature ``objective(y_true,
            y_pred) -> [grad, hess]`` or ``objective(y_true, y_pred, *, sample_weight)
            -> [grad, hess]``:

            y_true: array_like of shape [n_samples]
                The target values
            y_pred: array_like of shape [n_samples]
                The predicted values
            sample_weight :
                Optional sample weights.

            grad: array_like of shape [n_samples]
                The value of the gradient for each sample point.
            hess: array_like of shape [n_samples]
                The value of the second derivative for each sample point.
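
            For example, a plain squared-error objective can be written as
            follows (a minimal sketch; ``X`` and ``y`` are assumed to be
            predefined):

            .. code-block:: python

                import numpy as np
                import xgboost as xgb

                def squared_error(y_true: np.ndarray, y_pred: np.ndarray):
                    grad = y_pred - y_true
                    hess = np.ones_like(y_true)
                    return grad, hess

                reg = xgb.XGBRegressor(objective=squared_error)
                reg.fit(X, y)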
headeritemsextra_parametersend_notec                 \     dt         dt         fddt        dt        f fd}|S )a  Obtain documentation for Scikit-Learn wrappers

    Parameters
    ----------
    header: str
       An introduction to the class.
    items : list
       A list of common doc items.  Available items are:
         - estimators: the meaning of n_estimators
         - model: All the other parameters
         - objective: note for customized objective
    extra_parameters: str
       Document for class specific parameters, placed at the head.
    end_note: str
       Extra notes put to the end.
    """

    def get_doc(item: str) -> str:
        """Return the selected item."""
        __doc = {
            "estimators": __estimator_doc,
            "model": __model_doc,
            "objective": __custom_obj_note,
        }
        return __doc[item]

    def adddoc(cls: Type) -> Type:
        doc = [
            """
Parameters
----------
"""
        ]
        if extra_parameters:
            doc.append(extra_parameters)
        doc.extend([get_doc(i) for i in items])
        if end_note:
            doc.append(end_note)
        full_doc = [
            header + "\n\nSee :doc:`/python/sklearn_estimator` for more information.\n"
        ]
        full_doc.extend(doc)
        cls.__doc__ = "".join(full_doc)
        return cls

    return adddoc


def _wrap_evaluation_matrices(
    missing: float,
    X: Any,
    y: Any,
    group: Optional[Any],
    qid: Optional[Any],
    sample_weight: Optional[Any],
    base_margin: Optional[Any],
    feature_weights: Optional[Any],
    eval_set: Optional[Sequence[Tuple[Any, Any]]],
    sample_weight_eval_set: Optional[Sequence[Any]],
    base_margin_eval_set: Optional[Sequence[Any]],
    eval_group: Optional[Sequence[Any]],
    eval_qid: Optional[Sequence[Any]],
    create_dmatrix: Callable,
    enable_categorical: bool,
    feature_types: Optional[FeatureTypes],
) -> Tuple[Any, List[Tuple[Any, str]]]:
    """Convert array_like evaluation matrices into DMatrix.  Perform validation on the
    way.

    """
    train_dmatrix = create_dmatrix(
        data=X,
        label=y,
        group=group,
        qid=qid,
        weight=sample_weight,
        base_margin=base_margin,
        feature_weights=feature_weights,
        missing=missing,
        enable_categorical=enable_categorical,
        feature_types=feature_types,
        ref=None,
    )

    n_validation = 0 if eval_set is None else len(eval_set)

    def validate_or_none(meta: Optional[Sequence], name: str) -> Sequence:
        if meta is None:
            return [None] * n_validation
        if len(meta) != n_validation:
            raise ValueError(
                f"{name}'s length does not equal `eval_set`'s length, "
                f"expecting {n_validation}, got {len(meta)}"
            )
        return meta

    if eval_set is not None:
        sample_weight_eval_set = validate_or_none(
            sample_weight_eval_set, "sample_weight_eval_set"
        )
        base_margin_eval_set = validate_or_none(
            base_margin_eval_set, "base_margin_eval_set"
        )
        eval_group = validate_or_none(eval_group, "eval_group")
        eval_qid = validate_or_none(eval_qid, "eval_qid")

        evals = []
        for i, (valid_X, valid_y) in enumerate(eval_set):
            # Skip the duplicated entry.
            if all(
                (
                    valid_X is X,
                    valid_y is y,
                    sample_weight_eval_set[i] is sample_weight,
                    base_margin_eval_set[i] is base_margin,
                    eval_group[i] is group,
                    eval_qid[i] is qid,
                )
            ):
                evals.append(train_dmatrix)
            else:
                m = create_dmatrix(
                    data=valid_X,
                    label=valid_y,
                    weight=sample_weight_eval_set[i],
                    group=eval_group[i],
                    qid=eval_qid[i],
                    base_margin=base_margin_eval_set[i],
                    missing=missing,
                    enable_categorical=enable_categorical,
                    feature_types=feature_types,
                    ref=train_dmatrix,
                )
                evals.append(m)
        nevals = len(evals)
        eval_names = [f"validation_{i}" for i in range(nevals)]
        evals = list(zip(evals, eval_names))
    else:
        if any(
            meta is not None
            for meta in [
                sample_weight_eval_set,
                base_margin_eval_set,
                eval_group,
                eval_qid,
            ]
        ):
            raise ValueError(
                "`eval_set` is not set but one of the other evaluation meta info is "
                "not None."
            )
        evals = []

    return train_dmatrix, evals


DEFAULT_N_ESTIMATORS = 100


@xgboost_model_doc(
    "Implementation of the Scikit-Learn API for XGBoost.",
    ["estimators", "model", "objective"],
)
class XGBModel(XGBModelBase):
    @_deprecate_positional_args
    def __init__(
        self,
        max_depth: Optional[int] = None,
        max_leaves: Optional[int] = None,
        max_bin: Optional[int] = None,
        grow_policy: Optional[str] = None,
        learning_rate: Optional[float] = None,
        n_estimators: Optional[int] = None,
        verbosity: Optional[int] = None,
        objective: SklObjective = None,
        booster: Optional[str] = None,
        tree_method: Optional[str] = None,
        n_jobs: Optional[int] = None,
        gamma: Optional[float] = None,
        min_child_weight: Optional[float] = None,
        max_delta_step: Optional[float] = None,
        subsample: Optional[float] = None,
        sampling_method: Optional[str] = None,
        colsample_bytree: Optional[float] = None,
        colsample_bylevel: Optional[float] = None,
        colsample_bynode: Optional[float] = None,
        reg_alpha: Optional[float] = None,
        reg_lambda: Optional[float] = None,
        scale_pos_weight: Optional[float] = None,
        base_score: Optional[float] = None,
        random_state: Optional[
            Union[np.random.RandomState, np.random.Generator, int]
        ] = None,
        missing: float = np.nan,
        num_parallel_tree: Optional[int] = None,
        monotone_constraints: Optional[Union[Dict[str, int], str]] = None,
        interaction_constraints: Optional[Union[str, Sequence[Sequence[str]]]] = None,
        importance_type: Optional[str] = None,
        device: Optional[str] = None,
        validate_parameters: Optional[bool] = None,
        enable_categorical: bool = False,
        feature_types: Optional[FeatureTypes] = None,
        max_cat_to_onehot: Optional[int] = None,
        max_cat_threshold: Optional[int] = None,
        multi_strategy: Optional[str] = None,
        eval_metric: Optional[Union[str, List[str], Callable]] = None,
        early_stopping_rounds: Optional[int] = None,
        callbacks: Optional[List[TrainingCallback]] = None,
        **kwargs: Any,
    ) -> None:
        if not SKLEARN_INSTALLED:
            raise ImportError(
                "sklearn needs to be installed in order to use this module"
            )
        self.n_estimators = n_estimators
        self.objective = objective
        self.max_depth = max_depth
        self.max_leaves = max_leaves
        self.max_bin = max_bin
        self.grow_policy = grow_policy
        self.learning_rate = learning_rate
        self.verbosity = verbosity
        self.booster = booster
        self.tree_method = tree_method
        self.gamma = gamma
        self.min_child_weight = min_child_weight
        self.max_delta_step = max_delta_step
        self.subsample = subsample
        self.sampling_method = sampling_method
        self.colsample_bytree = colsample_bytree
        self.colsample_bylevel = colsample_bylevel
        self.colsample_bynode = colsample_bynode
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.scale_pos_weight = scale_pos_weight
        self.base_score = base_score
        self.missing = missing
        self.num_parallel_tree = num_parallel_tree
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.monotone_constraints = monotone_constraints
        self.interaction_constraints = interaction_constraints
        self.importance_type = importance_type
        self.device = device
        self.validate_parameters = validate_parameters
        self.enable_categorical = enable_categorical
        self.feature_types = feature_types
        self.max_cat_to_onehot = max_cat_to_onehot
        self.max_cat_threshold = max_cat_threshold
        self.multi_strategy = multi_strategy
        self.eval_metric = eval_metric
        self.early_stopping_rounds = early_stopping_rounds
        self.callbacks = callbacks
        if kwargs:
            self.kwargs = kwargs

    def _more_tags(self) -> Dict[str, bool]:
        """Tags used for scikit-learn data validation."""
        tags = {"allow_nan": True, "no_validation": True}
        if hasattr(self, "kwargs") and self.kwargs.get("updater") == "shotgun":
            tags["non_deterministic"] = True
        return tags

    def __sklearn_is_fitted__(self) -> bool:
        return hasattr(self, "_Booster")

    def get_booster(self) -> Booster:
        """Get the underlying xgboost Booster of this model.

        This will raise an exception when fit was not called

        Returns
        -------
        booster : a xgboost booster of underlying model
        """
        if not self.__sklearn_is_fitted__():
            from sklearn.exceptions import NotFittedError

            raise NotFittedError("need to call fit or load_model beforehand")
        return self._Booster

    def set_params(self, **params: Any) -> "XGBModel":
        """Set the parameters of this estimator.  Modification of the sklearn method to
        allow unknown kwargs. This allows using the full range of xgboost
        parameters that are not defined as member variables in sklearn grid
        search.

        Returns
        -------
        self

        """
        if not params:
            return self

        for key, value in params.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                if not hasattr(self, "kwargs"):
                    self.kwargs = {}
                self.kwargs[key] = value

        if self.__sklearn_is_fitted__():
            parameters = self.get_xgb_params()
            self.get_booster().set_param(parameters)

        return self

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get parameters."""
        params = super().get_params(deep)
        cp = copy.copy(self)
        # Get parameters from the immediate parent class as well.
        cp.__class__ = cp.__class__.__bases__[0]
        params.update(cp.__class__.get_params(cp, deep))
        if hasattr(self, "kwargs") and isinstance(self.kwargs, dict):
            params.update(self.kwargs)
        # Materialize the random state as an integer seed.
        if isinstance(params["random_state"], np.random.RandomState):
            params["random_state"] = params["random_state"].randint(
                np.iinfo(np.int32).max
            )
        elif isinstance(params["random_state"], np.random.Generator):
            params["random_state"] = int(
                params["random_state"].integers(np.iinfo(np.int32).max)
            )

        return params

    def get_xgb_params(self) -> Dict[str, Any]:
        """Get xgboost specific parameters."""
        params = self.get_params()
        # Parameters that should not go into the native booster configuration.
        wrapper_specific = {
            "importance_type",
            "kwargs",
            "missing",
            "n_estimators",
            "enable_categorical",
            "early_stopping_rounds",
            "callbacks",
            "feature_types",
        }
        filtered = {}
        for k, v in params.items():
            if k not in wrapper_specific and not callable(v):
                filtered[k] = v

        return filtered

    def get_num_boosting_rounds(self) -> int:
        """Gets the number of xgboost boosting rounds."""
        return DEFAULT_N_ESTIMATORS if self.n_estimators is None else self.n_estimators

    def _get_type(self) -> str:
        if not hasattr(self, "_estimator_type"):
            raise TypeError(
                "`_estimator_type` undefined.  "
                "Please use appropriate mixin to define estimator type."
            )
        return self._estimator_type

    def save_model(self, fname: Union[str, os.PathLike]) -> None:
        meta: Dict[str, Any] = {}
        meta["_estimator_type"] = self._get_type()
        meta_str = json.dumps(meta)
        self.get_booster().set_attr(scikit_learn=meta_str)
        self.get_booster().save_model(fname)
        self.get_booster().set_attr(scikit_learn=None)

    save_model.__doc__ = f"""{Booster.save_model.__doc__}"""

    def load_model(self, fname: ModelIn) -> None:
        if not self.__sklearn_is_fitted__():
            self._Booster = Booster({"nthread": self.n_jobs})
        self.get_booster().load_model(fname)

        meta_str = self.get_booster().attr("scikit_learn")
        if meta_str is not None:
            meta = json.loads(meta_str)
            t = meta.get("_estimator_type", None)
            if t is not None and t != self._get_type():
                raise TypeError(
                    "Loading an estimator with different type. Expecting: "
                    f"{self._get_type()}, got: {t}"
                )
        self.feature_types = self.get_booster().feature_types
        self.get_booster().set_attr(scikit_learn=None)
        config = json.loads(self.get_booster().save_config())
        self._load_model_attributes(config)

    load_model.__doc__ = f"""{Booster.load_model.__doc__}"""

    def _load_model_attributes(self, config: dict) -> None:
        """Load model attributes without hyper-parameters."""
        from sklearn.base import is_classifier

        booster = self.get_booster()

        self.objective = config["learner"]["objective"]["name"]
        self.booster = config["learner"]["gradient_booster"]["name"]
        self.base_score = config["learner"]["learner_model_param"]["base_score"]
        self.feature_types = booster.feature_types

        if is_classifier(self):
            self.n_classes_ = int(config["learner"]["learner_model_param"]["num_class"])
            self.n_classes_ = 2 if self.n_classes_ < 2 else self.n_classes_

    def _configure_fit(
        self,
        booster: Optional[Union[Booster, "XGBModel", str]],
        params: Dict[str, Any],
    ) -> Tuple[
        Optional[Union[Booster, str, "XGBModel"]],
        Optional[Metric],
        Dict[str, Any],
    ]:
        """Configure parameters for :py:meth:`fit`."""
        if isinstance(booster, XGBModel):
            model: Optional[Union[Booster, str]] = booster.get_booster()
        else:
            model = booster

        metric = None
        if self.eval_metric is not None:
            if callable(self.eval_metric):
                if self._get_type() == "ranker":
                    metric = ltr_metric_decorator(self.eval_metric, self.n_jobs)
                else:
                    metric = _metric_decorator(self.eval_metric)
            else:
                params.update({"eval_metric": self.eval_metric})

        tree_method = params.get("tree_method", None)
        if self.enable_categorical and tree_method == "exact":
            raise ValueError(
                "Experimental support for categorical data is not implemented for"
                " current tree method yet."
            )
        return model, metric, params

    def _create_dmatrix(self, ref: Optional[DMatrix], **kwargs: Any) -> DMatrix:
        # Use `QuantileDMatrix` to save memory when the histogram method is used.
        if _can_use_qdm(self.tree_method) and self.booster != "gblinear":
            try:
                return QuantileDMatrix(
                    **kwargs, ref=ref, nthread=self.n_jobs, max_bin=self.max_bin
                )
            except TypeError:
                # `QuantileDMatrix` supports fewer input types than `DMatrix`.
                pass
        return DMatrix(**kwargs, nthread=self.n_jobs)

    def _set_evaluation_result(self, evals_result: TrainingCallback.EvalsLog) -> None:
        if evals_result:
            self.evals_result_ = cast(
                Dict[str, Dict[str, List[float]]], evals_result
            )

    @_deprecate_positional_args
    def fit(
        self,
        X: ArrayLike,
        y: ArrayLike,
        *,
        sample_weight: Optional[ArrayLike] = None,
        base_margin: Optional[ArrayLike] = None,
        eval_set: Optional[Sequence[Tuple[ArrayLike, ArrayLike]]] = None,
        verbose: Optional[Union[bool, int]] = True,
        xgb_model: Optional[Union[Booster, str, "XGBModel"]] = None,
        sample_weight_eval_set: Optional[Sequence[ArrayLike]] = None,
        base_margin_eval_set: Optional[Sequence[ArrayLike]] = None,
        feature_weights: Optional[ArrayLike] = None,
    ) -> "XGBModel":
        """Fit gradient boosting model.

        Note that calling ``fit()`` multiple times will cause the model object to be
        re-fit from scratch. To resume training from a previous checkpoint, explicitly
        pass ``xgb_model`` argument.

        Parameters
        ----------
        X :
            Feature matrix. See :ref:`py-data` for a list of supported types.

            When the ``tree_method`` is set to ``hist``, internally, the
            :py:class:`QuantileDMatrix` will be used instead of the :py:class:`DMatrix`
            for conserving memory. However, this has performance implications when the
            device of input data is not matched with algorithm. For instance, if the
            input is a numpy array on CPU but ``cuda`` is used for training, then the
            data is first processed on CPU then transferred to GPU.
        y :
            Labels.
        sample_weight :
            Instance weights.
        base_margin :
            Global bias for each instance. See :doc:`/tutorials/intercept` for details.
        eval_set :
            A list of (X, y) tuple pairs to use as validation sets, for which
            metrics will be computed.
            Validation metrics will help us track the performance of the model.

        verbose :
            If `verbose` is True and an evaluation set is used, the evaluation metric
            measured on the validation set is printed to stdout at each boosting stage.
            If `verbose` is an integer, the evaluation metric is printed at each
            `verbose` boosting stage. The last boosting stage / the boosting stage found
            by using `early_stopping_rounds` is also printed.
        xgb_model :
            file name of stored XGBoost model or 'Booster' instance XGBoost model to be
            loaded before training (allows training continuation).
        sample_weight_eval_set :
            A list of the form [L_1, L_2, ..., L_n], where each L_i is an array like
            object storing instance weights for the i-th validation set.
        base_margin_eval_set :
            A list of the form [M_1, M_2, ..., M_n], where each M_i is an array like
            object storing base margin for the i-th validation set.
        feature_weights :
            Weight for each feature, defines the probability of each feature being
            selected when colsample is being used.  All values must be greater than 0,
            otherwise a `ValueError` is thrown.

        """
        with config_context(verbosity=self.verbosity):
            evals_result: TrainingCallback.EvalsLog = {}
            train_dmatrix, evals = _wrap_evaluation_matrices(
                missing=self.missing,
                X=X,
                y=y,
                group=None,
                qid=None,
                sample_weight=sample_weight,
                base_margin=base_margin,
                feature_weights=feature_weights,
                eval_set=eval_set,
                sample_weight_eval_set=sample_weight_eval_set,
                base_margin_eval_set=base_margin_eval_set,
                eval_group=None,
                eval_qid=None,
                create_dmatrix=self._create_dmatrix,
                enable_categorical=self.enable_categorical,
                feature_types=self.feature_types,
            )
            params = self.get_xgb_params()

            if callable(self.objective):
                obj: Optional[Objective] = _objective_decorator(self.objective)
                params["objective"] = "reg:squarederror"
            else:
                obj = None

            model, metric, params = self._configure_fit(xgb_model, params)
            self._Booster = train(
                params,
                train_dmatrix,
                self.get_num_boosting_rounds(),
                evals=evals,
                early_stopping_rounds=self.early_stopping_rounds,
                evals_result=evals_result,
                obj=obj,
                custom_metric=metric,
                verbose_eval=verbose,
                xgb_model=model,
                callbacks=self.callbacks,
            )

            self._set_evaluation_result(evals_result)
            return self

    def _can_use_inplace_predict(self) -> bool:
        # `inplace_predict` handles dense inputs directly; the linear booster
        # still requires going through `DMatrix`.
        if self.booster != "gblinear":
            return True
        return False

    def _get_iteration_range(
        self, iteration_range: Optional[IterationRange]
    ) -> IterationRange:
        if iteration_range is None or iteration_range[0] == 0:
            # Use `best_iteration` if it's defined (early stopping happened).
            try:
                iteration_range = (0, self.best_iteration + 1)
            except AttributeError:
                iteration_range = (0, 0)
        if self.booster == "gblinear":
            iteration_range = (0, 0)
        return iteration_range

    def predict(
        self,
        X: ArrayLike,
        output_margin: bool = False,
        validate_features: bool = True,
        base_margin: Optional[ArrayLike] = None,
        iteration_range: Optional[IterationRange] = None,
    ) -> ArrayLike:
        """Predict with `X`.  If the model is trained with early stopping, then
        :py:attr:`best_iteration` is used automatically. The estimator uses
        `inplace_predict` by default and falls back to using :py:class:`DMatrix` if
        devices between the data and the estimator don't match.

        .. note:: This function is only thread safe for `gbtree` and `dart`.

        Parameters
        ----------
        X :
            Data to predict with.
        output_margin :
            Whether to output the raw untransformed margin value.
        validate_features :
            When this is True, validate that the Booster's and data's feature_names are
            identical.  Otherwise, it is assumed that the feature_names are the same.
        base_margin :
            Global bias for each instance. See :doc:`/tutorials/intercept` for details.
        iteration_range :
            Specifies which layer of trees are used in prediction.  For example, if a
            random forest is trained with 100 rounds, specifying ``iteration_range=(10,
            20)`` means only the forests built during [10, 20) (half open set) rounds
            are used in this prediction.

            .. versionadded:: 1.4.0
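
            For example (a sketch; ``reg`` is assumed to be a fitted model):

            .. code-block:: python

                # Use only the first 20 rounds for this prediction.
                preds = reg.predict(X, iteration_range=(0, 20))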

        Returns
        -------
        prediction

        """
        with config_context(verbosity=self.verbosity):
            iteration_range = self._get_iteration_range(iteration_range)
            if self._can_use_inplace_predict():
                try:
                    predts = self.get_booster().inplace_predict(
                        data=X,
                        iteration_range=iteration_range,
                        predict_type="margin" if output_margin else "value",
                        missing=self.missing,
                        base_margin=base_margin,
                        validate_features=validate_features,
                    )
                    if _is_cupy_alike(predts):
                        import cupy  # pylint: disable=import-error

                        # Make sure a numpy array is returned.
                        predts = cupy.asnumpy(predts)
                    return predts
                except TypeError:
                    # Fall back to DMatrix for unsupported input types.
                    pass

            test = DMatrix(
                X,
                base_margin=base_margin,
                missing=self.missing,
                nthread=self.n_jobs,
                feature_types=self.feature_types,
                enable_categorical=self.enable_categorical,
            )
            return self.get_booster().predict(
                data=test,
                iteration_range=iteration_range,
                output_margin=output_margin,
                validate_features=validate_features,
            )

    def apply(
        self,
        X: ArrayLike,
        iteration_range: Optional[IterationRange] = None,
    ) -> np.ndarray:
        """Return the predicted leaf every tree for each sample. If the model is trained
                  | j                        }| j                         j                  |d|      cddd       S # 1 sw Y   yxY w)a  Return the predicted leaf every tree for each sample. If the model is trained
        with early stopping, then :py:attr:`best_iteration` is used automatically.

        Parameters
        ----------
        X : array_like, shape=[n_samples, n_features]
            Input features matrix.

        iteration_range :
            See :py:meth:`predict`.

        Returns
        -------
        X_leaves : array_like, shape=[n_samples, n_trees]
            For each datapoint x in X and for each tree, return the index of the
            leaf x ends up in. Leaves are numbered within
            ``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.

        r`  )r   r   rU  T)	pred_leafrj  N)	r   r   rn  r   r   r   rg   r  rw  )rI   r   rj  test_dmatrixs       r5   applyzXGBModel.apply  su    0 dnn5"77HO""00	L ##%--o .  655s   A BB
c                 N    t        | dd      | j                  }|S t        d      )a  Return the evaluation results.

        If **eval_set** is passed to the :py:meth:`fit` function, you can call
        ``evals_result()`` to get evaluation results for all passed **eval_sets**.  When
        **eval_metric** is also passed to the :py:meth:`fit` function, the
        **evals_result** will contain the **eval_metrics** passed to the :py:meth:`fit`
        function.

        The returned evaluation result is a dictionary:

        .. code-block:: python

            {'validation_0': {'logloss': ['0.604835', '0.531479']},
             'validation_1': {'logloss': ['0.41965', '0.17686']}}

        Returns
        -------
        evals_result

        """
        if getattr(self, "evals_result_", None) is not None:
            evals_result = self.evals_result_
        else:
            raise XGBoostError(
                "No evaluation result, `eval_set` is not used during training."
            )
        return evals_result

    @property
    def n_features_in_(self) -> int:
        """Number of features seen during :py:meth:`fit`."""
        booster = self.get_booster()
        return booster.num_features()

    @property
    def feature_names_in_(self) -> np.ndarray:
        """Names of features seen during :py:meth:`fit`.  Defined only when `X` has
        feature names that are all strings.

        """
        feature_names = self.get_booster().feature_names
        if feature_names is None:
            raise AttributeError(
                "`feature_names_in_` is defined only when `X` has feature names "
                "that are all strings."
            )
        return np.array(feature_names)

    @property
    def best_score(self) -> float:
        """The best score obtained by early stopping."""
        return self.get_booster().best_score

    @property
    def best_iteration(self) -> int:
        """The best iteration obtained by early stopping.  This attribute is 0-based,
        for instance if the best iteration is the first round, then best_iteration is 0.

        """
        return self.get_booster().best_iteration

    @property
    def feature_importances_(self) -> np.ndarray:
        """Feature importances property, return depends on `importance_type`
        parameter. When model trained with multi-class/multi-label/multi-target dataset,
        the feature importance is "averaged" over all targets. The "average" is defined
        based on the importance type. For instance, if the importance type is
        "total_gain", then the score is sum of loss change for each split from all
        trees.

        Returns
        -------
        feature_importances_ : array of shape ``[n_features]`` except for multi-class
        linear model, which returns an array with shape `(n_features, n_classes)`

        """
        b: Booster = self.get_booster()

        def dft() -> str:
            return "weight" if self.booster == "gblinear" else "gain"

        score = b.get_score(
            importance_type=self.importance_type if self.importance_type else dft()
        )
        if b.feature_names is None:
            feature_names = [f"f{i}" for i in range(self.n_features_in_)]
        else:
            feature_names = b.feature_names
        # gblinear returns all features, so `get` is only needed for gbtree.
        all_features = [score.get(f, 0.0) for f in feature_names]
        all_features_arr = np.array(all_features, dtype=np.float32)
        total = all_features_arr.sum()
        if total == 0:
            return all_features_arr
        return all_features_arr / total

    @property
    def coef_(self) -> np.ndarray:
        """
        Coefficients property

        .. note:: Coefficients are defined only for linear learners

            Coefficients are only defined when the linear model is chosen as
            base learner (`booster=gblinear`). It is not defined for other base
            learner types, such as tree learners (`booster=gbtree`).

        Returns
        -------
        coef_ : array of shape ``[n_features]`` or ``[n_classes, n_features]``
        """
        if self.get_xgb_params()["booster"] != "gblinear":
            raise AttributeError(
                f"Coefficients are not defined for Booster type {self.booster}"
            )
        b = self.get_booster()
        coef = np.array(json.loads(b.get_dump(dump_format="json")[0])["weight"])
        # Reshape for the multi-class linear model.
        n_classes = getattr(self, "n_classes_", None)
        if n_classes is not None and n_classes > 2:
            assert len(coef.shape) == 1
            assert coef.shape[0] % n_classes == 0
            coef = coef.reshape((n_classes, -1))
        return coef

    @property
    def intercept_(self) -> np.ndarray:
        """Intercept (bias) property

        For tree-based model, the returned value is the `base_score`.

        Returns
        -------
        intercept_ : array of shape ``(1,)`` or ``[n_classes]``
        """
        booster_config = self.get_xgb_params()["booster"]
        b = self.get_booster()
        if booster_config != "gblinear":
            config = json.loads(b.save_config())
            intercept = config["learner"]["learner_model_param"]["base_score"]
            return np.array([float(intercept)], dtype=np.float32)
        return np.array(
            json.loads(b.get_dump(dump_format="json")[0])["bias"], dtype=np.float32
        )


PredtT = TypeVar("PredtT", bound=np.ndarray)


def _cls_predict_proba(n_classes: int, prediction: PredtT, vstack: Callable) -> PredtT:
    assert len(prediction.shape) <= 2
    if len(prediction.shape) == 2 and prediction.shape[1] == n_classes:
        # Multi-class: already one column per class.
        return prediction
    if (
        len(prediction.shape) == 2
        and n_classes == 2
        and prediction.shape[1] >= n_classes
    ):
        # Multi-label.
        return prediction
    # Binary: derive the class-zero probability from the class-one one.
    classone_probs = prediction
    classzero_probs = 1.0 - classone_probs
    return vstack((classzero_probs, classone_probs)).transpose()


@xgboost_model_doc(
    "Implementation of the scikit-learn API for XGBoost classification.",
    ["model", "objective"],
    extra_parameters="""
    n_estimators : Optional[int]
        Number of boosting rounds.
""",
)
class XGBClassifier(XGBModel, XGBClassifierBase):
    @_deprecate_positional_args
    def __init__(
        self, *, objective: SklObjective = "binary:logistic", **kwargs: Any
    ) -> None:
        super().__init__(objective=objective, **kwargs)

    def _more_tags(self) -> Dict[str, bool]:
        tags = super()._more_tags()
        tags["multilabel"] = True
        return tags

    @_deprecate_positional_args
    def fit(
        self,
        X: ArrayLike,
        y: ArrayLike,
        *,
        sample_weight: Optional[ArrayLike] = None,
        base_margin: Optional[ArrayLike] = None,
        eval_set: Optional[Sequence[Tuple[ArrayLike, ArrayLike]]] = None,
        verbose: Optional[Union[bool, int]] = True,
        xgb_model: Optional[Union[Booster, str, XGBModel]] = None,
        sample_weight_eval_set: Optional[Sequence[ArrayLike]] = None,
        base_margin_eval_set: Optional[Sequence[ArrayLike]] = None,
        feature_weights: Optional[ArrayLike] = None,
    ) -> "XGBClassifier":
        with config_context(verbosity=self.verbosity):
            evals_result: TrainingCallback.EvalsLog = {}

            if _is_cudf_df(y) or _is_cudf_ser(y):
                import cupy as cp  # pylint: disable=import-error

                classes = cp.unique(y.values)
                self.n_classes_ = len(classes)
                expected_classes = cp.arange(self.n_classes_)
            elif _is_cupy_alike(y):
                import cupy as cp  # pylint: disable=import-error

                classes = cp.unique(y)
                self.n_classes_ = len(classes)
                expected_classes = cp.arange(self.n_classes_)
            else:
                classes = np.unique(np.asarray(y))
                self.n_classes_ = len(classes)
                expected_classes = self.classes_
            if (
                classes.shape != expected_classes.shape
                or not (classes == expected_classes).all()
            ):
                raise ValueError(
                    "Invalid classes inferred from unique values of `y`.  "
                    f"Expected: {expected_classes}, got {classes}"
                )

            params = self.get_xgb_params()

            if callable(self.objective):
                obj: Optional[Objective] = _objective_decorator(self.objective)
                params["objective"] = "binary:logistic"
            else:
                obj = None

            if self.n_classes_ > 2:
                # Switch to a multiclass objective in the underlying booster.
                if params.get("objective", None) != "multi:softmax":
                    params["objective"] = "multi:softprob"
                params["num_class"] = self.n_classes_

            model, metric, params = self._configure_fit(xgb_model, params)
            train_dmatrix, evals = _wrap_evaluation_matrices(
                missing=self.missing,
                X=X,
                y=y,
                group=None,
                qid=None,
                sample_weight=sample_weight,
                base_margin=base_margin,
                feature_weights=feature_weights,
                eval_set=eval_set,
                sample_weight_eval_set=sample_weight_eval_set,
                base_margin_eval_set=base_margin_eval_set,
                eval_group=None,
                eval_qid=None,
                create_dmatrix=self._create_dmatrix,
                enable_categorical=self.enable_categorical,
                feature_types=self.feature_types,
            )

            self._Booster = train(
                params,
                train_dmatrix,
                self.get_num_boosting_rounds(),
                evals=evals,
                early_stopping_rounds=self.early_stopping_rounds,
                evals_result=evals_result,
                obj=obj,
                custom_metric=metric,
                verbose_eval=verbose,
                xgb_model=model,
                callbacks=self.callbacks,
            )

            if not callable(self.objective):
                self.objective = params["objective"]

            self._set_evaluation_result(evals_result)
            return self

    assert XGBModel.fit.__doc__ is not None
    fit.__doc__ = XGBModel.fit.__doc__.replace(
        "Fit gradient boosting model", "Fit gradient boosting classifier", 1
    )

    def predict(
        self,
        X: ArrayLike,
        output_margin: bool = False,
        validate_features: bool = True,
        base_margin: Optional[ArrayLike] = None,
        iteration_range: Optional[IterationRange] = None,
    ) -> ArrayLike:
        with config_context(verbosity=self.verbosity):
            class_probs = super().predict(
                X=X,
                output_margin=output_margin,
                validate_features=validate_features,
                base_margin=base_margin,
                iteration_range=iteration_range,
            )
            if output_margin:
                # If output_margin is active, simply return the scores.
                return class_probs

            if len(class_probs.shape) > 1 and self.n_classes_ != 2:
                # Multi-class: turn softprob into a class index.
                column_indexes: np.ndarray = np.argmax(class_probs, axis=1)
            elif len(class_probs.shape) > 1 and class_probs.shape[1] != 1:
                # Multi-label.
                column_indexes = np.zeros(class_probs.shape)
                column_indexes[class_probs > 0.5] = 1
            elif self.objective == "multi:softmax":
                return class_probs.astype(np.int32)
            else:
                # Binary: turn the probability into a class label.
                column_indexes = np.repeat(0, class_probs.shape[0])
                column_indexes[class_probs > 0.5] = 1
            return column_indexes

    def predict_proba(
        self,
        X: ArrayLike,
        validate_features: bool = True,
        base_margin: Optional[ArrayLike] = None,
        iteration_range: Optional[IterationRange] = None,
    ) -> np.ndarray:
        """Predict the probability of each `X` example being of a given class. If the
        model is trained with early stopping, then :py:attr:`best_iteration` is used
        automatically. The estimator uses `inplace_predict` by default and falls back to
        using :py:class:`DMatrix` if devices between the data and the estimator don't
        match.

        .. note:: This function is only thread safe for `gbtree` and `dart`.

        Parameters
        ----------
        X :
            Feature matrix. See :ref:`py-data` for a list of supported types.
        validate_features :
            When this is True, validate that the Booster's and data's feature_names are
            identical.  Otherwise, it is assumed that the feature_names are the same.
        base_margin :
            Global bias for each instance. See :doc:`/tutorials/intercept` for details.
        iteration_range :
            Specifies which layer of trees are used in prediction.  For example, if a
            random forest is trained with 100 rounds.  Specifying `iteration_range=(10,
            20)`, then only the forests built during [10, 20) (half open set) rounds are
            used in this prediction.

        Returns
        -------
        prediction :
            a numpy array of shape array-like of shape (n_samples, n_classes) with the
            probability of each data example being of a given class.

        r  T)r   rp  r   rj  ro  r   r  )r   rp  r   rj  )r   r  rw  r   r  rD  r\   r  )	rI   r   rp  r   rj  	raw_predt
class_probr  r  s	           r5   predict_probazXGBClassifier.predict_proba8  s    T >>_,"3' /" ( I !3Jgo/#+	 & 
 "$//;		JJr4   c                 @    t        j                  | j                        S rH   )r\   arangerD  r   s    r5   r  zXGBClassifier.classes_t  s    yy))r4   r  )TNN)r.   r/   r0   r$   r  r   r   r   rd   r  r   r   r	   r   r   r   ry   r   r   rf  r1   replacer   rw  r\   r]   r  r  r  r  r  s   @r5   r  r    sT      #48  8 	8
 
8  8DdO 
   .2+/DH.2=A@D>B/3bb b
  	*b i(b 8E)Y*>$?@Ab %c	*+b E'3"89:b !)))< =b 'x	':;b "),b 
b  bH <<+++,,&&..%'I1CK $"&+/48"""" ""  	""
 i("" ".1"" 
""N #'+/48:K:K  :K i(	:K
 ".1:K 
:Kx *"** * *r4   r  z:scikit-learn API for XGBoost random forest classification.zS
@xgboost_model_doc(
    "scikit-learn API for XGBoost random forest classification.",
    ["model", "objective"],
    extra_parameters="""
    n_estimators : Optional[int]
        Number of trees in random forest to fit.
""",
)
class XGBRFClassifier(XGBClassifier):
    # pylint: disable=missing-docstring
    @_deprecate_positional_args
    def __init__(
        self,
        *,
        learning_rate: float = 1.0,
        subsample: float = 0.8,
        colsample_bynode: float = 0.8,
        reg_lambda: float = 1e-5,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            learning_rate=learning_rate,
            subsample=subsample,
            colsample_bynode=colsample_bynode,
            reg_lambda=reg_lambda,
            **kwargs,
        )
        _check_rf_callback(self.early_stopping_rounds, self.callbacks)

    def get_xgb_params(self) -> Dict[str, Any]:
        params = super().get_xgb_params()
        params["num_parallel_tree"] = super().get_num_boosting_rounds()
        return params

    def get_num_boosting_rounds(self) -> int:
        # A random forest is grown within a single boosting round.
        return 1

    # pylint: disable=unused-argument
    @_deprecate_positional_args
    def fit(
        self,
        X: ArrayLike,
        y: ArrayLike,
        *,
        sample_weight: Optional[ArrayLike] = None,
        base_margin: Optional[ArrayLike] = None,
        eval_set: Optional[Sequence[Tuple[ArrayLike, ArrayLike]]] = None,
        verbose: Optional[Union[bool, int]] = True,
        xgb_model: Optional[Union[Booster, str, XGBModel]] = None,
        sample_weight_eval_set: Optional[Sequence[ArrayLike]] = None,
        base_margin_eval_set: Optional[Sequence[ArrayLike]] = None,
        feature_weights: Optional[ArrayLike] = None,
    ) -> "XGBRFClassifier":
        args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
        _check_rf_callback(self.early_stopping_rounds, self.callbacks)
        super().fit(**args)
        return self


@xgboost_model_doc(
    "Implementation of the scikit-learn API for XGBoost regression.",
    ["estimators", "model", "objective"],
)
class XGBRegressor(XGBModel, XGBRegressorBase):
    # pylint: disable=missing-docstring
    @_deprecate_positional_args
    def __init__(
        self, *, objective: SklObjective = "reg:squarederror", **kwargs: Any
    ) -> None:
        super().__init__(objective=objective, **kwargs)

    def _more_tags(self) -> Dict[str, bool]:
        tags = super()._more_tags()
        tags["multioutput"] = True
        tags["multioutput_only"] = False
        return tags


@xgboost_model_doc(
    "scikit-learn API for XGBoost random forest regression.",
    ["model", "objective"],
    extra_parameters="""
    n_estimators : Optional[int]
        Number of trees in random forest to fit.
""",
)
class XGBRFRegressor(XGBRegressor):
    # pylint: disable=missing-docstring
    @_deprecate_positional_args
    def __init__(
        self,
        *,
        learning_rate: float = 1.0,
        subsample: float = 0.8,
        colsample_bynode: float = 0.8,
        reg_lambda: float = 1e-5,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            learning_rate=learning_rate,
            subsample=subsample,
            colsample_bynode=colsample_bynode,
            reg_lambda=reg_lambda,
            **kwargs,
        )
        _check_rf_callback(self.early_stopping_rounds, self.callbacks)

    def get_xgb_params(self) -> Dict[str, Any]:
        params = super().get_xgb_params()
        params["num_parallel_tree"] = super().get_num_boosting_rounds()
        return params

    def get_num_boosting_rounds(self) -> int:
        # A random forest is grown within a single boosting round.
        return 1

    # pylint: disable=unused-argument
    @_deprecate_positional_args
    def fit(
        self,
        X: ArrayLike,
        y: ArrayLike,
        *,
        sample_weight: Optional[ArrayLike] = None,
        base_margin: Optional[ArrayLike] = None,
        eval_set: Optional[Sequence[Tuple[ArrayLike, ArrayLike]]] = None,
        verbose: Optional[Union[bool, int]] = True,
        xgb_model: Optional[Union[Booster, str, XGBModel]] = None,
        sample_weight_eval_set: Optional[Sequence[ArrayLike]] = None,
        base_margin_eval_set: Optional[Sequence[ArrayLike]] = None,
        feature_weights: Optional[ArrayLike] = None,
    ) -> "XGBRFRegressor":
        args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
        _check_rf_callback(self.early_stopping_rounds, self.callbacks)
        super().fit(**args)
        return self
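
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the upstream module):
# what distinguishes the random-forest wrappers from their boosting
# counterparts. A single boosting round is performed, and ``n_estimators``
# is routed to the ``num_parallel_tree`` parameter so that all trees are
# grown in parallel within that one round. Values here are arbitrary.
def _example_random_forest_params() -> None:
    rf = XGBRFRegressor(n_estimators=100)
    params = rf.get_xgb_params()
    assert params["num_parallel_tree"] == 100
    assert rf.get_num_boosting_rounds() == 1
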


@xgboost_model_doc(
    """Implementation of the Scikit-Learn API for XGBoost Ranking.

See :doc:`Learning to Rank </tutorials/learning_to_rank>` for an introduction.

    """,
    ["estimators", "model"],
    end_note="""

            A custom objective function is currently not supported by XGBRanker.

        .. note::

            Query group information is only required for ranking training but not
            prediction. Multiple groups can be predicted on a single call to
            :py:meth:`predict`.

        When fitting the model with the `group` parameter, your data need to be sorted
        by the query group first. `group` is an array that contains the size of each
        query group.

        Similarly, when fitting the model with the `qid` parameter, the data should be
        sorted according to query index and `qid` is an array that contains the query
        index for each training sample.

        For example, if your original data look like:

        +-------+-----------+---------------+
        |   qid |   label   |   features    |
        +-------+-----------+---------------+
        |   1   |   0       |   x_1         |
        +-------+-----------+---------------+
        |   1   |   1       |   x_2         |
        +-------+-----------+---------------+
        |   1   |   0       |   x_3         |
        +-------+-----------+---------------+
        |   2   |   0       |   x_4         |
        +-------+-----------+---------------+
        |   2   |   1       |   x_5         |
        +-------+-----------+---------------+
        |   2   |   1       |   x_6         |
        +-------+-----------+---------------+
        |   2   |   1       |   x_7         |
        +-------+-----------+---------------+

        then :py:meth:`fit` method can be called with either `group` array as ``[3, 4]``
        or with `qid` as ``[1, 1, 1, 2, 2, 2, 2]``, that is the qid column.  Also, the
        `qid` can be a special column of input `X` instead of a separated parameter, see
        :py:meth:`fit` for more info.)r   c                        e Zd Zedddedef fd       Zdee   de	de	ded	ef
 fd
Z
edddddddddddddde	de	dee	   dee	   dee	   dee	   deeee	e	f         deee	      deee	      deeeef      deeeeef      deee	      deee	      dee	   d	d fd       Z	 	 	 	 d"de	dededee	   dee   d	e	f fdZ	 d#de	dee   d	e	f fd Zde	de	d	efd!Z xZS )$	XGBRankerz	rank:ndcgr  r   r   c                    t        |   dd|i| t        | j                        rt	        d      d|vrt	        d      y )Nr   z4custom objective function not supported by XGBRankerzrank:z%please use XGBRanker for ranking taskr3   )r  r   r!  r   rT   r  s      r5   r   zXGBRanker.__init__G  sI    7977DNN#STT)#DEE $r4   r   r   r   r8   c                     t        ||      \  }}|j                  dd       |t        d      t        |   d|||d|S )Nr   z4Either `group` or `qid` is required for ranking task)r   r   r   r3   )r  r   rT   r  rV  )rI   r   r   r   r   r  s        r5   _create_ltr_dmatrixzXGBRanker._create_ltr_dmatrixO  sR     T3'	c::gt$,STTw&M3TsMfMMr4   NF)r   r   rF   r   r   r   r   r]  r^  r   r   r   r   r   r   rF   r   r   r   r   r]  r^  r   r   r   c                $   t        | j                        5  t        di d| j                  d|d|d|d|d|d|d	|d
|d|d|d|d|	d| j                  d| j
                  d| j                  \  }}i }| j                         }| j                  ||      \  }}}t        ||| j                         | j                  ||||
|| j                  
      | _        |d   | _        | j                  |       | cddd       S # 1 sw Y   yxY w)a  Fit gradient boosting ranker

        Note that calling ``fit()`` multiple times will cause the model object to be
        re-fit from scratch. To resume training from a previous checkpoint, explicitly
        pass ``xgb_model`` argument.

        Parameters
        ----------
        X :
            Feature matrix. See :ref:`py-data` for a list of supported types.

            When this is a :py:class:`pandas.DataFrame` or a :py:class:`cudf.DataFrame`,
            it may contain a special column called ``qid`` for specifying the query
            index. Using a special column is the same as using the `qid` parameter,
            except for being compatible with sklearn utility functions like
            :py:func:`sklearn.model_selection.cross_validation`. The same convention
            applies to the :py:meth:`XGBRanker.score` and :py:meth:`XGBRanker.predict`.

            +-----+----------------+----------------+
            | qid | feat_0         | feat_1         |
            +-----+----------------+----------------+
            | 0   | :math:`x_{00}` | :math:`x_{01}` |
            +-----+----------------+----------------+
            | 1   | :math:`x_{10}` | :math:`x_{11}` |
            +-----+----------------+----------------+
            | 1   | :math:`x_{20}` | :math:`x_{21}` |
            +-----+----------------+----------------+

            When the ``tree_method`` is set to ``hist``, internally, the
            :py:class:`QuantileDMatrix` will be used instead of the :py:class:`DMatrix`
            for conserving memory. However, this has performance implications when the
            device of input data is not matched with algorithm. For instance, if the
            input is a numpy array on CPU but ``cuda`` is used for training, then the
            data is first processed on CPU then transferred to GPU.
        y :
            Labels
        group :
            Size of each query group of training data. Should have as many elements as
            the query groups in the training data.  If this is set to None, then user
            must provide qid.
        qid :
            Query ID for each training sample.  Should have the size of n_samples.  If
            this is set to None, then user must provide group or a special column in X.
        sample_weight :
            Query group weights

            .. note:: Weights are per-group for ranking tasks

                In ranking task, one weight is assigned to each query group/id (not each
                data point). This is because we only care about the relative ordering of
                data points within each group, so it doesn't make sense to assign
                weights to individual data points.

        base_margin :
            Global bias for each instance. See :doc:`/tutorials/intercept` for details.
        eval_set :
            A list of (X, y) tuple pairs to use as validation sets, for which
            metrics will be computed.
            Validation metrics will help us track the performance of the model.
        eval_group :
            A list in which ``eval_group[i]`` is the list containing the sizes of all
            query groups in the ``i``-th pair in **eval_set**.
        eval_qid :
            A list in which ``eval_qid[i]`` is the array containing query ID of ``i``-th
            pair in **eval_set**. The special column convention in `X` applies to
            validation datasets as well.

        verbose :
            If `verbose` is True and an evaluation set is used, the evaluation metric
            measured on the validation set is printed to stdout at each boosting stage.
            If `verbose` is an integer, the evaluation metric is printed at each
            `verbose` boosting stage. The last boosting stage / the boosting stage found
            by using `early_stopping_rounds` is also printed.
        xgb_model :
            file name of stored XGBoost model or 'Booster' instance XGBoost model to be
            loaded before training (allows training continuation).
        sample_weight_eval_set :
            A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of
            group weights on the i-th validation set.

            .. note:: Weights are per-group for ranking tasks

                In ranking task, one weight is assigned to each query group (not each
                data point). This is because we only care about the relative ordering of
                data points within each group, so it doesn't make sense to assign
                weights to individual data points.
        base_margin_eval_set :
            A list of the form [M_1, M_2, ..., M_n], where each M_i is an array like
            object storing base margin for the i-th validation set.
        feature_weights :
            Weight for each feature, defines the probability of each feature being
            selected when colsample is being used.  All values must be greater than 0,
            otherwise a `ValueError` is thrown.

        r`  r   r   r   r   r   rF   r   r   r   r   r   r   r   r   r   r   )num_boost_roundr6   r   rW  rd  re  r^  r7   r   Nr3   )r   r   r   r   r  r   r   r  rR  r*   r(  r6   r7   r   r   r[  )rI   r   r   r   r   rF   r   r   r   r   r]  r^  r   r   r   r   r   rW  r  r   rQ  s                        r5   rf  zXGBRanker.fitY  si   f dnn5#< $$$ $ 	$
 $ ,$ ($ !0$ "$ (>$ &:$ &$ "$  $77$ $(#:#:$  #00!$ M5& 79L((*F$($7$7	6$J!E66! $ < < >&*&@&@)$$..DM $K0DN''5S 655s   C%DDro  rp  rj  c                 J    t        |d       \  }}t        | 	  |||||      S )N)rj  )r  r  rw  )rI   r   ro  rp  r   rj  _r  s          r5   rw  zXGBRanker.predict  s:     4 1w+  
 	
r4   c                 B    t        |d       \  }}t        | 	  ||      S rH   )r  r  r}  )rI   r   rj  r  r  s       r5   r}  zXGBRanker.apply  s&    
 4 1w}Q00r4   c           	         t        |d      \  }}t        |||| j                  | j                  | j                  | j
                        }t        | j                        rEt        | j                  | j                        }| j                         j                  |dfg|      }n| j                         j                  |      }t        |      }|d   d   S )a  Evaluate score for data using the last evaluation metric. If the model is
        trained with early stopping, then :py:attr:`best_iteration` is used
        automatically.

        Parameters
        ----------
        X : Union[pd.DataFrame, cudf.DataFrame]
          Feature matrix. A DataFrame with a special `qid` column.

        y :
          Labels

        Returns
        -------
        score :
          The result of the first evaluation metric for the ranker.

        N)r   r   r   rU  r   eval)fevalr  r   )r  r   r   r   rg   r   r!  r   r   r  r   r  r%   )rI   r   r   r   XyqrQ  
result_strmetric_scores           r5   r  zXGBRanker.score  s    & !T"3 LL#66KK,,
 D$$%)$*:*:DKKHF))+44sFm_F4SJ))+005J&z2B""r4   r  rH   )r.   r/   r0   r$   rd   r   r   r	   r   r   r  r   r   r   r  ry   r   r   rf  r   rw  r}  re   r  r  r  s   @r5   r
  r
    sU   j  +6 FS F# F  FNG$N,5N<ENQTN	N   &*#'-1+/DH4826.3=A@D>B/3![[ [
 	"[ i [  	*[ i([ 8E)Y*>$?@A[ Xi01[ 8I./[ %c	*+[ E'3"89:[ !)))< =[ 'x	':;[  "),![" 
#[  [@ $"&+/48

 
  	

 i(
 ".1
 

( 5911 ".11 
	1&#y &#Y &#5 &#r4   r