
"""
Forest of trees-based ensemble methods.

Those methods include random forests and extremely randomized trees.

The module structure is the following:

- The ``BaseForest`` base class implements a common ``fit`` method for all
  the estimators in the module. The ``fit`` method of the base ``Forest``
  class calls the ``fit`` method of each sub-estimator on random samples
  (with replacement, a.k.a. bootstrap) of the training set.

  The init of the sub-estimator is further delegated to the
  ``BaseEnsemble`` constructor.

- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
  implement the prediction logic by computing an average of the predicted
  outcomes of the sub-estimators.

- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
  classes provide the user with concrete implementations of
  the forest ensemble method using classical, deterministic
  ``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
  sub-estimator implementations.

- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
  classes provide the user with concrete implementations of the
  forest ensemble method using the extremely randomized trees
  ``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
  sub-estimator implementations.

Single and multi-output problems are both handled.
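
All estimators here follow the standard scikit-learn API; a minimal usage
sketch (illustrative only, assuming pre-split training and test arrays)::

    from sklearn.ensemble import RandomForestClassifier

    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    clf.fit(X_train, y_train)
    proba = clf.predict_proba(X_test)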
    N)ABCMetaabstractmethod)IntegralReal)catch_warningssimplefilterwarn)hstack)issparse   )ClassifierMixinMultiOutputMixinRegressorMixinTransformerMixin_fit_contextis_classifier)DataConversionWarning)accuracy_scorer2_score)OneHotEncoder)BaseDecisionTreeDecisionTreeClassifierDecisionTreeRegressorExtraTreeClassifierExtraTreeRegressor)DOUBLEDTYPE)check_random_statecompute_sample_weight)Interval
RealNotInt
StrOptions)get_tags)check_classification_targetstype_of_target)Paralleldelayed)_check_feature_names_in_check_sample_weight_num_samplescheck_is_fittedvalidate_data   )BaseEnsemble_partition_estimators)RandomForestClassifierRandomForestRegressorExtraTreesClassifierExtraTreesRegressorRandomTreesEmbeddingc                     || S t        |t              r$|| kD  rd}t        |j                  | |            |S t        |t              rt        t        | |z        d      S y)av  
    Get the number of samples in a bootstrap sample.

    Parameters
    ----------
    n_samples : int
        Number of samples in the dataset.
    max_samples : int or float
        The maximum number of samples to draw from the total available:
            - if float, this indicates a fraction of the total and should be
              in the interval `(0.0, 1.0]`;
            - if int, this indicates the exact number of samples;
            - if None, this indicates the total number of samples.

    Returns
    -------
    n_samples_bootstrap : int
        The total number of samples to draw for the bootstrap sample.
    """
    if max_samples is None:
        return n_samples

    if isinstance(max_samples, Integral):
        if max_samples > n_samples:
            msg = "`max_samples` must be <= n_samples={} but got value {}"
            raise ValueError(msg.format(n_samples, max_samples))
        return max_samples

    if isinstance(max_samples, Real):
        return max(round(n_samples * max_samples), 1)
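
# A quick illustration of the three accepted `max_samples` values (numbers
# chosen here for illustration only; they mirror the behaviour implemented
# above):
#
#     >>> _get_n_samples_bootstrap(n_samples=100, max_samples=None)
#     100
#     >>> _get_n_samples_bootstrap(n_samples=100, max_samples=90)
#     90
#     >>> _get_n_samples_bootstrap(n_samples=100, max_samples=0.5)
#     50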


def _generate_sample_indices(random_state, n_samples, n_samples_bootstrap):
    """
    Private function used by the _parallel_build_trees function."""
    random_instance = check_random_state(random_state)
    sample_indices = random_instance.randint(
        0, n_samples, n_samples_bootstrap, dtype=np.int32
    )

    return sample_indices


def _generate_unsampled_indices(random_state, n_samples, n_samples_bootstrap):
    """
    Private function used by the forest._set_oob_score function."""
    sample_indices = _generate_sample_indices(
        random_state, n_samples, n_samples_bootstrap
    )
    sample_counts = np.bincount(sample_indices, minlength=n_samples)
    unsampled_mask = sample_counts == 0
    indices_range = np.arange(n_samples)
    unsampled_indices = indices_range[unsampled_mask]

    return unsampled_indices
       | S # 1 sw Y   9xY w)z<
    Private function used to fit a single tree in parallel."""
    if verbose > 1:
        print("building tree %d of %d" % (tree_idx + 1, n_trees))

    if bootstrap:
        n_samples = X.shape[0]
        if sample_weight is None:
            curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
        else:
            curr_sample_weight = sample_weight.copy()

        indices = _generate_sample_indices(
            tree.random_state, n_samples, n_samples_bootstrap
        )
        sample_counts = np.bincount(indices, minlength=n_samples)
        curr_sample_weight *= sample_counts

        if class_weight == "subsample":
            with catch_warnings():
                simplefilter("ignore", DeprecationWarning)
                curr_sample_weight *= compute_sample_weight("auto", y, indices=indices)
        elif class_weight == "balanced_subsample":
            curr_sample_weight *= compute_sample_weight("balanced", y, indices=indices)

        tree._fit(
            X,
            y,
            sample_weight=curr_sample_weight,
            check_input=False,
            missing_values_in_feature_mask=missing_values_in_feature_mask,
        )
    else:
        tree._fit(
            X,
            y,
            sample_weight=sample_weight,
            check_input=False,
            missing_values_in_feature_mask=missing_values_in_feature_mask,
        )

    return tree


class BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta):
    """
    Base class for forests of trees.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    _parameter_constraints: dict = {
        "n_estimators": [Interval(Integral, 1, None, closed="left")],
        "bootstrap": ["boolean"],
        "oob_score": ["boolean", callable],
        "n_jobs": [Integral, None],
        "random_state": ["random_state"],
        "verbose": ["verbose"],
        "warm_start": ["boolean"],
        "max_samples": [
            None,
            Interval(RealNotInt, 0.0, 1.0, closed="right"),
            Interval(Integral, 1, None, closed="left"),
        ],
    }

    @abstractmethod
    def __init__(
        self,
        estimator,
        n_estimators=100,
        *,
        estimator_params=tuple(),
        bootstrap=False,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        class_weight=None,
        max_samples=None,
    ):
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
        )

        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start
        self.class_weight = class_weight
        self.max_samples = max_samples

    def apply(self, X):
        """
        Apply trees in the forest to X, return leaf indices.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.

        Returns
        -------
        X_leaves : ndarray of shape (n_samples, n_estimators)
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = self._validate_X_predict(X)
        results = Parallel(
            n_jobs=self.n_jobs,
            verbose=self.verbose,
            prefer="threads",
        )(delayed(tree.apply)(X, check_input=False) for tree in self.estimators_)

        return np.array(results).T

    def decision_path(self, X):
        """
        Return the decision path in the forest.

        .. versionadded:: 0.18

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.

        Returns
        -------
        indicator : sparse matrix of shape (n_samples, n_nodes)
            Return a node indicator matrix where non-zero elements indicate
            that the sample goes through the corresponding nodes. The matrix
            is in CSR format.

        n_nodes_ptr : ndarray of shape (n_estimators + 1,)
            The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
            gives the indicator value for the i-th estimator.
        """
        X = self._validate_X_predict(X)
        indicators = Parallel(
            n_jobs=self.n_jobs,
            verbose=self.verbose,
            prefer="threads",
        )(
            delayed(tree.decision_path)(X, check_input=False)
            for tree in self.estimators_
        )

        n_nodes = [0]
        n_nodes.extend([i.shape[1] for i in indicators])
        n_nodes_ptr = np.array(n_nodes).cumsum()

        return sparse_hstack(indicators).tocsr(), n_nodes_ptr

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """
        Build a forest of trees from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, its dtype will be converted
            to ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csc_matrix``.

        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        if issparse(y):
            raise ValueError("sparse multilabel-indicator for y is not supported.")

        X, y = validate_data(
            self,
            X,
            y,
            multi_output=True,
            accept_sparse="csc",
            dtype=DTYPE,
            ensure_all_finite=False,
        )
        # Only the criterion is required to determine if the tree supports
        # missing values.
        estimator = type(self.estimator)(criterion=self.criterion)
        missing_values_in_feature_mask = (
            estimator._compute_missing_values_in_feature_mask(
                X, estimator_name=self.__class__.__name__
            )
        )

        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)

        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()

        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn(
                "A column-vector y was passed when a 1d array was expected. "
                "Please change the shape of y to (n_samples,), for example "
                "using ravel().",
                DataConversionWarning,
                stacklevel=2,
            )

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        if self.criterion == "poisson":
            if np.any(y < 0):
                raise ValueError(
                    "Some value(s) of y are negative which is "
                    "not allowed for Poisson regression."
                )
            if np.sum(y) <= 0:
                raise ValueError(
                    "Sum of y is not strictly positive which "
                    "is necessary for Poisson regression."
                )

        self._n_samples, self.n_outputs_ = y.shape

        y, expanded_class_weight = self._validate_y_class_weight(y)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight

        if not self.bootstrap and self.max_samples is not None:
            raise ValueError(
                "`max_sample` cannot be set if `bootstrap=False`. "
                "Either switch to `bootstrap=True` or set "
                "`max_sample=None`."
            )
        elif self.bootstrap:
            n_samples_bootstrap = _get_n_samples_bootstrap(
                n_samples=X.shape[0], max_samples=self.max_samples
            )
        else:
            n_samples_bootstrap = None

        self._n_samples_bootstrap = n_samples_bootstrap

        self._validate_estimator()

        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available if bootstrap=True")

        random_state = check_random_state(self.random_state)

        if not self.warm_start or not hasattr(self, "estimators_"):
            # Free allocated memory, if any
            self.estimators_ = []

        n_more_estimators = self.n_estimators - len(self.estimators_)

        if n_more_estimators < 0:
            raise ValueError(
                "n_estimators=%d must be larger or equal to "
                "len(estimators_)=%d when warm_start==True"
                % (self.n_estimators, len(self.estimators_))
            )

        elif n_more_estimators == 0:
            warn(
                "Warm-start fitting without increasing n_estimators does not "
                "fit new trees."
            )
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))

            trees = [
                self._make_estimator(append=False, random_state=random_state)
                for i in range(n_more_estimators)
            ]

            # Parallel loop: the threading backend is preferred as the Cython
            # code used to fit the trees releases the GIL.
            trees = Parallel(
                n_jobs=self.n_jobs,
                verbose=self.verbose,
                prefer="threads",
            )(
                delayed(_parallel_build_trees)(
                    t,
                    self.bootstrap,
                    X,
                    y,
                    sample_weight,
                    i,
                    len(trees),
                    verbose=self.verbose,
                    class_weight=self.class_weight,
                    n_samples_bootstrap=n_samples_bootstrap,
                    missing_values_in_feature_mask=missing_values_in_feature_mask,
                )
                for i, t in enumerate(trees)
            )

            # Collect newly grown trees
            self.estimators_.extend(trees)

        if self.oob_score and (
            n_more_estimators > 0 or not hasattr(self, "oob_score_")
        ):
            y_type = type_of_target(y)
            if y_type == "unknown" or (
                is_classifier(self) and y_type == "multiclass-multioutput"
            ):
                raise ValueError(
                    "The type of target cannot be used to compute OOB "
                    f"estimates. Got {y_type} while only the following are "
                    "supported: continuous, continuous-multioutput, binary, "
                    "multiclass, multilabel-indicator."
                )

            if callable(self.oob_score):
                self._set_oob_score_and_attributes(
                    X, y, scoring_function=self.oob_score
                )
            else:
                self._set_oob_score_and_attributes(X, y)

        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    @abstractmethod
    def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
        """Compute and set the OOB score and attributes.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        y : ndarray of shape (n_samples, n_outputs)
            The target matrix.
        scoring_function : callable, default=None
            Scoring function for OOB score. Default depends on whether
            this is a regression (R2 score) or classification problem
            (accuracy score).
        """

    def _compute_oob_predictions(self, X, y):
        """Compute and set the OOB score.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        y : ndarray of shape (n_samples, n_outputs)
            The target matrix.

        Returns
        -------
        oob_pred : ndarray of shape (n_samples, n_classes, n_outputs) or \
                (n_samples, 1, n_outputs)
            The OOB predictions.
        """
        if issparse(X):
            X = X.tocsr()

        n_samples = y.shape[0]
        n_outputs = self.n_outputs_
        if is_classifier(self) and hasattr(self, "n_classes_"):
            # n_classes_ is a ndarray at this stage and all the supported
            # targets have the same number of classes in all outputs
            oob_pred_shape = (n_samples, self.n_classes_[0], n_outputs)
        else:
            # for regression, n_classes_ does not exist and we create an empty
            # axis to be consistent with the classification case and make
            # the array operations compatible with the 2 settings
            oob_pred_shape = (n_samples, 1, n_outputs)

        oob_pred = np.zeros(shape=oob_pred_shape, dtype=np.float64)
        n_oob_pred = np.zeros((n_samples, n_outputs), dtype=np.int64)

        n_samples_bootstrap = _get_n_samples_bootstrap(
            n_samples,
            self.max_samples,
        )
        for estimator in self.estimators_:
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state,
                n_samples,
                n_samples_bootstrap,
            )
            y_pred = self._get_oob_predictions(estimator, X[unsampled_indices, :])
            oob_pred[unsampled_indices, ...] += y_pred
            n_oob_pred[unsampled_indices, :] += 1

        for k in range(n_outputs):
            if (n_oob_pred == 0).any():
                warn(
                    "Some inputs do not have OOB scores. This probably means "
                    "too few trees were used to compute any reliable OOB "
                    "estimates.",
                    UserWarning,
                )
                n_oob_pred[n_oob_pred == 0] = 1
            oob_pred[..., k] /= n_oob_pred[..., [k]]

        return oob_pred

    def _validate_y_class_weight(self, y):
    |d fS Nr   )r   rj   s     r>   r   z#BaseForest._validate_y_class_weightq  s    $wr@   c                 T   t        |        | j                  d   j                  |      rd}nd}t        | |t        dd|      }t        |      rY|j                  j                  t        j                  k7  s'|j                  j                  t        j                  k7  rt        d      |S )zH
        Validate X whenever one tries to predict, apply, predict_proba."""
        check_is_fitted(self)
        if self.estimators_[0]._support_missing_values(X):
            ensure_all_finite = "allow-nan"
        else:
            ensure_all_finite = True

        X = validate_data(
            self,
            X,
            dtype=DTYPE,
            accept_sparse="csr",
            reset=False,
            ensure_all_finite=ensure_all_finite,
        )
        if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):
            raise ValueError("No support for np.int64 index based sparse matrices")
        return X

    @property
    def feature_importances_(self):
        """
        The impurity-based feature importances.

        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature.  It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

        Returns
        -------
        feature_importances_ : ndarray of shape (n_features,)
            The values of this array sum to 1, unless all trees are single node
            trees consisting of only the root node, in which case it will be an
            array of zeros.
        """
        check_is_fitted(self)

        all_importances = Parallel(n_jobs=self.n_jobs, prefer="threads")(
            delayed(getattr)(tree, "feature_importances_")
            for tree in self.estimators_
            if tree.tree_.node_count > 1
        )

        if not all_importances:
            return np.zeros(self.n_features_in_, dtype=np.float64)

        all_importances = np.mean(all_importances, axis=0, dtype=np.float64)
        return all_importances / np.sum(all_importances)

    def _get_estimators_indices(self):
        # Get drawn indices along both sample and feature axes
        for tree in self.estimators_:
            if not self.bootstrap:
                yield np.arange(self._n_samples, dtype=np.int32)
            else:
                # tree.random_state is actually an immutable integer seed
                # rather than a mutable RandomState object, so it's safe to
                # use it repeatedly when calling this property.
                seed = tree.random_state
                yield _generate_sample_indices(
                    seed, self._n_samples, self._n_samples_bootstrap
                )

    @property
    def estimators_samples_(self):
        """The subset of drawn samples for each base estimator.

        Returns a dynamically generated list of indices identifying
        the samples used for fitting each member of the ensemble, i.e.,
        the in-bag samples.

        Note: the list is re-created at each call to the property in order
        to reduce the object memory footprint by not storing the sampling
        data. Thus fetching the property may be slower than expected.
        """
        return [sample_indices for sample_indices in self._get_estimators_indices()]

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # Only the criterion is required to determine if the tree supports
        # missing values.
        estimator = type(self.estimator)(criterion=self.criterion)
        tags.input_tags.allow_nan = get_tags(estimator).input_tags.allow_nan
        return tags


def _accumulate_prediction(predict, X, out, lock):
    """
    This is a utility function for joblib's Parallel.

    It can't go locally in ForestClassifier or ForestRegressor, because joblib
    complains that it cannot pickle it when placed there.
    """
    prediction = predict(X, check_input=False)
    with lock:
        if len(out) == 1:
            out[0] += prediction
        else:
            for i in range(len(out)):
                out[i] += prediction[i]


class ForestClassifier(ClassifierMixin, BaseForest, metaclass=ABCMeta):
    """
    Base class for forest of trees-based classifiers.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(
        self,
        estimator,
        n_estimators=100,
        *,
        estimator_params=tuple(),
        bootstrap=False,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        class_weight=None,
        max_samples=None,
    ):
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight,
            max_samples=max_samples,
        )

    @staticmethod
    def _get_oob_predictions(tree, X):
        """Compute the OOB predictions for an individual tree.
                  |dd      }|S )a  Compute the OOB predictions for an individual tree.

        Parameters
        ----------
        tree : DecisionTreeClassifier object
            A single decision tree classifier.
        X : ndarray of shape (n_samples, n_features)
            The OOB samples.

        Returns
        -------
        y_pred : ndarray of shape (n_samples, n_classes, n_outputs)
            The OOB associated predictions.
        Fr   r   .r      )r   start)predict_probarE   asarrayr   newaxisrollaxisrg   ri   r   s      r>   r   z%ForestClassifier._get_oob_predictions
  se      ##A5#9F#;;!CO,F  [[aq9Fr@   c                    t         |   ||      | _        | j                  j                  d   dk(  r!| j                  j	                  d      | _        |t
        } ||t        j                  | j                  d            | _        y)a  Compute and set the OOB score and attributes.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        y : ndarray of shape (n_samples, n_outputs)
            The target matrix.
        scoring_function : callable, default=None
            Scoring function for OOB score. Defaults to `accuracy_score`.
        """
        self.oob_decision_function_ = super()._compute_oob_predictions(X, y)
        if self.oob_decision_function_.shape[-1] == 1:
            # drop the n_outputs axis if there is a single output
            self.oob_decision_function_ = self.oob_decision_function_.squeeze(axis=-1)

        if scoring_function is None:
            scoring_function = accuracy_score

        self.oob_score_ = scoring_function(
            y, np.argmax(self.oob_decision_function_, axis=1)
        )

    def _validate_y_class_weight(self, y):
        check_classification_targets(y)

        y = np.copy(y)
        expanded_class_weight = None

        if self.class_weight is not None:
            y_original = np.copy(y)

        self.classes_ = []
        self.n_classes_ = []

        y_store_unique_indices = np.zeros(y.shape, dtype=int)
        for k in range(self.n_outputs_):
            classes_k, y_store_unique_indices[:, k] = np.unique(
                y[:, k], return_inverse=True
            )
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])
        y = y_store_unique_indices

        if self.class_weight is not None:
            valid_presets = ("balanced", "balanced_subsample")
            if isinstance(self.class_weight, str):
                if self.class_weight not in valid_presets:
                    raise ValueError(
                        "Valid presets for class_weight include "
                        '"balanced" and "balanced_subsample".'
                        'Given "%s".' % self.class_weight
                    )
                if self.warm_start:
                    warn(
                        'class_weight presets "balanced" or '
                        '"balanced_subsample" are '
                        "not recommended for warm_start if the fitted data "
                        "differs from the full dataset. In order to use "
                        '"balanced" weights, use compute_class_weight '
                        '("balanced", classes, y). In place of y you can use '
                        "a large enough sample of the full training set "
                        "target to properly estimate the class frequency "
                        "distributions. Pass the resulting weights as the "
                        "class_weight parameter."
                    )

            if self.class_weight != "balanced_subsample" or not self.bootstrap:
                if self.class_weight == "balanced_subsample":
                    class_weight = "balanced"
                else:
                    class_weight = self.class_weight
                expanded_class_weight = compute_sample_weight(class_weight, y_original)

        return y, expanded_class_weight

    def predict(self, X):
        """
        Predict class for X.

        The predicted class of an input sample is a vote by the trees in
        the forest, weighted by their probability estimates. That is,
        the predicted class is the one with highest mean probability
        estimate across the trees.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.

        Returns
        -------
        y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
            The predicted classes.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)

        else:
            n_samples = proba[0].shape[0]
            # all dtypes should be the same, so just take the first
            class_type = self.classes_[0].dtype
            predictions = np.empty((n_samples, self.n_outputs_), dtype=class_type)

            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(
                    np.argmax(proba[k], axis=1), axis=0
                )

            return predictions

    def predict_proba(self, X):
        """
        Predict class probabilities for X.

        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the trees in the forest.
        The class probability of a single tree is the fraction of samples of
        the same class in a leaf.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.

        Returns
        -------
        p : ndarray of shape (n_samples, n_classes), or a list of such arrays
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # avoid storing the output of every estimator by summing them here
        all_proba = [
            np.zeros((X.shape[0], j), dtype=np.float64)
            for j in np.atleast_1d(self.n_classes_)
        ]
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs, verbose=self.verbose, require="sharedmem")(
            delayed(_accumulate_prediction)(e.predict_proba, X, all_proba, lock)
            for e in self.estimators_
        )

        for proba in all_proba:
            proba /= len(self.estimators_)

        if len(all_proba) == 1:
            return all_proba[0]
        else:
            return all_proba

    def predict_log_proba(self, X):
        """
        Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample are computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.

        Returns
        -------
        p : ndarray of shape (n_samples, n_classes), or a list of such arrays
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.classifier_tags.multi_label = True
        return tags


class ForestRegressor(RegressorMixin, BaseForest, metaclass=ABCMeta):
    """
    Base class for forest of trees-based regressors.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(
        self,
        estimator,
        n_estimators=100,
        *,
        estimator_params=tuple(),
        bootstrap=False,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        max_samples=None,
    ):
        super().__init__(
            estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=max_samples,
        )

    def predict(self, X):
        """
        Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.

        Returns
        -------
        y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
            The predicted values.
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # avoid storing the output of every estimator by summing them here
        if self.n_outputs_ > 1:
            y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
        else:
            y_hat = np.zeros((X.shape[0]), dtype=np.float64)

        # Parallel loop
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs, verbose=self.verbose, require="sharedmem")(
            delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock)
            for e in self.estimators_
        )

        y_hat /= len(self.estimators_)

        return y_hat

    @staticmethod
    def _get_oob_predictions(tree, X):
        """Compute the OOB predictions for an individual tree.

        Parameters
        ----------
        tree : DecisionTreeRegressor object
            A single decision tree regressor.
        X : ndarray of shape (n_samples, n_features)
            The OOB samples.

        Returns
        -------
        y_pred : ndarray of shape (n_samples, 1, n_outputs)
            The OOB associated predictions.
        Fr   r-   N)r  r   rE   r   r"  s      r>   r   z$ForestRegressor._get_oob_predictions?  s^      aU3;;!Arzz2::56F  Arzz1,-Fr@   c                 
   t         |   ||      j                  d      | _        | j                  j                  d   dk(  r!| j                  j                  d      | _        |t
        } ||| j                        | _        y)a{  Compute and set the OOB score and attributes.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        y : ndarray of shape (n_samples, n_outputs)
            The target matrix.
        scoring_function : callable, default=None
            Scoring function for OOB score. Defaults to `r2_score`.
        """
        self.oob_prediction_ = super()._compute_oob_predictions(X, y).squeeze(axis=1)
        if self.oob_prediction_.shape[-1] == 1:
            # drop the n_outputs axis if there is a single output
            self.oob_prediction_ = self.oob_prediction_.squeeze(axis=-1)

        if scoring_function is None:
            scoring_function = r2_score

        self.oob_score_ = scoring_function(y, self.oob_prediction_)

    def _compute_partial_dependence_recursion(self, grid, target_features):
        """Fast partial dependence computation.

        Parameters
        ----------
        grid : ndarray of shape (n_samples, n_target_features), dtype=DTYPE
            The grid points on which the partial dependence should be
            evaluated.
        target_features : ndarray of shape (n_target_features), dtype=np.intp
            The set of target features for which the partial dependence
            should be evaluated.

        Returns
        -------
        averaged_predictions : ndarray of shape (n_samples,)
            The value of the partial dependence function on each grid point.
        C)rC   orderr   )ra   rC   rU  )rE   r  r   intpr   ra   rc   r   r   compute_partial_dependencer   )r   gridtarget_featuresaveraged_predictionsrg   s        r>   %_compute_partial_dependence_recursionz5ForestRegressor._compute_partial_dependence_recursionn  s    " zz$e37**_BGG3O!xx**Q-rzz 
 $$D JJ11o'; % 	D$4$4 55##r@   r  r   )r   r  r  r  r   r  r   r  rI  r   r   r[  r  r  s   @r>   rK  rK    sg      

 
 
6)V  0D, $r@   rK  c                        e Zd ZU dZi ej
                  ej
                  d eddh      ee	dgiZee
d<   ej                  d       	 dddd	d
ddddddddddddddd fdZ xZS )r0   a5  
    A random forest classifier.

    A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
    improve the predictive accuracy and control over-fitting.
    Trees in the forest use the best split strategy, i.e. equivalent to passing
    `splitter="best"` to the underlying :class:`~sklearn.tree.DecisionTreeClassifier`.
    The sub-sample size is controlled with the `max_samples` parameter if
    `bootstrap=True` (default), otherwise the whole dataset is used to build
    each tree.

    For a comparison between tree-based ensemble models see the example
    :ref:`sphx_glr_auto_examples_ensemble_plot_forest_hist_grad_boosting_comparison.py`.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    criterion : {"gini", "entropy", "log_loss"}, default="gini"
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "log_loss" and "entropy" both for the
        Shannon information gain, see :ref:`tree_mathematical_formulation`.
        Note: This parameter is tree-specific.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches.  This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : {"sqrt", "log2", None}, int or float, default="sqrt"
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `max(1, int(max_features * n_features_in_))` features are considered at each
          split.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        .. versionchanged:: 1.1
            The default of `max_features` changed from `"auto"` to `"sqrt"`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    bootstrap : bool, default=True
        Whether bootstrap samples are used when building trees. If False, the
        whole dataset is used to build each tree.

    oob_score : bool or callable, default=False
        Whether to use out-of-bag samples to estimate the generalization score.
        By default, :func:`~sklearn.metrics.accuracy_score` is used.
        Provide a callable with signature `metric(y_true, y_pred)` to use a
        custom metric. Only available if `bootstrap=True`.

    n_jobs : int, default=None
        The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
        :meth:`decision_path` and :meth:`apply` are all parallelized over the
        trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors. See :term:`Glossary
        <n_jobs>` for more details.

    random_state : int, RandomState instance or None, default=None
        Controls both the randomness of the bootstrapping of the samples used
        when building trees (if ``bootstrap=True``) and the sampling of the
        features to consider when looking for the best split at each node
        (if ``max_features < n_features``).
        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`Glossary <warm_start>` and
        :ref:`tree_ensemble_warm_start` for details.

    class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts,             default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        Note that for multioutput (including multilabel) weights should be
        defined for each class of every column in its own dict. For example,
        for four-class multilabel classification weights should be
        [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
        [{1:1}, {2:5}, {3:1}, {4:1}].

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        The "balanced_subsample" mode is the same as "balanced" except that
        weights are computed based on the bootstrap sample for every tree
        grown.

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details. See
        :ref:`sphx_glr_auto_examples_tree_plot_cost_complexity_pruning.py`
        for an example of such pruning.

        .. versionadded:: 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X
        to train each base estimator.

        - If None (default), then draw `X.shape[0]` samples.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max(round(n_samples * max_samples), 1)` samples. Thus,
          `max_samples` should be in the interval `(0.0, 1.0]`.

        .. versionadded:: 0.22

    monotonic_cst : array-like of int of shape (n_features), default=None
        Indicates the monotonicity constraint to enforce on each feature.
          - 1: monotonic increase
          - 0: no constraint
          - -1: monotonic decrease

        If monotonic_cst is None, no constraints are applied.

        Monotonicity constraints are not supported for:
          - multiclass classifications (i.e. when `n_classes > 2`),
          - multioutput classifications (i.e. when `n_outputs_ > 1`),
          - classifications trained on data with missing values.

        The constraints hold over the probability of the positive class.

        Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.

        .. versionadded:: 1.4

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.DecisionTreeClassifier`
        The child estimator template used to create the collection of fitted
        sub-estimators.

        .. versionadded:: 1.2
           `base_estimator_` was renamed to `estimator_`.

    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    classes_ : ndarray of shape (n_classes,) or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature.  It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
        This attribute exists only when ``oob_score`` is True.

    oob_decision_function_ : ndarray of shape (n_samples, n_classes) or \
            (n_samples, n_classes, n_outputs)
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN. This attribute exists
        only when ``oob_score`` is True.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator. Each subset is defined by an array of the indices selected.

        .. versionadded:: 1.4

    See Also
    --------
    sklearn.tree.DecisionTreeClassifier : A decision tree classifier.
    sklearn.ensemble.ExtraTreesClassifier : Ensemble of extremely randomized
        tree classifiers.
    sklearn.ensemble.HistGradientBoostingClassifier : A Histogram-based Gradient
        Boosting Classification Tree, very fast for big datasets (n_samples >=
        10_000).

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    The features are always randomly permuted at each split. Therefore,
    the best found split may vary, even with the same training data,
    ``max_features=n_features`` and ``bootstrap=False``, if the improvement
    of the criterion is identical for several splits enumerated during the
    search of the best split. To obtain a deterministic behaviour during
    fitting, ``random_state`` has to be fixed.

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    Examples
    --------
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=1000, n_features=4,
    ...                            n_informative=2, n_redundant=0,
    ...                            random_state=0, shuffle=False)
    >>> clf = RandomForestClassifier(max_depth=2, random_state=0)
    >>> clf.fit(X, y)
    RandomForestClassifier(...)
    >>> print(clf.predict([[0, 0, 0, 0]]))
    [1]
    """

    _parameter_constraints: dict = {
        **ForestClassifier._parameter_constraints,
        **DecisionTreeClassifier._parameter_constraints,
        "class_weight": [
            StrOptions({"balanced_subsample", "balanced"}),
            dict,
            list,
            None,
        ],
    }
    _parameter_constraints.pop("splitter")

    def __init__(
        self,
        n_estimators=100,
        *,
        criterion="gini",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features="sqrt",
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        bootstrap=True,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        class_weight=None,
        ccp_alpha=0.0,
        max_samples=None,
        monotonic_cst=None,
    ):
        super().__init__(
            estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=(
                "criterion",
                "max_depth",
                "min_samples_split",
                "min_samples_leaf",
                "min_weight_fraction_leaf",
                "max_features",
                "max_leaf_nodes",
                "min_impurity_decrease",
                "random_state",
                "ccp_alpha",
                "monotonic_cst",
            ),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight,
            max_samples=max_samples,
        )

        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.monotonic_cst = monotonic_cst
        self.ccp_alpha = ccp_alpha


class RandomForestRegressor(ForestRegressor):
    """
    A random forest regressor.

    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging to
    improve the predictive accuracy and control over-fitting.
    Trees in the forest use the best split strategy, i.e. equivalent to passing
    `splitter="best"` to the underlying :class:`~sklearn.tree.DecisionTreeRegressor`.
    The sub-sample size is controlled with the `max_samples` parameter if
    `bootstrap=True` (default), otherwise the whole dataset is used to build
    each tree.

    For a comparison between tree-based ensemble models see the example
    :ref:`sphx_glr_auto_examples_ensemble_plot_forest_hist_grad_boosting_comparison.py`.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    criterion : {"squared_error", "absolute_error", "friedman_mse", "poisson"},             default="squared_error"
        The function to measure the quality of a split. Supported criteria
        are "squared_error" for the mean squared error, which is equal to
        variance reduction as feature selection criterion and minimizes the L2
        loss using the mean of each terminal node, "friedman_mse", which uses
        mean squared error with Friedman's improvement score for potential
        splits, "absolute_error" for the mean absolute error, which minimizes
        the L1 loss using the median of each terminal node, and "poisson" which
        uses reduction in Poisson deviance to find splits.
        Training using "absolute_error" is significantly slower
        than when using "squared_error".

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

        .. versionadded:: 1.0
           Poisson criterion.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches.  This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : {"sqrt", "log2", None}, int or float, default=1.0
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `max(1, int(max_features * n_features_in_))` features are considered at each
          split.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None or 1.0, then `max_features=n_features`.

        .. note::
            The default of 1.0 is equivalent to bagged trees and more
            randomness can be achieved by setting smaller values, e.g. 0.3.

        .. versionchanged:: 1.1
            The default of `max_features` changed from `"auto"` to 1.0.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    bootstrap : bool, default=True
        Whether bootstrap samples are used when building trees. If False, the
        whole dataset is used to build each tree.

    oob_score : bool or callable, default=False
        Whether to use out-of-bag samples to estimate the generalization score.
        By default, :func:`~sklearn.metrics.r2_score` is used.
        Provide a callable with signature `metric(y_true, y_pred)` to use a
        custom metric. Only available if `bootstrap=True`.

    n_jobs : int, default=None
        The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
        :meth:`decision_path` and :meth:`apply` are all parallelized over the
        trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors. See :term:`Glossary
        <n_jobs>` for more details.

    random_state : int, RandomState instance or None, default=None
        Controls both the randomness of the bootstrapping of the samples used
        when building trees (if ``bootstrap=True``) and the sampling of the
        features to consider when looking for the best split at each node
        (if ``max_features < n_features``).
        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`Glossary <warm_start>` and
        :ref:`tree_ensemble_warm_start` for details.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details. See
        :ref:`sphx_glr_auto_examples_tree_plot_cost_complexity_pruning.py`
        for an example of such pruning.

        .. versionadded:: 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X
        to train each base estimator.

        - If None (default), then draw `X.shape[0]` samples.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max(round(n_samples * max_samples), 1)` samples. Thus,
          `max_samples` should be in the interval `(0.0, 1.0]`.

        .. versionadded:: 0.22

    monotonic_cst : array-like of int of shape (n_features), default=None
        Indicates the monotonicity constraint to enforce on each feature.
          - 1: monotonically increasing
          - 0: no constraint
          - -1: monotonically decreasing

        If monotonic_cst is None, no constraints are applied.

        Monotonicity constraints are not supported for:
          - multioutput regressions (i.e. when `n_outputs_ > 1`),
          - regressions trained on data with missing values.

        Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.

        .. versionadded:: 1.4

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.DecisionTreeRegressor`
        The child estimator template used to create the collection of fitted
        sub-estimators.

        .. versionadded:: 1.2
           `base_estimator_` was renamed to `estimator_`.

    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature.  It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
        This attribute exists only when ``oob_score`` is True.

    oob_prediction_ : ndarray of shape (n_samples,) or (n_samples, n_outputs)
        Prediction computed with out-of-bag estimate on the training set.
        This attribute exists only when ``oob_score`` is True.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator. Each subset is defined by an array of the indices selected.

        .. versionadded:: 1.4

    See Also
    --------
    sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
    sklearn.ensemble.ExtraTreesRegressor : Ensemble of extremely randomized
        tree regressors.
    sklearn.ensemble.HistGradientBoostingRegressor : A Histogram-based Gradient
        Boosting Regression Tree, very fast for big datasets (n_samples >=
        10_000).

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    The features are always randomly permuted at each split. Therefore,
    the best found split may vary, even with the same training data,
    ``max_features=n_features`` and ``bootstrap=False``, if the improvement
    of the criterion is identical for several splits enumerated during the
    search of the best split. To obtain a deterministic behaviour during
    fitting, ``random_state`` has to be fixed.
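
    A minimal illustration of this determinism (a sketch; any fixed seed
    yields bit-for-bit identical forests on identical data):

    >>> from sklearn.ensemble import RandomForestRegressor
    >>> from sklearn.datasets import make_regression
    >>> X_n, y_n = make_regression(n_features=4, random_state=0)
    >>> rf_a = RandomForestRegressor(n_estimators=10, random_state=0).fit(X_n, y_n)
    >>> rf_b = RandomForestRegressor(n_estimators=10, random_state=0).fit(X_n, y_n)
    >>> bool((rf_a.predict(X_n[:5]) == rf_b.predict(X_n[:5])).all())
    True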

    The default value ``max_features=1.0`` uses ``n_features``
    rather than ``n_features / 3``. The latter was originally suggested in
    [1], whereas the former was more recently justified empirically in [2].

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    .. [2] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
           trees", Machine Learning, 63(1), 3-42, 2006.

    Examples
    --------
    >>> from sklearn.ensemble import RandomForestRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=4, n_informative=2,
    ...                        random_state=0, shuffle=False)
    >>> regr = RandomForestRegressor(max_depth=2, random_state=0)
    >>> regr.fit(X, y)
    RandomForestRegressor(...)
    >>> print(regr.predict([[0, 0, 0, 0]]))
    [-8.32987858]
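
    With ``bootstrap=True`` (the default), out-of-bag estimates are available
    as well. A sketch reusing ``X`` and ``y`` from above; only the shape of
    the OOB predictions is asserted, as the score itself depends on the data:

    >>> regr_oob = RandomForestRegressor(oob_score=True, random_state=0)
    >>> regr_oob.fit(X, y).oob_prediction_.shape
    (100,)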
    """

    _parameter_constraints: dict = {
        **ForestRegressor._parameter_constraints,
        **DecisionTreeRegressor._parameter_constraints,
    }
    _parameter_constraints.pop("splitter")

    def __init__(self, n_estimators=100, *, criterion="squared_error",
                 max_depth=None, min_samples_split=2, min_samples_leaf=1,
                 min_weight_fraction_leaf=0.0, max_features=1.0,
                 max_leaf_nodes=None, min_impurity_decrease=0.0,
                 bootstrap=True, oob_score=False, n_jobs=None,
                 random_state=None, verbose=0, warm_start=False,
                 ccp_alpha=0.0, max_samples=None, monotonic_cst=None):
        super().__init__(
            estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            # Names of the parameters copied onto each sub-tree at fit time.
            estimator_params=(
                "criterion", "max_depth", "min_samples_split",
                "min_samples_leaf", "min_weight_fraction_leaf", "max_features",
                "max_leaf_nodes", "min_impurity_decrease", "random_state",
                "ccp_alpha", "monotonic_cst",
            ),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=max_samples,
        )

        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.ccp_alpha = ccp_alpha
        self.monotonic_cst = monotonic_cst


class ExtraTreesClassifier(ForestClassifier):
    """
    An extra-trees classifier.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
    and control over-fitting.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    criterion : {"gini", "entropy", "log_loss"}, default="gini"
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "log_loss" and "entropy" both for the
        Shannon information gain, see :ref:`tree_mathematical_formulation`.
        Note: This parameter is tree-specific.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches.  This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : {"sqrt", "log2", None}, int or float, default="sqrt"
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `max(1, int(max_features * n_features_in_))` features are considered at each
          split.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        .. versionchanged:: 1.1
            The default of `max_features` changed from `"auto"` to `"sqrt"`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    bootstrap : bool, default=False
        Whether bootstrap samples are used when building trees. If False, the
        whole dataset is used to build each tree.

    oob_score : bool or callable, default=False
        Whether to use out-of-bag samples to estimate the generalization score.
        By default, :func:`~sklearn.metrics.accuracy_score` is used.
        Provide a callable with signature `metric(y_true, y_pred)` to use a
        custom metric. Only available if `bootstrap=True`.

    n_jobs : int, default=None
        The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
        :meth:`decision_path` and :meth:`apply` are all parallelized over the
        trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors. See :term:`Glossary
        <n_jobs>` for more details.

    random_state : int, RandomState instance or None, default=None
        Controls 3 sources of randomness:

        - the bootstrapping of the samples used when building trees
          (if ``bootstrap=True``)
        - the sampling of the features to consider when looking for the best
          split at each node (if ``max_features < n_features``)
        - the draw of the splits for each of the `max_features`

        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`Glossary <warm_start>` and
        :ref:`tree_ensemble_warm_start` for details.

    class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, \
            default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        Note that for multioutput (including multilabel) weights should be
        defined for each class of every column in its own dict. For example,
        for four-class multilabel classification weights should be
        [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
        [{1:1}, {2:5}, {3:1}, {4:1}].

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        The "balanced_subsample" mode is the same as "balanced" except that
        weights are computed based on the bootstrap sample for every tree
        grown.

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details. See
        :ref:`sphx_glr_auto_examples_tree_plot_cost_complexity_pruning.py`
        for an example of such pruning.

        .. versionadded:: 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X
        to train each base estimator.

        - If None (default), then draw `X.shape[0]` samples.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples. Thus,
          `max_samples` should be in the interval `(0.0, 1.0]`.

        .. versionadded:: 0.22

    monotonic_cst : array-like of int of shape (n_features), default=None
        Indicates the monotonicity constraint to enforce on each feature.
          - 1: monotonically increasing
          - 0: no constraint
          - -1: monotonically decreasing

        If monotonic_cst is None, no constraints are applied.

        Monotonicity constraints are not supported for:
          - multiclass classifications (i.e. when `n_classes > 2`),
          - multioutput classifications (i.e. when `n_outputs_ > 1`),
          - classifications trained on data with missing values.

        The constraints hold over the probability of the positive class.

        Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.

        .. versionadded:: 1.4

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.ExtraTreeClassifier`
        The child estimator template used to create the collection of fitted
        sub-estimators.

        .. versionadded:: 1.2
           `base_estimator_` was renamed to `estimator_`.

    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    classes_ : ndarray of shape (n_classes,) or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature.  It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
        This attribute exists only when ``oob_score`` is True.

    oob_decision_function_ : ndarray of shape (n_samples, n_classes) or \
            (n_samples, n_classes, n_outputs)
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN. This attribute exists
        only when ``oob_score`` is True.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator. Each subset is defined by an array of the indices selected.

        .. versionadded:: 1.4

    See Also
    --------
    ExtraTreesRegressor : An extra-trees regressor with random splits.
    RandomForestClassifier : A random forest classifier with optimal splits.
    RandomForestRegressor : Ensemble regressor using trees with optimal splits.

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
           trees", Machine Learning, 63(1), 3-42, 2006.

    Examples
    --------
    >>> from sklearn.ensemble import ExtraTreesClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_features=4, random_state=0)
    >>> clf = ExtraTreesClassifier(n_estimators=100, random_state=0)
    >>> clf.fit(X, y)
    ExtraTreesClassifier(random_state=0)
    >>> clf.predict([[0, 0, 0, 0]])
    array([1])
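
    Beyond class predictions, each sample can be mapped to its leaf index in
    every tree via ``apply``. A sketch reusing ``clf`` from above; only the
    shape (one column per tree) is asserted:

    >>> clf.apply([[0, 0, 0, 0]]).shape
    (1, 100)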
    """

    _parameter_constraints: dict = {
        **ForestClassifier._parameter_constraints,
        **DecisionTreeClassifier._parameter_constraints,
        "class_weight": [
            StrOptions({"balanced_subsample", "balanced"}),
            dict,
            list,
            None,
        ],
    }
    _parameter_constraints.pop("splitter")

    def __init__(self, n_estimators=100, *, criterion="gini", max_depth=None,
                 min_samples_split=2, min_samples_leaf=1,
                 min_weight_fraction_leaf=0.0, max_features="sqrt",
                 max_leaf_nodes=None, min_impurity_decrease=0.0,
                 bootstrap=False, oob_score=False, n_jobs=None,
                 random_state=None, verbose=0, warm_start=False,
                 class_weight=None, ccp_alpha=0.0, max_samples=None,
                 monotonic_cst=None):
        super().__init__(
            estimator=ExtraTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=(
                "criterion", "max_depth", "min_samples_split",
                "min_samples_leaf", "min_weight_fraction_leaf", "max_features",
                "max_leaf_nodes", "min_impurity_decrease", "random_state",
                "ccp_alpha", "monotonic_cst",
            ),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight,
            max_samples=max_samples,
        )

        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.ccp_alpha = ccp_alpha
        self.monotonic_cst = monotonic_cst


class ExtraTreesRegressor(ForestRegressor):
    """
    An extra-trees regressor.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
    and control over-fitting.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    criterion : {"squared_error", "absolute_error", "friedman_mse", "poisson"},             default="squared_error"
        The function to measure the quality of a split. Supported criteria
        are "squared_error" for the mean squared error, which is equal to
        variance reduction as feature selection criterion and minimizes the L2
        loss using the mean of each terminal node, "friedman_mse", which uses
        mean squared error with Friedman's improvement score for potential
        splits, "absolute_error" for the mean absolute error, which minimizes
        the L1 loss using the median of each terminal node, and "poisson" which
        uses reduction in Poisson deviance to find splits.
        Training using "absolute_error" is significantly slower
        than when using "squared_error".

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches.  This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : {"sqrt", "log2", None}, int or float, default=1.0
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `max(1, int(max_features * n_features_in_))` features are considered at each
          split.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None or 1.0, then `max_features=n_features`.

        .. note::
            The default of 1.0 is equivalent to bagged trees and more
            randomness can be achieved by setting smaller values, e.g. 0.3.

        .. versionchanged:: 1.1
            The default of `max_features` changed from `"auto"` to 1.0.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    bootstrap : bool, default=False
        Whether bootstrap samples are used when building trees. If False, the
        whole dataset is used to build each tree.

    oob_score : bool or callable, default=False
        Whether to use out-of-bag samples to estimate the generalization score.
        By default, :func:`~sklearn.metrics.r2_score` is used.
        Provide a callable with signature `metric(y_true, y_pred)` to use a
        custom metric. Only available if `bootstrap=True`.

    n_jobs : int, default=None
        The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
        :meth:`decision_path` and :meth:`apply` are all parallelized over the
        trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors. See :term:`Glossary
        <n_jobs>` for more details.

    random_state : int, RandomState instance or None, default=None
        Controls 3 sources of randomness:

        - the bootstrapping of the samples used when building trees
          (if ``bootstrap=True``)
        - the sampling of the features to consider when looking for the best
          split at each node (if ``max_features < n_features``)
        - the draw of the splits for each of the `max_features`

        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`Glossary <warm_start>` and
        :ref:`tree_ensemble_warm_start` for details.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details. See
        :ref:`sphx_glr_auto_examples_tree_plot_cost_complexity_pruning.py`
        for an example of such pruning.

        .. versionadded:: 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X
        to train each base estimator.

        - If None (default), then draw `X.shape[0]` samples.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples. Thus,
          `max_samples` should be in the interval `(0.0, 1.0]`.

        .. versionadded:: 0.22

    monotonic_cst : array-like of int of shape (n_features), default=None
        Indicates the monotonicity constraint to enforce on each feature.
          - 1: monotonically increasing
          - 0: no constraint
          - -1: monotonically decreasing

        If monotonic_cst is None, no constraints are applied.

        Monotonicity constraints are not supported for:
          - multioutput regressions (i.e. when `n_outputs_ > 1`),
          - regressions trained on data with missing values.

        Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.

        .. versionadded:: 1.4

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.ExtraTreeRegressor`
        The child estimator template used to create the collection of fitted
        sub-estimators.

        .. versionadded:: 1.2
           `base_estimator_` was renamed to `estimator_`.

    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature.  It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_outputs_ : int
        The number of outputs.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
        This attribute exists only when ``oob_score`` is True.

    oob_prediction_ : ndarray of shape (n_samples,) or (n_samples, n_outputs)
        Prediction computed with out-of-bag estimate on the training set.
        This attribute exists only when ``oob_score`` is True.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator. Each subset is defined by an array of the indices selected.

        .. versionadded:: 1.4

    See Also
    --------
    ExtraTreesClassifier : An extra-trees classifier with random splits.
    RandomForestClassifier : A random forest classifier with optimal splits.
    RandomForestRegressor : Ensemble regressor using trees with optimal splits.

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    Examples
    --------
    >>> from sklearn.datasets import load_diabetes
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.ensemble import ExtraTreesRegressor
    >>> X, y = load_diabetes(return_X_y=True)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, random_state=0)
    >>> reg = ExtraTreesRegressor(n_estimators=100, random_state=0).fit(
    ...    X_train, y_train)
    >>> reg.score(X_test, y_test)
    0.2727...
    r~   r]  rr  Nr   r-   rw   rx   Fr   rs  c                    t         |   t               |d|
||||||
       || _        || _        || _        || _        || _        || _        || _	        |	| _
        || _        || _        y ru  )r   r   r   r   ra  rb  rc  rd  re  rf  rg  rh  ri  rw  s                      r>   r   zExtraTreesRegressor.__init__
  s    , 	(*%  %!#- 	 	
2 #"!2 0(@%(,%:""*r@   r  rx  r  s   @r>   r3   r3     s    Pd$

0
0$

6
6$D  z* 8+ "!$!)8+ 8+r@   r3   c                       e Zd ZU dZ eeddd      gedgdgdgdej                  d	dgiZee	d
<   dD ]  Z
ej                  e
        dZdZ	 ddddddddddddd fdZddZddZ ed      d fd	       ZddZd Z xZS )r4   a  
    An ensemble of totally random trees.

    An unsupervised transformation of a dataset to a high-dimensional
    sparse representation. A datapoint is coded according to which leaf of
    each tree it is sorted into. Using a one-hot encoding of the leaves,
    this leads to a binary coding with as many ones as there are trees in
    the forest.

    The dimensionality of the resulting representation is
    ``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
    the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.

    Read more in the :ref:`User Guide <random_trees_embedding>`.

    Parameters
    ----------
    n_estimators : int, default=100
        Number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    max_depth : int, default=5
        The maximum depth of each tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` is the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches.  This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` is the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    sparse_output : bool, default=True
        Whether or not to return a sparse CSR matrix, as default behavior,
        or to return a dense array compatible with dense pipeline operators.

    n_jobs : int, default=None
        The number of jobs to run in parallel. :meth:`fit`, :meth:`transform`,
        :meth:`decision_path` and :meth:`apply` are all parallelized over the
        trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors. See :term:`Glossary
        <n_jobs>` for more details.

    random_state : int, RandomState instance or None, default=None
        Controls the generation of the random `y` used to fit the trees
        and the draw of the splits for each feature at the trees' nodes.
        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`Glossary <warm_start>` and
        :ref:`tree_ensemble_warm_start` for details.

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.ExtraTreeRegressor` instance
        The child estimator template used to create the collection of fitted
        sub-estimators.

        .. versionadded:: 1.2
           `base_estimator_` was renamed to `estimator_`.

    estimators_ : list of :class:`~sklearn.tree.ExtraTreeRegressor` instances
        The collection of fitted sub-estimators.

    feature_importances_ : ndarray of shape (n_features,)
        The feature importances (the higher, the more important the feature).

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    one_hot_encoder_ : OneHotEncoder instance
        One-hot encoder used to create the sparse embedding.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator. Each subset is defined by an array of the indices selected.

        .. versionadded:: 1.4

    See Also
    --------
    ExtraTreesClassifier : An extra-trees classifier.
    ExtraTreesRegressor : An extra-trees regressor.
    RandomForestClassifier : A random forest classifier.
    RandomForestRegressor : A random forest regressor.
    sklearn.tree.ExtraTreeClassifier: An extremely randomized
        tree classifier.
    sklearn.tree.ExtraTreeRegressor : An extremely randomized
        tree regressor.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    .. [2] Moosmann, F. and Triggs, B. and Jurie, F.  "Fast discriminative
           visual codebooks using randomized clustering forests"
           NIPS 2007

    Examples
    --------
    >>> from sklearn.ensemble import RandomTreesEmbedding
    >>> X = [[0,0], [1,0], [0,1], [-1,0], [0,-1]]
    >>> random_trees = RandomTreesEmbedding(
    ...    n_estimators=5, random_state=0, max_depth=1).fit(X)
    >>> X_sparse_embedding = random_trees.transform(X)
    >>> X_sparse_embedding.toarray()
    array([[0., 1., 1., 0., 1., 0., 0., 1., 1., 0.],
           [0., 1., 1., 0., 1., 0., 0., 1., 1., 0.],
           [0., 1., 0., 1., 0., 1., 0., 1., 0., 1.],
           [1., 0., 1., 0., 1., 0., 1., 0., 1., 0.],
           [0., 1., 1., 0., 1., 0., 0., 1., 1., 0.]])
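
    The embedding allocates one output column per leaf across all trees; the
    five depth-1 trees above have two leaves each, hence the ten columns:

    >>> X_sparse_embedding.shape
    (5, 10)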
    """

    _parameter_constraints: dict = {
        "n_estimators": [Interval(Integral, 1, None, closed="left")],
        "n_jobs": [Integral, None],
        "verbose": ["verbose"],
        "warm_start": ["boolean"],
        **BaseDecisionTree._parameter_constraints,
        "sparse_output": ["boolean"],
    }
    for param in ("max_features", "ccp_alpha", "splitter", "monotonic_cst"):
        _parameter_constraints.pop(param)

    # Totally random trees regress a synthetic uniform target with a fixed
    # criterion and a single candidate feature per split.
    criterion = "squared_error"
    max_features = 1

    def __init__(self, n_estimators=100, *, max_depth=5, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.0,
                 max_leaf_nodes=None, min_impurity_decrease=0.0,
                 sparse_output=True, n_jobs=None, random_state=None,
                 verbose=0, warm_start=False):
        super().__init__(
            estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=(
                "criterion", "max_depth", "min_samples_split",
                "min_samples_leaf", "min_weight_fraction_leaf", "max_features",
                "max_leaf_nodes", "min_impurity_decrease", "random_state",
            ),
            bootstrap=False,
            oob_score=False,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=None,
        )

        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.sparse_output = sparse_output

    def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
        raise NotImplementedError("OOB score not supported by tree embedding")

    def fit(self, X, y=None, sample_weight=None):
        """
        Fit estimator.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csc_matrix`` for maximum efficiency.

        y : Ignored
            Not used, present for API consistency by convention.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Parameters are validated in ``fit_transform``.
        self.fit_transform(X, y, sample_weight=sample_weight)
        return self

    @_fit_context(prefer_skip_nested_validation=True)
    def fit_transform(self, X, y=None, sample_weight=None):
        """
        Fit estimator and transform dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data used to build forests. Use ``dtype=np.float32`` for
            maximum efficiency.

        y : Ignored
            Not used, present for API consistency by convention.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        X_transformed : sparse matrix of shape (n_samples, n_out)
            Transformed dataset.
        """
        rnd = check_random_state(self.random_state)
        # Fit totally random trees on a synthetic continuous target: the
        # supervision is irrelevant, only the induced leaf partition matters.
        y = rnd.uniform(size=_num_samples(X))
        super().fit(X, y, sample_weight=sample_weight)

        self.one_hot_encoder_ = OneHotEncoder(sparse_output=self.sparse_output)
        output = self.one_hot_encoder_.fit_transform(self.apply(X))
        self._n_features_out = output.shape[1]
        return output

    def get_feature_names_out(self, input_features=None):
        """Get output feature names for transformation.

        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Only used to validate feature names with the names seen in :meth:`fit`.

        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names, in the format of
            `randomtreesembedding_{tree}_{leaf}`, where `tree` is the tree used
            to generate the leaf and `leaf` is the index of a leaf node
            in that tree. Note that the node indexing scheme is used to
            index both nodes with children (split nodes) and leaf nodes.
            Only the latter can be present as output features.
            As a consequence, there are missing indices in the output
            feature names.
        """
        check_is_fitted(self, "_n_features_out")
        _check_feature_names_in(
            self, input_features=input_features, generate_names=False
        )

        feature_names = [
            f"randomtreesembedding_{tree}_{leaf}"
            for tree in range(self.n_estimators)
            for leaf in self.one_hot_encoder_.categories_[tree]
        ]
        return np.asarray(feature_names, dtype=object)

    def transform(self, X):
        """
        Transform dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data to be transformed. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csr_matrix`` for maximum efficiency.

        Returns
        -------
        X_transformed : sparse matrix of shape (n_samples, n_out)
            Transformed dataset.
        """
        check_is_fitted(self)
        return self.one_hot_encoder_.transform(self.apply(X))