
Python fixes.unique Function Code Examples


This article collects typical usage examples of the Python function sklearn.utils.fixes.unique. If you are struggling with questions such as what exactly the Python unique function does, how to call it, or what it looks like in real code, the hand-picked examples below may help.



The following presents 18 code examples of the unique function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
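Before diving in, here is a minimal sketch of the core pattern almost every example below relies on. The try/except fallback is an assumption for modern environments: sklearn.utils.fixes.unique was a compatibility wrapper around numpy.unique for old NumPy releases and has long since been removed from scikit-learn, so today numpy.unique is the drop-in replacement.

import numpy as np

# sklearn.utils.fixes.unique only exists in old scikit-learn versions;
# on a modern installation, fall back to numpy.unique, which it wrapped.
try:
    from sklearn.utils.fixes import unique
except ImportError:
    unique = np.unique

y = np.asarray([2, 2, 2, 3, 3, 4])

# classes: the sorted unique labels of y.
# y_indices: for each element of y, the index of its label in classes,
# so that classes[y_indices] reconstructs y.
classes, y_indices = unique(y, return_inverse=True)

print(classes)    # [2 3 4]
print(y_indices)  # [0 0 0 1 1 2]

This classes/inverse pair is exactly how the estimators in the examples below (Examples 8, 9, 13, and 14, among others) encode arbitrary labels as the contiguous integers 0..n_classes-1 inside fit().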

Example 1: __init__

    def __init__(
        self, y, n_iter=10, test_size=0.1, train_size=None, indices=True, random_state=None, n_iterations=None
    ):

        super(StratifiedShuffleSplit, self).__init__(
            len(y), n_iter, test_size, train_size, indices, random_state, n_iterations
        )
        self.y = np.array(y)
        self.classes, self.y_indices = unique(y, return_inverse=True)
        n_cls = self.classes.shape[0]

        if np.min(np.bincount(self.y_indices)) < 2:
            raise ValueError(
                "The least populated class in y has only 1"
                " member, which is too few. The minimum"
                " number of labels for any class cannot"
                " be less than 2."
            )

        if self.n_train < n_cls:
            raise ValueError(
                "The train_size = %d should be greater than or "
                "equal to the number of classes = %d" % (self.n_train, n_cls)
            )
        if self.n_test < n_cls:
            raise ValueError(
                "The test_size = %d should be greater than or "
                "equal to the number of classes = %d" % (self.n_test, n_cls)
            )
Developer: jamartinb | Project: kaggle | Lines: 28 | Source: cross_validation.py


Example 2: test_compute_class_weight

def test_compute_class_weight():
    """Test (and demo) compute_class_weight."""
    y = np.asarray([2, 2, 2, 3, 3, 4])
    classes = unique(y)
    cw = compute_class_weight("auto", classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_true(cw[0] < cw[1] < cw[2])
Developer: garysieling | Project: scikit-learn | Lines: 7 | Source: test_class_weight.py


Example 3: test_stratified_shuffle_split_iter

def test_stratified_shuffle_split_iter():
    ys = [
        np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
        np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
        np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
        np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
        np.array([-1] * 800 + [1] * 50),
    ]

    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33, random_state=0)
        for train, test in sss:
            assert_array_equal(unique(y[train]), unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = np.bincount(unique(y[train], return_inverse=True)[1]) / float(len(y[train]))
            p_test = np.bincount(unique(y[test], return_inverse=True)[1]) / float(len(y[test]))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(y[train].size + y[test].size, y.size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
Developer: jayjhan8 | Project: scikit-learn | Lines: 19 | Source: test_cross_validation.py


Example 4: _shuffle

def _shuffle(y, labels, random_state):
    """Return a shuffled copy of y eventually shuffle among same labels."""
    if labels is None:
        ind = random_state.permutation(len(y))
    else:
        ind = np.arange(len(labels))
        for label in unique(labels):
            this_mask = labels == label
            ind[this_mask] = random_state.permutation(ind[this_mask])
    return y[ind]
Developer: jamartinb | Project: kaggle | Lines: 10 | Source: cross_validation.py


Example 5: __iter__

 def __iter__(self):
     
     # We make a copy here to avoid side-effects during iteration
     labels = np.array(self.labels, copy=True)
     unique_labels = unique(labels)
     curr_iter = 0
     idx_cache = set()
     num_cache_hits = 0
     max_cache_hits = self.max_cache_hits
     
     #comb = combinations(range(self.n_unique_labels), self.p)
 
     while curr_iter < self.max_iters and num_cache_hits < max_cache_hits:
         
         idx = random_combination(range(self.n_unique_labels),
                                           self.p)
         if idx in idx_cache:
             num_cache_hits += 1
             if num_cache_hits >= max_cache_hits:
                 print "WARNING LeavePLabelOut: number of consecutive cache hits too high, bailing out after %d samples" % curr_iter
             continue
         else:
             num_cache_hits = 0
         idx_cache.add(idx)
         
         idx = np.array(idx)
         
         test_index = np.zeros(labels.size, dtype=np.bool)
         for l in unique_labels[idx]:
             test_index[labels == l] = True
         train_index = np.logical_not(test_index)
         if self.indices:
             ind = np.arange(labels.size)
             train_index = ind[train_index]
             test_index = ind[test_index]
         
         if len(unique(self.train_Y[train_index])) == 1:
             # prevent training sets with only one class
             continue
         
         curr_iter += 1
         yield train_index, test_index
Developer: IanTheEngineer | Project: Penn-haptics-bolt | Lines: 43 | Source: safe_leave_p_out.py


Example 6: __init__

 def __init__(self, labels, p, max_iters, train_Y, max_cache_hits = 100,
              indices=True):
     self.labels = labels
     self.unique_labels = unique(self.labels)
     self.n_unique_labels = self.unique_labels.size
     self.p = p
     self.indices = indices
     self.train_Y = train_Y
     self.max_iters = max_iters
     self.max_cache_hits = max_cache_hits
Developer: IanTheEngineer | Project: Penn-haptics-bolt | Lines: 10 | Source: safe_leave_p_out.py


Example 7: test_stratified_shuffle_split

def test_stratified_shuffle_split():
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)

    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)

    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)

    ys = [
        np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
        np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
        np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
        np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
        np.array([-1] * 800 + [1] * 50)
        ]

    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0, indices=True)
        for train, test in sss:
            assert_array_equal(unique(y[train]), unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = np.bincount(
                unique(y[train], return_inverse=True)[1]
                ) / float(len(y[train]))
            p_test = np.bincount(
                unique(y[test], return_inverse=True)[1]
                ) / float(len(y[test]))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(y[train].size + y[test].size, y.size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
Developer: PepGardiola | Project: scikit-learn | Lines: 40 | Source: test_cross_validation.py


Example 8: fit

    def fit(self, inp, y):
        self.precomputed_probs_ = None
        self.precomputed_weights_ = None

        self.classes_, y = unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)
        self.random_state_ = check_random_state(self.random_state)

        if self.pipeline is not None:
            inp = self.pipeline.fit_transform(inp)

        self.weighting_strategy.prepare(inp, y)
        self.classifiers_ = self.training_strategy.train_estimators(
            self.n_estimators, inp, y,
            self.weighting_strategy, self.random_state_
        )

        # Reset it to null because the previous line uses self.predict
        self.precomputed_probs_ = None
        self.precomputed_weights_ = None
        return self
Developer: etamponi | Project: resilient-protocol | Lines: 21 | Source: ensemble.py


Example 9: fit

 def fit(self, X, y, labels=None):
     """Fit a forest of trees from the training set X and y"""
     # Pull the random state from the forest
     random_state = check_random_state(self.random_state)
     # Reshape y to preserve the data contiguity
     y = np.reshape(y, (-1, 1))
     # Get the dimensions of X
     n_samples, self.n_features_ = X.shape
     # Get the number of outputs for morphing y later
     self.n_outputs_ = y.shape[1]
     # Make a container for all unique classes
     self.classes_ = []
     # Make a container for the number of instances of each unique class
     self.n_classes_ = []
     # For each output of y
     for k in xrange(self.n_outputs_):
         # Get the unique class labels and an array of indices pointing to the labels
         classes_k, y[:, k] = unique(y[:, k], return_inverse=True)
         # Store the unique class labels
         self.classes_.append(classes_k)
         # And store the number of unique class labels
         self.n_classes_.append(classes_k.shape[0])
     # Check if we need/can do OOB estimation
     if not self.bootstrap and self.oob_score:
         raise ValueError("Can't use OOB estimation " 
                          "if bootstraping is not enabled")
     # Precalculate the random seeds for all trees
     n_trees = self.n_estimators
     seeds = random_state.randint(MAX_INT, size=n_trees)
     # Grow the forest given the seeds and labels
     self.estimators_ = grow_forest(self, X, y, seeds, labels)
     # Check if we need/can do OOB estimation
     if self.oob_score:
         # If so, then do it
         self.get_oob_score(X, y)
     # Unwrap the per-output lists if there is only one output to consider
     if hasattr(self, "classes_") and self.n_outputs_ == 1:
         self.n_classes_ = self.n_classes_[0]
         self.classes_ = self.classes_[0]
     return self
Developer: ruffsl | Project: CS7616P1 | Lines: 40 | Source: labeled_bootstraping.py


Example 10: test_k_means_new_centers

def test_k_means_new_centers():
    # Explore the part of the code where a new center is reassigned
    X = np.array([[0, 0, 1, 1],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0]])
    labels = [0, 1, 2, 1, 1, 2]
    bad_centers = np.array([[+0,  1,  0,  0],
                            [.2,  0, .2, .2],
                            [+0,  0,  0,  0]])

    km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
                random_state=1)
    for this_X in (X, sp.coo_matrix(X)):
        km.fit(this_X)
        this_labels = km.labels_
        # Reorder the labels so that the first instance is in cluster 0,
        # the second in cluster 1, ...
        this_labels = unique(this_labels, return_index=True)[1][this_labels]
        np.testing.assert_array_equal(this_labels, labels)
Developer: Big-Data | Project: scikit-learn | Lines: 22 | Source: test_k_means.py


Example 11: test_auto_weight

def test_auto_weight():
    """Test class weights for imbalanced data"""
    from sklearn.linear_model import LogisticRegression

    # We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of the predictors from
    # class 1.
    # We add one to the targets as a non-regression test: class_weight="auto"
    # used to work only when the labels were a range [0..K).
    from sklearn.utils import compute_class_weight

    X, y = iris.data[:, :2], iris.target + 1
    unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])

    classes, y_ind = unique(y[unbalanced], return_inverse=True)
    class_weights = compute_class_weight("auto", classes, y_ind)
    assert_true(np.argmax(class_weights) == 2)

    for clf in (svm.SVC(kernel="linear"), svm.LinearSVC(random_state=0), LogisticRegression()):
        # check that the score is better when class_weight='auto' is set.
        y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
        clf.set_params(class_weight="auto")
        y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
        assert_true(metrics.f1_score(y, y_pred) <= metrics.f1_score(y, y_pred_balanced))
Developer: dubourg | Project: scikit-learn | Lines: 24 | Source: test_svm.py


Example 12: unique

# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause

import numpy as np
import pylab as pl
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils.fixes import unique


X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)

# map labels from {-1, 1} to {0, 1}
labels, y = unique(y, return_inverse=True)

X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]

original_params = {'n_estimators': 1000, 'max_depth': 2, 'random_state': 1,
                   'min_samples_split': 5}

pl.figure()

for label, color, setting in [('No shrinkage', 'orange',
                               {'learning_rate': 1.0, 'subsample': 1.0}),
                              ('learning_rate=0.1', 'turquoise',
                               {'learning_rate': 0.1, 'subsample': 1.0}),
                              ('subsample=0.5', 'blue',
                               {'learning_rate': 1.0, 'subsample': 0.5}),
Developer: 2011200799 | Project: scikit-learn | Lines: 31 | Source: plot_gradient_boosting_regularization.py


Example 13: fit

    def fit(self, X, y, sample_mask=None, X_argsorted=None, check_input=True, sample_weight=None):
        """Build a decision tree from the training set (X, y).

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The training input samples. Use ``dtype=np.float32`` for maximum
            efficiency.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (integers that correspond to classes in
            classification, real numbers in regression).
            Use ``dtype=np.float64`` and ``order='C'`` for maximum
            efficiency.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)

        # Deprecations
        if sample_mask is not None:
            warn(
                "The sample_mask parameter is deprecated as of version 0.14 " "and will be removed in 0.16.",
                DeprecationWarning,
            )

        if X_argsorted is not None:
            warn(
                "The X_argsorted parameter is deprecated as of version 0.14 " "and will be removed in 0.16.",
                DeprecationWarning,
            )

        # Convert data
        if check_input:
            X, = check_arrays(X, dtype=DTYPE, sparse_format="dense", check_ccontiguous=True)

        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)

        y = np.atleast_1d(y)

        if y.ndim == 1:
            # reshape is necessary to preserve data contiguity,
            # whereas [:, np.newaxis] does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        if is_classification:
            y = np.copy(y)

            self.classes_ = []
            self.n_classes_ = []

            for k in xrange(self.n_outputs_):
                classes_k, y[:, k] = unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])

        else:
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_

        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        # Check parameters
        max_depth = (2 ** 31) - 1 if self.max_depth is None else self.max_depth

        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    "Invalid value for max_features. Allowed string " 'values are "auto", "sqrt" or "log2".'
                )
#......... part of the code omitted here .........
Developer: rexshihaoren | Project: MSPrediction-Python | Lines: 101 | Source: tree.py


Example 14: fit

    def fit(self, X, y):
        """
        Fit the Kernelized Fisher Discriminant model according to the given training data and parameters.
        Based on "Algorithm 5" in
        Zhang et al., 'Regularized Discriminant Analysis, Ridge Regression and Beyond', Journal of Machine Learning Research 11 (2010) 2199-2228.
        NOTE: setting norm_covariance=False, use_total_scatter=True, and solution_norm='A' or 'B' gives the algorithm from the paper.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : array, shape = [n_samples]
            Target values (integers)
        
        """
        X, y = check_arrays(X, y, sparse_format='dense')
        self.classes_, y = unique( y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        n_samples_perclass = np.bincount(y)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            self.priors_ = np.bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors

        ts = time.time()
                    
        self.means_ = []
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            self.means_.append(np.asarray(meang))
        if self.print_timing: print 'KernelFisher.fit: means took', time.time() - ts


        ts = time.time()
        PI_diag = np.diag( 1.0*n_samples_perclass )                                        # shape(PI_diag) = n_classes x n_classes
        PI_inv = np.diag( 1.0 / (1.0*n_samples_perclass) )                                 # shape(PI_inv) = n_classes x n_classes
        PI_sqrt_inv = np.sqrt( PI_inv )                                                    # shape(PI_sqrt_inv) = n_classes x n_classes
        #H = np.identity(n_samples) - (1.0/(1.0*n_samples))*np.ones((n_samples,n_samples))
        E=np.zeros( (n_samples,n_classes) )                                                # shape(E) = n_samples x n_classes
        E[[range(n_samples),y]]=1
        E_PIsi = np.dot(E, PI_sqrt_inv)
        One_minus_E_Pi_Et = np.identity(n_samples) - np.inner( E, np.inner(PI_diag, E).T ) # shape(One_minus_E_Pi_Et) = n_samples x n_samples
        if self.print_timing: print 'KernelFisher.fit: matrices took', time.time() - ts


        #####################################################################################################################
        #C = HKH = (I - 1/n 1x1.T) K (I - 1/n 1x1.T) = (K -  1xK_mean.T) * (I - 1/n 1x1.T)
        #        = K - K_meanx1.T - 1xK_mean.T + K_allmean 1x1
        #  --> which is the same as what self._centerer.fit_transform(C) performs
        #
        # if use_total_scatter=False,
        #      then using Sw which is (1-E*Pi*E.T)K(1-E*Pi*E.T)
        #####################################################################################################################
        ts = time.time()
        C = self._get_kernel(X) 
        K_mean = np.sum(C, axis=1) / (1.0*C.shape[1])

        if self.use_total_scatter:
            C = self._centerer.fit_transform(C)
        else:
            C = np.inner( One_minus_E_Pi_Et, np.inner(C, One_minus_E_Pi_Et).T)
        if self.print_timing: print 'KernelFisher.fit: Kernel Calculation took', time.time() - ts


        ts = time.time()
        Uc, Sc, Utc, Sc_norm = self.condensed_svd( C, self.tol, store_singular_vals=True )
        if self.print_timing: print 'KernelFisher.fit: Uc, Sc, Utc took', time.time() - ts


        ts = time.time()
        #scale up sigma to appropriate range of singular values
        reg_factor = self.sigma_sqrd * Sc_norm 
        St_reg_inv = np.inner( Uc, np.inner(np.diag(1.0/(Sc + reg_factor)), Utc.T).T )   
        if self.print_timing: print 'KernelFisher.fit: St_reg_inv took', time.time() - ts

        ts = time.time()
        R = np.inner(E_PIsi.T, np.inner(C, np.inner( St_reg_inv, E_PIsi.T ).T ).T )
        if self.print_timing: print 'KernelFisher.fit: R took', time.time() - ts


        ts = time.time()
        Vr, Lr, Vtr, Lr_norm =  self.condensed_svd( R, tol=1e-6 )                
        if self.print_timing: print 'KernelFisher.fit: Vr, Lr, Vtr took', time.time() - ts


        ts = time.time()
        #####################################################################################################################
        #This capital Z is Upsilon.T * H from equation (22)
        #####################################################################################################################
        #Z = np.inner( np.diag(1.0 / np.sqrt(Lr)), np.inner(Vtr, np.inner(E_PIsi.T, np.inner(C, St_reg_inv.T ).T ).T ).T )
        Z = np.inner( np.inner( np.inner( np.inner( np.diag(1.0 / np.sqrt(Lr)), Vtr.T), E_PIsi), C.T), St_reg_inv)

        Z = (Z.T - (Z.sum(axis=1) / (1.0*Z.shape[1])) ).T
        if self.print_timing: print 'KernelFisher.fit: Z took', time.time() - ts
#......... part of the code omitted here .........
Developer: andrewjohnlowe | Project: JetImages | Lines: 101 | Source: fisher.py


Example 15: fit_multiclass

    def fit_multiclass(self, X, y, use_total_scatter=False, solution_norm="N", sigma_sqrd=1e-8, tol=1.0e-3, print_timing=False):
        """
        Fit the Fisher Discriminant model according to the given training data and parameters.
        Based on (but, depending on options, not exactly the same as) "Algorithm 4" in
        Zhang et al., 'Regularized Discriminant Analysis, Ridge Regression and Beyond', Journal of Machine Learning Research 11 (2010) 2199-2228.
        NOTE: setting norm_covariance=False, use_total_scatter=True, and solution_norm='A' or 'B' gives the algorithm from the paper.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        use_total_scatter : boolean
            If True then use total scatter matrix St = Sum_i (x_i - m)(x_i - m).T instead of Sw
            If False, use Sw = Sum_{c=1... n_classes} Sum_{i; x in class c} norm_c (x_i - m_c)(x_i - m_c).T
                      where norm_c = 1/N_samples_class_c if norm_covariance=True, else norm_c = 1
        solution_norm : string
            One of three norms, "A", "B", or "N", where "N" means normalize to 1. "A" and "B" (see the paper reference)
            have normalizations that may be important when considering n_classes > 2
        sigma_sqrd : float
            Smoothing regularization parameter: the size of singular value at which smoothing becomes important.
            NOTE: this is a fraction when norm_covariance=False, as a priori the scale of the singular values is not known in that case
        tol : float
            Used for the truncated SVD of Sw; essentially a form of regularization. The tol for SVD(R) is fixed at 1e-6 right now
        print_timing: boolean
            print time for several matrix operations in the algorithm
        """
        X, y = check_arrays(X, y, sparse_format='dense')
        self.classes_, y = unique( y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        n_samples_perclass = np.bincount(y)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            self.priors_ = np.bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors

        if not any( np.array(["A","B","N"])==solution_norm ):
             print 'WARNING: solution_norm must be one of ["A","B","N"]! Exiting'
             sys.exit(2)

        ts = time.time()
                    
        self.means_ = []
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            self.means_.append(np.asarray(meang))
        if print_timing: print 'fit_multiclass: means took', time.time() - ts

        ts = time.time()
        PI_diag = np.diag( 1.0*n_samples_perclass )                                       # shape(PI_diag) = n_classes x n_classes
        PI_inv = np.diag( 1.0 / (1.0*n_samples_perclass) )                                # shape(PI_inv) = n_classes x n_classes
        PI_sqrt_inv = np.sqrt( PI_inv )                                                   # shape(PI_sqrt_inv) = n_classes x n_classes
        #H = np.identity(n_samples) - (1.0/(1.0*n_samples))*np.ones((n_samples,n_samples))
        E=np.zeros( (n_samples,n_classes) )
        E[[range(n_samples),y]]=1
        if print_timing: print 'fit_multiclass: matrices took', time.time() - ts


        ts = time.time()
        #note: computation of this is fast, can always do it inline, if memory consumption gets large
        Xt_H = X.T - (1.0/(1.0*n_samples))*np.repeat( np.array([X.T.sum(1)]).T, n_samples, axis=1)    # shape(Xt_H) = n_features x n_samples
        if print_timing: print 'fit_multiclass: Xt_H took', time.time() - ts

        ts = time.time()
        #####################################################################################################################
        #Sb = X.T * H * E * PI_inv * E.T * H * X = (X.T * H * E * PI_sqrt_inv) * (X.T * H * E * PI_sqrt_inv).T
        #if norm_covariance: Sb = X.T * H * E * PI_inv * PI_inv * E.T * H * X = (X.T * H * E * PI_inv) * (X.T * H * E * PI_inv).T
        #This norm actually doesn't matter in 2-class; I think it just becomes an overall scaling, which gets normalized away
        #I expect it doesn't matter for multiclass either... but not sure
        #to be clear, multi-class fisher does not norm! but then it's harder to set the regularization factor for Sw
        #####################################################################################################################

        Xt_H_E_PIsi = None                                                      # shape(Xt_H_E_PIsi) = n_features x n_classes
        if self.norm_covariance:
           Xt_H_E_PIsi =  np.dot(Xt_H, np.dot(E, PI_inv) )
        else:
           Xt_H_E_PIsi = np.dot(Xt_H, np.dot(E, PI_sqrt_inv) )
        if print_timing: print 'fit_multiclass: Xt_H_E_PIsi took', time.time() - ts

        
        #St_reg = ( np.dot(X.T np.dot(H, X)) - (sigma*sigma)*np.identity(n_features))

        ts = time.time()
        #####################################################################################################################
        #Sw = X.T * [ 1 - E*PI_inv*E.T ] * X = X.T * X - M.T * PI * M
        # if norm_covariance: Sw = X.T * [ P - E*PI_inv*PI_inv*E.T ] * X = X.T *P * X - M.T * M
        #####################################################################################################################
        M = np.asarray(self.means_)                                              # shape(M) = n_classes x n_features
        #P = np.diag( np.dot(E, 1.0/(1.0*n_samples_perclass)) )
        P_vec = np.array([np.dot(E, 1.0/(1.0*n_samples_perclass))]).T            # shape(P_vec) = n_samples x 1
        Sw=None                                                                  # shape(Sw) = n_features x n_features 
        if not use_total_scatter:
            if self.norm_covariance:
                #Sw = np.inner( np.inner(X.T, P), X.T) - np.dot( M.T, M)
#......... part of the code omitted here .........
Developer: andrewjohnlowe | Project: JetImages | Lines: 101 | Source: fisher.py


Example 16: _validate_y

    def _validate_y(self, y):
        y = column_or_1d(y, warn=True)
        self.classes_, y = unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)

        return y
Developer: orazaro | Project: kgml | Lines: 6 | Source: bag.py


Example 17: fit

 def fit(self, X, y):
     self.classes_, indices = unique(y, return_inverse=True)
     self.theta_ = logistic.lbfgs_logistic_regression(X, y, alpha=self.alpha, n_iter=self.n_iter)
     return self
Developer: dr-dos-ok | Project: pulearning | Lines: 4 | Source: lr.py


Example 18: test_compute_class_weight

def test_compute_class_weight():
    """Test (and demo) compute_class_weight."""
    classes, y = unique(np.asarray([2, 2, 2, 3, 3, 4]), return_inverse=True)
    cw = compute_class_weight("auto", classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_true(cw[0] < cw[1] < cw[2])
Developer: 2011200799 | Project: scikit-learn | Lines: 6 | Source: test_class_weight.py



Note: the sklearn.utils.fixes.unique examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many programmers; copyright of the source code remains with the original authors. For redistribution and use, please refer to the corresponding project's License; do not reproduce without permission.

