Python utils.gen_even_slices Function Code Examples


This article collects typical usage examples of the Python function sklearn.utils.gen_even_slices. If you are unsure what gen_even_slices does, how to call it, or want to see it used in context, the examples curated below should help.



The following 20 code examples of gen_even_slices were gathered from open-source projects and are ordered by popularity.
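Before the project code, here is a minimal sketch of the function itself (a quick orientation, assuming only scikit-learn is installed; the exact slice reprs are illustrative):

from sklearn.utils import gen_even_slices

# Split 10 items into 3 contiguous, nearly even slices.
print(list(gen_even_slices(10, 3)))
# -> [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]

# The optional third argument, n_samples, clips slices so they never
# run past the real data; several examples below rely on this when
# n_batches * batch_size overshoots n_samples.
print(list(gen_even_slices(2 * 100, 2, n_samples=150)))
# -> [slice(0, 100, None), slice(100, 150, None)]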

Example 1: test_gen_even_slices

from itertools import chain

from numpy.testing import assert_array_equal
from sklearn.utils import gen_even_slices
from sklearn.utils.testing import assert_raises_regex


def test_gen_even_slices():
    # check that gen_even_slices contains all samples
    some_range = range(10)
    joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
    assert_array_equal(some_range, joined_range)

    # check that passing negative n_chunks raises an error
    slices = gen_even_slices(10, -1)
    assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be" " >=1", next, slices)
Author: haadkhan, Project: cerebri, Lines: 9, Source: test_utils.py


Example 2: _sequential_learning

    def _sequential_learning(self, X, Y, w):
        n_samples = len(X)
        objective, positive_slacks = 0, 0
        if self.batch_size in [None, 1]:
            # online learning
            for x, y in zip(X, Y):
                y_hat, delta_psi, slack, loss = find_constraint(self.model, x, y, w)
                objective += slack
                if slack > 0:
                    positive_slacks += 1
                self._solve_subgradient(delta_psi, n_samples, w)
        else:
            # mini batch learning
            if self.batch_size == -1:
                slices = [slice(0, len(X))]
            else:
                n_batches = int(np.ceil(float(len(X)) / self.batch_size))
                slices = gen_even_slices(n_samples, n_batches)
            for batch in slices:
                X_b = X[batch]
                Y_b = Y[batch]
                Y_hat = self.model.batch_loss_augmented_inference(X_b, Y_b, w, relaxed=True)
                delta_psi = self.model.batch_psi(X_b, Y_b) - self.model.batch_psi(X_b, Y_hat)
                loss = np.sum(self.model.batch_loss(Y_b, Y_hat))

                violation = np.maximum(0, loss - np.dot(w, delta_psi))
                objective += violation
                positive_slacks += self.batch_size
                self._solve_subgradient(delta_psi / len(X_b), n_samples, w)
        return objective, positive_slacks, w
Author: huyng, Project: pystruct, Lines: 30, Source: subgradient_ssvm.py


Example 3: _parallel_learning

    def _parallel_learning(self, X, Y, w):
        n_samples = len(X)
        objective, positive_slacks = 0, 0
        verbose = max(0, self.verbose - 3)
        if self.batch_size is not None:
            raise ValueError("If n_jobs != 1, batch_size needs to" "be None")
        # generate batches of size n_jobs
        # to speed up inference
        if self.n_jobs == -1:
            n_jobs = cpu_count()
        else:
            n_jobs = self.n_jobs

        n_batches = int(np.ceil(float(len(X)) / n_jobs))
        slices = gen_even_slices(n_samples, n_batches)
        for batch in slices:
            X_b = X[batch]
            Y_b = Y[batch]
            candidate_constraints = Parallel(n_jobs=self.n_jobs, verbose=verbose)(
                delayed(find_constraint)(self.model, x, y, w) for x, y in zip(X_b, Y_b)
            )
            dpsi = np.zeros(self.model.size_psi)
            for x, y, constraint in zip(X_b, Y_b, candidate_constraints):
                y_hat, delta_psi, slack, loss = constraint
                if slack > 0:
                    objective += slack
                    dpsi += delta_psi
                    positive_slacks += 1
            w = self._solve_subgradient(dpsi, n_samples, w)
        return objective, positive_slacks, w
Author: huyng, Project: pystruct, Lines: 30, Source: subgradient_ssvm.py


Example 4: train

    def train(self, X):
        n_samples, n_features = X.shape
        n_batches = int(np.ceil(float(n_samples) / self.batchsize))

        for i in range(self.numepochs):
            # visit the samples in a fresh random order each epoch
            kk = np.random.permutation(n_samples)
            err = 0

            for l in gen_even_slices(n_samples, n_batches):
                batch = X[kk[l], :]
                v1 = batch                                   # n_samples x n_visible
                h1 = sigmrnd(np.dot(v1, self.W.T) + self.c)  # n_samples x n_hidden
                v2 = sigmrnd(np.dot(h1, self.W) + self.b)    # n_samples x n_visible
                h2 = sigm(np.dot(v2, self.W.T) + self.c)     # n_samples x n_hidden

                c1 = np.dot(h1.T, v1)  # n_hidden x n_visible
                c2 = np.dot(h2.T, v2)  # n_hidden x n_visible

                self.vW = self.momentum * self.vW + self.alpha * (c1 - c2) / self.batchsize
                self.vb = self.momentum * self.vb + self.alpha * np.sum(v1 - v2, axis=0) / self.batchsize
                self.vc = self.momentum * self.vc + self.alpha * np.sum(h1 - h2, axis=0) / self.batchsize

                self.W = self.W + self.vW  # n_hidden x n_visible
                self.b = self.b + self.vb  # n_visible
                self.c = self.c + self.vc  # n_hidden

                err = err + np.sum(np.power(v1 - v2, 2)) / self.batchsize

            print('epoch ' + str(i) + '/' + str(self.numepochs) +
                  '. Average reconstruction error is: ' + str(err / n_batches))
Author: maniteja123, Project: DeepLearning, Lines: 28, Source: rbm.py


Example 5: fit

    def fit(self, X, y=None):
        """Fit the model to the data X.

        Parameters
        ----------
        X : {array-like, sparse matrix} shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : BernoulliRBM
            The fitted model.
        """
        X, = check_arrays(X, sparse_format='csr', dtype=np.float)
        n_samples = X.shape[0]
        rng = check_random_state(self.random_state)

        self.components_ = np.asarray(
            rng.normal(0, 0.01, (self.n_components, X.shape[1])),
            order='fortran')
        self.intercept_hidden_ = np.zeros(self.n_components, )
        self.intercept_visible_ = np.zeros(X.shape[1], )
        self.h_samples_ = np.zeros((self.batch_size, self.n_components))

        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        batch_slices = list(gen_even_slices(n_batches * self.batch_size,
                                            n_batches, n_samples))

        for iteration in xrange(1, self.n_iter + 1):
            for batch_slice in batch_slices:
                self._fit(X[batch_slice], rng)
        return self
Author: Markusjsommer, Project: classnotes, Lines: 32, Source: rbms.py


Example 6: _mean_of_squares

import numpy as np
from sklearn.utils import gen_even_slices


def _mean_of_squares(signals, n_batches=20):
    """Compute mean of squares for each signal.
    This function is equivalent to

        var = np.copy(signals)
        var **= 2
        var = var.mean(axis=0)

    but uses a lot less memory.

    Parameters
    ==========
    signals : numpy.ndarray, shape (n_samples, n_features)
        signal whose mean of squares must be computed.

    n_batches : int, optional
        number of batches to use in the computation. Tweaking this value
        can lead to variation of memory usage and computation time. The higher
        the value, the lower the memory consumption.

    """
    # No batching for small arrays
    if signals.shape[1] < 500:
        n_batches = 1

    # Fastest for C order
    var = np.empty(signals.shape[1])
    for batch in gen_even_slices(signals.shape[1], n_batches):
        tvar = np.copy(signals[:, batch])
        tvar **= 2
        var[batch] = tvar.mean(axis=0)

    return var
Author: mikbuch, Project: pymri, Lines: 33, Source: signal.py
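A quick sanity check for the function above (a hedged sketch, assuming numpy and _mean_of_squares are in scope):

import numpy as np

signals = np.random.RandomState(0).randn(200, 1000)
naive = (signals ** 2).mean(axis=0)                # memory-hungry form
batched = _mean_of_squares(signals, n_batches=20)  # batched form
assert np.allclose(naive, batched)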


Example 7: _mini_batch_compute

 def _mini_batch_compute(self, n_samples):
     '''
     Compute equally sized minibatch index slices.
     This method is taken from sklearn/neural_network/rbm.py.
     '''
     n_batches = int(np.ceil(float(n_samples) / self.batch_size))
     batch_slices = list(gen_even_slices(n_batches * self.batch_size,n_batches, n_samples))
     return batch_slices  
Author: AmazaspShumik, Project: sklearn-bayes, Lines: 8, Source: rbm.py
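To make this recurring three-argument pattern concrete, a small sketch with numbers chosen purely for illustration:

import numpy as np
from sklearn.utils import gen_even_slices

n_samples, batch_size = 105, 10
n_batches = int(np.ceil(float(n_samples) / batch_size))  # 11
# The nominal total n_batches * batch_size = 110 overshoots the data;
# passing n_samples clips the final slice back to the real end.
slices = list(gen_even_slices(n_batches * batch_size, n_batches, n_samples))
print(slices[0], slices[-1])
# -> slice(0, 10, None) slice(100, 105, None)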


Example 8: fit

 def fit(self, data):
   num_examples = data.shape[0]
   self.h_samples_ = np.zeros((self.batch_size, self.num_hidden))
   n_batches = int(np.ceil(float(num_examples) / self.batch_size))
   batch_slices = list(gen_even_slices(n_batches * self.batch_size,
                                       n_batches, num_examples))
   
   for iteration in xrange(1, self.max_epochs + 1):
       for batch_slice in batch_slices:
           self._fit(data[batch_slice])
Author: Markusjsommer, Project: classnotes, Lines: 10, Source: rbmp.py


Example 9: check_gen_even_slices

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils import gen_even_slices


def check_gen_even_slices():
    # slices_bounds_check is assumed to be defined elsewhere in this test
    # module; it returns "passes" when no slice runs past the matrix bounds.
    even = csr_matrix((1032,1030))
    odd = csr_matrix((1033,1033))
    batch_size = 100

    even_batches = int(np.ceil(float(even.shape[0]) / batch_size))
    odd_batches = int(np.ceil(float(odd.shape[0]) / batch_size))

    odd_slices = list(gen_even_slices(odd_batches * batch_size,
                                            odd_batches, odd.shape[0]))

    even_slices = list(gen_even_slices(even_batches * batch_size,
                                            even_batches, even.shape[0]))

    assert slices_bounds_check(even,even_slices) == "passes", "Fails on Even number of rows"
    assert slices_bounds_check(odd,odd_slices) == "passes", "Fails on Odd number of rows" 

    print("OK")
Author: jfelectron, Project: scikit-learn, Lines: 19, Source: test_gen_even_slices.py


Example 10: _detrend

import numpy as np
from sklearn.utils import gen_even_slices


def _detrend(signals, inplace=False, type="linear", n_batches=10):
    """Detrend columns of input array.

    Signals are supposed to be columns of `signals`.
    This function is significantly faster than scipy.signal.detrend in this
    case and uses a lot less memory.

    Parameters
    ==========
    signals : numpy.ndarray
        This parameter must be two-dimensional.
        Signals to detrend. A signal is a column.

    inplace : bool, optional
        Tells if the computation must be made inplace or not (default
        False).

    type : str, optional
        Detrending type ("linear" or "constant").
        See also scipy.signal.detrend.

    n_batches : int, optional
        number of batches to use in the computation. Tweaking this value
        can lead to variation of memory usage and computation time. The higher
        the value, the lower the memory consumption.

    Returns
    =======
    detrended_signals: numpy.ndarray
        Detrended signals. The shape is that of 'signals'.
    """
    if not inplace:
        signals = signals.copy()

    signals -= np.mean(signals, axis=0)
    if type == "linear":
        # Keeping "signals" dtype avoids some type conversion further down,
        # and can save a lot of memory if dtype is single-precision.
        regressor = np.arange(signals.shape[0], dtype=signals.dtype)
        regressor -= regressor.mean()
        std = np.sqrt((regressor ** 2).sum())
        # avoid numerical problems
        if not std < np.finfo(np.float).eps:
            regressor /= std
        regressor = regressor[:, np.newaxis]

        # No batching for small arrays
        if signals.shape[1] < 500:
            n_batches = 1

        # This is fastest for C order.
        for batch in gen_even_slices(signals.shape[1], n_batches):
            signals[:, batch] -= np.dot(regressor[:, 0], signals[:, batch]
                                        ) * regressor
    return signals
Author: douardda, Project: nilearn, Lines: 55, Source: signal.py
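As a plausibility check for the function above (a hedged sketch, assuming _detrend is in scope; agreement with scipy should hold up to floating-point error):

import numpy as np
from scipy import signal as sp_signal

x = np.random.RandomState(0).randn(500, 600)
ours = _detrend(x, inplace=False, type="linear")
ref = sp_signal.detrend(x, axis=0, type="linear")
assert np.allclose(ours, ref)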


Example 11: load_data

def load_data(name, partition_id, n_partitions):
    """load partition of data into global var `name`"""
    from sklearn.datasets import fetch_20newsgroups_vectorized
    from sklearn.utils import gen_even_slices
    dataset = fetch_20newsgroups_vectorized('test')
    size = dataset.data.shape[0]
    slices = list(gen_even_slices(size, n_partitions))
    
    part = dataset.data[slices[partition_id]]
    # put it in globals
    globals().update({name : part})
    return part.shape
Author: minrk, Project: pycon-pydata-sprint, Lines: 12, Source: parallel_kmeans.py
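The partitioning logic can be previewed without downloading the dataset (the document count below is hypothetical; the real size comes from fetch_20newsgroups_vectorized):

from sklearn.utils import gen_even_slices

n_docs, n_partitions = 7532, 4  # hypothetical corpus size
slices = list(gen_even_slices(n_docs, n_partitions))
print([s.stop - s.start for s in slices])
# -> [1883, 1883, 1883, 1883]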


Example 12: _parallel_inner_prod

import numpy as np
from joblib import Parallel, delayed, cpu_count

from sklearn.utils import gen_even_slices


def _parallel_inner_prod(X, Y, func, n_jobs, **kwds):
    """Break the pairwise matrix in n_jobs even slices
    and compute them in parallel"""
    if n_jobs < 0:
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)

    if Y is None:
        Y = X

    ret = Parallel(n_jobs=n_jobs, verbose=0)(
        delayed(func)(X[s], Y, **kwds)
        for s in gen_even_slices(len(X), n_jobs))

    return np.hstack(ret)
Author: EderSantana, Project: adaptive_kernel_methods, Lines: 14, Source: slash2.py
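A usage sketch under stated assumptions: row_dot_sums is a made-up helper that returns one value per row of its first argument, so the per-slice results concatenate correctly with np.hstack, and _parallel_inner_prod is assumed to be importable from the module above.

import numpy as np

def row_dot_sums(A, B):
    # One scalar per row of A: the inner product of that row
    # with the column-wise sum of B.
    return A.dot(B.sum(axis=0))

X = np.random.RandomState(0).randn(1000, 16)
out = _parallel_inner_prod(X, None, row_dot_sums, n_jobs=2)
print(out.shape)  # -> (1000,)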


Example 13: fit

    def fit(self, X):
        """Fit SGVB to the data

        Parameters
        ----------
        X : array-like, shape (N, n_features)
            The data that the SGVB needs to fit on

        Returns
        -------
        list_lowerbound : list of float
            Lower bound per iteration (averaged over the number of samples).
        """
        X, = check_arrays(X, sparse_format='csr', dtype=np.float)
        [N, dimX] = X.shape
        rng = check_random_state(self.random_state)

        self._initParams(dimX, rng)
        list_lowerbound = np.array([])

        n_batches = int(np.ceil(float(N) / self.batch_size))
        batch_slices = list(gen_even_slices(n_batches * self.batch_size,
                                            n_batches, N))

        if self.verbose:
            print "Initializing gradients for AdaGrad"
        for i in xrange(10):
            self._initH(X[batch_slices[i]], rng)

        begin = time.time()
        for iteration in xrange(1, self.n_iter + 1):
            iteration_lowerbound = 0

            for batch_slice in batch_slices:
                lowerbound = self._updateParams(X[batch_slice], N, rng)
                iteration_lowerbound += lowerbound

            if self.verbose:
                end = time.time()
                print("[%s] Iteration %d, lower bound = %.2f,"
                      " time = %.2fs"
                      % (self.__class__.__name__, iteration,
                         iteration_lowerbound / N, end - begin))
                begin = end

            list_lowerbound = np.append(
                list_lowerbound, iteration_lowerbound / N)
        return list_lowerbound
Author: adaixiaoshuai, Project: Variational-Autoencoder, Lines: 48, Source: VariationalAutoencoder_withSK.py


Example 14: fit

    def fit(self, X, y=None):
        """Fit the model to the data X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : BernoulliRBM
            The fitted model.
        """
        X, = check_arrays(X, sparse_format='csc', dtype=np.float)
        n_samples = X.shape[0]
        rng = check_random_state(self.random_state)

        self.components_ = np.asarray(
            rng.normal(0, 0.01, (self.n_components, X.shape[1])),
            order='fortran')
        self.intercept_hidden_ = np.zeros(self.n_components, )
        self.intercept_visible_ = np.zeros(X.shape[1], )
        self.h_samples_ = np.zeros((self.batch_size, self.n_components))

        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        batch_slices = list(gen_even_slices(n_batches * self.batch_size,
                                            n_batches))
        verbose = self.verbose
        for iteration in xrange(self.n_iter):
            pl = 0.
            if verbose:
                begin = time.time()

            for batch_slice in batch_slices:
                pl_batch = self._fit(X[batch_slice], rng)

                if verbose:
                    pl += pl_batch.sum()

            if verbose:
                pl /= n_samples
                end = time.time()
                print("Iteration %d, pseudo-likelihood = %.2f, time = %.2fs"
                      % (iteration, pl, end - begin))

        return self
Author: shuokay, Project: spark-sklearn, Lines: 46, Source: rbm.py


Example 15: fit

    def fit(self, X, y, max_epochs, shuffle_data, staged_sample=None, verbose=0):
        # get all sizes
        n_samples, n_features = X.shape
        if y.shape[0] != n_samples:
            raise ValueError("Shapes of X and y don't fit.")
        self.n_outs = y.shape[1]
        # n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_batches = n_samples / self.batch_size
        if n_samples % self.batch_size != 0:
            warnings.warn("Discarding some samples: \
                sample size not divisible by chunk size.")
        n_iterations = int(max_epochs * n_batches)

        if shuffle_data:
            X, y = shuffle(X, y)

        # generate batch slices
        batch_slices = list(
            gen_even_slices(n_batches * self.batch_size, n_batches))

        # generate weights.
        # TODO: smart initialization
        self.weights1_ = np.random.uniform(
            size=(n_features, self.n_hidden)) / np.sqrt(n_features)
        self.bias1_ = np.zeros(self.n_hidden)
        self.weights2_ = np.random.uniform(
            size=(self.n_hidden, self.n_outs)) / np.sqrt(self.n_hidden)
        self.bias2_ = np.zeros(self.n_outs)

        # preallocate memory
        x_hidden = np.empty((self.batch_size, self.n_hidden))
        delta_h = np.empty((self.batch_size, self.n_hidden))
        x_output = np.empty((self.batch_size, self.n_outs))
        delta_o = np.empty((self.batch_size, self.n_outs))

        self.oo_score = []
        # main loop
        for i, batch_slice in izip(xrange(n_iterations), cycle(batch_slices)):
            self._forward(i, X, batch_slice, x_hidden, x_output, testing=False)
            self._backward(
                i, X, y, batch_slice, x_hidden, x_output, delta_o, delta_h)
            if staged_sample is not None:
                self.oo_score.append(self.predict(staged_sample))
        return self
Author: JakeMick, Project: sk-mlp, Lines: 44, Source: mlp.py


Example 16: fit

    def fit(self, X, y=None):
        """Fit the model to the data X.

        Parameters
        ----------
        X : {array-like, sparse matrix} shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : BernoulliRBM
            The fitted model.
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        n_samples = X.shape[0]
        rng = check_random_state(self.random_state)

        self.components_ = np.asarray(
            rng.normal(0, 0.01, (self.n_components, X.shape[1])),
            order='fortran')
        self.intercept_hidden_ = np.zeros(self.n_components, )
        self.intercept_visible_ = np.zeros(X.shape[1], )
        self.h_samples_ = np.zeros((self.batch_size, self.n_components))

        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        batch_slices = list(gen_even_slices(n_batches * self.batch_size,
                                            n_batches, n_samples))
        verbose = self.verbose
        begin = time.time()
        for iteration in xrange(1, self.n_iter + 1):
            for batch_slice in batch_slices:
                self._fit(X[batch_slice], rng)

            if verbose:
                end = time.time()
                print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
                      " time = %.2fs"
                      % (type(self).__name__, iteration,
                         self.score_samples(X).mean(), end - begin))
                begin = end

        return self
Author: Meyenhofer, Project: pattern-recognition-2016, Lines: 42, Source: rbm.py


Example 17: fit

    def fit(self, X, y, max_epochs, shuffle_data, verbose=0):
        # get all sizes
        n_samples, n_features = X.shape
        if y.shape[0] != n_samples:
            raise ValueError("Shapes of X and y don't fit.")
        self.n_outs = y.shape[1]
        #n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_batches = n_samples / self.batch_size
        if n_samples % self.batch_size != 0:
            warnings.warn("Discarding some samples: \
                sample size not divisible by chunk size.")
        n_iterations = int(max_epochs * n_batches)

        if shuffle_data:
            X, y = shuffle(X, y)

        # generate batch slices
        batch_slices = list(gen_even_slices(n_batches * self.batch_size, n_batches))

        # generate weights.
        unif_param = np.sqrt(6) / np.sqrt(n_features + self.n_hidden)  # as per Glorot & Bengio, AISTATS 2010
        self.weights1_ = np.random.uniform(low=-unif_param, high=unif_param, size=(n_features, self.n_hidden))
        #self.weights1_ = np.random.uniform(size=(n_features, self.n_hidden))/np.sqrt(n_features)
        self.bias1_ = np.zeros(self.n_hidden)
        unif_param = np.sqrt(6) / np.sqrt(self.n_hidden + self.n_outs)  # as per Glorot & Bengio, AISTATS 2010
        self.weights2_ = np.random.uniform(low=-unif_param, high=unif_param, size=(self.n_hidden, self.n_outs))
        #self.weights2_ = np.random.uniform(size=(self.n_hidden, self.n_outs))/np.sqrt(self.n_hidden)
        self.bias2_ = np.zeros(self.n_outs)

        # preallocate memory
        x_hidden = np.empty((self.batch_size, self.n_hidden))
        delta_h = np.empty((self.batch_size, self.n_hidden))
        x_output = np.empty((self.batch_size, self.n_outs))
        delta_o = np.empty((self.batch_size, self.n_outs))

        # main loop
        for i, batch_slice in izip(xrange(n_iterations), cycle(batch_slices)):
            self._forward(i, X, batch_slice, x_hidden, x_output)
            self._backward(i, X, y, batch_slice, x_hidden, x_output, delta_o, delta_h)
        return self
Author: asaluja, Project: cca-mt, Lines: 40, Source: mlp.py


Example 18: _e_step

    def _e_step(self, X, cal_delta):
        """
        E-step

        set `cal_delta == True` when we need to run _m_step
        for inference, set it to False
        """

        # run the E-step in parallel
        if self.n_jobs == -1:
            n_jobs = cpu_count()
        else:
            n_jobs = self.n_jobs

        results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_update_gamma)
            (X[idx_slice, :], self.expElogbeta, self.alpha,
             self.rng, 100, self.mean_change_tol, cal_delta)
            for idx_slice in gen_even_slices(X.shape[0], n_jobs))

        # merge result
        gammas, deltas = zip(*results)
        gamma = np.vstack(gammas)

        if cal_delta:
            # This step finishes computing the sufficient statistics for the
            # M step, so that
            # sstats[k, w] = \sum_d n_{dw} * phi_{dwk}
            # = \sum_d n_{dw} * exp{Elogtheta_{dk} + Elogbeta_{kw}} / phinorm_{dw}.
            delta_component = np.zeros(self.components_.shape)
            for delta in deltas:
                delta_component += delta
            delta_component *= self.expElogbeta
        else:
            delta_component = None

        return (gamma, delta_component)
Author: praveenkottayi, Project: topicModels, Lines: 37, Source: lda.py


Example 19: radius_neighbors

def radius_neighbors(X=None, radius=None, return_distance=True):
    """Finds the neighbors within a given radius of a point or points.
    Return the indices and distances of each point from the dataset
    lying in a ball with size ``radius`` around the points of the query
    array. Points lying on the boundary are included in the results.
    The result points are *not* necessarily sorted by distance to their
    query point.
    Parameters
    ----------
    X : array-like, (n_samples, n_features), optional
        The query point or points.
        If not provided, neighbors of each indexed point are returned.
        In this case, the query point is not considered its own neighbor.
    radius : float
        Limiting distance of neighbors to return.
        (default is the value passed to the constructor).
    return_distance : boolean, optional. Defaults to True.
        If False, distances will not be returned
    Returns
    -------
    dist : array, shape (n_samples,) of arrays
        Array representing the distances to each point, only present if
        return_distance=True. The distance values are computed according
        to the ``metric`` constructor parameter.
    ind : array, shape (n_samples,) of arrays
        An array of arrays of indices of the approximate nearest points
        from the population matrix that lie within a ball of size
        ``radius`` around the query points.
    Examples
    --------
    In the following example, we construct a NeighborsClassifier
    class from an array representing our data set and ask who's
    the closest point to [1, 1, 1]:
    >>> import numpy as np
    >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
    >>> from sklearn.neighbors import NearestNeighbors
    >>> neigh = NearestNeighbors(radius=1.6)
    >>> neigh.fit(samples) # doctest: +ELLIPSIS
    NearestNeighbors(algorithm='auto', leaf_size=30, ...)
    >>> rng = neigh.radius_neighbors([[1., 1., 1.]])
    >>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
    [ 1.5  0.5]
    >>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
    [1 2]
    The first array returned contains the distances to all points which
    are closer than 1.6, while the second array returned contains their
    indices.  In general, multiple points can be queried at the same time.
    Notes
    -----
    Because the number of neighbors of each point is not necessarily
    equal, the results for multiple query points cannot be fit in a
    standard data array.
    For efficiency, `radius_neighbors` returns arrays of objects, where
    each object is a 1D array of indices or distances.
    """
    n_samples = X.shape[0]
    import multiprocessing as mp

    import numpy as np
    from joblib import delayed, Parallel
    from sklearn.utils import gen_even_slices

    n_jobs = max(mp.cpu_count(), n_samples)
    fd = delayed(_func)  # _func: pairwise helper assumed defined at module level
    dist = Parallel(n_jobs=n_jobs, verbose=0)(
        fd(X, X[s])
        for s in gen_even_slices(X.shape[0], n_jobs))

    neigh_ind_list = [np.where(d > 0)[0] for d in np.hstack(dist)]

    # See https://github.com/numpy/numpy/issues/5456
    # if you want to understand why this is initialized this way.
    neigh_ind = np.empty(n_samples, dtype='object')
    neigh_ind[:] = neigh_ind_list

    return neigh_ind
Author: slipguru, Project: icing, Lines: 75, Source: neighbors.py
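One detail worth noting about the n_jobs = max(mp.cpu_count(), n_samples) line above: gen_even_slices silently skips empty packs, so asking for more slices than items still yields at most one slice per item (a small sketch, based on scikit-learn's current behavior):

from sklearn.utils import gen_even_slices

print(list(gen_even_slices(5, 8)))
# -> [slice(0, 1, None), slice(1, 2, None), slice(2, 3, None),
#     slice(3, 4, None), slice(4, 5, None)]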


Example 20: fit

    def fit(self, X, Y):
        """Fit the model to the data X and target y.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        Y : numpy array of shape [n_samples]
            Subset of the target values.

        Returns
        -------
        self
        """
        self.n_layers = len(self.n_hidden)
        X = atleast2d_or_csr(X, dtype=np.float64, order="C")
        n_outputs = Y.shape[1]
        n_samples, n_features = X.shape
        self._init_fit(X, Y, n_features, n_outputs)
        self._init_param()
        if self.shuffle_data:
            X, Y = shuffle(X, Y, random_state=self.random_state)
        self.batch_size = np.clip(self.batch_size, 0, n_samples)
        n_batches = n_samples / self.batch_size
        batch_slices = list(
            gen_even_slices(
                n_batches *
                self.batch_size,
                n_batches))
        # l-bfgs does not work well with batches
        if self.algorithm == 'l-bfgs':
            self.batch_size = n_samples
        # preallocate memory
        a_hidden = [0] * self.n_layers
        a_output = np.empty((self.batch_size, n_outputs))
        delta_o = np.empty((self.batch_size, n_outputs))
        # print 'Fine tuning...'
        if self.algorithm == 'sgd':
            eta = self.eta0
            t = 1
            prev_cost = np.inf
            for i in xrange(self.max_iter):
                for batch_slice in batch_slices:
                    cost, eta = self.backprop_sgd(
                        X[batch_slice],
                        Y[batch_slice],
                        self.batch_size,
                        a_hidden,
                        a_output,
                        delta_o,
                        t,
                        eta)
                if self.verbose:
                        print("Iteration %d, cost = %.2f"
                              % (i, cost))
                if abs(cost - prev_cost) < self.tol:
                    break
                prev_cost = cost
                t += 1
        elif self.algorithm == 'l-bfgs':
                self._backprop_lbfgs(
                    X, Y, n_features, n_outputs, n_samples, a_hidden,
                    a_output,
                    delta_o)
        return self
Author: ddofer, Project: NeuralNetworks, Lines: 67, Source: dbn.py



Note: the sklearn.utils.gen_even_slices examples above were collected from open-source projects hosted on GitHub and similar platforms. The snippets remain the copyright of their original authors; consult each source project's license before reusing or redistributing them.

