
Python extmath.fast_dot Function Code Examples


This article collects typical usage examples of the Python function sklearn.utils.extmath.fast_dot. If you are wondering what exactly fast_dot does, how to call it, or what real-world uses look like, the curated examples below should help.



Twenty code examples of the fast_dot function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
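For orientation: fast_dot was a helper in older scikit-learn releases (up to 0.18) that routed matrix products to BLAS when the dtypes allowed it, falling back to np.dot otherwise; it was deprecated and later removed once NumPy itself gained that behavior, so on current versions np.dot is the drop-in replacement. A minimal, hedged usage sketch (the try/except fallback is our addition for running on modern versions):

import numpy as np

try:
    # Available in scikit-learn <= 0.18; deprecated in 0.19 and removed later.
    from sklearn.utils.extmath import fast_dot
except ImportError:
    fast_dot = np.dot  # drop-in replacement on modern versions

A = np.random.rand(100, 50)
B = np.random.rand(50, 20)

C = fast_dot(A, B)                  # identical result to np.dot(A, B)
assert np.allclose(C, np.dot(A, B))
print(C.shape)                      # (100, 20)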

Example 1: gain

    def gain(self, X, y):
        H = np.zeros(X.shape[1])
        y = np.array(y)

        i = 0
        batch = 100000
        n_features = X.shape[1]
        n_examples = X.shape[0]
        p = np.zeros(shape=(2, 2, X.shape[1]))
        while i < n_features:
            if n_features - i < batch:
                batch = n_features - i
            X_batch_raw = X[:, i : (i + batch)]
            X_batch = X_batch_raw.toarray()

            p[1, 1, i : (i + batch)] = y * X_batch_raw
            p[1, 0, i : (i + batch)] = np.fabs(y - 1) * X_batch_raw
            p[0, 1, i : (i + batch)] = fast_dot(y, np.fabs(X_batch - 1))
            p[0, 0, i : (i + batch)] = fast_dot(np.fabs(y - 1), np.fabs(X_batch - 1))

            p_batch = p[:, :, i : (i + batch)] / n_examples
            p_sum = np.sum(p_batch, axis=0)
            s = X_batch_raw.sum(axis=0)
            p_x = np.array([s, 1 - s])
            H[i : (i + batch)] = np.sum(p_batch * np.log(p_batch + self.smoother)) - 4 * np.sum(np.multiply(p_x, p_sum))
            i += batch
            print(i / X.shape[1])
        return H
Developer: cfga, Project: DDE, Lines: 28, Source: ig_modified.py
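The batched loop above is really building a 2×2 contingency table per feature: each fast_dot pairs an indicator for the label (y or |y − 1|) with an indicator for the feature (X or |X − 1|), so the dot products count the four label/feature combinations. A tiny dense sketch of the counting trick (illustrative values, np.dot standing in for fast_dot):

import numpy as np

y = np.array([1, 0, 1, 1])
x = np.array([1, 1, 0, 1])   # one binary feature column

# Joint counts via dot products, as in the batched loop above
n11 = np.dot(y, x)                            # y=1 and x=1
n10 = np.dot(np.fabs(y - 1), x)               # y=0 and x=1
n01 = np.dot(y, np.fabs(x - 1))               # y=1 and x=0
n00 = np.dot(np.fabs(y - 1), np.fabs(x - 1))  # y=0 and x=0
assert (n11, n10, n01, n00) == (2, 1, 1, 0)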


Example 2: svm_gradient_batch_fast

def svm_gradient_batch_fast(X_pred, X_exp, y, X_pred_ids, X_exp_ids, w, C=.0001, sigma=1.):
    # sample Kernel
    rnpred = X_pred_ids  # sp.random.randint(low=0, high=len(y), size=n_pred_samples)
    rnexpand = X_exp_ids  # sp.random.randint(low=0, high=len(y), size=n_expand_samples)
    #K = GaussKernMini_fast(X_pred.T,X_exp.T,sigma)
    X1 = X_pred.T
    X2 = X_exp.T
    if sp.sparse.issparse(X1):
        G = sp.outer(X1.multiply(X1).sum(axis=0), sp.ones(X2.shape[1]))
    else:
        G = sp.outer((X1 * X1).sum(axis=0), sp.ones(X2.shape[1]))
    if sp.sparse.issparse(X2):
        H = sp.outer(X2.multiply(X2).sum(axis=0), sp.ones(X1.shape[1]))
    else:
        H = sp.outer((X2 * X2).sum(axis=0), sp.ones(X1.shape[1]))
    K = sp.exp(-(G + H.T - 2. * fast_dot(X1.T, X2)) / (2. * sigma ** 2))
    # K = sp.exp(-(G + H.T - 2.*(X1.T.dot(X2)))/(2.*sigma**2))
    if sp.sparse.issparse(X1) | sp.sparse.issparse(X2): K = sp.array(K)

    # compute predictions
    yhat = fast_dot(K,w[rnexpand])
    # compute whether or not prediction is in margin
    inmargin = (yhat * y[rnpred]) <= 1
    # compute gradient
    G = C * w[rnexpand] - fast_dot((y[rnpred] * inmargin), K)
    return G,rnexpand
Developer: nikste, Project: doubly_random_svm, Lines: 26, Source: dsekl.py
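Examples 2 and 13 assemble the Gaussian kernel from the identity ||x1 − x2||² = ||x1||² + ||x2||² − 2·x1ᵀx2, which reduces the full pairwise distance matrix to a single matrix product plus two rank-one terms (the G and H matrices above). A small sketch verifying the vectorized form against a direct double loop (samples in rows here, unlike the transposed layout above):

import numpy as np

rng = np.random.RandomState(0)
X1 = rng.rand(5, 3)   # 5 samples, 3 features
X2 = rng.rand(4, 3)   # 4 samples, 3 features
sigma = 1.0

# Vectorized: ||x1||^2 + ||x2||^2 - 2 * x1 . x2, one matrix product total
sq_dists = ((X1 ** 2).sum(axis=1)[:, None]
            + (X2 ** 2).sum(axis=1)[None, :]
            - 2.0 * np.dot(X1, X2.T))
K = np.exp(-sq_dists / (2.0 * sigma ** 2))

# Check against an explicit double loop
for i in range(X1.shape[0]):
    for j in range(X2.shape[0]):
        d2 = ((X1[i] - X2[j]) ** 2).sum()
        assert np.isclose(K[i, j], np.exp(-d2 / (2.0 * sigma ** 2)))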


Example 3: svm_gradient_batch

def svm_gradient_batch(X_pred,X_exp,y,X_pred_ids,X_exp_ids,w,C=.0001,sigma=1.):
    # sample Kernel
    rnpred = X_pred_ids  # sp.random.randint(low=0, high=len(y), size=n_pred_samples)
    rnexpand = X_exp_ids  # sp.random.randint(low=0, high=len(y), size=n_expand_samples)
    K = GaussKernMini(X_pred.T,X_exp.T,sigma)
    # compute predictions

    yhat = fast_dot(K,w[rnexpand])
    # compute whether or not prediction is in margin
    inmargin = (yhat * y[rnpred]) <= 1
    # compute gradient
    G = C * w[rnexpand] - fast_dot((y[rnpred] * inmargin), K)
    return G,rnexpand
Developer: nikste, Project: doubly_random_svm, Lines: 13, Source: dsekl.py


Example 4: _update_coordinate_descent

def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
                               random_state):
    """Helper function for _fit_coordinate_descent

    Update W to minimize the objective function, iterating once over all
    coordinates. By symmetry, to update H, one can call
    _update_coordinate_descent(X.T, Ht, W, ...)

    """
    n_components = Ht.shape[1]

    HHt = fast_dot(Ht.T, Ht)
    XHt = safe_sparse_dot(X, Ht)

    # L2 regularization corresponds to increase of the diagonal of HHt
    if l2_reg != 0.:
        # adds l2_reg only on the diagonal
        HHt.flat[::n_components + 1] += l2_reg
    # L1 regularization corresponds to decrease of each element of XHt
    if l1_reg != 0.:
        XHt -= l1_reg

    if shuffle:
        permutation = random_state.permutation(n_components)
    else:
        permutation = np.arange(n_components)
    # The following seems to be required on 64-bit Windows w/ Python 3.5.
    permutation = np.asarray(permutation, dtype=np.intp)
    return _update_cdnmf_fast(W, HHt, XHt, permutation)
Developer: dkoes, Project: notebooks, Lines: 29, Source: nmf.py
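The line HHt.flat[::n_components + 1] += l2_reg deserves a note: .flat views the square matrix as a 1-D array, and a stride of n_components + 1 lands exactly on the diagonal entries, so the L2 term is added to the diagonal without allocating an identity matrix. The trick in isolation:

import numpy as np

n = 4
A = np.zeros((n, n))
A.flat[::n + 1] += 2.5   # strided view hits indices 0, n+1, 2(n+1), ... = the diagonal
assert np.allclose(A, 2.5 * np.eye(n))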


Example 5: _update_delta

    def _update_delta(self, m, mask=None, drop_diag=False):
        self.delta_DK_M[m][:, :] = self.alpha * self.beta_M[m]
        if mask is None and not drop_diag:
            self.sumE_MK[m, :] = 1.
            self.delta_DK_M[m][:, :] += self.sumE_MK.prod(axis=0)
            assert np.isfinite(self.delta_DK_M[m]).all()

        elif mask is None and drop_diag:
            assert self.mode_dims[0] == self.mode_dims[1]
            mask = np.abs(np.identity(self.mode_dims[0]) - 1)
            if m > 1:
                tmp = np.zeros(self.n_components)
                for k in range(self.n_components):
                    tmp[k] = (mask * np.outer(self.E_DK_M[0][:, k], self.E_DK_M[1][:, k])).sum()
                assert tmp.shape == (self.n_components,)
                self.sumE_MK[m, :] = 1.
            else:
                tmp = np.dot(mask, self.E_DK_M[np.abs(m-1)])
                assert tmp.shape == self.E_DK_M[m].shape
            self.delta_DK_M[m][:, :] += self.sumE_MK[2:].prod(axis=0) * tmp
            assert np.isfinite(self.delta_DK_M[m]).all()

        else:
            if drop_diag:
                diag_idx = np.identity(self.mode_dims[0]).astype(bool)
                assert (mask[diag_idx] == 0).all()
            tmp = mask.copy()
            tmp, order = make_first_mode(tmp, m)
            tmp = fast_dot(tmp, self.E_DK_M[order[-1]])
            for i in range(self.n_modes - 2, 0, -1):
                tmp *= self.E_DK_M[order[i]]
                tmp = tmp.sum(axis=-2)
            self.delta_DK_M[m][:, :] += tmp
            assert np.isfinite(self.delta_DK_M[m]).all()
Developer: harry-chen-1116, Project: bptf, Lines: 34, Source: bptf.py


Example 6: repeated_corr

def repeated_corr(X, y, dtype=float):
    """Computes pearson correlations between a vector and a matrix.

    Adapted from Jona-Sassenhagen's PR #L1772 on mne-python.

    Parameters
    ----------
        y : np.array, shape (n_samples)
            Data vector.
        X : np.array, shape (n_samples, n_measures)
            Data matrix onto which the vector is correlated.
        dtype : type, optional
            Data type used to compute correlation values to optimize memory.

    Returns
    -------
        rho : np.array, shape (n_measures)
    """
    from sklearn.utils.extmath import fast_dot
    if X.ndim not in [1, 2] or y.ndim != 1 or X.shape[0] != y.shape[0]:
        raise ValueError('y must be a vector, and X a matrix with an equal '
                         'number of rows.')
    if X.ndim == 1:
        X = X[:, None]
    y -= np.array(y.mean(0), dtype=dtype)
    X -= np.array(X.mean(0), dtype=dtype)
    y_sd = y.std(0, ddof=1)
    X_sd = X.std(0, ddof=1)[:, None if y.shape == X.shape else Ellipsis]
    return (fast_dot(y.T, X) / float(len(y) - 1)) / (y_sd * X_sd)
Developer: SherazKhan, Project: Paris_orientation-decoding, Lines: 29, Source: base.py
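A quick sanity check for repeated_corr is to compare one column against np.corrcoef. Two caveats: the function centers y and X in place (the -= lines), so pass copies, and its internal fast_dot import only resolves on older scikit-learn (substitute np.dot on current versions). A hedged usage sketch under those assumptions:

import numpy as np

rng = np.random.RandomState(42)
X = rng.randn(200, 10)
y = rng.randn(200)

rho = repeated_corr(X.copy(), y.copy())    # copies: the function centers in place
expected = np.corrcoef(y, X[:, 0])[0, 1]   # reference value, first column only
assert np.isclose(rho[0], expected)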


Example 7: _beta_divergence_dense

def _beta_divergence_dense(X, W, H, beta):
    """Compute the beta-divergence of X and W.H for dense array only.

    Used as a reference for testing nmf._beta_divergence.
    """
    if isinstance(X, numbers.Number):
        W = np.array([[W]])
        H = np.array([[H]])
        X = np.array([[X]])

    WH = fast_dot(W, H)

    if beta == 2:
        return squared_norm(X - WH) / 2

    WH_Xnonzero = WH[X != 0]
    X_nonzero = X[X != 0]
    np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)

    if beta == 1:
        res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
        res += WH.sum() - X.sum()

    elif beta == 0:
        div = X_nonzero / WH_Xnonzero
        res = np.sum(div) - X.size - np.sum(np.log(div))
    else:
        res = (X_nonzero ** beta).sum()
        res += (beta - 1) * (WH ** beta).sum()
        res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
        res /= beta * (beta - 1)

    return res
Developer: AlexandreAbraham, Project: scikit-learn, Lines: 33, Source: test_nmf.py
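For beta = 2 the beta-divergence reduces to half the squared Frobenius norm of X − WH, for beta = 1 to the generalized Kullback-Leibler divergence, and for beta = 0 to the Itakura-Saito divergence. A hedged numeric check of the beta = 2 case, assuming the function above runs with np.dot standing in for fast_dot and a sum-of-squares for squared_norm:

import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(6, 5) + 0.1      # strictly positive entries
W = rng.rand(6, 3)
H = rng.rand(3, 5)

d2 = _beta_divergence_dense(X, W, H, beta=2)
assert np.isclose(d2, 0.5 * ((X - np.dot(W, H)) ** 2).sum())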


Example 8: _gradient_func

 def _gradient_func(self, w):
     # sum over columns without running into overflow problems
     # scipy.sparse.spmatrix.sum uses dtype of matrix, which is too small
     col_sum = numpy.asmatrix(numpy.ones((1, self.Aw.shape[0]), dtype=numpy.int_)) * self.Aw
     v = numpy.asarray(col_sum).squeeze()
     z = fast_dot(self.data_x.T, self.Aw.T.dot(self.AXw) - v)
     return w + self.alpha * z
Developer: tum-camp, Project: survival-support-vector-machine, Lines: 7, Source: survival_svm.py


Example 9: get_bmu

 def get_bmu(self, yn):
     """Returns the ID of the best matching unit.
     Best is determined from the cosine similarity of the
     sample with the normalized Kohonen network.
 
     See https://en.wikipedia.org/wiki/Cosine_similarity
     for cosine similarity documentation.
     TODO: make it possible to find the second-best matching unit
 
     Parameters
     ----------
     KN : sparse matrix
         Shape = [n_nodes, n_features] must be normalized according to
         l2 norm as used in the sklearn Normalizer()
     y : vector of dimension 1 x nfeatures
         Target sample.
 
     Returns
     -------
     tuple : (loc, cosine_distance)
         index of the matching unit, with the corresponding cosine distance
     """
     #d = ((self.K_-y)**2).sum(axis=1)
     #loc = np.argmin(d)
     #qe = np.sqrt(d[loc])
     similarity = fast_dot(self.KN_, yn.T)
     loc = np.argmax(similarity)
     qe = 1/(1.0e-4+similarity[loc])-1
     return loc, qe
Developer: fredatshift, Project: collective-intelligence, Lines: 29, Source: som.py


Example 10: _special_dot_X

def _special_dot_X(W, H, X):
    """Computes np.dot(W, H) in a special way:

    - If X is sparse, np.dot(W, H) is computed only where X is non zero,
    and a sparse matrix is returned, with the same sparsity as X.
    - If X is masked, np.dot(W, H) is computed entirely, and a masked array is
    returned, with the same mask as X.
    - If X is dense, np.dot(W, H) is computed entirely, and returned as a dense
    array.
    """
    if sp.issparse(X):
        ii, jj = X.nonzero()
        dot_vals = np.multiply(W[ii, :], H.T[jj, :]).sum(axis=1)
        WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)
        return WH.tocsr()
    elif isinstance(X, np.ma.masked_array):
        WH = np.ma.masked_array(fast_dot(W, H), mask=X.mask)
        WH.unshare_mask()
        return WH
    else:
        return fast_dot(W, H)
Developer: dkoes, Project: notebooks, Lines: 21, Source: nmf.py
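The sparse branch is the point of _special_dot_X: for sparse X it evaluates the product only at X's stored positions, which is what NMF-style updates need, instead of materializing the dense W·H. A short usage sketch (assumes the function above, with scipy.sparse imported as sp and np.dot standing in for fast_dot):

import numpy as np
import scipy.sparse as sp

rng = np.random.RandomState(0)
W = rng.rand(4, 2)
H = rng.rand(2, 3)
X = sp.random(4, 3, density=0.4, format='csr', random_state=0)

WH = _special_dot_X(W, H, X)
assert sp.issparse(WH)
assert WH.nnz == X.nnz        # values stored only at X's non-zero positions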


Example 11: get_bmu

def get_bmu(kn, yn, epsilon=1.0e-6):
    """Returns the ID of the best matching unit.
    Best is determined from the cosine similarity of the
    sample with the normalized Kohonen network.

    See https://en.wikipedia.org/wiki/Cosine_similarity
    for cosine similarity documentation.
    """
    similarity = fast_dot(kn, yn.T)
    loc = np.argmax(similarity)
    qe = 1 / (epsilon + similarity[loc]) - 1
    return loc, qe
Developer: fredhusser, Project: som, Lines: 12, Source: som.py
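Both get_bmu variants rest on the fact that for L2-normalized vectors cosine similarity is just a dot product, so the best matching unit falls out of one matrix-vector product and an argmax. A sketch using scikit-learn's normalize (illustrative shapes, np.dot standing in for fast_dot):

import numpy as np
from sklearn.preprocessing import normalize

rng = np.random.RandomState(1)
kn = normalize(rng.rand(50, 8))        # 50 nodes, each row unit l2 norm
yn = normalize(rng.rand(1, 8))         # one normalized sample

similarity = np.dot(kn, yn.T).ravel()  # cosine similarity, since rows are unit length
loc = int(np.argmax(similarity))
qe = 1.0 / (1.0e-6 + similarity[loc]) - 1.0
print(loc, qe)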


Example 12: _hessian_func

    def _hessian_func(self, w, s):
        s_bias, s_feat = self._split_coefficents(s)

        l_plus, xv_plus, l_minus, xv_minus = self._counter.calculate(s_feat)
        x = self._counter.x

        xs = numpy.dot(x, s_feat)
        xs = numexpr.evaluate('(l_plus + l_minus) * xs - xv_plus - xv_minus')

        hessp = s_feat + self._rank_penalty * fast_dot(x.T, xs)
        if self._has_time:
            xc = x.compress(self.regr_mask, axis=0)
            hessp += self._regr_penalty * fast_dot(xc.T, numpy.dot(xc, s_feat))

            # intercept
            if self._fit_intercept:
                xsum = xc.sum(axis=0)
                hessp += self._regr_penalty * xsum * s_bias
                hessp_intercept = self._regr_penalty * xc.shape[0] * s_bias + self._regr_penalty * numpy.dot(xsum, s_feat)
                hessp = numpy.concatenate(([hessp_intercept], hessp))

        return hessp
Developer: tum-camp, Project: survival-support-vector-machine, Lines: 22, Source: survival_svm.py


Example 13: GaussKernMini_fast

def GaussKernMini_fast(X1,X2,sigma):
    if sp.sparse.issparse(X1):
        G = sp.outer(X1.multiply(X1).sum(axis=0),sp.ones(X2.shape[1]))
    else:
        G = sp.outer((X1 * X1).sum(axis=0),sp.ones(X2.shape[1]))
    if sp.sparse.issparse(X2):
        H = sp.outer(X2.multiply(X2).sum(axis=0),sp.ones(X1.shape[1]))
    else:
        H = sp.outer((X2 * X2).sum(axis=0),sp.ones(X1.shape[1]))
    K = sp.exp(-(G + H.T - 2.*fast_dot(X1.T,X2))/(2.*sigma**2))
    # K = sp.exp(-(G + H.T - 2.*(X1.T.dot(X2)))/(2.*sigma**2))
    if sp.sparse.issparse(X1) | sp.sparse.issparse(X2): K = sp.array(K)
    return K
Developer: nikste, Project: doubly_random_svm, Lines: 13, Source: dsekl.py


Example 14: nmf_predict_direct

def nmf_predict_direct(rate_matrix,user_distribution,item_distribution,user_ids_list,item_ids_list,top_n,fout_str):
    fout = open(fout_str,'w')
    #method 1 : w*h
    for u_ix,u in enumerate(user_distribution):
        predict_vec = fast_dot(u,item_distribution)
        filter_vec = np.where(rate_matrix.getrow(u_ix).toarray()>0,0,1)
        predict_vec = predict_vec * filter_vec
        sort_ix_vec = np.argpartition(-predict_vec[0],top_n)[:top_n]
        candidate_item_list = list()
        for i_ix in sort_ix_vec:
            item_id = item_ids_list[i_ix]
            candidate_item_list.append(item_id)
        user_id = user_ids_list[u_ix]
        fout.write('%s,%s\n' % (user_id, '#'.join(candidate_item_list)))
    fout.close()
Developer: shitaixiaoniu, Project: RS_GP, Lines: 15, Source: build_candidate_cf.py
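The np.argpartition(-predict_vec, top_n) call is the top-N trick: partitioning the negated scores puts the top_n largest entries (in arbitrary order) in the first top_n positions without the cost of a full sort. In isolation:

import numpy as np

scores = np.array([0.1, 0.9, 0.3, 0.7, 0.5])
top_n = 2

top_ix = np.argpartition(-scores, top_n)[:top_n]
assert set(top_ix) == {1, 3}           # indices of 0.9 and 0.7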


Example 15: transform_PCA

def transform_PCA(pca, k, X):
    X_reduced = X - pca.mean_

    X_reduced = fast_dot(X_reduced, pca.components_[0:k].T)

    # Transform test data with principal components:
    #X_reduced = pca.transform(test_X)

    # Reconstruct:
    X_rec = np.dot(X_reduced, pca.components_[0:k])

    # Restore mean:
    X_rec += pca.mean_
    print "Variance Explained: {}".format(np.sum(pca.explained_variance_ratio_[:k]))
    return X_reduced, X_rec
Developer: TIGRLab, Project: NI-ML, Lines: 15, Source: PCA_Utils.py
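With k equal to the fitted number of components, the manual projection above should match PCA.transform exactly, since both subtract pca.mean_ and multiply by pca.components_.T. A hedged check, assuming the function above runs with np.dot standing in for fast_dot:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.rand(30, 10)

pca = PCA(n_components=5).fit(X)
X_red, X_rec = transform_PCA(pca, k=5, X=X)
assert np.allclose(X_red, pca.transform(X))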


Example 16: fit_transform

    def fit_transform(self, X, y=None):
        """Fit the model with X and apply the dimensionality reduction on X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self._fit(X)
        if self.copy and self.center_ is not None:
            X = X - self.center_
        return fast_dot(X, self.components_.T)
Developer: amueller, Project: pca, Lines: 17, Source: tga.py


Example 17: _reorth

def _reorth(basis, target, rows=None, alpha=0.5):
    """Reorthogonalize a vector using iterated Gram-Schmidt

    Parameters
    ----------
    basis: ndarray, shape (n_features, n_basis)
        The matrix whose rows are a set of basis to reorthogonalize against

    target: ndarray, shape (n_features,)
        The target vector to be reorthogonalized

    rows: {array-like, None}, default None
        Indices of rows from basis to use. Use all if None

    alpha: float, default 0.5
        Parameter for determining whether to do a second reorthogonalization.

    Returns
    -------
    reorthed_target: ndarray, shape (n_features,)
        The reorthogonalized vector
    """
    if rows is not None:
        basis = basis[rows]
    norm_target = norm(target)

    norm_target_old = 0
    n_reorth = 0

    while norm_target < alpha * norm_target_old or n_reorth == 0:
        for row in basis:
            t = fast_dot(row, target)
            target = target - t * row

        norm_target_old = norm_target
        norm_target = norm(target)
        n_reorth += 1

        if n_reorth > 4:
            # target in span(basis) => accept target = 0
            target = np.zeros(basis.shape[0])
            break

    return target
Developer: amueller, Project: pca, Lines: 44, Source: tga.py
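A minimal check of _reorth: orthogonalizing against the first two standard basis vectors should strip those coordinates and keep the third. Assumes the function above, with np.dot standing in for fast_dot and np.linalg.norm for norm:

import numpy as np

basis = np.eye(3)[:2]                   # orthonormal rows e1, e2
target = np.array([1.0, 1.0, 1.0])

t = _reorth(basis, target)
assert np.allclose(t, [0.0, 0.0, 1.0])  # components along e1, e2 removed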


Example 18: inverse_transform

    def inverse_transform(self, X):
        """Transform data back to its original space, i.e.,
        return an input X_original whose transform would be X

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original: array-like, shape (n_samples, n_features)
        """
        check_is_fitted(self, 'center_')

        X_original = fast_dot(X, self.components_)
        if self.center_ is not None:
            X_original = X_original + self.center_
        return X_original
Developer: amueller, Project: pca, Lines: 20, Source: tga.py


Example 19: inverse_transform

    def inverse_transform(self, X):
        """Transform data back to its original space, i.e.,
        return an input X_original whose transform would be X

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original : array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform does not compute the
        exact inverse operation as transform.
        """
        return fast_dot(X, self.components_) + self.mean_
Developer: roy651, Project: tm_spring_2016, Lines: 20, Source: pca_mod.py
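The docstring's whitening caveat applies to this old modified PCA, which maps back with components_ alone. For intuition, here is a round-trip check with scikit-learn's own PCA and no whitening, the lossless case when all components are kept (our illustrative example, not this module's code):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.rand(20, 6)

pca = PCA(n_components=6).fit(X)     # keep all components: lossless round trip
X_rec = pca.inverse_transform(pca.transform(X))
assert np.allclose(X_rec, X)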


Example 20: transform

    def transform(self, X):
        """Apply the dimensionality reduction on X.

        X is projected onto the first principal components previously
        extracted from a training set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)

        """
        X = array2d(X)
        if self.mean_ is not None:
            X = X - self.mean_
        X_transformed = fast_dot(X, self.components_.T)
        return X_transformed
Developer: roy651, Project: tm_spring_2016, Lines: 22, Source: pca_mod.py



Note: the sklearn.utils.extmath.fast_dot examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.

