
C++ getServiceContext Function Code Examples


This article collects typical usage examples of the C++ getServiceContext function. If you are unsure what getServiceContext does, how it is called, or what real-world usage looks like, the curated examples below should help.



The following shows 19 code examples of the getServiceContext function, ordered by popularity.
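
Before the individual examples, here is a minimal sketch (written for this summary and not taken from any of the projects below; the include paths and the helper name exampleCurrentTime are assumptions) of the two access patterns that recur throughout: an OperationContext can reach the process-wide ServiceContext either directly or through its owning Client, and the ServiceContext is then used to look up process-wide services such as a clock source.

#include "mongo/db/client.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"

namespace mongo {

Date_t exampleCurrentTime(OperationContext* opCtx) {
    // Pattern 1: reach the process-wide ServiceContext directly from the
    // OperationContext (as in Examples 1, 6 and 9 below).
    ServiceContext* service = opCtx->getServiceContext();

    // Pattern 2: reach it through the owning Client (as in Examples 8, 12 and 14).
    ServiceContext* sameService = opCtx->getClient()->getServiceContext();
    (void)sameService;  // both paths yield the same ServiceContext

    // The ServiceContext is then used to look up process-wide services, e.g. the
    // fast clock source used for deadline arithmetic in Examples 6 and 9.
    return service->getFastClockSource()->now();
}

}  // namespace mongo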

Example 1: getServiceContext

Status OperationContext::checkForInterruptNoAssert() noexcept {
    // TODO: Remove the MONGO_likely(getClient()) once all operation contexts are constructed with
    // clients.
    if (MONGO_likely(getClient() && getServiceContext()) &&
        getServiceContext()->getKillAllOperations()) {
        return Status(ErrorCodes::InterruptedAtShutdown, "interrupted at shutdown");
    }

    if (hasDeadlineExpired()) {
        if (!_hasArtificialDeadline) {
            markKilled(_timeoutError);
        }
        return Status(_timeoutError, "operation exceeded time limit");
    }

    if (_ignoreInterrupts) {
        return Status::OK();
    }

    MONGO_FAIL_POINT_BLOCK(checkForInterruptFail, scopedFailPoint) {
        if (opShouldFail(getClient(), scopedFailPoint.getData())) {
            log() << "set pending kill on op " << getOpID() << ", for checkForInterruptFail";
            markKilled();
        }
    }

    const auto killStatus = getKillStatus();
    if (killStatus != ErrorCodes::OK) {
        return Status(killStatus, "operation was interrupted");
    }

    return Status::OK();
}
Developer: EvgeniyPatlan, Project: percona-server-mongodb, Lines: 33, Source: operation_context.cpp


Example 2: getServiceContext

void SyncTailTest::setUp() {
    ServiceContextMongoDTest::setUp();

    auto service = getServiceContext();
    ReplicationCoordinator::set(service, stdx::make_unique<ReplicationCoordinatorMock>(service));
    auto storageInterface = stdx::make_unique<StorageInterfaceMock>();
    _storageInterface = storageInterface.get();
    storageInterface->insertDocumentsFn =
        [](OperationContext*, const NamespaceString&, const std::vector<InsertStatement>&) {
            return Status::OK();
        };
    StorageInterface::set(service, std::move(storageInterface));
    DropPendingCollectionReaper::set(
        service, stdx::make_unique<DropPendingCollectionReaper>(_storageInterface));

    _replicationProcess = new ReplicationProcess(
        _storageInterface, stdx::make_unique<ReplicationConsistencyMarkersMock>());
    ReplicationProcess::set(cc().getServiceContext(),
                            std::unique_ptr<ReplicationProcess>(_replicationProcess));


    _opCtx = cc().makeOperationContext();
    _opsApplied = 0;
    _applyOp = [](OperationContext* opCtx,
                  Database* db,
                  const BSONObj& op,
                  bool inSteadyStateReplication,
                  stdx::function<void()>) { return Status::OK(); };
    _applyCmd = [](OperationContext* opCtx, const BSONObj& op, bool) { return Status::OK(); };
    _incOps = [this]() { _opsApplied++; };
}
Developer: vnvizitiu, Project: mongo, Lines: 31, Source: sync_tail_test_fixture.cpp


Example 3: TEST_F

TEST_F(KeysManagerShardedTest, GetKeyForSigningShouldReturnRightOldKey) {
    keyManager()->startMonitoring(getServiceContext());

    KeysCollectionDocument origKey1(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
    KeysCollectionDocument origKey2(
        2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));

    keyManager()->refreshNow(operationContext());

    {
        auto keyStatus = keyManager()->getKeyForSigning(nullptr, LogicalTime(Timestamp(100, 0)));
        ASSERT_OK(keyStatus.getStatus());

        auto key = keyStatus.getValue();
        ASSERT_EQ(1, key.getKeyId());
        ASSERT_EQ(origKey1.getKey(), key.getKey());
        ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
    }

    {
        auto keyStatus = keyManager()->getKeyForSigning(nullptr, LogicalTime(Timestamp(105, 0)));
        ASSERT_OK(keyStatus.getStatus());

        auto key = keyStatus.getValue();
        ASSERT_EQ(2, key.getKeyId());
        ASSERT_EQ(origKey2.getKey(), key.getKey());
        ASSERT_EQ(Timestamp(110, 0), key.getExpiresAt().asTimestamp());
    }
}
Developer: ShaneHarvey, Project: mongo, Lines: 34, Source: keys_collection_manager_sharding_test.cpp


Example 4: StorageInterfaceRollback

void RollbackTest::setUp() {
    _storageInterface = new StorageInterfaceRollback();
    auto serviceContext = getServiceContext();
    auto consistencyMarkers = stdx::make_unique<ReplicationConsistencyMarkersMock>();
    auto recovery =
        stdx::make_unique<ReplicationRecoveryImpl>(_storageInterface, consistencyMarkers.get());
    _replicationProcess = stdx::make_unique<ReplicationProcess>(
        _storageInterface, std::move(consistencyMarkers), std::move(recovery));
    _dropPendingCollectionReaper = new DropPendingCollectionReaper(_storageInterface);
    DropPendingCollectionReaper::set(
        serviceContext, std::unique_ptr<DropPendingCollectionReaper>(_dropPendingCollectionReaper));
    StorageInterface::set(serviceContext, std::unique_ptr<StorageInterface>(_storageInterface));
    _coordinator = new ReplicationCoordinatorRollbackMock(serviceContext);
    ReplicationCoordinator::set(serviceContext,
                                std::unique_ptr<ReplicationCoordinator>(_coordinator));
    setOplogCollectionName(serviceContext);

    _opCtx = makeOperationContext();
    _replicationProcess->getConsistencyMarkers()->clearAppliedThrough(_opCtx.get(), {});
    _replicationProcess->getConsistencyMarkers()->setMinValid(_opCtx.get(), OpTime{});
    _replicationProcess->initializeRollbackID(_opCtx.get()).transitional_ignore();

    // Increase rollback log component verbosity for unit tests.
    mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
        logger::LogComponent::kReplicationRollback, logger::LogSeverity::Debug(2));

    auto observerRegistry = checked_cast<OpObserverRegistry*>(serviceContext->getOpObserver());
    observerRegistry->addObserver(std::make_unique<RollbackTestOpObserver>());
}
Developer: hanumantmk, Project: mongo, Lines: 29, Source: rollback_test_fixture.cpp


Example 5: _tempDir

ServiceContextMongoDTest::ServiceContextMongoDTest(std::string engine, RepairAction repair)
    : _tempDir("service_context_d_test_fixture") {

    _stashedStorageParams.engine = std::exchange(storageGlobalParams.engine, std::move(engine));
    _stashedStorageParams.engineSetByUser =
        std::exchange(storageGlobalParams.engineSetByUser, true);
    _stashedStorageParams.repair =
        std::exchange(storageGlobalParams.repair, (repair == RepairAction::kRepair));

    auto const serviceContext = getServiceContext();
    serviceContext->setServiceEntryPoint(std::make_unique<ServiceEntryPointMongod>(serviceContext));
    auto logicalClock = std::make_unique<LogicalClock>(serviceContext);
    LogicalClock::set(serviceContext, std::move(logicalClock));

    // Set up a fake no-op PeriodicRunner. No jobs will ever get run, which is
    // desired behavior for unit tests unrelated to background jobs.
    auto runner = std::make_unique<MockPeriodicRunnerImpl>();
    serviceContext->setPeriodicRunner(std::move(runner));

    storageGlobalParams.dbpath = _tempDir.path();

    initializeStorageEngine(serviceContext, StorageEngineInitFlags::kNone);

    // Set up UUID Catalog observer. This is necessary because the Collection destructor contains an
    // invariant to ensure the UUID corresponding to that Collection object is no longer associated
    // with that Collection object in the UUIDCatalog. UUIDs may be registered in the UUIDCatalog
    // directly in certain code paths, but they can only be removed from the UUIDCatalog via a
    // UUIDCatalogObserver. It is therefore necessary to install the observer to ensure the
    // invariant in the Collection destructor is not triggered.
    auto observerRegistry = checked_cast<OpObserverRegistry*>(serviceContext->getOpObserver());
    observerRegistry->addObserver(std::make_unique<UUIDCatalogObserver>());
}
Developer: EvgeniyPatlan, Project: percona-server-mongodb, Lines: 32, Source: service_context_d_test_fixture.cpp


Example 6: getDeadline

Milliseconds OperationContext::getRemainingMaxTimeMillis() const {
    if (!hasDeadline()) {
        return Milliseconds::max();
    }

    return std::max(Milliseconds{0},
                    getDeadline() - getServiceContext()->getFastClockSource()->now());
}
Developer: EvgeniyPatlan, Project: percona-server-mongodb, Lines: 8, Source: operation_context.cpp


Example 7: invariant

// Theory of operation for waitForConditionOrInterruptNoAssertUntil and markKilled:
//
// An operation indicates to potential killers that it is waiting on a condition variable by setting
// _waitMutex and _waitCV, while holding the lock on its parent Client. It then unlocks its Client,
// unblocking any killers, which are required to have locked the Client before calling markKilled.
//
// When _waitMutex and _waitCV are set, killers must lock _waitMutex before setting the _killCode,
// and must signal _waitCV before releasing _waitMutex. Unfortunately, they must lock _waitMutex
// without holding a lock on Client to avoid a deadlock with callers of
// waitForConditionOrInterruptNoAssertUntil(). So, in the event that _waitMutex is set, the killer
// increments _numKillers, drops the Client lock, acquires _waitMutex and then re-acquires the
// Client lock. We know that the Client, its OperationContext and _waitMutex will remain valid
// during this period because the caller of waitForConditionOrInterruptNoAssertUntil will not return
// while _numKillers > 0 and will not return until it has itself reacquired _waitMutex. Instead,
// that caller will keep waiting on _waitCV until _numKillers drops to 0.
//
// In essence, when _waitMutex is set, _killCode is guarded by _waitMutex and _waitCV, but when
// _waitMutex is not set, it is guarded by the Client spinlock. Changing _waitMutex is itself
// guarded by the Client spinlock and _numKillers.
//
// When _numKillers does drop to 0, the waiter will null out _waitMutex and _waitCV.
//
// This implementation adds a minimum of two spinlock acquire-release pairs to every condition
// variable wait.
StatusWith<stdx::cv_status> OperationContext::waitForConditionOrInterruptNoAssertUntil(
    stdx::condition_variable& cv, stdx::unique_lock<stdx::mutex>& m, Date_t deadline) noexcept {
    invariant(getClient());
    {
        stdx::lock_guard<Client> clientLock(*getClient());
        invariant(!_waitMutex);
        invariant(!_waitCV);
        invariant(0 == _numKillers);

        // This interrupt check must be done while holding the client lock, so as not to race with a
        // concurrent caller of markKilled.
        auto status = checkForInterruptNoAssert();
        if (!status.isOK()) {
            return status;
        }
        _waitMutex = m.mutex();
        _waitCV = &cv;
    }

    if (hasDeadline()) {
        deadline = std::min(deadline, getDeadline());
    }

    const auto waitStatus = [&] {
        if (Date_t::max() == deadline) {
            cv.wait(m);
            return stdx::cv_status::no_timeout;
        }
        return getServiceContext()->getPreciseClockSource()->waitForConditionUntil(cv, m, deadline);
    }();

    // Continue waiting on cv until no other thread is attempting to kill this one.
    cv.wait(m, [this] {
        stdx::lock_guard<Client> clientLock(*getClient());
        if (0 == _numKillers) {
            _waitMutex = nullptr;
            _waitCV = nullptr;
            return true;
        }
        return false;
    });

    auto status = checkForInterruptNoAssert();
    if (!status.isOK()) {
        return status;
    }
    if (hasDeadline() && waitStatus == stdx::cv_status::timeout && deadline == getDeadline()) {
        // It's possible that the system clock used in stdx::condition_variable::wait_until
        // is slightly ahead of the FastClock used in checkForInterrupt. In this case,
        // we treat the operation as though it has exceeded its time limit, just as if the
        // FastClock and system clock had agreed.
        markKilled(ErrorCodes::ExceededTimeLimit);
        return Status(ErrorCodes::ExceededTimeLimit, "operation exceeded time limit");
    }
    return waitStatus;
}
Developer: bjori, Project: mongo, Lines: 80, Source: operation_context.cpp


Example 8: lk

void ServiceContext::OperationContextDeleter::operator()(OperationContext* opCtx) const {
    auto client = opCtx->getClient();
    auto service = client->getServiceContext();
    {
        stdx::lock_guard<Client> lk(*client);
        client->resetOperationContext();
    }
    onDestroy(opCtx, service->_clientObservers);
    delete opCtx;
}
Developer: EvgeniyPatlan, Project: percona-server-mongodb, Lines: 10, Source: service_context.cpp


Example 9: getServiceContext

void OperationContext::setDeadlineByDate(Date_t when) {
    Microseconds maxTime;
    if (when == Date_t::max()) {
        maxTime = Microseconds::max();
    } else {
        maxTime = when - getServiceContext()->getFastClockSource()->now();
        if (maxTime < Microseconds::zero()) {
            maxTime = Microseconds::zero();
        }
    }
    setDeadlineAndMaxTime(when, maxTime);
}
Developer: DreamerKing, Project: mongo, Lines: 12, Source: operation_context.cpp


Example 10: getClient

ServiceContextMongoDTest::~ServiceContextMongoDTest() {
    {
        auto opCtx = getClient()->makeOperationContext();
        Lock::GlobalLock glk(opCtx.get(), MODE_X);
        DatabaseHolder::getDatabaseHolder().closeAll(opCtx.get(), "all databases dropped");
    }

    shutdownGlobalStorageEngineCleanly(getServiceContext());

    std::swap(storageGlobalParams.engine, _stashedStorageParams.engine);
    std::swap(storageGlobalParams.engineSetByUser, _stashedStorageParams.engineSetByUser);
    std::swap(storageGlobalParams.repair, _stashedStorageParams.repair);
}
Developer: EvgeniyPatlan, Project: percona-server-mongodb, Lines: 13, Source: service_context_d_test_fixture.cpp


Example 11: Seconds

void ClusterCommandTestFixture::setUp() {
    CatalogCacheTestFixture::setUp();
    CatalogCacheTestFixture::setupNShards(numShards);

    // Set up a logical clock with an initial time.
    auto logicalClock = stdx::make_unique<LogicalClock>(getServiceContext());
    logicalClock->setClusterTimeFromTrustedSource(kInMemoryLogicalTime);
    LogicalClock::set(getServiceContext(), std::move(logicalClock));

    auto keysCollectionClient = stdx::make_unique<KeysCollectionClientSharded>(
        Grid::get(operationContext())->catalogClient());

    auto keyManager = std::make_shared<KeysCollectionManager>(
        "dummy", std::move(keysCollectionClient), Seconds(KeysRotationIntervalSec));

    auto validator = stdx::make_unique<LogicalTimeValidator>(keyManager);
    LogicalTimeValidator::set(getServiceContext(), std::move(validator));

    LogicalSessionCache::set(getServiceContext(), stdx::make_unique<LogicalSessionCacheNoop>());

    loadRoutingTableWithTwoChunksAndTwoShards(kNss);
}
Developer: hanumantmk, Project: mongo, Lines: 22, Source: cluster_command_test_fixture.cpp


Example 12: getLogicalSessionUserDigestForLoggedInUser

SHA256Block getLogicalSessionUserDigestForLoggedInUser(const OperationContext* opCtx) {
    auto client = opCtx->getClient();
    ServiceContext* serviceContext = client->getServiceContext();

    if (AuthorizationManager::get(serviceContext)->isAuthEnabled()) {
        UserName userName;

        const auto user = AuthorizationSession::get(client)->getSingleUser();
        invariant(user);

        return user->getDigest();
    } else {
        return kNoAuthDigest;
    }
}
Developer: i80and, Project: mongo, Lines: 15, Source: logical_session_id_helpers.cpp


Example 13: lk

void ServiceContext::OperationContextDeleter::operator()(OperationContext* opCtx) const {
    auto client = opCtx->getClient();
    auto service = client->getServiceContext();
    {
        stdx::lock_guard<Client> lk(*client);
        client->resetOperationContext();
    }
    try {
        for (const auto& observer : service->_clientObservers) {
            observer->onDestroyOperationContext(opCtx);
        }
    } catch (...) {
        std::terminate();
    }
    delete opCtx;
}
Developer: gormanb, Project: mongo, Lines: 16, Source: service_context.cpp


Example 14: run

    virtual bool run(OperationContext* opCtx,
                     const std::string& db,
                     const BSONObj& cmdObj,
                     BSONObjBuilder& result) override {
        auto client = opCtx->getClient();
        ServiceContext* serviceContext = client->getServiceContext();

        auto lsCache = LogicalSessionCache::get(serviceContext);
        boost::optional<LogicalSessionRecord> record =
            makeLogicalSessionRecord(opCtx, lsCache->now());
        uassertStatusOK(lsCache->startSession(opCtx, record.get()));

        makeLogicalSessionToClient(record->getId()).serialize(&result);

        return true;
    }
Developer: asya999, Project: mongo, Lines: 16, Source: start_session_command.cpp


Example 15: TEST_F

TEST_F(KeysManagerShardedTest, ShouldNotReturnKeysInFeatureCompatibilityVersion34) {
    serverGlobalParams.featureCompatibility.version.store(
        ServerGlobalParams::FeatureCompatibility::Version::k34);

    keyManager()->startMonitoring(getServiceContext());
    keyManager()->enableKeyGenerator(operationContext(), true);

    KeysCollectionDocument origKey(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey.toBSON()));

    keyManager()->refreshNow(operationContext());

    auto keyStatus =
        keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)));
    ASSERT_EQ(ErrorCodes::KeyNotFound, keyStatus.getStatus());
}
Developer: DINKIN, Project: mongo, Lines: 18, Source: keys_collection_manager_sharding_test.cpp


Example 16: makeLogicalSessionRecord

LogicalSessionRecord makeLogicalSessionRecord(OperationContext* opCtx,
                                              const LogicalSessionId& lsid,
                                              Date_t lastUse) {
    auto lsr = makeLogicalSessionRecord(lsid, lastUse);

    auto client = opCtx->getClient();
    ServiceContext* serviceContext = client->getServiceContext();
    if (AuthorizationManager::get(serviceContext)->isAuthEnabled()) {
        auto user = AuthorizationSession::get(client)->getSingleUser();
        invariant(user);

        if (user->getDigest() == lsid.getUid()) {
            lsr.setUser(StringData(user->getName().toString()));
        }
    }

    return lsr;
}
Developer: i80and, Project: mongo, Lines: 18, Source: logical_session_id_helpers.cpp


Example 17: getServiceContext

DbResponse ClusterCommandTestFixture::runCommand(BSONObj cmd) {
    // Create a new client/operation context per command
    auto client = getServiceContext()->makeClient("ClusterCmdClient");
    auto opCtx = client->makeOperationContext();

    const auto opMsgRequest = OpMsgRequest::fromDBAndBody(kNss.db(), cmd);

    // Ensure the clusterGLE on the Client has not yet been initialized.
    ASSERT(!ClusterLastErrorInfo::get(client.get()));

    // Initialize the cluster last error info for the client with a new request.
    ClusterLastErrorInfo::get(client.get()) = std::make_shared<ClusterLastErrorInfo>();
    ASSERT(ClusterLastErrorInfo::get(client.get()));
    auto clusterGLE = ClusterLastErrorInfo::get(client.get());
    clusterGLE->newRequest();

    return Strategy::clientCommand(opCtx.get(), opMsgRequest.serialize());
}
Developer: hanumantmk, Project: mongo, Lines: 18, Source: cluster_command_test_fixture.cpp


Example 18: catch

void ServiceContext::OperationContextDeleter::operator()(OperationContext* opCtx) const {
    auto client = opCtx->getClient();
    auto service = client->getServiceContext();
    // // TODO(schwerin): When callers no longer construct their own OperationContexts directly,
    // // but only through the ServiceContext, uncomment the following.  Until then, it must
    // // be done in the operation context destructors, which introduces a potential race.
    // {
    //     stdx::lock_guard<Client> lk(*client);
    //     client->resetOperationContext();
    // }
    try {
        for (const auto& observer : service->_clientObservers) {
            observer->onDestroyOperationContext(opCtx);
        }
    } catch (...) {
        std::terminate();
    }
    delete opCtx;
}
Developer: AnkyrinRepeat, Project: mongo, Lines: 19, Source: service_context.cpp


Example 19: cc

void MockReplCoordServerFixture::setUp() {
    ServiceContextMongoDTest::setUp();

    _opCtx = cc().makeOperationContext();

    auto service = getServiceContext();

    _storageInterface = new repl::StorageInterfaceMock();
    repl::StorageInterface::set(service,
                                std::unique_ptr<repl::StorageInterface>(_storageInterface));
    ASSERT_TRUE(_storageInterface == repl::StorageInterface::get(service));

    repl::ReplicationProcess::set(service,
                                  stdx::make_unique<repl::ReplicationProcess>(
                                      _storageInterface,
                                      stdx::make_unique<repl::ReplicationConsistencyMarkersMock>(),
                                      stdx::make_unique<repl::ReplicationRecoveryMock>()));

    ASSERT_OK(repl::ReplicationProcess::get(service)->initializeRollbackID(opCtx()));

    // Insert code path assumes existence of repl coordinator!
    repl::ReplSettings replSettings;
    replSettings.setReplSetString(
        ConnectionString::forReplicaSet("sessionTxnStateTest", {HostAndPort("a:1")}).toString());

    repl::ReplicationCoordinator::set(
        service, stdx::make_unique<repl::ReplicationCoordinatorMock>(service, replSettings));
    ASSERT_OK(
        repl::ReplicationCoordinator::get(service)->setFollowerMode(repl::MemberState::RS_PRIMARY));

    // Note: internal code does not allow implicit creation of non-capped oplog collection.
    DBDirectClient client(opCtx());
    ASSERT_TRUE(
        client.createCollection(NamespaceString::kRsOplogNamespace.ns(), 1024 * 1024, true));

    repl::setOplogCollectionName(service);
    repl::acquireOplogCollectionForLogging(opCtx());

    repl::DropPendingCollectionReaper::set(
        service,
        stdx::make_unique<repl::DropPendingCollectionReaper>(repl::StorageInterface::get(service)));
}
Developer: EvgeniyPatlan, Project: percona-server-mongodb, Lines: 42, Source: mock_repl_coord_server_fixture.cpp



Note: The getServiceContext examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.



扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap