This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.RetriesExhaustedException. If you are unsure what RetriesExhaustedException is for, how to use it, or simply need working samples, the curated class code examples below should help.
The RetriesExhaustedException class belongs to the org.apache.hadoop.hbase.client package. A total of 14 code examples of the class are shown below, sorted by popularity by default.
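Before the examples, here is a minimal, self-contained sketch of where this exception typically surfaces in client code: a Get issued against a table whose region servers cannot be reached, with the client retry budget deliberately kept small via HConstants.HBASE_CLIENT_RETRIES_NUMBER. This is an illustrative sketch only; the table name "example_table" and the row key are placeholders, not taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetriesExhaustedExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Keep the retry budget small so an unreachable cluster fails fast.
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("example_table"))) {
      table.get(new Get(Bytes.toBytes("row1")));
    } catch (RetriesExhaustedException e) {
      // Thrown by the client once every configured retry has failed.
      System.err.println("Gave up after retries: " + e.getMessage());
    }
  }
}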
Example 1: testSocketClosed
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
@Test(expected=RetriesExhaustedException.class)
public void testSocketClosed() throws IOException, InterruptedException {
  String tableName = "testSocketClosed";
  TableName name = TableName.valueOf(tableName);
  UTIL.createTable(name, fam1).close();
  Configuration conf = new Configuration(UTIL.getConfiguration());
  conf.set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY,
    MyRpcClientImpl.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(TableName.valueOf(tableName));
  table.get(new Get("asd".getBytes()));
  connection.close();
  for (Socket socket : MyRpcClientImpl.savedSockets) {
    assertTrue("Socket + " + socket + " is not closed", socket.isClosed());
  }
}
Developer: fengchen8086 | Project: ditb | Lines: 19 | Source: TestRpcClientLeaks.java
Example 2: testRegionServerCoprocessorServiceError
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
@Test
public void testRegionServerCoprocessorServiceError() throws Exception {
  final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName();
  DummyRegionServerEndpointProtos.DummyRequest request =
      DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance();
  try {
    admin
        .<DummyRegionServerEndpointProtos.DummyService.Stub, DummyRegionServerEndpointProtos.DummyResponse> coprocessorService(
          DummyRegionServerEndpointProtos.DummyService::newStub,
          (s, c, done) -> s.dummyThrow(c, request, done), serverName).get();
    fail("Should have thrown an exception");
  } catch (Exception e) {
    assertTrue(e.getCause() instanceof RetriesExhaustedException);
    assertTrue(e.getCause().getMessage().contains(WHAT_TO_THROW.getClass().getName().trim()));
  }
}
Developer: apache | Project: hbase | Lines: 17 | Source: TestAsyncCoprocessorEndpoint.java
Example 3: testFailedOpen
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
private void testFailedOpen(final TableName tableName,
    final MockRSExecutor executor) throws Exception {
  final RegionInfo hri = createRegionInfo(tableName, 1);
  // Test Assign operation failure
  rsDispatcher.setMockRsExecutor(executor);
  try {
    waitOnFuture(submitProcedure(am.createAssignProcedure(hri)));
    fail("unexpected assign completion");
  } catch (RetriesExhaustedException e) {
    // expected exception
    LOG.info("REGION STATE " + am.getRegionStates().getRegionStateNode(hri));
    LOG.info("expected exception from assign operation: " + e.getMessage(), e);
    assertEquals(true, am.getRegionStates().getRegionState(hri).isFailedOpen());
  }
}
Developer: apache | Project: hbase | Lines: 17 | Source: TestAssignmentManager.java
Example 4: testSocketClosed
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
@Test(expected=RetriesExhaustedException.class)
public void testSocketClosed() throws IOException, InterruptedException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  UTIL.createTable(tableName, fam1).close();
  Configuration conf = new Configuration(UTIL.getConfiguration());
  conf.set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY,
    MyRpcClientImpl.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(TableName.valueOf(name.getMethodName()));
  table.get(new Get(Bytes.toBytes("asd")));
  connection.close();
  for (Socket socket : MyRpcClientImpl.savedSockets) {
    assertTrue("Socket + " + socket + " is not closed", socket.isClosed());
  }
}
Developer: apache | Project: hbase | Lines: 18 | Source: TestRpcClientLeaks.java
Example 5: causeIsPleaseHold
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
private boolean causeIsPleaseHold(Throwable e) {
  if (e instanceof PleaseHoldException)
    return true;
  if (e instanceof TableNotEnabledException)
    return true;
  if (e instanceof RegionOfflineException)
    return true;
  if (e instanceof RetriesExhaustedException || e instanceof SocketTimeoutException) {
    if (e.getCause() instanceof RemoteException) {
      RemoteException re = (RemoteException) e.getCause();
      if (PleaseHoldException.class.getName().equals(re.getClassName()))
        return true;
    }
  }
  return false;
}
Developer: splicemachine | Project: spliceengine | Lines: 17 | Source: RegionServerLifecycle.java
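This predicate decides whether a startup-time failure only means the master is still initializing, so the operation should simply be retried. A minimal, self-contained sketch of such a retry loop is shown below; the class name, the Runnable action, the 10-attempt cap, and the 1-second back-off are hypothetical and only illustrate how a causeIsPleaseHold-style check is typically consumed.

import java.io.IOException;
import java.util.function.Predicate;

// Hypothetical caller: keep retrying a cluster-startup action while the
// failure merely indicates that the master is still coming up.
public final class StartupRetrySketch {

  static void runWithPleaseHoldRetries(Runnable action,
      Predicate<Throwable> causeIsPleaseHold) throws IOException {
    int attempts = 0;
    while (true) {
      try {
        action.run();
        return;
      } catch (RuntimeException e) {
        // Give up if this is not a "please hold" condition or the attempt budget is spent.
        if (!causeIsPleaseHold.test(e) || ++attempts >= 10) {
          throw new IOException("Giving up after " + attempts + " attempts", e);
        }
        try {
          Thread.sleep(1000L); // back off before the next attempt
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new IOException("Interrupted while waiting for startup", ie);
        }
      }
    }
  }
}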
Example 6: handleConnectionException
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
/**
 * @param retries current retried times.
 * @param maxAttmpts max attempts
 * @param protocol protocol interface
 * @param addr address of remote service
 * @param ce ConnectException
 * @throws RetriesExhaustedException
 */
private static void handleConnectionException(int retries, int maxAttmpts,
    Class<?> protocol, InetSocketAddress addr, ConnectException ce)
    throws RetriesExhaustedException {
  if (maxAttmpts >= 0 && retries >= maxAttmpts) {
    LOG.info("Server at " + addr + " could not be reached after "
        + maxAttmpts + " tries, giving up.");
    throw new RetriesExhaustedException("Failed setting up proxy " + protocol
        + " to " + addr.toString() + " after attempts=" + maxAttmpts, ce);
  }
}
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 19 | Source: HBaseRPC.java
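handleConnectionException only throws once the attempt budget is used up; a caller is expected to drive it (or equivalent logic) from a reconnect loop, as the waitForProxy method in Example 14 does. Below is a self-contained sketch of that loop pattern; the Connector interface, the class name, and the fixed 1-second back-off are hypothetical and serve only to illustrate the control flow.

import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;

import org.apache.hadoop.hbase.client.RetriesExhaustedException;

// Hypothetical reconnect loop: retry a connect attempt and give up with
// RetriesExhaustedException once the attempt budget is exhausted.
public class ReconnectLoopSketch {

  interface Connector<T> {
    T connect() throws ConnectException;
  }

  static <T> T connectWithRetries(Connector<T> connector, InetSocketAddress addr,
      int maxAttempts) throws IOException {
    int retries = 0;
    while (true) {
      try {
        return connector.connect();
      } catch (ConnectException ce) {
        retries++;
        if (maxAttempts >= 0 && retries >= maxAttempts) {
          throw new RetriesExhaustedException(
              "Failed connecting to " + addr + " after attempts=" + maxAttempts, ce);
        }
      }
      try {
        Thread.sleep(1000L); // back off before the next attempt
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted while retrying " + addr, ie);
      }
    }
  }
}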
Example 7: testTimeoutWaitForMeta
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
@Test (expected = RetriesExhaustedException.class)
public void testTimeoutWaitForMeta()
    throws IOException, InterruptedException {
  HConnection connection =
      HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
  final CatalogTracker ct = constructAndStartCatalogTracker(connection);
  ct.waitForMeta(100);
}
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 9 | Source: TestCatalogTracker.java
Example 8: testRetriesExhaustedFailure
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
private void testRetriesExhaustedFailure(final TableName tableName,
    final MockRSExecutor executor) throws Exception {
  final RegionInfo hri = createRegionInfo(tableName, 1);
  // collect AM metrics before test
  collectAssignmentManagerMetrics();
  // Test Assign operation failure
  rsDispatcher.setMockRsExecutor(executor);
  try {
    waitOnFuture(submitProcedure(am.createAssignProcedure(hri)));
    fail("unexpected assign completion");
  } catch (RetriesExhaustedException e) {
    // expected exception
    LOG.info("expected exception from assign operation: " + e.getMessage(), e);
  }
  // Assign the region (without problems)
  rsDispatcher.setMockRsExecutor(new GoodRsExecutor());
  waitOnFuture(submitProcedure(am.createAssignProcedure(hri)));
  // TODO: Currently unassign just keeps trying until it sees a server crash.
  // There is no count on unassign.
  /*
  // Test Unassign operation failure
  rsDispatcher.setMockRsExecutor(executor);
  waitOnFuture(submitProcedure(am.createUnassignProcedure(hri, null, false)));
  assertEquals(assignSubmittedCount + 2, assignProcMetrics.getSubmittedCounter().getCount());
  assertEquals(assignFailedCount + 1, assignProcMetrics.getFailedCounter().getCount());
  assertEquals(unassignSubmittedCount + 1, unassignProcMetrics.getSubmittedCounter().getCount());
  // TODO: We supposed to have 1 failed assign, 1 successful assign and a failed unassign
  // operation. But ProcV2 framework marks aborted unassign operation as success. Fix it!
  assertEquals(unassignFailedCount, unassignProcMetrics.getFailedCounter().getCount());
  */
}
Developer: apache | Project: hbase | Lines: 38 | Source: TestAssignmentManager.java
Example 9: callRefreshRegionHFilesEndPoint
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
private void callRefreshRegionHFilesEndPoint() throws IOException {
  try {
    RefreshHFilesClient refreshHFilesClient = new RefreshHFilesClient(CONF);
    refreshHFilesClient.refreshHFiles(TABLE_NAME);
  } catch (RetriesExhaustedException rex) {
    if (rex.getCause() instanceof IOException)
      throw new IOException();
  } catch (Throwable ex) {
    LOG.error(ex.toString(), ex);
    fail("Couldn't call the RefreshRegionHFilesEndpoint");
  }
}
Developer: apache | Project: hbase | Lines: 13 | Source: TestRefreshHFilesEndpoint.java
Example 10: testTimeoutWaitForMeta
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
@Test (expected = RetriesExhaustedException.class)
public void testTimeoutWaitForMeta()
    throws IOException, InterruptedException {
  HConnection connection =
      HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
  try {
    final CatalogTracker ct = constructAndStartCatalogTracker(connection);
    ct.waitForMeta(100);
  } finally {
    HConnectionManager.deleteConnection(UTIL.getConfiguration());
  }
}
Developer: zwqjsj0404 | Project: HBase-Research | Lines: 13 | Source: TestCatalogTracker.java
Example 11: handleConnectionException
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
/**
 * @param retries current retried times.
 * @param maxAttmpts max attempts
 * @param protocol protocol interface
 * @param addr address of remote service
 * @param ce ConnectException
 * @throws org.apache.hadoop.hbase.client.RetriesExhaustedException
 */
private static void handleConnectionException(int retries,
    int maxAttmpts,
    Class<?> protocol,
    InetSocketAddress addr,
    ConnectException ce)
    throws RetriesExhaustedException {
  if (maxAttmpts >= 0 && retries >= maxAttmpts) {
    LOG.info("Server at " + addr + " could not be reached after "
        + maxAttmpts + " tries, giving up.");
    throw new RetriesExhaustedException("Failed setting up proxy " + protocol
        + " to " + addr.toString() + " after attempts=" + maxAttmpts, ce);
  }
}
Developer: daidong | Project: DominoHBase | Lines: 23 | Source: HBaseClientRPC.java
Example 12: testTimeoutWaitForMeta
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
@Test (expected = RetriesExhaustedException.class)
public void testTimeoutWaitForMeta()
    throws IOException, InterruptedException {
  HConnection connection =
      HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
  try {
    final CatalogTracker ct = constructAndStartCatalogTracker(connection);
    ct.waitForMeta(100);
  } finally {
    HConnectionManager.deleteConnection(UTIL.getConfiguration(), true);
  }
}
Developer: daidong | Project: DominoHBase | Lines: 13 | Source: TestCatalogTracker.java
Example 13: startTransition
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
@Override
protected boolean startTransition(final MasterProcedureEnv env, final RegionStateNode regionNode)
    throws IOException {
  // If the region is already open we can't do much...
  if (regionNode.isInState(State.OPEN) && isServerOnline(env, regionNode)) {
    LOG.info("Assigned, not reassigning; " + this + "; " + regionNode.toShortString());
    return false;
  }
  // Don't assign if table is in disabling or disabled state.
  TableStateManager tsm = env.getMasterServices().getTableStateManager();
  TableName tn = regionNode.getRegionInfo().getTable();
  if (tsm.isTableState(tn, TableState.State.DISABLING, TableState.State.DISABLED)) {
    LOG.info("Table " + tn + " state=" + tsm.getTableState(tn) + ", skipping " + this);
    return false;
  }
  // If the region is SPLIT, we can't assign it. But state might be CLOSED, rather than
  // SPLIT which is what a region gets set to when Unassigned as part of SPLIT. FIX.
  if (regionNode.isInState(State.SPLIT) ||
      (regionNode.getRegionInfo().isOffline() && regionNode.getRegionInfo().isSplit())) {
    LOG.info("SPLIT, cannot be assigned; " + this + "; " + regionNode +
        "; hri=" + regionNode.getRegionInfo());
    return false;
  }
  // If we haven't started the operation yet, we can abort
  if (aborted.get() && regionNode.isInState(State.CLOSED, State.OFFLINE)) {
    if (incrementAndCheckMaxAttempts(env, regionNode)) {
      regionNode.setState(State.FAILED_OPEN);
      setFailure(getClass().getSimpleName(),
          new RetriesExhaustedException("Max attempts exceeded"));
    } else {
      setAbortFailure(getClass().getSimpleName(), "Abort requested");
    }
    return false;
  }
  // Send assign (add into assign-pool). Region is now in OFFLINE state. Setting offline state
  // scrubs what was the old region location. Setting a new regionLocation here is how we retain
  // old assignment or specify target server if a move or merge. See
  // AssignmentManager#processAssignQueue. Otherwise, balancer gives us location.
  ServerName lastRegionLocation = regionNode.offline();
  boolean retain = false;
  if (!forceNewPlan) {
    if (this.targetServer != null) {
      retain = targetServer.equals(lastRegionLocation);
      regionNode.setRegionLocation(targetServer);
    } else {
      if (lastRegionLocation != null) {
        // Try and keep the location we had before we offlined.
        retain = true;
        regionNode.setRegionLocation(lastRegionLocation);
      } else if (regionNode.getLastHost() != null) {
        retain = true;
        LOG.info("Setting lastHost as the region location " + regionNode.getLastHost());
        regionNode.setRegionLocation(regionNode.getLastHost());
      }
    }
  }
  LOG.info("Start " + this + "; " + regionNode.toShortString() +
      "; forceNewPlan=" + this.forceNewPlan +
      ", retain=" + retain);
  env.getAssignmentManager().queueAssign(regionNode);
  return true;
}
Developer: apache | Project: hbase | Lines: 65 | Source: AssignProcedure.java
Example 14: waitForProxy
import org.apache.hadoop.hbase.client.RetriesExhaustedException; // import the required package/class
/**
 * @param protocol protocol interface
 * @param clientVersion which client version we expect
 * @param addr address of remote service
 * @param conf configuration
 * @param maxAttempts max attempts
 * @param rpcTimeout timeout for each RPC
 * @param timeout timeout in milliseconds
 * @return proxy
 * @throws IOException e
 */
@SuppressWarnings("unchecked")
public static VersionedProtocol waitForProxy(Class protocol,
    long clientVersion,
    InetSocketAddress addr,
    Configuration conf,
    int maxAttempts,
    int rpcTimeout,
    long timeout) throws IOException {
  // HBase does limited number of reconnects which is different from hadoop.
  long startTime = System.currentTimeMillis();
  IOException ioe;
  int reconnectAttempts = 0;
  while (true) {
    try {
      return getProxy(protocol, clientVersion, addr, conf, rpcTimeout);
    } catch (ConnectException se) { // namenode has not been started
      ioe = se;
      if (maxAttempts >= 0 && ++reconnectAttempts >= maxAttempts) {
        LOG.info("Server at " + addr + " could not be reached after " +
            reconnectAttempts + " tries, giving up.");
        throw new RetriesExhaustedException("Failed setting up proxy " +
            protocol + " to " + addr.toString() + " after attempts=" +
            reconnectAttempts, se);
      }
    } catch (SocketTimeoutException te) { // namenode is busy
      LOG.info("Problem connecting to server: " + addr);
      ioe = te;
    }
    // check if timed out
    if (System.currentTimeMillis() - timeout >= startTime) {
      throw ioe;
    }
    // wait for retry
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ie) {
      // IGNORE
    }
  }
}
Developer: lifeng5042 | Project: RStore | Lines: 54 | Source: HBaseRPC.java
Note: the org.apache.hadoop.hbase.client.RetriesExhaustedException class examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.