From: git-site-role@apache.org
To: commits@hbase.apache.org
Date: Wed, 03 May 2017 14:59:15 -0000
Message-Id: <03c9fbc4e48841478dc44c995bfa67f4@git.apache.org>
Subject: [21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
index f3f7a46..8750fa2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
@@ -56,2015 +56,2125 @@
048import org.apache.hadoop.hbase.MetaTableAccessor; 049import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; 050import org.apache.hadoop.hbase.NotServingRegionException; -051import org.apache.hadoop.hbase.RegionLocations; -052import org.apache.hadoop.hbase.ServerName; -053import org.apache.hadoop.hbase.NamespaceDescriptor; -054import org.apache.hadoop.hbase.HConstants; -055import org.apache.hadoop.hbase.TableExistsException; -056import org.apache.hadoop.hbase.TableName; -057import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -058import org.apache.hadoop.hbase.TableNotDisabledException; -059import org.apache.hadoop.hbase.TableNotFoundException; -060import org.apache.hadoop.hbase.UnknownRegionException; -061import org.apache.hadoop.hbase.classification.InterfaceAudience; -062import org.apache.hadoop.hbase.classification.InterfaceStability; -063import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -064import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -065import org.apache.hadoop.hbase.client.Scan.ReadType; -066import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -067import org.apache.hadoop.hbase.client.replication.TableCFs; -068import org.apache.hadoop.hbase.exceptions.DeserializationException; -069import org.apache.hadoop.hbase.ipc.HBaseRpcController; -070import org.apache.hadoop.hbase.quotas.QuotaFilter; -071import org.apache.hadoop.hbase.quotas.QuotaSettings; -072import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -073import org.apache.hadoop.hbase.replication.ReplicationException; -074import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -075import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -076import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -077import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -078import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -080import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; -103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; -104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; -105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; -106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; -107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; -108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; -109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; -110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; -111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; -112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; -113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; -114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; -115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; -116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; -117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; -118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; -119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; -120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; -121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; -122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; -123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; -124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; -125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; -126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; -127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; -128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse; -129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; -130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse; -131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; -138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; -139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; -140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse; -141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; -142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; -143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; -144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; -145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; -146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; -147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; -148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse; -149import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; -150import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; -151import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; -152import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse; -153import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest; -154import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse; -155import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest; -156import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; -157import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest; -158import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse; -159import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest; -160import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse; -161import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; -162import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; -163import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; -164import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; -165import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; -166import org.apache.hadoop.hbase.util.Bytes; -167import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -168import org.apache.hadoop.hbase.util.ForeignExceptionUtil; -169import org.apache.hadoop.hbase.util.Pair; -170 -171/** -172 * The implementation of AsyncAdmin. 
-173 */ -174@InterfaceAudience.Private -175@InterfaceStability.Evolving -176public class AsyncHBaseAdmin implements AsyncAdmin { -177 -178 private static final Log LOG = LogFactory.getLog(AsyncHBaseAdmin.class); -179 -180 private final AsyncConnectionImpl connection; +051import org.apache.hadoop.hbase.ProcedureInfo; +052import org.apache.hadoop.hbase.RegionLocations; +053import org.apache.hadoop.hbase.ServerName; +054import org.apache.hadoop.hbase.NamespaceDescriptor; +055import org.apache.hadoop.hbase.HConstants; +056import org.apache.hadoop.hbase.TableExistsException; +057import org.apache.hadoop.hbase.TableName; +058import org.apache.hadoop.hbase.AsyncMetaTableAccessor; +059import org.apache.hadoop.hbase.TableNotDisabledException; +060import org.apache.hadoop.hbase.TableNotFoundException; +061import org.apache.hadoop.hbase.UnknownRegionException; +062import org.apache.hadoop.hbase.classification.InterfaceAudience; +063import org.apache.hadoop.hbase.classification.InterfaceStability; +064import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; +065import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; +066import org.apache.hadoop.hbase.client.Scan.ReadType; +067import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; +068import org.apache.hadoop.hbase.client.replication.TableCFs; +069import org.apache.hadoop.hbase.exceptions.DeserializationException; +070import org.apache.hadoop.hbase.ipc.HBaseRpcController; +071import org.apache.hadoop.hbase.quotas.QuotaFilter; +072import org.apache.hadoop.hbase.quotas.QuotaSettings; +073import org.apache.hadoop.hbase.quotas.QuotaTableUtil; +074import org.apache.hadoop.hbase.replication.ReplicationException; +075import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +076import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; +077import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; +078import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +079import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; +080import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; +081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; +082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; +083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; +084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; +085import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +086import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; +087import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; +088import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; +089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; +090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; +091import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; +092import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; +093import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; +094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; +095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; +096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; +097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; +098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; +099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; +100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; +101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; +102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; +103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; +104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; +105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; +106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; +107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; +108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; +109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; +110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; +111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; +112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; +113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; +114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; +115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; +116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; +117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; +118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; +119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; +120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; +121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; +122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; +123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; +124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; +125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; +126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; +127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; +128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; +129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; +130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; +131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; +132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; +133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; +134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; +135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest; +136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse; +137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; +138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; +139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse; +140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; +141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse; +142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest; +143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse; +144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest; +145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse; +146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; +147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; +148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; +149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; +150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; +151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse; +152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; +153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; +154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; +155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; +156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; +157import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; +158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; +159import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse; +160import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; +161import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; +162import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; +163import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse; +164import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest; +165import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse; +166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest; +167import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; +168import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest; +169import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse; +170import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest; +171import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse; +172import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; +173import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; +174import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; +175import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; +176import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +177import org.apache.hadoop.hbase.util.Bytes; +178import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +179import org.apache.hadoop.hbase.util.ForeignExceptionUtil; +180import org.apache.hadoop.hbase.util.Pair; 181 -182 private final RawAsyncTable metaTable; -183 -184 private final long rpcTimeoutNs; -185 -186 private final long operationTimeoutNs; -187 -188 private final long pauseNs; -189 -190 private final int maxAttempts; -191 -192 private final int startLogErrorsCnt; -193 -194 private final NonceGenerator ng; -195 -196 AsyncHBaseAdmin(AsyncConnectionImpl connection) { -197 this.connection = connection; -198 this.metaTable = connection.getRawTable(META_TABLE_NAME); -199 this.rpcTimeoutNs = connection.connConf.getRpcTimeoutNs(); -200 this.operationTimeoutNs = connection.connConf.getOperationTimeoutNs(); -201 this.pauseNs = connection.connConf.getPauseNs(); -202 this.maxAttempts = connection.connConf.getMaxRetries(); -203 this.startLogErrorsCnt = connection.connConf.getStartLogErrorsCnt(); -204 this.ng = connection.getNonceGenerator(); -205 } +182/** +183 * The implementation of AsyncAdmin. 
+184 */ +185@InterfaceAudience.Private +186@InterfaceStability.Evolving +187public class AsyncHBaseAdmin implements AsyncAdmin { +188 +189 private static final Log LOG = LogFactory.getLog(AsyncHBaseAdmin.class); +190 +191 private final AsyncConnectionImpl connection; +192 +193 private final RawAsyncTable metaTable; +194 +195 private final long rpcTimeoutNs; +196 +197 private final long operationTimeoutNs; +198 +199 private final long pauseNs; +200 +201 private final int maxAttempts; +202 +203 private final int startLogErrorsCnt; +204 +205 private final NonceGenerator ng; 206 -207 private <T> MasterRequestCallerBuilder<T> newMasterCaller() { -208 return this.connection.callerFactory.<T> masterRequest() -209 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) -210 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) -211 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) -212 .startLogErrorsCnt(startLogErrorsCnt); -213 } -214 -215 private <T> AdminRequestCallerBuilder<T> newAdminCaller() { -216 return this.connection.callerFactory.<T> adminRequest() -217 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) -218 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) -219 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) -220 .startLogErrorsCnt(startLogErrorsCnt); -221 } -222 -223 @FunctionalInterface -224 private interface MasterRpcCall<RESP, REQ> { -225 void call(MasterService.Interface stub, HBaseRpcController controller, REQ req, -226 RpcCallback<RESP> done); -227 } -228 -229 @FunctionalInterface -230 private interface AdminRpcCall<RESP, REQ> { -231 void call(AdminService.Interface stub, HBaseRpcController controller, REQ req, -232 RpcCallback<RESP> done); -233 } -234 -235 @FunctionalInterface -236 private interface Converter<D, S> { -237 D convert(S src) throws IOException; +207 AsyncHBaseAdmin(AsyncConnectionImpl connection) { +208 this.connection = connection; +209 this.metaTable = connection.getRawTable(META_TABLE_NAME); +210 this.rpcTimeoutNs = connection.connConf.getRpcTimeoutNs(); +211 this.operationTimeoutNs = connection.connConf.getOperationTimeoutNs(); +212 this.pauseNs = connection.connConf.getPauseNs(); +213 this.maxAttempts = connection.connConf.getMaxRetries(); +214 this.startLogErrorsCnt = connection.connConf.getStartLogErrorsCnt(); +215 this.ng = connection.getNonceGenerator(); +216 } +217 +218 private <T> MasterRequestCallerBuilder<T> newMasterCaller() { +219 return this.connection.callerFactory.<T> masterRequest() +220 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) +221 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) +222 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) +223 .startLogErrorsCnt(startLogErrorsCnt); +224 } +225 +226 private <T> AdminRequestCallerBuilder<T> newAdminCaller() { +227 return this.connection.callerFactory.<T> adminRequest() +228 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) +229 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) +230 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) +231 .startLogErrorsCnt(startLogErrorsCnt); +232 } +233 +234 @FunctionalInterface +235 private interface MasterRpcCall<RESP, REQ> { +236 void call(MasterService.Interface stub, HBaseRpcController controller, REQ req, +237 RpcCallback<RESP> done); 238 } 239 -240 private <PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller, -241 MasterService.Interface stub, PREQ preq, MasterRpcCall<PRESP, PREQ> rpcCall, -242 Converter<RESP, PRESP> respConverter) { -243 
CompletableFuture<RESP> future = new CompletableFuture<>(); -244 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { +240 @FunctionalInterface +241 private interface AdminRpcCall<RESP, REQ> { +242 void call(AdminService.Interface stub, HBaseRpcController controller, REQ req, +243 RpcCallback<RESP> done); +244 } 245 -246 @Override -247 public void run(PRESP resp) { -248 if (controller.failed()) { -249 future.completeExceptionally(controller.getFailed()); -250 } else { -251 try { -252 future.complete(respConverter.convert(resp)); -253 } catch (IOException e) { -254 future.completeExceptionally(e); -255 } -256 } -257 } -258 }); -259 return future; -260 } -261 -262 //TODO abstract call and adminCall into a single method. -263 private <PREQ, PRESP, RESP> CompletableFuture<RESP> adminCall(HBaseRpcController controller, -264 AdminService.Interface stub, PREQ preq, AdminRpcCall<PRESP, PREQ> rpcCall, -265 Converter<RESP, PRESP> respConverter) { -266 -267 CompletableFuture<RESP> future = new CompletableFuture<>(); -268 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { -269 -270 @Override -271 public void run(PRESP resp) { -272 if (controller.failed()) { -273 future.completeExceptionally(new IOException(controller.errorText())); -274 } else { -275 try { -276 future.complete(respConverter.convert(resp)); -277 } catch (IOException e) { -278 future.completeExceptionally(e); -279 } -280 } -281 } -282 }); -283 return future; -284 } -285 -286 private <PREQ, PRESP> CompletableFuture<Void> procedureCall(PREQ preq, -287 MasterRpcCall<PRESP, PREQ> rpcCall, Converter<Long, PRESP> respConverter, -288 ProcedureBiConsumer consumer) { -289 CompletableFuture<Long> procFuture = this -290 .<Long> newMasterCaller() -291 .action( -292 (controller, stub) -> this.<PREQ, PRESP, Long> call(controller, stub, preq, rpcCall, -293 respConverter)).call(); -294 return waitProcedureResult(procFuture).whenComplete(consumer); +246 @FunctionalInterface +247 private interface Converter<D, S> { +248 D convert(S src) throws IOException; +249 } +250 +251 private <PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller, +252 MasterService.Interface stub, PREQ preq, MasterRpcCall<PRESP, PREQ> rpcCall, +253 Converter<RESP, PRESP> respConverter) { +254 CompletableFuture<RESP> future = new CompletableFuture<>(); +255 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { +256 +257 @Override +258 public void run(PRESP resp) { +259 if (controller.failed()) { +260 future.completeExceptionally(controller.getFailed()); +261 } else { +262 try { +263 future.complete(respConverter.convert(resp)); +264 } catch (IOException e) { +265 future.completeExceptionally(e); +266 } +267 } +268 } +269 }); +270 return future; +271 } +272 +273 //TODO abstract call and adminCall into a single method. 
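// The call()/adminCall() methods above adapt a callback-style protobuf RPC into a
// CompletableFuture. The following is a minimal, self-contained sketch of that
// adaptation pattern only; Callback, Controller and Converter below are simplified
// stand-ins for illustration, not the HBase/protobuf classes used in this file.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public class CallbackToFutureSketch {

  interface Callback<T> { void run(T response); }                      // stand-in for RpcCallback
  interface Controller { boolean failed(); IOException getFailed(); }  // stand-in for HBaseRpcController
  interface Converter<D, S> { D convert(S src) throws IOException; }

  // Complete the returned future from inside the callback, propagating controller
  // failures and response-conversion errors exceptionally.
  static <PRESP, RESP> CompletableFuture<RESP> adapt(Controller controller,
      BiConsumer<Controller, Callback<PRESP>> rpcCall, Converter<RESP, PRESP> respConverter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    rpcCall.accept(controller, resp -> {
      if (controller.failed()) {
        future.completeExceptionally(controller.getFailed());
      } else {
        try {
          future.complete(respConverter.convert(resp));
        } catch (IOException e) {
          future.completeExceptionally(e);
        }
      }
    });
    return future;
  }

  public static void main(String[] args) {
    Controller ok = new Controller() {
      public boolean failed() { return false; }
      public IOException getFailed() { return null; }
    };
    // Simulate an RPC whose raw response is the string "42" and convert it to an Integer.
    CompletableFuture<Integer> f = CallbackToFutureSketch.<String, Integer> adapt(
        ok, (c, cb) -> cb.run("42"), s -> Integer.valueOf(s));
    f.thenAccept(v -> System.out.println("converted response: " + v)).join();
  }
}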
+274 private <PREQ, PRESP, RESP> CompletableFuture<RESP> adminCall(HBaseRpcController controller, +275 AdminService.Interface stub, PREQ preq, AdminRpcCall<PRESP, PREQ> rpcCall, +276 Converter<RESP, PRESP> respConverter) { +277 +278 CompletableFuture<RESP> future = new CompletableFuture<>(); +279 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { +280 +281 @Override +282 public void run(PRESP resp) { +283 if (controller.failed()) { +284 future.completeExceptionally(new IOException(controller.errorText())); +285 } else { +286 try { +287 future.complete(respConverter.convert(resp)); +288 } catch (IOException e) { +289 future.completeExceptionally(e); +290 } +291 } +292 } +293 }); +294 return future; 295 } 296 -297 @FunctionalInterface -298 private interface TableOperator { -299 CompletableFuture<Void> operate(TableName table); -300 } -301 -302 private CompletableFuture<TableDescriptor[]> batchTableOperations(Pattern pattern, -303 TableOperator operator, String operationType) { -304 CompletableFuture<TableDescriptor[]> future = new CompletableFuture<>(); -305 List<TableDescriptor> failed = new LinkedList<>(); -306 listTables(pattern, false).whenComplete( -307 (tables, error) -> { -308 if (error != null) { -309 future.completeExceptionally(error); -310 return; -311 } -312 CompletableFuture[] futures = Arrays.stream(tables) -313 .map((table) -> operator.operate(table.getTableName()).whenComplete((v, ex) -> { -314 if (ex != null) { -315 LOG.info("Failed to " + operationType + " table " + table.getTableName(), ex); -316 failed.add(table); -317 } -318 })).<CompletableFuture> toArray(size -> new CompletableFuture[size]); -319 CompletableFuture.allOf(futures).thenAccept((v) -> { -320 future.complete(failed.toArray(new TableDescriptor[failed.size()])); -321 }); -322 }); -323 return future; -324 } -325 -326 @Override -327 public AsyncConnectionImpl getConnection() { -328 return this.connection; -329 } -330 -331 @Override -332 public CompletableFuture<Boolean> tableExists(TableName tableName) { -333 return AsyncMetaTableAccessor.tableExists(metaTable, tableName); -334 } -335 -336 @Override -337 public CompletableFuture<TableDescriptor[]> listTables() { -338 return listTables((Pattern) null, false); -339 } -340 -341 @Override -342 public CompletableFuture<TableDescriptor[]> listTables(String regex, boolean includeSysTables) { -343 return listTables(Pattern.compile(regex), false); -344 } -345 -346 @Override -347 public CompletableFuture<TableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables) { -348 return this -349 .<TableDescriptor[]>newMasterCaller() -350 .action( -351 (controller, stub) -> this -352 .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, TableDescriptor[]> call( -353 controller, stub, RequestConverter.buildGetTableDescriptorsRequest(pattern, -354 includeSysTables), (s, c, req, done) -> s.getTableDescriptors(c, req, done), ( -355 resp) -> ProtobufUtil.getTableDescriptorArray(resp))).call(); -356 } -357 -358 @Override -359 public CompletableFuture<TableName[]> listTableNames() { -360 return listTableNames((Pattern) null, false); -361 } -362 -363 @Override -364 public CompletableFuture<TableName[]> listTableNames(String regex, boolean includeSysTables) { -365 return listTableNames(Pattern.compile(regex), false); -366 } -367 -368 @Override -369 public CompletableFuture<TableName[]> listTableNames(Pattern pattern, boolean includeSysTables) { -370 return this -371 .<TableName[]>newMasterCaller() -372 .action( -373 (controller, stub) -> this -374 
.<GetTableNamesRequest, GetTableNamesResponse, TableName[]> call(controller, stub, -375 RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables), (s, c, req, -376 done) -> s.getTableNames(c, req, done), (resp) -> ProtobufUtil -377 .getTableNameArray(resp.getTableNamesList()))).call(); -378 } -379 -380 @Override -381 public CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName) { -382 CompletableFuture<TableDescriptor> future = new CompletableFuture<>(); -383 this.<List<TableSchema>> newMasterCaller() -384 .action( -385 (controller, stub) -> this -386 .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableSchema>> call( -387 controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName), (s, -388 c, req, done) -> s.getTableDescriptors(c, req, done), (resp) -> resp -389 .getTableSchemaList())).call().whenComplete((tableSchemas, error) -> { -390 if (error != null) { -391 future.completeExceptionally(error); -392 return; -393 } -394 if (!tableSchemas.isEmpty()) { -395 future.complete(ProtobufUtil.convertToTableDesc(tableSchemas.get(0))); -396 } else { -397 future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString())); -398 } -399 }); -400 return future; -401 } -402 -403 @Override -404 public CompletableFuture<Void> createTable(TableDescriptor desc) { -405 return createTable(desc, null); -406 } -407 -408 @Override -409 public CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, -410 int numRegions) { -411 try { -412 return createTable(desc, getSplitKeys(startKey, endKey, numRegions)); -413 } catch (IllegalArgumentException e) { -414 return failedFuture(e); -415 } -416 } -417 -418 @Override -419 public CompletableFuture<Void> createTable(TableDescriptor desc, byte[][] splitKeys) { -420 if (desc.getTableName() == null) { -421 return failedFuture(new IllegalArgumentException("TableName cannot be null")); -422 } -423 if (splitKeys != null && splitKeys.length > 0) { -424 Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR); -425 // Verify there are no duplicate split keys -426 byte[] lastKey = null; -427 for (byte[] splitKey : splitKeys) { -428 if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) { -429 return failedFuture(new IllegalArgumentException( -430 "Empty split key must not be passed in the split keys.")); -431 } -432 if (lastKey != null && Bytes.equals(splitKey, lastKey)) { -433 return failedFuture(new IllegalArgumentException("All split keys must be unique, " -434 + "found duplicate: " + Bytes.toStringBinary(splitKey) + ", " -435 + Bytes.toStringBinary(lastKey))); -436 } -437 lastKey = splitKey; -438 } -439 } -440 -441 return this.<CreateTableRequest, CreateTableResponse> procedureCall( -442 RequestConverter.buildCreateTableRequest(desc, splitKeys, ng.getNonceGroup(), ng.newNonce()), -443 (s, c, req, done) -> s.createTable(c, req, done), (resp) -> resp.getProcId(), -444 new CreateTableProcedureBiConsumer(this, desc.getTableName())); -445 } -446 -447 @Override -448 public CompletableFuture<Void> deleteTable(TableName tableName) { -449 return this.<DeleteTableRequest, DeleteTableResponse> procedureCall(RequestConverter -450 .buildDeleteTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()), -451 (s, c, req, done) -> s.deleteTable(c, req, done), (resp) -> resp.getProcId(), -452 new DeleteTableProcedureBiConsumer(this, tableName)); -453 } -454 -455 @Override -456 public CompletableFuture<TableDescriptor[]> deleteTables(String regex) { -457 return 
deleteTables(Pattern.compile(regex)); -458 } -459 -460 @Override -461 public CompletableFuture<TableDescriptor[]> deleteTables(Pattern pattern) { -462 return batchTableOperations(pattern, (table) -> deleteTable(table), "DELETE"); -463 } -464 -465 @Override -466 public CompletableFuture<Void> truncateTable(TableName tableName, boolean preserveSplits) { -467 return this.<TruncateTableRequest, TruncateTableResponse> procedureCall( -468 RequestConverter.buildTruncateTableRequest(tableName, preserveSplits, ng.getNonceGroup(), -469 ng.newNonce()), (s, c, req, done) -> s.truncateTable(c, req, done), -470 (resp) -> resp.getProcId(), new TruncateTableProcedureBiConsumer(this, tableName)); -471 } -472 -473 @Override -474 public CompletableFuture<Void> enableTable(TableName tableName) { -475 return this.<EnableTableRequest, EnableTableResponse> procedureCall(RequestConverter -476 .buildEnableTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()), -477 (s, c, req, done) -> s.enableTable(c, req, done), (resp) -> resp.getProcId(), -478 new EnableTableProcedureBiConsumer(this, tableName)); -479 } -480 -481 @Override -482 public CompletableFuture<TableDescriptor[]> enableTables(String regex) { -483 return enableTables(Pattern.compile(regex)); -484 } -485 -486 @Override -487 public CompletableFuture<TableDescriptor[]> enableTables(Pattern pattern) { -488 return batchTableOperations(pattern, (table) -> enableTable(table), "ENABLE"); -489 } -490 -491 @Override -492 public CompletableFuture<Void> disableTable(TableName tableName) { -493 return this.<DisableTableRequest, DisableTableResponse> procedureCall(RequestConverter -494 .buildDisableTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()), -495 (s, c, req, done) -> s.disableTable(c, req, done), (resp) -> resp.getProcId(), -496 new DisableTableProcedureBiConsumer(this, tableName)); -497 } -498 -499 @Override -500 public CompletableFuture<TableDescriptor[]> disableTables(String regex) { -501 return disableTables(Pattern.compile(regex)); -502 } -503 -504 @Override -505 public CompletableFuture<TableDescriptor[]> disableTables(Pattern pattern) { -506 return batchTableOperations(pattern, (table) -> disableTable(table), "DISABLE"); -507 } -508 +297 private <PREQ, PRESP> CompletableFuture<Void> procedureCall(PREQ preq, +298 MasterRpcCall<PRESP, PREQ> rpcCall, Converter<Long, PRESP> respConverter, +299 ProcedureBiConsumer consumer) { +300 CompletableFuture<Long> procFuture = this +301 .<Long> newMasterCaller() +302 .action( +303 (controller, stub) -> this.<PREQ, PRESP, Long> call(controller, stub, preq, rpcCall, +304 respConverter)).call(); +305 return waitProcedureResult(procFuture).whenComplete(consumer); +306 } +307 +308 @FunctionalInterface +309 private interface TableOperator { +310 CompletableFuture<Void> operate(TableName table); +311 } +312 +313 private CompletableFuture<TableDescriptor[]> batchTableOperations(Pattern pattern, +314 TableOperator operator, String operationType) { +315 CompletableFuture<TableDescriptor[]> future = new CompletableFuture<>(); +316 List<TableDescriptor> failed = new LinkedList<>(); +317 listTables(pattern, false).whenComplete( +318 (tables, error) -> { +319 if (error != null) { +320 future.completeExceptionally(error); +321 return; +322 } +323 CompletableFuture[] futures = Arrays.stream(tables) +324 .map((table) -> operator.operate(table.getTableName()).whenComplete((v, ex) -> { +325 if (ex != null) { +326 LOG.info("Failed to " + operationType + " table " + table.getTableName(), ex); +327 failed.add(table); +328 } 
+329 })).<CompletableFuture> toArray(size -> new CompletableFuture[size]); +330 CompletableFuture.allOf(futures).thenAccept((v) -> { +331 future.complete(failed.toArray(new TableDescriptor[failed.size()])); +332 }); +333 }); +334 return future; +335 } +336 +337 @Override +338 public AsyncConnectionImpl getConnection() { +339 return this.connection; +340 } +341 +342 @Override +343 public CompletableFuture<Boolean> tableExists(TableName tableName) { +344 return AsyncMetaTableAccessor.tableExists(metaTable, tableName); +345 } +346 +347 @Override +348 public CompletableFuture<TableDescriptor[]> listTables() { +349 return listTables((Pattern) null, false); +350 } +351 +352 @Override +353 public CompletableFuture<TableDescriptor[]> listTables(String regex, boolean includeSysTables) { +354 return listTables(Pattern.compile(regex), false); +355 } +356 +357 @Override +358 public CompletableFuture<TableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables) { +359 return this +360 .<TableDescriptor[]>newMasterCaller() +361 .action( +362 (controller, stub) -> this +363 .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, TableDescriptor[]> call( +364 controller, stub, RequestConverter.buildGetTableDescriptorsRequest(pattern, +365 includeSysTables), (s, c, req, done) -> s.getTableDescriptors(c, req, done), ( +366 resp) -> ProtobufUtil.getTableDescriptorArray(resp))).call(); +367 } +368 +369 @Override +370 public
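For context, every admin operation visible in this diff returns a CompletableFuture instead of blocking. Below is a minimal usage sketch that chains only methods shown above (tableExists, disableTable, deleteTable); the getAsyncAdmin() helper is hypothetical and merely stands in for obtaining an AsyncAdmin instance, which is outside this excerpt.

import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class AsyncAdminUsageSketch {

  // Hypothetical helper: acquiring the AsyncAdmin (e.g. from an async connection) is
  // not part of this excerpt, so it is only stubbed out here.
  static AsyncAdmin getAsyncAdmin() {
    throw new UnsupportedOperationException("obtain an AsyncAdmin from your connection here");
  }

  public static void main(String[] args) {
    AsyncAdmin admin = getAsyncAdmin();
    TableName tn = TableName.valueOf("example_table");

    // Chain the non-blocking calls: check existence, then disable and delete if present.
    CompletableFuture<Void> done = admin.tableExists(tn).thenCompose(exists -> {
      if (!exists) {
        return CompletableFuture.<Void> completedFuture(null);
      }
      return admin.disableTable(tn).thenCompose(v -> admin.deleteTable(tn));
    });
    done.join(); // block only at the outermost edge of the application
  }
}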