Return-Path:
X-Original-To: archive-asf-public-internal@cust-asf2.ponee.io
Delivered-To: archive-asf-public-internal@cust-asf2.ponee.io
Received: from cust-asf.ponee.io (cust-asf.ponee.io [163.172.22.183]) by cust-asf2.ponee.io (Postfix) with ESMTP id 798BC200CB3 for ; Mon, 26 Jun 2017 17:01:20 +0200 (CEST)
Received: by cust-asf.ponee.io (Postfix) id 78332160C01; Mon, 26 Jun 2017 15:01:20 +0000 (UTC)
Delivered-To: archive-asf-public@cust-asf.ponee.io
Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by cust-asf.ponee.io (Postfix) with SMTP id 513DB160BDE for ; Mon, 26 Jun 2017 17:01:18 +0200 (CEST)
Received: (qmail 75804 invoked by uid 500); 26 Jun 2017 15:01:17 -0000
Mailing-List: contact commits-help@hbase.apache.org; run by ezmlm
Precedence: bulk
List-Help:
List-Unsubscribe:
List-Post:
List-Id:
Reply-To: dev@hbase.apache.org
Delivered-To: mailing list commits@hbase.apache.org
Received: (qmail 75788 invoked by uid 99); 26 Jun 2017 15:01:17 -0000
Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Mon, 26 Jun 2017 15:01:17 +0000
Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id 454B2E080D; Mon, 26 Jun 2017 15:01:17 +0000 (UTC)
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
From: git-site-role@apache.org
To: commits@hbase.apache.org
Date: Mon, 26 Jun 2017 15:01:18 -0000
Message-Id: <7973c71714f24612a0a67e7037bbe847@git.apache.org>
In-Reply-To: <7f7d7944d7124677bf1e8939f6b5d72a@git.apache.org>
References: <7f7d7944d7124677bf1e8939f6b5d72a@git.apache.org>
X-Mailer: ASF-Git Admin Mailer
Subject: [02/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
archived-at: Mon, 26 Jun 2017 15:01:20 -0000

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
index 01496d6..dc12c09 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
@@ -48,2406 +48,2267 @@
 040
 041import io.netty.util.Timeout;
 042import io.netty.util.TimerTask;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import org.apache.commons.logging.Log;
-046import org.apache.commons.logging.LogFactory;
-047import org.apache.hadoop.hbase.HRegionInfo;
-048import org.apache.hadoop.hbase.HRegionLocation;
-049import org.apache.hadoop.hbase.MetaTableAccessor;
-050import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-051import org.apache.hadoop.hbase.NotServingRegionException;
-052import org.apache.hadoop.hbase.ProcedureInfo;
-053import org.apache.hadoop.hbase.RegionLocations;
-054import org.apache.hadoop.hbase.ServerName;
-055import org.apache.hadoop.hbase.NamespaceDescriptor;
-056import org.apache.hadoop.hbase.HConstants;
-057import org.apache.hadoop.hbase.TableExistsException;
-058import org.apache.hadoop.hbase.TableName;
-059import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-060import org.apache.hadoop.hbase.TableNotDisabledException;
-061import org.apache.hadoop.hbase.TableNotEnabledException;
-062import org.apache.hadoop.hbase.TableNotFoundException;
-063import org.apache.hadoop.hbase.UnknownRegionException;
-064import org.apache.hadoop.hbase.classification.InterfaceAudience;
-065import org.apache.hadoop.hbase.classification.InterfaceStability;
-066import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-068import org.apache.hadoop.hbase.client.Scan.ReadType;
-069import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-070import org.apache.hadoop.hbase.client.replication.TableCFs;
-071import org.apache.hadoop.hbase.exceptions.DeserializationException;
-072import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-073import org.apache.hadoop.hbase.quotas.QuotaFilter;
-074import org.apache.hadoop.hbase.quotas.QuotaSettings;
-075import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import org.apache.hadoop.hbase.replication.ReplicationException;
-077import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-078import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-079import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-080import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-085import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; -103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; -108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; -109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; -111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; -114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; -115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; -116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; -117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; -118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; -119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; -120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; -121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; -122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; -123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; -124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; -125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; -126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; -127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; -128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; -129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; -130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; -131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; -138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; -139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; -140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; -141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest; -142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse; -143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; -144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; -145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse; -146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; -147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse; -148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest; -149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse; -150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest; -151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse; -152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; -153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; -154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; -155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; -156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; -157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse; -158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; -159import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; -160import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; -161import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; -162import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; -163import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; -164import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; -165import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse; -166import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; -167import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; -168import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; -169import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse; -170import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest; -171import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse; -172import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest; -173import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; -174import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest; -175import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse; -176import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest; -177import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse; -178import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; -179import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; -180import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; -181import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; -182import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; -183import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; -184import org.apache.hadoop.hbase.util.Bytes; -185import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -186import org.apache.hadoop.hbase.util.ForeignExceptionUtil; -187import org.apache.hadoop.hbase.util.Pair; -188 -189/** -190 * The implementation of AsyncAdmin. 
-191 */ -192@InterfaceAudience.Private -193@InterfaceStability.Evolving -194public class AsyncHBaseAdmin implements AsyncAdmin { -195 public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc"; -196 -197 private static final Log LOG = LogFactory.getLog(AsyncHBaseAdmin.class); +043 +044import java.util.stream.Stream; +045 +046import org.apache.commons.io.IOUtils; +047import org.apache.commons.logging.Log; +048import org.apache.commons.logging.LogFactory; +049import org.apache.directory.api.util.OptionalComponentsMonitor; +050import org.apache.hadoop.hbase.HRegionInfo; +051import org.apache.hadoop.hbase.HRegionLocation; +052import org.apache.hadoop.hbase.MetaTableAccessor; +053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; +054import org.apache.hadoop.hbase.NotServingRegionException; +055import org.apache.hadoop.hbase.ProcedureInfo; +056import org.apache.hadoop.hbase.RegionLocations; +057import org.apache.hadoop.hbase.ServerName; +058import org.apache.hadoop.hbase.NamespaceDescriptor; +059import org.apache.hadoop.hbase.HConstants; +060import org.apache.hadoop.hbase.TableExistsException; +061import org.apache.hadoop.hbase.TableName; +062import org.apache.hadoop.hbase.AsyncMetaTableAccessor; +063import org.apache.hadoop.hbase.TableNotDisabledException; +064import org.apache.hadoop.hbase.TableNotEnabledException; +065import org.apache.hadoop.hbase.TableNotFoundException; +066import org.apache.hadoop.hbase.UnknownRegionException; +067import org.apache.hadoop.hbase.classification.InterfaceAudience; +068import org.apache.hadoop.hbase.classification.InterfaceStability; +069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; +070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; +071import org.apache.hadoop.hbase.client.Scan.ReadType; +072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; +073import org.apache.hadoop.hbase.client.replication.TableCFs; +074import org.apache.hadoop.hbase.exceptions.DeserializationException; +075import org.apache.hadoop.hbase.ipc.HBaseRpcController; +076import org.apache.hadoop.hbase.quotas.QuotaFilter; +077import org.apache.hadoop.hbase.quotas.QuotaSettings; +078import org.apache.hadoop.hbase.quotas.QuotaTableUtil; +079import org.apache.hadoop.hbase.replication.ReplicationException; +080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; +082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; +083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; +085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; +086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; +087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; +088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; +089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; +090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; +091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; +093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; +094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; +095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; +096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; +097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; +098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; +099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; +100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; +101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; +102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; +103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; +104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; +105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; +106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; +107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; +108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; +109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; +110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; +111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; +112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; +113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; +114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; +115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; +116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; +117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; +118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; +119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; +120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; +121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; +122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; +123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; +124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; +125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; +126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; +127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; +128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; +129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; +130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; +131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; +132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; +133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; +134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; +135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; +136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; +137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; +138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; +139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; +140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; +141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; +142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; +143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; +144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest; +145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse; +146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; +147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; +148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse; +149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; +150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse; +151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest; +152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse; +153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest; +154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse; +155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; +156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; +157import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; +158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; +159import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; +160import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse; +161import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; +162import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; +163import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; +164import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; +165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; +166import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; +167import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; +168import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse; +169import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; +170import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; +171import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; +172import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse; +173import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest; +174import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse; +175import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest; +176import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; +177import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest; +178import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse; +179import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest; +180import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse; +181import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; +182import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; +183import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; +184import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; +185import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; +186import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +187import org.apache.hadoop.hbase.util.Bytes; +188import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +189import org.apache.hadoop.hbase.util.ForeignExceptionUtil; +190import org.apache.hadoop.hbase.util.Pair; +191 +192/** +193 * The implementation of AsyncAdmin. 
+194 */ +195@InterfaceAudience.Private +196public class AsyncHBaseAdmin implements AsyncAdmin { +197 public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc"; 198 -199 private final AsyncConnectionImpl connection; +199 private static final Log LOG = LogFactory.getLog(AsyncHBaseAdmin.class); 200 -201 private final RawAsyncTable metaTable; +201 private final AsyncConnectionImpl connection; 202 -203 private final long rpcTimeoutNs; +203 private final RawAsyncTable metaTable; 204 -205 private final long operationTimeoutNs; +205 private final long rpcTimeoutNs; 206 -207 private final long pauseNs; +207 private final long operationTimeoutNs; 208 -209 private final int maxAttempts; +209 private final long pauseNs; 210 -211 private final int startLogErrorsCnt; +211 private final int maxAttempts; 212 -213 private final NonceGenerator ng; +213 private final int startLogErrorsCnt; 214 -215 AsyncHBaseAdmin(AsyncConnectionImpl connection) { -216 this.connection = connection; -217 this.metaTable = connection.getRawTable(META_TABLE_NAME); -218 this.rpcTimeoutNs = connection.connConf.getRpcTimeoutNs(); -219 this.operationTimeoutNs = connection.connConf.getOperationTimeoutNs(); -220 this.pauseNs = connection.connConf.getPauseNs(); -221 this.maxAttempts = connection.connConf.getMaxRetries(); -222 this.startLogErrorsCnt = connection.connConf.getStartLogErrorsCnt(); -223 this.ng = connection.getNonceGenerator(); -224 } -225 -226 private <T> MasterRequestCallerBuilder<T> newMasterCaller() { -227 return this.connection.callerFactory.<T> masterRequest() -228 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) -229 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) -230 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) -231 .startLogErrorsCnt(startLogErrorsCnt); -232 } -233 -234 private <T> AdminRequestCallerBuilder<T> newAdminCaller() { -235 return this.connection.callerFactory.<T> adminRequest() -236 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) -237 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) -238 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) -239 .startLogErrorsCnt(startLogErrorsCnt); -240 } -241 -242 @FunctionalInterface -243 private interface MasterRpcCall<RESP, REQ> { -244 void call(MasterService.Interface stub, HBaseRpcController controller, REQ req, -245 RpcCallback<RESP> done); -246 } -247 -248 @FunctionalInterface -249 private interface AdminRpcCall<RESP, REQ> { -250 void call(AdminService.Interface stub, HBaseRpcController controller, REQ req, -251 RpcCallback<RESP> done); -252 } -253 -254 @FunctionalInterface -255 private interface Converter<D, S> { -256 D convert(S src) throws IOException; -257 } -258 -259 private <PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller, -260 MasterService.Interface stub, PREQ preq, MasterRpcCall<PRESP, PREQ> rpcCall, -261 Converter<RESP, PRESP> respConverter) { -262 CompletableFuture<RESP> future = new CompletableFuture<>(); -263 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { -264 -265 @Override -266 public void run(PRESP resp) { -267 if (controller.failed()) { -268 future.completeExceptionally(controller.getFailed()); -269 } else { -270 try { -271 future.complete(respConverter.convert(resp)); -272 } catch (IOException e) { -273 future.completeExceptionally(e); -274 } -275 } -276 } -277 }); -278 return future; -279 } -280 -281 //TODO abstract call and adminCall into a single method. 
-282 private <PREQ, PRESP, RESP> CompletableFuture<RESP> adminCall(HBaseRpcController controller, -283 AdminService.Interface stub, PREQ preq, AdminRpcCall<PRESP, PREQ> rpcCall, -284 Converter<RESP, PRESP> respConverter) { -285 -286 CompletableFuture<RESP> future = new CompletableFuture<>(); -287 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { -288 -289 @Override -290 public void run(PRESP resp) { -291 if (controller.failed()) { -292 future.completeExceptionally(new IOException(controller.errorText())); -293 } else { -294 try { -295 future.complete(respConverter.convert(resp)); -296 } catch (IOException e) { -297 future.completeExceptionally(e); -298 } -299 } -300 } -301 }); -302 return future; -303 } -304 -305 private <PREQ, PRESP> CompletableFuture<Void> procedureCall(PREQ preq, -306 MasterRpcCall<PRESP, PREQ> rpcCall, Converter<Long, PRESP> respConverter, -307 ProcedureBiConsumer consumer) { -308 CompletableFuture<Long> procFuture = this -309 .<Long> newMasterCaller() -310 .action( -311 (controller, stub) -> this.<PREQ, PRESP, Long> call(controller, stub, preq, rpcCall, -312 respConverter)).call(); -313 return waitProcedureResult(procFuture).whenComplete(consumer); -314 } -315 -316 @FunctionalInterface -317 private interface TableOperator { -318 CompletableFuture<Void> operate(TableName table); -319 } -320 -321 private CompletableFuture<TableDescriptor[]> batchTableOperations(Pattern pattern, -322 TableOperator operator, String operationType) { -323 CompletableFuture<TableDescriptor[]> future = new CompletableFuture<>(); -324 List<TableDescriptor> failed = new LinkedList<>(); -325 listTables(pattern, false).whenComplete( -326 (tables, error) -> { -327 if (error != null) { -328 future.completeExceptionally(error); -329 return; -330 } -331 CompletableFuture[] futures = Arrays.stream(tables) -332 .map((table) -> operator.operate(table.getTableName()).whenComplete((v, ex) -> { -333 if (ex != null) { -334 LOG.info("Failed to " + operationType + " table " + table.getTableName(), ex); -335 failed.add(table); -336 } -337 })).<CompletableFuture> toArray(size -> new CompletableFuture[size]); -338 CompletableFuture.allOf(futures).thenAccept((v) -> { -339 future.complete(failed.toArray(new TableDescriptor[failed.size()])); -340 }); -341 }); -342 return future; -343 } -344 -345 @Override -346 public AsyncConnectionImpl getConnection() { -347 return this.connection; -348 } -349 -350 @Override -351 public CompletableFuture<Boolean> tableExists(TableName tableName) { -352 return AsyncMetaTableAccessor.tableExists(metaTable, tableName); -353 } -354 -355 @Override -356 public CompletableFuture<TableDescriptor[]> listTables() { -357 return listTables((Pattern) null, false); -358 } -359 -360 @Override -361 public CompletableFuture<TableDescriptor[]> listTables(String regex, boolean includeSysTables) { -362 return listTables(Pattern.compile(regex), false); -363 } -364 -365 @Override -366 public CompletableFuture<TableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables) { -367 return this -368 .<TableDescriptor[]>newMasterCaller() -369 .action( -370 (controller, stub) -> this -371 .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, TableDescriptor[]> call( -372 controller, stub, RequestConverter.buildGetTableDescriptorsRequest(pattern, -373 includeSysTables), (s, c, req, done) -> s.getTableDescriptors(c, req, done), ( -374 resp) -> ProtobufUtil.getTableDescriptorArray(resp))).call(); -375 } -376 -377 @Override -378 public CompletableFuture<TableName[]> listTableNames() { 
-379 return listTableNames((Pattern) null, false); +215 private final NonceGenerator ng; +216 +217 AsyncHBaseAdmin(AsyncConnectionImpl connection) { +218 this.connection = connection; +219 this.metaTable = connection.getRawTable(META_TABLE_NAME); +220 this.rpcTimeoutNs = connection.connConf.getRpcTimeoutNs(); +221 this.operationTimeoutNs = connection.connConf.getOperationTimeoutNs(); +222 this.pauseNs = connection.connConf.getPauseNs(); +223 this.maxAttempts = connection.connConf.getMaxRetries(); +224 this.startLogErrorsCnt = connection.connConf.getStartLogErrorsCnt(); +225 this.ng = connection.getNonceGenerator(); +226 } +227 +228 private <T> MasterRequestCallerBuilder<T> newMasterCaller() { +229 return this.connection.callerFactory.<T> masterRequest() +230 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) +231 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) +232 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) +233 .startLogErrorsCnt(startLogErrorsCnt); +234 } +235 +236 private <T> AdminRequestCallerBuilder<T> newAdminCaller() { +237 return this.connection.callerFactory.<T> adminRequest() +238 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) +239 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) +240 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) +241 .startLogErrorsCnt(startLogErrorsCnt); +242 } +243 +244 @FunctionalInterface +245 private interface MasterRpcCall<RESP, REQ> { +246 void call(MasterService.Interface stub, HBaseRpcController controller, REQ req, +247 RpcCallback<RESP> done); +248 } +249 +250 @FunctionalInterface +251 private interface AdminRpcCall<RESP, REQ> { +252 void call(AdminService.Interface stub, HBaseRpcController controller, REQ req, +253 RpcCallback<RESP> done); +254 } +255 +256 @FunctionalInterface +257 private interface Converter<D, S> { +258 D convert(S src) throws IOException; +259 } +260 +261 private <PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller, +262 MasterService.Interface stub, PREQ preq, MasterRpcCall<PRESP, PREQ> rpcCall, +263 Converter<RESP, PRESP> respConverter) { +264 CompletableFuture<RESP> future = new CompletableFuture<>(); +265 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { +266 +267 @Override +268 public void run(PRESP resp) { +269 if (controller.failed()) { +270 future.completeExceptionally(controller.getFailed()); +271 } else { +272 try { +273 future.complete(respConverter.convert(resp)); +274 } catch (IOException e) { +275 future.completeExceptionally(e); +276 } +277 } +278 } +279 }); +280 return future; +281 } +282 +283 private <PREQ, PRESP, RESP> CompletableFuture<RESP> adminCall(HBaseRpcController controller, +284 AdminService.Interface stub, PREQ preq, AdminRpcCall<PRESP, PREQ> rpcCall, +285 Converter<RESP, PRESP> respConverter) { +286 +287 CompletableFuture<RESP> future = new CompletableFuture<>(); +288 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { +289 +290 @Override +291 public void run(PRESP resp) { +292 if (controller.failed()) { +293 future.completeExceptionally(new IOException(controller.errorText())); +294 } else { +295 try { +296 future.complete(respConverter.convert(resp)); +297 } catch (IOException e) { +298 future.completeExceptionally(e); +299 } +300 } +301 } +302 }); +303 return future; +304 } +305 +306 private <PREQ, PRESP> CompletableFuture<Void> procedureCall(PREQ preq, +307 MasterRpcCall<PRESP, PREQ> rpcCall, Converter<Long, PRESP> respConverter, +308 ProcedureBiConsumer consumer) { +309 
CompletableFuture<Long> procFuture = this +310 .<Long> newMasterCaller() +311 .action( +312 (controller, stub) -> this.<PREQ, PRESP, Long> call(controller, stub, preq, rpcCall, +313 respConverter)).call(); +314 return waitProcedureResult(procFuture).whenComplete(consumer); +315 } +316 +317 @FunctionalInterface +318 private interface TableOperator { +319 CompletableFuture<Void> operate(TableName table); +320 } +321 +322 private CompletableFuture<List<TableDescriptor>> batchTableOperations(Pattern pattern, +323 TableOperator operator, String operationType) { +324 CompletableFuture<List<TableDescriptor>> future = new CompletableFuture<>(); +325 List<TableDescriptor> failed = new LinkedList<>(); +326 listTables(Optional.ofNullable(pattern), false).whenComplete( +327 (tables, error) -> { +328 if (error != null) { +329 future.completeExceptionally(error); +330 return; +331 } +332 CompletableFuture[] futures = +333 tables.stream() +334 .map((table) -> operator.operate(table.getTableName()).whenComplete((v, ex) -> { +335 if (ex != null) { +336 LOG.info("Failed to " + operationType + " table " + table.getTableName(), ex); +337 failed.add(table); +338 } +339 })).<CompletableFuture> toArray(size -> new CompletableFuture[size]); +340 CompletableFuture.allOf(futures).thenAccept((v) -> { +341 future.complete(failed); +342 }); +343 }); +344 return future; +345 } +346 +347 @Override +348 public AsyncConnectionImpl getConnection() { +349 return this.connection; +350 } +351 +352 @Override +353 public CompletableFuture<Boolean> tableExists(TableName tableName) { +354 return AsyncMetaTableAccessor.tableExists(metaTable, tableName); +355 } +356 +357 @Override +358 public CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern, +359 boolean includeSysTables) { +360 return this.<List<TableDescriptor>> newMasterCaller() +361 .action((controller, stub) -> this +362 .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableDescriptor>> call( +363 controller, stub, +364 RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables), +365 (s, c, req, done) -> s.getTableDescriptors(c, req, done), +366 (resp) -> ProtobufUtil.toTableDescriptorList(resp))) +367 .call(); +368 } +369 +370 @Override +371 public CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern, +372 boolean includeSysTables) { +373 return this.<List<TableName>> newMasterCaller() +374 .action((controller, stub) -> this +375 .<GetTableNamesRequest, GetTableNamesResponse, List<TableName>> call(controller, stub, +376 RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables), +377 (s, c, req, done) -> s.getTableNames(c, req, done), +378 (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))) +379 .call(); 380 } 381 382 @Override -383 public CompletableFuture<TableName[]> listTableNames(String regex, boolean includeSysTables) { -384 return listTableNames(Pattern.compile(regex), false); -385 } -386 -387 @Override -388 public CompletableFuture<TableName[]> listTableNames(Pattern pattern, boolean includeSysTables) { -389 return this -390 .<TableName[]>newMasterCaller() -391 .action( -392 (controller, stub) -> this -393 .<GetTableNamesRequest, GetTableNamesResponse, TableName[]> call(controller, stub, -394 RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables), (s, c, req, -395 done) -> s.getTableNames(c, req, done), (resp) -> ProtobufUtil -396 .getTableNameArray(resp.getTableNamesList()))).call(); -397 } -398 -399 @Override -400 public 
CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName) { -401 CompletableFuture<TableDescriptor> future = new CompletableFuture<>(); -402 this.<List<TableSchema>> newMasterCaller() -403 .action( -404 (controller, stub) -> this -405 .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableSchema>> call( -406 controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName), (s, -407 c, req, done) -> s.getTableDescriptors(c, req, done), (resp) -> resp -408 .getTableSchemaList())).call().whenComplete((tableSchemas, error) -> { -409 if (error != null) { -410 future.completeExceptionally(error); -411 return; -412 } -413 if (!tableSchemas.isEmpty()) { -414 future.complete(ProtobufUtil.convertToTableDesc(tableSchemas.get(0))); -415 } else { -416 future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString())); -417 } -418 }); -419 return future; -420 } -421 -422 @Override -423 public CompletableFuture<Void> createTable(TableDescriptor desc) { -424 return createTable(desc, null); -425 } -426 -427 @Override -428 public CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, -429 int numRegions) { -430 try { -431 return createTable(desc, getSplitKeys(startKey, endKey, numRegions)); -432 } catch (IllegalArgumentException e) { -433 return failedFuture(e); -434 } -435 } -436 -437 @Override -438 public CompletableFuture<Void> createTable(TableDescriptor desc, byte[][] splitKeys) { -439 if (desc.getTableName() == null) { -440 return failedFuture(new IllegalArgumentException("TableName cannot be null")); +383 public CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName) { +384 CompletableFuture<TableDescriptor> future = new CompletableFuture<>(); +385 this.<List<TableSchema>> newMasterCaller() +386 .action( +387 (controller, stub) -> this +388 .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableSchema>> call( +389 controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName), (s, +390 c, req, done) -> s.getTableDescriptors(c, req, done), (resp) -> resp +391 .getTableSchemaList())).call().whenComplete((tableSchemas, error) -> { +392 if (error != null) { +393 future.completeExceptionally(error); +394 return; +395 } +396 if (!tableSchemas.isEmpty()) { +397 future.complete(ProtobufUtil.convertToTableDesc(tableSchemas.get(0))); +398 } else { +399 future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString())); +400 } +401 }); +402 return future; +403 } +404 +405 @Override +406 public CompletableFuture<Void> createTable(TableDescriptor desc) { +407 return createTable(desc, null); +408 } +409 +410 @Override +411 public CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, +412 int numRegions) { +413 try { +