Return-Path: X-Original-To: archive-asf-public-internal@cust-asf2.ponee.io Delivered-To: archive-asf-public-internal@cust-asf2.ponee.io Received: from cust-asf.ponee.io (cust-asf.ponee.io [163.172.22.183]) by cust-asf2.ponee.io (Postfix) with ESMTP id 6E6BF200CB4 for ; Tue, 27 Jun 2017 17:01:28 +0200 (CEST) Received: by cust-asf.ponee.io (Postfix) id 6D4C0160BFE; Tue, 27 Jun 2017 15:01:28 +0000 (UTC) Delivered-To: archive-asf-public@cust-asf.ponee.io Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by cust-asf.ponee.io (Postfix) with SMTP id 18DEB160BC6 for ; Tue, 27 Jun 2017 17:01:25 +0200 (CEST) Received: (qmail 27241 invoked by uid 500); 27 Jun 2017 15:01:21 -0000 Mailing-List: contact commits-help@hbase.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: dev@hbase.apache.org Delivered-To: mailing list commits@hbase.apache.org Received: (qmail 24948 invoked by uid 99); 27 Jun 2017 15:01:20 -0000 Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Tue, 27 Jun 2017 15:01:20 +0000 Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id 00F8BF2139; Tue, 27 Jun 2017 15:01:16 +0000 (UTC) Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: git-site-role@apache.org To: commits@hbase.apache.org Date: Tue, 27 Jun 2017 15:01:37 -0000 Message-Id: In-Reply-To: <226601fc29d5464ba43fac0105ae50b9@git.apache.org> References: <226601fc29d5464ba43fac0105ae50b9@git.apache.org> X-Mailer: ASF-Git Admin Mailer Subject: [23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd. archived-at: Tue, 27 Jun 2017 15:01:28 -0000 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html ---------------------------------------------------------------------- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html index dc12c09..82506d2 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html @@ -54,2261 +54,2259 @@ 046import org.apache.commons.io.IOUtils; 047import org.apache.commons.logging.Log; 048import org.apache.commons.logging.LogFactory; -049import org.apache.directory.api.util.OptionalComponentsMonitor; -050import org.apache.hadoop.hbase.HRegionInfo; -051import org.apache.hadoop.hbase.HRegionLocation; -052import org.apache.hadoop.hbase.MetaTableAccessor; -053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -054import org.apache.hadoop.hbase.NotServingRegionException; -055import org.apache.hadoop.hbase.ProcedureInfo; -056import org.apache.hadoop.hbase.RegionLocations; -057import org.apache.hadoop.hbase.ServerName; -058import org.apache.hadoop.hbase.NamespaceDescriptor; -059import org.apache.hadoop.hbase.HConstants; -060import org.apache.hadoop.hbase.TableExistsException; -061import org.apache.hadoop.hbase.TableName; -062import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -063import org.apache.hadoop.hbase.TableNotDisabledException; -064import org.apache.hadoop.hbase.TableNotEnabledException; -065import 
org.apache.hadoop.hbase.TableNotFoundException; -066import org.apache.hadoop.hbase.UnknownRegionException; -067import org.apache.hadoop.hbase.classification.InterfaceAudience; -068import org.apache.hadoop.hbase.classification.InterfaceStability; -069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -071import org.apache.hadoop.hbase.client.Scan.ReadType; -072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -073import org.apache.hadoop.hbase.client.replication.TableCFs; -074import org.apache.hadoop.hbase.exceptions.DeserializationException; -075import org.apache.hadoop.hbase.ipc.HBaseRpcController; -076import org.apache.hadoop.hbase.quotas.QuotaFilter; -077import org.apache.hadoop.hbase.quotas.QuotaSettings; -078import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -079import org.apache.hadoop.hbase.replication.ReplicationException; -080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; -104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; -106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; -111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; -112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; -114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; -117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; -118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; -119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; -120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; -121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; -122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; -123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; -124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; -125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; -126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; -127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; -128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; -129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; -130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; -131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; -138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; -139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; -140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; -141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; -142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; -143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; -144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest; -145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse; -146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; -147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; -148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse; -149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; -150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse; -151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest; -152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse; -153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest; -154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse; -155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; -156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; -157import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; -158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; -159import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; -160import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse; -161import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; -162import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; -163import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; -164import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; -165import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; -166import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; -167import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; -168import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse; -169import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; -170import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; -171import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; -172import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse; -173import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest; -174import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse; -175import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest; -176import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; -177import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest; -178import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse; -179import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest; -180import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse; -181import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; -182import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; -183import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; -184import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; -185import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; -186import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; -187import org.apache.hadoop.hbase.util.Bytes; -188import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -189import org.apache.hadoop.hbase.util.ForeignExceptionUtil; -190import org.apache.hadoop.hbase.util.Pair; -191 -192/** -193 * The implementation of AsyncAdmin. -194 */ -195@InterfaceAudience.Private -196public class AsyncHBaseAdmin implements AsyncAdmin { -197 public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc"; +049import org.apache.hadoop.hbase.HRegionInfo; +050import org.apache.hadoop.hbase.HRegionLocation; +051import org.apache.hadoop.hbase.MetaTableAccessor; +052import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; +053import org.apache.hadoop.hbase.NotServingRegionException; +054import org.apache.hadoop.hbase.ProcedureInfo; +055import org.apache.hadoop.hbase.RegionLocations; +056import org.apache.hadoop.hbase.ServerName; +057import org.apache.hadoop.hbase.NamespaceDescriptor; +058import org.apache.hadoop.hbase.HConstants; +059import org.apache.hadoop.hbase.TableExistsException; +060import org.apache.hadoop.hbase.TableName; +061import org.apache.hadoop.hbase.AsyncMetaTableAccessor; +062import org.apache.hadoop.hbase.TableNotDisabledException; +063import org.apache.hadoop.hbase.TableNotEnabledException; +064import org.apache.hadoop.hbase.TableNotFoundException; +065import org.apache.hadoop.hbase.UnknownRegionException; +066import org.apache.hadoop.hbase.classification.InterfaceAudience; +067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; +068import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; +069import org.apache.hadoop.hbase.client.Scan.ReadType; +070import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; +071import org.apache.hadoop.hbase.client.replication.TableCFs; +072import org.apache.hadoop.hbase.exceptions.DeserializationException; +073import org.apache.hadoop.hbase.ipc.HBaseRpcController; +074import org.apache.hadoop.hbase.quotas.QuotaFilter; +075import org.apache.hadoop.hbase.quotas.QuotaSettings; +076import org.apache.hadoop.hbase.quotas.QuotaTableUtil; +077import org.apache.hadoop.hbase.replication.ReplicationException; +078import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +079import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; +080import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; +081import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +082import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; +083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; +084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; +085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; +086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; +087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; +088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; +089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; +091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; +092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; +093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; +094import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; +095import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; +096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; +097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; +098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; +099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; +100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; +101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; +102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; +103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; +104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; +105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; +106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; +107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; +108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; +109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; +110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; +111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; +112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; +113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; +114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; +115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; +116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; +117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; +118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; +119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; +120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; +121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; +122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; +123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; +124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; +125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; +126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; +127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; +128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; +129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; +130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; +131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; +132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; +133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; +134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; +135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; +136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; +137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; +138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; +139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; +140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; +141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; +142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest; +143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse; +144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; +145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; +146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse; +147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; +148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse; +149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest; +150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse; +151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest; +152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse; +153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; +154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; +155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; +156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; +157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; +158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse; +159import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; +160import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; +161import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; +162import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; +163import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; +164import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; +165import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; +166import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse; +167import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; +168import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; +169import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; +170import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse; +171import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest; +172import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse; +173import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest; +174import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; +175import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest; +176import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse; +177import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest; +178import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse; +179import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; +180import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; +181import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; +182import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; +183import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; +184import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +185import org.apache.hadoop.hbase.util.Bytes; +186import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +187import org.apache.hadoop.hbase.util.ForeignExceptionUtil; +188import org.apache.hadoop.hbase.util.Pair; +189 +190/** +191 * The implementation of AsyncAdmin. 
+192 */ +193@InterfaceAudience.Private +194public class AsyncHBaseAdmin implements AsyncAdmin { +195 public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc"; +196 +197 private static final Log LOG = LogFactory.getLog(AsyncHBaseAdmin.class); 198 -199 private static final Log LOG = LogFactory.getLog(AsyncHBaseAdmin.class); +199 private final AsyncConnectionImpl connection; 200 -201 private final AsyncConnectionImpl connection; +201 private final RawAsyncTable metaTable; 202 -203 private final RawAsyncTable metaTable; +203 private final long rpcTimeoutNs; 204 -205 private final long rpcTimeoutNs; +205 private final long operationTimeoutNs; 206 -207 private final long operationTimeoutNs; +207 private final long pauseNs; 208 -209 private final long pauseNs; +209 private final int maxAttempts; 210 -211 private final int maxAttempts; +211 private final int startLogErrorsCnt; 212 -213 private final int startLogErrorsCnt; +213 private final NonceGenerator ng; 214 -215 private final NonceGenerator ng; -216 -217 AsyncHBaseAdmin(AsyncConnectionImpl connection) { -218 this.connection = connection; -219 this.metaTable = connection.getRawTable(META_TABLE_NAME); -220 this.rpcTimeoutNs = connection.connConf.getRpcTimeoutNs(); -221 this.operationTimeoutNs = connection.connConf.getOperationTimeoutNs(); -222 this.pauseNs = connection.connConf.getPauseNs(); -223 this.maxAttempts = connection.connConf.getMaxRetries(); -224 this.startLogErrorsCnt = connection.connConf.getStartLogErrorsCnt(); -225 this.ng = connection.getNonceGenerator(); -226 } -227 -228 private <T> MasterRequestCallerBuilder<T> newMasterCaller() { -229 return this.connection.callerFactory.<T> masterRequest() -230 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) -231 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) -232 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) -233 .startLogErrorsCnt(startLogErrorsCnt); -234 } -235 -236 private <T> AdminRequestCallerBuilder<T> newAdminCaller() { -237 return this.connection.callerFactory.<T> adminRequest() -238 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) -239 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) -240 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) -241 .startLogErrorsCnt(startLogErrorsCnt); -242 } -243 -244 @FunctionalInterface -245 private interface MasterRpcCall<RESP, REQ> { -246 void call(MasterService.Interface stub, HBaseRpcController controller, REQ req, -247 RpcCallback<RESP> done); -248 } -249 -250 @FunctionalInterface -251 private interface AdminRpcCall<RESP, REQ> { -252 void call(AdminService.Interface stub, HBaseRpcController controller, REQ req, -253 RpcCallback<RESP> done); -254 } -255 -256 @FunctionalInterface -257 private interface Converter<D, S> { -258 D convert(S src) throws IOException; -259 } -260 -261 private <PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller, -262 MasterService.Interface stub, PREQ preq, MasterRpcCall<PRESP, PREQ> rpcCall, -263 Converter<RESP, PRESP> respConverter) { -264 CompletableFuture<RESP> future = new CompletableFuture<>(); -265 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { -266 -267 @Override -268 public void run(PRESP resp) { -269 if (controller.failed()) { -270 future.completeExceptionally(controller.getFailed()); -271 } else { -272 try { -273 future.complete(respConverter.convert(resp)); -274 } catch (IOException e) { -275 future.completeExceptionally(e); -276 } -277 } -278 } -279 }); -280 return future; -281 } -282 -283 
private <PREQ, PRESP, RESP> CompletableFuture<RESP> adminCall(HBaseRpcController controller, -284 AdminService.Interface stub, PREQ preq, AdminRpcCall<PRESP, PREQ> rpcCall, -285 Converter<RESP, PRESP> respConverter) { -286 -287 CompletableFuture<RESP> future = new CompletableFuture<>(); -288 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { -289 -290 @Override -291 public void run(PRESP resp) { -292 if (controller.failed()) { -293 future.completeExceptionally(new IOException(controller.errorText())); -294 } else { -295 try { -296 future.complete(respConverter.convert(resp)); -297 } catch (IOException e) { -298 future.completeExceptionally(e); -299 } -300 } -301 } -302 }); -303 return future; -304 } -305 -306 private <PREQ, PRESP> CompletableFuture<Void> procedureCall(PREQ preq, -307 MasterRpcCall<PRESP, PREQ> rpcCall, Converter<Long, PRESP> respConverter, -308 ProcedureBiConsumer consumer) { -309 CompletableFuture<Long> procFuture = this -310 .<Long> newMasterCaller() -311 .action( -312 (controller, stub) -> this.<PREQ, PRESP, Long> call(controller, stub, preq, rpcCall, -313 respConverter)).call(); -314 return waitProcedureResult(procFuture).whenComplete(consumer); -315 } -316 -317 @FunctionalInterface -318 private interface TableOperator { -319 CompletableFuture<Void> operate(TableName table); -320 } -321 -322 private CompletableFuture<List<TableDescriptor>> batchTableOperations(Pattern pattern, -323 TableOperator operator, String operationType) { -324 CompletableFuture<List<TableDescriptor>> future = new CompletableFuture<>(); -325 List<TableDescriptor> failed = new LinkedList<>(); -326 listTables(Optional.ofNullable(pattern), false).whenComplete( -327 (tables, error) -> { -328 if (error != null) { -329 future.completeExceptionally(error); -330 return; -331 } -332 CompletableFuture[] futures = -333 tables.stream() -334 .map((table) -> operator.operate(table.getTableName()).whenComplete((v, ex) -> { -335 if (ex != null) { -336 LOG.info("Failed to " + operationType + " table " + table.getTableName(), ex); -337 failed.add(table); -338 } -339 })).<CompletableFuture> toArray(size -> new CompletableFuture[size]); -340 CompletableFuture.allOf(futures).thenAccept((v) -> { -341 future.complete(failed); -342 }); -343 }); -344 return future; -345 } -346 -347 @Override -348 public AsyncConnectionImpl getConnection() { -349 return this.connection; -350 } -351 -352 @Override -353 public CompletableFuture<Boolean> tableExists(TableName tableName) { -354 return AsyncMetaTableAccessor.tableExists(metaTable, tableName); -355 } -356 -357 @Override -358 public CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern, -359 boolean includeSysTables) { -360 return this.<List<TableDescriptor>> newMasterCaller() -361 .action((controller, stub) -> this -362 .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableDescriptor>> call( -363 controller, stub, -364 RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables), -365 (s, c, req, done) -> s.getTableDescriptors(c, req, done), -366 (resp) -> ProtobufUtil.toTableDescriptorList(resp))) -367 .call(); -368 } -369 -370 @Override -371 public CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern, -372 boolean includeSysTables) { -373 return this.<List<TableName>> newMasterCaller() -374 .action((controller, stub) -> this -375 .<GetTableNamesRequest, GetTableNamesResponse, List<TableName>> call(controller, stub, -376 RequestConverter.buildGetTableNamesRequest(pattern, 
includeSysTables), -377 (s, c, req, done) -> s.getTableNames(c, req, done), -378 (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))) -379 .call(); -380 } -381 -382 @Override -383 public CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName) { -384 CompletableFuture<TableDescriptor> future = new CompletableFuture<>(); -385 this.<List<TableSchema>> newMasterCaller() -386 .action( -387 (controller, stub) -> this -388 .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableSchema>> call( -389 controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName), (s, -390 c, req, done) -> s.getTableDescriptors(c, req, done), (resp) -> resp -391 .getTableSchemaList())).call().whenComplete((tableSchemas, error) -> { -392 if (error != null) { -393 future.completeExceptionally(error); -394 return; -395 } -396 if (!tableSchemas.isEmpty()) { -397 future.complete(ProtobufUtil.convertToTableDesc(tableSchemas.get(0))); -398 } else { -399 future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString())); -400 } -401 }); -402 return future; -403 } -404 -405 @Override -406 public CompletableFuture<Void> createTable(TableDescriptor desc) { -407 return createTable(desc, null); -408 } -409 -410 @Override -411 public CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, -412 int numRegions) { -413 try { -414 return createTable(desc, getSplitKeys(startKey, endKey, numRegions)); -415 } catch (IllegalArgumentException e) { -416 return failedFuture(e); -417 } -418 } -419 -420 @Override -421 public CompletableFuture<Void> createTable(TableDescriptor desc, byte[][] splitKeys) { -422 if (desc.getTableName() == null) { -423 return failedFuture(new IllegalArgumentException("TableName cannot be null")); -424 } -425 if (splitKeys != null && splitKeys.length > 0) { -426 Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR); -427 // Verify there are no duplicate split keys -428 byte[] lastKey = null; -429 for (byte[] splitKey : splitKeys) { -430 if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) { -431 return failedFuture(new IllegalArgumentException( -432 "Empty split key must not be passed in the split keys.")); -433 } -434 if (lastKey != null && Bytes.equals(splitKey, lastKey)) { -435 return failedFuture(new IllegalArgumentException("All split keys must be unique, " -436 + "found duplicate: " + Bytes.toStringBinary(splitKey) + ", " -437 + Bytes.toStringBinary(lastKey))); -438 } -439 lastKey = splitKey; -440 } -441 } -442 -443 return this.<CreateTableRequest, CreateTableResponse> procedureCall( -444 RequestConverter.buildCreateTableRequest(desc, splitKeys, ng.getNonceGroup(), ng.newNonce()), -445 (s, c, req, done) -> s.createTable(c, req, done), (resp) -> resp.getProcId(), -446 new CreateTableProcedureBiConsumer(this, desc.getTableName())); -447 } -448 -449 @Override -450 public CompletableFuture<Void> deleteTable(TableName tableName) { -451 return this.<DeleteTableRequest, DeleteTableResponse> procedureCall(RequestConverter -452 .buildDeleteTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()), -453 (s, c, req, done) -> s.deleteTable(c, req, done), (resp) -> resp.getProcId(), -454 new DeleteTableProcedureBiConsumer(this, tableName)); -455 } -456 -457 @Override -458 public CompletableFuture<List<TableDescriptor>> deleteTables(Pattern pattern) { -459 return batchTableOperations(pattern, (table) -> deleteTable(table), "DELETE"); -460 } -461 -462 @Override -463 public CompletableFuture<Void> 
truncateTable(TableName tableName, boolean preserveSplits) { -464 return this.<TruncateTableRequest, TruncateTableResponse> procedureCall( -465 RequestConverter.buildTruncateTableRequest(tableName, preserveSplits, ng.getNonceGroup(), -466 ng.newNonce()), (s, c, req, done) -> s.truncateTable(c, req, done), -467 (resp) -> resp.getProcId(), new TruncateTableProcedureBiConsumer(this, tableName)); -468 } -469 -470 @Override -471 public CompletableFuture<Void> enableTable(TableName tableName) { -472 return this.<EnableTableRequest, EnableTableResponse> procedureCall(RequestConverter -473 .buildEnableTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()), -474 (s, c, req, done) -> s.enableTable(c, req, done), (resp) -> resp.getProcId(), -475 new EnableTableProcedureBiConsumer(this, tableName)); -476 } -477 -478 @Override -479 public CompletableFuture<List<TableDescriptor>> enableTables(Pattern pattern) { -480 return batchTableOperations(pattern, (table) -> enableTable(table), "ENABLE"); -481 } -482 -483 @Override -484 public CompletableFuture<Void> disableTable(TableName tableName) { -485 return this.<DisableTableRequest, DisableTableResponse> procedureCall(RequestConverter -486 .buildDisableTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()), -487 (s, c, req, done) -> s.disableTable(c, req, done), (resp) -> resp.getProcId(), -488 new DisableTableProcedureBiConsumer(this, tableName)); -489 } -490 -491 @Override -492 public CompletableFuture<List<TableDescriptor>> disableTables(Pattern pattern) { -493 return batchTableOperations(pattern, (table) -> disableTable(table), "DISABLE"); -494 } -495 -496 @Override -497 public CompletableFuture<Boolean> isTableEnabled(TableName tableName) { -498 CompletableFuture<Boolean> future = new CompletableFuture<>(); -499 AsyncMetaTableAccessor.getTableState(metaTable, tableName).whenComplete((state, error) -> { -500 if (error != null) { -501 future.completeExceptionally(error); -502 return; -503 } -504 if (state.isPresent()) { -505 future.complete(state.get().inStates(TableState.State.ENABLED)); -506 } else { -507 future.completeExceptionally(new TableNotFoundException(tableName)); -508 } -509 }); -510 return future; -511 } -512 -513 @Override -514 public CompletableFuture<Boolean> isTableDisabled(TableName tableName) { -515 CompletableFuture<Boolean> future = new CompletableFuture<>(); -516 AsyncMetaTableAccessor.getTableState(metaTable, tableName).whenComplete((state, error) -> { -517 if (error != null) { -518 future.completeExceptionally(error); -519 return; -520 } -521 if (state.isPresent()) { -522 future.complete(state.get().inStates(TableState.State.DISABLED)); -523 } else { -524 future.completeExceptionally(new TableNotFoundException(tableName)); -525 } -526 }); -527 return future; -528 } -529 -530 @Override -531 public CompletableFuture<Boolean> isTableAvailable(TableName tableName) { -532 return isTableAvailable(tableName, null); -533 } -534 -535 @Override -536 public CompletableFuture<Boolean> isTableAvailable(TableName tableName, byte[][] splitKeys) { -537 CompletableFuture<Boolean> future = new CompletableFuture<>(); -538 isTableEnabled(tableName).whenComplete( -539 (enabled, error) -> { -540 if (error != null) { -541 future.completeExceptionally(error); -542 return; -543 } -544 if (!enabled) { -545 future.complete(false); -546 } else { -547 AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, Optional.of(tableName)) -548 .whenComplete( -549 (locations, error1) -> { -550 if (error1 != null) { -551 future.completeExceptionally(error1); 
-552 return; -553 } -554 int notDeployed = 0; -555 int regionCount = 0; -556 for (HRegionLocation location : locations) { -557 HRegionInfo info = location.getRegionInfo(); -558 if (location.getServerName() == null) { -559 if (LOG.isDebugEnabled()) { -560 LOG.debug("Table " + tableName + " has not deployed region " -561 + info.getEncodedName()); -562 } -563 notDeployed++; -564 } else if (splitKeys != null -565 && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) { -566 for (byte[] splitKey : splitKeys) { -567 // Just check if the splitkey is available -568 if (Bytes.equals(info.getStartKey(), splitKey)) { -569 regionCount++; -570 break; -571 } -572 } -573 } else { -574 // Always empty start row should be counted -575 regionCount++; -576 } -577 } -578 if (notDeployed > 0) { -579 if (LOG.isDebugEnabled()) { -580 LOG.debug("Table " + tableName + " has " + notDeployed + " regions"); -581 } -582 future.complete(false); -583 } else if (splitKeys != null && regionCount != splitKeys.length + 1) { -584 if (LOG.isDebugEnabled()) { -585 LOG.debug("Table " + tableName + " expected to have " -586 + (splitKeys.length + 1) + " regions, but only " + regionCount -587 + " available"); -588 } -589 future.complete(false); -590 } else { -591 if (LOG.isDebugEnabled()) { -592 LOG.debug("Table " + tableName + " should be available"); -593 } -594 future.complete(true); -595 } -596 }); -597 } -598 }); -599 return future; -600 } -601 -602 @Override -603 public CompletableFuture<Pair<Integer, Integer>> getAlterStatus(TableName tableName) { -604 return this -605 .<Pair<Integer, Integer>>newMasterCaller() -606 .action( -607 (controller, stub) -> this -608 .<GetSchemaAlterStatusRequest, GetSchemaAlterStatusResponse, Pair<Integer, Integer>> call( -609 controller, stub, RequestConverter.buildGetSchemaAlterStatusRequest(tableName), (s, -610 c, req, done) -> s.getSchemaAlterStatus(c, req, done), (resp) -> new Pair<>( -611 resp.getYetToUpdateRegions(), resp.getTotalRegions()))).call(); -612 } -613 -614 @Override -615 public CompletableFuture<Void> addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) { -616 return this.<AddColumnRequest, AddColumnResponse> procedureCall( -617 RequestConverter.buildAddColumnRequest(tableName, columnFamily, ng.getNonceGroup(), -618 ng.newNonce()), (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(), -619 new AddColumnFamilyProcedureBiConsumer(this, tableName)); -620 } -621 -622 @Override -623 public CompletableFuture<Void> deleteColumnFamily(TableName tableName, byte[] columnFamily) { -624 return this.<DeleteColumnRequest, DeleteColumnResponse> procedureCall( -625 RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, ng.getNonceGroup(), -626 ng.newNonce()), (s, c, req, done) -> s.deleteColumn(c, req, done), -627 (resp) -> resp.getProcId(), new DeleteColumnFamilyProcedureBiConsumer(this, tableName)); -628 } -629 -630 @Override -631 public CompletableFuture<Void> modifyColumnFamily(TableName tableName, -632 ColumnFamilyDescriptor columnFamily) { -633 return this.<ModifyColumnRequest, ModifyColumnResponse> procedureCall( -634 RequestConverter.buildModifyColumnRequest(tableName, columnFamily, ng.getNonceGroup(), -635 ng.newNonce()), (s, c, req, done) -> s.modifyColumn(c, req, done), -636 (resp) -> resp.getProcId(), new ModifyColumnFamilyProcedureBiConsumer(this, tableName)); -
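
[Editor's note, not part of the archived diff] The diff above is cut off by the mailer (this is a "[partial]" site-publish commit), but the pattern it shows in AsyncHBaseAdmin is worth spelling out: each protobuf RpcCallback from MasterService/AdminService is adapted into a CompletableFuture that either completes with a converted response or completes exceptionally. The sketch below is a minimal, self-contained illustration of that adaptation in plain Java under stated assumptions; the Callback, Converter, and RpcCall interfaces here are hypothetical stand-ins for the shaded protobuf types, not HBase APIs, and the real helper additionally checks the HBaseRpcController for RPC-level failure before converting.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

public class CallbackToFutureSketch {

  // Stand-in for protobuf's RpcCallback<RESP>: a callback invoked with the raw response.
  @FunctionalInterface
  interface Callback<T> {
    void run(T response);
  }

  // Stand-in for AsyncHBaseAdmin's Converter<D, S>: turns the raw response into the
  // value the caller wants, and may throw IOException.
  @FunctionalInterface
  interface Converter<D, S> {
    D convert(S src) throws IOException;
  }

  // Stand-in for a callback-style RPC stub method (e.g. getTableDescriptors).
  @FunctionalInterface
  interface RpcCall<RESP, REQ> {
    void call(REQ request, Callback<RESP> done);
  }

  // Mirrors the shape of AsyncHBaseAdmin#call: wire the callback to a CompletableFuture,
  // completing it with the converted response or exceptionally if conversion fails.
  static <REQ, PRESP, RESP> CompletableFuture<RESP> call(
      REQ request, RpcCall<PRESP, REQ> rpc, Converter<RESP, PRESP> converter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    rpc.call(request, resp -> {
      try {
        future.complete(converter.convert(resp));
      } catch (IOException e) {
        future.completeExceptionally(e);
      }
    });
    return future;
  }

  public static void main(String[] args) throws Exception {
    // Toy "RPC": echoes the request length back through the callback.
    RpcCall<Integer, String> echoLength = (req, done) -> done.run(req.length());
    CompletableFuture<String> result =
        call("hbase", echoLength, len -> "request had " + len + " characters");
    System.out.println(result.get()); // prints: request had 5 characters
  }
}

In the actual class this adapter is composed with newMasterCaller()/newAdminCaller() builders (which add rpcTimeout, operationTimeout, pause, and retry settings) and, for DDL operations, with procedureCall(), which extracts the procedure id from the response and waits for the procedure result before completing the returned future.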