From: git-site-role@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Date: Sun, 09 Jul 2017 15:02:02 -0000
Message-Id: <4bcc2a230ed54c5cab53ffb27cbb53cf@git.apache.org>
In-Reply-To: <74374d495ef643a5be8965892b791015@git.apache.org>
References: <74374d495ef643a5be8965892b791015@git.apache.org>
Subject: [10/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
archived-at: Sun, 09 Jul 2017 15:01:59 -0000

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
index 16c0042..71844ce 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
@@ -126,2499 +126,2543 @@
118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; 119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest; 120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse; -121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; -124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; -125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; -126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; -127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; -128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse; -129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; -130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; -131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; -138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; -139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; -140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; -141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; -142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; -143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; -144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; -145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; -146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; -147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; -148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse; -149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest; -150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse; -151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest; -152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse; -153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest; -154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse; -155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; -156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; -157import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; -158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; -159import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; -160import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; -161import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest; -162import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse; -163import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest; -164import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest; -165import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse; -166import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; -167import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; -168import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse; -169import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; -170import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse; -171import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest; -172import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse; -173import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest; -174import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse; -175import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest; -176import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse; -177import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; -178import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; -179import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; -180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; -181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest; -182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse; -183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest; -184import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse; -185import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; -186import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse; -187import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest; -188import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse; -189import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; -190import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse; -191import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; -192import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; -193import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; -194import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; -195import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; -196import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; -197import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; -198import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse; -199import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; -200import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; -201import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; -202import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse; -203import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest; -204import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse; -205import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest; -206import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; -207import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest; -208import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse; -209import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest; -210import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse; -211import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; -212import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; -213import org.apache.hadoop.hbase.shaded.protobuf.generated.*; -214import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; -215import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; -216import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; -217import org.apache.hadoop.hbase.util.Bytes; -218import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -219import org.apache.hadoop.hbase.util.ForeignExceptionUtil; -220import org.apache.hadoop.hbase.util.Pair; -221 -222/** -223 * The implementation of AsyncAdmin. -224 */ -225@InterfaceAudience.Private -226public class RawAsyncHBaseAdmin implements AsyncAdmin { -227 public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc"; -228 -229 private static final Log LOG = LogFactory.getLog(AsyncHBaseAdmin.class); -230 -231 private final AsyncConnectionImpl connection; -232 -233 private final RawAsyncTable metaTable; +121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest; +122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse; +123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; +124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; +125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; +126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; +127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; +128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; +129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; +130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse; +131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; +132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; +133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; +134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; +135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; +136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; +137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; +138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; +139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; +140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; +141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; +142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; +143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; +144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; +145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; +146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; +147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; +148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; +149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; +150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse; +151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest; +152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse; +153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest; +154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse; +155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest; +156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse; +157import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; +158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; +159import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; +160import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; +161import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest; +162import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse; +163import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; +164import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; +165import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest; +166import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse; +167import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest; +168import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest; +169import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse; +170import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; +171import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; +172import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse; +173import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; +174import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse; +175import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest; +176import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse; +177import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest; +178import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse; +179import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest; +180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse; +181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; +182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; +183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest; +184import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse; +185import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; +186import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; +187import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest; +188import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse; +189import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest; +190import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse; +191import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; +192import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse; +193import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest; +194import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse; +195import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; +196import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse; +197import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; +198import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; +199import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; +200import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; +201import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; +202import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; +203import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; +204import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse; +205import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; +206import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; +207import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; +208import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse; +209import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest; +210import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse; +211import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest; +212import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; +213import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest; +214import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse; +215import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest; +216import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse; +217import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; +218import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; +219import org.apache.hadoop.hbase.shaded.protobuf.generated.*; +220import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; +221import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; +222import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +223import org.apache.hadoop.hbase.util.Bytes; +224import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +225import org.apache.hadoop.hbase.util.ForeignExceptionUtil; +226import org.apache.hadoop.hbase.util.Pair; +227 +228/** +229 * The implementation of AsyncAdmin. +230 */ +231@InterfaceAudience.Private +232public class RawAsyncHBaseAdmin implements AsyncAdmin { +233 public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc"; 234 -235 private final long rpcTimeoutNs; +235 private static final Log LOG = LogFactory.getLog(AsyncHBaseAdmin.class); 236 -237 private final long operationTimeoutNs; +237 private final AsyncConnectionImpl connection; 238 -239 private final long pauseNs; +239 private final RawAsyncTable metaTable; 240 -241 private final int maxAttempts; +241 private final long rpcTimeoutNs; 242 -243 private final int startLogErrorsCnt; +243 private final long operationTimeoutNs; 244 -245 private final NonceGenerator ng; +245 private final long pauseNs; 246 -247 RawAsyncHBaseAdmin(AsyncConnectionImpl connection, AsyncAdminBuilderBase<?> builder) { -248 this.connection = connection; -249 this.metaTable = connection.getRawTable(META_TABLE_NAME); -250 this.rpcTimeoutNs = builder.rpcTimeoutNs; -251 this.operationTimeoutNs = builder.operationTimeoutNs; -252 this.pauseNs = builder.pauseNs; -253 this.maxAttempts = builder.maxAttempts; -254 this.startLogErrorsCnt = builder.startLogErrorsCnt; -255 this.ng = connection.getNonceGenerator(); -256 } -257 -258 private <T> MasterRequestCallerBuilder<T> newMasterCaller() { -259 return this.connection.callerFactory.<T> masterRequest() -260 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) -261 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) -262 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) -263 .startLogErrorsCnt(startLogErrorsCnt); -264 } -265 -266 private <T> AdminRequestCallerBuilder<T> newAdminCaller() { -267 return this.connection.callerFactory.<T> adminRequest() -268 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) -269 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) -270 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) -271 .startLogErrorsCnt(startLogErrorsCnt); -272 } -273 -274 @FunctionalInterface -275 private interface MasterRpcCall<RESP, REQ> { -276 void call(MasterService.Interface stub, HBaseRpcController controller, REQ req, -277 RpcCallback<RESP> done); +247 private final int maxAttempts; +248 +249 private final int startLogErrorsCnt; +250 +251 private final NonceGenerator ng; +252 +253 RawAsyncHBaseAdmin(AsyncConnectionImpl connection, AsyncAdminBuilderBase<?> builder) { +254 this.connection = connection; +255 this.metaTable = connection.getRawTable(META_TABLE_NAME); +256 this.rpcTimeoutNs = builder.rpcTimeoutNs; +257 this.operationTimeoutNs = builder.operationTimeoutNs; +258 this.pauseNs = builder.pauseNs; +259 this.maxAttempts = builder.maxAttempts; +260 this.startLogErrorsCnt = builder.startLogErrorsCnt; +261 this.ng = connection.getNonceGenerator(); +262 } +263 
+264 private <T> MasterRequestCallerBuilder<T> newMasterCaller() { +265 return this.connection.callerFactory.<T> masterRequest() +266 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) +267 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) +268 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) +269 .startLogErrorsCnt(startLogErrorsCnt); +270 } +271 +272 private <T> AdminRequestCallerBuilder<T> newAdminCaller() { +273 return this.connection.callerFactory.<T> adminRequest() +274 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) +275 .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) +276 .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) +277 .startLogErrorsCnt(startLogErrorsCnt); 278 } 279 280 @FunctionalInterface -281 private interface AdminRpcCall<RESP, REQ> { -282 void call(AdminService.Interface stub, HBaseRpcController controller, REQ req, +281 private interface MasterRpcCall<RESP, REQ> { +282 void call(MasterService.Interface stub, HBaseRpcController controller, REQ req, 283 RpcCallback<RESP> done); 284 } 285 286 @FunctionalInterface -287 private interface Converter<D, S> { -288 D convert(S src) throws IOException; -289 } -290 -291 private <PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller, -292 MasterService.Interface stub, PREQ preq, MasterRpcCall<PRESP, PREQ> rpcCall, -293 Converter<RESP, PRESP> respConverter) { -294 CompletableFuture<RESP> future = new CompletableFuture<>(); -295 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { +287 private interface AdminRpcCall<RESP, REQ> { +288 void call(AdminService.Interface stub, HBaseRpcController controller, REQ req, +289 RpcCallback<RESP> done); +290 } +291 +292 @FunctionalInterface +293 private interface Converter<D, S> { +294 D convert(S src) throws IOException; +295 } 296 -297 @Override -298 public void run(PRESP resp) { -299 if (controller.failed()) { -300 future.completeExceptionally(controller.getFailed()); -301 } else { -302 try { -303 future.complete(respConverter.convert(resp)); -304 } catch (IOException e) { -305 future.completeExceptionally(e); -306 } -307 } -308 } -309 }); -310 return future; -311 } -312 -313 private <PREQ, PRESP, RESP> CompletableFuture<RESP> adminCall(HBaseRpcController controller, -314 AdminService.Interface stub, PREQ preq, AdminRpcCall<PRESP, PREQ> rpcCall, -315 Converter<RESP, PRESP> respConverter) { -316 -317 CompletableFuture<RESP> future = new CompletableFuture<>(); -318 rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() { -319 -320 @Override -321 public void run(PRESP resp) { -322 if (controller.failed()) { -323 future.completeExceptionally(new IOException(controller.errorText())); -324 } else { -325 try { -326 future.complete(respConverter.convert(resp)); -327 } catch (IOException e) { -328 future.completeExceptionally(e); -329 } -330 } -331 } -332 }); -333 return future; -334 } -335 -336 private <PREQ, PRESP> CompletableFuture<Void> procedureCall(PREQ preq, -337 MasterRpcCall<PRESP, PREQ> rpcCall, Converter<Long, PRESP> respConverter, -338 ProcedureBiConsumer consumer) { -339 CompletableFuture<Long> procFuture = this -340 .<Long> newMasterCaller() -341 .action( -342 (controller, stub) -> this.<PREQ, PRESP, Long> call(controller, stub, preq, rpcCall, -343 respConverter)).call(); -344 return waitProcedureResult(procFuture).whenComplete(consumer); -345 } -346 -347 @FunctionalInterface -348 private interface TableOperator { -349 CompletableFuture<Void> operate(TableName table); -350 } -351 -352 private 
CompletableFuture<List<TableDescriptor>> batchTableOperations(Pattern pattern, -353 TableOperator operator, String operationType) { -354 CompletableFuture<List<TableDescriptor>> future = new CompletableFuture<>(); -355 List<TableDescriptor> failed = new LinkedList<>(); -356 listTables(Optional.ofNullable(pattern), false).whenComplete( -357 (tables, error) -> { -358 if (error != null) { -359 future.completeExceptionally(error); -360 return; -361 } -362 CompletableFuture[] futures = -363 tables.stream() -364 .map((table) -> operator.operate(table.getTableName()).whenComplete((v, ex) -> { -365 if (ex != null) { -366 LOG.info("Failed to " + operationType + " table " + table.getTableName(), ex); -367 failed.add(table); -368 } -369 })).<CompletableFuture> toArray(size -> new CompletableFuture[size]); -370 CompletableFuture.allOf(futures).thenAccept((v) -> { -371 future.complete(failed); -372 }); -373 }); -374 return future; -375 } -376 -377 @Override -378 public CompletableFuture<Boolean> tableExists(TableName tableName) { -379 return AsyncMetaTableAccessor.tableExists(metaTable, tableName); -380 } -381 -382 @Override -383 public CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern, -384 boolean includeSysTables) { -385 return this.<List<TableDescriptor>> newMasterCaller() -386 .action((controller, stub) -> this -387 .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableDescriptor>> call( -388 controller, stub, -389 RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables), -390 (s, c, req, done) -> s.getTableDescriptors(c, req, done), -391 (resp) -> ProtobufUtil.toTableDescriptorList(resp))) -392 .call(); -393 } -394 -395 @Override -396 public CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern, -397 boolean includeSysTables) { -398 return this.<List<TableName>> newMasterCaller() -399 .action((controller, stub) -> this -400 .<GetTableNamesRequest, GetTableNamesResponse, List<TableName>> call(controller, stub, -401 RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables), -402 (s, c, req, done) -> s.getTableNames(c, req, done), -403 (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))) -404 .call(); -405 } -406 -407 @Override -408 public CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName) { -409 CompletableFuture<TableDescriptor> future = new CompletableFuture<>(); -410 this.<List<TableSchema>> newMasterCaller() -411 .action( -412 (controller, stub) -> this -413 .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableSchema>> call( -414 controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName), (s, -415 c, req, done) -> s.getTableDescriptors(c, req, done), (resp) -> resp -416 .getTableSchemaList())).call().whenComplete((tableSchemas, error) -> { -417 if (error != null) { -418 future.completeExceptionally(error); -419 return; -420 } -421 if (!tableSchemas.isEmpty()) { -422 future.complete(ProtobufUtil.convertToTableDesc(tableSchemas.get(0))); -423 } else { -424 future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString())); -425 } -426 }); -427 return future; -428 } -429 -430 @Override -431 public CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, -432 int numRegions) { -433 try { -434 return createTable(desc, Optional.of(getSplitKeys(startKey, endKey, numRegions))); -435 } catch (IllegalArgumentException e) { -436 return failedFuture(e); -437 } -438 } -439 -440 @Override 
-441 public CompletableFuture<Void> createTable(TableDescriptor desc, Optional<byte[][]> splitKeys) { -442 if (desc.getTableName() == null) { -443 return failedFuture(new IllegalArgumentException("TableName cannot be null")); -444 } -445 try { -446 splitKeys.ifPresent(keys -> verifySplitKeys(keys)); -447 return this.<CreateTableRequest, CreateTableResponse> procedureCall(RequestConverter -448 .buildCreateTableRequest(desc, splitKeys, ng.getNonceGroup(), ng.newNonce()), (s, c, req, -449 done) -> s.createTable(c, req, done), (resp) -> resp.getProcId(), -450 new CreateTableProcedureBiConsumer(this, desc.getTableName())); -451 } catch (IllegalArgumentException e) { -452 return failedFuture(e); -453 } -454 } -455 -456 @Override -457 public CompletableFuture<Void> deleteTable(TableName tableName) { -458 return this.<DeleteTableRequest, DeleteTableResponse> procedureCall(RequestConverter -459 .buildDeleteTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()), -460 (s, c, req, done) -> s.deleteTable(c, req, done), (resp) -> resp.getProcId(), -461 new DeleteTableProcedureBiConsumer(this, tableName)); -462 } -463 -464 @Override -465 public CompletableFuture<List<TableDescriptor>> deleteTables(Pattern pattern) { -466 return batchTableOperations(pattern, (table) -> deleteTable(table), "DELETE"); -467 } -468 -469 @Override -470 public CompletableFuture<Void> truncateTable(TableName tableName, boolean preserveSplits) { -471 return this.<TruncateTableRequest, TruncateTableResponse> procedureCall( -472 RequestConverter.buildTruncateTableRequest(tableName, preserveSplits, ng.getNonceGroup(), -473 ng.newNonce()), (s, c, req, done) -> s.truncateTable(c, req, done), -474 (resp) -> resp.getProcId(), new TruncateTableProcedureBiConsumer(this, tableName)); -475 } -476 -477 @Override -478 public CompletableFuture<Void> enableTable(TableName tableName) { -479 return this.<EnableTableRequest, EnableTableResponse> procedureCall(RequestConverter -480 .buildEnableTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()), -481 (s, c, req, done) -> s.enableTable(c, req, done), (resp) -> resp.getProcId(), -482 new EnableTableProcedureBiConsumer(this, tableName)); -483 } -484 -485 @Override -486 public CompletableFuture<List<TableDescriptor>> enableTables(Pattern pattern) { -487 return batchTableOperations(pattern, (table) -> enableTable(table), "ENABLE"); -488 } -489 -490 @Override -491 public CompletableFuture<Void> disableTable(TableName tableName) { -492 return this.<DisableTableRequest, DisableTableResponse> procedureCall(RequestConverter -493 .buildDisableTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()), -494 (s, c, req, done) -> s.disableTable(c, req, done), (resp) -> resp.getProcId(), -495 new DisableTableProcedureBiConsumer(this, tableName)); -496 } -497 -498 @Override -499 public CompletableFuture<List<TableDescriptor>> disableTables(Pattern pattern) { -500 return batchTableOperations(pattern, (table) -> disableTable(table), "DISABLE"); -501 } -502 -503 @Override -504 public CompletableFuture<Boolean> isTableEnabled(TableName tableName) { -505 CompletableFuture<Boolean> future = new CompletableFuture<>(); -506 AsyncMetaTableAccessor.getTableState(metaTable, tableName).whenComplete((state, error) -> { -507 if (error != null) { -508 future.completeExceptionally(error); -509 return; -510 } -511 if (state.isPresent()) { -512 future.complete(state.get().inStates(TableState.State.ENABLED)); -513 } else { -514 future.completeExceptionally(new TableNotFoundException(tableName)); -515 } -516 }); -517 
return future; -518 } -519 -520 @Override -521 public CompletableFuture<Boolean> isTableDisabled(TableName tableName) { -522 CompletableFuture<Boolean> future = new CompletableFuture<>(); -523 AsyncMetaTableAccessor.getTableState(metaTable, tableName).whenComplete((state, error) -> { -524 if (error != null) { -525 future.completeExceptionally(error); -526 return; -527 } -528 if (state.isPresent()) { -529 future.complete(state.get().inStates(TableState.State.DISABLED)); -530 } else { -531 future.completeExceptionally(new TableNotFoundException(tableName)); -532 } -533 }); -534 return future; -535 } -536 -537 @Override -538 public CompletableFuture<Boolean> isTableAvailable(TableName tableName) { -539 return isTableAvailable(tableName, null); -540 } -541 -542 @Override -543 public CompletableFuture<Boolean> isTableAvailable(TableName tableName, byte[][] splitKeys) { -544 CompletableFuture<Boolean> future = new CompletableFuture<>(); -545 isTableEnabled(tableName).whenComplete( -546 (enabled, error) -> { -547 if (error != null) { -548 future.completeExceptionally(error); -549 return; -550 } -551 if (!enabled) { -552 future.complete(false); -553 } else { -554 AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, Optional.of(tableName)) -555 .whenComplete( -556 (locations, error1) -> { -557 if (error1 != null) { -558 future.completeExceptionally(error1); -559 return; -560 } -561 int notDeployed = 0; -562 int regionCount = 0; -563 for (HRegionLocation location : locations) { -564 HRegionInfo info = location.getRegionInfo(); -565 if (location.getServerName() == null) { -566 if (LOG.isDebugEnabled()) { -567 LOG.debug("Table " + tableName + " has not deployed region " -568 + info.getEncodedName()); -569 } -570 notDeployed++; -571 } else if (splitKeys != null -572 && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) { -573 for (byte[] splitKey : splitKeys) { -574 // Just check if the splitkey is available -575 if (Bytes.equals(info.getStartKey(), splitKey)) { -576 regionCount++; -577 break; -578 } -579 } -580 } else { -581 // Always empty start row should be counted -582 regionCount++; -583 } -584 } -585 if (notDeployed > 0) { -586 if (LOG.isDebugEnabled()) { -587 LOG.debug("Table " + tableName + " has " + notDeployed + " regions"); -588 } -589 future.complete(false); -590 } else if (splitKeys != null && regionCount != splitKeys.length + 1) { -591 if (LOG.isDebugEnabled()) { -592 LOG.debug("Table " + tableName + " expected to have " -593 + (splitKeys.length + 1) + " regions, but only " + regionCount -594 + " available"); -595 } -596 future.complete(false); -597 } else { -598 if (LOG.isDebugEnabled()) { -599 LOG.debug("Table " + tableName + " should be available"); -600 } -601 future.complete(true); -602 } -603 }); -604 } -605 }); -606 return future; -607 } -608 -609 @Override -610 public CompletableFuture<Pair<Integer, Integer>> getAlterStatus(TableName tableName) { -611 return this -612 .<Pair<Integer, Integer>>newMasterCaller() -613 .action( -614 (controller, stub) -> this -615 .<GetSchemaAlterStatusRequest, GetSchemaAlterStatusResponse, Pair<Integer, Integer>> call( -616 controller, stub, RequestConverter.buildGetSchemaAlterStatusRequest(tableName), (s, -617 c, req, done) -> s.getSchemaAlterStatus(c, req, done), (resp) -> new Pair<>( -618 resp.getYetToUpdateRegions(), resp.getTotalRegions()))).call(); -619 } -620 -621 @Override -622 public CompletableFuture<Void> addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) { -623 return this.<AddColumnRequest, 
AddColumnResponse> procedureCall( -624 RequestConverter.buildAddColumnRequest(tableName, columnFamily, ng.getNonceGroup(), -625 ng.newNonce()), (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(), -626 new AddColumnFamilyProcedureBiConsumer(this, tableName)); -627 } -628 -629 @Override -630 public CompletableFuture<Void> deleteColumnFamily(TableName tableName, byte[] columnFamily) { -631 return this.<DeleteColumnRequest, DeleteColumnResponse> procedureCall( -632 RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, ng.getNonceGroup(), -633 ng.newNonce()), (s, c, req, done) -> s.deleteColumn(c, req, done), -634 (resp) -> resp.getProcId(), new DeleteColumnFamilyProcedureBiConsumer(this, tableName)); -635 } -636 -637 @Override -638 public CompletableFuture<Void> modifyColumnFamily(TableName tableName, -639 ColumnFamilyDescriptor columnFamily) { -640 return this.<ModifyColumnRequest, ModifyColumnResponse> procedureCall( -641 RequestConverter.buildModifyColumnRequest(tableName, columnFamily, ng.getNonceGroup(), -642 ng.newNonce()), (s, c, req, done) -> s.modifyColumn(c, req, done), -643 (resp) -> resp.getProcId(), new ModifyColumnFamilyProcedureBiConsumer(this, tableName)); -644 } -645 -646 @Override -647 public CompletableFuture<Void> createNamespace(NamespaceDescriptor descriptor) { -648 return this.<CreateNamespaceRequest, CreateNamespaceResponse> procedureCall( -649 RequestConverter.buildCreateNamespaceRequest(descriptor), -650 (s, c, req, done) -> s.createNamespace(c, req, done), (resp) -> resp.getProcId(), -651 new CreateNamespaceProcedureBiConsumer(this, descriptor.getName())); -652 } -653 -654 @Override -655 public CompletableFuture<Void> modifyNamespace(NamespaceDescriptor descriptor) { -656 return this.<ModifyNamespaceRequest, ModifyNamespaceResponse> procedureCall( -657 RequestConverter.buildModifyNamespaceRequest(descriptor), -658 (s, c, req, done) -> s.modifyNamespace(c, req, done), (resp) -> resp.getProcId(), -659 new ModifyNamespaceProcedureBiConsumer(this, descriptor.getName())); -660 } -661 -662 @Override -663 public CompletableFuture<Void> deleteNamespace(String name) { -664 return this.<DeleteNamespaceRequest, DeleteNamespaceResponse> procedureCall( -665 RequestConverter.buildDeleteNamespaceRequest(name), -666 (s, c, req, done) -> s.deleteNamespace(c, req, done), (resp) -> resp.getProcId(), -667 new DeleteNamespaceProcedureBiConsumer(this, name)); -668 } -669 -670 @Override -671 public CompletableFuture<NamespaceDescriptor> getNamespaceDescriptor(String name) { -672 return this -673 .<NamespaceDescriptor> newMasterCaller() -674 .action( -675 (controller, stub) -> this -676 .<GetNamespaceDescriptorRequest, GetNamespaceDescriptorResponse, NamespaceDescriptor> call( -677 controller, stub, RequestConverter.buildGetNamespaceDescriptorRequest(name), (s, c, -678 req, done) -> s.getNamespaceDescriptor(c, req, done), (resp) -> ProtobufUtil -679 .toNamespaceDescriptor(resp.getNamespaceDescriptor()))).call(); -680 } -681 -682 @Override -683 public CompletableFuture<List<NamespaceDescriptor>> listNamespaceDescriptors() { -684 return this -685 .<List<NamespaceDescriptor>> newMasterCaller() -686 .action( -687 (controller, stub) -> this -688 .<ListNamespaceDescriptorsRequest, ListNamespaceDescriptorsResponse, List<NamespaceDescriptor>> call( -689 controller, stub, ListNamespaceDescriptorsRequest.newBuilder().build(), (s, c, req, -690 done) -> s.listNamespaceDescriptors(c, req, done), (resp) -> ProtobufUtil -691 .toNamespaceDescriptorList(resp))).call(); 
-692 } -693 -694 @Override -695 public CompletableFuture<Boolean> closeRegion(byte[] regionName, Optional<ServerName> serverName) { -696 CompletableFuture<Boolean> future = new CompletableFuture<>(); -697 getRegionLocation(regionName).whenComplete((location, err) -> { -698 if (err != null) { -699 future.completeExceptionally(err); -700 return; -701 } -702 ServerName server = serverName.isPresent() ? serverName.get() : location.getServerName(); -703 if (server == null) { -704 future.completeExceptionally(new NotServingRegionException(regionName));
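
The page diffed above is the generated source view of RawAsyncHBaseAdmin, whose AsyncAdmin methods (tableExists, disableTable, deleteTable, and so on) return CompletableFuture instead of blocking. Below is a minimal usage sketch, not part of this commit, showing how a caller might chain those futures; it assumes an AsyncAdmin instance is obtained elsewhere (RawAsyncHBaseAdmin itself is @InterfaceAudience.Private and is not constructed directly), and the class and method names in the sketch are illustrative only.

// Minimal sketch (illustrative, not from this commit): sequencing the
// CompletableFuture-returning AsyncAdmin calls shown in the diffed source.
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class AsyncAdminUsageSketch {

  // Disable and then delete a table if it exists, without blocking the caller thread.
  static CompletableFuture<Void> disableAndDeleteIfPresent(AsyncAdmin admin, TableName table) {
    return admin.tableExists(table).thenCompose(exists -> {
      if (!exists) {
        // Nothing to do; complete immediately.
        return CompletableFuture.<Void> completedFuture(null);
      }
      // disableTable and deleteTable each return CompletableFuture<Void>,
      // so the two steps can be sequenced with thenCompose.
      return admin.disableTable(table).thenCompose(v -> admin.deleteTable(table));
    });
  }
}

A synchronous caller can still block on the returned future with join() or get() if needed.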