hbase-commits mailing list archives

From a...@apache.org
Subject [2/3] hbase git commit: HBASE-19200 Make hbase-client only depend on ZKAsyncRegistry and ZNodePaths
Date Fri, 10 Nov 2017 18:12:27 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
index 2a0764d..5954e94 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
@@ -22,22 +22,24 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT;
 import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM;
 import static org.apache.hadoop.hbase.HConstants.SPLIT_LOGDIR_NAME;
 import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
-import static org.apache.hadoop.hbase.HRegionInfo.DEFAULT_REPLICA_ID;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
+import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
 
 import java.util.Optional;
 import java.util.stream.IntStream;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
+
 /**
  * Class that hold all the paths of znode for HBase.
  */
 @InterfaceAudience.Private
 public class ZNodePaths {
+  // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved.
+  public static final char ZNODE_PATH_SEPARATOR = '/';
 
   public final static String META_ZNODE_PREFIX = "meta-region-server";
 
@@ -90,43 +92,35 @@ public class ZNodePaths {
     baseZNode = conf.get(ZOOKEEPER_ZNODE_PARENT, DEFAULT_ZOOKEEPER_ZNODE_PARENT);
     ImmutableMap.Builder<Integer, String> builder = ImmutableMap.builder();
     metaZNodePrefix = conf.get("zookeeper.znode.metaserver", META_ZNODE_PREFIX);
-    String defaultMetaReplicaZNode = ZKUtil.joinZNode(baseZNode, metaZNodePrefix);
+    String defaultMetaReplicaZNode = ZNodePaths.joinZNode(baseZNode, metaZNodePrefix);
     builder.put(DEFAULT_REPLICA_ID, defaultMetaReplicaZNode);
     int numMetaReplicas = conf.getInt(META_REPLICAS_NUM, DEFAULT_META_REPLICA_NUM);
     IntStream.range(1, numMetaReplicas)
         .forEachOrdered(i -> builder.put(i, defaultMetaReplicaZNode + "-" + i));
     metaReplicaZNodes = builder.build();
-    rsZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.rs", "rs"));
-    drainingZNode = ZKUtil.joinZNode(baseZNode,
-      conf.get("zookeeper.znode.draining.rs", "draining"));
-    masterAddressZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.master", "master"));
-    backupMasterAddressesZNode = ZKUtil.joinZNode(baseZNode,
-      conf.get("zookeeper.znode.backup.masters", "backup-masters"));
-    clusterStateZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.state", "running"));
-    tableZNode = ZKUtil.joinZNode(baseZNode,
-      conf.get("zookeeper.znode.tableEnableDisable", "table"));
-    clusterIdZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.clusterId", "hbaseid"));
-    splitLogZNode = ZKUtil.joinZNode(baseZNode,
-      conf.get("zookeeper.znode.splitlog", SPLIT_LOGDIR_NAME));
-    balancerZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.balancer", "balancer"));
-    regionNormalizerZNode = ZKUtil.joinZNode(baseZNode,
-      conf.get("zookeeper.znode.regionNormalizer", "normalizer"));
-    switchZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch"));
-    tableLockZNode = ZKUtil.joinZNode(baseZNode,
-      conf.get("zookeeper.znode.tableLock", "table-lock"));
-    namespaceZNode = ZKUtil.joinZNode(baseZNode,
-      conf.get("zookeeper.znode.namespace", "namespace"));
-    masterMaintZNode = ZKUtil.joinZNode(baseZNode,
-      conf.get("zookeeper.znode.masterMaintenance", "master-maintenance"));
-    replicationZNode =
-        ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.replication", "replication"));
+    rsZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.rs", "rs"));
+    drainingZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.draining.rs", "draining"));
+    masterAddressZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.master", "master"));
+    backupMasterAddressesZNode =
+        joinZNode(baseZNode, conf.get("zookeeper.znode.backup.masters", "backup-masters"));
+    clusterStateZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.state", "running"));
+    tableZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.tableEnableDisable", "table"));
+    clusterIdZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.clusterId", "hbaseid"));
+    splitLogZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.splitlog", SPLIT_LOGDIR_NAME));
+    balancerZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.balancer", "balancer"));
+    regionNormalizerZNode =
+        joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer"));
+    switchZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch"));
+    tableLockZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.tableLock", "table-lock"));
+    namespaceZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.namespace", "namespace"));
+    masterMaintZNode =
+        joinZNode(baseZNode, conf.get("zookeeper.znode.masterMaintenance", "master-maintenance"));
+    replicationZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.replication", "replication"));
     peersZNode =
-        ZKUtil.joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.peers", "peers"));
-    queuesZNode =
-        ZKUtil.joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.rs", "rs"));
-    hfileRefsZNode =
-        ZKUtil.joinZNode(replicationZNode,
-          conf.get("zookeeper.znode.replication.hfile.refs", "hfile-refs"));
+        joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.peers", "peers"));
+    queuesZNode = joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.rs", "rs"));
+    hfileRefsZNode = joinZNode(replicationZNode,
+      conf.get("zookeeper.znode.replication.hfile.refs", "hfile-refs"));
   }
 
   @Override
@@ -175,7 +169,7 @@ public class ZNodePaths {
    */
   public int getMetaReplicaIdFromZnode(String znode) {
     if (znode.equals(metaZNodePrefix)) {
-      return HRegionInfo.DEFAULT_REPLICA_ID;
+      return RegionInfo.DEFAULT_REPLICA_ID;
     }
     return Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
   }
@@ -188,4 +182,18 @@ public class ZNodePaths {
   public boolean isDefaultMetaReplicaZnode(String znode) {
     return metaReplicaZNodes.get(DEFAULT_REPLICA_ID).equals(znode);
   }
+
+  /**
+   * Join the prefix znode name with the suffix znode name to generate a proper
+   * full znode name.
+   *
+   * Assumes prefix does not end with slash and suffix does not begin with it.
+   *
+   * @param prefix beginning of znode name
+   * @param suffix ending of znode name
+   * @return result of properly joining prefix with suffix
+   */
+  public static String joinZNode(String prefix, String suffix) {
+    return prefix + ZNodePaths.ZNODE_PATH_SEPARATOR + suffix;
+  }
 }
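
The hunk above moves joinZNode out of ZKUtil and into ZNodePaths, alongside a local ZNODE_PATH_SEPARATOR constant, so hbase-client code can compose znode paths without depending on ZKUtil. Below is a minimal sketch of the relocated helper in use; the base path and child names are illustrative, not taken from the patch.

import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

// Sketch only: composing znode paths with the relocated helper. joinZNode simply
// inserts ZNODE_PATH_SEPARATOR ('/') between prefix and suffix, so the prefix must
// not end with a slash and the suffix must not start with one.
public class JoinZNodeExample {
  public static void main(String[] args) {
    String baseZNode = "/hbase";
    String rsZNode = ZNodePaths.joinZNode(baseZNode, "rs");        // "/hbase/rs"
    String serverZNode = ZNodePaths.joinZNode(rsZNode, "server1"); // "/hbase/rs/server1"
    System.out.println(serverZNode);
  }
}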

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java
index 3df9880..045fd97 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java
@@ -51,7 +51,7 @@ public class ZkAclReset extends Configured implements Tool {
     List<String> children = ZKUtil.listChildrenNoWatch(zkw, znode);
     if (children != null) {
       for (String child: children) {
-        resetAcls(zkw, ZKUtil.joinZNode(znode, child), eraseAcls);
+        resetAcls(zkw, ZNodePaths.joinZNode(znode, child), eraseAcls);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 49113d0..5d10cdf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -221,7 +221,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
     List<String> children = recoverableZooKeeper.getChildren(znode, false);
 
     for (String child : children) {
-      setZnodeAclsRecursive(ZKUtil.joinZNode(znode, child));
+      setZnodeAclsRecursive(ZNodePaths.joinZNode(znode, child));
     }
     List<ACL> acls = ZKUtil.createACL(this, znode, true);
     LOG.info("Setting ACLs for znode:" + znode + " , acl:" + acls);

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingAsyncRegistry.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingAsyncRegistry.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingAsyncRegistry.java
new file mode 100644
index 0000000..6633068
--- /dev/null
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingAsyncRegistry.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.util.concurrent.CompletableFuture;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Registry that does nothing. Otherwise, default Registry wants zookeeper up and running.
+ */
+@InterfaceAudience.Private
+class DoNothingAsyncRegistry implements AsyncRegistry {
+
+  public DoNothingAsyncRegistry(Configuration conf) {
+  }
+
+  @Override
+  public CompletableFuture<RegionLocations> getMetaRegionLocation() {
+    return CompletableFuture.completedFuture(null);
+  }
+
+  @Override
+  public CompletableFuture<String> getClusterId() {
+    return CompletableFuture.completedFuture(null);
+  }
+
+  @Override
+  public CompletableFuture<Integer> getCurrentNrHRS() {
+    return CompletableFuture.completedFuture(0);
+  }
+
+  @Override
+  public CompletableFuture<ServerName> getMasterAddress() {
+    return CompletableFuture.completedFuture(null);
+  }
+
+  @Override
+  public CompletableFuture<Integer> getMasterInfoPort() {
+    return CompletableFuture.completedFuture(0);
+  }
+
+  @Override
+  public void close() {
+  }
+}
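
DoNothingAsyncRegistry completes every future immediately with null or zero, so client tests can run without a ZooKeeper quorum; the later hunks in this commit subclass it and override only the methods a particular test cares about. A hedged sketch of that subclassing pattern follows; the class name and returned values are invented for illustration, and the class sits in the same package because the stub is package-private.

package org.apache.hadoop.hbase.client;

import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ServerName;

// Illustrative test registry: stubs only the cluster id and master address and
// inherits the no-op behaviour of DoNothingAsyncRegistry for everything else.
class FixedClusterIdRegistry extends DoNothingAsyncRegistry {

  public FixedClusterIdRegistry(Configuration conf) {
    super(conf);
  }

  @Override
  public CompletableFuture<String> getClusterId() {
    return CompletableFuture.completedFuture("example-cluster-id");
  }

  @Override
  public CompletableFuture<ServerName> getMasterAddress() {
    return CompletableFuture.completedFuture(
        ServerName.valueOf("example-host", 16000, System.currentTimeMillis()));
  }
}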

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index a0f18f4..f9fbe85 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -37,6 +37,7 @@ import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
@@ -456,23 +457,20 @@ public class TestAsyncProcess {
    * Returns our async process.
    */
   static class MyConnectionImpl extends ConnectionImplementation {
-    public static class TestRegistry implements Registry {
-      @Override
-      public void init(Connection connection) {}
+    public static class TestRegistry extends DoNothingAsyncRegistry {
 
-      @Override
-      public RegionLocations getMetaRegionLocation() throws IOException {
-        return null;
+      public TestRegistry(Configuration conf) {
+        super(conf);
       }
 
       @Override
-      public String getClusterId() {
-        return "testClusterId";
+      public CompletableFuture<String> getClusterId() {
+        return CompletableFuture.completedFuture("testClusterId");
       }
 
       @Override
-      public int getCurrentNrHRS() throws IOException {
-        return 1;
+      public CompletableFuture<Integer> getCurrentNrHRS() {
+        return CompletableFuture.completedFuture(1);
       }
     }
 
@@ -483,7 +481,8 @@ public class TestAsyncProcess {
     }
 
     private static Configuration setupConf(Configuration conf) {
-      conf.setClass(RegistryFactory.REGISTRY_IMPL_CONF_KEY, TestRegistry.class, Registry.class);
+      conf.setClass(AsyncRegistryFactory.REGISTRY_IMPL_CONF_KEY, TestRegistry.class,
+        AsyncRegistry.class);
       return conf;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
index d1a3eb9..50befce 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
@@ -17,14 +17,12 @@
  */
 package org.apache.hadoop.hbase.client;
 
-
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -40,35 +38,6 @@ public class TestBufferedMutator {
   public TestName name = new TestName();
 
   /**
-   * Registry that does nothing.
-   * Otherwise, default Registry wants zookeeper up and running.
-   */
-  public static class DoNothingRegistry implements Registry {
-    @Override
-    public void init(Connection connection) {
-      // TODO Auto-generated method stub
-    }
-
-    @Override
-    public RegionLocations getMetaRegionLocation() throws IOException {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
-    public String getClusterId() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
-    public int getCurrentNrHRS() throws IOException {
-      // TODO Auto-generated method stub
-      return 0;
-    }
-  }
-
-  /**
    * My BufferedMutator.
    * Just to prove that I can insert a BM other than default.
    */
@@ -83,7 +52,7 @@ public class TestBufferedMutator {
   public void testAlternateBufferedMutatorImpl() throws IOException {
     BufferedMutatorParams params =  new BufferedMutatorParams(TableName.valueOf(name.getMethodName()));
     Configuration conf = HBaseConfiguration.create();
-    conf.set(RegistryFactory.REGISTRY_IMPL_CONF_KEY, DoNothingRegistry.class.getName());
+    conf.set(AsyncRegistryFactory.REGISTRY_IMPL_CONF_KEY, DoNothingAsyncRegistry.class.getName());
     try (Connection connection = ConnectionFactory.createConnection(conf)) {
       BufferedMutator bm = connection.getBufferedMutator(params);
       // Assert we get default BM if nothing specified.

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index 0f11156..9f32976 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -27,18 +27,20 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
 import java.util.SortedMap;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
-import org.apache.hadoop.hbase.CellComparatorImpl;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
+
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -49,6 +51,26 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.RegionTooBusyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Stopwatch;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.CellProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
@@ -73,26 +95,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrEx
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Stopwatch;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 
 /**
  * Test client behavior w/o setting up a cluster.
@@ -117,27 +119,27 @@ public class TestClientNoCluster extends Configured implements Tool {
   /**
    * Simple cluster registry inserted in place of our usual zookeeper based one.
    */
-  static class SimpleRegistry implements Registry {
+  static class SimpleRegistry extends DoNothingAsyncRegistry {
     final ServerName META_HOST = META_SERVERNAME;
 
-    @Override
-    public void init(Connection connection) {
+    public SimpleRegistry(Configuration conf) {
+      super(conf);
     }
 
     @Override
-    public RegionLocations getMetaRegionLocation() throws IOException {
-      return new RegionLocations(
-        new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, META_HOST));
+    public CompletableFuture<RegionLocations> getMetaRegionLocation() {
+      return CompletableFuture.completedFuture(new RegionLocations(
+          new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, META_HOST)));
     }
 
     @Override
-    public String getClusterId() {
-      return HConstants.CLUSTER_ID_DEFAULT;
+    public CompletableFuture<String> getClusterId() {
+      return CompletableFuture.completedFuture(HConstants.CLUSTER_ID_DEFAULT);
     }
 
     @Override
-    public int getCurrentNrHRS() throws IOException {
-      return 1;
+    public CompletableFuture<Integer> getCurrentNrHRS() {
+      return CompletableFuture.completedFuture(1);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java
index 4b6ef96..3c99175 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.zookeeper;
 
 import static org.junit.Assert.assertFalse;
@@ -33,15 +32,16 @@ public class TestZooKeeperWatcher {
 
   @Test
   public void testIsClientReadable() throws IOException {
-    ZooKeeperWatcher watcher = new ZooKeeperWatcher(HBaseConfiguration.create(),
-        "testIsClientReadable", null, false);
+    ZooKeeperWatcher watcher =
+      new ZooKeeperWatcher(HBaseConfiguration.create(), "testIsClientReadable", null, false);
 
     assertTrue(watcher.isClientReadable(watcher.znodePaths.baseZNode));
     assertTrue(watcher.isClientReadable(watcher.znodePaths.getZNodeForReplica(0)));
     assertTrue(watcher.isClientReadable(watcher.znodePaths.masterAddressZNode));
     assertTrue(watcher.isClientReadable(watcher.znodePaths.clusterIdZNode));
     assertTrue(watcher.isClientReadable(watcher.znodePaths.tableZNode));
-    assertTrue(watcher.isClientReadable(ZKUtil.joinZNode(watcher.znodePaths.tableZNode, "foo")));
+    assertTrue(
+      watcher.isClientReadable(ZNodePaths.joinZNode(watcher.znodePaths.tableZNode, "foo")));
     assertTrue(watcher.isClientReadable(watcher.znodePaths.rsZNode));
 
     assertFalse(watcher.isClientReadable(watcher.znodePaths.tableLockZNode));
@@ -54,5 +54,4 @@ public class TestZooKeeperWatcher {
 
     watcher.close();
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
index 667daa8..eba8518 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -63,7 +64,7 @@ public class IntegrationTestMetaReplicas {
     Configuration conf = util.getConfiguration();
     String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
         HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
-    String primaryMetaZnode = ZKUtil.joinZNode(baseZNode,
+    String primaryMetaZnode = ZNodePaths.joinZNode(baseZNode,
         conf.get("zookeeper.znode.metaserver", "meta-region-server"));
     // check that the data in the znode is parseable (this would also mean the znode exists)
     byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java
index 0d85e42..92bad7f 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.zookeeper.KeeperException;
@@ -165,7 +166,7 @@ public class IntegrationTestZKAndFSPermissions extends AbstractHBaseTool {
       List<String> children = zk.getChildren(znode, false);
 
       for (String child : children) {
-        checkZnodePermsRecursive(watcher, zk, ZKUtil.joinZNode(znode, child));
+        checkZnodePermsRecursive(watcher, zk, ZNodePaths.joinZNode(znode, child));
       }
     } catch (KeeperException ke) {
       // if we are not authenticated for listChildren, it is fine.

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index ca545f7..0f39b2a 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
@@ -152,7 +153,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
         throw new IllegalArgumentException("Cannot remove peer with id=" + id
             + " because that id does not exist.");
       }
-      ZKUtil.deleteNodeRecursively(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id));
+      ZKUtil.deleteNodeRecursively(this.zookeeper, ZNodePaths.joinZNode(this.peersZNode, id));
     } catch (KeeperException e) {
       throw new ReplicationException("Could not remove peer with id=" + id, e);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
index 95b2e04..49e55ef 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
@@ -29,6 +29,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.data.Stat;
@@ -61,8 +62,8 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem
 
   @Override
   public List<String> getLogsInQueue(String serverName, String queueId) throws KeeperException {
-    String znode = ZKUtil.joinZNode(this.queuesZNode, serverName);
-    znode = ZKUtil.joinZNode(znode, queueId);
+    String znode = ZNodePaths.joinZNode(this.queuesZNode, serverName);
+    znode = ZNodePaths.joinZNode(znode, queueId);
     List<String> result = null;
     try {
       result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode);
@@ -76,7 +77,7 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem
 
   @Override
   public List<String> getAllQueues(String serverName) throws KeeperException {
-    String znode = ZKUtil.joinZNode(this.queuesZNode, serverName);
+    String znode = ZNodePaths.joinZNode(this.queuesZNode, serverName);
     List<String> result = null;
     try {
       result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode);
@@ -162,7 +163,7 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem
 
   @Override
   public List<String> getReplicableHFiles(String peerId) throws KeeperException {
-    String znode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
+    String znode = ZNodePaths.joinZNode(this.hfileRefsZNode, peerId);
     List<String> result = null;
     try {
       result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode);

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index 8e61df9..7b1d5c2 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
@@ -81,7 +82,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
   @Override
   public void init(String serverName) throws ReplicationException {
-    this.myQueuesZnode = ZKUtil.joinZNode(this.queuesZNode, serverName);
+    this.myQueuesZnode = ZNodePaths.joinZNode(this.queuesZNode, serverName);
     try {
       if (ZKUtil.checkExists(this.zookeeper, this.myQueuesZnode) < 0) {
         ZKUtil.createWithParents(this.zookeeper, this.myQueuesZnode);
@@ -105,7 +106,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
   @Override
   public void removeQueue(String queueId) {
     try {
-      ZKUtil.deleteNodeRecursively(this.zookeeper, ZKUtil.joinZNode(this.myQueuesZnode, queueId));
+      ZKUtil.deleteNodeRecursively(this.zookeeper,
+        ZNodePaths.joinZNode(this.myQueuesZnode, queueId));
     } catch (KeeperException e) {
       this.abortable.abort("Failed to delete queue (queueId=" + queueId + ")", e);
     }
@@ -113,8 +115,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
   @Override
   public void addLog(String queueId, String filename) throws ReplicationException {
-    String znode = ZKUtil.joinZNode(this.myQueuesZnode, queueId);
-    znode = ZKUtil.joinZNode(znode, filename);
+    String znode = ZNodePaths.joinZNode(this.myQueuesZnode, queueId);
+    znode = ZNodePaths.joinZNode(znode, filename);
     try {
       ZKUtil.createWithParents(this.zookeeper, znode);
     } catch (KeeperException e) {
@@ -127,8 +129,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
   @Override
   public void removeLog(String queueId, String filename) {
     try {
-      String znode = ZKUtil.joinZNode(this.myQueuesZnode, queueId);
-      znode = ZKUtil.joinZNode(znode, filename);
+      String znode = ZNodePaths.joinZNode(this.myQueuesZnode, queueId);
+      znode = ZNodePaths.joinZNode(znode, filename);
       ZKUtil.deleteNode(this.zookeeper, znode);
     } catch (KeeperException e) {
       this.abortable.abort("Failed to remove wal from queue (queueId=" + queueId + ", filename="
@@ -139,8 +141,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
   @Override
   public void setLogPosition(String queueId, String filename, long position) {
     try {
-      String znode = ZKUtil.joinZNode(this.myQueuesZnode, queueId);
-      znode = ZKUtil.joinZNode(znode, filename);
+      String znode = ZNodePaths.joinZNode(this.myQueuesZnode, queueId);
+      znode = ZNodePaths.joinZNode(znode, filename);
       // Why serialize String of Long and not Long as bytes?
       ZKUtil.setData(this.zookeeper, znode, ZKUtil.positionToByteArray(position));
     } catch (KeeperException e) {
@@ -151,8 +153,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
   @Override
   public long getLogPosition(String queueId, String filename) throws ReplicationException {
-    String clusterZnode = ZKUtil.joinZNode(this.myQueuesZnode, queueId);
-    String znode = ZKUtil.joinZNode(clusterZnode, filename);
+    String clusterZnode = ZNodePaths.joinZNode(this.myQueuesZnode, queueId);
+    String znode = ZNodePaths.joinZNode(clusterZnode, filename);
     byte[] bytes = null;
     try {
       bytes = ZKUtil.getData(this.zookeeper, znode);
@@ -176,7 +178,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
   @Override
   public boolean isThisOurRegionServer(String regionserver) {
-    return ZKUtil.joinZNode(this.queuesZNode, regionserver).equals(this.myQueuesZnode);
+    return ZNodePaths.joinZNode(this.queuesZNode, regionserver).equals(this.myQueuesZnode);
   }
 
   @Override
@@ -184,7 +186,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
     if (isThisOurRegionServer(regionserver)) {
       return null;
     }
-    String rsZnodePath = ZKUtil.joinZNode(this.queuesZNode, regionserver);
+    String rsZnodePath = ZNodePaths.joinZNode(this.queuesZNode, regionserver);
     List<String> queues = null;
     try {
       queues = ZKUtil.listChildrenNoWatch(this.zookeeper, rsZnodePath);
@@ -202,7 +204,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
   @Override
   public void removeReplicatorIfQueueIsEmpty(String regionserver) {
-    String rsPath = ZKUtil.joinZNode(this.queuesZNode, regionserver);
+    String rsPath = ZNodePaths.joinZNode(this.queuesZNode, regionserver);
     try {
       List<String> list = ZKUtil.listChildrenNoWatch(this.zookeeper, rsPath);
       if (list != null && list.isEmpty()){
@@ -229,7 +231,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
   @Override
   public List<String> getLogsInQueue(String queueId) {
-    String znode = ZKUtil.joinZNode(this.myQueuesZnode, queueId);
+    String znode = ZNodePaths.joinZNode(this.myQueuesZnode, queueId);
     List<String> result = null;
     try {
       result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode);
@@ -260,21 +262,21 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
   private Pair<String, SortedSet<String>> moveQueueUsingMulti(String znode, String peerId) {
     try {
       // hbase/replication/rs/deadrs
-      String deadRSZnodePath = ZKUtil.joinZNode(this.queuesZNode, znode);
+      String deadRSZnodePath = ZNodePaths.joinZNode(this.queuesZNode, znode);
       List<ZKUtilOp> listOfOps = new ArrayList<>();
       ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(peerId);
 
       String newPeerId = peerId + "-" + znode;
-      String newPeerZnode = ZKUtil.joinZNode(this.myQueuesZnode, newPeerId);
+      String newPeerZnode = ZNodePaths.joinZNode(this.myQueuesZnode, newPeerId);
       // check the logs queue for the old peer cluster
-      String oldClusterZnode = ZKUtil.joinZNode(deadRSZnodePath, peerId);
+      String oldClusterZnode = ZNodePaths.joinZNode(deadRSZnodePath, peerId);
       List<String> wals = ZKUtil.listChildrenNoWatch(this.zookeeper, oldClusterZnode);
 
       if (!peerExists(replicationQueueInfo.getPeerId())) {
         LOG.warn("Peer " + replicationQueueInfo.getPeerId() +
                 " didn't exist, will move its queue to avoid the failure of multi op");
         for (String wal : wals) {
-          String oldWalZnode = ZKUtil.joinZNode(oldClusterZnode, wal);
+          String oldWalZnode = ZNodePaths.joinZNode(oldClusterZnode, wal);
           listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldWalZnode));
         }
         listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode));
@@ -291,10 +293,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
         listOfOps.add(op);
         // get the offset of the logs and set it to new znodes
         for (String wal : wals) {
-          String oldWalZnode = ZKUtil.joinZNode(oldClusterZnode, wal);
+          String oldWalZnode = ZNodePaths.joinZNode(oldClusterZnode, wal);
           byte[] logOffset = ZKUtil.getData(this.zookeeper, oldWalZnode);
           LOG.debug("Creating " + wal + " with data " + Bytes.toString(logOffset));
-          String newLogZnode = ZKUtil.joinZNode(newPeerZnode, wal);
+          String newLogZnode = ZNodePaths.joinZNode(newPeerZnode, wal);
           listOfOps.add(ZKUtilOp.createAndFailSilent(newLogZnode, logOffset));
           listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldWalZnode));
           logQueue.add(wal);
@@ -322,7 +324,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
   @Override
   public void addHFileRefs(String peerId, List<Pair<Path, Path>> pairs)
       throws ReplicationException {
-    String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
+    String peerZnode = ZNodePaths.joinZNode(this.hfileRefsZNode, peerId);
     boolean debugEnabled = LOG.isDebugEnabled();
     if (debugEnabled) {
       LOG.debug("Adding hfile references " + pairs + " in queue " + peerZnode);
@@ -333,7 +335,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
     for (int i = 0; i < size; i++) {
       listOfOps.add(ZKUtilOp.createAndFailSilent(
-        ZKUtil.joinZNode(peerZnode, pairs.get(i).getSecond().getName()),
+        ZNodePaths.joinZNode(peerZnode, pairs.get(i).getSecond().getName()),
         HConstants.EMPTY_BYTE_ARRAY));
     }
     if (debugEnabled) {
@@ -349,7 +351,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
   @Override
   public void removeHFileRefs(String peerId, List<String> files) {
-    String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
+    String peerZnode = ZNodePaths.joinZNode(this.hfileRefsZNode, peerId);
     boolean debugEnabled = LOG.isDebugEnabled();
     if (debugEnabled) {
       LOG.debug("Removing hfile references " + files + " from queue " + peerZnode);
@@ -359,7 +361,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
     List<ZKUtilOp> listOfOps = new ArrayList<>(size);
 
     for (int i = 0; i < size; i++) {
-      listOfOps.add(ZKUtilOp.deleteNodeFailSilent(ZKUtil.joinZNode(peerZnode, files.get(i))));
+      listOfOps.add(ZKUtilOp.deleteNodeFailSilent(ZNodePaths.joinZNode(peerZnode, files.get(i))));
     }
     if (debugEnabled) {
       LOG.debug(" The multi list size for removing hfile references in zk for node " + peerZnode
@@ -374,7 +376,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
   @Override
   public void addPeerToHFileRefs(String peerId) throws ReplicationException {
-    String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
+    String peerZnode = ZNodePaths.joinZNode(this.hfileRefsZNode, peerId);
     try {
       if (ZKUtil.checkExists(this.zookeeper, peerZnode) == -1) {
         LOG.info("Adding peer " + peerId + " to hfile reference queue.");
@@ -388,7 +390,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
   @Override
   public void removePeerFromHFileRefs(String peerId) {
-    final String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
+    final String peerZnode = ZNodePaths.joinZNode(this.hfileRefsZNode, peerId);
     try {
       if (ZKUtil.checkExists(this.zookeeper, peerZnode) == -1) {
         if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
index c6501e1..e96401e 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
@@ -87,11 +88,11 @@ public abstract class ReplicationStateZKBase {
     this.peerStateNodeName = conf.get("zookeeper.znode.replication.peers.state", "peer-state");
     this.tableCFsNodeName = conf.get("zookeeper.znode.replication.peers.tableCFs", "tableCFs");
     this.ourClusterKey = ZKConfig.getZooKeeperClusterKey(this.conf);
-    this.replicationZNode = ZKUtil.joinZNode(this.zookeeper.znodePaths.baseZNode,
+    this.replicationZNode = ZNodePaths.joinZNode(this.zookeeper.znodePaths.baseZNode,
       replicationZNodeName);
-    this.peersZNode = ZKUtil.joinZNode(replicationZNode, peersZNodeName);
-    this.queuesZNode = ZKUtil.joinZNode(replicationZNode, queuesZNodeName);
-    this.hfileRefsZNode = ZKUtil.joinZNode(replicationZNode, hfileRefsZNodeName);
+    this.peersZNode = ZNodePaths.joinZNode(replicationZNode, peersZNodeName);
+    this.queuesZNode = ZNodePaths.joinZNode(replicationZNode, queuesZNodeName);
+    this.hfileRefsZNode = ZNodePaths.joinZNode(replicationZNode, hfileRefsZNodeName);
   }
 
   public List<String> getListOfReplicators() {
@@ -127,7 +128,7 @@ public abstract class ReplicationStateZKBase {
   }
 
   protected boolean peerExists(String id) throws KeeperException {
-    return ZKUtil.checkExists(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id)) >= 0;
+    return ZKUtil.checkExists(this.zookeeper, ZNodePaths.joinZNode(this.peersZNode, id)) >= 0;
   }
 
   /**
@@ -141,15 +142,15 @@ public abstract class ReplicationStateZKBase {
 
   @VisibleForTesting
   protected String getTableCFsNode(String id) {
-    return ZKUtil.joinZNode(this.peersZNode, ZKUtil.joinZNode(id, this.tableCFsNodeName));
+    return ZNodePaths.joinZNode(this.peersZNode, ZNodePaths.joinZNode(id, this.tableCFsNodeName));
   }
 
   @VisibleForTesting
   protected String getPeerStateNode(String id) {
-    return ZKUtil.joinZNode(this.peersZNode, ZKUtil.joinZNode(id, this.peerStateNodeName));
+    return ZNodePaths.joinZNode(this.peersZNode, ZNodePaths.joinZNode(id, this.peerStateNodeName));
   }
   @VisibleForTesting
   protected String getPeerNode(String id) {
-    return ZKUtil.joinZNode(this.peersZNode, id);
+    return ZNodePaths.joinZNode(this.peersZNode, id);
   }
 }
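
All of the replication paths above are now composed with ZNodePaths.joinZNode rather than ZKUtil.joinZNode, including the nested joins in getTableCFsNode and getPeerStateNode. A sketch of the resulting layout, assuming the default "/hbase" base znode and the default node names that appear in the hunks; the peer id "1" is illustrative.

import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

// Sketch only: the replication znode layout that the constructor above builds.
public class ReplicationZNodeLayoutSketch {
  public static void main(String[] args) {
    String replicationZNode = ZNodePaths.joinZNode("/hbase", "replication");
    String peersZNode = ZNodePaths.joinZNode(replicationZNode, "peers");
    String queuesZNode = ZNodePaths.joinZNode(replicationZNode, "rs");
    String hfileRefsZNode = ZNodePaths.joinZNode(replicationZNode, "hfile-refs");
    // Nested join, as in getPeerStateNode(id) above.
    String peerStateNode =
        ZNodePaths.joinZNode(peersZNode, ZNodePaths.joinZNode("1", "peer-state"));
    System.out.println(peersZNode);     // /hbase/replication/peers
    System.out.println(queuesZNode);    // /hbase/replication/rs
    System.out.println(hfileRefsZNode); // /hbase/replication/hfile-refs
    System.out.println(peerStateNode);  // /hbase/replication/peers/1/peer-state
  }
}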

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 7cf04c7..3d152bb 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
@@ -319,13 +320,13 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   }
 
   List<RSGroupInfo> retrieveGroupListFromZookeeper() throws IOException {
-    String groupBasePath = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, rsGroupZNode);
+    String groupBasePath = ZNodePaths.joinZNode(watcher.znodePaths.baseZNode, rsGroupZNode);
     List<RSGroupInfo> RSGroupInfoList = Lists.newArrayList();
     //Overwrite any info stored by table, this takes precedence
     try {
       if(ZKUtil.checkExists(watcher, groupBasePath) != -1) {
         for(String znode: ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath)) {
-          byte[] data = ZKUtil.getData(watcher, ZKUtil.joinZNode(groupBasePath, znode));
+          byte[] data = ZKUtil.getData(watcher, ZNodePaths.joinZNode(groupBasePath, znode));
           if(data.length > 0) {
             ProtobufUtil.expectPBMagicPrefix(data);
             ByteArrayInputStream bis = new ByteArrayInputStream(
@@ -469,20 +470,20 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     resetRSGroupAndTableMaps(newGroupMap, newTableMap);
 
     try {
-      String groupBasePath = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, rsGroupZNode);
+      String groupBasePath = ZNodePaths.joinZNode(watcher.znodePaths.baseZNode, rsGroupZNode);
       ZKUtil.createAndFailSilent(watcher, groupBasePath, ProtobufMagic.PB_MAGIC);
 
       List<ZKUtil.ZKUtilOp> zkOps = new ArrayList<>(newGroupMap.size());
       for(String groupName : prevRSGroups) {
         if(!newGroupMap.containsKey(groupName)) {
-          String znode = ZKUtil.joinZNode(groupBasePath, groupName);
+          String znode = ZNodePaths.joinZNode(groupBasePath, groupName);
           zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode));
         }
       }
 
 
       for (RSGroupInfo RSGroupInfo : newGroupMap.values()) {
-        String znode = ZKUtil.joinZNode(groupBasePath, RSGroupInfo.getName());
+        String znode = ZNodePaths.joinZNode(groupBasePath, RSGroupInfo.getName());
         RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo);
         LOG.debug("Updating znode: "+znode);
         ZKUtil.createAndFailSilent(watcher, znode);

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
index ea019bc..9077f15 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.junit.Assert;
@@ -124,9 +125,9 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin {
     Assert.assertEquals(Sets.newHashSet(groupMap.values()),
         Sets.newHashSet(wrapped.listRSGroups()));
     try {
-      String groupBasePath = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "rsgroup");
+      String groupBasePath = ZNodePaths.joinZNode(zkw.znodePaths.baseZNode, "rsgroup");
       for(String znode: ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) {
-        byte[] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(groupBasePath, znode));
+        byte[] data = ZKUtil.getData(zkw, ZNodePaths.joinZNode(groupBasePath, znode));
         if(data.length > 0) {
           ProtobufUtil.expectPBMagicPrefix(data);
           ByteArrayInputStream bis = new ByteArrayInputStream(

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
index 6e40295..c4fad44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -165,7 +166,7 @@ public class ZKNamespaceManager extends ZooKeeperListener {
   }
 
   private void deleteNamespace(String name) throws IOException {
-    String zNode = ZKUtil.joinZNode(nsZNode, name);
+    String zNode = ZNodePaths.joinZNode(nsZNode, name);
     try {
       ZKUtil.deleteNode(watcher, zNode);
     } catch (KeeperException e) {
@@ -180,7 +181,7 @@ public class ZKNamespaceManager extends ZooKeeperListener {
   }
 
   private void writeNamespace(NamespaceDescriptor ns) throws IOException {
-    String zNode = ZKUtil.joinZNode(nsZNode, ns.getName());
+    String zNode = ZNodePaths.joinZNode(nsZNode, ns.getName());
     try {
       ZKUtil.createWithParents(watcher, zNode);
       ZKUtil.updateExistingNodeData(watcher, zNode,

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
index 5b3278f..d7fdeb7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
@@ -181,11 +182,12 @@ public class ZNodeClearer {
     String znodeFileContent;
     try {
       znodeFileContent = ZNodeClearer.readMyEphemeralNodeOnDisk();
-      if(ZNodeClearer.tablesOnMaster(conf)) {
-      //In case of master crash also remove rsZnode since master is also regionserver 
-        ZKUtil.deleteNodeFailSilent(zkw, ZKUtil.joinZNode(zkw.znodePaths.rsZNode,znodeFileContent));
-        return MasterAddressTracker.deleteIfEquals(zkw, 
-                                    ZNodeClearer.parseMasterServerName(znodeFileContent));
+      if (ZNodeClearer.tablesOnMaster(conf)) {
+        // In case of master crash also remove rsZnode since master is also regionserver
+        ZKUtil.deleteNodeFailSilent(zkw,
+          ZNodePaths.joinZNode(zkw.znodePaths.rsZNode, znodeFileContent));
+        return MasterAddressTracker.deleteIfEquals(zkw,
+          ZNodeClearer.parseMasterServerName(znodeFileContent));
       } else {
         return MasterAddressTracker.deleteIfEquals(zkw, znodeFileContent);
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
index 61d6a02..d261993 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
@@ -27,6 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
@@ -164,6 +165,6 @@ class HFileArchiveManager {
    * @return znode for the table's archive status
    */
   private String getTableNode(byte[] table) {
-    return ZKUtil.joinZNode(archiveZnode, Bytes.toString(table));
+    return ZNodePaths.joinZNode(archiveZnode, Bytes.toString(table));
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
index 6c173cf..9a7903a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
@@ -149,7 +149,7 @@ public class ZKTableArchiveClient extends Configured {
    * @return get the znode for long-term archival of a table for
    */
   public static String getArchiveZNode(Configuration conf, ZooKeeperWatcher zooKeeper) {
-    return ZKUtil.joinZNode(zooKeeper.znodePaths.baseZNode, conf.get(
+    return ZNodePaths.joinZNode(zooKeeper.znodePaths.baseZNode, conf.get(
       ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY, TableHFileArchiveTracker.HFILE_ARCHIVE_ZNODE_PARENT));
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index 8073d64..c19c3a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -41,12 +41,12 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective;
 import org.apache.hadoop.hbase.master.SplitLogManager.Task;
 import org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus;
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
+import org.apache.hadoop.hbase.zookeeper.ZKMetadata;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.StringUtils;
@@ -58,6 +58,8 @@ import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.data.Stat;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+
 /**
  * ZooKeeper based implementation of
  * {@link SplitLogManagerCoordination}
@@ -355,7 +357,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
       setDone(path, FAILURE);
       return;
     }
-    data = RecoverableZooKeeper.removeMetaData(data);
+    data = ZKMetadata.removeMetaData(data);
     SplitLogTask slt = SplitLogTask.parseFrom(data);
     if (slt.isUnassigned()) {
       LOG.debug("task not yet acquired " + path + " ver = " + version);
@@ -478,7 +480,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
     int listSize = orphans.size();
     for (int i = 0; i < listSize; i++) {
       String path = orphans.get(i);
-      String nodepath = ZKUtil.joinZNode(watcher.znodePaths.splitLogZNode, path);
+      String nodepath = ZNodePaths.joinZNode(watcher.znodePaths.splitLogZNode, path);
       if (ZKSplitLog.isRescanNode(watcher, nodepath)) {
         rescan_nodes++;
         LOG.debug("found orphan rescan node " + path);

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index e196248..ef87498 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
@@ -43,12 +42,14 @@ import org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
+import org.apache.hadoop.hbase.zookeeper.ZKMetadata;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.AsyncCallback;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.data.Stat;
@@ -419,7 +420,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
         // don't call ZKSplitLog.getNodeName() because that will lead to
         // double encoding of the path name
         if (this.calculateAvailableSplitters(numTasks) > 0) {
-          grabTask(ZKUtil.joinZNode(watcher.znodePaths.splitLogZNode, paths.get(idx)));
+          grabTask(ZNodePaths.joinZNode(watcher.znodePaths.splitLogZNode, paths.get(idx)));
         } else {
           LOG.debug("Current region server " + server.getServerName() + " has "
               + this.tasksInProgress.get() + " tasks in progress and can't take more.");
@@ -524,7 +525,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
         getDataSetWatchFailure(path);
         return;
       }
-      data = RecoverableZooKeeper.removeMetaData(data);
+      data = ZKMetadata.removeMetaData(data);
       getDataSetWatchSuccess(path, data);
     }
   }

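Alongside the joinZNode change, both split-log coordination classes above switch RecoverableZooKeeper.removeMetaData(data) to ZKMetadata.removeMetaData(data); the call site is otherwise unchanged. A minimal sketch of that read-then-strip pattern, assuming only the byte[] in / byte[] out shape visible in the hunks (the znode path below is hypothetical and a connected ZooKeeperWatcher is required):

  // Sketch only: shows the read-then-strip pattern from the split-log hunks above.
  import org.apache.hadoop.hbase.SplitLogTask;
  import org.apache.hadoop.hbase.exceptions.DeserializationException;
  import org.apache.hadoop.hbase.zookeeper.ZKMetadata;
  import org.apache.hadoop.hbase.zookeeper.ZKUtil;
  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
  import org.apache.zookeeper.KeeperException;

  class SplitLogTaskReadSketch {
    static SplitLogTask readTask(ZooKeeperWatcher watcher, String taskZNode)
        throws KeeperException, InterruptedException, DeserializationException {
      byte[] raw = ZKUtil.getData(watcher, taskZNode);  // raw bytes carry HBase ZK metadata
      byte[] payload = ZKMetadata.removeMetaData(raw);  // strip it before protobuf parsing
      return SplitLogTask.parseFrom(payload);
    }
  }
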
http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
index 25e1ec8..f154347 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -155,7 +156,7 @@ public class ActiveMasterManager extends ZooKeeperListener {
    */
   boolean blockUntilBecomingActiveMaster(
       int checkInterval, MonitoredTask startupStatus) {
-    String backupZNode = ZKUtil.joinZNode(
+    String backupZNode = ZNodePaths.joinZNode(
       this.watcher.znodePaths.backupMasterAddressesZNode, this.sn.toString());
     while (!(master.isAborted() || master.isStopped())) {
       startupStatus.setStatus("Trying to register in ZK as active master");

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index f18202e..91c5218 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -183,6 +183,7 @@ import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
 import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
@@ -1980,7 +1981,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   private void startActiveMasterManager(int infoPort) throws KeeperException {
-    String backupZNode = ZKUtil.joinZNode(
+    String backupZNode = ZNodePaths.joinZNode(
       zooKeeper.znodePaths.backupMasterAddressesZNode, serverName.toString());
     /*
     * Add a ZNode for ourselves in the backup master directory since we
@@ -2511,7 +2512,7 @@ public class HMaster extends HRegionServer implements MasterServices {
         try {
           byte [] bytes;
           try {
-            bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
+            bytes = ZKUtil.getData(this.zooKeeper, ZNodePaths.joinZNode(
                 this.zooKeeper.znodePaths.backupMasterAddressesZNode, s));
           } catch (InterruptedException e) {
             throw new InterruptedIOException();
@@ -3429,7 +3430,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     String parentZnode = getZooKeeper().znodePaths.drainingZNode;
     for (ServerName server : servers) {
       try {
-        String node = ZKUtil.joinZNode(parentZnode, server.getServerName());
+        String node = ZNodePaths.joinZNode(parentZnode, server.getServerName());
         ZKUtil.createAndFailSilent(getZooKeeper(), node);
       } catch (KeeperException ke) {
         throw new HBaseIOException(
@@ -3476,7 +3477,7 @@ public class HMaster extends HRegionServer implements MasterServices {
       final List<byte[]> encodedRegionNames) throws HBaseIOException {
     // Remove the server from decommissioned (draining) server list.
     String parentZnode = getZooKeeper().znodePaths.drainingZNode;
-    String node = ZKUtil.joinZNode(parentZnode, server.getServerName());
+    String node = ZNodePaths.joinZNode(parentZnode, server.getServerName());
     try {
       ZKUtil.deleteNodeFailSilent(getZooKeeper(), node);
     } catch (KeeperException ke) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
index 822ca6f..97d3080 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
 import org.apache.hadoop.hbase.replication.ReplicationStateZKBase;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
@@ -139,8 +140,8 @@ public class ReplicationZKNodeCleaner {
      * @throws IOException
      */
     public void removeQueue(final String replicator, final String queueId) throws IOException {
-      String queueZnodePath = ZKUtil.joinZNode(ZKUtil.joinZNode(this.queuesZNode, replicator),
-        queueId);
+      String queueZnodePath =
+        ZNodePaths.joinZNode(ZNodePaths.joinZNode(this.queuesZNode, replicator), queueId);
       try {
         ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
         if (!replicationPeers.getAllPeerIds().contains(queueInfo.getPeerId())) {
@@ -159,7 +160,7 @@ public class ReplicationZKNodeCleaner {
      * @throws IOException
      */
     public void removeHFileRefsQueue(final String hfileRefsQueueId) throws IOException {
-      String node = ZKUtil.joinZNode(this.hfileRefsZNode, hfileRefsQueueId);
+      String node = ZNodePaths.joinZNode(this.hfileRefsZNode, hfileRefsQueueId);
       try {
         if (!replicationPeers.getAllPeerIds().contains(hfileRefsQueueId)) {
           ZKUtil.deleteNodeRecursively(this.zookeeper, node);

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java
index 8d34fe4..6985591 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java
@@ -28,6 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
@@ -96,7 +97,7 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs {
       ZKUtil.createWithParents(zkProc.getWatcher(), acquire, data);
       // loop through all the children of the acquire phase and watch for them
       for (String node : nodeNames) {
-        String znode = ZKUtil.joinZNode(acquire, node);
+        String znode = ZNodePaths.joinZNode(acquire, node);
         LOG.debug("Watching for acquire node:" + znode);
         if (ZKUtil.watchAndCheckExists(zkProc.getWatcher(), znode)) {
           coordinator.memberAcquiredBarrier(procName, node);
@@ -119,7 +120,7 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs {
       ZKUtil.createWithParents(zkProc.getWatcher(), reachedNode);
       // loop through all the children of the acquire phase and watch for them
       for (String node : nodeNames) {
-        String znode = ZKUtil.joinZNode(reachedNode, node);
+        String znode = ZNodePaths.joinZNode(reachedNode, node);
         if (ZKUtil.watchAndCheckExists(zkProc.getWatcher(), znode)) {
           byte[] dataFromMember = ZKUtil.getData(zkProc.getWatcher(), znode);
           // ProtobufUtil.isPBMagicPrefix will check null

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
index f8db277..36f4f44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
@@ -136,7 +137,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs {
       // this is the list of the currently aborted procedues
       for (String node : ZKUtil.listChildrenAndWatchForNewChildren(zkController.getWatcher(),
         zkController.getAbortZnode())) {
-        String abortNode = ZKUtil.joinZNode(zkController.getAbortZnode(), node);
+        String abortNode = ZNodePaths.joinZNode(zkController.getAbortZnode(), node);
         abort(abortNode);
       }
     } catch (KeeperException e) {
@@ -166,7 +167,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs {
     }
     for (String procName : runningProcedures) {
       // then read in the procedure information
-      String path = ZKUtil.joinZNode(zkController.getAcquiredBarrier(), procName);
+      String path = ZNodePaths.joinZNode(zkController.getAcquiredBarrier(), procName);
       startNewSubprocedure(path);
     }
   }
@@ -238,7 +239,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs {
     try {
       LOG.debug("Member: '" + memberName + "' joining acquired barrier for procedure (" + procName
           + ") in zk");
-      String acquiredZNode = ZKUtil.joinZNode(ZKProcedureUtil.getAcquireBarrierNode(
+      String acquiredZNode = ZNodePaths.joinZNode(ZKProcedureUtil.getAcquireBarrierNode(
         zkController, procName), memberName);
       ZKUtil.createAndFailSilent(zkController.getWatcher(), acquiredZNode);
 
@@ -262,7 +263,8 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs {
     String procName = sub.getName();
     LOG.debug("Marking procedure  '" + procName + "' completed for member '" + memberName
         + "' in zk");
-    String joinPath = ZKUtil.joinZNode(zkController.getReachedBarrierNode(procName), memberName);
+    String joinPath =
+      ZNodePaths.joinZNode(zkController.getReachedBarrierNode(procName), memberName);
     // ProtobufUtil.prependPBMagic does not take care of null
     if (data == null) {
       data = new byte[0];

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
index a19ecb2..24693cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -78,10 +79,10 @@ public abstract class ZKProcedureUtil
     // make sure we are listening for events
     watcher.registerListener(this);
     // setup paths for the zknodes used in procedures
-    this.baseZNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, procDescription);
-    acquiredZnode = ZKUtil.joinZNode(baseZNode, ACQUIRED_BARRIER_ZNODE_DEFAULT);
-    reachedZnode = ZKUtil.joinZNode(baseZNode, REACHED_BARRIER_ZNODE_DEFAULT);
-    abortZnode = ZKUtil.joinZNode(baseZNode, ABORT_ZNODE_DEFAULT);
+    this.baseZNode = ZNodePaths.joinZNode(watcher.znodePaths.baseZNode, procDescription);
+    acquiredZnode = ZNodePaths.joinZNode(baseZNode, ACQUIRED_BARRIER_ZNODE_DEFAULT);
+    reachedZnode = ZNodePaths.joinZNode(baseZNode, REACHED_BARRIER_ZNODE_DEFAULT);
+    abortZnode = ZNodePaths.joinZNode(baseZNode, ABORT_ZNODE_DEFAULT);
 
     // first make sure all the ZK nodes exist
     // make sure all the parents exist (sometimes not the case in tests)
@@ -130,7 +131,7 @@ public abstract class ZKProcedureUtil
    */
   public static String getAcquireBarrierNode(ZKProcedureUtil controller,
       String opInstanceName) {
-    return ZKUtil.joinZNode(controller.acquiredZnode, opInstanceName);
+    return ZNodePaths.joinZNode(controller.acquiredZnode, opInstanceName);
   }
 
   /**
@@ -142,7 +143,7 @@ public abstract class ZKProcedureUtil
    */
   public static String getReachedBarrierNode(ZKProcedureUtil controller,
       String opInstanceName) {
-    return ZKUtil.joinZNode(controller.reachedZnode, opInstanceName);
+    return ZNodePaths.joinZNode(controller.reachedZnode, opInstanceName);
   }
 
   /**
@@ -153,7 +154,7 @@ public abstract class ZKProcedureUtil
    * @return full znode path to the abort znode
    */
   public static String getAbortNode(ZKProcedureUtil controller, String opInstanceName) {
-    return ZKUtil.joinZNode(controller.abortZnode, opInstanceName);
+    return ZNodePaths.joinZNode(controller.abortZnode, opInstanceName);
   }
 
   public ZooKeeperWatcher getWatcher() {
@@ -212,7 +213,7 @@ public abstract class ZKProcedureUtil
   private boolean isMemberNode(final String path, final String statePath) {
     int count = 0;
     for (int i = statePath.length(); i < path.length(); ++i) {
-      count += (path.charAt(i) == ZKUtil.ZNODE_PATH_SEPARATOR) ? 1 : 0;
+      count += (path.charAt(i) == ZNodePaths.ZNODE_PATH_SEPARATOR) ? 1 : 0;
     }
     return count == 2;
   }
@@ -261,7 +262,7 @@ public abstract class ZKProcedureUtil
     if (children == null) return;
     for (String child : children) {
       LOG.debug(prefix + child);
-      String node = ZKUtil.joinZNode(root.equals("/") ? "" : root, child);
+      String node = ZNodePaths.joinZNode(root.equals("/") ? "" : root, child);
       logZKTree(node, prefix + "---");
     }
   }

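The isMemberNode change above keeps the existing logic and only swaps the separator constant to ZNodePaths.ZNODE_PATH_SEPARATOR: a path counts as a member node when it sits exactly two levels (procedure, then member) below the given state znode. A standalone illustration of that counting check; the example paths are hypothetical, only the loop mirrors the patch:

  // Standalone illustration of the separator-counting check in isMemberNode above.
  public class MemberNodeCheckSketch {
    static boolean isMemberNode(String path, String statePath) {
      int count = 0;
      for (int i = statePath.length(); i < path.length(); ++i) {
        count += (path.charAt(i) == '/') ? 1 : 0;  // ZNodePaths.ZNODE_PATH_SEPARATOR
      }
      return count == 2;  // exactly <state>/<procedure>/<member>
    }

    public static void main(String[] args) {
      String state = "/hbase/online-snapshot/acquired";           // hypothetical state znode
      System.out.println(isMemberNode(state + "/snap1/rs1", state)); // true: two levels down
      System.out.println(isMemberNode(state + "/snap1", state));     // false: only one level
    }
  }
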
http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 48189be..6ad595f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -162,6 +162,7 @@ import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.ipc.RemoteException;
@@ -3523,7 +3524,7 @@ public class HRegionServer extends HasThread implements
   }
 
   private String getMyEphemeralNodePath() {
-    return ZKUtil.joinZNode(this.zooKeeper.znodePaths.rsZNode, getServerName().toString());
+    return ZNodePaths.joinZNode(this.zooKeeper.znodePaths.rsZNode, getServerName().toString());
   }
 
   private boolean isHealthCheckerConfigured() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
index 09a1771..447ba51 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.DaemonThreadFactory;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -67,7 +68,7 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable
     super(watcher);
     this.authManager = authManager;
     String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", ACL_NODE);
-    this.aclZNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, aclZnodeParent);
+    this.aclZNode = ZNodePaths.joinZNode(watcher.znodePaths.baseZNode, aclZnodeParent);
     executor = Executors.newSingleThreadExecutor(
       new DaemonThreadFactory("zk-permission-watcher"));
   }
@@ -260,8 +261,8 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable
    */
   public void writeToZookeeper(byte[] entry, byte[] permsData) {
     String entryName = Bytes.toString(entry);
-    String zkNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE);
-    zkNode = ZKUtil.joinZNode(zkNode, entryName);
+    String zkNode = ZNodePaths.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE);
+    zkNode = ZNodePaths.joinZNode(zkNode, entryName);
 
     try {
       ZKUtil.createWithParents(watcher, zkNode);
@@ -278,8 +279,8 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable
    * @param tableName
    */
   public void deleteTableACLNode(final TableName tableName) {
-    String zkNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE);
-    zkNode = ZKUtil.joinZNode(zkNode, tableName.getNameAsString());
+    String zkNode = ZNodePaths.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE);
+    zkNode = ZNodePaths.joinZNode(zkNode, tableName.getNameAsString());
 
     try {
       ZKUtil.deleteNode(watcher, zkNode);
@@ -295,8 +296,8 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable
    * Delete the acl notify node of namespace
    */
   public void deleteNamespaceACLNode(final String namespace) {
-    String zkNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE);
-    zkNode = ZKUtil.joinZNode(zkNode, AccessControlLists.NAMESPACE_PREFIX + namespace);
+    String zkNode = ZNodePaths.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE);
+    zkNode = ZNodePaths.joinZNode(zkNode, AccessControlLists.NAMESPACE_PREFIX + namespace);
 
     try {
       ZKUtil.deleteNode(watcher, zkNode);

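ZKPermissionWatcher builds its per-entry ACL znodes by chaining two joins: first the ACL parent under the base znode, then the table, namespace, or entry name under that. A small usage sketch of that nesting under an assumed "/hbase" base znode (the names are illustrative; only ZNodePaths.joinZNode comes from the patch):

  // Sketch of the nested join pattern used above to build per-entry ACL znodes.
  import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

  class AclZNodePathSketch {
    public static void main(String[] args) {
      String baseZNode = "/hbase";                                    // hypothetical base
      String aclZNode = ZNodePaths.joinZNode(baseZNode, "acl");       // /hbase/acl
      String tableZNode = ZNodePaths.joinZNode(aclZNode, "mytable");  // /hbase/acl/mytable
      System.out.println(tableZNode);
    }
  }
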
http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
index 718e8e0..27ea509 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKLeaderManager;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.SecretManager;
@@ -301,7 +301,7 @@ public class AuthenticationTokenSecretManager
       setDaemon(true);
       setName("ZKSecretWatcher-leaderElector");
       zkLeader = new ZKLeaderManager(watcher,
-          ZKUtil.joinZNode(zkWatcher.getRootKeyZNode(), "keymaster"),
+          ZNodePaths.joinZNode(zkWatcher.getRootKeyZNode(), "keymaster"),
           Bytes.toBytes(serverName), this);
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/30f55f23/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java
index 42dc3a9..9f0da78 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java
@@ -29,6 +29,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -52,8 +53,8 @@ public class ZKSecretWatcher extends ZooKeeperListener {
     super(watcher);
     this.secretManager = secretManager;
     String keyZNodeParent = conf.get("zookeeper.znode.tokenauth.parent", DEFAULT_ROOT_NODE);
-    this.baseKeyZNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, keyZNodeParent);
-    this.keysParentZNode = ZKUtil.joinZNode(baseKeyZNode, DEFAULT_KEYS_PARENT);
+    this.baseKeyZNode = ZNodePaths.joinZNode(watcher.znodePaths.baseZNode, keyZNodeParent);
+    this.keysParentZNode = ZNodePaths.joinZNode(baseKeyZNode, DEFAULT_KEYS_PARENT);
   }
 
   public void start() throws KeeperException {
@@ -159,7 +160,7 @@ public class ZKSecretWatcher extends ZooKeeperListener {
   }
 
   private String getKeyNode(int keyId) {
-    return ZKUtil.joinZNode(keysParentZNode, Integer.toString(keyId));
+    return ZNodePaths.joinZNode(keysParentZNode, Integer.toString(keyId));
   }
 
   public void removeKeyFromZK(AuthenticationKey key) {

