hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sunc...@apache.org
Subject [12/50] [abbrv] hadoop git commit: HADOOP-15821. Move YARN Registry to Hadoop Registry. Contributed by Íñigo Goiri
Date Wed, 24 Oct 2018 05:52:32 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/AbstractSecureRegistryTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/AbstractSecureRegistryTest.java b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/AbstractSecureRegistryTest.java
new file mode 100644
index 0000000..75b6fb2
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/AbstractSecureRegistryTest.java
@@ -0,0 +1,369 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.secure;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.registry.RegistryTestHelper;
+import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
+import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions;
+import org.apache.hadoop.registry.server.services.AddingCompositeService;
+import org.apache.hadoop.registry.server.services.MicroZookeeperService;
+import org.apache.hadoop.registry.server.services.MicroZookeeperServiceKeys;
+import org.apache.hadoop.util.Shell;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.security.auth.login.LoginContext;
+import javax.security.auth.login.LoginException;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.security.Principal;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+/**
+ * Kerberos-enabled base class for secure registry tests. This is based on
+ * the (JUnit3) KerberosSecurityTestcase and its test case,
+ * <code>TestMiniKdc</code>.
+ * <p>
+ * Class setup starts a MiniKdc, creates keytabs for the zookeeper, alice
+ * and bob principals, writes a JAAS file covering those contexts and binds
+ * the JVM to it. Services registered via {@link #addToClassTeardown(Service)}
+ * are stopped after the class; those registered via
+ * {@link #addToTeardown(Service)} are stopped after each test case.
+ */
+public class AbstractSecureRegistryTest extends RegistryTestHelper {
+  public static final String REALM = "EXAMPLE.COM";
+  public static final String ZOOKEEPER = "zookeeper";
+  public static final String ZOOKEEPER_LOCALHOST = "zookeeper/localhost";
+  public static final String ZOOKEEPER_1270001 = "zookeeper/127.0.0.1";
+  public static final String ZOOKEEPER_REALM = "zookeeper@" + REALM;
+  public static final String ZOOKEEPER_CLIENT_CONTEXT = ZOOKEEPER;
+  public static final String ZOOKEEPER_SERVER_CONTEXT = "ZOOKEEPER_SERVER";
+  public static final String ZOOKEEPER_LOCALHOST_REALM =
+      ZOOKEEPER_LOCALHOST + "@" + REALM;
+  public static final String ALICE = "alice";
+  public static final String ALICE_CLIENT_CONTEXT = "alice";
+  public static final String ALICE_LOCALHOST = "alice/localhost";
+  public static final String BOB = "bob";
+  public static final String BOB_CLIENT_CONTEXT = "bob";
+  public static final String BOB_LOCALHOST = "bob/localhost";
+
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AbstractSecureRegistryTest.class);
+
+  /** Shared configuration: kerberos authentication, authorization enabled. */
+  public static final Configuration CONF;
+
+  static {
+    CONF = new Configuration();
+    CONF.set("hadoop.security.authentication", "kerberos");
+    CONF.setBoolean("hadoop.security.authorization", true);
+  }
+
+  private static final AddingCompositeService classTeardown =
+      new AddingCompositeService("classTeardown");
+
+  // static initializer guarantees it is always started
+  // ahead of any @BeforeClass methods
+  static {
+    classTeardown.init(CONF);
+    classTeardown.start();
+  }
+
+  public static final String SUN_SECURITY_KRB5_DEBUG =
+      "sun.security.krb5.debug";
+
+  /** Per-test-case teardown service; closed after every test method. */
+  private final AddingCompositeService teardown =
+      new AddingCompositeService("teardown");
+
+  protected static MiniKdc kdc;
+  protected static File keytab_zk;
+  protected static File keytab_bob;
+  protected static File keytab_alice;
+  protected static File kdcWorkDir;
+  protected static Properties kdcConf;
+  protected static RegistrySecurity registrySecurity;
+
+  @Rule
+  public final Timeout testTimeout = new Timeout(900000);
+
+  @Rule
+  public TestName methodName = new TestName();
+  protected MicroZookeeperService secureZK;
+  protected static File jaasFile;
+  private LoginContext zookeeperLogin;
+  private static String zkServerPrincipal;
+
+  /**
+   * All class initialization for this test class: KDC and principals,
+   * JAAS binding and Hadoop security settings.
+   * @throws Exception on any failure
+   */
+  @BeforeClass
+  public static void beforeSecureRegistryTestClass() throws Exception {
+    registrySecurity = new RegistrySecurity("registrySecurity");
+    registrySecurity.init(CONF);
+    setupKDCAndPrincipals();
+    RegistrySecurity.clearJaasSystemProperties();
+    RegistrySecurity.bindJVMtoJAASFile(jaasFile);
+    initHadoopSecurity();
+  }
+
+  @AfterClass
+  public static void afterSecureRegistryTestClass() throws
+      Exception {
+    describe(LOG, "teardown of class");
+    classTeardown.close();
+    teardownKDC();
+  }
+
+  /**
+   * give our thread a name
+   */
+  @Before
+  public void nameThread() {
+    Thread.currentThread().setName("JUnit");
+  }
+
+  /**
+   * For unknown reasons, the before-class setting of the JVM properties were
+   * not being picked up. This method was added to address that by setting
+   * them before every test case; it is currently a no-op placeholder.
+   */
+  @Before
+  public void beforeSecureRegistryTest() {
+
+  }
+
+  @After
+  public void afterSecureRegistryTest() throws IOException {
+    describe(LOG, "teardown of instance");
+    teardown.close();
+    stopSecureZK();
+  }
+
+  /**
+   * Register a service to be stopped by the class teardown.
+   * @param svc service to stop after the class
+   */
+  protected static void addToClassTeardown(Service svc) {
+    classTeardown.addService(svc);
+  }
+
+  /**
+   * Register a service to be stopped after the current test case.
+   * @param svc service to stop after the test
+   */
+  protected void addToTeardown(Service svc) {
+    teardown.addService(svc);
+  }
+
+  /**
+   * Stop the KDC if it is running; idempotent.
+   * @throws Exception on any failure
+   */
+  public static void teardownKDC() throws Exception {
+    if (kdc != null) {
+      kdc.stop();
+      kdc = null;
+    }
+  }
+
+  /**
+   * Sets up the KDC and a set of principals in the JAAS file
+   *
+   * @throws Exception on any failure
+   */
+  public static void setupKDCAndPrincipals() throws Exception {
+    // set up the KDC
+    File target = new File(System.getProperty("test.dir", "target"));
+    kdcWorkDir = new File(target, "kdc");
+    // mkdirs() returns false when the directory already exists, so only
+    // assert that the path is a directory in that case.
+    if (!kdcWorkDir.mkdirs()) {
+      assertTrue(kdcWorkDir.isDirectory());
+    }
+    kdcConf = MiniKdc.createConf();
+    kdcConf.setProperty(MiniKdc.DEBUG, "true");
+    kdc = new MiniKdc(kdcConf, kdcWorkDir);
+    kdc.start();
+
+    keytab_zk = createKeytab(ZOOKEEPER, "zookeeper.keytab");
+    keytab_alice = createKeytab(ALICE, "alice.keytab");
+    keytab_bob = createKeytab(BOB, "bob.keytab");
+    // on Windows the server principal resolves against 127.0.0.1,
+    // not localhost
+    zkServerPrincipal = Shell.WINDOWS ? ZOOKEEPER_1270001 : ZOOKEEPER_LOCALHOST;
+
+    StringBuilder jaas = new StringBuilder(1024);
+    jaas.append(registrySecurity.createJAASEntry(ZOOKEEPER_CLIENT_CONTEXT,
+        ZOOKEEPER, keytab_zk));
+    jaas.append(registrySecurity.createJAASEntry(ZOOKEEPER_SERVER_CONTEXT,
+        zkServerPrincipal, keytab_zk));
+    jaas.append(registrySecurity.createJAASEntry(ALICE_CLIENT_CONTEXT,
+        ALICE_LOCALHOST, keytab_alice));
+    jaas.append(registrySecurity.createJAASEntry(BOB_CLIENT_CONTEXT,
+        BOB_LOCALHOST, keytab_bob));
+
+    jaasFile = new File(kdcWorkDir, "jaas.txt");
+    FileUtils.write(jaasFile, jaas.toString(), Charset.defaultCharset());
+    LOG.info("\n"+ jaas);
+    RegistrySecurity.bindJVMtoJAASFile(jaasFile);
+  }
+
+
+  /** Rule mapping any EXAMPLE.COM principal to its short name. */
+  protected static final String kerberosRule =
+      "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT";
+
+  /**
+   * Init hadoop security by setting up the UGI config
+   */
+  public static void initHadoopSecurity() {
+
+    UserGroupInformation.setConfiguration(CONF);
+
+    KerberosName.setRules(kerberosRule);
+  }
+
+  /**
+   * Stop the secure ZK and log out the ZK account
+   */
+  public synchronized void stopSecureZK() {
+    ServiceOperations.stop(secureZK);
+    secureZK = null;
+    logout(zookeeperLogin);
+    zookeeperLogin = null;
+  }
+
+
+  public static MiniKdc getKdc() {
+    return kdc;
+  }
+
+  public static File getKdcWorkDir() {
+    return kdcWorkDir;
+  }
+
+  public static Properties getKdcConf() {
+    return kdcConf;
+  }
+
+  /**
+   * Create a secure instance
+   * @param name instance name
+   * @return the instance
+   * @throws Exception on any failure
+   */
+  protected static MicroZookeeperService createSecureZKInstance(String name)
+      throws Exception {
+    String context = ZOOKEEPER_SERVER_CONTEXT;
+    Configuration conf = new Configuration();
+
+    File testdir = new File(System.getProperty("test.dir", "target"));
+    File workDir = new File(testdir, name);
+    if (!workDir.mkdirs()) {
+      assertTrue(workDir.isDirectory());
+    }
+    System.setProperty(
+        ZookeeperConfigOptions.PROP_ZK_SERVER_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE,
+        "false");
+    RegistrySecurity.validateContext(context);
+    conf.set(MicroZookeeperServiceKeys.KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT,
+        context);
+    // local name chosen to avoid confusion with the instance field secureZK
+    MicroZookeeperService zkService = new MicroZookeeperService(name);
+    zkService.init(conf);
+    LOG.info(zkService.getDiagnostics());
+    return zkService;
+  }
+
+  /**
+   * Create the keytab for the given principal; includes the
+   * raw principal, $principal/localhost and $principal/127.0.0.1
+   * @param principal principal short name
+   * @param filename filename of keytab
+   * @return file of keytab
+   * @throws Exception on any failure
+   */
+  public static File createKeytab(String principal,
+      String filename) throws Exception {
+    assertNotEmpty("empty principal", principal);
+    assertNotEmpty("empty keytab filename", filename);
+    assertNotNull("Null KDC", kdc);
+    File keytab = new File(kdcWorkDir, filename);
+    kdc.createPrincipal(keytab,
+        principal,
+        principal + "/localhost",
+        principal + "/127.0.0.1");
+    return keytab;
+  }
+
+  public static String getPrincipalAndRealm(String principal) {
+    return principal + "@" + getRealm();
+  }
+
+  protected static String getRealm() {
+    return kdc.getRealm();
+  }
+
+
+  /**
+   * Log in, defaulting to the client context
+   * @param principal principal
+   * @param context context
+   * @param keytab keytab
+   * @return the logged in context
+   * @throws LoginException failure to log in
+   * @throws FileNotFoundException no keytab
+   */
+  protected LoginContext login(String principal,
+      String context, File keytab) throws LoginException,
+      FileNotFoundException {
+    LOG.info("Logging in as {} in context {} with keytab {}",
+        principal, context, keytab);
+    if (!keytab.exists()) {
+      throw new FileNotFoundException(keytab.getAbsolutePath());
+    }
+    Set<Principal> principals = new HashSet<Principal>();
+    principals.add(new KerberosPrincipal(principal));
+    Subject subject = new Subject(false, principals, new HashSet<Object>(),
+        new HashSet<Object>());
+    LoginContext login;
+    login = new LoginContext(context, subject, null,
+        KerberosConfiguration.createClientConfig(principal, keytab));
+    login.login();
+    return login;
+  }
+
+
+  /**
+   * Start the secure ZK instance using the test method name as the path.
+   * As the entry is saved to the {@link #secureZK} field, it
+   * is automatically stopped after the test case.
+   * @throws Exception on any failure
+   */
+  protected synchronized void startSecureZK() throws Exception {
+    assertNull("Zookeeper is already running", secureZK);
+
+    zookeeperLogin = login(zkServerPrincipal,
+        ZOOKEEPER_SERVER_CONTEXT,
+        keytab_zk);
+    secureZK = createSecureZKInstance("test-" + methodName.getMethodName());
+    secureZK.start();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/KerberosConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/KerberosConfiguration.java b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/KerberosConfiguration.java
new file mode 100644
index 0000000..01f13a3
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/KerberosConfiguration.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.secure;
+
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+
+import javax.security.auth.login.AppConfigurationEntry;
+import java.io.File;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+
+/**
+ * JAAS login {@link javax.security.auth.login.Configuration} for
+ * keytab-based Kerberos logins, handling the differing login-module option
+ * names of the IBM and Sun/Oracle JVMs. Client configurations act as the
+ * initiator; server configurations do not.
+ */
+class KerberosConfiguration extends javax.security.auth.login.Configuration {
+  // immutable after construction
+  private final String principal;
+  private final String keytab;
+  private final boolean isInitiator;
+
+  KerberosConfiguration(String principal, File keytab,
+      boolean client) {
+    this.principal = principal;
+    this.keytab = keytab.getAbsolutePath();
+    this.isInitiator = client;
+  }
+
+  /**
+   * Create a client-side (initiator) configuration.
+   * @param principal principal to log in as
+   * @param keytab keytab to use
+   * @return a new configuration
+   */
+  public static javax.security.auth.login.Configuration createClientConfig(
+      String principal,
+      File keytab) {
+    return new KerberosConfiguration(principal, keytab, true);
+  }
+
+  /**
+   * Create a server-side (non-initiator) configuration.
+   * @param principal principal to log in as
+   * @param keytab keytab to use
+   * @return a new configuration
+   */
+  public static javax.security.auth.login.Configuration createServerConfig(
+      String principal,
+      File keytab) {
+    return new KerberosConfiguration(principal, keytab, false);
+  }
+
+  @Override
+  public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+    Map<String, String> options = new HashMap<String, String>();
+    if (IBM_JAVA) {
+      // the IBM login module expects a file:// URL in useKeytab
+      options.put("useKeytab", keytab.startsWith("file://")
+          ? keytab
+          : "file://" + keytab);
+      options.put("principal", principal);
+      options.put("refreshKrb5Config", "true");
+      options.put("credsType", "both");
+    } else {
+      options.put("keyTab", keytab);
+      options.put("principal", principal);
+      options.put("useKeyTab", "true");
+      options.put("storeKey", "true");
+      options.put("doNotPrompt", "true");
+      options.put("useTicketCache", "true");
+      options.put("renewTGT", "true");
+      options.put("refreshKrb5Config", "true");
+      options.put("isInitiator", Boolean.toString(isInitiator));
+    }
+    String ticketCache = System.getenv("KRB5CCNAME");
+    if (ticketCache != null) {
+      if (IBM_JAVA) {
+        // IBM JAVA only respect system property and not env variable
+        // The first value searched when "useDefaultCcache" is used.
+        System.setProperty("KRB5CCNAME", ticketCache);
+        options.put("useDefaultCcache", "true");
+        options.put("renewTGT", "true");
+      } else {
+        options.put("ticketCache", ticketCache);
+      }
+    }
+    options.put("debug", "true");
+
+    return new AppConfigurationEntry[]{
+        new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
+            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+            options)
+    };
+  }
+
+  @Override
+  public String toString() {
+    return "KerberosConfiguration with principal " + principal;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java
new file mode 100644
index 0000000..8d0dc6a
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.secure;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
+
+/**
+ * Test for registry security operations: ACL pair splitting and ACL
+ * construction, exercised outside a Kerberized cluster.
+ */
+public class TestRegistrySecurityHelper extends Assert {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRegistrySecurityHelper.class);
+
+  public static final String YARN_EXAMPLE_COM = "yarn@example.com";
+  public static final String SASL_YARN_EXAMPLE_COM =
+      "sasl:" + YARN_EXAMPLE_COM;
+  public static final String MAPRED_EXAMPLE_COM = "mapred@example.com";
+  public static final String SASL_MAPRED_EXAMPLE_COM =
+      "sasl:" + MAPRED_EXAMPLE_COM;
+  public static final String SASL_MAPRED_APACHE = "sasl:mapred@APACHE";
+  public static final String DIGEST_F0AF = "digest:f0afbeeb00baa";
+  public static final String SASL_YARN_SHORT = "sasl:yarn@";
+  public static final String SASL_MAPRED_SHORT = "sasl:mapred@";
+  public static final String REALM_EXAMPLE_COM = "example.com";
+  private static RegistrySecurity registrySecurity;
+
+  @BeforeClass
+  public static void setupTestRegistrySecurityHelper() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setBoolean(KEY_REGISTRY_SECURE, true);
+    conf.set(KEY_REGISTRY_KERBEROS_REALM, "KERBEROS");
+    registrySecurity = new RegistrySecurity("");
+    // init the ACLs OUTSIDE A KERBEROS CLUSTER
+    registrySecurity.init(conf);
+  }
+
+  @Test
+  public void testACLSplitRealmed() throws Throwable {
+    List<String> pairs =
+        registrySecurity.splitAclPairs(
+            SASL_YARN_EXAMPLE_COM +
+            ", " +
+            SASL_MAPRED_EXAMPLE_COM,
+            "");
+
+    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
+    assertEquals(SASL_MAPRED_EXAMPLE_COM, pairs.get(1));
+  }
+
+
+  @Test
+  public void testBuildAclsRealmed() throws Throwable {
+    List<ACL> acls = registrySecurity.buildACLs(
+        SASL_YARN_EXAMPLE_COM +
+        ", " +
+        SASL_MAPRED_EXAMPLE_COM,
+        "",
+        ZooDefs.Perms.ALL);
+    assertEquals(YARN_EXAMPLE_COM, acls.get(0).getId().getId());
+    assertEquals(MAPRED_EXAMPLE_COM, acls.get(1).getId().getId());
+  }
+
+  @Test
+  public void testACLDefaultRealm() throws Throwable {
+    // short names pick up the supplied default realm
+    List<String> pairs =
+        registrySecurity.splitAclPairs(
+            SASL_YARN_SHORT +
+            ", " +
+            SASL_MAPRED_SHORT,
+            REALM_EXAMPLE_COM);
+
+    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
+    assertEquals(SASL_MAPRED_EXAMPLE_COM, pairs.get(1));
+  }
+
+  @Test
+  public void testBuildAclsDefaultRealm() throws Throwable {
+    List<ACL> acls = registrySecurity.buildACLs(
+        SASL_YARN_SHORT +
+        ", " +
+        SASL_MAPRED_SHORT,
+        REALM_EXAMPLE_COM, ZooDefs.Perms.ALL);
+
+    assertEquals(YARN_EXAMPLE_COM, acls.get(0).getId().getId());
+    assertEquals(MAPRED_EXAMPLE_COM, acls.get(1).getId().getId());
+  }
+
+  @Test
+  public void testACLSplitNullRealm() throws Throwable {
+    // with no realm available the short names pass through unchanged
+    List<String> pairs =
+        registrySecurity.splitAclPairs(
+            SASL_YARN_SHORT +
+            ", " +
+            SASL_MAPRED_SHORT,
+            "");
+
+    assertEquals(SASL_YARN_SHORT, pairs.get(0));
+    assertEquals(SASL_MAPRED_SHORT, pairs.get(1));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testBuildAclsNullRealm() throws Throwable {
+    // sasl entries without a realm and with no default realm are rejected;
+    // the expected-exception annotation handles the "no exception" case
+    registrySecurity.buildACLs(
+        SASL_YARN_SHORT +
+        ", " +
+        SASL_MAPRED_SHORT,
+        "", ZooDefs.Perms.ALL);
+  }
+
+  @Test
+  public void testACLDefaultRealmOnlySASL() throws Throwable {
+    // only sasl: entries get the default realm; digest: is untouched
+    List<String> pairs =
+        registrySecurity.splitAclPairs(
+            SASL_YARN_SHORT +
+            ", " +
+            DIGEST_F0AF,
+            REALM_EXAMPLE_COM);
+
+    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
+    assertEquals(DIGEST_F0AF, pairs.get(1));
+  }
+
+  @Test
+  public void testACLSplitMixed() throws Throwable {
+    // empty entries between commas are skipped
+    List<String> pairs =
+        registrySecurity.splitAclPairs(
+            SASL_YARN_SHORT +
+            ", " +
+            SASL_MAPRED_APACHE +
+            ", ,," +
+            DIGEST_F0AF,
+            REALM_EXAMPLE_COM);
+
+    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
+    assertEquals(SASL_MAPRED_APACHE, pairs.get(1));
+    assertEquals(DIGEST_F0AF, pairs.get(2));
+  }
+
+  @Test
+  public void testDefaultAClsValid() throws Throwable {
+    registrySecurity.buildACLs(
+        RegistryConstants.DEFAULT_REGISTRY_SYSTEM_ACCOUNTS,
+        REALM_EXAMPLE_COM, ZooDefs.Perms.ALL);
+  }
+
+  @Test
+  public void testDefaultRealm() throws Throwable {
+    String realm = RegistrySecurity.getDefaultRealmInJVM();
+    LOG.info("Realm {}", realm);
+  }
+
+  @Test
+  public void testUGIProperties() throws Throwable {
+    UserGroupInformation user = UserGroupInformation.getCurrentUser();
+    ACL acl = registrySecurity.createACLForUser(user, ZooDefs.Perms.ALL);
+    assertFalse(RegistrySecurity.ALL_READWRITE_ACCESS.equals(acl));
+    LOG.info("User {} has ACL {}", user, acl);
+  }
+
+
+  @Test
+  public void testSecurityImpliesKerberos() throws Throwable {
+    Configuration conf = new Configuration();
+    conf.setBoolean("hadoop.security.authentication", true);
+    conf.setBoolean(KEY_REGISTRY_SECURE, true);
+    conf.set(KEY_REGISTRY_KERBEROS_REALM, "KERBEROS");
+    RegistrySecurity security = new RegistrySecurity("registry security");
+    try {
+      security.init(conf);
+      // fail() raises AssertionError, which the Exception catch below
+      // does not swallow -- previously a missing exception passed silently
+      fail("Expected an exception containing "
+          + RegistrySecurity.E_NO_KERBEROS);
+    } catch (Exception e) {
+      assertTrue(
+          "did not find "+ RegistrySecurity.E_NO_KERBEROS + " in " + e,
+          e.toString().contains(RegistrySecurity.E_NO_KERBEROS));
+    }
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
new file mode 100644
index 0000000..ee15c0a
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.registry.secure;
+
+import java.io.File;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.nio.charset.Charset;
+import java.security.Principal;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import javax.security.auth.Subject;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.security.auth.login.LoginContext;
+import javax.security.auth.login.LoginException;
+
+import org.apache.zookeeper.Environment;
+import org.apache.zookeeper.data.ACL;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.security.HadoopKerberosName;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
+import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions;
+import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Verify that logins work
+ */
+public class TestSecureLogins extends AbstractSecureRegistryTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestSecureLogins.class);
+
+  @Test
+  public void testHasRealm() throws Throwable {
+    assertNotNull(getRealm());
+    LOG.info("ZK principal = {}", getPrincipalAndRealm(ZOOKEEPER_LOCALHOST));
+  }
+
+  @Test
+  public void testJaasFileSetup() throws Throwable {
+    // the JVM has seemed inconsistent on setting up here
+    assertNotNull("jaasFile", jaasFile);
+    String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
+    assertEquals(jaasFile.getAbsolutePath(), confFilename);
+  }
+
+  @Test
+  public void testJaasFileBinding() throws Throwable {
+    // the JVM has seemed inconsistent on setting up here
+    assertNotNull("jaasFile", jaasFile);
+    RegistrySecurity.bindJVMtoJAASFile(jaasFile);
+    String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
+    assertEquals(jaasFile.getAbsolutePath(), confFilename);
+  }
+
+  @Test
+  public void testClientLogin() throws Throwable {
+    LoginContext client = login(ALICE_LOCALHOST,
+                                ALICE_CLIENT_CONTEXT,
+                                keytab_alice);
+
+    try {
+      logLoginDetails(ALICE_LOCALHOST, client);
+      String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
+      assertNotNull("Unset: "+ Environment.JAAS_CONF_KEY, confFilename);
+      String config = FileUtils.readFileToString(new File(confFilename),
+          Charset.defaultCharset());
+      LOG.info("{}=\n{}", confFilename, config);
+      RegistrySecurity.setZKSaslClientProperties(ALICE, ALICE_CLIENT_CONTEXT);
+    } finally {
+      client.logout();
+    }
+  }
+
+  @Test
+  public void testZKServerContextLogin() throws Throwable {
+    LoginContext client = login(ZOOKEEPER_LOCALHOST,
+                                ZOOKEEPER_SERVER_CONTEXT,
+                                keytab_zk);
+    logLoginDetails(ZOOKEEPER_LOCALHOST, client);
+
+    client.logout();
+  }
+
+  @Test
+  public void testServerLogin() throws Throwable {
+    LoginContext loginContext = createLoginContextZookeeperLocalhost();
+    loginContext.login();
+    loginContext.logout();
+  }
+
+  public LoginContext createLoginContextZookeeperLocalhost() throws
+      LoginException {
+    String principalAndRealm = getPrincipalAndRealm(ZOOKEEPER_LOCALHOST);
+    Set<Principal> principals = new HashSet<Principal>();
+    principals.add(new KerberosPrincipal(ZOOKEEPER_LOCALHOST));
+    Subject subject = new Subject(false, principals, new HashSet<Object>(),
+        new HashSet<Object>());
+    return new LoginContext("", subject, null,
+        KerberosConfiguration.createServerConfig(ZOOKEEPER_LOCALHOST, keytab_zk));
+  }
+
+  @Test
+  public void testKerberosAuth() throws Throwable {
+    File krb5conf = getKdc().getKrb5conf();
+    String krbConfig = FileUtils.readFileToString(krb5conf,
+        Charset.defaultCharset());
+    LOG.info("krb5.conf at {}:\n{}", krb5conf, krbConfig);
+    Subject subject = new Subject();
+    Class<?> kerb5LoginClass =
+        Class.forName(KerberosUtil.getKrb5LoginModuleName());
+    Constructor<?> kerb5LoginConstr = kerb5LoginClass.getConstructor();
+    Object kerb5LoginObject = kerb5LoginConstr.newInstance();
+    final Map<String, String> options = new HashMap<String, String>();
+    options.put("debug", "true");
+    if (IBM_JAVA) {
+      options.put("useKeytab",
+          keytab_alice.getAbsolutePath().startsWith("file://")
+            ? keytab_alice.getAbsolutePath()
+            : "file://" +  keytab_alice.getAbsolutePath());
+      options.put("principal", ALICE_LOCALHOST);
+      options.put("refreshKrb5Config", "true");
+      options.put("credsType", "both");
+      String ticketCache = System.getenv("KRB5CCNAME");
+      if (ticketCache != null) {
+        // IBM JAVA only respect system property and not env variable
+        // The first value searched when "useDefaultCcache" is used.
+        System.setProperty("KRB5CCNAME", ticketCache);
+        options.put("useDefaultCcache", "true");
+        options.put("renewTGT", "true");
+      }
+    } else {
+      options.put("keyTab", keytab_alice.getAbsolutePath());
+      options.put("principal", ALICE_LOCALHOST);
+      options.put("doNotPrompt", "true");
+      options.put("isInitiator", "true");
+      options.put("refreshKrb5Config", "true");
+      options.put("renewTGT", "true");
+      options.put("storeKey", "true");
+      options.put("useKeyTab", "true");
+      options.put("useTicketCache", "true");
+    }
+    Method methodInitialize =
+        kerb5LoginObject.getClass().getMethod("initialize", Subject.class,
+          CallbackHandler.class, Map.class, Map.class);
+    methodInitialize.invoke(kerb5LoginObject, subject, null,
+        new HashMap<String, String>(), options);
+    Method methodLogin = kerb5LoginObject.getClass().getMethod("login");
+    boolean loginOk = (Boolean) methodLogin.invoke(kerb5LoginObject);
+    assertTrue("Failed to login", loginOk);
+    Method methodCommit = kerb5LoginObject.getClass().getMethod("commit");
+    boolean commitOk = (Boolean) methodCommit.invoke(kerb5LoginObject);
+    assertTrue("Failed to Commit", commitOk);
+  }
+
+  @Test
+  public void testDefaultRealmValid() throws Throwable {
+    String defaultRealm = KerberosUtil.getDefaultRealm();
+    assertNotEmpty("No default Kerberos Realm",
+        defaultRealm);
+    LOG.info("Default Realm '{}'", defaultRealm);
+  }
+
+  @Test
+  public void testKerberosRulesValid() throws Throwable {
+    assertTrue("!KerberosName.hasRulesBeenSet()",
+        KerberosName.hasRulesBeenSet());
+    String rules = KerberosName.getRules();
+    assertEquals(kerberosRule, rules);
+    LOG.info(rules);
+  }
+
+  @Test
+  public void testValidKerberosName() throws Throwable {
+
+    new HadoopKerberosName(ZOOKEEPER).getShortName();
+    new HadoopKerberosName(ZOOKEEPER_LOCALHOST).getShortName();
+    new HadoopKerberosName(ZOOKEEPER_REALM).getShortName();
+    // standard rules don't pick this up
+    // new HadoopKerberosName(ZOOKEEPER_LOCALHOST_REALM).getShortName();
+  }
+
+  @Test
+  public void testUGILogin() throws Throwable {
+
+    UserGroupInformation ugi = loginUGI(ZOOKEEPER, keytab_zk);
+    RegistrySecurity.UgiInfo ugiInfo =
+        new RegistrySecurity.UgiInfo(ugi);
+    LOG.info("logged in as: {}", ugiInfo);
+    assertTrue("security is not enabled: " + ugiInfo,
+        UserGroupInformation.isSecurityEnabled());
+    assertTrue("login is keytab based: " + ugiInfo,
+        ugi.isFromKeytab());
+
+    // now we are here, build a SASL ACL
+    ACL acl = ugi.doAs(new PrivilegedExceptionAction<ACL>() {
+      @Override
+      public ACL run() throws Exception {
+        return registrySecurity.createSaslACLFromCurrentUser(0);
+      }
+    });
+    assertEquals(ZOOKEEPER_REALM, acl.getId().getId());
+    assertEquals(ZookeeperConfigOptions.SCHEME_SASL, acl.getId().getScheme());
+    registrySecurity.addSystemACL(acl);
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
new file mode 100644
index 0000000..9d5848e
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.secure;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.registry.client.impl.zk.ZKPathDumper;
+import org.apache.hadoop.registry.client.impl.zk.CuratorService;
+import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.Login;
+import org.apache.zookeeper.server.ZooKeeperSaslServer;
+import org.apache.zookeeper.server.auth.SaslServerCallbackHandler;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.LoginContext;
+
+import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
+import static org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions.PROP_ZK_SASL_CLIENT_CONTEXT;
+import static org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions.PROP_ZK_SASL_CLIENT_USERNAME;
+
+/**
+ * Verify that the Mini ZK service can be started up securely.
+ */
+public class TestSecureRegistry extends AbstractSecureRegistryTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestSecureRegistry.class);
+
+  // Turn on JVM-level kerberos debug output for every test case.
+  @Before
+  public void beforeTestSecureZKService() throws Throwable {
+      enableKerberosDebugging();
+  }
+
+  // Reset kerberos debugging and clear any SASL client system
+  // properties a test case may have set, so cases stay independent.
+  @After
+  public void afterTestSecureZKService() throws Throwable {
+    disableKerberosDebugging();
+    RegistrySecurity.clearZKSaslClientProperties();
+  }
+
+  /**
+   * This is a cut and paste of some of the ZK internal code that was
+   * failing on windows and swallowing its exceptions
+   */
+  @Test
+  public void testLowlevelZKSaslLogin() throws Throwable {
+    RegistrySecurity.bindZKToServerJAASContext(ZOOKEEPER_SERVER_CONTEXT);
+    String serverSection =
+        System.getProperty(ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY,
+            ZooKeeperSaslServer.DEFAULT_LOGIN_CONTEXT_NAME);
+    assertEquals(ZOOKEEPER_SERVER_CONTEXT, serverSection);
+
+    // the JAAS context bound above must be resolvable from the JVM's
+    // login configuration
+    AppConfigurationEntry entries[];
+    entries = javax.security.auth.login.Configuration.getConfiguration()
+                                                     .getAppConfigurationEntry(
+                                                         serverSection);
+
+    assertNotNull("null entries", entries);
+
+    // replicate the ZK server-side SASL login sequence; shut the login
+    // thread down even if startup fails
+    SaslServerCallbackHandler saslServerCallbackHandler =
+        new SaslServerCallbackHandler(
+            javax.security.auth.login.Configuration.getConfiguration());
+    Login login = new Login(serverSection, saslServerCallbackHandler);
+    try {
+      login.startThreadIfNeeded();
+    } finally {
+      login.shutdown();
+    }
+  }
+
+  // A secure mini ZK instance can be started and stopped cleanly.
+  @Test
+  public void testCreateSecureZK() throws Throwable {
+    startSecureZK();
+    secureZK.stop();
+  }
+
+  // An unauthenticated client can still read and create world-writable
+  // paths against a secure ZK ensemble.
+  @Test
+  public void testInsecureClientToZK() throws Throwable {
+    startSecureZK();
+    userZookeeperToCreateRoot();
+    RegistrySecurity.clearZKSaslClientProperties();
+
+    CuratorService curatorService =
+        startCuratorServiceInstance("insecure client", false);
+
+    curatorService.zkList("/");
+    curatorService.zkMkPath("", CreateMode.PERSISTENT, false,
+        RegistrySecurity.WorldReadWriteACL);
+  }
+
+  /**
+   * test that ZK can write as itself
+   * @throws Throwable
+   */
+  @Test
+  public void testZookeeperCanWrite() throws Throwable {
+
+    System.setProperty("curator-log-events", "true");
+    startSecureZK();
+    CuratorService curator = null;
+    // log in with the zookeeper/localhost principal before starting
+    // the curator client, so its SASL negotiation uses that identity
+    LoginContext login = login(ZOOKEEPER_LOCALHOST,
+        ZOOKEEPER_CLIENT_CONTEXT,
+        keytab_zk);
+    try {
+      logLoginDetails(ZOOKEEPER, login);
+      RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
+                                                ZOOKEEPER_CLIENT_CONTEXT);
+      curator = startCuratorServiceInstance("ZK", true);
+      LOG.info(curator.toString());
+
+      addToTeardown(curator);
+      curator.zkMkPath("/", CreateMode.PERSISTENT, false,
+          RegistrySecurity.WorldReadWriteACL);
+      curator.zkList("/");
+      curator.zkMkPath("/zookeeper", CreateMode.PERSISTENT, false,
+          RegistrySecurity.WorldReadWriteACL);
+    } finally {
+      // always log out and stop the client, even on assertion failure
+      logout(login);
+      ServiceOperations.stop(curator);
+    }
+  }
+
+  // setZKSaslClientProperties must only fill in the SASL username and
+  // context when they are unset/empty; pre-set values are preserved.
+  @Test
+  public void testSystemPropertyOverwrite() {
+    System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, "");
+    System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, "");
+    RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
+        ZOOKEEPER_CLIENT_CONTEXT);
+    assertEquals(ZOOKEEPER, System.getProperty(PROP_ZK_SASL_CLIENT_USERNAME));
+    assertEquals(ZOOKEEPER_CLIENT_CONTEXT,
+        System.getProperty(PROP_ZK_SASL_CLIENT_CONTEXT));
+
+    String userName = "user1";
+    String context = "context1";
+    System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, userName);
+    System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, context);
+    RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
+        ZOOKEEPER_CLIENT_CONTEXT);
+    assertEquals(userName, System.getProperty(PROP_ZK_SASL_CLIENT_USERNAME));
+    assertEquals(context, System.getProperty(PROP_ZK_SASL_CLIENT_CONTEXT));
+  }
+
+  /**
+   * Start a curator service instance
+   * @param name name
+   * @param secure flag to indicate the cluster is secure
+   * @return an inited and started curator service
+   */
+  protected CuratorService startCuratorServiceInstance(String name,
+      boolean secure) {
+    Configuration clientConf = new Configuration();
+    clientConf.set(KEY_REGISTRY_ZK_ROOT, "/");
+    clientConf.setBoolean(KEY_REGISTRY_SECURE, secure);
+    describe(LOG, "Starting Curator service");
+    CuratorService curatorService = new CuratorService(name, secureZK);
+    curatorService.init(clientConf);
+    curatorService.start();
+    LOG.info("Curator Binding {}",
+        curatorService.bindingDiagnosticDetails());
+    return curatorService;
+  }
+
+  /**
+   * have the ZK user create the root dir.
+   * This logs out the ZK user after and stops its curator instance,
+   * to avoid contamination
+   * @throws Throwable
+   */
+  public void userZookeeperToCreateRoot() throws Throwable {
+
+    System.setProperty("curator-log-events", "true");
+    CuratorService curator = null;
+    LoginContext login = login(ZOOKEEPER_LOCALHOST,
+        ZOOKEEPER_CLIENT_CONTEXT,
+        keytab_zk);
+    try {
+      logLoginDetails(ZOOKEEPER, login);
+      RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
+          ZOOKEEPER_CLIENT_CONTEXT);
+      curator = startCuratorServiceInstance("ZK", true);
+      LOG.info(curator.toString());
+
+      addToTeardown(curator);
+      curator.zkMkPath("/", CreateMode.PERSISTENT, false,
+          RegistrySecurity.WorldReadWriteACL);
+      // dump the tree for diagnostics
+      ZKPathDumper pathDumper = curator.dumpPath(true);
+      LOG.info(pathDumper.toString());
+    } finally {
+      logout(login);
+      ServiceOperations.stop(curator);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
new file mode 100644
index 0000000..a0c4ca3
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
@@ -0,0 +1,725 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.registry.server.dns;
+
+import org.apache.commons.net.util.Base64;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.xbill.DNS.AAAARecord;
+import org.xbill.DNS.ARecord;
+import org.xbill.DNS.CNAMERecord;
+import org.xbill.DNS.DClass;
+import org.xbill.DNS.DNSKEYRecord;
+import org.xbill.DNS.DNSSEC;
+import org.xbill.DNS.Flags;
+import org.xbill.DNS.Message;
+import org.xbill.DNS.Name;
+import org.xbill.DNS.OPTRecord;
+import org.xbill.DNS.PTRRecord;
+import org.xbill.DNS.RRSIGRecord;
+import org.xbill.DNS.RRset;
+import org.xbill.DNS.Rcode;
+import org.xbill.DNS.Record;
+import org.xbill.DNS.SRVRecord;
+import org.xbill.DNS.Section;
+import org.xbill.DNS.Type;
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.security.KeyFactory;
+import java.security.PrivateKey;
+import java.security.spec.RSAPrivateKeySpec;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
+
+/**
+ * Tests for the registry DNS server: record registration, forward and
+ * reverse lookups, TTLs, master-file loading and DNSSEC handling.
+ */
+public class TestRegistryDNS extends Assert {
+
+  // DNS server under test and the JSON marshaller; rebuilt per test.
+  private RegistryDNS registryDNS;
+  private RegistryUtils.ServiceRecordMarshal marshal;
+
+  // An application (AM) service record exposing external IPC, web UI
+  // and REST endpoints plus two internal REST endpoints.
+  private static final String APPLICATION_RECORD = "{\n"
+      + "  \"type\" : \"JSONServiceRecord\",\n"
+      + "  \"description\" : \"Slider Application Master\",\n"
+      + "  \"external\" : [ {\n"
+      + "    \"api\" : \"classpath:org.apache.hadoop.yarn.service.appmaster.ipc"
+      + "\",\n"
+      + "    \"addressType\" : \"host/port\",\n"
+      + "    \"protocolType\" : \"hadoop/IPC\",\n"
+      + "    \"addresses\" : [ {\n"
+      + "      \"host\" : \"192.168.1.5\",\n"
+      + "      \"port\" : \"1026\"\n"
+      + "    } ]\n"
+      + "  }, {\n"
+      + "    \"api\" : \"http://\",\n"
+      + "    \"addressType\" : \"uri\",\n"
+      + "    \"protocolType\" : \"webui\",\n"
+      + "    \"addresses\" : [ {\n"
+      + "      \"uri\" : \"http://192.168.1.5:1027\"\n"
+      + "    } ]\n"
+      + "  }, {\n"
+      + "    \"api\" : \"classpath:org.apache.hadoop.yarn.service.management\""
+      + ",\n"
+      + "    \"addressType\" : \"uri\",\n"
+      + "    \"protocolType\" : \"REST\",\n"
+      + "    \"addresses\" : [ {\n"
+      + "      \"uri\" : \"http://192.168.1.5:1027/ws/v1/slider/mgmt\"\n"
+      + "    } ]\n"
+      + "  } ],\n"
+      + "  \"internal\" : [ {\n"
+      + "    \"api\" : \"classpath:org.apache.hadoop.yarn.service.agents.secure"
+      + "\",\n"
+      + "    \"addressType\" : \"uri\",\n"
+      + "    \"protocolType\" : \"REST\",\n"
+      + "    \"addresses\" : [ {\n"
+      + "      \"uri\" : \"https://192.168.1.5:47700/ws/v1/slider/agents\"\n"
+      + "    } ]\n"
+      + "  }, {\n"
+      + "    \"api\" : \"classpath:org.apache.hadoop.yarn.service.agents.oneway"
+      + "\",\n"
+      + "    \"addressType\" : \"uri\",\n"
+      + "    \"protocolType\" : \"REST\",\n"
+      + "    \"addresses\" : [ {\n"
+      + "      \"uri\" : \"https://192.168.1.5:35531/ws/v1/slider/agents\"\n"
+      + "    } ]\n"
+      + "  } ],\n"
+      + "  \"yarn:id\" : \"application_1451931954322_0016\",\n"
+      + "  \"yarn:persistence\" : \"application\"\n"
+      + "}\n";
+  // A container record with both yarn:ip and yarn:hostname set.
+  static final String CONTAINER_RECORD = "{\n"
+      + "  \"type\" : \"JSONServiceRecord\",\n"
+      + "  \"description\" : \"httpd-1\",\n"
+      + "  \"external\" : [ ],\n"
+      + "  \"internal\" : [ ],\n"
+      + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000002\",\n"
+      + "  \"yarn:persistence\" : \"container\",\n"
+      + "  \"yarn:ip\" : \"172.17.0.19\",\n"
+      + "  \"yarn:hostname\" : \"host1\",\n"
+      + "  \"yarn:component\" : \"httpd\"\n"
+      + "}\n";
+
+  // A second container of the same component, with a different IP.
+  static final String CONTAINER_RECORD2 = "{\n"
+      + "  \"type\" : \"JSONServiceRecord\",\n"
+      + "  \"description\" : \"httpd-2\",\n"
+      + "  \"external\" : [ ],\n"
+      + "  \"internal\" : [ ],\n"
+      + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000003\",\n"
+      + "  \"yarn:persistence\" : \"container\",\n"
+      + "  \"yarn:ip\" : \"172.17.0.20\",\n"
+      + "  \"yarn:hostname\" : \"host2\",\n"
+      + "  \"yarn:component\" : \"httpd\"\n"
+      + "}\n";
+
+  // A container record missing yarn:ip/yarn:hostname.
+  private static final String CONTAINER_RECORD_NO_IP = "{\n"
+      + "  \"type\" : \"JSONServiceRecord\",\n"
+      + "  \"description\" : \"httpd-1\",\n"
+      + "  \"external\" : [ ],\n"
+      + "  \"internal\" : [ ],\n"
+      + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000002\",\n"
+      + "  \"yarn:persistence\" : \"container\",\n"
+      + "  \"yarn:component\" : \"httpd\"\n"
+      + "}\n";
+
+  // A container record missing the yarn:persistence field entirely.
+  private static final String CONTAINER_RECORD_YARN_PERSISTANCE_ABSENT = "{\n"
+      + "  \"type\" : \"JSONServiceRecord\",\n"
+      + "  \"description\" : \"httpd-1\",\n"
+      + "  \"external\" : [ ],\n"
+      + "  \"internal\" : [ ],\n"
+      + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000003\",\n"
+      + "  \"yarn:ip\" : \"172.17.0.19\",\n"
+      + "  \"yarn:hostname\" : \"0a134d6329bb\",\n"
+      + "  \"yarn:component\" : \"httpd\""
+      + "}\n";
+
+  /**
+   * Build the registry DNS server with the default test configuration
+   * and a marshaller for the JSON service-record fixtures.
+   */
+  @Before
+  public void initialize() throws Exception {
+    setRegistryDNS(new RegistryDNS("TestRegistry"));
+    Configuration conf = createConfiguration();
+
+    getRegistryDNS().setDomainName(conf);
+    getRegistryDNS().initializeZones(conf);
+
+    setMarshal(new RegistryUtils.ServiceRecordMarshal());
+  }
+
+  protected Configuration createConfiguration() {
+    Configuration conf = new Configuration();
+    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "dev.test");
+    conf.set(RegistryConstants.KEY_DNS_ZONE_SUBNET, "172.17.0");
+    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
+    return conf;
+  }
+
+  /**
+   * Whether this run exercises DNSSEC; the secure subclass overrides
+   * this. Controls the expected record counts in assertDNSQuery.
+   */
+  protected boolean isSecure() {
+    return false;
+  }
+
+  /** Stop the DNS server's executor after each test. */
+  @After
+  public void closeRegistry() throws Exception {
+    getRegistryDNS().stopExecutor();
+  }
+
+  /**
+   * Registering an application record must expose A, CNAME, SRV and
+   * TXT records for the app name and each of its API endpoints.
+   */
+  @Test
+  public void testAppRegistration() throws Exception {
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        APPLICATION_RECORD.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/", record);
+
+    // start assessing whether correct records are available
+    Record[] recs = assertDNSQuery("test1.root.dev.test.");
+    assertEquals("wrong result", "192.168.1.5",
+        ((ARecord) recs[0]).getAddress().getHostAddress());
+
+    // API aliases resolve as CNAME + A; in secure mode an RRSIG is
+    // interleaved, hence the index shift below
+    recs = assertDNSQuery("management-api.test1.root.dev.test.", 2);
+    assertEquals("wrong target name", "test1.root.dev.test.",
+        ((CNAMERecord) recs[0]).getTarget().toString());
+    assertTrue("not an ARecord", recs[isSecure() ? 2 : 1] instanceof ARecord);
+
+    recs = assertDNSQuery("appmaster-ipc-api.test1.root.dev.test.",
+        Type.SRV, 1);
+    assertTrue("not an SRV record", recs[0] instanceof SRVRecord);
+    assertEquals("wrong port", 1026, ((SRVRecord) recs[0]).getPort());
+
+    recs = assertDNSQuery("appmaster-ipc-api.test1.root.dev.test.", 2);
+    assertEquals("wrong target name", "test1.root.dev.test.",
+        ((CNAMERecord) recs[0]).getTarget().toString());
+    assertTrue("not an ARecord", recs[isSecure() ? 2 : 1] instanceof ARecord);
+
+    recs = assertDNSQuery("http-api.test1.root.dev.test.", 2);
+    assertEquals("wrong target name", "test1.root.dev.test.",
+        ((CNAMERecord) recs[0]).getTarget().toString());
+    assertTrue("not an ARecord", recs[isSecure() ? 2 : 1] instanceof ARecord);
+
+    recs = assertDNSQuery("http-api.test1.root.dev.test.", Type.SRV,
+        1);
+    assertTrue("not an SRV record", recs[0] instanceof SRVRecord);
+    assertEquals("wrong port", 1027, ((SRVRecord) recs[0]).getPort());
+
+    assertDNSQuery("test1.root.dev.test.", Type.TXT, 3);
+    assertDNSQuery("appmaster-ipc-api.test1.root.dev.test.", Type.TXT, 1);
+    assertDNSQuery("http-api.test1.root.dev.test.", Type.TXT, 1);
+    assertDNSQuery("management-api.test1.root.dev.test.", Type.TXT, 1);
+  }
+
+  /**
+   * Registering a container record must expose an A record both under
+   * the container id and under the component/service name.
+   */
+  @Test
+  public void testContainerRegistration() throws Exception {
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000002",
+        record);
+
+    // start assessing whether correct records are available
+    Record[] recs =
+        assertDNSQuery("ctr-e50-1451931954322-0016-01-000002.dev.test.");
+    assertEquals("wrong result", "172.17.0.19",
+        ((ARecord) recs[0]).getAddress().getHostAddress());
+
+    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1);
+    assertTrue("not an ARecord", recs[0] instanceof ARecord);
+  }
+
+  /**
+   * A record with no yarn:persistence field must not be registered,
+   * so a subsequent lookup must answer NXDOMAIN.
+   * (Method name keeps the original "Persistance" spelling to avoid
+   * breaking anything keyed on it.)
+   */
+  @Test
+  public void testContainerRegistrationPersistanceAbsent() throws Exception {
+    // use the accessors for consistency with the sibling tests
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD_YARN_PERSISTANCE_ABSENT.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000003",
+        record);
+
+    Name name =
+        Name.fromString("ctr-e50-1451931954322-0016-01-000002.dev.test.");
+    Record question = Record.newRecord(name, Type.A, DClass.IN);
+    Message query = Message.newQuery(question);
+    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
+    Message response = new Message(responseBytes);
+    assertEquals("Expecting NXDOMAIN as the record must not have registered",
+        Rcode.NXDOMAIN, response.getRcode());
+  }
+
+  /**
+   * Records generated for a registration must carry the TTL set via
+   * KEY_DNS_TTL (30 seconds in createConfiguration()).
+   */
+  @Test
+  public void testRecordTTL() throws Exception {
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000002",
+        record);
+
+    // start assessing whether correct records are available
+    Record[] recs = assertDNSQuery(
+        "ctr-e50-1451931954322-0016-01-000002.dev.test.");
+    assertEquals("wrong result", "172.17.0.19",
+        ((ARecord) recs[0]).getAddress().getHostAddress());
+    assertEquals("wrong ttl", 30L, recs[0].getTTL());
+
+    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1);
+    assertTrue("not an ARecord", recs[0] instanceof ARecord);
+
+    assertEquals("wrong ttl", 30L, recs[0].getTTL());
+  }
+
+  /**
+   * A PTR query for the container's IP must resolve to the
+   * component/service name.
+   */
+  @Test
+  public void testReverseLookup() throws Exception {
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000002",
+        record);
+
+    // start assessing whether correct records are available
+    Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
+    assertEquals("wrong result",
+        "httpd-1.test1.root.dev.test.",
+        ((PTRRecord) recs[0]).getTarget().toString());
+  }
+
+  /**
+   * Reverse lookup must also work when the reverse zone is configured
+   * from a subnet/mask pair spanning more than one /24.
+   */
+  @Test
+  public void testReverseLookupInLargeNetwork() throws Exception {
+    // rebuild the server; createConfiguration() already supplies the
+    // domain and TTL, so only the subnet and mask need overriding
+    setRegistryDNS(new RegistryDNS("TestRegistry"));
+    Configuration conf = createConfiguration();
+    conf.set(KEY_DNS_ZONE_SUBNET, "172.17.0.0");
+    conf.set(KEY_DNS_ZONE_MASK, "255.255.224.0");
+
+    getRegistryDNS().setDomainName(conf);
+    getRegistryDNS().initializeZones(conf);
+
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000002",
+        record);
+
+    // start assessing whether correct records are available
+    Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
+    assertEquals("wrong result",
+        "httpd-1.test1.root.dev.test.",
+        ((PTRRecord) recs[0]).getTarget().toString());
+  }
+
+  /**
+   * A PTR query for an address with no registered record must answer
+   * NXDOMAIN.
+   */
+  @Test
+  public void testMissingReverseLookup() throws Exception {
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000002",
+        record);
+
+    // start assessing whether correct records are available
+    // 172.17.1.19 is in the zone but was never registered
+    Name name = Name.fromString("19.1.17.172.in-addr.arpa.");
+    Record question = Record.newRecord(name, Type.PTR, DClass.IN);
+    Message query = Message.newQuery(question);
+    OPTRecord optRecord = new OPTRecord(4096, 0, 0, Flags.DO, null);
+    query.addRecord(optRecord, Section.ADDITIONAL);
+    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
+    Message response = new Message(responseBytes);
+    assertEquals("Missing record should be: ", Rcode.NXDOMAIN,
+        response.getRcode());
+  }
+
+  /**
+   * A container record without a yarn:ip field yields no A record, so
+   * the lookup must answer NXDOMAIN.
+   */
+  @Test
+  public void testNoContainerIP() throws Exception {
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD_NO_IP.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000002",
+        record);
+
+    // start assessing whether correct records are available
+    Name name =
+        Name.fromString("ctr-e50-1451931954322-0016-01-000002.dev.test.");
+    Record question = Record.newRecord(name, Type.A, DClass.IN);
+    Message query = Message.newQuery(question);
+
+    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
+    Message response = new Message(responseBytes);
+    assertEquals("wrong status", Rcode.NXDOMAIN, response.getRcode());
+  }
+
+  /** Query for a single A record. */
+  private Record[] assertDNSQuery(String lookup) throws IOException {
+    return assertDNSQuery(lookup, Type.A, 1);
+  }
+
+  /** Query for {@code numRecs} A records. */
+  private Record[] assertDNSQuery(String lookup, int numRecs)
+      throws IOException {
+    return assertDNSQuery(lookup, Type.A, numRecs);
+  }
+
+  /**
+   * Issue a query (with the DNSSEC-OK flag set) against the registry
+   * DNS server and assert a NOERROR answer with the expected record
+   * count. In secure mode each record is paired with an RRSIG, so the
+   * expected count doubles and at least one signature must be present.
+   * @param lookup name to look up
+   * @param type record type (e.g. Type.A)
+   * @param numRecs answer records expected (insecure-mode count)
+   * @return the answer section records
+   * @throws IOException on a message encoding/decoding failure
+   */
+  Record[] assertDNSQuery(String lookup, int type, int numRecs)
+      throws IOException {
+    Name name = Name.fromString(lookup);
+    Record question = Record.newRecord(name, type, DClass.IN);
+    Message query = Message.newQuery(question);
+    // advertise EDNS with DNSSEC-OK so signatures are returned
+    OPTRecord optRecord = new OPTRecord(4096, 0, 0, Flags.DO, null);
+    query.addRecord(optRecord, Section.ADDITIONAL);
+    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
+    Message response = new Message(responseBytes);
+    // null-check must precede the first dereference of the response
+    assertNotNull("Null response", response);
+    assertEquals("not successful", Rcode.NOERROR, response.getRcode());
+    assertEquals("Questions do not match", query.getQuestion(),
+        response.getQuestion());
+    Record[] recs = response.getSectionArray(Section.ANSWER);
+    assertEquals("wrong number of answer records",
+        isSecure() ? numRecs * 2 : numRecs, recs.length);
+    if (isSecure()) {
+      boolean signed = false;
+      for (Record record : recs) {
+        signed = record.getType() == Type.RRSIG;
+        if (signed) {
+          break;
+        }
+      }
+      assertTrue("No signatures found", signed);
+    }
+    return recs;
+  }
+
+  /**
+   * Issue a query (with the DNSSEC-OK flag set) and assert a NOERROR
+   * response whose answer section has exactly {@code answerCount}
+   * records, the first being of the requested type.
+   * @param lookup name to look up
+   * @param type expected type of the first answer record
+   * @param answerCount exact number of answer records expected
+   * @return the answer section records
+   * @throws IOException on a message encoding/decoding failure
+   */
+  Record[] assertDNSQueryNotNull(String lookup, int type, int answerCount)
+      throws IOException {
+    Name name = Name.fromString(lookup);
+    Record question = Record.newRecord(name, type, DClass.IN);
+    Message query = Message.newQuery(question);
+    OPTRecord optRecord = new OPTRecord(4096, 0, 0, Flags.DO, null);
+    query.addRecord(optRecord, Section.ADDITIONAL);
+    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
+    Message response = new Message(responseBytes);
+    // null-check must precede the first dereference of the response
+    assertNotNull("Null response", response);
+    assertEquals("not successful", Rcode.NOERROR, response.getRcode());
+    assertEquals("Questions do not match", query.getQuestion(),
+        response.getQuestion());
+    Record[] recs = response.getSectionArray(Section.ANSWER);
+    assertEquals(answerCount, recs.length);
+    // JUnit convention: expected value first, actual second
+    assertEquals(type, recs[0].getType());
+    return recs;
+  }
+
+  /**
+   * Round-trip a DNSSEC signing cycle with a fixed RSA key pair:
+   * build a DNSKEY record from the public key, sign an A record RRset
+   * with the matching private key, and verify the signature.
+   */
+  @Test
+  public void testDNSKEYRecord() throws Exception {
+    // base64 DNSKEY public-key material (RSA)
+    String publicK =
+        "AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD "
+            + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ "
+            + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q "
+            + "l9Ozs5bV";
+    //    byte[] publicBytes = Base64.decodeBase64(publicK);
+    //    X509EncodedKeySpec keySpec = new X509EncodedKeySpec(publicBytes);
+    //    KeyFactory keyFactory = KeyFactory.getInstance("RSA");
+    //    PublicKey pubKey = keyFactory.generatePublic(keySpec);
+    DNSKEYRecord dnskeyRecord =
+        new DNSKEYRecord(Name.fromString("dev.test."), DClass.IN, 0,
+            DNSKEYRecord.Flags.ZONE_KEY,
+            DNSKEYRecord.Protocol.DNSSEC,
+            DNSSEC.Algorithm.RSASHA256,
+            Base64.decodeBase64(publicK.getBytes()));
+    assertNotNull(dnskeyRecord);
+    // private key spec: modulus and private exponent matching publicK
+    RSAPrivateKeySpec privateSpec = new RSAPrivateKeySpec(new BigInteger(1,
+        Base64.decodeBase64(
+            "7Ul6/QDPWSGVAK9/Se53X8I0dDDA8S7wE1yFm2F0PEo9Wfb3KsMIegBaPCIaw5LDd"
+                + "LMg+trBJsfPImyOfSgsGEasfpB50UafJ2jGM2zDeb9IKY6NH9rssYEAwMUq"
+                + "oWKiLiA7K43rqy8F5j7/m7Dvb7R6L0BDbSCp/qqX07OzltU=")),
+        new BigInteger(1, Base64.decodeBase64(
+            "MgbQ6DBYhskeufNGGdct0cGG/4wb0X183ggenwCv2dopDyOTPq+5xMb4Pz9Ndzgk/"
+                + "yCY7mpaWIu9rttGOzrR+LBRR30VobPpMK1bMnzu2C0x08oYAguVwZB79DLC"
+                + "705qmZpiaaFB+LnhG7VtpPiOBm3UzZxdrBfeq/qaKrXid60=")));
+    KeyFactory factory = KeyFactory.getInstance("RSA");
+    PrivateKey priv = factory.generatePrivate(privateSpec);
+
+    // sign an A record RRset valid for one year and verify it
+    ARecord aRecord = new ARecord(Name.fromString("some.test."), DClass.IN, 0,
+        InetAddress.getByName("192.168.0.1"));
+    Calendar cal = Calendar.getInstance();
+    Date inception = cal.getTime();
+    cal.add(Calendar.YEAR, 1);
+    Date expiration = cal.getTime();
+    RRset rrset = new RRset(aRecord);
+    RRSIGRecord rrsigRecord = DNSSEC.sign(rrset,
+        dnskeyRecord,
+        priv,
+        inception,
+        expiration);
+    DNSSEC.verify(rrset, rrsigRecord, dnskeyRecord);
+
+  }
+
+  /**
+   * An IPv4 address converted by getIpv6Address must come back as an
+   * Inet6Address that still maps to the same IPv4 host address.
+   */
+  @Test
+  public void testIpv4toIpv6() throws Exception {
+    InetAddress address =
+        BaseServiceRecordProcessor
+            .getIpv6Address(InetAddress.getByName("172.17.0.19"));
+    assertTrue("not an ipv6 address", address instanceof Inet6Address);
+    assertEquals("wrong IP", "172.17.0.19",
+        InetAddress.getByAddress(address.getAddress()).getHostAddress());
+  }
+
+  /**
+   * AAAA queries for a registered container must answer with the
+   * IPv6-mapped form of its IPv4 address.
+   */
+  @Test
+  public void testAAAALookup() throws Exception {
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000002",
+        record);
+
+    // start assessing whether correct records are available
+    Record[] recs = assertDNSQuery(
+        "ctr-e50-1451931954322-0016-01-000002.dev.test.", Type.AAAA, 1);
+    assertEquals("wrong result", "172.17.0.19",
+        ((AAAARecord) recs[0]).getAddress().getHostAddress());
+
+    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", Type.AAAA, 1);
+    // message fixed: this asserts an AAAA record, not an A record
+    assertTrue("not an AAAARecord", recs[0] instanceof AAAARecord);
+  }
+
+  /**
+   * A query for an unregistered name must answer NXDOMAIN with the
+   * zone's SOA record in the authority section.
+   */
+  @Test
+  public void testNegativeLookup() throws Exception {
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000002",
+        record);
+
+    // start assessing whether correct records are available
+    Name name = Name.fromString("missing.dev.test.");
+    Record question = Record.newRecord(name, Type.A, DClass.IN);
+    Message query = Message.newQuery(question);
+
+    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
+    Message response = new Message(responseBytes);
+    // null-check must precede the first dereference of the response
+    assertNotNull("Null response", response);
+    assertEquals("not successful", Rcode.NXDOMAIN, response.getRcode());
+    assertEquals("Questions do not match", query.getQuestion(),
+        response.getQuestion());
+    Record[] sectionArray = response.getSectionArray(Section.AUTHORITY);
+    assertEquals("Wrong number of recs in AUTHORITY", isSecure() ? 2 : 1,
+        sectionArray.length);
+    // the authority section must carry the zone's SOA record
+    boolean soaFound = false;
+    for (Record rec : sectionArray) {
+      soaFound = rec.getType() == Type.SOA;
+      if (soaFound) {
+        break;
+      }
+    }
+    assertTrue("wrong record type",
+        soaFound);
+  }
+
+  /**
+   * With a zones directory configured, the server must load static
+   * records from the master file on the classpath, and serve both the
+   * dynamic registry records and the static reverse entries.
+   */
+  @Test
+  public void testReadMasterFile() throws Exception {
+    setRegistryDNS(new RegistryDNS("TestRegistry"));
+    Configuration conf = new Configuration();
+    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "dev.test");
+    conf.set(RegistryConstants.KEY_DNS_ZONE_SUBNET, "172.17.0");
+    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
+    conf.set(RegistryConstants.KEY_DNS_ZONES_DIR,
+        getClass().getResource("/").getFile());
+    if (isSecure()) {
+      // secure subclass: enable DNSSEC with the test key pair
+      conf.setBoolean(RegistryConstants.KEY_DNSSEC_ENABLED, true);
+      conf.set(RegistryConstants.KEY_DNSSEC_PUBLIC_KEY,
+          "AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD "
+              + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ "
+              + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q "
+              + "l9Ozs5bV");
+      conf.set(RegistryConstants.KEY_DNSSEC_PRIVATE_KEY_FILE,
+          getClass().getResource("/test.private").getFile());
+    }
+
+    getRegistryDNS().setDomainName(conf);
+    getRegistryDNS().initializeZones(conf);
+
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000002",
+        record);
+
+    // start assessing whether correct records are available
+    Record[] recs =
+        assertDNSQuery("ctr-e50-1451931954322-0016-01-000002.dev.test.");
+    assertEquals("wrong result", "172.17.0.19",
+        ((ARecord) recs[0]).getAddress().getHostAddress());
+
+    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1);
+    assertTrue("not an ARecord", recs[0] instanceof ARecord);
+
+    // lookup dynamic reverse records
+    recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
+    assertEquals("wrong result",
+        "httpd-1.test1.root.dev.test.",
+        ((PTRRecord) recs[0]).getTarget().toString());
+
+    // now lookup static reverse records
+    Name name = Name.fromString("5.0.17.172.in-addr.arpa.");
+    Record question = Record.newRecord(name, Type.PTR, DClass.IN);
+    Message query = Message.newQuery(question);
+    OPTRecord optRecord = new OPTRecord(4096, 0, 0, Flags.DO, null);
+    query.addRecord(optRecord, Section.ADDITIONAL);
+    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
+    Message response = new Message(responseBytes);
+    recs = response.getSectionArray(Section.ANSWER);
+    assertEquals("wrong result", "cn005.dev.test.",
+        ((PTRRecord) recs[0]).getTarget().toString());
+  }
+
+  @Test
+  public void testReverseZoneNames() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(KEY_DNS_ZONE_SUBNET, "172.26.32.0");
+    conf.set(KEY_DNS_ZONE_MASK, "255.255.224.0");
+
+    Name name = getRegistryDNS().getReverseZoneName(conf);
+    assertEquals("wrong name", "26.172.in-addr.arpa.", name.toString());
+  }
+
+  @Test
+  public void testSplitReverseZoneNames() throws Exception {
+    Configuration conf = new Configuration();
+    registryDNS = new RegistryDNS("TestRegistry");
+    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "example.com");
+    conf.set(KEY_DNS_SPLIT_REVERSE_ZONE, "true");
+    conf.set(KEY_DNS_SPLIT_REVERSE_ZONE_RANGE, "256");
+    conf.set(KEY_DNS_ZONE_SUBNET, "172.26.32.0");
+    conf.set(KEY_DNS_ZONE_MASK, "255.255.224.0");
+    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
+    conf.set(RegistryConstants.KEY_DNS_ZONES_DIR,
+        getClass().getResource("/").getFile());
+    if (isSecure()) {
+      conf.setBoolean(RegistryConstants.KEY_DNSSEC_ENABLED, true);
+      conf.set(RegistryConstants.KEY_DNSSEC_PUBLIC_KEY,
+          "AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD "
+              + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ "
+              + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q "
+              + "l9Ozs5bV");
+      conf.set(RegistryConstants.KEY_DNSSEC_PRIVATE_KEY_FILE,
+          getClass().getResource("/test.private").getFile());
+    }
+    registryDNS.setDomainName(conf);
+    registryDNS.setDNSSECEnabled(conf);
+    registryDNS.addSplitReverseZones(conf, 4);
+    assertEquals(4, registryDNS.getZoneCount());
+  }
+
+  @Test
+  public void testExampleDotCom() throws Exception {
+    Name name = Name.fromString("example.com.");
+    Record[] records = getRegistryDNS().getRecords(name, Type.SOA);
+    assertNotNull("example.com exists:", records);
+  }
+
+  @Test
+  public void testExternalCNAMERecord() throws Exception {
+    setRegistryDNS(new RegistryDNS("TestRegistry"));
+    Configuration conf = new Configuration();
+    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "dev.test");
+    conf.set(RegistryConstants.KEY_DNS_ZONE_SUBNET, "172.17.0");
+    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
+    conf.set(RegistryConstants.KEY_DNS_ZONES_DIR,
+        getClass().getResource("/").getFile());
+    getRegistryDNS().setDomainName(conf);
+    getRegistryDNS().initializeZones(conf);
+
+    // start assessing whether correct records are available
+    Record[] recs =
+        assertDNSQueryNotNull("mail.yahoo.com.", Type.CNAME, 1);
+  }
+
+  @Test
+  public void testRootLookup() throws Exception {
+    setRegistryDNS(new RegistryDNS("TestRegistry"));
+    Configuration conf = new Configuration();
+    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "dev.test");
+    conf.set(RegistryConstants.KEY_DNS_ZONE_SUBNET, "172.17.0");
+    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
+    conf.set(RegistryConstants.KEY_DNS_ZONES_DIR,
+        getClass().getResource("/").getFile());
+    getRegistryDNS().setDomainName(conf);
+    getRegistryDNS().initializeZones(conf);
+
+    // start assessing whether correct records are available
+    Record[] recs =
+        assertDNSQueryNotNull(".", Type.NS, 13);
+  }
+
+  @Test
+  public void testMultiARecord() throws Exception {
+    ServiceRecord record = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD.getBytes());
+    ServiceRecord record2 = getMarshal().fromBytes("somepath",
+        CONTAINER_RECORD2.getBytes());
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000002",
+        record);
+    getRegistryDNS().register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "ctr-e50-1451931954322-0016-01-000003",
+        record2);
+
+    // start assessing whether correct records are available
+    Record[] recs =
+        assertDNSQuery("httpd.test1.root.dev.test.", 2);
+    assertTrue("not an ARecord", recs[0] instanceof ARecord);
+    assertTrue("not an ARecord", recs[1] instanceof ARecord);
+  }
+
+  @Test(timeout=5000)
+  public void testUpstreamFault() throws Exception {
+    Name name = Name.fromString("19.0.17.172.in-addr.arpa.");
+    Record[] recs = getRegistryDNS().getRecords(name, Type.CNAME);
+    assertNull("Record is not null", recs);
+  }
+
  /**
   * Returns the {@link RegistryDNS} instance used by this test.
   *
   * @return the registry DNS server instance
   */
  public RegistryDNS getRegistryDNS() {
    return registryDNS;
  }
+
  /**
   * Sets the {@link RegistryDNS} instance used by this test.
   *
   * @param registryDNS the registry DNS server instance
   */
  public void setRegistryDNS(
      RegistryDNS registryDNS) {
    this.registryDNS = registryDNS;
  }
+
  /**
   * Returns the service-record marshaller used by this test.
   *
   * @return the service record marshaller
   */
  public RegistryUtils.ServiceRecordMarshal getMarshal() {
    return marshal;
  }
+
  /**
   * Sets the service-record marshaller used by this test.
   *
   * @param marshal the service record marshaller
   */
  public void setMarshal(
      RegistryUtils.ServiceRecordMarshal marshal) {
    this.marshal = marshal;
  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestReverseZoneUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestReverseZoneUtils.java b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestReverseZoneUtils.java
new file mode 100644
index 0000000..1331f75
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestReverseZoneUtils.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.registry.server.dns;
+
+import java.net.UnknownHostException;
+import static org.junit.Assert.assertEquals;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+/**
+ * Tests for the reverse zone utilities.
+ */
+public class TestReverseZoneUtils {
+  private static final String NET = "172.17.4.0";
+  private static final int RANGE = 256;
+  private static final int INDEX = 0;
+
+  @Rule public ExpectedException exception = ExpectedException.none();
+
+  @Test
+  public void testGetReverseZoneNetworkAddress() throws Exception {
+    assertEquals("172.17.4.0",
+        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, RANGE, INDEX));
+  }
+
+  @Test
+  public void testSplitIp() throws Exception {
+    long[] splitIp = ReverseZoneUtils.splitIp(NET);
+    assertEquals(172, splitIp[0]);
+    assertEquals(17, splitIp[1]);
+    assertEquals(4, splitIp[2]);
+    assertEquals(0, splitIp[3]);
+  }
+
+  @Test
+  public void testThrowIllegalArgumentExceptionIfIndexIsNegative()
+      throws Exception {
+    exception.expect(IllegalArgumentException.class);
+    ReverseZoneUtils.getReverseZoneNetworkAddress(NET, RANGE, -1);
+  }
+
+  @Test
+  public void testThrowUnknownHostExceptionIfIpIsInvalid() throws Exception {
+    exception.expect(UnknownHostException.class);
+    ReverseZoneUtils
+        .getReverseZoneNetworkAddress("213124.21231.14123.13", RANGE, INDEX);
+  }
+
+  @Test
+  public void testThrowIllegalArgumentExceptionIfRangeIsNegative()
+      throws Exception {
+    exception.expect(IllegalArgumentException.class);
+    ReverseZoneUtils.getReverseZoneNetworkAddress(NET, -1, INDEX);
+  }
+
+  @Test
+  public void testVariousRangeAndIndexValues() throws Exception {
+    // Given the base address of 172.17.4.0, step 256 IP addresses, 5 times.
+    assertEquals("172.17.9.0",
+        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 256, 5));
+    assertEquals("172.17.4.128",
+        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 128, 1));
+    assertEquals("172.18.0.0",
+        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 256, 252));
+    assertEquals("172.17.12.0",
+        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1024, 2));
+    assertEquals("172.17.4.0",
+        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 0, 1));
+    assertEquals("172.17.4.0",
+        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1, 0));
+    assertEquals("172.17.4.1",
+        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1, 1));
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java
new file mode 100644
index 0000000..ded63bd
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.registry.server.dns;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+
/**
 * Runs the registry DNS test suite with DNSSEC signing enabled.
 */
+public class TestSecureRegistryDNS extends TestRegistryDNS {
+  @Override protected Configuration createConfiguration() {
+    Configuration conf = super.createConfiguration();
+    conf.setBoolean(RegistryConstants.KEY_DNSSEC_ENABLED, true);
+    conf.set(RegistryConstants.KEY_DNSSEC_PUBLIC_KEY,
+        "AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD "
+            + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ "
+            + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q "
+            + "l9Ozs5bV");
+    conf.set(RegistryConstants.KEY_DNSSEC_PRIVATE_KEY_FILE,
+        getClass().getResource("/test.private").getFile());
+
+    return conf;
+  }
+
+  @Override protected boolean isSecure() {
+    return true;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/hadoop-registry/src/test/resources/0.17.172.in-addr.arpa.zone
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/src/test/resources/0.17.172.in-addr.arpa.zone b/hadoop-common-project/hadoop-registry/src/test/resources/0.17.172.in-addr.arpa.zone
new file mode 100644
index 0000000..08071e2
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/src/test/resources/0.17.172.in-addr.arpa.zone
@@ -0,0 +1,36 @@
+;
+; Licensed to the Apache Software Foundation (ASF) under one
+; or more contributor license agreements.  See the NOTICE file
+; distributed with this work for additional information
+; regarding copyright ownership.  The ASF licenses this file
+; to you under the Apache License, Version 2.0 (the
+; "License"); you may not use this file except in compliance
+; with the License.  You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+;
+;
+$ORIGIN .
+$TTL 1800 ; 30 minutes
+0.17.172.in-addr.arpa IN SOA ns.hwhq.hortonworks.com. it.hortonworks.com. (
+  2015081000 ; serial
+  10800      ; refresh (3 hours)
+  900        ; retry (15 minutes)
+  1814400    ; expire (3 weeks)
+  10800      ; minimum (3 hours)
+)
+ NS ns.hwhq.hortonworks.com.
+ NS ns2.hwhq.hortonworks.com.
+
+$ORIGIN 0.17.172.in-addr.arpa.
+5  PTR  cn005.dev.test.
+6  PTR  cn006.dev.test.
+7  PTR  cn007.dev.test.
+8  PTR  cn008.dev.test.
+9  PTR  cn009.dev.test.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/hadoop-registry/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/src/test/resources/log4j.properties b/hadoop-common-project/hadoop-registry/src/test/resources/log4j.properties
new file mode 100644
index 0000000..bed1abc
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/src/test/resources/log4j.properties
@@ -0,0 +1,63 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=INFO,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
+log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
+log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
+
+# packages under test
+log4j.logger.org.apache.hadoop.yarn.registry=DEBUG
+log4j.logger.org.apache.hadoop.service=DEBUG
+
+log4j.logger.org.apache.hadoop.security.UserGroupInformation=DEBUG
+
+
+#crank back on some noise
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.blockmanagement=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
+log4j.logger.org.apache.hadoop.hdfs=WARN
+
+
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
+log4j.logger.org.apache.zookeeper=INFO
+log4j.logger.org.apache.zookeeper.ClientCnxn=DEBUG
+
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.security=WARN
+log4j.logger.org.apache.hadoop.metrics2=ERROR
+log4j.logger.org.apache.hadoop.util.HostsFileReader=WARN
+log4j.logger.org.apache.hadoop.yarn.event.AsyncDispatcher=WARN
+log4j.logger.org.apache.hadoop.security.token.delegation=WARN
+log4j.logger.org.apache.hadoop.yarn.util.AbstractLivelinessMonitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.security=WARN
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMNMInfo=WARN
+
+# curator noise
+log4j.logger.org.apache.curator.framework.imps=WARN
+log4j.logger.org.apache.curator.framework.state.ConnectionStateManager=ERROR
+
+log4j.logger.org.apache.directory.api.ldap=ERROR
+log4j.logger.org.apache.directory.server=ERROR
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/hadoop-registry/src/test/resources/test.private
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/src/test/resources/test.private b/hadoop-common-project/hadoop-registry/src/test/resources/test.private
new file mode 100644
index 0000000..5f0da9d
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/src/test/resources/test.private
@@ -0,0 +1,32 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+Private-key-format: v1.3
+Algorithm: 8 (RSASHA256)
+Modulus: 7Ul6/QDPWSGVAK9/Se53X8I0dDDA8S7wE1yFm2F0PEo9Wfb3KsMIegBaPCIaw5LDdLMg+trBJsfPImyOfSgsGEasfpB50UafJ2jGM2zDeb9IKY6NH9rssYEAwMUqoWKiLiA7K43rqy8F5j7/m7Dvb7R6L0BDbSCp/qqX07OzltU=
+PublicExponent: AQAB
+PrivateExponent: MgbQ6DBYhskeufNGGdct0cGG/4wb0X183ggenwCv2dopDyOTPq+5xMb4Pz9Ndzgk/yCY7mpaWIu9rttGOzrR+LBRR30VobPpMK1bMnzu2C0x08oYAguVwZB79DLC705qmZpiaaFB+LnhG7VtpPiOBm3UzZxdrBfeq/qaKrXid60=
+Prime1: /HFdjI4cRuJBjK9IGWWmmVZWwaFsQYO9GHLCDwjm691GxaDpXuMdPd0uH9EqQvskyF8JPmzQXI43swyUFjizow==
+Prime2: 8KFxkWEHlhgB2GLi8tk39TKY5vmFUvh4FO28COl1N/rWjKVpfM1p6HQ6YavoGNZQmDBazv4WOZRqSQukHApzJw==
+Exponent1: alX+h/RcqOcpoW88OaZ99N1PkiTDCx3JC4FbiSXAz93Xr+vGIfgdGzAN+80JtklABz8xD6CabEJj6AIGZw3fbQ==
+Exponent2: vvPusqZkJcjBVh0K6hpUXKEdU1W5ZmFEsZ8Cs7PH0Hee4Je3QVGk9NGfLrkDgwo3hL4CofZiXqkXOwYg4husyw==
+Coefficient: omxpbNU6u/swbnkTC6MicaDqbJP7ETnCCJ1iN2+HZO/AlQCFlqVzLwGZmvGMAGA9ZWF+YpqpPhvzi4bWmi5XrQ==
+Created: 20160119155251
+Publish: 20160119155251
+Activate: 20160119155251
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-common-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml
index 9f57569..e96d1ba 100644
--- a/hadoop-common-project/pom.xml
+++ b/hadoop-common-project/pom.xml
@@ -37,6 +37,7 @@
     <module>hadoop-nfs</module>
     <module>hadoop-minikdc</module>
     <module>hadoop-kms</module>
+    <module>hadoop-registry</module>
   </modules>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 1e5b1b8..e5f4965 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -373,7 +373,7 @@
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-registry</artifactId>
+        <artifactId>hadoop-registry</artifactId>
         <version>${hadoop.version}</version>
       </dependency>
       <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 8290fcd..3ec0311 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -142,6 +142,7 @@ ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
       HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.QueueCLI
     ;;
     registrydns)
+      echo "DEPRECATED: Use of this command is deprecated." 1>&2
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_SECURE_CLASSNAME='org.apache.hadoop.registry.server.dns.PrivilegedRegistryDNSStarter'
       HADOOP_CLASSNAME='org.apache.hadoop.registry.server.dns.RegistryDNSServer'


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message