hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ste...@apache.org
Subject [6/6] git commit: YARN-913 service registry: YARN-2652 add hadoop-yarn-registry package under hadoop-yarn
Date Wed, 08 Oct 2014 20:02:48 GMT
YARN-913 service registry: YARN-2652 add hadoop-yarn-registry package under hadoop-yarn


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a326711
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a326711
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a326711

Branch: refs/heads/trunk
Commit: 6a326711aa27e84fd4c53937afc5c41a746ec65a
Parents: e16e25a
Author: Steve Loughran <stevel@apache.org>
Authored: Wed Oct 8 12:54:37 2014 -0700
Committer: Steve Loughran <stevel@apache.org>
Committed: Wed Oct 8 13:02:25 2014 -0700

----------------------------------------------------------------------
 .gitignore                                      |   2 +
 hadoop-project/pom.xml                          |   5 +
 hadoop-yarn-project/CHANGES.txt                 |   7 +
 .../dev-support/findbugs-exclude.xml            |  10 +
 .../src/main/resources/yarn-default.xml         | 126 +++
 .../hadoop-yarn/hadoop-yarn-registry/pom.xml    | 218 ++++
 .../apache/hadoop/registry/cli/RegistryCli.java | 445 +++++++++
 .../hadoop/registry/client/api/BindFlags.java   |  41 +
 .../registry/client/api/RegistryConstants.java  | 286 ++++++
 .../registry/client/api/RegistryOperations.java | 182 ++++
 .../client/api/RegistryOperationsFactory.java   | 131 +++
 .../registry/client/api/package-info.java       |  35 +
 .../registry/client/binding/JsonSerDeser.java   | 327 ++++++
 .../client/binding/RegistryPathUtils.java       | 218 ++++
 .../client/binding/RegistryTypeUtils.java       | 240 +++++
 .../registry/client/binding/RegistryUtils.java  | 362 +++++++
 .../registry/client/binding/package-info.java   |  22 +
 .../AuthenticationFailedException.java          |  39 +
 .../exceptions/InvalidPathnameException.java    |  40 +
 .../exceptions/InvalidRecordException.java      |  41 +
 .../NoChildrenForEphemeralsException.java       |  48 +
 .../exceptions/NoPathPermissionsException.java  |  45 +
 .../client/exceptions/NoRecordException.java    |  51 +
 .../client/exceptions/RegistryIOException.java  |  58 ++
 .../client/exceptions/package-info.java         |  33 +
 .../client/impl/RegistryOperationsClient.java   |  55 +
 .../registry/client/impl/package-info.java      |  26 +
 .../client/impl/zk/BindingInformation.java      |  41 +
 .../registry/client/impl/zk/CuratorService.java | 769 ++++++++++++++
 .../client/impl/zk/RegistryBindingSource.java   |  36 +
 .../impl/zk/RegistryInternalConstants.java      |  81 ++
 .../impl/zk/RegistryOperationsService.java      | 155 +++
 .../client/impl/zk/RegistrySecurity.java        | 996 +++++++++++++++++++
 .../registry/client/impl/zk/ZKPathDumper.java   | 133 +++
 .../client/impl/zk/ZookeeperConfigOptions.java  | 119 +++
 .../registry/client/impl/zk/package-info.java   |  39 +
 .../registry/client/types/AddressTypes.java     |  92 ++
 .../hadoop/registry/client/types/Endpoint.java  | 190 ++++
 .../registry/client/types/ProtocolTypes.java    | 104 ++
 .../client/types/RegistryPathStatus.java        | 123 +++
 .../registry/client/types/ServiceRecord.java    | 249 +++++
 .../client/types/ServiceRecordHeader.java       |  59 ++
 .../registry/client/types/package-info.java     |  41 +
 .../client/types/yarn/PersistencePolicies.java  |  50 +
 .../types/yarn/YarnRegistryAttributes.java      |  31 +
 .../RMRegistryOperationsService.java            | 246 +++++
 .../integration/SelectByYarnPersistence.java    |  60 ++
 .../server/integration/package-info.java        |  23 +
 .../hadoop/registry/server/package-info.java    |  27 +
 .../server/services/AddingCompositeService.java |  56 ++
 .../services/DeleteCompletionCallback.java      |  58 ++
 .../server/services/MicroZookeeperService.java  | 282 ++++++
 .../services/MicroZookeeperServiceKeys.java     |  69 ++
 .../server/services/RegistryAdminService.java   | 529 ++++++++++
 .../registry/server/services/package-info.java  |  40 +
 .../src/main/resources/.keep                    |   0
 .../src/main/tla/yarnregistry.tla               | 538 ++++++++++
 .../hadoop/registry/AbstractRegistryTest.java   | 123 +++
 .../hadoop/registry/AbstractZKRegistryTest.java | 113 +++
 .../hadoop/registry/RegistryTestHelper.java     | 401 ++++++++
 .../client/binding/TestMarshalling.java         | 121 +++
 .../binding/TestRegistryOperationUtils.java     |  47 +
 .../client/binding/TestRegistryPathUtils.java   | 178 ++++
 .../client/impl/CuratorEventCatcher.java        |  68 ++
 .../client/impl/TestCuratorService.java         | 249 +++++
 .../client/impl/TestMicroZookeeperService.java  |  60 ++
 .../integration/TestRegistryRMOperations.java   | 369 +++++++
 .../integration/TestYarnPolicySelector.java     |  65 ++
 .../operations/TestRegistryOperations.java      | 304 ++++++
 .../secure/AbstractSecureRegistryTest.java      | 356 +++++++
 .../registry/secure/KerberosConfiguration.java  |  81 ++
 .../secure/TestRegistrySecurityHelper.java      | 211 ++++
 .../registry/secure/TestSecureLogins.java       | 214 ++++
 .../secure/TestSecureRMRegistryOperations.java  | 350 +++++++
 .../registry/secure/TestSecureRegistry.java     | 157 +++
 .../src/test/resources/log4j.properties         |  63 ++
 .../hadoop-yarn-site/src/site/apt/index.apt.vm  |   2 +
 .../src/site/markdown/registry/index.md         |  28 +
 .../site/markdown/registry/registry-security.md | 120 +++
 .../registry/using-the-yarn-service-registry.md | 150 +++
 .../src/site/markdown/registry/yarn-registry.md | 889 +++++++++++++++++
 hadoop-yarn-project/hadoop-yarn/pom.xml         |   1 +
 82 files changed, 13049 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index db58f6a..6ece6ca 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,5 @@ hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
 hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
 hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
+yarnregistry.pdf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 79622a2..612781a 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -222,6 +222,11 @@
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-yarn-registry</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-nodemanager</artifactId>
         <version>${project.version}</version>
       </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0eb05c6..872eb39 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -142,6 +142,9 @@ Release 2.6.0 - UNRELEASED
     YARN-1051. Add a system for creating reservations of cluster capacity.
     (see breakdown below)
 
+    YARN-913. Add a way to register long-lived services in a YARN cluster.
+    (stevel)
+
   IMPROVEMENTS
 
     YARN-2197. Add a link to YARN CHANGES.txt in the left side of doc
@@ -590,6 +593,10 @@ Release 2.6.0 - UNRELEASED
 
     YARN-2649. Fixed TestAMRMRPCNodeUpdates test failure. (Ming Ma via jianhe)
 
+  BREAKDOWN OF YARN-913 SUBTASKS AND RELATED JIRAS
+
+    YARN-2652 Add hadoop-yarn-registry package under hadoop-yarn. (stevel)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 0e6207b..6e82af0 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -363,4 +363,14 @@
     <Field name="reservationsContinueLooking" />
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
   </Match>
+
+  <!--
+  This code is meant to deserialize this way... subclasses will need to
+  instantiate their own JsonSerDeser instances if they want to deserialize.
+  -->
+  <Match>
+    <Class name="org.apache.hadoop.registry.client.binding.JsonSerDeser"/>
+    <Bug pattern="UI_INHERITANCE_UNSAFE_GETRESOURCE"/>
+  </Match>
+
 </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 79244ad..1db7939 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1363,4 +1363,130 @@
     <name>yarn.nodemanager.container-monitor.procfs-tree.smaps-based-rss.enabled</name>
     <value>false</value>
   </property>
+
+  <!-- YARN registry -->
+
+  <property>
+    <description>
+      Is the registry enabled: does the RM start it up,
+      create the user and system paths, and purge
+      service records when containers, application attempts
+      and applications complete
+    </description>
+    <name>hadoop.registry.rm.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+      The root zookeeper node for the registry
+    </description>
+    <name>hadoop.registry.zk.root</name>
+    <value>/registry</value>
+  </property>
+
+  <property>
+    <description>
+      Zookeeper session timeout in milliseconds
+    </description>
+    <name>hadoop.registry.zk.session.timeout.ms</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <description>
+      Zookeeper connection timeout in milliseconds
+    </description>
+    <name>hadoop.registry.zk.connection.timeout.ms</name>
+    <value>15000</value>
+  </property>
+
+  <property>
+    <description>
+      Zookeeper connection retry count before failing
+    </description>
+    <name>hadoop.registry.zk.retry.times</name>
+    <value>5</value>
+  </property>
+
+  <property>
+    <description>
+      The interval between zookeeper retries in milliseconds
+    </description>
+    <name>hadoop.registry.zk.retry.interval.ms</name>
+    <value>1000</value>
+  </property>
+
+  <property>
+    <description>
+      Zookeeper retry limit in milliseconds, during
+      exponential backoff.
+
+      This places a limit even
+      if the retry times and interval limit, combined
+      with the backoff policy, result in a long retry
+      period
+    </description>
+    <name>hadoop.registry.zk.retry.ceiling.ms</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <description>
+      List of hostname:port pairs defining the
+      zookeeper quorum binding for the registry
+    </description>
+    <name>hadoop.registry.zk.quorum</name>
+    <value>localhost:2181</value>
+  </property>
+
+  <property>
+    <description>
+      Key to set if the registry is secure. Turning it on
+      changes the permissions policy from "open access"
+      to restrictions on kerberos with the option of
+      a user adding one or more auth key pairs down their
+      own tree.
+    </description>
+    <name>hadoop.registry.secure</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+      A comma separated list of Zookeeper ACL identifiers with
+      system access to the registry in a secure cluster.
+
+      These are given full access to all entries.
+
+      If there is an "@" at the end of a SASL entry it
+      instructs the registry client to append the default kerberos domain.
+    </description>
+    <name>hadoop.registry.system.acls</name>
+    <value>sasl:yarn@, sasl:mapred@, sasl:hdfs@</value>
+  </property>
+
+  <property>
+    <description>
+      The kerberos realm: used to set the realm of
+      system principals which do not declare their realm,
+      and any other accounts that need the value.
+
+      If empty, the default realm of the running process
+      is used.
+
+      If neither are known and the realm is needed, then the registry
+      service/client will fail.
+    </description>
+    <name>hadoop.registry.kerberos.realm</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+      Key to define the JAAS context. Used in secure
+      mode
+    </description>
+    <name>hadoop.registry.jaas.context</name>
+    <value>Client</value>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
new file mode 100644
index 0000000..05d8f23
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -0,0 +1,218 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hadoop-yarn</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>3.0.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>hadoop-yarn-registry</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <name>hadoop-yarn-registry</name>
+
+  <properties>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
+    <yarn.basedir>${project.parent.basedir}</yarn.basedir>
+  </properties>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+    </dependency>
+
+    <!-- needed for TimedOutTestsListener -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+
+    <!-- Mini KDC is used for testing -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-framework</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-test</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+  </dependencies>
+
+  <build>
+    <!--
+    Include all files in src/main/resources.  By default, do not apply property
+    substitution (filtering=false), but do apply property substitution to
+    yarn-version-info.properties (filtering=true).  This will substitute the
+    version information correctly, but prevent Maven from altering other files
+    like yarn-default.xml.
+    -->
+    <resources>
+      <resource>
+        <directory>${basedir}/src/main/resources</directory>
+        <excludes>
+          <exclude>yarn-version-info.properties</exclude>
+        </excludes>
+        <filtering>false</filtering>
+      </resource>
+      <resource>
+        <directory>${basedir}/src/main/resources</directory>
+        <includes>
+          <include>yarn-version-info.properties</include>
+        </includes>
+        <filtering>true</filtering>
+      </resource>
+    </resources>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+          </excludes>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
+        <executions>
+          <execution>
+            <id>version-info</id>
+            <phase>generate-resources</phase>
+            <goals>
+              <goal>version-info</goal>
+            </goals>
+            <configuration>
+              <source>
+                <directory>${basedir}/src/main</directory>
+                <includes>
+                  <include>java/**/*.java</include>
+                  <!--
+                  <include>proto/**/*.proto</include>
+                    -->
+                </includes>
+              </source>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+            <phase>test-compile</phase>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+      <groupId>org.apache.maven.plugins</groupId>
+      <artifactId>maven-surefire-plugin</artifactId>
+      <configuration>
+        <reuseForks>false</reuseForks>
+        <forkedProcessTimeoutInSeconds>900</forkedProcessTimeoutInSeconds>
+        <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError</argLine>
+        <environmentVariables>
+          <!-- HADOOP_HOME required for tests on Windows to find winutils -->
+          <HADOOP_HOME>${hadoop.common.build.dir}</HADOOP_HOME>
+          <!-- configurable option to turn JAAS debugging on during test runs -->
+          <HADOOP_JAAS_DEBUG>true</HADOOP_JAAS_DEBUG>
+          <LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib</LD_LIBRARY_PATH>
+          <MALLOC_ARENA_MAX>4</MALLOC_ARENA_MAX>
+        </environmentVariables>
+        <systemPropertyVariables>
+
+
+          <!-- TODO: all references in testcases should be updated to this default -->
+          <test.build.dir>${test.build.dir}</test.build.dir>
+          <hadoop.tmp.dir>${hadoop.tmp.dir}</hadoop.tmp.dir>
+          <test.build.data>${test.build.data}</test.build.data>
+          <test.build.webapps>${test.build.webapps}</test.build.webapps>
+          <test.cache.data>${test.cache.data}</test.cache.data>
+          <hadoop.log.dir>${hadoop.log.dir}</hadoop.log.dir>
+          <test.build.classes>${test.build.classes}</test.build.classes>
+
+          <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
+          <java.security.krb5.conf>${basedir}/src/test/resources/krb5.conf</java.security.krb5.conf>
+          <java.security.egd>${java.security.egd}</java.security.egd>
+          <require.test.libhadoop>${require.test.libhadoop}</require.test.libhadoop>
+        </systemPropertyVariables>
+        <includes>
+          <include>**/Test*.java</include>
+        </includes>
+        <excludes>
+          <exclude>**/${test.exclude}.java</exclude>
+          <exclude>${test.exclude.pattern}</exclude>
+          <exclude>**/Test*$*.java</exclude>
+        </excludes>
+      </configuration>
+    </plugin>
+
+
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java
new file mode 100644
index 0000000..863039e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java
@@ -0,0 +1,445 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.registry.cli;
+
+import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.*;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.List;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.registry.client.api.BindFlags;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
+import org.apache.hadoop.registry.client.exceptions.AuthenticationFailedException;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.types.Endpoint;
+import org.apache.hadoop.registry.client.types.ProtocolTypes;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class RegistryCli extends Configured implements Tool {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RegistryCli.class);
+	protected final PrintStream sysout;
+	protected final PrintStream syserr;
+
+
+	private RegistryOperations registry;
+
+  static final String LS_USAGE = "ls pathName";
+  static final String RESOLVE_USAGE = "resolve pathName";
+  static final String BIND_USAGE =
+      "bind -inet  -api apiName -p portNumber -h hostName  pathName" + "\n"
+      + "bind -webui uriString -api apiName  pathName" + "\n"
+      + "bind -rest uriString -api apiName  pathName";
+  static final String MKNODE_USAGE = "mknode directoryName";
+  static final String RM_USAGE = "rm pathName";
+  static final String USAGE =
+      "\n" + LS_USAGE + "\n" + RESOLVE_USAGE + "\n" + BIND_USAGE + "\n" +
+      MKNODE_USAGE + "\n" + RM_USAGE;
+
+
+
+	public RegistryCli(PrintStream sysout, PrintStream syserr) {
+	    super(new YarnConfiguration());
+    this.sysout = sysout;
+    this.syserr = syserr;
+	}
+
+
+  @SuppressWarnings("UseOfSystemOutOrSystemErr")
+  public static void main(String[] args) throws Exception {
+    RegistryCli cli = new RegistryCli(System.out, System.err);
+    int res = ToolRunner.run(cli, args);
+    System.exit(res);
+  }
+
+  private int usageError(String err, String usage) {
+    syserr.println("Error: " + err);
+    syserr.println("Usage: " + usage);
+    return -1;
+  }
+
+  private boolean validatePath(String path) {
+    if (!path.startsWith("/")) {
+      syserr.println("Path must start with /; given path was: " + path);
+      return false;
+    }
+    return true;
+  }
+  @Override
+  public int run(String[] args) throws Exception {
+    Preconditions.checkArgument(getConf() != null, "null configuration");
+    registry = RegistryOperationsFactory.createInstance(
+        new YarnConfiguration(getConf()));
+    registry.start();
+    if (args.length > 0) {
+      if (args[0].equals("ls")) {
+        return ls(args);
+      } else if (args[0].equals("resolve")) {
+        return resolve(args);
+      } else if (args[0].equals("bind")) {
+        return bind(args);
+      } else if (args[0].equals("mknode")) {
+        return mknode(args);
+      } else if (args[0].equals("rm")) {
+        return rm(args);
+      }
+    }
+    return usageError("Invalid command: " + args[0], USAGE);
+  }
+
+  @SuppressWarnings("unchecked")
+	public int ls(String [] args) {
+
+		Options lsOption = new Options();
+		CommandLineParser parser = new GnuParser();
+		try {
+			CommandLine line = parser.parse(lsOption, args);
+
+			List<String> argsList = line.getArgList();
+			if (argsList.size() != 2) {
+				return usageError("ls requires exactly one path argument", LS_USAGE);
+		    }
+			if (!validatePath(argsList.get(1)))
+				return -1;
+
+			try {
+				List<String> children = registry.list(argsList.get(1));
+        for (String child : children) {
+          sysout.println(child);
+        }
+				return 0;
+
+      } catch (Exception e) {
+        syserr.println(analyzeException("ls", e, argsList));
+      }
+			return -1;
+		} catch (ParseException exp) {
+			return usageError("Invalid syntax " + exp, LS_USAGE);
+		}
+	}
+
+  @SuppressWarnings("unchecked")
+  public int resolve(String [] args) {
+		Options resolveOption = new Options();
+		CommandLineParser parser = new GnuParser();
+		try {
+			CommandLine line = parser.parse(resolveOption, args);
+
+			List<String> argsList = line.getArgList();
+			if (argsList.size() != 2) {
+				return usageError("resolve requires exactly one path argument", RESOLVE_USAGE);
+		    }
+			if (!validatePath(argsList.get(1)))
+				return -1;
+
+			try {
+				ServiceRecord record = registry.resolve(argsList.get(1));
+
+				for (Endpoint endpoint : record.external) {
+					if ((endpoint.protocolType.equals(ProtocolTypes.PROTOCOL_WEBUI))
+							|| (endpoint.protocolType.equals(ProtocolTypes.PROTOCOL_REST))) {
+						sysout.print(" Endpoint(ProtocolType="
+								+ endpoint.protocolType + ", Api="
+								+ endpoint.api + "); Uris are: ");
+					} else {
+						sysout.print(" Endpoint(ProtocolType="
+								+ endpoint.protocolType + ", Api="
+								+ endpoint.api + ");"
+								+ " Addresses(AddressType="
+								+ endpoint.addressType + ") are: ");
+
+					}
+					for (List<String> a : endpoint.addresses) {
+						sysout.print(a + " ");
+					}
+					sysout.println();
+				}
+				return 0;
+      } catch (Exception e) {
+        syserr.println(analyzeException("resolve", e, argsList));
+      }
+			return -1;
+		} catch (org.apache.commons.cli.ParseException exp) {
+			return usageError("Invalid syntax " + exp, RESOLVE_USAGE);
+		}
+
+	}
+
+	public int bind(String [] args) {
+		Option rest = OptionBuilder.withArgName("rest")
+				.hasArg()
+				.withDescription("rest Option")
+				.create("rest");
+		Option webui = OptionBuilder.withArgName("webui")
+				.hasArg()
+				.withDescription("webui Option")
+				.create("webui");
+		Option inet = OptionBuilder.withArgName("inet")
+				.withDescription("inet Option")
+				.create("inet");
+		Option port = OptionBuilder.withArgName("port")
+				.hasArg()
+				.withDescription("port to listen on [9999]")
+				.create("p");
+		Option host = OptionBuilder.withArgName("host")
+				.hasArg()
+				.withDescription("host name")
+				.create("h");
+		Option apiOpt = OptionBuilder.withArgName("api")
+				.hasArg()
+				.withDescription("api")
+				.create("api");
+		Options inetOption = new Options();
+		inetOption.addOption(inet);
+		inetOption.addOption(port);
+		inetOption.addOption(host);
+		inetOption.addOption(apiOpt);
+
+		Options webuiOpt = new Options();
+		webuiOpt.addOption(webui);
+		webuiOpt.addOption(apiOpt);
+
+		Options restOpt = new Options();
+		restOpt.addOption(rest);
+		restOpt.addOption(apiOpt);
+
+
+    CommandLineParser parser = new GnuParser();
+    ServiceRecord sr = new ServiceRecord();
+    CommandLine line = null;
+    if (args.length <= 1) {
+      return usageError("Invalid syntax ", BIND_USAGE);
+    }
+    if (args[1].equals("-inet")) {
+      int portNum;
+      String hostName;
+      String api;
+
+      try {
+        line = parser.parse(inetOption, args);
+      } catch (ParseException exp) {
+        return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
+      }
+      if (line.hasOption("inet") && line.hasOption("p") &&
+          line.hasOption("h") && line.hasOption("api")) {
+        portNum = Integer.parseInt(line.getOptionValue("p"));
+        hostName = line.getOptionValue("h");
+        api = line.getOptionValue("api");
+        sr.addExternalEndpoint(
+            inetAddrEndpoint(api, ProtocolTypes.PROTOCOL_HADOOP_IPC, hostName,
+                portNum));
+
+      } else {
+        return usageError("Missing options: must have host, port and api",
+            BIND_USAGE);
+      }
+
+    } else if (args[1].equals("-webui")) {
+      try {
+        line = parser.parse(webuiOpt, args);
+      } catch (ParseException exp) {
+        return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
+      }
+      if (line.hasOption("webui") && line.hasOption("api")) {
+        URI theUri = null;
+        try {
+          theUri = new URI(line.getOptionValue("webui"));
+        } catch (URISyntaxException e) {
+          return usageError("Invalid URI: " + e.getMessage(), BIND_USAGE);
+        }
+        sr.addExternalEndpoint(webEndpoint(line.getOptionValue("api"), theUri));
+
+      } else {
+        return usageError("Missing options: must have value for uri and api",
+            BIND_USAGE);
+      }
+    } else if (args[1].equals("-rest")) {
+      try {
+        line = parser.parse(restOpt, args);
+      } catch (ParseException exp) {
+        return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
+      }
+      if (line.hasOption("rest") && line.hasOption("api")) {
+        URI theUri = null;
+        try {
+          theUri = new URI(line.getOptionValue("rest"));
+        } catch (URISyntaxException e) {
+          return usageError("Invalid URI: " + e.getMessage(), BIND_USAGE);
+        }
+        sr.addExternalEndpoint(
+            restEndpoint(line.getOptionValue("api"), theUri));
+
+      } else {
+        return usageError("Missing options: must have value for uri and api",
+            BIND_USAGE);
+      }
+
+    } else {
+      return usageError("Invalid syntax", BIND_USAGE);
+    }
+    @SuppressWarnings("unchecked")
+		List<String> argsList = line.getArgList();
+		if (argsList.size() != 2) {
+			return usageError("bind requires exactly one path argument", BIND_USAGE);
+	    }
+		if (!validatePath(argsList.get(1)))
+			return -1;
+
+		try {
+			registry.bind(argsList.get(1), sr, BindFlags.OVERWRITE);
+			return 0;
+    } catch (Exception e) {
+      syserr.println(analyzeException("bind", e, argsList));
+    }
+
+    return -1;
+	}
+
+  @SuppressWarnings("unchecked")
+	public int mknode(String [] args) {
+		Options mknodeOption = new Options();
+		CommandLineParser parser = new GnuParser();
+		try {
+			CommandLine line = parser.parse(mknodeOption, args);
+
+			List<String> argsList = line.getArgList();
+			if (argsList.size() != 2) {
+				return usageError("mknode requires exactly one path argument", MKNODE_USAGE);
+		    }
+			if (!validatePath(argsList.get(1)))
+				return -1;
+
+			try {
+				registry.mknode(args[1], false);
+				return 0;
+			} catch (Exception e) {
+        syserr.println(analyzeException("mknode", e, argsList));
+			}
+			return -1;
+		} catch (ParseException exp) {
+			return usageError("Invalid syntax " + exp.toString(), MKNODE_USAGE);
+		}
+	}
+
+
+  @SuppressWarnings("unchecked")
+  public int rm(String[] args) {
+		Option recursive = OptionBuilder.withArgName("recursive")
+				.withDescription("delete recursively").create("r");
+
+		Options rmOption = new Options();
+		rmOption.addOption(recursive);
+
+		boolean recursiveOpt = false;
+
+		CommandLineParser parser = new GnuParser();
+		try {
+			CommandLine line = parser.parse(rmOption, args);
+
+			List<String> argsList = line.getArgList();
+			if (argsList.size() != 2) {
+				return usageError("RM requires exactly one path argument", RM_USAGE);
+		    }
+			if (!validatePath(argsList.get(1)))
+				return -1;
+
+			try {
+				if (line.hasOption("r")) {
+					recursiveOpt = true;
+				}
+
+				registry.delete(argsList.get(1), recursiveOpt);
+				return 0;
+      } catch (Exception e) {
+        syserr.println(analyzeException("rm", e, argsList));
+      }
+      return -1;
+		} catch (ParseException exp) {
+			return usageError("Invalid syntax " + exp.toString(), RM_USAGE);
+		}
+	}
+
+  /**
+   * Given an exception and a possibly empty argument list, generate
+   * a diagnostics string for use in error messages
+   * @param operation the operation that failed
+   * @param e exception
+   * @param argsList arguments list
+   * @return a string intended for the user
+   */
+  String analyzeException(String operation,
+      Exception e,
+      List<String> argsList) {
+
+    String pathArg = !argsList.isEmpty() ? argsList.get(1) : "(none)";
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Operation {} on path {} failed with exception {}",
+          operation, pathArg, e, e);
+    }
+    if (e instanceof InvalidPathnameException) {
+      return "InvalidPath :" + pathArg + ": " + e;
+    }
+    if (e instanceof PathNotFoundException) {
+      return "Path not found: " + pathArg;
+    }
+    if (e instanceof NoRecordException) {
+      return "No service record at path " + pathArg;
+    }
+    if (e instanceof AuthenticationFailedException) {
+      return "Failed to authenticate to registry : " + e;
+    }
+    if (e instanceof NoPathPermissionsException) {
+      return "No Permission to path: " + pathArg + ": " + e;
+    }
+    if (e instanceof AccessControlException) {
+      return "No Permission to path: " + pathArg + ": " + e;
+    }
+    if (e instanceof InvalidRecordException) {
+      return "Unable to read record at: " + pathArg + ": " + e;
+    }
+    if (e instanceof IOException) {
+      return "IO Exception when accessing path :" + pathArg + ": " + e;
+    }
+    // something else went very wrong here
+    return "Exception " + e;
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java
new file mode 100644
index 0000000..5fd2aef
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
/**
 * Combinable flags to use when creating a service entry.
 * Flags may be "or"ed together into the {@code flags} argument of
 * a bind operation.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface BindFlags {

  /**
   * Create the entry. This is just "0" and can be "or"ed with anything.
   */
  int CREATE = 0;

  /**
   * The entry should be created even if an existing entry is there;
   * i.e. any existing entry is overwritten.
   */
  int OVERWRITE = 1;

}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
new file mode 100644
index 0000000..a6fe216
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
@@ -0,0 +1,286 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
/**
 * Constants for the registry, including configuration keys and default
 * values.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface RegistryConstants {

  /**
   * Prefix for registry configuration options: {@value}.
   * Why <code>hadoop.</code> and not YARN? It can
   * live outside YARN.
   */
  String REGISTRY_PREFIX = "hadoop.registry.";

  /**
   * Prefix for zookeeper-specific options: {@value}
   * <p>
   * For clients using other protocols, these options are not supported.
   */
  String ZK_PREFIX = REGISTRY_PREFIX + "zk.";

  /**
   * Flag to indicate whether or not the registry should
   * be enabled in the RM: {@value}.
   */
  String KEY_REGISTRY_ENABLED = REGISTRY_PREFIX + "rm.enabled";

  /**
   * Default value for enabling the registry in the RM: {@value}.
   */
  boolean DEFAULT_REGISTRY_ENABLED = false;

  /**
   * Key to set if the registry is secure: {@value}.
   * Turning it on changes the permissions policy from "open access"
   * to restrictions on kerberos with the option of
   * a user adding one or more auth key pairs down their
   * own tree.
   */
  String KEY_REGISTRY_SECURE = REGISTRY_PREFIX + "secure";

  /**
   * Default registry security policy: {@value}.
   */
  boolean DEFAULT_REGISTRY_SECURE = false;

  /**
   * Root path in the ZK tree for the registry: {@value}.
   */
  String KEY_REGISTRY_ZK_ROOT = ZK_PREFIX + "root";

  /**
   * Default root of the yarn registry: {@value}.
   */
  String DEFAULT_ZK_REGISTRY_ROOT = "/registry";

  /**
   * Registry client authentication policy.
   * <p>
   * This is only used in secure clusters.
   * <p>
   * If the Factory methods of {@link RegistryOperationsFactory}
   * are used, this key does not need to be set: it is set
   * up based on the factory method used.
   */
  String KEY_REGISTRY_CLIENT_AUTH =
      REGISTRY_PREFIX + "client.auth";

  /**
   * Registry client uses Kerberos: authentication is automatic from
   * the logged in user.
   */
  String REGISTRY_CLIENT_AUTH_KERBEROS = "kerberos";

  /**
   * Username/password is the authentication mechanism.
   * If set then both {@link #KEY_REGISTRY_CLIENT_AUTHENTICATION_ID}
   * and {@link #KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD} must be set.
   */
  String REGISTRY_CLIENT_AUTH_DIGEST = "digest";

  /**
   * No authentication; the client is anonymous.
   */
  String REGISTRY_CLIENT_AUTH_ANONYMOUS = "";

  /**
   * Registry client authentication ID.
   * <p>
   * This is only used in secure clusters with
   * {@link #KEY_REGISTRY_CLIENT_AUTH} set to
   * {@link #REGISTRY_CLIENT_AUTH_DIGEST}.
   */
  String KEY_REGISTRY_CLIENT_AUTHENTICATION_ID =
      KEY_REGISTRY_CLIENT_AUTH + ".id";

  /**
   * Registry client authentication password.
   * <p>
   * This is only used in secure clusters with the client set to
   * use digest (not SASL or anonymous) authentication.
   * <p>
   * Specifically, {@link #KEY_REGISTRY_CLIENT_AUTH} set to
   * {@link #REGISTRY_CLIENT_AUTH_DIGEST}.
   */
  String KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD =
      KEY_REGISTRY_CLIENT_AUTH + ".password";

  /**
   * List of hostname:port pairs defining the
   * zookeeper quorum binding for the registry: {@value}.
   */
  String KEY_REGISTRY_ZK_QUORUM = ZK_PREFIX + "quorum";

  /**
   * The default zookeeper quorum binding for the registry: {@value}.
   */
  String DEFAULT_REGISTRY_ZK_QUORUM = "localhost:2181";

  /**
   * Zookeeper session timeout in milliseconds: {@value}.
   */
  String KEY_REGISTRY_ZK_SESSION_TIMEOUT =
      ZK_PREFIX + "session.timeout.ms";

  /**
   * The default ZK session timeout: {@value}.
   */
  int DEFAULT_ZK_SESSION_TIMEOUT = 60000;

  /**
   * Zookeeper connection timeout in milliseconds: {@value}.
   */
  String KEY_REGISTRY_ZK_CONNECTION_TIMEOUT =
      ZK_PREFIX + "connection.timeout.ms";

  /**
   * The default ZK connection timeout: {@value}.
   */
  int DEFAULT_ZK_CONNECTION_TIMEOUT = 15000;

  /**
   * Zookeeper connection retry count before failing: {@value}.
   */
  String KEY_REGISTRY_ZK_RETRY_TIMES = ZK_PREFIX + "retry.times";

  /**
   * The default # of times to retry a ZK connection: {@value}.
   */
  int DEFAULT_ZK_RETRY_TIMES = 5;

  /**
   * Zookeeper connect interval in milliseconds: {@value}.
   */
  String KEY_REGISTRY_ZK_RETRY_INTERVAL =
      ZK_PREFIX + "retry.interval.ms";

  /**
   * The default interval between connection retries: {@value}.
   */
  int DEFAULT_ZK_RETRY_INTERVAL = 1000;

  /**
   * Zookeeper retry limit in milliseconds, during
   * exponential backoff: {@value}.
   *
   * This places a limit even
   * if the retry times and interval limit, combined
   * with the backoff policy, result in a long retry
   * period.
   */
  String KEY_REGISTRY_ZK_RETRY_CEILING =
      ZK_PREFIX + "retry.ceiling.ms";

  /**
   * Default limit on retries: {@value}.
   */
  int DEFAULT_ZK_RETRY_CEILING = 60000;

  /**
   * A comma separated list of Zookeeper ACL identifiers with
   * system access to the registry in a secure cluster: {@value}.
   *
   * These are given full access to all entries.
   *
   * If there is an "@" at the end of an entry it
   * instructs the registry client to append the kerberos realm as
   * derived from the login and {@link #KEY_REGISTRY_KERBEROS_REALM}.
   */
  String KEY_REGISTRY_SYSTEM_ACCOUNTS = REGISTRY_PREFIX + "system.accounts";

  /**
   * Default system accounts given global access to the registry: {@value}.
   */
  String DEFAULT_REGISTRY_SYSTEM_ACCOUNTS =
      "sasl:yarn@, sasl:mapred@, sasl:hdfs@, sasl:hadoop@";

  /**
   * A comma separated list of Zookeeper ACL identifiers with
   * system access to the registry in a secure cluster: {@value}.
   *
   * These are given full access to all entries.
   *
   * If there is an "@" at the end of an entry it
   * instructs the registry client to append the default kerberos domain.
   *
   * NOTE(review): this description appears copy-pasted from
   * {@link #KEY_REGISTRY_SYSTEM_ACCOUNTS}; confirm the intended semantics
   * of user (rather than system) accounts.
   */
  String KEY_REGISTRY_USER_ACCOUNTS = REGISTRY_PREFIX + "user.accounts";

  /**
   * Default user accounts: {@value}.
   */
  String DEFAULT_REGISTRY_USER_ACCOUNTS = "";

  /**
   * The kerberos realm: {@value}.
   *
   * This is used to set the realm of
   * system principals which do not declare their realm,
   * and any other accounts that need the value.
   *
   * If empty, the default realm of the running process
   * is used.
   *
   * If neither are known and the realm is needed, then the registry
   * service/client will fail.
   */
  String KEY_REGISTRY_KERBEROS_REALM = REGISTRY_PREFIX + "kerberos.realm";

  /**
   * Key to define the JAAS context. Used in secure registries: {@value}.
   */
  String KEY_REGISTRY_CLIENT_JAAS_CONTEXT = REGISTRY_PREFIX + "jaas.context";

  /**
   * Default client-side registry JAAS context: {@value}.
   */
  String DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT = "Client";

  /**
   * Path to users off the root: {@value}.
   */
  String PATH_USERS = "/users/";

  /**
   * Path to system services off the root: {@value}.
   */
  String PATH_SYSTEM_SERVICES = "/services/";

  /**
   * Path to system services under a user's home path: {@value}.
   */
  String PATH_USER_SERVICES = "/services/";

  /**
   * Path under a service record to point to components of that service:
   * {@value}.
   */
  String SUBPATH_COMPONENTS = "/components/";
}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java
new file mode 100644
index 0000000..c51bcf7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Registry Operations
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface RegistryOperations extends Service {
+
+  /**
+   * Create a path.
+   *
+   * It is not an error if the path exists already, be it empty or not.
+   *
+   * The createParents flag also requests creating the parents.
+   * As entries in the registry can hold data while still having
+   * child entries, it is not an error if any of the parent path
+   * elements have service records.
+   *
+   * @param path path to create
+   * @param createParents also create the parents.
+   * @throws PathNotFoundException parent path is not in the registry.
+   * @throws InvalidPathnameException path name is invalid.
+   * @throws IOException Any other IO Exception.
+   * @return true if the path was created, false if it existed.
+   */
+  boolean mknode(String path, boolean createParents)
+      throws PathNotFoundException,
+      InvalidPathnameException,
+      IOException;
+
+  /**
+   * Bind a path in the registry to a service record
+   * @param path path to service record
+   * @param record service record service record to create/update
+   * @param flags bind flags
+   * @throws PathNotFoundException the parent path does not exist
+   * @throws FileAlreadyExistsException path exists but create flags
+   * do not include "overwrite"
+   * @throws InvalidPathnameException path name is invalid.
+   * @throws IOException Any other IO Exception.
+   */
+  void bind(String path, ServiceRecord record, int flags)
+      throws PathNotFoundException,
+      FileAlreadyExistsException,
+      InvalidPathnameException,
+      IOException;
+
+  /**
+   * Resolve the record at a path
+   * @param path path to an entry containing a {@link ServiceRecord}
+   * @return the record
+   * @throws PathNotFoundException path is not in the registry.
+   * @throws NoRecordException if there is not a service record
+   * @throws InvalidRecordException if there was a service record but it could
+   * not be parsed.
+   * @throws IOException Any other IO Exception
+   */
+
+  ServiceRecord resolve(String path)
+      throws PathNotFoundException,
+      NoRecordException,
+      InvalidRecordException,
+      IOException;
+
+  /**
+   * Get the status of a path
+   * @param path path to query
+   * @return the status of the path
+   * @throws PathNotFoundException path is not in the registry.
+   * @throws InvalidPathnameException the path is invalid.
+   * @throws IOException Any other IO Exception
+   */
+  RegistryPathStatus stat(String path)
+      throws PathNotFoundException,
+      InvalidPathnameException,
+      IOException;
+
+  /**
+   * Probe for a path existing.
+   * This is equivalent to {@link #stat(String)} with
+   * any failure downgraded to a
+   * @param path path to query
+   * @return true if the path was found
+   * @throws IOException
+   */
+  boolean exists(String path) throws IOException;
+
+  /**
+   * List all entries under a registry path, returning the relative names
+   * of the entries.
+   * @param path path to query
+   * @return a possibly empty list of the short path names of
+   * child entries.
+   * @throws PathNotFoundException
+   * @throws InvalidPathnameException
+   * @throws IOException
+   */
+   List<String> list(String path) throws
+      PathNotFoundException,
+      InvalidPathnameException,
+      IOException;
+
+  /**
+   * Delete a path.
+   *
+   * If the operation returns without an error then the entry has been
+   * deleted.
+   * @param path path delete recursively
+   * @param recursive recursive flag
+   * @throws PathNotFoundException path is not in the registry.
+   * @throws InvalidPathnameException the path is invalid.
+   * @throws PathIsNotEmptyDirectoryException path has child entries, but
+   * recursive is false.
+   * @throws IOException Any other IO Exception
+   *
+   */
+  void delete(String path, boolean recursive)
+      throws PathNotFoundException,
+      PathIsNotEmptyDirectoryException,
+      InvalidPathnameException,
+      IOException;
+
+  /**
+   * Add a new write access entry to be added to node permissions in all
+   * future write operations of a session connected to a secure registry.
+   *
+   * This does not grant the session any more rights: if it lacked any write
+   * access, it will still be unable to manipulate the registry.
+   *
+   * In an insecure cluster, this operation has no effect.
+   * @param id ID to use
+   * @param pass password
+   * @return true if the accessor was added: that is, the registry connection
+   * uses permissions to manage access
+   * @throws IOException on any failure to build the digest
+   */
+  boolean addWriteAccessor(String id, String pass) throws IOException;
+
+  /**
+   * Clear all write accessors.
+   *
+   * At this point all standard permissions/ACLs are retained,
+   * including any set on behalf of the user
+   * Only  accessors added via {@link #addWriteAccessor(String, String)}
+   * are removed.
+   */
+  public void clearWriteAccessors();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java
new file mode 100644
index 0000000..443654d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.api;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.ServiceStateException;
+import org.apache.hadoop.registry.client.impl.RegistryOperationsClient;
+
+import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
+
+/**
+ * A factory for registry operation service instances.
+ * <p>
+ * <i>Each created instance will be returned initialized.</i>
+ * <p>
+ * That is, the service will have had <code>Service.init(conf)</code> applied
+ * to it —possibly after the configuration has been modified to
+ * support the specific binding/security mechanism used
+ */
+public final class RegistryOperationsFactory {
+
+  private RegistryOperationsFactory() {
+  }
+
+  /**
+   * Create and initialize a registry operations instance.
+   * Access writes will be determined from the configuration
+   * @param conf configuration
+   * @return a registry operations instance
+   * @throws ServiceStateException on any failure to initialize
+   */
+  public static RegistryOperations createInstance(Configuration conf) {
+    return createInstance("RegistryOperations", conf);
+  }
+
+  /**
+   * Create and initialize a registry operations instance.
+   * Access rights will be determined from the configuration
+   * @param name name of the instance
+   * @param conf configuration
+   * @return a registry operations instance
+   * @throws ServiceStateException on any failure to initialize
+   */
+  public static RegistryOperations createInstance(String name, Configuration conf) {
+    Preconditions.checkArgument(conf != null, "Null configuration");
+    RegistryOperationsClient operations =
+        new RegistryOperationsClient(name);
+    operations.init(conf);
+    return operations;
+  }
+
+  /**
+   * Create and initialize an anonymous read/write registry operations instance.
+   * In a secure cluster, this instance will only have read access to the
+   * registry.
+   * @param conf configuration
+   * @return an anonymous registry operations instance
+   *
+   * @throws ServiceStateException on any failure to initialize
+   */
+  public static RegistryOperations createAnonymousInstance(Configuration conf) {
+    Preconditions.checkArgument(conf != null, "Null configuration");
+    conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_ANONYMOUS);
+    return createInstance("AnonymousRegistryOperations", conf);
+  }
+
+  /**
+   * Create and initialize an secure, Kerberos-authenticated instance.
+   *
+   * The user identity will be inferred from the current user
+   *
+   * The authentication of this instance will expire when any kerberos
+   * tokens needed to authenticate with the registry infrastructure expire.
+   * @param conf configuration
+   * @param jaasContext the JAAS context of the account.
+   * @return a registry operations instance
+   * @throws ServiceStateException on any failure to initialize
+   */
+  public static RegistryOperations createKerberosInstance(Configuration conf,
+      String jaasContext) {
+    Preconditions.checkArgument(conf != null, "Null configuration");
+    conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_KERBEROS);
+    conf.set(KEY_REGISTRY_CLIENT_JAAS_CONTEXT, jaasContext);
+    return createInstance("KerberosRegistryOperations", conf);
+  }
+
+  /**
+   * Create and initialize an operations instance authenticated with write
+   * access via an <code>id:password</code> pair.
+   *
+   * The instance will have the read access
+   * across the registry, but write access only to that part of the registry
+   * to which it has been give the relevant permissions.
+   * @param conf configuration
+   * @param id user ID
+   * @param password password
+   * @return a registry operations instance
+   * @throws ServiceStateException on any failure to initialize
+   * @throws IllegalArgumentException if an argument is invalid
+   */
+  public static RegistryOperations createAuthenticatedInstance(Configuration conf,
+      String id,
+      String password) {
+    Preconditions.checkArgument(!StringUtils.isEmpty(id), "empty Id");
+    Preconditions.checkArgument(!StringUtils.isEmpty(password), "empty Password");
+    Preconditions.checkArgument(conf != null, "Null configuration");
+    conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_DIGEST);
+    conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, id);
+    conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, password);
+    return createInstance("DigestRegistryOperations", conf);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/package-info.java
new file mode 100644
index 0000000..f5f844e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/package-info.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
/**
 * YARN Registry Client API.
 *
 * This package contains the core API for the YARN registry.
 *
 * <ol>
 *   <li> Data types can be found in
 * {@link org.apache.hadoop.registry.client.types}</li>
 *   <li> Exceptions are listed in
 * {@link org.apache.hadoop.registry.client.exceptions}</li>
 *   <li> Classes to assist use of the registry are in
 * {@link org.apache.hadoop.registry.client.binding}</li>
 * </ol>
 */
package org.apache.hadoop.registry.client.api;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java
new file mode 100644
index 0000000..e086e36
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java
@@ -0,0 +1,327 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.binding;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.DeserializationConfig;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataOutputStream;
+import java.io.EOFException;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Support for marshalling objects to and from JSON.
+ *  <p>
+ * It constructs an object mapper as an instance field.
+ * and synchronizes access to those methods
+ * which use the mapper
+ * @param <T> Type to marshal.
+ */
+@InterfaceAudience.Private()
+@InterfaceStability.Evolving
+public class JsonSerDeser<T> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(JsonSerDeser.class);
+  private static final String UTF_8 = "UTF-8";
+  public static final String E_NO_SERVICE_RECORD = "No service record at path";
+
+  private final Class<T> classType;
+  private final ObjectMapper mapper;
+  private final byte[] header;
+
+  /**
+   * Create an instance bound to a specific type
+   * @param classType class to marshall
+   * @param header byte array to use as header
+   */
+  public JsonSerDeser(Class<T> classType, byte[] header) {
+    Preconditions.checkArgument(classType != null, "null classType");
+    Preconditions.checkArgument(header != null, "null header");
+    this.classType = classType;
+    this.mapper = new ObjectMapper();
+    mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES,
+        false);
+    // make an immutable copy to keep findbugs happy.
+    byte[] h = new byte[header.length];
+    System.arraycopy(header, 0, h, 0, header.length);
+    this.header = h;
+  }
+
+  public String getName() {
+    return classType.getSimpleName();
+  }
+
+  /**
+   * Convert from JSON
+   *
+   * @param json input
+   * @return the parsed JSON
+   * @throws IOException IO
+   * @throws JsonMappingException failure to map from the JSON to this class
+   */
+  @SuppressWarnings("unchecked")
+  public synchronized T fromJson(String json)
+      throws IOException, JsonParseException, JsonMappingException {
+    try {
+      return mapper.readValue(json, classType);
+    } catch (IOException e) {
+      LOG.error("Exception while parsing json : " + e + "\n" + json, e);
+      throw e;
+    }
+  }
+
+  /**
+   * Convert from a JSON file
+   * @param jsonFile input file
+   * @return the parsed JSON
+   * @throws IOException IO problems
+   * @throws JsonMappingException failure to map from the JSON to this class
+   */
+  @SuppressWarnings("unchecked")
+  public synchronized T fromFile(File jsonFile)
+      throws IOException, JsonParseException, JsonMappingException {
+    try {
+      return mapper.readValue(jsonFile, classType);
+    } catch (IOException e) {
+      LOG.error("Exception while parsing json file {}: {}", jsonFile, e);
+      throw e;
+    }
+  }
+
+  /**
+   * Convert from a JSON file
+   * @param resource input file
+   * @return the parsed JSON
+   * @throws IOException IO problems
+   * @throws JsonMappingException failure to map from the JSON to this class
+   */
+  @SuppressWarnings({"IOResourceOpenedButNotSafelyClosed"})
+  public synchronized T fromResource(String resource)
+      throws IOException, JsonParseException, JsonMappingException {
+    InputStream resStream = null;
+    try {
+      resStream = this.getClass().getResourceAsStream(resource);
+      if (resStream == null) {
+        throw new FileNotFoundException(resource);
+      }
+      return mapper.readValue(resStream, classType);
+    } catch (IOException e) {
+      LOG.error("Exception while parsing json resource {}: {}", resource, e);
+      throw e;
+    } finally {
+      IOUtils.closeStream(resStream);
+    }
+  }
+
+  /**
+   * clone by converting to JSON and back again.
+   * This is much less efficient than any Java clone process.
+   * @param instance instance to duplicate
+   * @return a new instance
+   * @throws IOException problems.
+   */
+  public T fromInstance(T instance) throws IOException {
+    return fromJson(toJson(instance));
+  }
+
+  /**
+   * Load from a Hadoop filesystem
+   * @param fs filesystem
+   * @param path path
+   * @return a loaded CD
+   * @throws IOException IO problems
+   * @throws EOFException if not enough bytes were read in
+   * @throws JsonParseException parse problems
+   * @throws JsonMappingException O/J mapping problems
+   */
+  public T load(FileSystem fs, Path path)
+      throws IOException, JsonParseException, JsonMappingException {
+    FileStatus status = fs.getFileStatus(path);
+    long len = status.getLen();
+    byte[] b = new byte[(int) len];
+    FSDataInputStream dataInputStream = fs.open(path);
+    int count = dataInputStream.read(b);
+    if (count != len) {
+      throw new EOFException(path.toString() + ": read finished prematurely");
+    }
+    return fromBytes(path.toString(), b, 0);
+  }
+
+  /**
+   * Save a cluster description to a hadoop filesystem
+   * @param fs filesystem
+   * @param path path
+   * @param overwrite should any existing file be overwritten
+   * @throws IOException IO exception
+   */
+  public void save(FileSystem fs, Path path, T instance,
+      boolean overwrite) throws
+      IOException {
+    FSDataOutputStream dataOutputStream = fs.create(path, overwrite);
+    writeJsonAsBytes(instance, dataOutputStream);
+  }
+
+  /**
+   * Write the json as bytes -then close the file
+   * @param dataOutputStream an outout stream that will always be closed
+   * @throws IOException on any failure
+   */
+  private void writeJsonAsBytes(T instance,
+      DataOutputStream dataOutputStream) throws
+      IOException {
+    try {
+      byte[] b = toBytes(instance);
+      dataOutputStream.write(b);
+    } finally {
+      dataOutputStream.close();
+    }
+  }
+
+  /**
+   * Convert JSON To bytes
+   * @param instance instance to convert
+   * @return a byte array
+   * @throws IOException
+   */
+  public byte[] toBytes(T instance) throws IOException {
+    String json = toJson(instance);
+    return json.getBytes(UTF_8);
+  }
+
+  /**
+   * Convert JSON To bytes, inserting the header
+   * @param instance instance to convert
+   * @return a byte array
+   * @throws IOException
+   */
+  public byte[] toByteswithHeader(T instance) throws IOException {
+    byte[] body = toBytes(instance);
+
+    ByteBuffer buffer = ByteBuffer.allocate(body.length + header.length);
+    buffer.put(header);
+    buffer.put(body);
+    return buffer.array();
+  }
+
+  /**
+   * Deserialize from a byte array
+   * @param path path the data came from
+   * @param bytes byte array
+   * @return offset in the array to read from
+   * @throws IOException all problems
+   * @throws EOFException not enough data
+   * @throws InvalidRecordException if the parsing failed -the record is invalid
+   */
+  public T fromBytes(String path, byte[] bytes, int offset) throws IOException,
+      InvalidRecordException {
+    int data = bytes.length - offset;
+    if (data <= 0) {
+      throw new EOFException("No data at " + path);
+    }
+    String json = new String(bytes, offset, data, UTF_8);
+    try {
+      return fromJson(json);
+    } catch (JsonProcessingException e) {
+      throw new InvalidRecordException(path, e.toString(), e);
+    }
+  }
+
+  /**
+   * Read from a byte array to a type, checking the header first
+   * @param path source of data
+   * @param buffer buffer
+   * @return the parsed structure
+   * Null if the record was too short or the header did not match
+   * @throws IOException on a failure
+   * @throws NoRecordException if header checks implied there was no record
+   * @throws InvalidRecordException if record parsing failed
+   */
+  @SuppressWarnings("unchecked")
+  public T fromBytesWithHeader(String path, byte[] buffer) throws IOException {
+    int hlen = header.length;
+    int blen = buffer.length;
+    if (hlen > 0) {
+      if (blen < hlen) {
+        throw new NoRecordException(path, E_NO_SERVICE_RECORD);
+      }
+      byte[] magic = Arrays.copyOfRange(buffer, 0, hlen);
+      if (!Arrays.equals(header, magic)) {
+        LOG.debug("start of entry does not match service record header at {}",
+            path);
+        throw new NoRecordException(path, E_NO_SERVICE_RECORD);
+      }
+    }
+    return fromBytes(path, buffer, hlen);
+  }
+
+  /**
+   * Check if a buffer has a header which matches this record type
+   * @param buffer buffer
+   * @return true if there is a match
+   * @throws IOException
+   */
+  public boolean headerMatches(byte[] buffer) throws IOException {
+    int hlen = header.length;
+    int blen = buffer.length;
+    boolean matches = false;
+    if (blen > hlen) {
+      byte[] magic = Arrays.copyOfRange(buffer, 0, hlen);
+      matches = Arrays.equals(header, magic);
+    }
+    return matches;
+  }
+
+  /**
+   * Convert an object to a JSON string
+   * @param instance instance to convert
+   * @return a JSON string description
+   * @throws JsonParseException parse problems
+   * @throws JsonMappingException O/J mapping problems
+   */
+  public synchronized String toJson(T instance) throws IOException,
+      JsonGenerationException,
+      JsonMappingException {
+    mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
+    return mapper.writeValueAsString(instance);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a326711/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java
new file mode 100644
index 0000000..5d8ea3f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.binding;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.impl.zk.RegistryInternalConstants;
+import org.apache.zookeeper.common.PathUtils;
+
+import java.net.IDN;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Pattern;
+
+/**
+ * Basic operations on paths: manipulating them and creating and validating
+ * path elements.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class RegistryPathUtils {
+
+  /**
+   * Compiled down pattern to validate single entries in the path
+   */
+  private static final Pattern PATH_ENTRY_VALIDATION_PATTERN =
+      Pattern.compile(RegistryInternalConstants.VALID_PATH_ENTRY_PATTERN);
+
+  /**
+   * Validate ZK path with the path itself included in
+   * the exception text
+   * @param path path to validate
+   * @return the path parameter
+   * @throws InvalidPathnameException if the pathname is invalid.
+   */
+  public static String validateZKPath(String path) throws
+      InvalidPathnameException {
+    try {
+      PathUtils.validatePath(path);
+
+    } catch (IllegalArgumentException e) {
+      throw new InvalidPathnameException(path,
+          "Invalid Path \"" + path + "\" : " + e, e);
+    }
+    return path;
+  }
+
+  /**
+   * Validate ZK path as valid for a DNS hostname.
+   * @param path path to validate
+   * @return the path parameter
+   * @throws InvalidPathnameException if the pathname is invalid.
+   */
+  public static String validateElementsAsDNS(String path) throws
+      InvalidPathnameException {
+    List<String> splitpath = split(path);
+    for (String fragment : splitpath) {
+      if (!PATH_ENTRY_VALIDATION_PATTERN.matcher(fragment).matches()) {
+        throw new InvalidPathnameException(path,
+            "Invalid Path element \"" + fragment + "\"");
+      }
+    }
+    return path;
+  }
+
+  /**
+   * Create a full path from the registry root and the supplied subdir
+   * @param path path of operation
+   * @return an absolute path
+   * @throws InvalidPathnameException if the path is invalid
+   */
+  public static String createFullPath(String base, String path) throws
+      InvalidPathnameException {
+    Preconditions.checkArgument(path != null, "null path");
+    Preconditions.checkArgument(base != null, "null path");
+    return validateZKPath(join(base, path));
+  }
+
+  /**
+   * Join two paths, guaranteeing that there will not be exactly
+   * one separator between the two, and exactly one at the front
+   * of the path. There will be no trailing "/" except for the special
+   * case that this is the root path
+   * @param base base path
+   * @param path second path to add
+   * @return a combined path.
+   */
+  public static String join(String base, String path) {
+    Preconditions.checkArgument(path != null, "null path");
+    Preconditions.checkArgument(base != null, "null path");
+    StringBuilder fullpath = new StringBuilder();
+
+    if (!base.startsWith("/")) {
+      fullpath.append('/');
+    }
+    fullpath.append(base);
+
+    // guarantee a trailing /
+    if (!fullpath.toString().endsWith("/")) {
+      fullpath.append("/");
+    }
+    // strip off any at the beginning
+    if (path.startsWith("/")) {
+      // path starts with /, so append all other characters -if present
+      if (path.length() > 1) {
+        fullpath.append(path.substring(1));
+      }
+    } else {
+      fullpath.append(path);
+    }
+
+    //here there may be a trailing "/"
+    String finalpath = fullpath.toString();
+    if (finalpath.endsWith("/") && !"/".equals(finalpath)) {
+      finalpath = finalpath.substring(0, finalpath.length() - 1);
+
+    }
+    return finalpath;
+  }
+
+  /**
+   * split a path into elements, stripping empty elements
+   * @param path the path
+   * @return the split path
+   */
+  public static List<String> split(String path) {
+    //
+    String[] pathelements = path.split("/");
+    List<String> dirs = new ArrayList<String>(pathelements.length);
+    for (String pathelement : pathelements) {
+      if (!pathelement.isEmpty()) {
+        dirs.add(pathelement);
+      }
+    }
+    return dirs;
+  }
+
+  /**
+   * Get the last entry in a path; for an empty path
+   * returns "". The split logic is that of
+   * {@link #split(String)}
+   * @param path path of operation
+   * @return the last path entry or "" if none.
+   */
+  public static String lastPathEntry(String path) {
+    List<String> splits = split(path);
+    if (splits.isEmpty()) {
+      // empty path. Return ""
+      return "";
+    } else {
+      return splits.get(splits.size() - 1);
+    }
+  }
+
+  /**
+   * Get the parent of a path
+   * @param path path to look at
+   * @return the parent path
+   * @throws PathNotFoundException if the path was at root.
+   */
+  public static String parentOf(String path) throws PathNotFoundException {
+    List<String> elements = split(path);
+
+    int size = elements.size();
+    if (size == 0) {
+      throw new PathNotFoundException("No parent of " + path);
+    }
+    if (size == 1) {
+      return "/";
+    }
+    elements.remove(size - 1);
+    StringBuilder parent = new StringBuilder(path.length());
+    for (String element : elements) {
+      parent.append("/");
+      parent.append(element);
+    }
+    return parent.toString();
+  }
+
+  /**
+   * Perform any formatting for the registry needed to convert
+   * non-simple-DNS elements
+   * @param element element to encode
+   * @return an encoded string
+   */
+  public static String encodeForRegistry(String element) {
+    return IDN.toASCII(element);
+  }
+
+  /**
+   * Perform whatever transforms are needed to get a YARN ID into
+   * a DNS-compatible name
+   * @param yarnId ID as string of YARN application, instance or container
+   * @return a string suitable for use in registry paths.
+   */
+  public static String encodeYarnID(String yarnId) {
+    return yarnId.replace("_", "-");
+  }
+}


Mime
View raw message