hadoop-hdfs-commits mailing list archives

From vino...@apache.org
Subject svn commit: r1513258 [5/6] - in /hadoop/common/branches/YARN-321/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/tomcat/ROOT/ hadoop-hdfs-nfs/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/ hadoop-hdfs-nfs/src/m...
Date Mon, 12 Aug 2013 21:26:09 GMT
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Mon Aug 12 21:25:49 2013
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.io.retry.AtMostOnce;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
 
 /**********************************************************************
@@ -81,6 +83,7 @@ public interface DatanodeProtocol {
    * @return the given {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration} with
    *  updated registration information
    */
+  @Idempotent
   public DatanodeRegistration registerDatanode(DatanodeRegistration registration
       ) throws IOException;
   
@@ -98,6 +101,7 @@ public interface DatanodeProtocol {
    * @param failedVolumes number of failed volumes
    * @throws IOException on error
    */
+  @Idempotent
   public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
                                        StorageReport[] reports,
                                        int xmitsInProgress,
@@ -120,6 +124,7 @@ public interface DatanodeProtocol {
    * @return - the next command for DN to process.
    * @throws IOException
    */
+  @Idempotent
   public DatanodeCommand blockReport(DatanodeRegistration registration,
       String poolId, StorageBlockReport[] reports) throws IOException;
     
@@ -133,6 +138,7 @@ public interface DatanodeProtocol {
    * writes a new Block here, or another DataNode copies a Block to
    * this DataNode, it will call blockReceived().
    */
+  @Idempotent
   public void blockReceivedAndDeleted(DatanodeRegistration registration,
                             String poolId,
                             StorageReceivedDeletedBlocks[] rcvdAndDeletedBlocks)
@@ -142,21 +148,25 @@ public interface DatanodeProtocol {
    * errorReport() tells the NameNode about something that has gone
    * awry.  Useful for debugging.
    */
+  @Idempotent
   public void errorReport(DatanodeRegistration registration,
                           int errorCode, 
                           String msg) throws IOException;
     
+  @Idempotent
   public NamespaceInfo versionRequest() throws IOException;
 
   /**
    * same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}
    */
+  @Idempotent
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException;
   
   /**
    * Commit block synchronization in lease recovery
    */
+  @Idempotent
   public void commitBlockSynchronization(ExtendedBlock block,
       long newgenerationstamp, long newlength,
       boolean closeFile, boolean deleteblock, DatanodeID[] newtargets,

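The @Idempotent and @AtMostOnce markers added above drive client-side retry
behavior: an @Idempotent call may safely be re-sent after a failure, while an
@AtMostOnce call must not be re-executed. Below is a minimal sketch, not the
actual Hadoop retry logic, of how an RPC proxy could consult these
runtime-retained annotations; the helper class name is hypothetical.

    import java.lang.reflect.Method;

    import org.apache.hadoop.io.retry.AtMostOnce;
    import org.apache.hadoop.io.retry.Idempotent;

    public class RetryDecision {
      /**
       * Returns true only for methods whose repetition cannot change
       * server state, i.e. those marked @Idempotent.
       */
      public static boolean safeToRetryBlindly(Method method) {
        if (method.isAnnotationPresent(Idempotent.class)) {
          return true;   // e.g. sendHeartbeat, blockReport: replaying is harmless
        }
        if (method.isAnnotationPresent(AtMostOnce.class)) {
          return false;  // must not re-execute; rely on the server retry cache
        }
        return false;    // unannotated: treat as non-retriable
      }
    }
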
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Mon Aug 12 21:25:49 2013
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.io.retry.AtMostOnce;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
 
 /*****************************************************************************
@@ -73,6 +75,7 @@ public interface NamenodeProtocol {
    * @throws IOException if size is less than or equal to 0 or
                                    datanode does not exist
    */
+  @Idempotent
   public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
   throws IOException;
 
@@ -82,6 +85,7 @@ public interface NamenodeProtocol {
    * @return ExportedBlockKeys containing current block keys
    * @throws IOException 
    */
+  @Idempotent
   public ExportedBlockKeys getBlockKeys() throws IOException;
 
   /**
@@ -90,11 +94,13 @@ public interface NamenodeProtocol {
    * case of a non-active node.
    * @throws IOException
    */
+  @Idempotent
   public long getTransactionID() throws IOException;
 
   /**
    * Get the transaction ID of the most recent checkpoint.
    */
+  @Idempotent
   public long getMostRecentCheckpointTxId() throws IOException;
 
   /**
@@ -103,6 +109,7 @@ public interface NamenodeProtocol {
    * @throws IOException
    * @return a unique token to identify this transaction.
    */
+  @Idempotent
   public CheckpointSignature rollEditLog() throws IOException;
 
   /**
@@ -112,6 +119,7 @@ public interface NamenodeProtocol {
    *          of the name-node
    * @throws IOException
    */
+  @Idempotent
   public NamespaceInfo versionRequest() throws IOException;
 
   /**
@@ -124,6 +132,7 @@ public interface NamenodeProtocol {
    * @param msg free text description of the error
    * @throws IOException
    */
+  @Idempotent
   public void errorReport(NamenodeRegistration registration,
                           int errorCode, 
                           String msg) throws IOException;
@@ -134,6 +143,7 @@ public interface NamenodeProtocol {
    * @return  {@link NamenodeRegistration} of the node,
    *          which this node has just registered with.
    */
+  @Idempotent
   public NamenodeRegistration registerSubordinateNamenode(
       NamenodeRegistration registration) throws IOException;
 
@@ -151,6 +161,7 @@ public interface NamenodeProtocol {
    * @return {@link CheckpointCommand} if checkpoint is allowed.
    * @throws IOException
    */
+  @AtMostOnce
   public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
   throws IOException;
 
@@ -162,6 +173,7 @@ public interface NamenodeProtocol {
    * @param sig {@code CheckpointSignature} which identifies the checkpoint.
    * @throws IOException
    */
+  @AtMostOnce
   public void endCheckpoint(NamenodeRegistration registration,
                             CheckpointSignature sig) throws IOException;
   
@@ -171,6 +183,7 @@ public interface NamenodeProtocol {
    * available to be fetched from the NameNode.
    * @param sinceTxId return only logs that contain transactions >= sinceTxId
    */
+  @Idempotent
   public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
     throws IOException;
 }
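
startCheckpoint and endCheckpoint are tagged @AtMostOnce rather than
@Idempotent because re-executing them would advance checkpoint state a second
time; a retried call must instead be answered from a server-side retry cache
(see dfs.namenode.enable.retrycache below). The toy class that follows, with
hypothetical names, only illustrates the replay idea behind such a cache.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.Callable;

    public class ToyRetryCache<R> {
      private final Map<String, R> responses = new HashMap<String, R>();

      /**
       * Executes op at most once per (clientId, callId); a retry of the
       * same call replays the cached response instead of re-executing.
       * Synchronized for simplicity; the real cache also expires entries.
       */
      public synchronized R invoke(String clientId, int callId,
          Callable<R> op) throws Exception {
        String key = clientId + ":" + callId;
        if (responses.containsKey(key)) {
          return responses.get(key);   // duplicate: replay, do not re-execute
        }
        R result = op.call();          // first and only execution
        responses.put(key, result);
        return result;
      }
    }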

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Mon Aug 12 21:25:49 2013
@@ -47,11 +47,11 @@ import org.apache.hadoop.hdfs.NameNodePr
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
@@ -628,6 +628,7 @@ public class DFSAdmin extends FsShell {
 
     String metaSave = "-metasave <filename>: \tSave Namenode's primary data structures\n" +
       "\t\tto <filename> in the directory specified by hadoop.log.dir property.\n" +
+      "\t\t<filename> is overwritten if it exists.\n" +
       "\t\t<filename> will contain one line for each of the following\n" +
       "\t\t\t1. Datanodes heart beating with Namenode\n" +
       "\t\t\t2. Blocks waiting to be replicated\n" +

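The amended -metasave help text above clarifies that <filename> is
overwritten if it already exists. As a usage sketch, the command can also be
driven programmatically through ToolRunner; the output file name here is
arbitrary.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class MetasaveExample {
      public static void main(String[] args) throws Exception {
        // Writes the Namenode's primary data structures to meta.txt under
        // hadoop.log.dir, overwriting any previous file of that name.
        int rc = ToolRunner.run(new Configuration(), new DFSAdmin(),
            new String[] { "-metasave", "meta.txt" });
        System.exit(rc);
      }
    }
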
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Mon Aug 12 21:25:49 2013
@@ -126,7 +126,7 @@ class ImageLoaderCurrent implements Imag
                                       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
       -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-      -40, -41, -42, -43, -44, -45, -46 };
+      -40, -41, -42, -43, -44, -45, -46, -47 };
   private int imageVersion = 0;
   
   private final Map<Long, String> subtreeMap = new HashMap<Long, String>();

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Mon Aug 12 21:25:49 2013
@@ -249,7 +249,7 @@ public class JsonUtil {
     final long fileId = m.containsKey("fileId") ? (Long) m.get("fileId")
         : INodeId.GRANDFATHER_INODE_ID;
     Long childrenNumLong = (Long) m.get("childrenNum");
-    final int childrenNum = (childrenNumLong == null) ? 0
+    final int childrenNum = (childrenNumLong == null) ? -1
             : childrenNumLong.intValue();
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group,

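Together with the matching hdfs.proto change below, the JsonUtil change above
makes -1, not 0, the sentinel for a missing childrenNum, so an unreported
count is no longer indistinguishable from a genuinely empty directory. A
hypothetical consumer-side check:

    public class ChildrenNumExample {
      /** Interprets childrenNum as decoded by JsonUtil after this change. */
      public static String describe(int childrenNum) {
        if (childrenNum < 0) {
          return "children count not reported by the server";
        }
        return childrenNum + " children";   // 0 now really means "empty"
      }
    }
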
Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/overview.html
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1507259
  Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1503799-1513205

Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/native/docs/libhdfs_footer.html
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Mon Aug 12 21:25:49 2013
@@ -54,11 +54,17 @@ message ClientOperationHeaderProto {
   required string clientName = 2;
 }
 
+message CachingStrategyProto {
+  optional bool dropBehind = 1;
+  optional int64 readahead = 2;
+}
+
 message OpReadBlockProto {
   required ClientOperationHeaderProto header = 1;
   required uint64 offset = 2;
   required uint64 len = 3;
   optional bool sendChecksums = 4 [default = true];
+  optional CachingStrategyProto cachingStrategy = 5;
 }
 
 
@@ -100,6 +106,7 @@ message OpWriteBlockProto {
    * The requested checksum mechanism for this block write.
    */
   required ChecksumProto requestedChecksum = 9;
+  optional CachingStrategyProto cachingStrategy = 10;
 }
   
 message OpTransferBlockProto {

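The new CachingStrategyProto carries the client's page-cache hints
(drop-behind and readahead) inside read and write operations. Below is a
minimal construction sketch using the protobuf-generated builder; the
generated class is assumed to live in DataTransferProtos, and the 4 MB
readahead value is arbitrary.

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;

    public class CachingStrategyExample {
      public static CachingStrategyProto defaultHints() {
        return CachingStrategyProto.newBuilder()
            .setDropBehind(true)             // drop pages from the OS cache after use
            .setReadahead(4L * 1024 * 1024)  // ask for 4 MB of readahead
            .build();                        // both fields are optional
      }
    }
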
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Mon Aug 12 21:25:49 2013
@@ -179,7 +179,7 @@ message HdfsFileStatusProto {
 
   // Optional field for fileId
   optional uint64 fileId = 13 [default = 0]; // default as an invalid id
-  optional uint32 childrenNum = 14 [default = 0];
+  optional int32 childrenNum = 14 [default = -1];
 } 
 
 /**

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Mon Aug 12 21:25:49 2013
@@ -1307,4 +1307,88 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.client.cache.drop.behind.writes</name>
+  <value></value>
+  <description>
+    Just like dfs.datanode.drop.cache.behind.writes, this setting causes the
+    page cache to be dropped behind HDFS writes, potentially freeing up more
+    memory for other uses.  Unlike dfs.datanode.drop.cache.behind.writes, this
+    is a client-side setting rather than a setting for the entire datanode.
+    If present, this setting will override the DataNode default.
+
+    If the native libraries are not available to the DataNode, this
+    configuration has no effect.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.cache.drop.behind.reads</name>
+  <value></value>
+  <description>
+    Just like dfs.datanode.drop.cache.behind.reads, this setting causes the
+    page cache to be dropped behind HDFS reads, potentially freeing up more
+    memory for other uses.  Unlike dfs.datanode.drop.cache.behind.reads, this
+    is a client-side setting rather than a setting for the entire datanode.  If
+    present, this setting will override the DataNode default.
+
+    If the native libraries are not available to the DataNode, this
+    configuration has no effect.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.cache.readahead</name>
+  <value></value>
+  <description>
+    Just like dfs.datanode.readahead.bytes, this setting causes the datanode to
+    read ahead in the block file using posix_fadvise, potentially decreasing
+    I/O wait times.  Unlike dfs.datanode.readahead.bytes, this is a client-side
+    setting rather than a setting for the entire datanode.  If present, this
+    setting will override the DataNode default.
+
+    If the native libraries are not available to the DataNode, this
+    configuration has no effect.
+  </description>
+</property>
+
+<property>
+	<name>dfs.namenode.enable.retrycache</name>
+	<value>true</value>
+	<description>
+	  This enables the retry cache on the namenode. The namenode tracks the
+	  response to each non-idempotent request; if a client retries such a
+	  request, the cached response is sent back instead of re-executing the
+	  operation. Such operations are tagged with the @AtMostOnce annotation
+	  in the namenode protocols. It is recommended that this flag be set to
+	  true; setting it to false will result in clients receiving failure
+	  responses to retried requests. This flag must be enabled in an HA
+	  setup for transparent failovers.
+	  
+	  The entries in the cache have expiration time configurable
+	  using dfs.namenode.retrycache.expirytime.millis.
+	</description>
+</property>
+
+<property>
+	<name>dfs.namenode.retrycache.expirytime.millis</name>
+	<value>600000</value>
+	<description>
+	  The time for which retry cache entries are retained.
+	</description>
+</property>
+
+<property>
+	<name>dfs.namenode.retrycache.heap.percent</name>
+	<value>0.03f</value>
+	<description>
+	  This parameter configures the fraction of the heap allocated for the
+	  retry cache (excluding the cached responses). This corresponds to
+	  approximately 4096 entries for every 64MB of namenode process java
+	  heap size. Assuming a retry cache entry expiration time (configured
+	  using dfs.namenode.retrycache.expirytime.millis) of 10 minutes, this
+	  enables the retry cache to support about 7 operations per second
+	  sustained for 10 minutes. The supported operation rate increases
+	  linearly with the heap size.
+	</description>
+</property>
 </configuration>

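The new keys above can also be set programmatically. The sketch below applies
the client-side caching overrides and the NameNode retry cache defaults from
this commit; note that 4096 entries per 64 MB of heap over the 600-second
expiry works out to the quoted ~7 sustained operations per second
(4096 / 600 ≈ 6.8).

    import org.apache.hadoop.conf.Configuration;

    public class NewHdfsKeysExample {
      public static Configuration configure() {
        Configuration conf = new Configuration();
        // Client-side page-cache hints; no effect without the native libraries.
        conf.setBoolean("dfs.client.cache.drop.behind.writes", true);
        conf.setBoolean("dfs.client.cache.drop.behind.reads", true);
        conf.setLong("dfs.client.cache.readahead", 4L * 1024 * 1024);
        // NameNode retry cache, required for transparent HA failover.
        conf.setBoolean("dfs.namenode.enable.retrycache", true);
        conf.setLong("dfs.namenode.retrycache.expirytime.millis", 600000L);
        conf.setFloat("dfs.namenode.retrycache.heap.percent", 0.03f);
        return conf;
      }
    }
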
Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1503799-1513205

Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1503799-1513205

Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css Mon Aug 12 21:25:49 2013
@@ -47,7 +47,6 @@ div#dfsnodetable a#title {
 }
 
 div#dfsnodetable td, th {
-	border-bottom-style : none;
         padding-bottom : 4px;
         padding-top : 4px;       
 }
@@ -103,6 +102,7 @@ table.nodes td {
 div#dfsnodetable td, div#dfsnodetable th, div.dfstable td {
 	padding-left : 10px;
 	padding-right : 10px;
+	border:1px solid black;
 }
 
 td.perc_filled {

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm Mon Aug 12 21:25:49 2013
@@ -185,6 +185,10 @@ curl -i --negotiate -u : "http://<HOST>:
 curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?delegation=<TOKEN>&op=..."
 +---------------------------------
 
+  []
+ 
+  See also: {{{../hadoop-common/HttpAuthentication.html}Authentication for Hadoop HTTP web-consoles}}
+
 * {Proxy Users}
 
   When the proxy user feature is enabled, a proxy user <P> may submit a request on behalf of another user <U>.
@@ -971,6 +975,12 @@ Transfer-Encoding: chunked
   See {{{http://tools.ietf.org/id/draft-zyp-json-schema-03.html}draft-zyp-json-schema-03}}
   for the syntax definitions of the JSON schemas.
 
+  <<Note>> that the default value of
+  {{{http://tools.ietf.org/id/draft-zyp-json-schema-03.html#additionalProperties}<<<additionalProperties>>>}}
+  is an empty schema which allows any value for additional properties.
+  Therefore, all WebHDFS JSON responses allow any additional property.
+  However, if additional properties are included in the responses, they are
+  considered optional properties in order to maintain compatibility.
 
 ** {Boolean JSON Schema}
 

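Since WebHDFS JSON responses may carry additional optional properties, a
client-side JSON binder should tolerate unknown fields rather than fail on
them. An illustrative sketch using Jackson 2, which is not necessarily the
JSON library a given WebHDFS client uses:

    import com.fasterxml.jackson.databind.DeserializationFeature;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class LenientWebHdfsMapper {
      public static ObjectMapper create() {
        // Unknown (additional) properties are skipped instead of failing
        // deserialization, preserving compatibility with newer servers.
        return new ObjectMapper()
            .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
      }
    }
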
Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1503799-1513205
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1507259

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java Mon Aug 12 21:25:49 2013
@@ -20,14 +20,18 @@ package org.apache.hadoop.fs;
 import static org.junit.Assert.*;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.regex.Pattern;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.*;
 
+import com.google.common.base.Joiner;
+
 public class TestGlobPaths {
   
   static class RegexPathFilter implements PathFilter {
@@ -784,4 +788,265 @@ public class TestGlobPaths {
     fs.delete(new Path(USER_DIR), true);
   }
   
+  /**
+   * A glob test that can be run on either FileContext or FileSystem.
+   */
+  private static interface FSTestWrapperGlobTest {
+    void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
+        throws Exception;
+  }
+
+  /**
+   * Run a glob test on FileSystem.
+   */
+  private static void testOnFileSystem(FSTestWrapperGlobTest test) throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      FileSystem fs = FileSystem.get(conf);
+      test.run(new FileSystemTestWrapper(fs), fs, null);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Run a glob test on FileContext.
+   */
+  private static void testOnFileContext(FSTestWrapperGlobTest test) throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      FileContext fc = FileContext.getFileContext(conf);
+      test.run(new FileContextTestWrapper(fc), null, fc);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  /**
+   * Accept all paths.
+   */
+  private static class AcceptAllPathFilter implements PathFilter {
+    @Override
+    public boolean accept(Path path) {
+      return true;
+    }
+  }
+
+  /**
+   * Accept only paths ending in Z.
+   */
+  private static class AcceptPathsEndingInZ implements PathFilter {
+    @Override
+    public boolean accept(Path path) {
+      String stringPath = path.toUri().getPath();
+      return stringPath.endsWith("z");
+    }
+  }
+
+  /**
+   * Test globbing through symlinks.
+   */
+  private static class TestGlobWithSymlinks implements FSTestWrapperGlobTest {
+    public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
+        throws Exception {
+      // Test that globbing through a symlink to a directory yields a path
+      // containing that symlink.
+      wrap.mkdir(new Path("/alpha"),
+          FsPermission.getDirDefault(), false);
+      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false);
+      wrap.mkdir(new Path("/alphaLink/beta"),
+          FsPermission.getDirDefault(), false);
+      // Test simple glob
+      FileStatus[] statuses =
+          wrap.globStatus(new Path("/alpha/*"), new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alpha/beta",
+          statuses[0].getPath().toUri().getPath());
+      // Test glob through symlink
+      statuses =
+          wrap.globStatus(new Path("/alphaLink/*"), new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alphaLink/beta",
+          statuses[0].getPath().toUri().getPath());
+      // If the terminal path component in a globbed path is a symlink,
+      // we don't dereference that link.
+      wrap.createSymlink(new Path("beta"), new Path("/alphaLink/betaLink"),
+          false);
+      statuses = wrap.globStatus(new Path("/alpha/betaLi*"),
+          new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alpha/betaLink",
+          statuses[0].getPath().toUri().getPath());
+      // todo: test symlink-to-symlink-to-dir, etc.
+    }
+  }
+
+  @Test
+  public void testGlobWithSymlinksOnFS() throws Exception {
+    testOnFileSystem(new TestGlobWithSymlinks());
+  }
+
+  @Test
+  public void testGlobWithSymlinksOnFC() throws Exception {
+    testOnFileContext(new TestGlobWithSymlinks());
+  }
+
+  /**
+   * Test globbing symlinks to symlinks.
+   *
+   * Also test globbing dangling symlinks.  It should NOT throw any exceptions!
+   */
+  private static class TestGlobWithSymlinksToSymlinks
+      implements FSTestWrapperGlobTest {
+    public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
+        throws Exception {
+      // Test that globbing through a symlink to a symlink to a directory
+      // fully resolves
+      wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false);
+      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false);
+      wrap.createSymlink(new Path("/alphaLink"),
+          new Path("/alphaLinkLink"), false);
+      wrap.mkdir(new Path("/alpha/beta"), FsPermission.getDirDefault(), false);
+      // Test glob through symlink to a symlink to a directory
+      FileStatus statuses[] =
+          wrap.globStatus(new Path("/alphaLinkLink"), new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alphaLinkLink",
+          statuses[0].getPath().toUri().getPath());
+      statuses =
+          wrap.globStatus(new Path("/alphaLinkLink/*"), new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alphaLinkLink/beta",
+          statuses[0].getPath().toUri().getPath());
+      // Test glob of dangling symlink (theta does not actually exist)
+      wrap.createSymlink(new Path("theta"), new Path("/alpha/kappa"), false);
+      statuses = wrap.globStatus(new Path("/alpha/kappa/kappa"),
+              new AcceptAllPathFilter());
+      Assert.assertNull(statuses);
+      // Test glob of symlinks
+      wrap.createFile("/alpha/beta/gamma");
+      wrap.createSymlink(new Path("gamma"),
+          new Path("/alpha/beta/gammaLink"), false);
+      wrap.createSymlink(new Path("gammaLink"),
+          new Path("/alpha/beta/gammaLinkLink"), false);
+      wrap.createSymlink(new Path("gammaLinkLink"),
+          new Path("/alpha/beta/gammaLinkLinkLink"), false);
+      statuses = wrap.globStatus(new Path("/alpha/*/gammaLinkLinkLink"),
+              new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alpha/beta/gammaLinkLinkLink",
+          statuses[0].getPath().toUri().getPath());
+      statuses = wrap.globStatus(new Path("/alpha/beta/*"),
+              new AcceptAllPathFilter());
+      Assert.assertEquals("/alpha/beta/gamma;/alpha/beta/gammaLink;" +
+          "/alpha/beta/gammaLinkLink;/alpha/beta/gammaLinkLinkLink",
+          TestPath.mergeStatuses(statuses));
+      // Let's create two symlinks that point to each other, and glob on them.
+      wrap.createSymlink(new Path("tweedledee"),
+          new Path("/tweedledum"), false);
+      wrap.createSymlink(new Path("tweedledum"),
+          new Path("/tweedledee"), false);
+      statuses = wrap.globStatus(new Path("/tweedledee/unobtainium"),
+              new AcceptAllPathFilter());
+      Assert.assertNull(statuses);
+    }
+  }
+
+  @Test
+  public void testGlobWithSymlinksToSymlinksOnFS() throws Exception {
+    testOnFileSystem(new TestGlobWithSymlinksToSymlinks());
+  }
+
+  @Test
+  public void testGlobWithSymlinksToSymlinksOnFC() throws Exception {
+    testOnFileContext(new TestGlobWithSymlinksToSymlinks());
+  }
+
+  /**
+   * Test globbing symlinks with a custom PathFilter
+   */
+  private static class TestGlobSymlinksWithCustomPathFilter
+      implements FSTestWrapperGlobTest {
+    public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
+        throws Exception {
+      // Test that globbing through a symlink to a symlink to a directory
+      // fully resolves
+      wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false);
+      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLinkz"), false);
+      wrap.mkdir(new Path("/alpha/beta"), FsPermission.getDirDefault(), false);
+      wrap.mkdir(new Path("/alpha/betaz"), FsPermission.getDirDefault(), false);
+      // Test glob through symlink to a symlink to a directory, with a PathFilter
+      FileStatus statuses[] =
+          wrap.globStatus(new Path("/alpha/beta"), new AcceptPathsEndingInZ());
+      Assert.assertNull(statuses);
+      statuses =
+          wrap.globStatus(new Path("/alphaLinkz/betaz"), new AcceptPathsEndingInZ());
+      Assert.assertEquals(1, statuses.length);
+      Assert.assertEquals("/alphaLinkz/betaz",
+          statuses[0].getPath().toUri().getPath());
+      statuses =
+          wrap.globStatus(new Path("/*/*"), new AcceptPathsEndingInZ());
+      Assert.assertEquals("/alpha/betaz;/alphaLinkz/betaz",
+          TestPath.mergeStatuses(statuses));
+      statuses =
+          wrap.globStatus(new Path("/*/*"), new AcceptAllPathFilter());
+      Assert.assertEquals("/alpha/beta;/alpha/betaz;" +
+          "/alphaLinkz/beta;/alphaLinkz/betaz",
+          TestPath.mergeStatuses(statuses));
+    }
+  }
+
+  @Test
+  public void testGlobSymlinksWithCustomPathFilterOnFS() throws Exception {
+    testOnFileSystem(new TestGlobSymlinksWithCustomPathFilter());
+  }
+
+  @Test
+  public void testGlobSymlinksWithCustomPathFilterOnFC() throws Exception {
+    testOnFileContext(new TestGlobSymlinksWithCustomPathFilter());
+  }
+
+  /**
+   * Test that globStatus fills in the scheme even when it is not provided.
+   */
+  private static class TestGlobFillsInScheme
+      implements FSTestWrapperGlobTest {
+    public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) 
+        throws Exception {
+      // Verify that the default scheme is hdfs, when we don't supply one.
+      wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false);
+      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false);
+      FileStatus statuses[] =
+          wrap.globStatus(new Path("/alphaLink"), new AcceptAllPathFilter());
+      Assert.assertEquals(1, statuses.length);
+      Path path = statuses[0].getPath();
+      Assert.assertEquals("/alphaLink", path.toUri().getPath());
+      Assert.assertEquals("hdfs", path.toUri().getScheme());
+      if (fc != null) {
+        // If we're using FileContext, then we can list a file:/// URI.
+        // Since everyone should have the root directory, we list that.
+        statuses =
+            wrap.globStatus(new Path("file:///"), new AcceptAllPathFilter());
+        Assert.assertEquals(1, statuses.length);
+        Path filePath = statuses[0].getPath();
+        Assert.assertEquals("file", filePath.toUri().getScheme());
+        Assert.assertEquals("/", filePath.toUri().getPath());
+      } else {
+        // The FileSystem we passed in should have scheme 'hdfs'
+        Assert.assertEquals("hdfs", fs.getScheme());
+      }
+    }
+  }
+
+  @Test
+  public void testGlobFillsInSchemeOnFS() throws Exception {
+    testOnFileSystem(new TestGlobFillsInScheme());
+  }
+
+  @Test
+  public void testGlobFillsInSchemeOnFC() throws Exception {
+    testOnFileContext(new TestGlobFillsInScheme());
+  }
 }

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java Mon Aug 12 21:25:49 2013
@@ -24,8 +24,10 @@ import java.net.URISyntaxException;
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -69,7 +71,11 @@ public class TestViewFileSystemHdfs exte
     
     fHdfs = cluster.getFileSystem(0);
     fHdfs2 = cluster.getFileSystem(1);
-    
+    fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        FsConstants.VIEWFS_URI.toString());
+    fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        FsConstants.VIEWFS_URI.toString());
+
     defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" + 
         UserGroupInformation.getCurrentUser().getShortUserName()));
     defaultWorkingDirectory2 = fHdfs2.makeQualified( new Path("/user/" + 

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java Mon Aug 12 21:25:49 2013
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.net.NetUtils;
 
@@ -155,7 +156,7 @@ public class BlockReaderTestUtil {
       testBlock.getBlockToken(), 
       offset, lenToRead,
       true, "BlockReaderTestUtil", TcpPeerServer.peerFromSocket(sock),
-      nodes[0], null, null, null, false);
+      nodes[0], null, null, null, false, CachingStrategy.newDefaultStrategy());
   }
 
   /**

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Mon Aug 12 21:25:49 2013
@@ -57,8 +57,11 @@ import org.apache.hadoop.fs.BlockLocatio
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -942,4 +945,102 @@ public class DFSTestUtil {
       return new DFSTestUtil(nFiles, maxLevels, maxSize, minSize);
     }
   }
+  
+  /**
+   * Run a set of operations and generate all edit logs
+   */
+  public static void runOperations(MiniDFSCluster cluster,
+      DistributedFileSystem filesystem, Configuration conf, long blockSize, 
+      int nnIndex) throws IOException {
+    // create FileContext for rename2
+    FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
+    
+    // OP_ADD 0
+    final Path pathFileCreate = new Path("/file_create");
+    FSDataOutputStream s = filesystem.create(pathFileCreate);
+    // OP_CLOSE 9
+    s.close();
+    // OP_RENAME_OLD 1
+    final Path pathFileMoved = new Path("/file_moved");
+    filesystem.rename(pathFileCreate, pathFileMoved);
+    // OP_DELETE 2
+    filesystem.delete(pathFileMoved, false);
+    // OP_MKDIR 3
+    Path pathDirectoryMkdir = new Path("/directory_mkdir");
+    filesystem.mkdirs(pathDirectoryMkdir);
+    // OP_ALLOW_SNAPSHOT 29
+    filesystem.allowSnapshot(pathDirectoryMkdir);
+    // OP_DISALLOW_SNAPSHOT 30
+    filesystem.disallowSnapshot(pathDirectoryMkdir);
+    // OP_CREATE_SNAPSHOT 26
+    String ssName = "snapshot1";
+    filesystem.allowSnapshot(pathDirectoryMkdir);
+    filesystem.createSnapshot(pathDirectoryMkdir, ssName);
+    // OP_RENAME_SNAPSHOT 28
+    String ssNewName = "snapshot2";
+    filesystem.renameSnapshot(pathDirectoryMkdir, ssName, ssNewName);
+    // OP_DELETE_SNAPSHOT 27
+    filesystem.deleteSnapshot(pathDirectoryMkdir, ssNewName);
+    // OP_SET_REPLICATION 4
+    s = filesystem.create(pathFileCreate);
+    s.close();
+    filesystem.setReplication(pathFileCreate, (short)1);
+    // OP_SET_PERMISSIONS 7
+    Short permission = 0777;
+    filesystem.setPermission(pathFileCreate, new FsPermission(permission));
+    // OP_SET_OWNER 8
+    filesystem.setOwner(pathFileCreate, new String("newOwner"), null);
+    // OP_CLOSE 9 see above
+    // OP_SET_GENSTAMP 10 see above
+    // OP_SET_NS_QUOTA 11 obsolete
+    // OP_CLEAR_NS_QUOTA 12 obsolete
+    // OP_TIMES 13
+    long mtime = 1285195527000L; // Wed, 22 Sep 2010 22:45:27 GMT
+    long atime = mtime;
+    filesystem.setTimes(pathFileCreate, mtime, atime);
+    // OP_SET_QUOTA 14
+    filesystem.setQuota(pathDirectoryMkdir, 1000L, 
+        HdfsConstants.QUOTA_DONT_SET);
+    // OP_RENAME 15
+    fc.rename(pathFileCreate, pathFileMoved, Rename.NONE);
+    // OP_CONCAT_DELETE 16
+    Path   pathConcatTarget = new Path("/file_concat_target");
+    Path[] pathConcatFiles  = new Path[2];
+    pathConcatFiles[0]      = new Path("/file_concat_0");
+    pathConcatFiles[1]      = new Path("/file_concat_1");
+
+    long length = blockSize * 3; // multiple of blocksize for concat
+    short replication = 1;
+    long seed = 1;
+    DFSTestUtil.createFile(filesystem, pathConcatTarget, length, replication,
+        seed);
+    DFSTestUtil.createFile(filesystem, pathConcatFiles[0], length, replication,
+        seed);
+    DFSTestUtil.createFile(filesystem, pathConcatFiles[1], length, replication,
+        seed);
+    filesystem.concat(pathConcatTarget, pathConcatFiles);
+    
+    // OP_SYMLINK 17
+    Path pathSymlink = new Path("/file_symlink");
+    fc.createSymlink(pathConcatTarget, pathSymlink, false);
+    
+    // OP_REASSIGN_LEASE 22
+    String filePath = "/hard-lease-recovery-test";
+    byte[] bytes = "foo-bar-baz".getBytes();
+    DFSClientAdapter.stopLeaseRenewer(filesystem);
+    FSDataOutputStream leaseRecoveryPath = filesystem.create(new Path(filePath));
+    leaseRecoveryPath.write(bytes);
+    leaseRecoveryPath.hflush();
+    // Set the hard lease timeout to 1 second.
+    cluster.setLeasePeriod(60 * 1000, 1000, nnIndex);
+    // wait for lease recovery to complete
+    LocatedBlocks locatedBlocks;
+    do {
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {}
+      locatedBlocks = DFSClientAdapter.callGetBlockLocations(
+          cluster.getNameNodeRpc(nnIndex), filePath, 0L, bytes.length);
+    } while (locatedBlocks.isUnderConstruction());
+  }
 }

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Mon Aug 12 21:25:49 2013
@@ -2038,6 +2038,10 @@ public class MiniDFSCluster {
     NameNodeAdapter.setLeasePeriod(getNamesystem(), soft, hard);
   }
   
+  public void setLeasePeriod(long soft, long hard, int nnIndex) {
+    NameNodeAdapter.setLeasePeriod(getNamesystem(nnIndex), soft, hard);
+  }
+  
   public void setWaitSafeMode(boolean wait) {
     this.waitSafeMode = wait;
   }

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java Mon Aug 12 21:25:49 2013
@@ -21,11 +21,12 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 
+import junit.framework.Assert;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -45,7 +46,7 @@ public class TestAbandonBlock {
   static final String FILE_NAME_PREFIX
       = "/" + TestAbandonBlock.class.getSimpleName() + "_"; 
   private MiniDFSCluster cluster;
-  private FileSystem fs;
+  private DistributedFileSystem fs;
 
   @Before
   public void setUp() throws Exception {
@@ -73,29 +74,34 @@ public class TestAbandonBlock {
     fout.hflush();
 
     // Now abandon the last block
-    DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
+    DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
     LocatedBlocks blocks =
       dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
     int orginalNumBlocks = blocks.locatedBlockCount();
     LocatedBlock b = blocks.getLastLocatedBlock();
-    dfsclient.getNamenode().abandonBlock(b.getBlock(), src, dfsclient.clientName);
+    dfsclient.getNamenode().abandonBlock(b.getBlock(), src,
+        dfsclient.clientName);
+    
+    // call abandonBlock again to make sure the operation is idempotent
+    dfsclient.getNamenode().abandonBlock(b.getBlock(), src,
+        dfsclient.clientName);
 
     // And close the file
     fout.close();
 
     // Close cluster and check the block has been abandoned after restart
     cluster.restartNameNode();
-    blocks = dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
-    assert orginalNumBlocks == blocks.locatedBlockCount() + 1 :
-      "Blocks " + b + " has not been abandoned.";
+    blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
+        Integer.MAX_VALUE);
+    Assert.assertEquals("Blocks " + b + " has not been abandoned.",
+        orginalNumBlocks, blocks.locatedBlockCount() + 1);
   }
 
   @Test
   /** Make sure that the quota is decremented correctly when a block is abandoned */
   public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
-    DistributedFileSystem dfs = (DistributedFileSystem)fs;
     // Setting diskspace quota to 3MB
-    dfs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
+    fs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
 
     // Start writing a file with 2 replicas to ensure each datanode has one.
     // Block Size is 1MB.

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Mon Aug 12 21:25:49 2013
@@ -17,17 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.io.PrintWriter;
+import java.io.*;
 import java.security.Permission;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -42,10 +32,7 @@ import java.util.zip.GZIPOutputStream;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSInputChecker;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -63,6 +50,9 @@ import org.apache.hadoop.util.ToolRunner
 import org.junit.Test;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
+import static org.junit.Assert.*;
 
 /**
  * This class tests commands from DFSShell.
@@ -101,6 +91,18 @@ public class TestDFSShell {
     return f;
   }
 
+  static File createLocalFileWithRandomData(int fileLength, File f)
+      throws IOException {
+    assertTrue(!f.exists());
+    f.createNewFile();
+    FileOutputStream out = new FileOutputStream(f.toString());
+    byte[] buffer = new byte[fileLength];
+    out.write(buffer);
+    out.flush();
+    out.close();
+    return f;
+  }
+
   static void show(String s) {
     System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s);
   }
@@ -1732,6 +1734,85 @@ public class TestDFSShell {
     }
   }
 
+
+  @Test (timeout = 300000)
+  public void testAppendToFile() throws Exception {
+    final int inputFileLength = 1024 * 1024;
+    File testRoot = new File(TEST_ROOT_DIR, "testAppendtoFileDir");
+    testRoot.mkdirs();
+
+    File file1 = new File(testRoot, "file1");
+    File file2 = new File(testRoot, "file2");
+    createLocalFileWithRandomData(inputFileLength, file1);
+    createLocalFileWithRandomData(inputFileLength, file2);
+
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+
+    try {
+      FileSystem dfs = cluster.getFileSystem();
+      assertTrue("Not a HDFS: " + dfs.getUri(),
+                 dfs instanceof DistributedFileSystem);
+
+      // Run appendToFile once, make sure that the target file is
+      // created and is of the right size.
+      Path remoteFile = new Path("/remoteFile");
+      FsShell shell = new FsShell();
+      shell.setConf(conf);
+      String[] argv = new String[] {
+          "-appendToFile", file1.toString(), file2.toString(), remoteFile.toString() };
+      int res = ToolRunner.run(shell, argv);
+      assertThat(res, is(0));
+      assertThat(dfs.getFileStatus(remoteFile).getLen(), is((long) inputFileLength * 2));
+
+      // Run the command once again and make sure that the target file
+      // size has been doubled.
+      res = ToolRunner.run(shell, argv);
+      assertThat(res, is(0));
+      assertThat(dfs.getFileStatus(remoteFile).getLen(), is((long) inputFileLength * 4));
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  @Test (timeout = 300000)
+  public void testAppendToFileBadArgs() throws Exception {
+    final int inputFileLength = 1024 * 1024;
+    File testRoot = new File(TEST_ROOT_DIR, "testAppendToFileBadArgsDir");
+    testRoot.mkdirs();
+
+    File file1 = new File(testRoot, "file1");
+    createLocalFileWithRandomData(inputFileLength, file1);
+
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+
+    try {
+      FileSystem dfs = cluster.getFileSystem();
+      assertTrue("Not a HDFS: " + dfs.getUri(),
+                 dfs instanceof DistributedFileSystem);
+
+      // Run appendToFile with insufficient arguments.
+      FsShell shell = new FsShell();
+      shell.setConf(conf);
+      String[] argv = new String[] {
+          "-appendToFile", file1.toString() };
+      int res = ToolRunner.run(shell, argv);
+      assertThat(res, not(0));
+
+      // Mix stdin with other input files. Must fail.
+      Path remoteFile = new Path("/remoteFile");
+      argv = new String[] {
+          "-appendToFile", file1.toString(), "-", remoteFile.toString() };
+      res = ToolRunner.run(shell, argv);
+      assertThat(res, not(0));
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Test that the server trash configuration is respected when
    * the client configuration is not set.

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Mon Aug 12 21:25:49 2013
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
@@ -194,7 +195,7 @@ public class TestDataTransferProtocol {
     sender.writeBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
         new DatanodeInfo[1], null, stage,
         0, block.getNumBytes(), block.getNumBytes(), newGS,
-        DEFAULT_CHECKSUM);
+        DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy());
     if (eofExcepted) {
       sendResponse(Status.ERROR, null, null, recvOut);
       sendRecvData(description, true);
@@ -391,7 +392,7 @@ public class TestDataTransferProtocol {
         new DatanodeInfo[1], null,
         BlockConstructionStage.PIPELINE_SETUP_CREATE,
         0, 0L, 0L, 0L,
-        badChecksum);
+        badChecksum, CachingStrategy.newDefaultStrategy());
     recvBuf.reset();
     sendResponse(Status.ERROR, null, null, recvOut);
     sendRecvData("wrong bytesPerChecksum while writing", true);
@@ -402,7 +403,7 @@ public class TestDataTransferProtocol {
         BlockTokenSecretManager.DUMMY_TOKEN, "cl",
         new DatanodeInfo[1], null,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L,
-        DEFAULT_CHECKSUM);
+        DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy());
 
     PacketHeader hdr = new PacketHeader(
       4,     // size of packet
@@ -425,7 +426,7 @@ public class TestDataTransferProtocol {
         BlockTokenSecretManager.DUMMY_TOKEN, "cl",
         new DatanodeInfo[1], null,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L,
-        DEFAULT_CHECKSUM);
+        DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy());
 
     hdr = new PacketHeader(
       8,     // size of packet
@@ -452,21 +453,21 @@ public class TestDataTransferProtocol {
     recvBuf.reset();
     blk.setBlockId(blkid-1);
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, fileLen, true);
+        0L, fileLen, true, CachingStrategy.newDefaultStrategy());
     sendRecvData("Wrong block ID " + newBlockId + " for read", false); 
 
     // negative block start offset -1L
     sendBuf.reset();
     blk.setBlockId(blkid);
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        -1L, fileLen, true);
+        -1L, fileLen, true, CachingStrategy.newDefaultStrategy());
     sendRecvData("Negative start-offset for read for block " + 
                  firstBlock.getBlockId(), false);
 
     // bad block start offset
     sendBuf.reset();
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        fileLen, fileLen, true);
+        fileLen, fileLen, true, CachingStrategy.newDefaultStrategy());
     sendRecvData("Wrong start-offset for reading block " +
                  firstBlock.getBlockId(), false);
     
@@ -483,7 +484,8 @@ public class TestDataTransferProtocol {
     
     sendBuf.reset();
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, -1L-random.nextInt(oneMil), true);
+        0L, -1L-random.nextInt(oneMil), true,
+        CachingStrategy.newDefaultStrategy());
     sendRecvData("Negative length for reading block " +
                  firstBlock.getBlockId(), false);
     
@@ -496,14 +498,14 @@ public class TestDataTransferProtocol {
         recvOut);
     sendBuf.reset();
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, fileLen+1, true);
+        0L, fileLen+1, true, CachingStrategy.newDefaultStrategy());
     sendRecvData("Wrong length for reading block " +
                  firstBlock.getBlockId(), false);
     
     // At the end of all this, read the file again to make sure it still succeeds.
     sendBuf.reset();
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        0L, fileLen, true);
+        0L, fileLen, true, CachingStrategy.newDefaultStrategy());
     readFile(fileSys, file, fileLen);
     } finally {
       cluster.shutdown();

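The recurring change in this file threads a new trailing CachingStrategy argument through Sender#writeBlock and Sender#readBlock. Only CachingStrategy.newDefaultStrategy() is confirmed by this commit; the two-argument constructor sketched below (Boolean dropBehind, Long readahead bytes) is an assumption about the class's shape, shown purely to illustrate what the strategy object carries:

    // Every call site in this diff passes the defaults.
    CachingStrategy defaults = CachingStrategy.newDefaultStrategy();

    // Hypothetical customization: drop pages behind the reader and ask
    // for 4 MB of readahead. Constructor signature assumed, not shown
    // anywhere in this commit.
    CachingStrategy custom = new CachingStrategy(true, 4L * 1024 * 1024);
    sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
        0L, fileLen, true, custom);
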
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java Mon Aug 12 21:25:49 2013
@@ -306,8 +306,8 @@ public class TestHftpFileSystem {
   @Test
   public void testHftpCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
-    conf.setInt("dfs.http.port", 123);
-    conf.setInt("dfs.https.port", 456);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
 
     URI uri = URI.create("hftp://localhost");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
@@ -341,8 +341,8 @@ public class TestHftpFileSystem {
   @Test
   public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
-    conf.setInt("dfs.http.port", 123);
-    conf.setInt("dfs.https.port", 456);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
 
     URI uri = URI.create("hftp://localhost:789");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
@@ -378,8 +378,8 @@ public class TestHftpFileSystem {
   @Test
   public void testHsftpCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
-    conf.setInt("dfs.http.port", 123);
-    conf.setInt("dfs.https.port", 456);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
 
     URI uri = URI.create("hsftp://localhost");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
@@ -413,8 +413,8 @@ public class TestHftpFileSystem {
   @Test
   public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
-    conf.setInt("dfs.http.port", 123);
-    conf.setInt("dfs.https.port", 456);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
 
     URI uri = URI.create("hsftp://localhost:789");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);

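The four hunks above are the same mechanical substitution: the raw "dfs.http.port"/"dfs.https.port" string literals are replaced by DFSConfigKeys constants, so a future key rename cannot silently strand these tests. A sketch of the kind of check such a test pairs with the setInt call; the exact assertion is inferred from context, not shown in these hunks:

    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(
        URI.create("hftp://localhost"), conf);
    // FileSystem#getDefaultPort() should now reflect the configured value
    assertEquals(123, fs.getDefaultPort());
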
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java Mon Aug 12 21:25:49 2013
@@ -27,12 +27,9 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -40,7 +37,10 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB;
+import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Mon Aug 12 21:25:49 2013
@@ -34,8 +34,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
-import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -57,6 +56,7 @@ import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+
 import static org.hamcrest.CoreMatchers.*;
 
 /**
@@ -180,7 +180,7 @@ public class TestShortCircuitLocalRead {
       assertTrue(fs.getClient().useLegacyBlockReaderLocal());
     }
     
-    DFSDataInputStream stm = (DFSDataInputStream)fs.open(name);
+    HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
 
     ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
 

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java Mon Aug 12 21:25:49 2013
@@ -80,9 +80,11 @@ public class TestClientProtocolWithDeleg
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
         3600000, mockNameSys);
     sm.startThreads();
-    final Server server = RPC.getServer(ClientProtocol.class, mockNN, ADDRESS,
-        0, 5, true, conf, sm);
-
+    final Server server = new RPC.Builder(conf)
+        .setProtocol(ClientProtocol.class).setInstance(mockNN)
+        .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
+        .setSecretManager(sm).build();
+    
     server.start();
 
     final UserGroupInformation current = UserGroupInformation.getCurrentUser();

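This hunk replaces the long positional RPC.getServer(...) overload with the fluent RPC.Builder. A caller only needs to chain the setters it actually cares about; a minimal sketch, assuming unset builder properties fall back to sensible defaults (ADDRESS and mockNN come from the test above):

    final Server server = new RPC.Builder(conf)
        .setProtocol(ClientProtocol.class)
        .setInstance(mockNN)
        .setBindAddress(ADDRESS)
        .setPort(0)                 // 0 lets the OS pick a free port
        .build();
    server.start();
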
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Mon Aug 12 21:25:49 2013
@@ -60,24 +60,26 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
 import org.apache.log4j.Level;
-import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestDelegationTokenForProxyUser {
-  private MiniDFSCluster cluster;
-  Configuration config;
+  private static MiniDFSCluster cluster;
+  private static Configuration config;
   final private static String GROUP1_NAME = "group1";
   final private static String GROUP2_NAME = "group2";
   final private static String[] GROUP_NAMES = new String[] { GROUP1_NAME,
       GROUP2_NAME };
   final private static String REAL_USER = "RealUser";
   final private static String PROXY_USER = "ProxyUser";
+  private static UserGroupInformation ugi;
+  private static UserGroupInformation proxyUgi;
   
   private static final Log LOG = LogFactory.getLog(TestDoAsEffectiveUser.class);
   
-  private void configureSuperUserIPAddresses(Configuration conf,
+  private static void configureSuperUserIPAddresses(Configuration conf,
       String superUserShortName) throws IOException {
     ArrayList<String> ipList = new ArrayList<String>();
     Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
@@ -102,8 +104,8 @@ public class TestDelegationTokenForProxy
         builder.toString());
   }
   
-  @Before
-  public void setUp() throws Exception {
+  @BeforeClass
+  public static void setUp() throws Exception {
     config = new HdfsConfiguration();
     config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     config.setLong(
@@ -119,21 +121,20 @@ public class TestDelegationTokenForProxy
     cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);
+    ugi = UserGroupInformation.createRemoteUser(REAL_USER);
+    proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi,
+        GROUP_NAMES);
   }
 
-  @After
-  public void tearDown() throws Exception {
+  @AfterClass
+  public static void tearDown() throws Exception {
     if(cluster!=null) {
       cluster.shutdown();
     }
   }
  
-  @Test
+  @Test(timeout=20000)
   public void testDelegationTokenWithRealUser() throws IOException {
-    UserGroupInformation ugi = UserGroupInformation
-        .createRemoteUser(REAL_USER);
-    final UserGroupInformation proxyUgi = UserGroupInformation
-        .createProxyUserForTesting(PROXY_USER, ugi, GROUP_NAMES);
     try {
       Token<?>[] tokens = proxyUgi
           .doAs(new PrivilegedExceptionAction<Token<?>[]>() {
@@ -154,12 +155,11 @@ public class TestDelegationTokenForProxy
     }
   }
   
-  @Test
+  @Test(timeout=20000)
   public void testWebHdfsDoAs() throws Exception {
     WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
     ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)ExceptionHandler.LOG).getLogger().setLevel(Level.ALL);
-    final UserGroupInformation ugi = UserGroupInformation.createRemoteUser(REAL_USER);
     WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
     final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config);
     

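The fixture change above follows the standard JUnit 4 pattern for class-level setup: @BeforeClass/@AfterClass methods must be static, so the cluster and UGI fields become static too, and the MiniDFSCluster is started once per class instead of once per test. The skeleton of that pattern (class name hypothetical):

    public class ExampleClusterTest {           // hypothetical name
      private static MiniDFSCluster cluster;

      @BeforeClass
      public static void setUp() throws Exception {
        cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
        cluster.waitActive();
      }

      @AfterClass
      public static void tearDown() {
        if (cluster != null) {
          cluster.shutdown();
        }
      }
    }
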
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Mon Aug 12 21:25:49 2013
@@ -232,8 +232,9 @@ public class TestBlockToken {
         ProtobufRpcEngine.class);
     BlockingService service = ClientDatanodeProtocolService
         .newReflectiveBlockingService(mockDN);
-    return RPC.getServer(ClientDatanodeProtocolPB.class, service, ADDRESS, 0, 5,
-        true, conf, sm);
+    return new RPC.Builder(conf).setProtocol(ClientDatanodeProtocolPB.class)
+        .setInstance(service).setBindAddress(ADDRESS).setPort(0)
+        .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
   }
 
   @Test

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java Mon Aug 12 21:25:49 2013
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.net.NetUtils;
@@ -148,7 +149,8 @@ public class TestBlockTokenWithDFS {
       blockReader = BlockReaderFactory.newBlockReader(
           new DFSClient.Conf(conf), file, block, lblock.getBlockToken(), 0, -1,
           true, "TestBlockTokenWithDFS", TcpPeerServer.peerFromSocket(s),
-          nodes[0], null, null, null, false);
+          nodes[0], null, null, null, false,
+          CachingStrategy.newDefaultStrategy());
 
     } catch (IOException ex) {
       if (ex instanceof InvalidBlockTokenException) {

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java Mon Aug 12 21:25:49 2013
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.bl
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.util.ArrayList;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -28,13 +30,21 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.junit.Test;
 
+import com.google.common.base.Preconditions;
+
 /**
  * This class tests the internals of PendingReplicationBlocks.java,
  * as well as how PendingReplicationBlocks acts in BlockManager
@@ -44,7 +54,22 @@ public class TestPendingReplication {
   private static final int DFS_REPLICATION_INTERVAL = 1;
   // Number of datanodes in the cluster
   private static final int DATANODE_COUNT = 5;
+  
+  private DatanodeDescriptor genDatanodeId(int seed) {
+    seed = seed % 256;
+    String ip = seed + "." + seed + "." + seed + "." + seed;
+    return DFSTestUtil.getDatanodeDescriptor(ip, null);
+  }
 
+  private DatanodeDescriptor[] genDatanodes(int number) {
+    Preconditions.checkArgument(number >= 0);
+    DatanodeDescriptor[] nodes = new DatanodeDescriptor[number];
+    for (int i = 0; i < number; i++) {
+      nodes[i] = genDatanodeId(i);
+    }
+    return nodes;
+  }
+  
   @Test
   public void testPendingReplication() {
     PendingReplicationBlocks pendingReplications;
@@ -56,7 +81,7 @@ public class TestPendingReplication {
     //
     for (int i = 0; i < 10; i++) {
       Block block = new Block(i, i, 0);
-      pendingReplications.increment(block, i);
+      pendingReplications.increment(block, genDatanodes(i));
     }
     assertEquals("Size of pendingReplications ",
                  10, pendingReplications.size());
@@ -66,15 +91,16 @@ public class TestPendingReplication {
     // remove one item and reinsert it
     //
     Block blk = new Block(8, 8, 0);
-    pendingReplications.decrement(blk);             // removes one replica
+    pendingReplications.decrement(blk, genDatanodeId(7)); // removes one replica
     assertEquals("pendingReplications.getNumReplicas ",
                  7, pendingReplications.getNumReplicas(blk));
 
     for (int i = 0; i < 7; i++) {
-      pendingReplications.decrement(blk);           // removes all replicas
+      // removes all replicas
+      pendingReplications.decrement(blk, genDatanodeId(i));
     }
     assertTrue(pendingReplications.size() == 9);
-    pendingReplications.increment(blk, 8);
+    pendingReplications.increment(blk, genDatanodes(8));
     assertTrue(pendingReplications.size() == 10);
 
     //
@@ -102,7 +128,7 @@ public class TestPendingReplication {
 
     for (int i = 10; i < 15; i++) {
       Block block = new Block(i, i, 0);
-      pendingReplications.increment(block, i);
+      pendingReplications.increment(block, genDatanodes(i));
     }
     assertTrue(pendingReplications.size() == 15);
 
@@ -134,6 +160,101 @@ public class TestPendingReplication {
   }
   
   /**
+   * Test that DatanodeProtocol#blockReceivedAndDeleted correctly updates the
+   * pending replications, and that repeated blockReceivedAndDeleted calls
+   * are idempotent with respect to the pending replication counts.
+   */
+  @Test
+  public void testBlockReceived() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+          DATANODE_COUNT).build();
+      cluster.waitActive();
+
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+      FSNamesystem fsn = cluster.getNamesystem();
+      BlockManager blkManager = fsn.getBlockManager();
+    
+      final String file = "/tmp.txt";
+      final Path filePath = new Path(file);
+      short replFactor = 1;
+      DFSTestUtil.createFile(hdfs, filePath, 1024L, replFactor, 0);
+
+      // temporarily stop the heartbeat
+      ArrayList<DataNode> datanodes = cluster.getDataNodes();
+      for (int i = 0; i < DATANODE_COUNT; i++) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), true);
+      }
+
+      hdfs.setReplication(filePath, (short) DATANODE_COUNT);
+      BlockManagerTestUtil.computeAllPendingWork(blkManager);
+
+      assertEquals(1, blkManager.pendingReplications.size());
+      INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
+      Block[] blocks = fileNode.getBlocks();
+      assertEquals(DATANODE_COUNT - 1,
+          blkManager.pendingReplications.getNumReplicas(blocks[0]));
+
+      LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0)
+          .get(0);
+      DatanodeInfo existingDn = (locatedBlock.getLocations())[0];
+      int reportDnNum = 0;
+      String poolId = cluster.getNamesystem().getBlockPoolId();
+      // have two datanodes (other than the one that already has the data)
+      // report the block to the NN
+      for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
+        if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
+          DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
+              poolId);
+          StorageReceivedDeletedBlocks[] report = { 
+              new StorageReceivedDeletedBlocks(dnR.getStorageID(),
+              new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
+                  blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
+          cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
+          reportDnNum++;
+        }
+      }
+
+      assertEquals(DATANODE_COUNT - 3,
+          blkManager.pendingReplications.getNumReplicas(blocks[0]));
+
+      // let the same two datanodes report again; reset the counter first,
+      // otherwise reportDnNum is still 2 and the loop body would be skipped
+      reportDnNum = 0;
+      for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
+        if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
+          DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
+              poolId);
+          StorageReceivedDeletedBlocks[] report = 
+            { new StorageReceivedDeletedBlocks(dnR.getStorageID(),
+              new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
+                  blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
+          cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
+          reportDnNum++;
+        }
+      }
+
+      assertEquals(DATANODE_COUNT - 3,
+          blkManager.pendingReplications.getNumReplicas(blocks[0]));
+
+      // re-enable and trigger heartbeats for all datanodes
+      for (int i = 0; i < DATANODE_COUNT; i++) {
+        DataNodeTestUtils
+            .setHeartbeatsDisabledForTests(datanodes.get(i), false);
+        DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
+      }
+
+      Thread.sleep(5000);
+      assertEquals(0, blkManager.pendingReplications.size());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  /**
    * Test if BlockManager can correctly remove corresponding pending records
    * when a file is deleted
    * 

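The new increment(Block, DatanodeDescriptor[]) / decrement(Block, DatanodeDescriptor) signatures exercised above imply that a pending replication now remembers which datanodes are expected to report, which is exactly what makes a duplicate blockReceivedAndDeleted harmless. A hedged illustration of that bookkeeping, not PendingReplicationBlocks' actual code:

    private final Map<Block, List<DatanodeDescriptor>> pending =
        new HashMap<Block, List<DatanodeDescriptor>>();

    void increment(Block block, DatanodeDescriptor[] targets) {
      List<DatanodeDescriptor> nodes = pending.get(block);
      if (nodes == null) {
        nodes = new ArrayList<DatanodeDescriptor>();
        pending.put(block, nodes);
      }
      Collections.addAll(nodes, targets);
    }

    void decrement(Block block, DatanodeDescriptor dn) {
      List<DatanodeDescriptor> nodes = pending.get(block);
      // List#remove returns false for a node that has already reported,
      // so a repeated report is a no-op, i.e. the call is idempotent.
      if (nodes != null && nodes.remove(dn) && nodes.isEmpty()) {
        pending.remove(block);
      }
    }
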
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java Mon Aug 12 21:25:49 2013
@@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.junit.Test;
 
 public class TestUnderReplicatedBlocks {
-  @Test(timeout=300000) // 5 min timeout
+  @Test(timeout=60000) // 1 min timeout
   public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
     Configuration conf = new HdfsConfiguration();
     final short REPLICATION_FACTOR = 2;
@@ -49,6 +49,7 @@ public class TestUnderReplicatedBlocks {
       ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
       DatanodeDescriptor dn = bm.blocksMap.nodeIterator(b.getLocalBlock()).next();
       bm.addToInvalidates(b.getLocalBlock(), dn);
+      Thread.sleep(5000);
       bm.blocksMap.removeNode(b.getLocalBlock(), dn);
       
       // increment this file's replication factor

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Mon Aug 12 21:25:49 2013
@@ -287,7 +287,8 @@ public class TestDataNodeVolumeFailure {
     BlockReader blockReader =
       BlockReaderFactory.newBlockReader(new DFSClient.Conf(conf), file, block,
         lblock.getBlockToken(), 0, -1, true, "TestDataNodeVolumeFailure",
-        TcpPeerServer.peerFromSocket(s), datanode, null, null, null, false);
+        TcpPeerServer.peerFromSocket(s), datanode, null, null, null, false,
+        CachingStrategy.newDefaultStrategy());
     blockReader.close();
   }
   

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Mon Aug 12 21:25:49 2013
@@ -148,7 +148,7 @@ public class TestDiskError {
         BlockTokenSecretManager.DUMMY_TOKEN, "",
         new DatanodeInfo[0], null,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
-        checksum);
+        checksum, CachingStrategy.newDefaultStrategy());
     out.flush();
 
     // close the connection before sending the content of the block


