From: a..@apache.org
Subject: svn commit: r1539898 [1/3] - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/ hadoop-hdfs-nfs/src/test/java/org/apache/had...
Date: Fri, 08 Nov 2013 01:44:26 GMT
Author: arp
Date: Fri Nov  8 01:44:24 2013
New Revision: 1539898

URL: http://svn.apache.org/r1539898
Log:
Merging r1539737 through r1539896 from trunk to branch HDFS-2832

Added:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
      - copied unchanged from r1539896, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java
      - copied unchanged from r1539896, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java
Removed:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheDescriptorException.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheEntryException.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestClientNamenodeProtocolServerSideTranslatorPB.java
Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1539245-1539896

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java Fri Nov  8 01:44:24 2013
@@ -23,33 +23,47 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
+import org.apache.hadoop.mount.MountdBase;
 import org.apache.hadoop.nfs.nfs3.Nfs3Base;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Nfs server. Supports NFS v3 using {@link RpcProgramNfs3}.
  * Currently Mountd program is also started inside this class.
  * Only TCP server is supported and UDP is not supported.
  */
 public class Nfs3 extends Nfs3Base {
+  private Mountd mountd;
+  
   static {
     Configuration.addDefaultResource("hdfs-default.xml");
     Configuration.addDefaultResource("hdfs-site.xml");
   }
   
   public Nfs3(List<String> exports) throws IOException {
-    super(new Mountd(exports), new RpcProgramNfs3());
+    super(new RpcProgramNfs3());
+    mountd = new Mountd(exports);
   }
 
+  @VisibleForTesting
   public Nfs3(List<String> exports, Configuration config) throws IOException {
-    super(new Mountd(exports, config), new RpcProgramNfs3(config), config);
+    super(new RpcProgramNfs3(config), config);
+    mountd = new Mountd(exports, config);
   }
 
+  public Mountd getMountd() {
+    return mountd;
+  }
+  
   public static void main(String[] args) throws IOException {
     StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
     List<String> exports = new ArrayList<String>();
     exports.add("/");
+    
     final Nfs3 nfsServer = new Nfs3(exports);
+    nfsServer.mountd.start(true); // Start mountd
     nfsServer.start(true);
   }
 }
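
The hunks above decouple the mount daemon from Nfs3Base: Mountd is no
longer passed to the superclass constructor, so it must be started
explicitly before the NFS3 program, as the new main() does. A minimal
launcher sketch of that startup order, assuming only the Nfs3
constructor and getMountd() accessor introduced in this diff (the class
name Nfs3Launcher is illustrative):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    public class Nfs3Launcher {
      public static void main(String[] args) throws IOException {
        List<String> exports = new ArrayList<String>();
        exports.add("/");                   // export the HDFS root
        Nfs3 nfsServer = new Nfs3(exports);
        nfsServer.getMountd().start(true);  // register mountd first
        nfsServer.start(true);              // then the NFS3 RPC program
      }
    }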

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java Fri Nov  8 01:44:24 2013
@@ -109,6 +109,12 @@ public class Nfs3Utils {
    * Send a write response to the netty network socket channel
    */
   public static void writeChannel(Channel channel, XDR out, int xid) {
+    if (channel == null) {
+      RpcProgramNfs3.LOG
+          .info("Null channel should only happen in tests. Do nothing.");
+      return;
+    }
+    
     if (RpcProgramNfs3.LOG.isDebugEnabled()) {
       RpcProgramNfs3.LOG.debug(WRITE_RPC_END + xid);
     }
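
The new null-channel guard lets tests exercise the write path without a
live Netty channel. A sketch of the call it now tolerates, assuming an
already-serialized XDR response buffer (the xid value 1234 is
illustrative):

    XDR out = new XDR();
    Nfs3Utils.writeChannel(null, out, 1234);  // logs a note and returns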

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java Fri Nov  8 01:44:24 2013
@@ -24,7 +24,6 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException;
-import java.security.InvalidParameterException;
 import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.Map.Entry;
@@ -96,7 +95,7 @@ class OpenFileCtx {
   
   // It's updated after each sync to HDFS
   private Nfs3FileAttributes latestAttr;
-
+  
   private final ConcurrentNavigableMap<OffsetRange, WriteCtx> pendingWrites;
   
   private final ConcurrentNavigableMap<Long, CommitCtx> pendingCommits;
@@ -165,10 +164,22 @@ class OpenFileCtx {
     return System.currentTimeMillis() - lastAccessTime > streamTimeout;
   }
   
+  long getLastAccessTime() {
+    return lastAccessTime;  
+  }
+  
   public long getNextOffset() {
     return nextOffset.get();
   }
   
+  boolean getActiveState() {
+    return this.activeState;
+  }
+  
+  boolean hasPendingWork() {
+    return (pendingWrites.size() != 0 || pendingCommits.size() != 0);
+  }
+  
   // Increase or decrease the memory occupation of non-sequential writes
   private long updateNonSequentialWriteInMemory(long count) {
     long newValue = nonSequentialWriteInMemory.addAndGet(count);
@@ -792,19 +803,18 @@ class OpenFileCtx {
    * @return true, remove stream; false, keep stream
    */
   public synchronized boolean streamCleanup(long fileId, long streamTimeout) {
-    if (streamTimeout < WriteManager.MINIMIUM_STREAM_TIMEOUT) {
-      throw new InvalidParameterException("StreamTimeout" + streamTimeout
-          + "ms is less than MINIMIUM_STREAM_TIMEOUT "
-          + WriteManager.MINIMIUM_STREAM_TIMEOUT + "ms");
+    Preconditions
+        .checkState(streamTimeout >= Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
+    if (!activeState) {
+      return true;
     }
     
     boolean flag = false;
     // Check the stream timeout
     if (checkStreamTimeout(streamTimeout)) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("closing stream for fileId:" + fileId);
+        LOG.debug("stream can be closed for fileId:" + fileId);
       }
-      cleanup();
       flag = true;
     }
     return flag;
@@ -975,7 +985,7 @@ class OpenFileCtx {
     FileHandle handle = writeCtx.getHandle();
     if (LOG.isDebugEnabled()) {
       LOG.debug("do write, fileId: " + handle.getFileId() + " offset: "
-          + offset + " length:" + count + " stableHow:" + stableHow.getValue());
+          + offset + " length:" + count + " stableHow:" + stableHow.name());
     }
 
     try {
@@ -1007,6 +1017,23 @@ class OpenFileCtx {
       }
       
       if (!writeCtx.getReplied()) {
+        if (stableHow != WriteStableHow.UNSTABLE) {
+          LOG.info("Do sync for stable write:" + writeCtx);
+          try {
+            if (stableHow == WriteStableHow.DATA_SYNC) {
+              fos.hsync();
+            } else {
+              Preconditions.checkState(stableHow == WriteStableHow.FILE_SYNC,
+                  "Unknown WriteStableHow:" + stableHow);
+              // Sync file data and length
+              fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
+            }
+          } catch (IOException e) {
+            LOG.error("hsync failed with writeCtx:" + writeCtx + " error:" + e);
+            throw e;
+          }
+        }
+        
         WccAttr preOpAttr = latestAttr.getWccAttr();
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {
@@ -1039,7 +1066,7 @@ class OpenFileCtx {
     }
   }
 
-  private synchronized void cleanup() {
+  synchronized void cleanup() {
     if (!activeState) {
       LOG.info("Current OpenFileCtx is already inactive, no need to cleanup.");
       return;
@@ -1047,7 +1074,7 @@ class OpenFileCtx {
     activeState = false;
 
     // stop the dump thread
-    if (dumpThread != null) {
+    if (dumpThread != null && dumpThread.isAlive()) {
       dumpThread.interrupt();
       try {
         dumpThread.join(3000);
@@ -1129,4 +1156,10 @@ class OpenFileCtx {
   void setActiveStatusForTest(boolean activeState) {
     this.activeState = activeState;
   }
+  
+  @Override
+  public String toString() {
+    return String.format("activeState: %b asyncStatus: %b nextOffset: %d",
+        activeState, asyncStatus, nextOffset.get());
+  }
 }
\ No newline at end of file
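
The block added to the !writeCtx.getReplied() path implements stable
writes (HDFS-5252) by mapping the NFS WriteStableHow onto HDFS sync
calls. A condensed sketch of that mapping, assuming an
HdfsDataOutputStream fos and the SyncFlag/WriteStableHow types already
imported by OpenFileCtx (the helper name syncIfStable is illustrative):

    // UNSTABLE: reply without extra syncing; DATA_SYNC: force file data
    // to disk; FILE_SYNC: force data to disk and persist the length too.
    private static void syncIfStable(HdfsDataOutputStream fos,
        WriteStableHow stableHow) throws IOException {
      if (stableHow == WriteStableHow.UNSTABLE) {
        return;
      }
      if (stableHow == WriteStableHow.DATA_SYNC) {
        fos.hsync();
      } else if (stableHow == WriteStableHow.FILE_SYNC) {
        fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      } else {
        throw new IllegalStateException("Unknown WriteStableHow:" + stableHow);
      }
    }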

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Fri Nov  8 01:44:24 2013
@@ -126,6 +126,8 @@ import org.jboss.netty.buffer.ChannelBuf
 import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelHandlerContext;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * RPC program corresponding to nfs daemon. See {@link Nfs3}.
  */
@@ -212,6 +214,11 @@ public class RpcProgramNfs3 extends RpcP
     }
   }
   
+  @Override
+  public void startDaemons() {
+    writeManager.startAsyncDataService();
+  }
+  
   /******************************************************
    * RPC call handlers
    ******************************************************/
@@ -776,7 +783,8 @@ public class RpcProgramNfs3 extends RpcP
 
     int createMode = request.getMode();
     if ((createMode != Nfs3Constant.CREATE_EXCLUSIVE)
-        && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
+        && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)
+        && request.getObjAttr().getSize() != 0) {
       LOG.error("Setting file size is not supported when creating file: "
           + fileName + " dir fileId:" + dirHandle.getFileId());
       return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -829,6 +837,23 @@ public class RpcProgramNfs3 extends RpcP
       postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
       dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
           dfsClient, dirFileIdPath, iug);
+      
+      // Add open stream
+      OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr,
+          writeDumpDir + "/" + postOpObjAttr.getFileId(), dfsClient, iug);
+      fileHandle = new FileHandle(postOpObjAttr.getFileId());
+      if (!writeManager.addOpenFileStream(fileHandle, openFileCtx)) {
+        LOG.warn("Can't add more stream, close it."
+            + " Future write will become append");
+        fos.close();
+        fos = null;
+      } else {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Opened stream for file:" + fileName + ", fileId:"
+              + fileHandle.getFileId());
+        }
+      }
+      
     } catch (IOException e) {
       LOG.error("Exception", e);
       if (fos != null) {
@@ -857,16 +882,6 @@ public class RpcProgramNfs3 extends RpcP
       }
     }
     
-    // Add open stream
-    OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr, writeDumpDir
-        + "/" + postOpObjAttr.getFileId(), dfsClient, iug);
-    fileHandle = new FileHandle(postOpObjAttr.getFileId());
-    writeManager.addOpenFileStream(fileHandle, openFileCtx);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("open stream for file:" + fileName + ", fileId:"
-          + fileHandle.getFileId());
-    }
-    
     return new CREATE3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr,
         dirWcc);
   }
@@ -1975,4 +1990,9 @@ public class RpcProgramNfs3 extends RpcP
     }
     return true;
   }
+  
+  @VisibleForTesting
+  WriteManager getWriteManager() {
+    return this.writeManager;
+  }
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java Fri Nov  8 01:44:24 2013
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import java.io.IOException;
-import java.util.Iterator;
-import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.logging.Log;
@@ -29,11 +27,12 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.nfs.NfsFileType;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.IdUserGroup;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
@@ -45,6 +44,7 @@ import org.apache.hadoop.oncrpc.security
 import org.apache.hadoop.util.Daemon;
 import org.jboss.netty.channel.Channel;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Maps;
 
 /**
@@ -55,69 +55,70 @@ public class WriteManager {
 
   private final Configuration config;
   private final IdUserGroup iug;
-  private final ConcurrentMap<FileHandle, OpenFileCtx> openFileMap = Maps
-      .newConcurrentMap();
-
+ 
   private AsyncDataService asyncDataService;
   private boolean asyncDataServiceStarted = false;
 
-  private final StreamMonitor streamMonitor;
-  
+  private final int maxStreams;
+
   /**
   * The time limit to wait to accumulate reordered sequential writes to the
   * same file before the write is considered done.
    */
   private long streamTimeout;
-  
-  public static final long DEFAULT_STREAM_TIMEOUT = 10 * 60 * 1000; //10 minutes
-  public static final long MINIMIUM_STREAM_TIMEOUT = 10 * 1000; //10 seconds
-  
-  void addOpenFileStream(FileHandle h, OpenFileCtx ctx) {
-    openFileMap.put(h, ctx);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("After add the new stream " + h.getFileId()
-          + ", the stream number:" + openFileMap.size());
+
+  private final OpenFileCtxCache fileContextCache;
+
+  static public class MultipleCachedStreamException extends IOException {
+    private static final long serialVersionUID = 1L;
+
+    public MultipleCachedStreamException(String msg) {
+      super(msg);
     }
   }
 
+  boolean addOpenFileStream(FileHandle h, OpenFileCtx ctx) {
+    return fileContextCache.put(h, ctx);
+  }
+  
   WriteManager(IdUserGroup iug, final Configuration config) {
     this.iug = iug;
     this.config = config;
-    
-    streamTimeout = config.getLong("dfs.nfs3.stream.timeout",
-        DEFAULT_STREAM_TIMEOUT);
+    streamTimeout = config.getLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT,
+        Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
     LOG.info("Stream timeout is " + streamTimeout + "ms.");
-    if (streamTimeout < MINIMIUM_STREAM_TIMEOUT) {
+    if (streamTimeout < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
       LOG.info("Reset stream timeout to minimum value "
-          + MINIMIUM_STREAM_TIMEOUT + "ms.");
-      streamTimeout = MINIMIUM_STREAM_TIMEOUT;
+          + Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + "ms.");
+      streamTimeout = Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT;
     }
-    
-    this.streamMonitor = new StreamMonitor();
+    maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
+        Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
+    LOG.info("Maximum open streams is "+ maxStreams);
+    this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
   }
 
-  private void startAsyncDataSerivce() {
-    streamMonitor.start();
+  void startAsyncDataService() {
+    if (asyncDataServiceStarted) {
+      return;
+    }
+    fileContextCache.start();
     this.asyncDataService = new AsyncDataService();
     asyncDataServiceStarted = true;
   }
 
-  private void shutdownAsyncDataService() {
-    asyncDataService.shutdown();
+  void shutdownAsyncDataService() {
+    if (!asyncDataServiceStarted) {
+      return;
+    }
     asyncDataServiceStarted = false;
-    streamMonitor.interrupt();
+    asyncDataService.shutdown();
+    fileContextCache.shutdown();
   }
 
   void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel,
       int xid, Nfs3FileAttributes preOpAttr) throws IOException {
-    // First write request starts the async data service
-    if (!asyncDataServiceStarted) {
-      startAsyncDataSerivce();
-    }
-
-    long offset = request.getOffset();
     int count = request.getCount();
-    WriteStableHow stableHow = request.getStableHow();
     byte[] data = request.getData().array();
     if (data.length < count) {
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -128,13 +129,12 @@ public class WriteManager {
 
     FileHandle handle = request.getHandle();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("handleWrite fileId: " + handle.getFileId() + " offset: "
-          + offset + " length:" + count + " stableHow:" + stableHow.getValue());
+      LOG.debug("handleWrite " + request);
     }
 
     // Check if there is a stream to write
     FileHandle fileHandle = request.getHandle();
-    OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
+    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
     if (openFileCtx == null) {
       LOG.info("No opened stream for fileId:" + fileHandle.getFileId());
 
@@ -149,6 +149,15 @@ public class WriteManager {
         fos = dfsClient.append(fileIdPath, bufferSize, null, null);
 
         latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
+      } catch (RemoteException e) {
+        IOException io = e.unwrapRemoteException();
+        if (io instanceof AlreadyBeingCreatedException) {
+          LOG.warn("Can't append file:" + fileIdPath
+              + ". Possibly the file is being closed. Drop the request:"
+              + request + ", wait for the client to retry...");
+          return;
+        }
+        throw e;
       } catch (IOException e) {
         LOG.error("Can't apapend to file:" + fileIdPath + ", error:" + e);
         if (fos != null) {
@@ -169,9 +178,26 @@ public class WriteManager {
           Nfs3Constant.FILE_DUMP_DIR_DEFAULT);
       openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/"
           + fileHandle.getFileId(), dfsClient, iug);
-      addOpenFileStream(fileHandle, openFileCtx);
+
+      if (!addOpenFileStream(fileHandle, openFileCtx)) {
+        LOG.info("Can't add new stream. Close it. Tell client to retry.");
+        try {
+          fos.close();
+        } catch (IOException e) {
+          LOG.error("Can't close stream for fileId:" + handle.getFileId());
+        }
+        // Notify client to retry
+        WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
+        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_JUKEBOX,
+            fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
+        Nfs3Utils.writeChannel(channel,
+            response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
+            xid);
+        return;
+      }
+
       if (LOG.isDebugEnabled()) {
-        LOG.debug("opened stream for file:" + fileHandle.getFileId());
+        LOG.debug("Opened stream for appending file:" + fileHandle.getFileId());
       }
     }
 
@@ -184,7 +210,7 @@ public class WriteManager {
   void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
       long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
     int status;
-    OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
+    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
 
     if (openFileCtx == null) {
       LOG.info("No opened stream for fileId:" + fileHandle.getFileId()
@@ -237,7 +263,7 @@ public class WriteManager {
     String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle);
     Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
     if (attr != null) {
-      OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
+      OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
       if (openFileCtx != null) {
         attr.setSize(openFileCtx.getNextOffset());
         attr.setUsed(openFileCtx.getNextOffset());
@@ -252,8 +278,8 @@ public class WriteManager {
     Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
 
     if ((attr != null) && (attr.getType() == NfsFileType.NFSREG.toValue())) {
-      OpenFileCtx openFileCtx = openFileMap
-          .get(new FileHandle(attr.getFileId()));
+      OpenFileCtx openFileCtx = fileContextCache.get(new FileHandle(attr
+          .getFileId()));
 
       if (openFileCtx != null) {
         attr.setSize(openFileCtx.getNextOffset());
@@ -262,51 +288,9 @@ public class WriteManager {
     }
     return attr;
   }
-  
-  /**
-   * StreamMonitor wakes up periodically to find and closes idle streams.
-   */
-  class StreamMonitor extends Daemon {
-    private int rotation = 5 * 1000; // 5 seconds
-    private long lastWakeupTime = 0;
-
-    @Override
-    public void run() {
-      while (true) {
-        Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
-            .iterator();
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("openFileMap size:" + openFileMap.size());
-        }
-        while (it.hasNext()) {
-          Entry<FileHandle, OpenFileCtx> pairs = it.next();
-          OpenFileCtx ctx = pairs.getValue();
-          if (ctx.streamCleanup((pairs.getKey()).getFileId(), streamTimeout)) {
-            it.remove();
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("After remove stream " + pairs.getKey().getFileId()
-                  + ", the stream number:" + openFileMap.size());
-            }
-          }
-        }
-
-        // Check if it can sleep
-        try {
-          long workedTime = System.currentTimeMillis() - lastWakeupTime;
-          if (workedTime < rotation) {
-            if (LOG.isTraceEnabled()) {
-              LOG.trace("StreamMonitor can still have a sleep:"
-                  + ((rotation - workedTime) / 1000));
-            }
-            Thread.sleep(rotation - workedTime);
-          }
-          lastWakeupTime = System.currentTimeMillis();
 
-        } catch (InterruptedException e) {
-          LOG.info("StreamMonitor got interrupted");
-          return;
-        }
-      }
-    }
+  @VisibleForTesting
+  OpenFileCtxCache getOpenFileCtxCache() {
+    return this.fileContextCache;
   }
 }
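
With the StreamMonitor daemon removed, open streams now live in a
bounded OpenFileCtxCache (HDFS-5364), and addOpenFileStream reports
whether the cache accepted the stream. A sketch of the resulting caller
contract, assuming the handleWrite locals shown above (fos, fileHandle,
openFileCtx):

    // A rejected put means the server has hit Nfs3Constant.MAX_OPEN_FILES:
    // close the fresh append stream and answer NFS3ERR_JUKEBOX so the NFS
    // client backs off and retries the WRITE later.
    if (!writeManager.addOpenFileStream(fileHandle, openFileCtx)) {
      try {
        fos.close();
      } catch (IOException e) {
        LOG.error("Can't close stream for fileId:" + fileHandle.getFileId());
      }
      // build a WRITE3Response with Nfs3Status.NFS3ERR_JUKEBOX and write
      // it back on the channel, as the hunk above does
    }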

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java Fri Nov  8 01:44:24 2013
@@ -51,7 +51,7 @@ public class TestMountd {
     Nfs3 nfs3 = new Nfs3(exports, config);
     nfs3.start(false);
 
-    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountBase()
+    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
         .getRpcProgram();
     mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
     

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java Fri Nov  8 01:44:24 2013
@@ -135,6 +135,7 @@ public class TestOutOfOrderWrite {
     @Override
     protected ChannelPipelineFactory setPipelineFactory() {
       this.pipelineFactory = new ChannelPipelineFactory() {
+        @Override
         public ChannelPipeline getPipeline() {
           return Channels.pipeline(
               RpcUtil.constructRpcFrameDecoder(),

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java Fri Nov  8 01:44:24 2013
@@ -17,21 +17,41 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
+import java.net.InetAddress;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentNavigableMap;
 
 import junit.framework.Assert;
 
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.IdUserGroup;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
+import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
+import org.apache.hadoop.nfs.nfs3.request.READ3Request;
+import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
 import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
+import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
+import org.apache.hadoop.nfs.nfs3.response.READ3Response;
+import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.SecurityHandler;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -105,7 +125,7 @@ public class TestWrites {
     Assert.assertTrue(limit - position == 1);
     Assert.assertTrue(appendedData.get(position) == (byte) 19);
   }
-  
+
   @Test
   // Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS, which
   // includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX,
@@ -162,4 +182,116 @@ public class TestWrites {
     ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
     Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
   }
+
+  private void waitWrite(RpcProgramNfs3 nfsd, FileHandle handle, int maxWaitTime)
+      throws InterruptedException {
+    int waitedTime = 0;
+    OpenFileCtx ctx = nfsd.getWriteManager()
+        .getOpenFileCtxCache().get(handle);
+    assertTrue(ctx != null);
+    do {
+      Thread.sleep(3000);
+      waitedTime += 3000;
+      if (ctx.getPendingWritesForTest().size() == 0) {
+        return;
+      }
+    } while (waitedTime < maxWaitTime);
+
+    fail("Write can't finish.");
+  }
+
+  @Test
+  public void testWriteStableHow() throws IOException, InterruptedException {
+    HdfsConfiguration config = new HdfsConfiguration();
+    DFSClient client = null;
+    MiniDFSCluster cluster = null;
+    RpcProgramNfs3 nfsd;
+    SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
+    Mockito.when(securityHandler.getUser()).thenReturn(
+        System.getProperty("user.name"));
+
+    try {
+      cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+      cluster.waitActive();
+      client = new DFSClient(NameNode.getAddress(config), config);
+
+      // Start nfs
+      List<String> exports = new ArrayList<String>();
+      exports.add("/");
+      Nfs3 nfs3 = new Nfs3(exports, config);
+      nfs3.start(false);
+      nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
+
+      HdfsFileStatus status = client.getFileInfo("/");
+      FileHandle rootHandle = new FileHandle(status.getFileId());
+      // Create file1
+      CREATE3Request createReq = new CREATE3Request(rootHandle, "file1",
+          Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
+      XDR createXdr = new XDR();
+      createReq.serialize(createXdr);
+      CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
+          securityHandler, InetAddress.getLocalHost());
+      FileHandle handle = createRsp.getObjHandle();
+
+      // Test DATA_SYNC
+      byte[] buffer = new byte[10];
+      for (int i = 0; i < 10; i++) {
+        buffer[i] = (byte) i;
+      }
+      WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
+          WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
+      XDR writeXdr = new XDR();
+      writeReq.serialize(writeXdr);
+      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
+          InetAddress.getLocalHost());
+
+      waitWrite(nfsd, handle, 60000);
+
+      // Readback
+      READ3Request readReq = new READ3Request(handle, 0, 10);
+      XDR readXdr = new XDR();
+      readReq.serialize(readXdr);
+      READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
+          securityHandler, InetAddress.getLocalHost());
+
+      assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
+
+      // Test FILE_SYNC
+
+      // Create file2
+      CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2",
+          Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
+      XDR createXdr2 = new XDR();
+      createReq2.serialize(createXdr2);
+      CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(),
+          securityHandler, InetAddress.getLocalHost());
+      FileHandle handle2 = createRsp2.getObjHandle();
+
+      WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10,
+          WriteStableHow.FILE_SYNC, ByteBuffer.wrap(buffer));
+      XDR writeXdr2 = new XDR();
+      writeReq2.serialize(writeXdr2);
+      nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler,
+          InetAddress.getLocalHost());
+
+      waitWrite(nfsd, handle2, 60000);
+
+      // Readback
+      READ3Request readReq2 = new READ3Request(handle2, 0, 10);
+      XDR readXdr2 = new XDR();
+      readReq2.serialize(readXdr2);
+      READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(),
+          securityHandler, InetAddress.getLocalHost());
+
+      assertTrue(Arrays.equals(buffer, readRsp2.getData().array()));
+      // FILE_SYNC should sync the file size
+      status = client.getFileInfo("/file2");
+      assertTrue(status.getLen() == 10);
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Nov  8 01:44:24 2013
@@ -188,6 +188,8 @@ Trunk (Unreleased)
     HDFS-5386. Add feature documentation for datanode caching.
     (Colin Patrick McCabe via cnauroth)
 
+    HDFS-5326. add modifyDirective to cacheAdmin.  (cmccabe)
+
   OPTIMIZATIONS
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
 
@@ -354,6 +356,9 @@ Trunk (Unreleased)
 
     HDFS-5419. Fixup test-patch.sh warnings on HDFS-4949 branch. (wang)
 
+    HDFS-5468. CacheAdmin help command does not recognize commands  (Stephen
+    Chu via Colin Patrick McCabe)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -498,6 +503,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5427. Not able to read deleted files from snapshot directly under 
     snapshottable dir after checkpoint and NN restart. (Vinay via jing9)
 
+    HDFS-5443. Delete 0-sized block when deleting an under-construction file that 
+    is included in snapshot. (jing9)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -588,6 +596,10 @@ Release 2.2.1 - UNRELEASED
     HDFS-5458. Datanode failed volume threshold ignored if exception is thrown
     in getDataDirsFromURIs. (Mike Mellenthin via wang)
 
+    HDFS-5252. Stable write is not handled correctly in someplace. (brandonli)
+
+    HDFS-5364. Add OpenFileCtx cache. (brandonli)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1539245-1539896

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Fri Nov  8 01:44:24 2013
@@ -117,7 +117,6 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
 import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -2291,7 +2290,7 @@ public class DFSClient implements java.i
     }
   }
 
-  public PathBasedCacheDescriptor addPathBasedCacheDirective(
+  public long addPathBasedCacheDirective(
       PathBasedCacheDirective directive) throws IOException {
     checkOpen();
     try {
@@ -2301,21 +2300,31 @@ public class DFSClient implements java.i
     }
   }
   
-  public void removePathBasedCacheDescriptor(long id)
+  public void modifyPathBasedCacheDirective(
+      PathBasedCacheDirective directive) throws IOException {
+    checkOpen();
+    try {
+      namenode.modifyPathBasedCacheDirective(directive);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException();
+    }
+  }
+
+  public void removePathBasedCacheDirective(long id)
       throws IOException {
     checkOpen();
     try {
-      namenode.removePathBasedCacheDescriptor(id);
+      namenode.removePathBasedCacheDirective(id);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException();
     }
   }
   
-  public RemoteIterator<PathBasedCacheDescriptor> listPathBasedCacheDescriptors(
-      String pool, String path) throws IOException {
+  public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(
+      PathBasedCacheDirective filter) throws IOException {
     checkOpen();
     try {
-      return namenode.listPathBasedCacheDescriptors(0, pool, path);
+      return namenode.listPathBasedCacheDirectives(0, filter);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException();
     }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Nov  8 01:44:24 2013
@@ -211,9 +211,9 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES =
       "dfs.namenode.list.cache.pools.num.responses";
   public static final int     DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT = 100;
-  public static final String  DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES =
-      "dfs.namenode.list.cache.descriptors.num.responses";
-  public static final int     DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT = 100;
+  public static final String  DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES =
+      "dfs.namenode.list.cache.directives.num.responses";
+  public static final int     DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT = 100;
   public static final String  DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS =
       "dfs.namenode.path.based.cache.refresh.interval.ms";
   public static final long    DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT = 300000L;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Fri Nov  8 01:44:24 2013
@@ -68,7 +68,6 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
@@ -82,6 +81,7 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.util.Progressable;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 
 /****************************************************************
@@ -1586,57 +1586,74 @@ public class DistributedFileSystem exten
   /**
    * Add a new PathBasedCacheDirective.
    * 
-   * @param directive A PathBasedCacheDirectives to add
-   * @return PathBasedCacheDescriptor associated with the added directive
+   * @param directive A directive to add.
+   * @return the ID of the directive that was created.
    * @throws IOException if the directive could not be added
    */
-  public PathBasedCacheDescriptor addPathBasedCacheDirective(
+  public long addPathBasedCacheDirective(
       PathBasedCacheDirective directive) throws IOException {
+    Preconditions.checkNotNull(directive.getPath());
     Path path = new Path(getPathName(fixRelativePart(directive.getPath()))).
         makeQualified(getUri(), getWorkingDirectory());
-    return dfs.addPathBasedCacheDirective(new PathBasedCacheDirective.Builder().
-        setPath(path).
-        setReplication(directive.getReplication()).
-        setPool(directive.getPool()).
-        build());
+    return dfs.addPathBasedCacheDirective(
+        new PathBasedCacheDirective.Builder(directive).
+            setPath(path).
+            build());
   }
   
+  public void modifyPathBasedCacheDirective(
+      PathBasedCacheDirective directive) throws IOException {
+    if (directive.getPath() != null) {
+      directive = new PathBasedCacheDirective.Builder(directive).
+          setPath(new Path(getPathName(fixRelativePart(directive.getPath()))).
+              makeQualified(getUri(), getWorkingDirectory())).build();
+    }
+    dfs.modifyPathBasedCacheDirective(directive);
+  }
+
   /**
-   * Remove a PathBasedCacheDescriptor.
+   * Remove a PathBasedCacheDirective.
    * 
-   * @param descriptor PathBasedCacheDescriptor to remove
-   * @throws IOException if the descriptor could not be removed
+   * @param id identifier of the PathBasedCacheDirective to remove
+   * @throws IOException if the directive could not be removed
    */
-  public void removePathBasedCacheDescriptor(PathBasedCacheDescriptor descriptor)
+  public void removePathBasedCacheDirective(long id)
       throws IOException {
-    dfs.removePathBasedCacheDescriptor(descriptor.getEntryId());
+    dfs.removePathBasedCacheDirective(id);
   }
   
   /**
    * List the set of cached paths of a cache pool. Incrementally fetches results
    * from the server.
    * 
-   * @param pool The cache pool to list, or null to list all pools.
-   * @param path The path name to list, or null to list all paths.
-   * @return A RemoteIterator which returns PathBasedCacheDescriptor objects.
-   */
-  public RemoteIterator<PathBasedCacheDescriptor> listPathBasedCacheDescriptors(
-      String pool, final Path path) throws IOException {
-    String pathName = path != null ? getPathName(fixRelativePart(path)) : null;
-    final RemoteIterator<PathBasedCacheDescriptor> iter =
-        dfs.listPathBasedCacheDescriptors(pool, pathName);
-    return new RemoteIterator<PathBasedCacheDescriptor>() {
+   * @param filter Filter parameters to use when listing the directives, null to
+   *               list all directives visible to us.
+   * @return A RemoteIterator which returns PathBasedCacheDirective objects.
+   */
+  public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(
+      PathBasedCacheDirective filter) throws IOException {
+    if (filter == null) {
+      filter = new PathBasedCacheDirective.Builder().build();
+    }
+    if (filter.getPath() != null) {
+      filter = new PathBasedCacheDirective.Builder(filter).
+          setPath(filter.getPath().
+              makeQualified(getUri(), filter.getPath())).
+                build();
+    }
+    final RemoteIterator<PathBasedCacheDirective> iter =
+        dfs.listPathBasedCacheDirectives(filter);
+    return new RemoteIterator<PathBasedCacheDirective>() {
       @Override
       public boolean hasNext() throws IOException {
         return iter.hasNext();
       }
 
       @Override
-      public PathBasedCacheDescriptor next() throws IOException {
-        PathBasedCacheDescriptor desc = iter.next();
-        Path qualPath = desc.getPath().makeQualified(getUri(), path);
-        return new PathBasedCacheDescriptor(desc.getEntryId(), qualPath,
-            desc.getReplication(), desc.getPool());
+      public PathBasedCacheDirective next() throws IOException {
+        PathBasedCacheDirective desc = iter.next();
+        Path p = desc.getPath().makeQualified(getUri(), desc.getPath());
+        return new PathBasedCacheDirective.Builder(desc).setPath(p).build();
       }
     };
   }
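
Descriptors are gone from the public API: a directive's identity is now
the Long id carried on PathBasedCacheDirective itself, and
add/modify/remove/list all work in those terms. A usage sketch against
the new DistributedFileSystem methods, assuming a DistributedFileSystem
fs (the path and pool name are illustrative):

    PathBasedCacheDirective directive = new PathBasedCacheDirective.Builder()
        .setPath(new Path("/warm/data"))
        .setReplication((short) 2)
        .setPool("pool1")
        .build();
    long id = fs.addPathBasedCacheDirective(directive);

    // Modify by ID; the directive passed in must carry its ID.
    fs.modifyPathBasedCacheDirective(new PathBasedCacheDirective.Builder()
        .setId(id)
        .setReplication((short) 3)
        .build());

    // List with an optional filter (null lists everything visible to us),
    // then remove by ID.
    RemoteIterator<PathBasedCacheDirective> it =
        fs.listPathBasedCacheDirectives(null);
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
    fs.removePathBasedCacheDirective(id);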

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Fri Nov  8 01:44:24 2013
@@ -1100,35 +1100,46 @@ public interface ClientProtocol {
    * Add a PathBasedCache entry to the CacheManager.
    * 
    * @param directive A PathBasedCacheDirective to be added
-   * @return A PathBasedCacheDescriptor associated with the added directive
+   * @return A PathBasedCacheDirective associated with the added directive
    * @throws IOException if the directive could not be added
    */
   @AtMostOnce
-  public PathBasedCacheDescriptor addPathBasedCacheDirective(
+  public long addPathBasedCacheDirective(
       PathBasedCacheDirective directive) throws IOException;
 
   /**
-   * Remove a PathBasedCacheDescriptor from the CacheManager.
+   * Modify a PathBasedCache entry in the CacheManager.
    * 
-   * @param id of a PathBasedCacheDescriptor
-   * @throws IOException if the cache descriptor could not be removed
+   * @param directive The directive to modify.  Must contain
+   *                  a directive ID.
+   * @throws IOException if the directive could not be modified
    */
   @AtMostOnce
-  public void removePathBasedCacheDescriptor(Long id) throws IOException;
+  public void modifyPathBasedCacheDirective(
+      PathBasedCacheDirective directive) throws IOException;
+
+  /**
+   * Remove a PathBasedCacheDirective from the CacheManager.
+   * 
+   * @param id of a PathBasedCacheDirective
+   * @throws IOException if the cache directive could not be removed
+   */
+  @AtMostOnce
+  public void removePathBasedCacheDirective(long id) throws IOException;
 
   /**
    * List the set of cached paths of a cache pool. Incrementally fetches results
    * from the server.
    * 
    * @param prevId The last listed entry ID, or -1 if this is the first call to
-   *          listPathBasedCacheDescriptors.
-   * @param pool The cache pool to list, or null to list all pools.
-   * @param path The path name to list, or null to list all paths.
-   * @return A RemoteIterator which returns PathBasedCacheDescriptor objects.
+   *               listPathBasedCacheDirectives.
+   * @param filter Parameters to use to filter the list results, 
+   *               or null to display all directives visible to us.
+   * @return A RemoteIterator which returns PathBasedCacheDirective objects.
    */
   @Idempotent
-  public RemoteIterator<PathBasedCacheDescriptor> listPathBasedCacheDescriptors(
-      long prevId, String pool, String path) throws IOException;
+  public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(
+      long prevId, PathBasedCacheDirective filter) throws IOException;
 
   /**
    * Add a new cache pool.

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java Fri Nov  8 01:44:24 2013
@@ -17,32 +17,27 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.IOException;
-
-import com.google.common.base.Preconditions;
+import java.net.URI;
 
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError;
-import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError;
 
 /**
- * A directive to add a path to a cache pool.
+ * Describes a path-based cache directive.
  */
 @InterfaceStability.Evolving
 @InterfaceAudience.Public
 public class PathBasedCacheDirective {
-
   /**
    * A builder for creating new PathBasedCacheDirective instances.
    */
   public static class Builder {
+    private Long id;
     private Path path;
-    private short replication = (short)1;
+    private Short replication;
     private String pool;
 
     /**
@@ -51,7 +46,37 @@ public class PathBasedCacheDirective {
      * @return New PathBasedCacheDirective.
      */
     public PathBasedCacheDirective build() {
-      return new PathBasedCacheDirective(path, replication, pool);
+      return new PathBasedCacheDirective(id, path, replication, pool);
+    }
+
+    /**
+     * Creates an empty builder.
+     */
+    public Builder() {
+    }
+
+    /**
+     * Creates a builder with all elements set to the same values as the
+     * given PathBasedCacheDirective.
+     */
+    public Builder(PathBasedCacheDirective directive) {
+      this.id = directive.getId();
+      // deep-copy URI
+      URI uri = directive.getPath().toUri();
+      this.path = new Path(uri.getScheme(), uri.getAuthority(), uri.getPath());
+      this.replication = directive.getReplication();
+      this.pool = directive.getPool();
+    }
+
+    /**
+     * Sets the id used in this request.
+     * 
+     * @param id The id used in this request.
+     * @return This builder, for call chaining.
+     */
+    public Builder setId(Long id) {
+      this.id = id;
+      return this;
     }
 
     /**
@@ -71,7 +96,7 @@ public class PathBasedCacheDirective {
      * @param replication The replication used in this request.
      * @return This builder, for call chaining.
      */
-    public Builder setReplication(short replication) {
+    public Builder setReplication(Short replication) {
       this.replication = replication;
       return this;
     }
@@ -88,10 +113,25 @@ public class PathBasedCacheDirective {
     }
   }
 
+  private final Long id;
   private final Path path;
-  private final short replication;
+  private final Short replication;
   private final String pool;
 
+  PathBasedCacheDirective(Long id, Path path, Short replication, String pool) {
+    this.id = id;
+    this.path = path;
+    this.replication = replication;
+    this.pool = pool;
+  }
+
+  /**
+   * @return The ID of this directive.
+   */
+  public Long getId() {
+    return id;
+  }
+
   /**
    * @return The path used in this request.
    */
@@ -102,7 +142,7 @@ public class PathBasedCacheDirective {
   /**
    * @return The number of times the block should be cached.
    */
-  public short getReplication() {
+  public Short getReplication() {
     return replication;
   }
 
@@ -113,25 +153,6 @@ public class PathBasedCacheDirective {
     return pool;
   }
 
-  /**
-   * Check if this PathBasedCacheDirective is valid.
-   * 
-   * @throws IOException
-   *     If this PathBasedCacheDirective is not valid.
-   */
-  public void validate() throws IOException {
-    if (!DFSUtil.isValidName(path.toUri().getPath())) {
-      throw new InvalidPathNameError(this);
-    }
-    if (replication <= 0) {
-      throw new IOException("Tried to request a cache replication " +
-          "factor of " + replication + ", but that is less than 1.");
-    }
-    if (pool.isEmpty()) {
-      throw new InvalidPoolNameError(this);
-    }
-  }
-
   @Override
   public boolean equals(Object o) {
     if (o == null) {
@@ -141,7 +162,8 @@ public class PathBasedCacheDirective {
       return false;
     }
     PathBasedCacheDirective other = (PathBasedCacheDirective)o;
-    return new EqualsBuilder().append(getPath(), other.getPath()).
+    return new EqualsBuilder().append(getId(), other.getId()).
+        append(getPath(), other.getPath()).
         append(getReplication(), other.getReplication()).
         append(getPool(), other.getPool()).
         isEquals();
@@ -149,34 +171,35 @@ public class PathBasedCacheDirective {
 
   @Override
   public int hashCode() {
-    return new HashCodeBuilder().append(getPath()).
+    return new HashCodeBuilder().append(id).
+        append(path).
         append(replication).
-        append(getPool()).
+        append(pool).
         hashCode();
   }
 
   @Override
   public String toString() {
     StringBuilder builder = new StringBuilder();
-    builder.append("{ path:").append(path).
-      append(", replication:").append(replication).
-      append(", pool:").append(pool).
-      append(" }");
+    builder.append("{");
+    String prefix = "";
+    if (id != null) {
+      builder.append(prefix).append("id: ").append(id);
+      prefix = ",";
+    }
+    if (path != null) {
+      builder.append(prefix).append("path: ").append(path);
+      prefix = ",";
+    }
+    if (replication != null) {
+      builder.append(prefix).append("replication: ").append(replication);
+      prefix = ",";
+    }
+    if (pool != null) {
+      builder.append(prefix).append("pool: ").append(pool);
+      prefix = ",";
+    }
+    builder.append("}");
     return builder.toString();
   }
-
-  /**
-   * Protected constructor.  Callers use Builder to create new instances.
-   * 
-   * @param path The path used in this request.
-   * @param replication The replication used in this request.
-   * @param pool The pool used in this request.
-   */
-  protected PathBasedCacheDirective(Path path, short replication, String pool) {
-    Preconditions.checkNotNull(path);
-    Preconditions.checkNotNull(pool);
-    this.path = path;
-    this.replication = replication;
-    this.pool = pool;
-  }
 };

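All four directive fields are now boxed and nullable, which is what lets a partially-specified directive double as a list filter, and the rewritten toString() prints only the fields that are set. A small sketch with a hypothetical pool name; note that the copy-builder above dereferences getPath(), so it assumes the source directive has a non-null path:

    PathBasedCacheDirective filter = new PathBasedCacheDirective.Builder().
        setPool("research").    // id, path and replication deliberately left unset
        build();
    System.out.println(filter); // prints {pool: research}
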
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java Fri Nov  8 01:44:24 2013
@@ -64,6 +64,15 @@ public final class PathBasedCacheEntry {
     return replication;
   }
 
+  public PathBasedCacheDirective toDirective() {
+    return new PathBasedCacheDirective.Builder().
+        setId(entryId).
+        setPath(new Path(path)).
+        setReplication(replication).
+        setPool(pool.getPoolName()).
+        build();
+  }
+  
   @Override
   public String toString() {
     StringBuilder builder = new StringBuilder();
@@ -75,11 +84,6 @@ public final class PathBasedCacheEntry {
     return builder.toString();
   }
 
-  public PathBasedCacheDescriptor getDescriptor() {
-    return new PathBasedCacheDescriptor(entryId, new Path(path), replication,
-        pool.getPoolName());
-  }
-  
   @Override
   public boolean equals(Object o) {
     if (o == null) { return false; }

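With getDescriptor() removed, server code that previously handed out descriptors maps entries to the public directive type via toDirective(). A sketch of that mapping, assuming the entries come from an iterator supplied by the CacheManager:

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    static List<PathBasedCacheDirective> toDirectives(
        Iterator<PathBasedCacheEntry> entries) {
      List<PathBasedCacheDirective> directives =
          new ArrayList<PathBasedCacheDirective>();
      while (entries.hasNext()) {
        directives.add(entries.next().toDirective()); // replaces getDescriptor()
      }
      return directives;
    }
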
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Fri Nov  8 01:44:24 2013
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.Options.Rena
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -36,11 +35,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
 import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
-import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException;
-import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException;
-import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
@@ -111,24 +106,25 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsElementProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesElementProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyPathBasedCacheDirectiveRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyPathBasedCacheDirectiveResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDirectiveRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDirectiveResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
@@ -177,7 +173,6 @@ import org.apache.hadoop.security.proto.
 import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
 import org.apache.hadoop.security.token.Token;
-
 import org.apache.commons.lang.StringUtils;
 
 import com.google.common.primitives.Shorts;
@@ -1044,69 +1039,64 @@ public class ClientNamenodeProtocolServe
       RpcController controller, AddPathBasedCacheDirectiveRequestProto request)
       throws ServiceException {
     try {
-      PathBasedCacheDirectiveProto proto = request.getDirective();
-      if (StringUtils.isEmpty(proto.getPath())) {
-        throw new EmptyPathError();
-      }
-      PathBasedCacheDirective directive = new PathBasedCacheDirective.Builder().
-          setPath(new Path(proto.getPath())).
-          setReplication(Shorts.checkedCast(proto.getReplication())).
-          setPool(proto.getPool()).
-          build();
-      PathBasedCacheDescriptor descriptor =
-          server.addPathBasedCacheDirective(directive);
-      AddPathBasedCacheDirectiveResponseProto.Builder builder =
-         AddPathBasedCacheDirectiveResponseProto.newBuilder();
-      builder.setDescriptorId(descriptor.getEntryId());
-      return builder.build();
+      return AddPathBasedCacheDirectiveResponseProto.newBuilder().
+              setId(server.addPathBasedCacheDirective(
+                  PBHelper.convert(request.getInfo()))).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
   }
 
   @Override
-  public RemovePathBasedCacheDescriptorResponseProto removePathBasedCacheDescriptor(
-      RpcController controller,
-      RemovePathBasedCacheDescriptorRequestProto request)
+  public ModifyPathBasedCacheDirectiveResponseProto modifyPathBasedCacheDirective(
+      RpcController controller, ModifyPathBasedCacheDirectiveRequestProto request)
       throws ServiceException {
     try {
-      server.removePathBasedCacheDescriptor(request.getDescriptorId());
-      RemovePathBasedCacheDescriptorResponseProto.Builder builder =
-         RemovePathBasedCacheDescriptorResponseProto.newBuilder();
-      return builder.build();
+      server.modifyPathBasedCacheDirective(
+          PBHelper.convert(request.getInfo()));
+      return ModifyPathBasedCacheDirectiveResponseProto.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public RemovePathBasedCacheDirectiveResponseProto
+      removePathBasedCacheDirective(RpcController controller,
+          RemovePathBasedCacheDirectiveRequestProto request)
+              throws ServiceException {
+    try {
+      server.removePathBasedCacheDirective(request.getId());
+      return RemovePathBasedCacheDirectiveResponseProto.
+          newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
   }
 
   @Override
-  public ListPathBasedCacheDescriptorsResponseProto listPathBasedCacheDescriptors(
-      RpcController controller, ListPathBasedCacheDescriptorsRequestProto request)
+  public ListPathBasedCacheDirectivesResponseProto listPathBasedCacheDirectives(
+      RpcController controller, ListPathBasedCacheDirectivesRequestProto request)
           throws ServiceException {
     try {
-      RemoteIterator<PathBasedCacheDescriptor> iter =
-         server.listPathBasedCacheDescriptors(request.getPrevId(),
-             request.hasPool() ? request.getPool() : null,
-             request.hasPath() ? request.getPath() : null);
-      ListPathBasedCacheDescriptorsResponseProto.Builder builder =
-          ListPathBasedCacheDescriptorsResponseProto.newBuilder();
+      PathBasedCacheDirective filter =
+          PBHelper.convert(request.getFilter());
+      RemoteIterator<PathBasedCacheDirective> iter =
+         server.listPathBasedCacheDirectives(request.getPrevId(), filter);
+      ListPathBasedCacheDirectivesResponseProto.Builder builder =
+          ListPathBasedCacheDirectivesResponseProto.newBuilder();
       long prevId = 0;
       while (iter.hasNext()) {
-        PathBasedCacheDescriptor directive = iter.next();
+        PathBasedCacheDirective directive = iter.next();
         builder.addElements(
-            ListPathBasedCacheDescriptorsElementProto.newBuilder().
-              setId(directive.getEntryId()).
-              setPath(directive.getPath().toUri().getPath()).
-              setReplication(directive.getReplication()).
-              setPool(directive.getPool()));
-        prevId = directive.getEntryId();
+            ListPathBasedCacheDirectivesElementProto.newBuilder().
+                setInfo(PBHelper.convert(directive)));
+        prevId = directive.getId();
       }
       if (prevId == 0) {
         builder.setHasMore(false);
       } else {
-        iter = server.listPathBasedCacheDescriptors(prevId, 
-            request.hasPool() ? request.getPool() : null,
-            request.hasPath() ? request.getPath() : null);
+        iter = server.listPathBasedCacheDirectives(prevId, filter);
         builder.setHasMore(iter.hasNext());
       }
       return builder.build();

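The handler above pages by the last returned ID and computes hasMore by re-running the listing once more from that ID. A raw-protobuf consumption sketch of the same contract, assuming an rpcProxy stub for this service; 0 is used as the before-first-ID sentinel, matching the handler's own prevId bookkeeping:

    static void listAll(ClientNamenodeProtocolPB rpcProxy)
        throws ServiceException {
      long prevId = 0;
      boolean hasMore = true;
      while (hasMore) {
        ListPathBasedCacheDirectivesResponseProto resp =
            rpcProxy.listPathBasedCacheDirectives(null,
                ListPathBasedCacheDirectivesRequestProto.newBuilder().
                    setPrevId(prevId).
                    // an empty directive filters nothing out
                    setFilter(PBHelper.convert(
                        new PathBasedCacheDirective.Builder().build())).
                    build());
        for (ListPathBasedCacheDirectivesElementProto element :
            resp.getElementsList()) {
          prevId = PBHelper.convert(element.getInfo()).getId();
        }
        hasMore = resp.getHasMore();
      }
    }
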
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Fri Nov  8 01:44:24 2013
@@ -51,7 +51,6 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
 import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -101,17 +100,16 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsElementProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyPathBasedCacheDirectiveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDirectiveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
@@ -1008,55 +1006,53 @@ public class ClientNamenodeProtocolTrans
   }
 
   @Override
-  public PathBasedCacheDescriptor addPathBasedCacheDirective(
+  public long addPathBasedCacheDirective(
       PathBasedCacheDirective directive) throws IOException {
     try {
-      AddPathBasedCacheDirectiveRequestProto.Builder builder =
-          AddPathBasedCacheDirectiveRequestProto.newBuilder();
-      builder.setDirective(PathBasedCacheDirectiveProto.newBuilder()
-          .setPath(directive.getPath().toUri().getPath())
-          .setReplication(directive.getReplication())
-          .setPool(directive.getPool())
-          .build());
-      AddPathBasedCacheDirectiveResponseProto result = 
-          rpcProxy.addPathBasedCacheDirective(null, builder.build());
-      return new PathBasedCacheDescriptor(result.getDescriptorId(),
-          directive.getPath(), directive.getReplication(),
-          directive.getPool());
+      return rpcProxy.addPathBasedCacheDirective(null, 
+              AddPathBasedCacheDirectiveRequestProto.newBuilder().
+                  setInfo(PBHelper.convert(directive)).build()).getId();
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
   }
 
   @Override
-  public void removePathBasedCacheDescriptor(Long id)
+  public void modifyPathBasedCacheDirective(
+      PathBasedCacheDirective directive) throws IOException {
+    try {
+      rpcProxy.modifyPathBasedCacheDirective(null,
+          ModifyPathBasedCacheDirectiveRequestProto.newBuilder().
+              setInfo(PBHelper.convert(directive)).build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void removePathBasedCacheDirective(long id)
       throws IOException {
     try {
-      RemovePathBasedCacheDescriptorRequestProto.Builder builder =
-          RemovePathBasedCacheDescriptorRequestProto.newBuilder();
-      builder.setDescriptorId(id);
-      rpcProxy.removePathBasedCacheDescriptor(null, builder.build());
+      rpcProxy.removePathBasedCacheDirective(null,
+          RemovePathBasedCacheDirectiveRequestProto.newBuilder().
+              setId(id).build());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
   }
 
   private static class BatchedPathBasedCacheEntries
-      implements BatchedEntries<PathBasedCacheDescriptor> {
-    private ListPathBasedCacheDescriptorsResponseProto response;
+      implements BatchedEntries<PathBasedCacheDirective> {
+    private ListPathBasedCacheDirectivesResponseProto response;
 
-    BatchedPathBasedCacheEntries(ListPathBasedCacheDescriptorsResponseProto response) {
+    BatchedPathBasedCacheEntries(
+        ListPathBasedCacheDirectivesResponseProto response) {
       this.response = response;
     }
 
     @Override
-    public PathBasedCacheDescriptor get(int i) {
-      ListPathBasedCacheDescriptorsElementProto elementProto =
-        response.getElements(i);
-      return new PathBasedCacheDescriptor(elementProto.getId(),
-          new Path(elementProto.getPath()),
-          Shorts.checkedCast(elementProto.getReplication()),
-          elementProto.getPool());
+    public PathBasedCacheDirective get(int i) {
+      return PBHelper.convert(response.getElements(i).getInfo());
     }
 
     @Override
@@ -1071,31 +1067,25 @@ public class ClientNamenodeProtocolTrans
   }
 
   private class PathBasedCacheEntriesIterator
-      extends BatchedRemoteIterator<Long, PathBasedCacheDescriptor> {
-    private final String pool;
-    private final String path;
+      extends BatchedRemoteIterator<Long, PathBasedCacheDirective> {
+    private final PathBasedCacheDirective filter;
 
-    public PathBasedCacheEntriesIterator(long prevKey, String pool, String path) {
+    public PathBasedCacheEntriesIterator(long prevKey,
+        PathBasedCacheDirective filter) {
       super(prevKey);
-      this.pool = pool;
-      this.path = path;
+      this.filter = filter;
     }
 
     @Override
-    public BatchedEntries<PathBasedCacheDescriptor> makeRequest(
+    public BatchedEntries<PathBasedCacheDirective> makeRequest(
         Long nextKey) throws IOException {
-      ListPathBasedCacheDescriptorsResponseProto response;
+      ListPathBasedCacheDirectivesResponseProto response;
       try {
-        ListPathBasedCacheDescriptorsRequestProto.Builder builder =
-            ListPathBasedCacheDescriptorsRequestProto.newBuilder().setPrevId(nextKey);
-        if (pool != null) {
-          builder.setPool(pool);
-        }
-        if (path != null) {
-          builder.setPath(path);
-        }
-        ListPathBasedCacheDescriptorsRequestProto req = builder.build();
-        response = rpcProxy.listPathBasedCacheDescriptors(null, req);
+        response = rpcProxy.listPathBasedCacheDirectives(null,
+            ListPathBasedCacheDirectivesRequestProto.newBuilder().
+                setPrevId(nextKey).
+                setFilter(PBHelper.convert(filter)).
+                build());
       } catch (ServiceException e) {
         throw ProtobufHelper.getRemoteException(e);
       }
@@ -1103,15 +1093,19 @@ public class ClientNamenodeProtocolTrans
     }
 
     @Override
-    public Long elementToPrevKey(PathBasedCacheDescriptor element) {
-      return element.getEntryId();
+    public Long elementToPrevKey(PathBasedCacheDirective element) {
+      return element.getId();
     }
   }
 
   @Override
-  public RemoteIterator<PathBasedCacheDescriptor> listPathBasedCacheDescriptors(long prevId,
-      String pool, String path) throws IOException {
-    return new PathBasedCacheEntriesIterator(prevId, pool, path);
+  public RemoteIterator<PathBasedCacheDirective>
+      listPathBasedCacheDirectives(long prevId,
+          PathBasedCacheDirective filter) throws IOException {
+    if (filter == null) {
+      filter = new PathBasedCacheDirective.Builder().build();
+    }
+    return new PathBasedCacheEntriesIterator(prevId, filter);
   }
 
   @Override

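Because listPathBasedCacheDirectives substitutes an empty directive for a null filter, passing null lists every directive visible to the caller. A usage sketch against an already-constructed translator:

    import java.io.IOException;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;

    static void dumpAll(ClientNamenodeProtocolTranslatorPB translator)
        throws IOException {
      RemoteIterator<PathBasedCacheDirective> it =
          translator.listPathBasedCacheDirectives(0, null); // null == no filter
      while (it.hasNext()) {
        System.out.println(it.next()); // uses the null-tolerant toString()
      }
    }
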
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Fri Nov  8 01:44:24 2013
@@ -29,6 +29,7 @@ import com.google.common.base.Preconditi
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -57,6 +59,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto;
@@ -156,6 +159,7 @@ import org.apache.hadoop.util.DataChecks
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import com.google.common.primitives.Shorts;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
 
@@ -1649,6 +1653,45 @@ public class PBHelper {
     return DataChecksum.Type.valueOf(type.getNumber());
   }
 
+  public static PathBasedCacheDirectiveInfoProto convert
+      (PathBasedCacheDirective directive) {
+    PathBasedCacheDirectiveInfoProto.Builder builder = 
+        PathBasedCacheDirectiveInfoProto.newBuilder();
+    if (directive.getId() != null) {
+      builder.setId(directive.getId());
+    }
+    if (directive.getPath() != null) {
+      builder.setPath(directive.getPath().toUri().getPath());
+    }
+    if (directive.getReplication() != null) {
+      builder.setReplication(directive.getReplication());
+    }
+    if (directive.getPool() != null) {
+      builder.setPool(directive.getPool());
+    }
+    return builder.build();
+  }
+
+  public static PathBasedCacheDirective convert
+      (PathBasedCacheDirectiveInfoProto proto) {
+    PathBasedCacheDirective.Builder builder =
+        new PathBasedCacheDirective.Builder();
+    if (proto.hasId()) {
+      builder.setId(proto.getId());
+    }
+    if (proto.hasPath()) {
+      builder.setPath(new Path(proto.getPath()));
+    }
+    if (proto.hasReplication()) {
+      builder.setReplication(Shorts.checkedCast(
+          proto.getReplication()));
+    }
+    if (proto.hasPool()) {
+      builder.setPool(proto.getPool());
+    }
+    return builder.build();
+  }
+  
   public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
     return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
   }

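The two convert() overloads are intended to round-trip: each set field survives the PathBasedCacheDirectiveInfoProto hop, and unset fields stay unset thanks to the has*() guards. A quick sanity sketch with hypothetical values:

    PathBasedCacheDirective before = new PathBasedCacheDirective.Builder().
        setId(42L).                          // hypothetical ID
        setPath(new Path("/datasets/foo")).  // hypothetical path
        setReplication((short) 2).
        setPool("research").                 // hypothetical pool
        build();
    PathBasedCacheDirectiveInfoProto proto = PBHelper.convert(before);
    PathBasedCacheDirective roundTripped = PBHelper.convert(proto);
    // equals() now compares id, path, replication and pool.
    assert before.equals(roundTripped);
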

