hadoop-hdfs-commits mailing list archives

From: sur...@apache.org
Subject: svn commit: r1078146 - in /hadoop/hdfs/branches/HDFS-1052: ./ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/...
Date: Fri, 04 Mar 2011 21:40:30 GMT
Author: suresh
Date: Fri Mar  4 21:40:29 2011
New Revision: 1078146

URL: http://svn.apache.org/viewvc?rev=1078146&view=rev
Log:
Merging -r1035145:r1035410 from trunk to federation branch

Added:
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
      - copied unchanged from r1035386, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
Modified:
    hadoop/hdfs/branches/HDFS-1052/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/build.xml   (contents, props changed)
    hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/java/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
    hadoop/hdfs/branches/HDFS-1052/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsSymlink.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
    hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary/   (props changed)

Propchange: hadoop/hdfs/branches/HDFS-1052/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 21:40:29 2011
@@ -1,4 +1,4 @@
 /hadoop/core/branches/branch-0.19/hdfs:713112
 /hadoop/hdfs/branches/HDFS-265:796829-820463
 /hadoop/hdfs/branches/branch-0.21:820487
-/hadoop/hdfs/trunk:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1036738,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1036738,1052823,1060619,1061067,1062020

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=1078146&r1=1078145&r2=1078146&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Fri Mar  4 21:40:29 2011
@@ -292,6 +292,12 @@ Release 0.22.0 - Unreleased
     HDFS-811. Add metrics, failure reporting and additional tests for HDFS-457.
     (eli)
 
+    HDFS-895. Allow hflush/sync to occur in parallel with new writes
+    to the file. (Todd Lipcon via hairong)
+
+    HDFS-1500. TestOfflineImageViewer failing on trunk. (Todd Lipcon
+    via hairong)
+
   IMPROVEMENTS
 
     HDFS-1304. Add a new unit test for HftpFileSystem.open(..).  (szetszwo)
@@ -425,6 +431,8 @@ Release 0.22.0 - Unreleased
 
     HDFS-556. Provide info on failed volumes in the web ui. (eli)
 
+    HDFS-697. Enable asserts for tests by default. (eli)
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)
@@ -576,9 +584,14 @@ Release 0.22.0 - Unreleased
     other's service policies.  (Aaron T. Myers via tomwhite)
 
     HDFS-1440. Fix TestComputeInvalidateWork failure. (suresh)
+ 
+    HDFS-1498. FSDirectory#unprotectedConcat calls setModificationTime 
+    on a file. (eli)
 
     HDFS-1625. Ignore disk space values in TestDataNodeMXBean.  (szetszwo)
 
+    HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
+
 Release 0.21.1 - Unreleased
 
     HDFS-1411. Correct backup node startup command in hdfs user guide.
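
The HDFS-895 entry above is the substantive change in this merge: hflush() no longer drains the whole data/ack queue under the stream lock. Instead it records the last queued packet sequence number and waits only for that ack, so other threads can keep writing while a flush is in flight. A minimal sketch of that pattern, using illustrative names rather than the actual HDFS classes:

    // Illustrative sketch of the wait-for-acked-seqno pattern from HDFS-895:
    // writers queue packets with increasing seqnos, acks arrive in order,
    // and a flush waits only for the seqno it observed at entry.
    class AckedQueue {
      private long lastQueuedSeqno = -1;
      private long lastAckedSeqno = -1;

      synchronized long queuePacket() {
        return ++lastQueuedSeqno;     // caller hands the packet to the pipeline
      }

      synchronized void ackReceived(long seqno) {
        lastAckedSeqno = seqno;       // acks arrive in seqno order
        notifyAll();                  // wake any flusher waiting below
      }

      synchronized void flush() throws InterruptedException {
        long toWaitFor = lastQueuedSeqno;   // snapshot at entry; later
        while (lastAckedSeqno < toWaitFor) { // packets don't extend the wait
          wait(1000);  // releases the monitor, so writers keep queueing
        }
      }
    }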

Modified: hadoop/hdfs/branches/HDFS-1052/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/build.xml?rev=1078146&r1=1078145&r2=1078146&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/build.xml (original)
+++ hadoop/hdfs/branches/HDFS-1052/build.xml Fri Mar  4 21:40:29 2011
@@ -104,6 +104,7 @@
 
   <property name="test.hdfs.rpc.engine" value=""/>
   <property name="test.libhdfs.dir" value="${test.build.dir}/libhdfs"/>
+  <property name="test.junit.jvmargs" value="-ea" />
 
   <property name="web.src.dir" value="${basedir}/src/web"/>
   <property name="src.webapps" value="${basedir}/src/webapps"/>
@@ -592,6 +593,7 @@
         maxmemory="${test.junit.maxmemory}"
         dir="${basedir}" timeout="${test.timeout}"
         errorProperty="tests.failed" failureProperty="tests.failed">
+        <jvmarg value="${test.junit.jvmargs}" />
         <sysproperty key="test.build.data" value="@{test.dir}/data"/>
         <sysproperty key="test.cache.data" value="${test.cache.data}"/>     
         <sysproperty key="test.debug.data" value="${test.debug.data}"/>
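
The build.xml hunks above implement HDFS-697 from the changelog: the new test.junit.jvmargs property defaults to -ea and is passed to the forked JUnit JVM, so Java assert statements (including the ones this commit adds to DFSOutputStream) actually run during tests. For reference, assertions compile into the bytecode but are skipped at runtime unless the JVM is started with -ea; a plain-JVM demonstration, not HDFS code:

    // Asserts are no-ops by default and only fire under -ea.
    public class AssertDemo {
      public static void main(String[] args) {
        assert args.length > 0 : "no arguments given";
        System.out.println("reached the end");
      }
    }
    // $ java AssertDemo       -> prints "reached the end" (assert skipped)
    // $ java -ea AssertDemo   -> throws AssertionError: no arguments given

Being an ordinary Ant property, it should also be overridable from the command line, e.g. ant test -Dtest.junit.jvmargs=-da; that invocation is an assumption based on standard Ant behavior, not something shown in this diff.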

Propchange: hadoop/hdfs/branches/HDFS-1052/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 21:40:29 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/build.xml:779102
 /hadoop/hdfs/branches/HDFS-265/build.xml:796829-820463
 /hadoop/hdfs/branches/branch-0.21/build.xml:820487
-/hadoop/hdfs/trunk/build.xml:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/build.xml:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1052823,1060619,1061067,1062020

Propchange: hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 21:40:29 2011
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
-/hadoop/hdfs/trunk/src/c++/libhdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/c++/libhdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1052823,1060619,1061067,1062020

Propchange: hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 21:40:29 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/contrib/hdfsproxy:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy:820487
-/hadoop/hdfs/trunk/src/contrib/hdfsproxy:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/contrib/hdfsproxy:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1052823,1060619,1061067,1062020

Propchange: hadoop/hdfs/branches/HDFS-1052/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 21:40:29 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
 /hadoop/hdfs/branches/HDFS-265/src/java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java:820487
-/hadoop/hdfs/trunk/src/java:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/java:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1052823,1060619,1061067,1062020

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1078146&r1=1078145&r2=1078146&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Fri Mar  4 21:40:29 2011
@@ -114,12 +114,14 @@ class DFSOutputStream extends FSOutputSu
   private Packet currentPacket = null;
   private DataStreamer streamer;
   private long currentSeqno = 0;
+  private long lastQueuedSeqno = -1;
+  private long lastAckedSeqno = -1;
   private long bytesCurBlock = 0; // bytes writen in current block
   private int packetSize = 0; // write packet size, including the header.
   private int chunksPerPacket = 0;
   private volatile IOException lastException = null;
   private long artificialSlowdown = 0;
-  private long lastFlushOffset = -1; // offset when flush was invoked
+  private long lastFlushOffset = 0; // offset when flush was invoked
   //persist blocks on namenode
   private final AtomicBoolean persistBlocks = new AtomicBoolean(false);
   private volatile boolean appendChunk = false;   // appending to existing partial block
@@ -433,6 +435,7 @@ class DFSOutputStream extends FSOutputSu
               one = dataQueue.getFirst(); // regular data packet
             }
           }
+          assert one != null;
 
           // get new block from namenode.
           if (stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
@@ -669,6 +672,7 @@ class DFSOutputStream extends FSOutputSu
             block.setNumBytes(one.getLastByteOffsetBlock());
 
             synchronized (dataQueue) {
+              lastAckedSeqno = seqno;
               ackQueue.removeFirst();
               dataQueue.notifyAll();
             }
@@ -719,8 +723,21 @@ class DFSOutputStream extends FSOutputSu
       
       if (!streamerClosed && dfsClient.clientRunning) {
         if (stage == BlockConstructionStage.PIPELINE_CLOSE) {
+
+          // If we had an error while closing the pipeline, we go through a fast-path
+          // where the BlockReceiver does not run. Instead, the DataNode just finalizes
+          // the block immediately during the 'connect ack' process. So, we want to pull
+          // the end-of-block packet from the dataQueue, since we don't actually have
+          // a true pipeline to send it over.
+          //
+          // We also need to set lastAckedSeqno to the end-of-block Packet's seqno, so that
+          // a client waiting on close() will be aware that the flush finished.
           synchronized (dataQueue) {
-            dataQueue.remove();  // remove the end of block packet
+            assert dataQueue.size() == 1;
+            Packet endOfBlockPacket = dataQueue.remove();  // remove the end of block packet
+            assert endOfBlockPacket.lastPacketInBlock;
+            assert lastAckedSeqno == endOfBlockPacket.seqno - 1;
+            lastAckedSeqno = endOfBlockPacket.seqno;
             dataQueue.notifyAll();
           }
           endBlock();
@@ -1130,14 +1147,20 @@ class DFSOutputStream extends FSOutputSu
     }
   }
 
-  private void queuePacket(Packet packet) {
+  private void queueCurrentPacket() {
     synchronized (dataQueue) {
-      dataQueue.addLast(packet);
+      if (currentPacket == null) return;
+      dataQueue.addLast(currentPacket);
+      lastQueuedSeqno = currentPacket.seqno;
+      if (DFSClient.LOG.isDebugEnabled()) {
+        DFSClient.LOG.debug("Queued packet " + currentPacket.seqno);
+      }
+      currentPacket = null;
       dataQueue.notifyAll();
     }
   }
 
-  private void waitAndQueuePacket(Packet packet) throws IOException {
+  private void waitAndQueueCurrentPacket() throws IOException {
     synchronized (dataQueue) {
       // If queue is full, then wait till we have enough space
       while (!closed && dataQueue.size() + ackQueue.size()  > MAX_PACKETS) {
@@ -1147,7 +1170,7 @@ class DFSOutputStream extends FSOutputSu
         }
       }
       isClosed();
-      queuePacket(packet);
+      queueCurrentPacket();
     }
   }
 
@@ -1201,8 +1224,7 @@ class DFSOutputStream extends FSOutputSu
             ", blockSize=" + blockSize +
             ", appendChunk=" + appendChunk);
       }
-      waitAndQueuePacket(currentPacket);
-      currentPacket = null;
+      waitAndQueueCurrentPacket();
 
       // If the reopened file did not end at chunk boundary and the above
       // write filled up its partial chunk. Tell the summer to generate full 
@@ -1224,10 +1246,9 @@ class DFSOutputStream extends FSOutputSu
         currentPacket = new Packet(PacketHeader.PKT_HEADER_LEN, 0, 
             bytesCurBlock);
         currentPacket.lastPacketInBlock = true;
-        waitAndQueuePacket(currentPacket);
-        currentPacket = null;
+        waitAndQueueCurrentPacket();
         bytesCurBlock = 0;
-        lastFlushOffset = -1;
+        lastFlushOffset = 0;
       }
     }
   }
@@ -1244,60 +1265,88 @@ class DFSOutputStream extends FSOutputSu
    * but not neccessary on the DN's OS buffers. 
    *
    * It is a synchronous operation. When it returns,
-   * it gurantees that flushed data become visible to new readers. 
+   * it guarantees that flushed data become visible to new readers. 
    * It is not guaranteed that data has been flushed to 
    * persistent store on the datanode. 
    * Block allocations are persisted on namenode.
    */
   @Override
-  public synchronized void hflush() throws IOException {
+  public void hflush() throws IOException {
     dfsClient.checkOpen();
     isClosed();
     try {
-      /* Record current blockOffset. This might be changed inside
-       * flushBuffer() where a partial checksum chunk might be flushed.
-       * After the flush, reset the bytesCurBlock back to its previous value,
-       * any partial checksum chunk will be sent now and in next packet.
-       */
-      long saveOffset = bytesCurBlock;
-
-      // flush checksum buffer, but keep checksum buffer intact
-      flushBuffer(true);
-
-      if(DFSClient.LOG.isDebugEnabled()) {
-        DFSClient.LOG.debug("DFSClient flush() : saveOffset " + saveOffset +  
+      long toWaitFor;
+      synchronized (this) {
+        /* Record current blockOffset. This might be changed inside
+         * flushBuffer() where a partial checksum chunk might be flushed.
+         * After the flush, reset the bytesCurBlock back to its previous value,
+         * any partial checksum chunk will be sent now and in next packet.
+         */
+        long saveOffset = bytesCurBlock;
+        Packet oldCurrentPacket = currentPacket;
+        // flush checksum buffer, but keep checksum buffer intact
+        flushBuffer(true);
+        // bytesCurBlock potentially incremented if there was buffered data
+
+        if (DFSClient.LOG.isDebugEnabled()) {
+          DFSClient.LOG.debug(
+            "DFSClient flush() : saveOffset " + saveOffset +  
             " bytesCurBlock " + bytesCurBlock +
             " lastFlushOffset " + lastFlushOffset);
-      }
-      
-      // Flush only if we haven't already flushed till this offset.
-      if (lastFlushOffset != bytesCurBlock) {
+        }
+        // Flush only if we haven't already flushed till this offset.
+        if (lastFlushOffset != bytesCurBlock) {
+          assert bytesCurBlock > lastFlushOffset;
+          // record the valid offset of this flush
+          lastFlushOffset = bytesCurBlock;
+          waitAndQueueCurrentPacket();
+        } else {
+          // We already flushed up to this offset.
+          // This means that we haven't written anything since the last flush
+          // (or the beginning of the file). Hence, we should not have any
+          // packet queued prior to this call, since the last flush set
+          // currentPacket = null.
+          assert oldCurrentPacket == null :
+            "Empty flush should not occur with a currentPacket";
 
-        // record the valid offset of this flush
-        lastFlushOffset = bytesCurBlock;
+          // just discard the current packet since it is already been sent.
+          currentPacket = null;
+        }
+        // Restore state of stream. Record the last flush offset 
+        // of the last full chunk that was flushed.
+        //
+        bytesCurBlock = saveOffset;
+        toWaitFor = lastQueuedSeqno;
+      } // end synchronized
 
-        // wait for all packets to be sent and acknowledged
-        flushInternal();
-      } else {
-        // just discard the current packet since it is already been sent.
-        currentPacket = null;
-      }
-      
-      // Restore state of stream. Record the last flush offset 
-      // of the last full chunk that was flushed.
-      //
-      bytesCurBlock = saveOffset;
+      waitForAckedSeqno(toWaitFor);
 
       // If any new blocks were allocated since the last flush, 
       // then persist block locations on namenode. 
       //
       if (persistBlocks.getAndSet(false)) {
-        dfsClient.namenode.fsync(src, dfsClient.clientName);
+        try {
+          dfsClient.namenode.fsync(src, dfsClient.clientName);
+        } catch (IOException ioe) {
+          DFSClient.LOG.warn("Unable to persist blocks in hflush for " + src, ioe);
+          // If we got an error here, it might be because some other thread called
+          // close before our hflush completed. In that case, we should throw an
+          // exception that the stream is closed.
+          isClosed();
+          // If we aren't closed but failed to sync, we should expose that to the
+          // caller.
+          throw ioe;
+        }
       }
     } catch (IOException e) {
-        lastException = new IOException("IOException flush:" + e);
-        closeThreads(true);
-        throw e;
+      DFSClient.LOG.warn("Error while syncing", e);
+      synchronized (this) {
+        if (!closed) {
+          lastException = new IOException("IOException flush:" + e);
+          closeThreads(true);
+        }
+      }
+      throw e;
     }
   }
 
@@ -1338,26 +1387,39 @@ class DFSOutputStream extends FSOutputSu
    * Waits till all existing data is flushed and confirmations 
    * received from datanodes. 
    */
-  private synchronized void flushInternal() throws IOException {
-    dfsClient.checkOpen();
-    isClosed();
-    //
-    // If there is data in the current buffer, send it across
-    //
-    if (currentPacket != null) {
-      queuePacket(currentPacket);
-      currentPacket = null;
+  private void flushInternal() throws IOException {
+    long toWaitFor;
+    synchronized (this) {
+      dfsClient.checkOpen();
+      isClosed();
+      //
+      // If there is data in the current buffer, send it across
+      //
+      queueCurrentPacket();
+      toWaitFor = lastQueuedSeqno;
     }
 
+    waitForAckedSeqno(toWaitFor);
+  }
+
+  private void waitForAckedSeqno(long seqno) throws IOException {
+    if (DFSClient.LOG.isDebugEnabled()) {
+      DFSClient.LOG.debug("Waiting for ack for: " + seqno);
+    }
     synchronized (dataQueue) {
-      while (!closed && dataQueue.size() + ackQueue.size() > 0) {
+      while (!closed) {
+        isClosed();
+        if (lastAckedSeqno >= seqno) {
+          break;
+        }
         try {
-          dataQueue.wait();
-        } catch (InterruptedException  e) {
+          dataQueue.wait(1000); // when we receive an ack, we notify on dataQueue
+        } catch (InterruptedException ie) {
+          Thread.currentThread().interrupt();
         }
       }
-      isClosed();
     }
+    isClosed();
   }
 
   /**
@@ -1409,7 +1471,7 @@ class DFSOutputStream extends FSOutputSu
       flushBuffer();       // flush from all upper layers
 
       if (currentPacket != null) { 
-        waitAndQueuePacket(currentPacket);
+        waitAndQueueCurrentPacket();
       }
 
       if (bytesCurBlock != 0) {
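
Taken together, the DFSOutputStream hunks split hflush() into a short synchronized section (flush the checksum buffer, queue the current packet, snapshot lastQueuedSeqno) followed by waitForAckedSeqno() outside the stream lock, which is what allows new writes to proceed in parallel with a flush. The caller-visible contract in the javadoc is unchanged. A minimal usage sketch, assuming a reachable cluster and the 0.21+ FileSystem API; the path and record contents are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HflushExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FSDataOutputStream out = fs.create(new Path("/demo/hflush-example"));
        out.writeBytes("first record\n");
        // Blocks until the bytes written so far are acked by the datanode
        // pipeline and visible to new readers; durability on disk is still
        // not guaranteed. Other threads may keep writing concurrently.
        out.hflush();
        out.writeBytes("second record\n");
        out.close();
      }
    }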

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1078146&r1=1078145&r2=1078146&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Fri Mar  4 21:40:29 2011
@@ -1650,8 +1650,10 @@ public class FSDataset implements FSCons
     FileChannel channel = file.getChannel();
     long oldPos = channel.position();
     long newPos = oldPos - checksumSize;
-    DataNode.LOG.info("Changing meta file offset of block " + b + " from " + 
-        oldPos + " to " + newPos);
+    if (DataNode.LOG.isDebugEnabled()) {
+      DataNode.LOG.debug("Changing meta file offset of block " + b + " from " +
+          oldPos + " to " + newPos);
+    }
     channel.position(newPos);
   }
 
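
The FSDataset hunk demotes an INFO message to DEBUG and adds an isDebugEnabled() guard. The guard matters because the message argument is built eagerly; without it, the string concatenation runs on every call even when debug output is discarded. The same idiom in isolation:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class GuardedLogging {
      private static final Log LOG = LogFactory.getLog(GuardedLogging.class);

      static void logOffsetChange(long oldPos, long newPos) {
        // The guard skips building the message string when debug is off.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Changing meta file offset from " + oldPos + " to " + newPos);
        }
      }
    }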

Propchange: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 21:40:29 2011
@@ -4,4 +4,4 @@
 /hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:776175-785643,785929-786278
 /hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:820487
-/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1052823,1060619,1061067,1062020

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1078146&r1=1078145&r2=1078146&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Mar  4 21:40:29 2011
@@ -990,7 +990,7 @@ class FSDirectory implements Closeable {
     }
     
     long now = now();
-    trgInode.setModificationTime(now);
+    trgInode.setModificationTimeForce(now);
     trgParent.setModificationTime(now);
     // update quota on the parent directory ('count' files removed, 0 space)
     unprotectedUpdateCount(trgINodes, trgINodes.length-1, - count, 0);
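
This one-liner is the HDFS-1498 fix from the changelog: unprotectedConcat now stamps the target file's mtime with setModificationTimeForce instead of setModificationTime. It plausibly pairs with HDFS-697 above, since enabling -ea would let an INode-level assertion fire if the unforced setter is reserved for directories. A hedged reconstruction of the contrast between the two setters; the actual INode code may differ in details:

    // Hedged reconstruction, not the actual INode source.
    abstract class INodeSketch {
      protected long modificationTime;

      abstract boolean isDirectory();

      void setModificationTime(long mtime) {
        assert isDirectory();             // would trip under -ea for a file
        if (mtime > modificationTime) {   // only move the timestamp forward
          modificationTime = mtime;
        }
      }

      void setModificationTimeForce(long mtime) {
        modificationTime = mtime;         // unconditional stamp, usable on files
      }
    }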

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1078146&r1=1078145&r2=1078146&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri Mar  4 21:40:29 2011
@@ -335,6 +335,7 @@ public class SecondaryNameNode implement
           public Void run() throws Exception {
             checkpointImage.cTime = sig.cTime;
             checkpointImage.checkpointTime = sig.checkpointTime;
+            checkpointImage.imageDigest = sig.imageDigest;
         
             // get fsimage
             String fileid = "getimage=1";
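
The added line copies the image digest from the checkpoint signature into the secondary's checkpoint image before the fsimage download starts, keeping the digest the secondary records consistent with what the primary advertised. A generic illustration of why a digest travels with a fetched image, in plain JDK code rather than the actual CheckpointSignature API:

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.security.MessageDigest;

    class ImageDigestCheck {
      // Compute the MD5 of a downloaded image file and compare it with the
      // digest the sender advertised; a mismatch means a corrupt transfer.
      static void verify(String path, byte[] expected) throws Exception {
        MessageDigest md = MessageDigest.getInstance("MD5");
        InputStream in = new FileInputStream(path);
        try {
          byte[] buf = new byte[8192];
          int n;
          while ((n = in.read(buf)) != -1) {
            md.update(buf, 0, n);
          }
        } finally {
          in.close();
        }
        if (!MessageDigest.isEqual(expected, md.digest())) {
          throw new IOException("image digest mismatch for " + path);
        }
      }
    }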

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1078146&r1=1078145&r2=1078146&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Fri Mar  4 21:40:29 2011
@@ -120,7 +120,7 @@ class ImageLoaderCurrent implements Imag
   protected final DateFormat dateFormat = 
                                       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int [] versions = 
-           {-16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27};
+    {-16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27};
   private int imageVersion = 0;
 
   /* (non-Javadoc)

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj?rev=1078146&r1=1078145&r2=1078146&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj Fri Mar  4 21:40:29 2011
@@ -86,13 +86,13 @@ privileged public aspect DFSClientAspect
     LOG.info("FI: before pipelineClose:");
   }
 
-  pointcut checkAckQueue(DFSOutputStream.Packet cp):
-    call (void DFSOutputStream.waitAndQueuePacket(
-            DFSOutputStream.Packet))
+  pointcut checkAckQueue(DFSOutputStream stream):
+    call (void DFSOutputStream.waitAndQueueCurrentPacket())
     && withincode (void DFSOutputStream.writeChunk(..))
-    && args(cp);
+    && this(stream);
 
-  after(DFSOutputStream.Packet cp) : checkAckQueue (cp) {
+  after(DFSOutputStream stream) : checkAckQueue (stream) {
+    DFSOutputStream.Packet cp = stream.currentPacket;
     PipelineTest pTest = DataTransferTestUtil.getDataTransferTest();
     if (pTest != null && pTest instanceof PipelinesTest) {
       LOG.debug("FI: Recording packet # " + cp.seqno

Propchange: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 21:40:29 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-265/src/test/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/test/hdfs:820487
-/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1052823,1060619,1061067,1062020

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsSymlink.java?rev=1078146&r1=1078145&r2=1078146&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsSymlink.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsSymlink.java Fri Mar  4 21:40:29 2011
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import org.apache.hadoop.ipc.RemoteException;
 import static org.junit.Assert.*;
 import org.junit.Test;
@@ -46,11 +47,11 @@ public class TestFcHdfsSymlink extends F
     return "hdfs";
   }
 
-  protected String testBaseDir1() {
+  protected String testBaseDir1() throws IOException {
     return "/test1";
   }
   
-  protected String testBaseDir2() {
+  protected String testBaseDir2() throws IOException {
     return "/test2";
   }
 
@@ -83,11 +84,11 @@ public class TestFcHdfsSymlink extends F
   @Test
   /** Link from Hdfs to LocalFs */
   public void testLinkAcrossFileSystems() throws IOException {
-    Path localDir  = new Path("file:///tmp/test");
-    Path localFile = new Path("file:///tmp/test/file");
+    Path localDir  = new Path("file://"+getAbsoluteTestRootDir(fc)+"/test");
+    Path localFile = new Path("file://"+getAbsoluteTestRootDir(fc)+"/test/file");
     Path link      = new Path(testBaseDir1(), "linkToFile");
     FileContext localFc = FileContext.getLocalFSFileContext();
-    localFc.delete(new Path("file:///tmp/test"), true);
+    localFc.delete(localDir, true);
     localFc.mkdir(localDir, FileContext.DEFAULT_PERM, true);
     localFc.setWorkingDirectory(localDir);
     assertEquals(localDir, localFc.getWorkingDirectory());
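
The TestFcHdfsSymlink hunks are the HDFS-1466 fix: the test used to create and delete the fixed path file:///tmp/test, which misbehaves if that directory already exists or is shared between users, and it now derives local paths from the helper's test root. A sketch of the usual shape of such a helper, under the assumption that it keys off a build property; the real FileContextTestHelper may differ:

    // Assumed idiom, not the actual FileContextTestHelper implementation:
    // resolve test data under a per-build directory rather than /tmp.
    public class TestRootHelper {
      public static String getAbsoluteTestRootDir() {
        // Ant builds conventionally set test.build.data per build tree.
        return System.getProperty("test.build.data", "build/test/data");
      }
    }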

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1078146&r1=1078145&r2=1078146&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Fri Mar  4 21:40:29 2011
@@ -76,7 +76,7 @@ public class TestOfflineImageViewer exte
   
   // Main entry point into testing.  Necessary since we only want to generate
   // the fsimage file once and use it for multiple tests. 
-  public void testOIV() {
+  public void testOIV() throws Exception {
     File originalFsimage = null;
     try {
     originalFsimage = initFsimage();
@@ -98,7 +98,7 @@ public class TestOfflineImageViewer exte
 
   // Create a populated namespace for later testing.  Save its contents to a
   // data structure and store its fsimage location.
-  private File initFsimage() {
+  private File initFsimage() throws IOException {
     MiniDFSCluster cluster = null;
     File orig = null;
     try {
@@ -131,11 +131,9 @@ public class TestOfflineImageViewer exte
       URI [] files = cluster.getNameDirs(0).toArray(new URI[0]);
       orig =  new File(files[0].getPath(), "current/fsimage");
       
-      if(!orig.exists())
+      if (!orig.exists()) {
         fail("Didn't generate or can't find fsimage.");
-
-    } catch (IOException e) {
-      fail("Failed trying to generate fsimage file: " + e.getMessage());
+      }
     } finally {
       if(cluster != null)
         cluster.shutdown();
@@ -152,7 +150,7 @@ public class TestOfflineImageViewer exte
 
   // Verify that we can correctly generate an ls-style output for a valid 
   // fsimage
-  private void outputOfLSVisitor(File originalFsimage) {
+  private void outputOfLSVisitor(File originalFsimage) throws IOException {
     File testFile = new File(ROOT, "/basicCheck");
     File outputFile = new File(ROOT, "/basicCheckOutput");
     
@@ -167,8 +165,6 @@ public class TestOfflineImageViewer exte
       HashMap<String, LsElements> fileOutput = readLsfile(outputFile);
       
       compareNamespaces(writtenFiles, fileOutput);
-    } catch (IOException e) {
-      fail("Failed reading valid file: " + e.getMessage());
     } finally {
       if(testFile.exists()) testFile.delete();
       if(outputFile.exists()) outputFile.delete();
@@ -178,7 +174,7 @@ public class TestOfflineImageViewer exte
   
   // Confirm that attempting to read an fsimage file with an unsupported
   // layout results in an error
-  public void unsupportedFSLayoutVersion(File originalFsimage) {
+  public void unsupportedFSLayoutVersion(File originalFsimage) throws IOException {
     File testFile = new File(ROOT, "/invalidLayoutVersion");
     File outputFile = new File(ROOT, "invalidLayoutVersionOutput");
     
@@ -196,8 +192,6 @@ public class TestOfflineImageViewer exte
           throw e; // wasn't error we were expecting
         System.out.println("Correctly failed at reading bad image version.");
       }
-    } catch (IOException e) {
-      fail("Problem testing unsupported layout version: " + e.getMessage());
     } finally {
       if(testFile.exists()) testFile.delete();
       if(outputFile.exists()) outputFile.delete();
@@ -205,7 +199,7 @@ public class TestOfflineImageViewer exte
   }
   
   // Verify that image viewer will bail on a file that ends unexpectedly
-  private void truncatedFSImage(File originalFsimage) {
+  private void truncatedFSImage(File originalFsimage) throws IOException {
     File testFile = new File(ROOT, "/truncatedFSImage");
     File outputFile = new File(ROOT, "/trucnatedFSImageOutput");
     try {
@@ -221,9 +215,7 @@ public class TestOfflineImageViewer exte
       } catch (EOFException e) {
         System.out.println("Correctly handled EOF");
       }
-      
-    } catch (IOException e) {
-      fail("Failed testing truncatedFSImage: " + e.getMessage());
+
     } finally {
       if(testFile.exists()) testFile.delete();
       if(outputFile.exists()) outputFile.delete();
@@ -373,7 +365,7 @@ public class TestOfflineImageViewer exte
     }
   }
 
-  private void outputOfFileDistributionVisitor(File originalFsimage) {
+  private void outputOfFileDistributionVisitor(File originalFsimage) throws IOException {
     File testFile = new File(ROOT, "/basicCheck");
     File outputFile = new File(ROOT, "/fileDistributionCheckOutput");
 
@@ -394,8 +386,6 @@ public class TestOfflineImageViewer exte
         assertEquals(row.length, 2);
         totalFiles += Integer.parseInt(row[1]);
       }
-    } catch (IOException e) {
-      fail("Failed reading valid file: " + e.getMessage());
     } finally {
       if(testFile.exists()) testFile.delete();
       if(outputFile.exists()) outputFile.delete();
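
The TestOfflineImageViewer changes follow a single pattern: rather than catching IOException and calling fail(e.getMessage()), which discards the stack trace, the test methods now declare throws and let the exception propagate so JUnit reports the full cause. The before/after shape, with a hypothetical doIo() standing in for the real test body:

    import static org.junit.Assert.fail;
    import java.io.IOException;

    public class ExceptionStyleDemo {
      static void doIo() throws IOException {
        throw new IOException("simulated failure");
      }

      // Before: catching and calling fail() loses the stack trace.
      public void testOldStyle() {
        try {
          doIo();
        } catch (IOException e) {
          fail("Failed trying to do IO: " + e.getMessage());
        }
      }

      // After: declare throws and let JUnit report the full cause.
      public void testNewStyle() throws IOException {
        doIo();
      }
    }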

Propchange: hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 21:40:29 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/webapps/datanode:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
-/hadoop/hdfs/trunk/src/webapps/datanode:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/webapps/datanode:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1052823,1060619,1061067,1062020

Propchange: hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 21:40:29 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
-/hadoop/hdfs/trunk/src/webapps/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/webapps/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1052823,1060619,1061067,1062020

Propchange: hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 21:40:29 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/webapps/secondary:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
-/hadoop/hdfs/trunk/src/webapps/secondary:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/webapps/secondary:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1052823,1060619,1061067,1062020


