hadoop-hdfs-commits mailing list archives

From: tomwh...@apache.org
Subject: svn commit: r906690 [4/4] - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/common/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/aop/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/...
Date: Thu, 04 Feb 2010 22:20:03 GMT
Modified: hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj (original)
+++ hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj Thu Feb  4 22:20:02 2010
@@ -22,8 +22,8 @@
 import org.apache.hadoop.fi.DataTransferTestUtil;
 import org.apache.hadoop.fi.PipelineTest;
 import org.apache.hadoop.fi.DataTransferTestUtil.DataTransferTest;
-import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream;
-import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream.DataStreamer;
+import org.apache.hadoop.hdfs.DFSOutputStream;
+import org.apache.hadoop.hdfs.DFSOutputStream.DataStreamer;
 import org.apache.hadoop.hdfs.PipelinesTestUtil.PipelinesTest;
 import org.junit.Assert;
 
@@ -66,7 +66,7 @@
 
   pointcut pipelineErrorAfterInit(DataStreamer datastreamer):
     call(* processDatanodeError())
-    && within (DFSClient.DFSOutputStream.DataStreamer)
+    && within (DFSOutputStream.DataStreamer)
     && target(datastreamer);
 
   before(DataStreamer datastreamer) : pipelineErrorAfterInit(datastreamer) {
@@ -86,13 +86,13 @@
     LOG.info("FI: before pipelineClose:");
   }
 
-  pointcut checkAckQueue(DFSClient.DFSOutputStream.Packet cp):
-    call (void DFSClient.DFSOutputStream.waitAndQueuePacket(
-            DFSClient.DFSOutputStream.Packet))
-    && withincode (void DFSClient.DFSOutputStream.writeChunk(..))
+  pointcut checkAckQueue(DFSOutputStream.Packet cp):
+    call (void DFSOutputStream.waitAndQueuePacket(
+            DFSOutputStream.Packet))
+    && withincode (void DFSOutputStream.writeChunk(..))
     && args(cp);
 
-  after(DFSClient.DFSOutputStream.Packet cp) : checkAckQueue (cp) {
+  after(DFSOutputStream.Packet cp) : checkAckQueue (cp) {
     PipelineTest pTest = DataTransferTestUtil.getDataTransferTest();
     if (pTest != null && pTest instanceof PipelinesTest) {
       LOG.debug("FI: Recording packet # " + cp.seqno

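Everything in this section follows one pattern: DFSOutputStream (together with its DataStreamer and Packet members) is promoted from a nested class of DFSClient to a top-level class in org.apache.hadoop.hdfs, so every reference and cast drops the DFSClient. qualifier. A minimal sketch of the caller-side effect, assuming a stream obtained from an HDFS FileSystem (the UnwrapExample class and unwrap method are hypothetical, for illustration only):

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.hdfs.DFSOutputStream;

    class UnwrapExample {
      // Before r906690 this cast was (DFSClient.DFSOutputStream);
      // after it, the top-level class is referenced directly.
      static DFSOutputStream unwrap(FSDataOutputStream out) {
        return (DFSOutputStream) out.getWrappedStream();
      }
    }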
Modified: hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/HFlushAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/HFlushAspects.aj?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/HFlushAspects.aj (original)
+++ hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/HFlushAspects.aj Thu Feb  4 22:20:02 2010
@@ -25,7 +25,7 @@
 import org.apache.hadoop.fi.DataTransferTestUtil;
 import org.apache.hadoop.fi.PipelineTest;
 import org.apache.hadoop.fi.FiHFlushTestUtil.HFlushTest;
-import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 
 public aspect HFlushAspects {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java Thu Feb  4 22:20:02 2010
@@ -263,7 +263,7 @@
   }
 
   public static BlockAccessToken getAccessToken(FSDataOutputStream out) {
-    return ((DFSClient.DFSOutputStream) out.getWrappedStream()).getAccessToken();
+    return ((DFSOutputStream) out.getWrappedStream()).getAccessToken();
   }
 
   static void setLogLevel2All(org.apache.commons.logging.Log log) {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java Thu Feb  4 22:20:02 2010
@@ -21,7 +21,6 @@
 import java.util.ArrayList;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java Thu Feb  4 22:20:02 2010
@@ -26,7 +26,6 @@
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSClient.BlockReader;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -91,7 +90,7 @@
     s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
     s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
 
-    return DFSClient.BlockReader.newBlockReader(
+    return BlockReader.newBlockReader(
       s, targetAddr.toString()+ ":" + block.getBlockId(), block.getBlockId(),
       testBlock.getAccessToken(), block.getGenerationStamp(),
       offset, lenToRead,

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java Thu Feb  4 22:20:02 2010
@@ -23,7 +23,6 @@
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Thu Feb  4 22:20:02 2010
@@ -31,7 +31,7 @@
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSClient.DFSInputStream;
+import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.security.token.DelegationTokenIdentifier;
@@ -302,15 +302,15 @@
     conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
 
-    int maxBlockAcquires = DFSClient.getMaxBlockAcquireFailures(conf);
-    assertTrue(maxBlockAcquires > 0);
-
     try {
       cluster.waitActive();
       FileSystem fs = cluster.getFileSystem();
       NameNode preSpyNN = cluster.getNameNode();
       NameNode spyNN = spy(preSpyNN);
       DFSClient client = new DFSClient(null, spyNN, conf, null);
+      int maxBlockAcquires = client.getMaxBlockAcquireFailures();
+      assertTrue(maxBlockAcquires > 0);
+
 
       DFSTestUtil.createFile(fs, file, fileSize, (short)1, 12345L /*seed*/);
 
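Note that this hunk is more than a mechanical rename: the retry limit moves from the static DFSClient.getMaxBlockAcquireFailures(conf) to an instance method, so the assertion now has to run after the client is constructed. A sketch of the new call shape, reusing the names from the hunk above:

    // spyNN and conf are as in the test body above.
    DFSClient client = new DFSClient(null, spyNN, conf, null);
    // Instance method; formerly the static DFSClient.getMaxBlockAcquireFailures(conf).
    int maxBlockAcquires = client.getMaxBlockAcquireFailures();
    assertTrue(maxBlockAcquires > 0);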

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Thu Feb  4 22:20:02 2010
@@ -43,7 +43,6 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java Thu Feb  4 22:20:02 2010
@@ -89,7 +89,7 @@
         try {
           System.out.println("Workload processing file " + filename);
           FSDataOutputStream stm = createFile(fs, filename, replication);
-          DFSClient.DFSOutputStream dfstream = (DFSClient.DFSOutputStream)
+          DFSOutputStream dfstream = (DFSOutputStream)
                                                  (stm.getWrappedStream());
           dfstream.setArtificialSlowdown(1000);
           writeFile(stm, myseed);
@@ -357,7 +357,7 @@
       // create a file and write one block of data
       System.out.println("SimpleTest creating file " + filename);
       FSDataOutputStream stm = createFile(fs, filename, repl);
-      DFSClient.DFSOutputStream dfstream = (DFSClient.DFSOutputStream)
+      DFSOutputStream dfstream = (DFSOutputStream)
                                              (stm.getWrappedStream());
 
       // these are test settings

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Thu Feb  4 22:20:02 2010
@@ -528,12 +528,12 @@
 
       // instruct the dfsclient to use a new filename when it requests
       // new blocks for files that were renamed.
-      DFSClient.DFSOutputStream dfstream = (DFSClient.DFSOutputStream)
+      DFSOutputStream dfstream = (DFSOutputStream)
                                                  (stm.getWrappedStream());
       dfstream.setTestFilename(file1.toString());
-      dfstream = (DFSClient.DFSOutputStream) (stm3.getWrappedStream());
+      dfstream = (DFSOutputStream) (stm3.getWrappedStream());
       dfstream.setTestFilename(file3new.toString());
-      dfstream = (DFSClient.DFSOutputStream) (stm4.getWrappedStream());
+      dfstream = (DFSOutputStream) (stm4.getWrappedStream());
       dfstream.setTestFilename(file4new.toString());
 
       // write 1 byte to file.  This should succeed because the 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java Thu Feb  4 22:20:02 2010
@@ -117,7 +117,7 @@
         // write to the file
         stm.write(fileContent, tenth * i, tenth);
         // Wait while hflush() pushes all packets through built pipeline
-        ((DFSClient.DFSOutputStream)stm.getWrappedStream()).hflush();
+        ((DFSOutputStream)stm.getWrappedStream()).hflush();
         byte [] toRead = new byte[tenth];
         byte [] expected = new byte[tenth];
         System.arraycopy(fileContent, tenth * i, expected, 0, tenth);

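The hflush() changes in this and the following test files are all one idiom: at this revision hflush() appears to be reachable only on the concrete stream rather than on FSDataOutputStream itself, which is why the tests downcast the wrapped stream before flushing. A minimal helper as a sketch (the HFlushHelper class and method name are hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.hdfs.DFSOutputStream;

    class HFlushHelper {
      // Push all queued packets through the pipeline; mirrors the test idiom above.
      static void hflush(FSDataOutputStream stm) throws IOException {
        ((DFSOutputStream) stm.getWrappedStream()).hflush();
      }
    }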
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java Thu Feb  4 22:20:02 2010
@@ -94,7 +94,7 @@
     LOG.debug("Invoking append but doing nothing otherwise...");
     FSDataOutputStream ofs = fs.append(filePath);
     ofs.writeBytes("Some more stuff to write");
-    ((DFSClient.DFSOutputStream) ofs.getWrappedStream()).hflush();
+    ((DFSOutputStream) ofs.getWrappedStream()).hflush();
 
     List<LocatedBlock> lb = cluster.getNameNode().getBlockLocations(
       filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java Thu Feb  4 22:20:02 2010
@@ -77,7 +77,7 @@
         write(out, 0, half);
 
         //hflush
-        ((DFSClient.DFSOutputStream)out.getWrappedStream()).hflush();
+        ((DFSOutputStream)out.getWrappedStream()).hflush();
       }
 
       //b. On another machine M2, open file and verify that the half-block

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Thu Feb  4 22:20:02 2010
@@ -32,7 +32,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -236,7 +236,7 @@
     throws IOException {
     InetSocketAddress targetAddr = null;
     Socket s = null;
-    DFSClient.BlockReader blockReader = null; 
+    BlockReader blockReader = null; 
     Block block = lblock.getBlock(); 
    
     targetAddr = NetUtils.createSocketAddr(datanode.getName());
@@ -246,7 +246,7 @@
     s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
 
     blockReader = 
-      DFSClient.BlockReader.newBlockReader(s, targetAddr.toString() + ":" + 
+      BlockReader.newBlockReader(s, targetAddr.toString() + ":" + 
           block.getBlockId(), 
           block.getBlockId(), 
           lblock.getAccessToken(),

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java?rev=906690&r1=906689&r2=906690&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java Thu Feb  4 22:20:02 2010
@@ -26,6 +26,7 @@
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -123,7 +124,7 @@
       boolean shouldSucceed) {
     InetSocketAddress targetAddr = null;
     Socket s = null;
-    DFSClient.BlockReader blockReader = null;
+    BlockReader blockReader = null;
     Block block = lblock.getBlock();
     try {
       DatanodeInfo[] nodes = lblock.getLocations();
@@ -132,7 +133,7 @@
       s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
       s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
 
-      blockReader = DFSClient.BlockReader.newBlockReader(s, targetAddr
+      blockReader = BlockReader.newBlockReader(s, targetAddr
           .toString()
           + ":" + block.getBlockId(), block.getBlockId(), lblock
           .getAccessToken(), block.getGenerationStamp(), 0, -1, conf.getInt(

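BlockReader gets the same promotion as DFSOutputStream: the static factory is now invoked on the top-level org.apache.hadoop.hdfs.BlockReader. A hedged sketch of the call, with the argument order reconstructed from the hunks above; the io.file.buffer.size key and its default are assumptions, since the hunk is truncated before that argument:

    // s is a connected Socket; lblock a LocatedBlock; block = lblock.getBlock();
    // targetAddr and conf are as in the tests above.
    BlockReader blockReader = BlockReader.newBlockReader(
        s,
        targetAddr.toString() + ":" + block.getBlockId(),
        block.getBlockId(),
        lblock.getAccessToken(),
        block.getGenerationStamp(),
        0, -1,                                       // offset and length, as in the hunk
        conf.getInt("io.file.buffer.size", 4096));   // assumed key and default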

