hadoop-common-commits mailing list archives

From: e..@apache.org
Subject: svn commit: r1213938 - in /hadoop/common/branches/branch-1: ./ src/hdfs/org/apache/hadoop/hdfs/ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/org/apache/hadoop/hdfs/server/datanode/ src/test/org/apache/hadoop/hdfs/server/namenode/
Date: Tue, 13 Dec 2011 21:37:24 GMT
Author: eli
Date: Tue Dec 13 21:37:23 2011
New Revision: 1213938

URL: http://svn.apache.org/viewvc?rev=1213938&view=rev
Log:
HDFS-2654. Make BlockReaderLocal not extend RemoteBlockReader2. Contributed by Eli Collins

Added:
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/BlockReader.java
Modified:
    hadoop/common/branches/branch-1/CHANGES.txt
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/BlockReaderLocal.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java

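For context, the refactoring in this commit as a simplified sketch (abbreviated; not the full branch-1 source): before HDFS-2654 the local reader subclassed the remote reader class (DFSClient.BlockReader in branch-1), inheriting remote-specific state; after it, both readers extend FSInputChecker independently and share only the small BlockReader interface added below.

    // Simplified sketch of the hierarchy change; method bodies elided.
    //
    // Before: class BlockReaderLocal extends DFSClient.BlockReader { ... }
    //
    // After: both readers implement a narrow shared interface:
    public interface BlockReader extends java.io.Closeable {
      int read(byte[] buf, int off, int len) throws java.io.IOException;
      int readAll(byte[] buf, int offset, int len) throws java.io.IOException;
      long skip(long n) throws java.io.IOException;
    }
    // class BlockReaderLocal  extends FSInputChecker implements BlockReader { ... }
    // class RemoteBlockReader extends FSInputChecker implements BlockReader { ... }
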
Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1213938&r1=1213937&r2=1213938&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Dec 13 21:37:23 2011
@@ -118,6 +118,8 @@ Release 1.1.0 - unreleased
 
     HDFS-2638. Improve a block recovery log. (eli)
 
+    HDFS-2654. Make BlockReaderLocal not extend RemoteBlockReader2. (eli)
+
 Release 1.0.0 - unreleased
 
   NEW FEATURES

Added: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/BlockReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/BlockReader.java?rev=1213938&view=auto
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/BlockReader.java (added)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/BlockReader.java Tue Dec 13 21:37:23 2011
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.io.Closeable;
+
+/**
+ * The API shared between local and remote block readers.
+ */
+public interface BlockReader extends Closeable {
+
+  public int read(byte buf[], int off, int len) throws IOException;
+
+  public int readAll(byte[] buf, int offset, int len) throws IOException;
+
+  public long skip(long n) throws IOException;
+
+}

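With the shared interface in place, calling code no longer needs the concrete DFSClient.BlockReader type. A usage sketch (copyBlock is a hypothetical helper, not part of this commit; reader may be either implementation):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.BlockReader;

    class BlockReaderExample {
      // Drain up to out.length bytes of a block through the shared API.
      static int copyBlock(BlockReader reader, byte[] out) throws IOException {
        try {
          // readAll(), unlike read(), loops until len bytes are read or EOF.
          return reader.readAll(out, 0, out.length);
        } finally {
          reader.close();  // BlockReader extends Closeable
        }
      }
    }
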
Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/BlockReaderLocal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/BlockReaderLocal.java?rev=1213938&r1=1213937&r2=1213938&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/BlockReaderLocal.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/BlockReaderLocal.java Tue Dec 13 21:37:23 2011
@@ -29,8 +29,8 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.BlockReader;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
@@ -57,7 +58,7 @@ import org.apache.hadoop.util.DataChecks
  * if security is enabled.</li>
  * </ul>
  */
-class BlockReaderLocal extends BlockReader {
+class BlockReaderLocal extends FSInputChecker implements BlockReader {
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
 
   //Stores the cache and proxy for a local datanode.
@@ -119,6 +120,16 @@ class BlockReaderLocal extends BlockRead
   private FileInputStream dataIn; // reader for the data file
   private FileInputStream checksumIn;   // reader for the checksum file
   
+  private DataChecksum checksum;
+  private int bytesPerChecksum;
+  private int checksumSize;
+  private long firstChunkOffset;
+  private long lastChunkLen = -1;
+  private long lastChunkOffset = -1;
+  private long startOffset;
+  private boolean gotEOS = false;
+  private byte[] skipBuf = null;
+
   /**
    * The only way this object can be instantiated.
    */
@@ -142,7 +153,7 @@ class BlockReaderLocal extends BlockRead
     FileInputStream dataIn = null;
     FileInputStream checksumIn = null;
     BlockReaderLocal localBlockReader = null;
-    boolean skipChecksum = shortCircuitChecksum(conf);
+    boolean skipChecksum = skipChecksumCheck(conf);
     try {
       // get a local file system
       File blkfile = new File(pathinfo.getBlockPath());
@@ -226,7 +237,7 @@ class BlockReaderLocal extends BlockRead
     return pathinfo;
   }
   
-  private static boolean shortCircuitChecksum(Configuration conf) {
+  private static boolean skipChecksumCheck(Configuration conf) {
     return conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
         DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT);
   }
@@ -256,8 +267,10 @@ class BlockReaderLocal extends BlockRead
     super(
         new Path("/blk_" + block.getBlockId() + ":of:" + hdfsfile) /*too non path-like?*/,
         1,
-        checksum,
-        verifyChecksum);
+        verifyChecksum,
+        checksum.getChecksumSize() > 0? checksum : null,
+        checksum.getBytesPerChecksum(),
+        checksum.getChecksumSize());
     this.startOffset = startOffset;
     this.dataIn = dataIn;
     this.checksumIn = checksumIn;
@@ -278,34 +291,20 @@ class BlockReaderLocal extends BlockRead
 
     checksumSize = checksum.getChecksumSize();
 
-    long endOffset = blockLength;
-    if (startOffset < 0 || startOffset > endOffset
-        || (length + startOffset) > endOffset) {
+    if (startOffset < 0 || startOffset > blockLength
+        || (length + startOffset) > blockLength) {
       String msg = " Offset " + startOffset + " and length " + length
-      + " don't match block " + block + " ( blockLen " + endOffset + " )";
+      + " don't match block " + block + " ( blockLen " + blockLength + " )";
       LOG.warn("BlockReaderLocal requested with incorrect offset: " + msg);
       throw new IOException(msg);
     }
 
     firstChunkOffset = (startOffset - (startOffset % bytesPerChecksum));
 
-    if (length >= 0) {
-      // Make sure endOffset points to end of a checksumed chunk.
-      long tmpLen = startOffset + length;
-      if (tmpLen % bytesPerChecksum != 0) {
-        tmpLen += (bytesPerChecksum - tmpLen % bytesPerChecksum);
-      }
-      if (tmpLen < endOffset) {
-        endOffset = tmpLen;
-      }
-    }
-
-    // seek to the right offsets
     if (firstChunkOffset > 0) {
       dataIn.getChannel().position(firstChunkOffset);
 
       long checksumSkip = (firstChunkOffset / bytesPerChecksum) * checksumSize;
-      // note blockInStream is  seeked when created below
       if (checksumSkip > 0) {
         checksumIn.skip(checksumSkip);
       }
@@ -322,9 +321,25 @@ class BlockReaderLocal extends BlockRead
     }
     if (checksum == null) {
       return dataIn.read(buf, off, len);
-    } else {
-      return super.read(buf, off, len);
     }
+    // For the first read, skip the extra bytes at the front.
+    if (lastChunkLen < 0 && startOffset > firstChunkOffset && len > 0) {
+      // Skip these bytes. But don't call this.skip()!
+      int toSkip = (int)(startOffset - firstChunkOffset);
+      if (skipBuf == null) {
+        skipBuf = new byte[bytesPerChecksum];
+      }
+      if (super.read(skipBuf, 0, toSkip) != toSkip) {
+        // Should never happen
+        throw new IOException("Could not skip " + toSkip + " bytes");
+      }
+    }
+    return super.read(buf, off, len);
+  }
+
+  @Override
+  public int readAll(byte[] buf, int offset, int len) throws IOException {
+    return readFully(this, buf, offset, len);
   }
 
   @Override
@@ -334,20 +349,44 @@ class BlockReaderLocal extends BlockRead
     }
     if (checksum == null) {
       return dataIn.skip(n);
-    } else {
-     return super.skip(n);
     }
+    // Skip by reading the data so we stay in sync with checksums.
+    // This could be implemented more efficiently in the future to
+    // skip to the beginning of the appropriate checksum chunk
+    // and then only read to the middle of that chunk.
+    if (skipBuf == null) {
+      skipBuf = new byte[bytesPerChecksum]; 
+    }
+    long nSkipped = 0;
+    while (nSkipped < n) {
+      int toSkip = (int)Math.min(n-nSkipped, skipBuf.length);
+      int ret = read(skipBuf, 0, toSkip);
+      if (ret <= 0) {
+        return nSkipped;
+      }
+      nSkipped += ret;
+    }
+    return nSkipped;
+  }
+
+  @Override
+  public boolean seekToNewSource(long targetPos) throws IOException {
+    // Checksum errors are handled outside BlockReaderLocal 
+    return false;
   }
 
   @Override
   public synchronized void seek(long n) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("seek " + n);
-    }
     throw new IOException("Seek() is not supported in BlockReaderLocal");
   }
 
   @Override
+  protected long getChunkPosition(long pos) {
+    throw new RuntimeException("getChunkPosition() is not supported, " +
+        "since seek is not implemented");
+  }
+
+  @Override
   protected synchronized int readChunk(long pos, byte[] buf, int offset,
       int len, byte[] checksumBuf) throws IOException {
     if (LOG.isDebugEnabled()) {
@@ -393,13 +432,7 @@ class BlockReaderLocal extends BlockRead
 
   @Override
   public synchronized void close() throws IOException {
-    if (dataIn != null) {
-      dataIn.close();
-      dataIn = null;
-    }
-    if (checksumIn != null) {
-      checksumIn.close();
-      checksumIn = null;
-    }
+    IOUtils.closeStream(dataIn);
+    IOUtils.closeStream(checksumIn);
   }
 }
\ No newline at end of file

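The rewritten BlockReaderLocal.skip() above deliberately skips by reading: FSInputChecker verifies whole checksum chunks, so consuming the skipped bytes through the normal read path keeps the data and checksum streams aligned (the in-code comment notes a more efficient chunk-boundary seek as future work). The same pattern as a standalone sketch (skipByReading is a hypothetical name; the real logic lives inside the class):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.BlockReader;

    class SkipExample {
      // Skip n bytes by reading them one checksum chunk at a time, so
      // every skipped byte still passes through checksum verification.
      static long skipByReading(BlockReader reader, long n, int bytesPerChecksum)
          throws IOException {
        byte[] skipBuf = new byte[bytesPerChecksum];
        long skipped = 0;
        while (skipped < n) {
          int toSkip = (int) Math.min(n - skipped, skipBuf.length);
          int ret = reader.read(skipBuf, 0, toSkip);
          if (ret <= 0) {
            return skipped;  // EOF before n bytes were skipped
          }
          skipped += ret;
        }
        return skipped;
      }
    }
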
Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=1213938&r1=1213937&r2=1213938&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Tue Dec 13 21:37:23 2011
@@ -1407,20 +1407,20 @@ public class DFSClient implements FSCons
/** This is a wrapper around connection to datanode
    * and understands checksum, offset etc
    */
-  public static class BlockReader extends FSInputChecker {
+  public static class RemoteBlockReader extends FSInputChecker implements BlockReader {
 
     private Socket dnSock; //for now just sending checksumOk.
     private DataInputStream in;
-    protected DataChecksum checksum;
-    protected long lastChunkOffset = -1;
-    protected long lastChunkLen = -1;
+    private DataChecksum checksum;
+    private long lastChunkOffset = -1;
+    private long lastChunkLen = -1;
     private long lastSeqNo = -1;
 
-    protected long startOffset;
-    protected long firstChunkOffset;
-    protected int bytesPerChecksum;
-    protected int checksumSize;
-    protected boolean gotEOS = false;
+    private long startOffset;
+    private long firstChunkOffset;
+    private int bytesPerChecksum;
+    private int checksumSize;
+    private boolean gotEOS = false;
     
     byte[] skipBuf = null;
     ByteBuffer checksumBytes = null;
@@ -1613,7 +1613,7 @@ public class DFSClient implements FSCons
       return chunkLen;
     }
     
-    private BlockReader( String file, long blockId, DataInputStream in, 
+    private RemoteBlockReader( String file, long blockId, DataInputStream in, 
                          DataChecksum checksum, boolean verifyChecksum,
                          long startOffset, long firstChunkOffset, 
                          Socket dnSock ) {
@@ -1639,11 +1639,11 @@ public class DFSClient implements FSCons
     /**
      * Public constructor 
      */  
-    BlockReader(Path file, int numRetries) {
+    RemoteBlockReader(Path file, int numRetries) {
       super(file, numRetries);
     }
 
-    protected BlockReader(Path file, int numRetries, DataChecksum checksum,
+    protected RemoteBlockReader(Path file, int numRetries, DataChecksum checksum,
         boolean verifyChecksum) {
       super(file,
           numRetries,
@@ -1742,8 +1742,8 @@ public class DFSClient implements FSCons
                               startOffset + " for file " + file);
       }
 
-      return new BlockReader( file, blockId, in, checksum, verifyChecksum,
-                              startOffset, firstChunkOffset, sock );
+      return new RemoteBlockReader(file, blockId, in, checksum, verifyChecksum,
+                                   startOffset, firstChunkOffset, sock);
     }
 
     @Override
@@ -1752,10 +1752,11 @@ public class DFSClient implements FSCons
       checksum = null;
       // in will be closed when its Socket is closed.
     }
-    
+
     /** kind of like readFully(). Only reads as much as possible.
      * And allows use of protected readFully().
      */
+    @Override
     public int readAll(byte[] buf, int offset, int len) throws IOException {
       return readFully(this, buf, offset, len);
     }
@@ -2092,7 +2093,7 @@ public class DFSClient implements FSCons
           s = socketFactory.createSocket();
           NetUtils.connect(s, targetAddr, socketTimeout);
           s.setSoTimeout(socketTimeout);
-          blockReader = BlockReader.newBlockReader(s, src, blk.getBlockId(), 
+          blockReader = RemoteBlockReader.newBlockReader(s, src, blk.getBlockId(), 
               accessToken, 
               blk.getGenerationStamp(),
               offsetIntoBlock, blk.getNumBytes() - offsetIntoBlock,
@@ -2303,9 +2304,10 @@ public class DFSClient implements FSCons
         InetSocketAddress targetAddr = retval.addr;
         BlockReader reader = null;
 
-        int len = (int) (end - start + 1);
         try {
           Token<BlockTokenIdentifier> accessToken = block.getBlockToken();
+          int len = (int) (end - start + 1);
+
           // first try reading the block locally.
           if (shouldTryShortCircuitRead(targetAddr)) {
             try {
@@ -2322,7 +2324,7 @@ public class DFSClient implements FSCons
             dn = socketFactory.createSocket();
             NetUtils.connect(dn, targetAddr, socketTimeout);
             dn.setSoTimeout(socketTimeout);
-            reader = BlockReader.newBlockReader(dn, src, 
+            reader = RemoteBlockReader.newBlockReader(dn, src, 
                 block.getBlock().getBlockId(), accessToken,
                 block.getBlock().getGenerationStamp(), start, len, buffersize, 
                 verifyChecksum, clientName);
@@ -2351,7 +2353,7 @@ public class DFSClient implements FSCons
             }
           }
         } finally {
-          IOUtils.closeStream(reader);
+          reader.close();
           IOUtils.closeSocket(dn);
         }
         // Put chosen node into dead list, continue

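With both implementations behind one interface, the selection logic in the DFSClient hunks above reduces to: try the short-circuit local reader first, and fall back to a socket-based RemoteBlockReader on failure. A condensed sketch (chooseReader and ReaderFactory are hypothetical; the real calls pass block, token, offset, and buffer-size arguments):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.BlockReader;

    class ChooseReaderExample {
      // Hypothetical factory standing in for the two newBlockReader() calls.
      interface ReaderFactory {
        BlockReader local() throws IOException;   // BlockReaderLocal.newBlockReader(...)
        BlockReader remote() throws IOException;  // RemoteBlockReader.newBlockReader(...)
      }

      static BlockReader chooseReader(boolean tryShortCircuit, ReaderFactory f)
          throws IOException {
        if (tryShortCircuit) {
          try {
            return f.local();    // first try reading the block locally
          } catch (IOException e) {
            // local read failed; fall through to the remote path
          }
        }
        return f.remote();       // connect to the datanode over a socket
      }
    }
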
Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java?rev=1213938&r1=1213937&r2=1213938&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java Tue Dec 13 21:37:23 2011
@@ -39,7 +39,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSClient.RemoteBlockReader;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
@@ -145,12 +147,11 @@ public class JspHelper {
       long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);     
       
       // Use the block name for file name. 
-      DFSClient.BlockReader blockReader = 
-        DFSClient.BlockReader.newBlockReader(s, addr.toString() + ":" + blockId,
-                                             blockId, accessToken, genStamp ,offsetIntoBlock,
-                                             amtToRead, 
-                                             conf.getInt("io.file.buffer.size",
-                                                         4096));
+      BlockReader blockReader = 
+        RemoteBlockReader.newBlockReader(s, addr.toString() + ":" + blockId,
+                                         blockId, accessToken, genStamp ,offsetIntoBlock,
+                                         amtToRead, 
+                                         conf.getInt("io.file.buffer.size", 4096));
         
     byte[] buf = new byte[(int)amtToRead];
     int readOffset = 0;

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1213938&r1=1213937&r2=1213938&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Tue Dec 13 21:37:23 2011
@@ -34,7 +34,9 @@ import java.util.TreeSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSClient.RemoteBlockReader;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -428,7 +430,7 @@ public class NamenodeFsck {
     InetSocketAddress targetAddr = null;
     TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
     Socket s = null;
-    DFSClient.BlockReader blockReader = null; 
+    BlockReader blockReader = null; 
     Block block = lblock.getBlock(); 
 
     while (s == null) {
@@ -456,13 +458,13 @@ public class NamenodeFsck {
         s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
         
         blockReader = 
-          DFSClient.BlockReader.newBlockReader(s, targetAddr.toString() + ":" + 
-                                               block.getBlockId(), 
-                                               block.getBlockId(), 
-                                               lblock.getBlockToken(),
-                                               block.getGenerationStamp(), 
-                                               0, -1,
-                                               conf.getInt("io.file.buffer.size", 4096));
+          RemoteBlockReader.newBlockReader(s, targetAddr.toString() + ":" + 
+                                           block.getBlockId(), 
+                                           block.getBlockId(), 
+                                           lblock.getBlockToken(),
+                                           block.getGenerationStamp(), 
+                                           0, -1,
+                                           conf.getInt("io.file.buffer.size", 4096));
         
       }  catch (IOException ex) {
         // Put chosen node into dead list, continue

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1213938&r1=1213937&r2=1213938&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Tue Dec 13 21:37:23 2011
@@ -32,7 +32,9 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSClient.RemoteBlockReader;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -237,7 +239,7 @@ public class TestDataNodeVolumeFailure e
     throws IOException {
     InetSocketAddress targetAddr = null;
     Socket s = null;
-    DFSClient.BlockReader blockReader = null; 
+    BlockReader blockReader = null; 
     Block block = lblock.getBlock(); 
    
     targetAddr = NetUtils.createSocketAddr(datanode.getName());
@@ -247,7 +249,7 @@ public class TestDataNodeVolumeFailure e
     s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
 
     blockReader = 
-      DFSClient.BlockReader.newBlockReader(s, targetAddr.toString() + ":" + 
+      RemoteBlockReader.newBlockReader(s, targetAddr.toString() + ":" + 
           block.getBlockId(), block.getBlockId(), lblock.getBlockToken(),
           block.getGenerationStamp(), 0, -1, 4096);
 

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java?rev=1213938&r1=1213937&r2=1213938&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java (original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java Tue Dec 13 21:37:23 2011
@@ -26,7 +26,9 @@ import java.util.Random;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSClient.RemoteBlockReader;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -120,7 +122,7 @@ public class TestBlockTokenWithDFS exten
       boolean shouldSucceed) {
     InetSocketAddress targetAddr = null;
     Socket s = null;
-    DFSClient.BlockReader blockReader = null;
+    BlockReader blockReader = null;
     Block block = lblock.getBlock();
     try {
       DatanodeInfo[] nodes = lblock.getLocations();
@@ -129,7 +131,7 @@ public class TestBlockTokenWithDFS exten
       s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
       s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
 
-      blockReader = DFSClient.BlockReader.newBlockReader(s, targetAddr
+      blockReader = RemoteBlockReader.newBlockReader(s, targetAddr
           .toString()
           + ":" + block.getBlockId(), block.getBlockId(), lblock
           .getBlockToken(), block.getGenerationStamp(), 0, -1, conf.getInt(

