hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From szets...@apache.org
Subject svn commit: r1147359 - in /hadoop/common/trunk/hdfs: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/tools/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/org/apache/hadoop/...
Date Sat, 16 Jul 2011 05:12:41 GMT
Author: szetszwo
Date: Sat Jul 16 05:12:40 2011
New Revision: 1147359

URL: http://svn.apache.org/viewvc?rev=1147359&view=rev
Log:
HDFS-2159. Deprecate DistributedFileSystem.getClient() and fix the deprecation warnings in
DFSAdmin.

Modified:
    hadoop/common/trunk/hdfs/CHANGES.txt
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSClientAdapter.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java

Modified: hadoop/common/trunk/hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/CHANGES.txt?rev=1147359&r1=1147358&r2=1147359&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hdfs/CHANGES.txt Sat Jul 16 05:12:40 2011
@@ -560,6 +560,9 @@ Trunk (unreleased changes)
     HDFS-2153. Move DFSClientAdapter to test and fix some javac warnings in
     OfflineEditsViewerHelper.  (szetszwo)
 
+    HDFS-2159. Deprecate DistributedFileSystem.getClient() and fixed the
+    deprecated warnings in DFSAdmin.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1147359&r1=1147358&r2=1147359&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Sat
Jul 16 05:12:40 2011
@@ -532,6 +532,9 @@ public class DistributedFileSystem exten
     return "DFS[" + dfs + "]";
   }
 
+  /** @deprecated DFSClient should not be accessed directly. */
+  @InterfaceAudience.Private
+  @Deprecated
   public DFSClient getClient() {
     return dfs;
   }        
@@ -624,9 +627,15 @@ public class DistributedFileSystem exten
     return new CorruptFileBlockIterator(dfs, path);
   }
 
-  /** Return statistics for each datanode. */
+  /** @return datanode statistics. */
   public DatanodeInfo[] getDataNodeStats() throws IOException {
-    return dfs.datanodeReport(DatanodeReportType.ALL);
+    return getDataNodeStats(DatanodeReportType.ALL);
+  }
+
+  /** @return datanode statistics for the given type. */
+  public DatanodeInfo[] getDataNodeStats(final DatanodeReportType type
+      ) throws IOException {
+    return dfs.datanodeReport(type);
   }
 
   /**

Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1147359&r1=1147358&r2=1147359&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Sat Jul 16
05:12:40 2011
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -95,7 +94,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     ClearQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.args = parameters.toArray(new String[parameters.size()]);
     }
@@ -140,7 +139,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     SetQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.quota = Long.parseLong(parameters.remove(0));
       this.args = parameters.toArray(new String[parameters.size()]);
@@ -180,7 +179,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     ClearSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.args = parameters.toArray(new String[parameters.size()]);
     }
@@ -228,7 +227,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     SetSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       String str = parameters.remove(0).trim();
       quota = StringUtils.TraditionalBinaryPrefix.string2long(str);
@@ -327,10 +326,8 @@ public class DFSAdmin extends FsShell {
 
       System.out.println("-------------------------------------------------");
       
-      DatanodeInfo[] live = dfs.getClient().datanodeReport(
-                                                   DatanodeReportType.LIVE);
-      DatanodeInfo[] dead = dfs.getClient().datanodeReport(
-                                                   DatanodeReportType.DEAD);
+      DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
+      DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
       System.out.println("Datanodes available: " + live.length +
                          " (" + (live.length + dead.length) + " total, " + 
                          dead.length + " dead)\n");
@@ -691,9 +688,8 @@ public class DFSAdmin extends FsShell {
    */
   public int printTopology() throws IOException {
       DistributedFileSystem dfs = getDFS();
-      DFSClient client = dfs.getClient();
-      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
-      
+      final DatanodeInfo[] report = dfs.getDataNodeStats();
+
       // Build a map of rack -> nodes from the datanode report
       HashMap<String, TreeSet<String> > tree = new HashMap<String, TreeSet<String>>();
       for(DatanodeInfo dni : report) {

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSClientAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSClientAdapter.java?rev=1147359&r1=1147358&r2=1147359&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSClientAdapter.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSClientAdapter.java Sat
Jul 16 05:12:40 2011
@@ -23,10 +23,13 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 
 public class DFSClientAdapter {
+  public static DFSClient getDFSClient(DistributedFileSystem dfs) {
+    return dfs.dfs;
+  }
   
-  public static void stopLeaseRenewer(DFSClient dfsClient) throws IOException {
+  public static void stopLeaseRenewer(DistributedFileSystem dfs) throws IOException {
     try {
-      dfsClient.leaserenewer.interruptAndJoin();
+      dfs.dfs.leaserenewer.interruptAndJoin();
     } catch (InterruptedException e) {
       throw new IOException(e);
     }

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=1147359&r1=1147358&r2=1147359&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java Sat
Jul 16 05:12:40 2011
@@ -71,7 +71,7 @@ public class TestAbandonBlock {
     fout.hflush();
 
     // Now abandon the last block
-    DFSClient dfsclient = ((DistributedFileSystem)fs).getClient();
+    DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
     LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1);
     LocatedBlock b = blocks.getLastLocatedBlock();
     dfsclient.getNamenode().abandonBlock(b.getBlock(), src, dfsclient.clientName);

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=1147359&r1=1147358&r2=1147359&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java Sat
Jul 16 05:12:40 2011
@@ -80,7 +80,7 @@ public class TestLeaseRecovery extends j
       String filestr = "/foo";
       Path filepath = new Path(filestr);
       DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
-      assertTrue(dfs.dfs.exists(filestr));
+      assertTrue(dfs.exists(filepath));
       DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
 
       //get block info for the last block

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=1147359&r1=1147358&r2=1147359&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
(original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
Sat Jul 16 05:12:40 2011
@@ -25,6 +25,7 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -84,10 +85,11 @@ public class TestInterDatanodeProtocol {
       String filestr = "/foo";
       Path filepath = new Path(filestr);
       DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
-      assertTrue(dfs.getClient().exists(filestr));
+      assertTrue(dfs.exists(filepath));
 
       //get block info
-      LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().getNamenode(), filestr);
+      LocatedBlock locatedblock = getLastLocatedBlock(
+          DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
       DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
       assertTrue(datanodeinfo.length > 0);
 
@@ -236,7 +238,7 @@ public class TestInterDatanodeProtocol {
 
       //get block info
       final LocatedBlock locatedblock = getLastLocatedBlock(
-          dfs.getClient().getNamenode(), filestr);
+          DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
       final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
       Assert.assertTrue(datanodeinfo.length > 0);
 

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java?rev=1147359&r1=1147358&r2=1147359&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
(original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
Sat Jul 16 05:12:40 2011
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
@@ -124,7 +124,7 @@ public class TestTransferRbw {
         final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
             oldrbw.getGenerationStamp());
         final BlockOpResponseProto s = DFSTestUtil.transferRbw(
-            b, fs.getClient(), oldnodeinfo, newnodeinfo);
+            b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
         Assert.assertEquals(Status.SUCCESS, s.getStatus());
       }
 

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1147359&r1=1147358&r2=1147359&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
(original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
Sat Jul 16 05:12:40 2011
@@ -221,7 +221,7 @@ public class OfflineEditsViewerHelper {
     // OP_REASSIGN_LEASE 22
     String filePath = "/hard-lease-recovery-test";
     byte[] bytes = "foo-bar-baz".getBytes();
-    DFSClientAdapter.stopLeaseRenewer(dfs.getClient());
+    DFSClientAdapter.stopLeaseRenewer(dfs);
     FSDataOutputStream leaseRecoveryPath = dfs.create(new Path(filePath));
     leaseRecoveryPath.write(bytes);
     leaseRecoveryPath.hflush();

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java?rev=1147359&r1=1147358&r2=1147359&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
(original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
Sat Jul 16 05:12:40 2011
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -72,7 +73,7 @@ public class TestBlockUnderConstruction 
     // wait until the block is allocated by DataStreamer
     BlockLocation[] locatedBlocks;
     while(blocksAfter <= blocksBefore) {
-      locatedBlocks = hdfs.getClient().getBlockLocations(
+      locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
           file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
       blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
     }



Mime
View raw message