hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1213389 [3/3] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/ hadoop-hdfs-httpfs/src/main/ hadoop-hdfs-httpfs/src/main/conf/ hadoop-hdfs-httpfs/src/main/java/ hadoop-hdfs-httpfs/sr...
Date: Mon, 12 Dec 2011 19:41:31 GMT
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java Mon Dec 12 19:41:20 2011
@@ -29,6 +29,11 @@ public class PermissionParam extends Sho
   private static final Domain DOMAIN = new Domain(NAME, 8);
 
   private static final short DEFAULT_PERMISSION = 0755;
+
+  /** @return the default FsPermission. */
+  public static FsPermission getDefaultFsPermission() {
+    return new FsPermission(DEFAULT_PERMISSION);
+  }
   
   /**
    * Constructor.

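The new accessor gives WebHDFS code a single source for the 0755 default instead of duplicating the constant. A minimal sketch of a call site, assuming PermissionParam also exposes a getFsPermission() instance accessor (the helper class below is hypothetical):

    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.web.resources.PermissionParam;

    class PermissionDefaults {
      /** Fall back to the shared 0755 default when the request
       *  carries no permission parameter. */
      static FsPermission effective(PermissionParam param) {
        return param != null
            ? param.getFsPermission()
            : PermissionParam.getDefaultFsPermission();
      }
    }
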
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java Mon Dec 12 19:41:20 2011
@@ -26,6 +26,7 @@ public class PutOpParam extends HttpOpPa
     CREATE(true, HttpURLConnection.HTTP_CREATED),
 
     MKDIRS(false, HttpURLConnection.HTTP_OK),
+    CREATESYMLINK(false, HttpURLConnection.HTTP_OK),
     RENAME(false, HttpURLConnection.HTTP_OK),
     SETREPLICATION(false, HttpURLConnection.HTTP_OK),
 

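CREATESYMLINK is the HTTP PUT operation behind WebHdfsFileSystem.createSymlink(..), which the TestFcHdfsSymlink change later in this commit exercises. A condensed sketch of the client-side call, mirroring that test (cluster and configuration setup elided; paths are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
    import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;

    class CreateSymlinkSketch {
      static void createLink(Configuration conf) throws java.io.IOException {
        // Obtain a WebHDFS client for the running cluster, as the test does.
        WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
        Path file = new Path("/user/foo/file");
        Path link = new Path("/user/foo/linkToFile");
        webhdfs.createSymlink(file, link, false);  // third arg: createParent
      }
    }
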
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Dec 12 19:41:20 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1211747
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1213339
 /hadoop/core/branches/branch-0.19/hdfs/src/main/native:713112
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663

Copied: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (from r1213339, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto)
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?p2=hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto&p1=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto&r1=1213339&r2=1213389&rev=1213389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Mon Dec 12 19:41:20 2011
@@ -167,10 +167,25 @@ message HeartbeatRequestProto {
 }
 
 /**
+ * state - State the NN is in when returning response to the DN
+ * txid - Highest transaction ID this NN has seen
+ */
+message NNHAStatusHeartbeatProto {
+  enum State {
+    ACTIVE = 0;
+    STANDBY = 1;
+  }
+  required State state = 1; 
+  required uint64 txid = 2;
+}
+
+/**
  * cmds - Commands from namenode to datanode.
+ * haStatus - Status (from an HA perspective) of the NN sending this response
  */
 message HeartbeatResponseProto {
   repeated DatanodeCommandProto cmds = 1;
+  required NNHAStatusHeartbeatProto haStatus = 2;
 }
 
 /**

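The state and txid fields let a datanode learn, on every heartbeat, which namenode is active and how current it is. A sketch of building the message from Java, assuming the generated class lands in DatanodeProtocolProtos like the other messages imported elsewhere in this commit:

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;

    class HaStatusSketch {
      /** HA status an active NN at the given transaction ID would attach. */
      static NNHAStatusHeartbeatProto activeStatus(long txid) {
        return NNHAStatusHeartbeatProto.newBuilder()
            .setState(NNHAStatusHeartbeatProto.State.ACTIVE)
            .setTxid(txid)
            .build();
      }
    }
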
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Dec 12 19:41:20 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1211747
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1213339
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/datanode:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Dec 12 19:41:20 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1211747
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1213339
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/hdfs:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Dec 12 19:41:20 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1211747
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1213339
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/secondary:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Dec 12 19:41:20 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1211747
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1213339
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java Mon Dec 12 19:41:20 2011
@@ -17,29 +17,28 @@
  */
 package org.apache.hadoop.fs;
 
-import java.io.*;
+import static org.apache.hadoop.fs.FileContextTestHelper.getAbsoluteTestRootDir;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
 import java.net.URI;
 
 import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.log4j.Level;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import static org.apache.hadoop.fs.FileContextTestHelper.*;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.ipc.RemoteException;
-
-import static org.junit.Assert.*;
-import org.junit.Test;
-import org.junit.BeforeClass;
+import org.apache.log4j.Level;
 import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
 
 /**
  * Test symbolic links using FileContext and Hdfs.
@@ -51,6 +50,8 @@ public class TestFcHdfsSymlink extends F
   }
 
   private static MiniDFSCluster cluster;
+  private static WebHdfsFileSystem webhdfs;
+
   
   protected String getScheme() {
     return "hdfs";
@@ -79,10 +80,11 @@ public class TestFcHdfsSymlink extends F
   @BeforeClass
   public static void testSetUp() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");
     cluster = new MiniDFSCluster.Builder(conf).build();
     fc = FileContext.getFileContext(cluster.getURI(0));
+    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
   }
   
   @AfterClass
@@ -263,4 +265,17 @@ public class TestFcHdfsSymlink extends F
     FileStatus statLink = fc.getFileStatus(link);
     assertEquals(statLink.getOwner(), statFile.getOwner());
   }
+
+  @Test
+  /** Test WebHdfsFileSystem.createSymlink(..). */
+  public void testWebHDFS() throws IOException {
+    Path file = new Path(testBaseDir1(), "file");
+    Path link = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file);
+    webhdfs.createSymlink(file, link, false);
+    fc.setReplication(link, (short)2);
+    assertEquals(0, fc.getFileLinkStatus(link).getReplication());
+    assertEquals(2, fc.getFileStatus(link).getReplication());      
+    assertEquals(2, fc.getFileStatus(file).getReplication());
+  }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Mon Dec 12 19:41:20 2011
@@ -55,7 +55,8 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol;
+import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -69,7 +70,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.ipc.RPC;
@@ -523,8 +523,8 @@ public class MiniDFSCluster {
         Class<?> rpcEngine = conf.getClassByName(rpcEngineName);
         setRpcEngine(conf, NamenodeProtocols.class, rpcEngine);
         setRpcEngine(conf, ClientNamenodeWireProtocol.class, rpcEngine);
-        setRpcEngine(conf, ClientDatanodeWireProtocol.class, rpcEngine);
-        setRpcEngine(conf, NamenodeProtocol.class, rpcEngine);
+        setRpcEngine(conf, ClientDatanodeProtocolPB.class, rpcEngine);
+        setRpcEngine(conf, NamenodeProtocolPB.class, rpcEngine);
         setRpcEngine(conf, ClientProtocol.class, rpcEngine);
         setRpcEngine(conf, DatanodeProtocol.class, rpcEngine);
         setRpcEngine(conf, RefreshAuthorizationPolicyProtocol.class, rpcEngine);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Mon Dec 12 19:41:20 2011
@@ -638,7 +638,7 @@ public class TestDFSClientRetries extend
       proxy = DFSUtil.createClientDatanodeProtocolProxy(
           fakeDnId, conf, 500, fakeBlock);
 
-      proxy.getReplicaVisibleLength(null);
+      proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
       fail ("Did not get expected exception: SocketTimeoutException");
     } catch (SocketTimeoutException e) {
       LOG.info("Got the expected Exception: SocketTimeoutException");

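A likely reason for the new placeholder ExtendedBlock: the protobuf translators introduced in this change set declare the block as a required field and cannot marshal the null that the old Writable-based proxy tolerated.
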
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Mon Dec 12 19:41:20 2011
@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -32,9 +33,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import junit.framework.TestCase;
@@ -98,10 +97,8 @@ public class TestGetBlocks extends TestC
       // get RPC client to namenode
       InetSocketAddress addr = new InetSocketAddress("localhost",
           cluster.getNameNodePort());
-      NamenodeProtocol namenode = (NamenodeProtocol) RPC.getProxy(
-          NamenodeProtocol.class, NamenodeProtocol.versionID, addr,
-          UserGroupInformation.getCurrentUser(), CONF,
-          NetUtils.getDefaultSocketFactory(CONF));
+      NamenodeProtocol namenode = new NamenodeProtocolTranslatorPB(addr, CONF,
+          UserGroupInformation.getCurrentUser());
 
       // get blocks of size fileLen from dataNodes[0]
       BlockWithLocations[] locs;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Mon Dec 12 19:41:20 2011
@@ -21,25 +21,29 @@ import static junit.framework.Assert.*;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.EnumSet;
 import java.util.List;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
@@ -47,14 +51,17 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.security.token.block.BlockKey;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.io.Text;
@@ -120,6 +127,10 @@ public class TestPBHelper {
     DatanodeID dn = new DatanodeID("node", "sid", 1, 2);
     DatanodeIDProto dnProto = PBHelper.convert(dn);
     DatanodeID dn2 = PBHelper.convert(dnProto);
+    compare(dn, dn2);
+  }
+  
+  void compare(DatanodeID dn, DatanodeID dn2) {
     assertEquals(dn.getHost(), dn2.getHost());
     assertEquals(dn.getInfoPort(), dn2.getInfoPort());
     assertEquals(dn.getIpcPort(), dn2.getIpcPort());
@@ -177,7 +188,6 @@ public class TestPBHelper {
     assertEquals(k1.getExpiryDate(), k2.getExpiryDate());
     assertEquals(k1.getKeyId(), k2.getKeyId());
     assertTrue(Arrays.equals(k1.getEncodedKey(), k2.getEncodedKey()));
-
   }
 
   @Test
@@ -195,7 +205,10 @@ public class TestPBHelper {
         getBlockKey(1), keys);
     ExportedBlockKeysProto expKeysProto = PBHelper.convert(expKeys);
     ExportedBlockKeys expKeys1 = PBHelper.convert(expKeysProto);
-
+    compare(expKeys, expKeys1);
+  }
+  
+  void compare(ExportedBlockKeys expKeys, ExportedBlockKeys expKeys1) {
     BlockKey[] allKeys = expKeys.getAllKeys();
     BlockKey[] allKeys1 = expKeys1.getAllKeys();
     assertEquals(allKeys.length, allKeys1.length);
@@ -314,15 +327,108 @@ public class TestPBHelper {
   }
   
   @Test
-  public void testBlockTokenIdentifier() {
+  public void testConvertBlockToken() {
     Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
         "identifier".getBytes(), "password".getBytes(), new Text("kind"),
         new Text("service"));
     BlockTokenIdentifierProto tokenProto = PBHelper.convert(token);
     Token<BlockTokenIdentifier> token2 = PBHelper.convert(tokenProto);
-    assertTrue(Arrays.equals(token.getIdentifier(), token2.getIdentifier()));
-    assertTrue(Arrays.equals(token.getPassword(), token2.getPassword()));
-    assertEquals(token.getKind(), token2.getKind());
-    assertEquals(token.getService(), token2.getService());
+    compare(token, token2);
+  }
+  
+  @Test
+  public void testConvertNamespaceInfo() {
+    NamespaceInfo info = new NamespaceInfo(37, "clusterID", "bpID", 2300, 53);
+    NamespaceInfoProto proto = PBHelper.convert(info);
+    NamespaceInfo info2 = PBHelper.convert(proto);
+    compare(info, info2); //Compare the StorageInfo
+    assertEquals(info.getBlockPoolID(), info2.getBlockPoolID());
+    assertEquals(info.getBuildVersion(), info2.getBuildVersion());
+    assertEquals(info.getDistributedUpgradeVersion(),
+        info2.getDistributedUpgradeVersion());
+  }
+
+  private void compare(StorageInfo expected, StorageInfo actual) {
+    assertEquals(expected.clusterID, actual.clusterID);
+    assertEquals(expected.namespaceID, actual.namespaceID);
+    assertEquals(expected.cTime, actual.cTime);
+    assertEquals(expected.layoutVersion, actual.layoutVersion);
+  }
+
+  private void compare(Token<BlockTokenIdentifier> expected,
+      Token<BlockTokenIdentifier> actual) {
+    assertTrue(Arrays.equals(expected.getIdentifier(), actual.getIdentifier()));
+    assertTrue(Arrays.equals(expected.getPassword(), actual.getPassword()));
+    assertEquals(expected.getKind(), actual.getKind());
+    assertEquals(expected.getService(), actual.getService());
+  }
+  
+  @Test
+  public void testConvertLocatedBlock() {
+    DatanodeInfo [] dnInfos = new DatanodeInfo[3];
+    dnInfos[0] = new DatanodeInfo("host0", "0", 5000, 5001, 20000, 10001, 9999,
+        59, 69, 32, "local", "host0", AdminStates.DECOMMISSION_INPROGRESS);
+    dnInfos[1] = new DatanodeInfo("host1", "1", 5000, 5001, 20000, 10001, 9999,
+        59, 69, 32, "local", "host1", AdminStates.DECOMMISSIONED);
+    dnInfos[2] = new DatanodeInfo("host2", "2", 5000, 5001, 20000, 10001, 9999,
+        59, 69, 32, "local", "host1", AdminStates.NORMAL);
+    LocatedBlock lb = new LocatedBlock(
+        new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
+    LocatedBlockProto lbProto = PBHelper.convert(lb);
+    LocatedBlock lb2 = PBHelper.convert(lbProto);
+    assertEquals(lb.getBlock(), lb2.getBlock());
+    compare(lb.getBlockToken(), lb2.getBlockToken());
+    assertEquals(lb.getStartOffset(), lb2.getStartOffset());
+    assertEquals(lb.isCorrupt(), lb2.isCorrupt());
+    DatanodeInfo [] dnInfos2 = lb2.getLocations();
+    assertEquals(dnInfos.length, dnInfos2.length);
+    for (int i = 0; i < dnInfos.length ; i++) {
+      compare(dnInfos[i], dnInfos2[i]);
+    }
+  }
+  
+  @Test
+  public void testConvertDatanodeRegistration() {
+    DatanodeID dnId = new DatanodeID("host", "xyz", 1, 0);
+    BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
+    ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
+        getBlockKey(1), keys);
+    DatanodeRegistration reg = new DatanodeRegistration(dnId,
+        new StorageInfo(), expKeys);
+    DatanodeRegistrationProto proto = PBHelper.convert(reg);
+    DatanodeRegistration reg2 = PBHelper.convert(proto);
+    compare(reg.storageInfo, reg2.storageInfo);
+    compare(reg.exportedKeys, reg2.exportedKeys);
+    compare((DatanodeID)reg, (DatanodeID)reg2);
+  }
+  
+  @Test
+  public void testConvertBlockCommand() {
+    Block[] blocks = new Block[] { new Block(21), new Block(22) };
+    DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
+        new DatanodeInfo[2] };
+    dnInfos[0][0] = new DatanodeInfo();
+    dnInfos[1][0] = new DatanodeInfo();
+    dnInfos[1][1] = new DatanodeInfo();
+    BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
+        blocks, dnInfos);
+    BlockCommandProto bcProto = PBHelper.convert(bc);
+    BlockCommand bc2 = PBHelper.convert(bcProto);
+    assertEquals(bc.getAction(), bc2.getAction());
+    assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
+    Block[] blocks2 = bc2.getBlocks();
+    for (int i = 0; i < blocks.length; i++) {
+      assertEquals(blocks[i], blocks2[i]);
+    }
+    DatanodeInfo[][] dnInfos2 = bc2.getTargets();
+    assertEquals(dnInfos.length, dnInfos2.length);
+    for (int i = 0; i < dnInfos.length; i++) {
+      DatanodeInfo[] d1 = dnInfos[i];
+      DatanodeInfo[] d2 = dnInfos2[i];
+      assertEquals(d1.length, d2.length);
+      for (int j = 0; j < d1.length; j++) {
+        compare(d1[j], d2[j]);
+      }
+    }
   }
 }

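Every testConvert* case added above follows the same round-trip idiom: convert the native type to its protobuf message with PBHelper.convert, convert back, and compare field by field. Condensed to its simplest instance, assuming PBHelper carries Block/BlockProto overloads like the other convert pairs exercised here:

    import static junit.framework.Assert.assertEquals;

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;
    import org.junit.Test;

    public class RoundTripSketch {
      @Test
      public void testConvertBlock() {
        Block b = new Block(100L, 1024L, 7L);    // id, length, generation stamp
        BlockProto proto = PBHelper.convert(b);  // native -> protobuf
        Block b2 = PBHelper.convert(proto);      // protobuf -> native
        assertEquals(b, b2);                     // round trip preserves the block
      }
    }
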
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java Mon Dec 12 19:41:20 2011
@@ -25,6 +25,7 @@ import java.io.DataInputStream;
 import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -183,23 +184,44 @@ public class TestDelegationToken {
       }
     });
 
-    final Token<DelegationTokenIdentifier> token = webhdfs
-        .getDelegationToken("JobTracker");
-    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
-    byte[] tokenId = token.getIdentifier();
-    identifier
-        .readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
-    LOG.info("A valid token should have non-null password, and should be renewed successfully");
-    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
-    dtSecretManager.renewToken(token, "JobTracker");
-    ugi.doAs(new PrivilegedExceptionAction<Object>() {
-      @Override
-      public Object run() throws Exception {
-        token.renew(config);
-        token.cancel(config);
-        return null;
-      }
-    });
+    { //test getDelegationToken(..)
+      final Token<DelegationTokenIdentifier> token = webhdfs
+          .getDelegationToken("JobTracker");
+      DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+      byte[] tokenId = token.getIdentifier();
+      identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
+      LOG.info("A valid token should have non-null password, and should be renewed successfully");
+      Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+      dtSecretManager.renewToken(token, "JobTracker");
+      ugi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          token.renew(config);
+          token.cancel(config);
+          return null;
+        }
+      });
+    }
+
+    { //test getDelegationTokens(..)
+      final List<Token<?>> tokenlist = webhdfs.getDelegationTokens("JobTracker");
+      DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+      @SuppressWarnings("unchecked")
+      final Token<DelegationTokenIdentifier> token = (Token<DelegationTokenIdentifier>)tokenlist.get(0);
+      byte[] tokenId = token.getIdentifier();
+      identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
+      LOG.info("A valid token should have non-null password, and should be renewed successfully");
+      Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+      dtSecretManager.renewToken(token, "JobTracker");
+      ugi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          token.renew(config);
+          token.cancel(config);
+          return null;
+        }
+      });
+    }
   }
 
   @SuppressWarnings("deprecation")

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Mon Dec 12 19:41:20 2011
@@ -57,8 +57,14 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
+import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.io.TestWritable;
 import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
@@ -76,6 +82,10 @@ import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
 /** Unit tests for block tokens */
 public class TestBlockToken {
   public static final Log LOG = LogFactory.getLog(TestBlockToken.class);
@@ -106,22 +116,24 @@ public class TestBlockToken {
   ExtendedBlock block2 = new ExtendedBlock("10", 10L);
   ExtendedBlock block3 = new ExtendedBlock("-10", -108L);
 
-  private static class getLengthAnswer implements Answer<Long> {
+  private static class GetLengthAnswer implements
+      Answer<GetReplicaVisibleLengthResponseProto> {
     BlockTokenSecretManager sm;
     BlockTokenIdentifier ident;
 
-    public getLengthAnswer(BlockTokenSecretManager sm,
+    public GetLengthAnswer(BlockTokenSecretManager sm,
         BlockTokenIdentifier ident) {
       this.sm = sm;
       this.ident = ident;
     }
 
     @Override
-    public Long answer(InvocationOnMock invocation) throws IOException {
+    public GetReplicaVisibleLengthResponseProto answer(
+        InvocationOnMock invocation) throws IOException {
       Object args[] = invocation.getArguments();
-      assertEquals(1, args.length);
-      org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable block = 
-          (org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable) args[0];
+      assertEquals(2, args.length);
+      GetReplicaVisibleLengthRequestProto req = 
+          (GetReplicaVisibleLengthRequestProto) args[1];
       Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
           .getTokenIdentifiers();
       assertEquals("Only one BlockTokenIdentifier expected", 1, tokenIds.size());
@@ -130,12 +142,12 @@ public class TestBlockToken {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
         LOG.info("Got: " + id.toString());
         assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
-        sm.checkAccess(id, null, org.apache.hadoop.hdfs.protocolR23Compatible.
-            ExtendedBlockWritable.convertExtendedBlock(block),
+        sm.checkAccess(id, null, PBHelper.convert(req.getBlock()),
             BlockTokenSecretManager.AccessMode.WRITE);
         result = id.getBlockId();
       }
-      return result;
+      return GetReplicaVisibleLengthResponseProto.newBuilder()
+          .setLength(result).build();
     }
   }
 
@@ -208,25 +220,29 @@ public class TestBlockToken {
   }
 
   private Server createMockDatanode(BlockTokenSecretManager sm,
-      Token<BlockTokenIdentifier> token) throws IOException {
-    org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol mockDN =
-        mock(org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.class);
+      Token<BlockTokenIdentifier> token) throws IOException, ServiceException {
+    ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);
     when(mockDN.getProtocolVersion(anyString(), anyLong())).thenReturn(
-        org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.versionID);
+        RPC.getProtocolVersion(ClientDatanodeProtocolPB.class));
     doReturn(
         ProtocolSignature.getProtocolSignature(mockDN,
-            org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.class.getName(),
-            org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.versionID, 0)).when(mockDN)
-        .getProtocolSignature(anyString(), anyLong(), anyInt());
+            ClientDatanodeProtocolPB.class.getName(),
+            RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), 0)).when(
+        mockDN).getProtocolSignature(anyString(), anyLong(), anyInt());
 
     BlockTokenIdentifier id = sm.createIdentifier();
     id.readFields(new DataInputStream(new ByteArrayInputStream(token
         .getIdentifier())));
-    doAnswer(new getLengthAnswer(sm, id)).when(mockDN).getReplicaVisibleLength(
-        any(org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable.class));
-
-    return RPC.getServer(org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.class,
-        mockDN, ADDRESS, 0, 5,
+    
+    doAnswer(new GetLengthAnswer(sm, id)).when(mockDN)
+        .getReplicaVisibleLength(any(RpcController.class),
+            any(GetReplicaVisibleLengthRequestProto.class));
+
+    RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
+        ProtobufRpcEngine.class);
+    BlockingService service = ClientDatanodeProtocolService
+        .newReflectiveBlockingService(mockDN);
+    return RPC.getServer(ClientDatanodeProtocolPB.class, service, ADDRESS, 0, 5,
         true, conf, sm);
   }
 
@@ -323,7 +339,7 @@ public class TestBlockToken {
   /**
    * @return the current number of file descriptors open by this process.
    */
-  private static int countOpenFileDescriptors() throws IOException {
+  private static int countOpenFileDescriptors() {
     return FD_DIR.list().length;
   }
 

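The rewritten mock server doubles as the general recipe for serving a protobuf protocol in this code base: register ProtobufRpcEngine for the *PB interface via RPC.setProtocolEngine, wrap the implementation in a reflective BlockingService, and pass that service to RPC.getServer.
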
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Mon Dec 12 19:41:20 2011
@@ -150,7 +150,6 @@ public class TestInterDatanodeProtocol {
       DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
       InterDatanodeProtocol idp = DataNode.createInterDataNodeProtocolProxy(
           datanodeinfo[0], conf, datanode.getDnConf().socketTimeout);
-      assertTrue(datanode != null);
       
       //stop block scanner, so we could compare lastScanTime
       if (datanode.blockScanner != null) {
@@ -347,8 +346,8 @@ public class TestInterDatanodeProtocol {
   /** Test to verify that InterDatanode RPC timesout as expected when
    *  the server DN does not respond.
    */
-  @Test
-  public void testInterDNProtocolTimeout() throws Exception {
+  @Test(expected=SocketTimeoutException.class)
+  public void testInterDNProtocolTimeout() throws Throwable {
     final Server server = new TestServer(1, true);
     server.start();
 
@@ -361,10 +360,9 @@ public class TestInterDatanodeProtocol {
     try {
       proxy = DataNode.createInterDataNodeProtocolProxy(
           dInfo, conf, 500);
-      proxy.initReplicaRecovery(null);
+      proxy.initReplicaRecovery(new RecoveringBlock(
+          new ExtendedBlock("bpid", 1), null, 100));
       fail ("Expected SocketTimeoutException exception, but did not get.");
-    } catch (SocketTimeoutException e) {
-      DataNode.LOG.info("Got expected Exception: SocketTimeoutException" + e);
     } finally {
       if (proxy != null) {
         RPC.stopProxy(proxy);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/pom.xml?rev=1213389&r1=1213388&r2=1213389&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/pom.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/pom.xml Mon Dec 12 19:41:20 2011
@@ -29,6 +29,7 @@
 
   <modules>
     <module>hadoop-hdfs</module>
+    <module>hadoop-hdfs-httpfs</module>
   </modules>
 
   <build>


