hadoop-hdfs-commits mailing list archives

From sur...@apache.org
Subject svn commit: r1177130 [2/2] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ hadoop-hdfs/s...
Date Thu, 29 Sep 2011 00:43:03 GMT
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:42:47 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1173011
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1177128
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/hdfs:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:42:47 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1173011
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1177128
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/secondary:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:42:47 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1173011
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1177128
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java Thu Sep 29 00:42:47 2011
@@ -72,6 +72,7 @@ public class TestDFSPermission extends T
   final private static Path NON_EXISTENT_FILE = new Path("/NonExistentFile");
 
   private FileSystem fs;
+  private MiniDFSCluster cluster;
   private static Random r;
 
   static {
@@ -105,18 +106,25 @@ public class TestDFSPermission extends T
     }
   }
 
+  @Override
+  public void setUp() throws IOException {
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster.waitActive();
+  }
+  
+  @Override
+  public void tearDown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+  
   /** This tests if permission setting in create, mkdir, and 
    * setPermission works correctly
    */
   public void testPermissionSetting() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
-    try {
-      cluster.waitActive();
-      testPermissionSetting(OpType.CREATE); // test file creation
-      testPermissionSetting(OpType.MKDIRS); // test directory creation
-    } finally {
-      cluster.shutdown();
-    }
+    testPermissionSetting(OpType.CREATE); // test file creation
+    testPermissionSetting(OpType.MKDIRS); // test directory creation
   }
 
   private void initFileSystem(short umask) throws Exception {
@@ -245,17 +253,22 @@ public class TestDFSPermission extends T
     }
   }
 
+  /**
+   * check that ImmutableFsPermission can be used as the argument
+   * to setPermission
+   */
+  public void testImmutableFsPermission() throws IOException {
+    fs = FileSystem.get(conf);
+
+    // set the permission of the root to be world-wide rwx
+    fs.setPermission(new Path("/"),
+        FsPermission.createImmutable((short)0777));
+  }
+  
   /* check if the ownership of a file/directory is set correctly */
   public void testOwnership() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
-    try {
-      cluster.waitActive();
-      testOwnership(OpType.CREATE); // test file creation
-      testOwnership(OpType.MKDIRS); // test directory creation
-    } finally {
-      fs.close();
-      cluster.shutdown();
-    }
+    testOwnership(OpType.CREATE); // test file creation
+    testOwnership(OpType.MKDIRS); // test directory creation
   }
 
   /* change a file/directory's owner and group.
@@ -342,9 +355,7 @@ public class TestDFSPermission extends T
   /* Check if namenode performs permission checking correctly for
    * superuser, file owner, group owner, and other users */
   public void testPermissionChecking() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
-      cluster.waitActive();
       fs = FileSystem.get(conf);
 
       // set the permission of the root to be world-wide rwx
@@ -401,7 +412,6 @@ public class TestDFSPermission extends T
           parentPermissions, permissions, parentPaths, filePaths, dirPaths);
     } finally {
       fs.close();
-      cluster.shutdown();
     }
   }
 

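The hunks above move cluster management out of the individual tests and into the shared JUnit 3 lifecycle, so each test method gets a fresh MiniDFSCluster without its own try/finally. A minimal sketch of that pattern, assuming JUnit 3's TestCase (the class and test names below are hypothetical):

    import java.io.IOException;

    import junit.framework.TestCase;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class ExampleClusterTest extends TestCase {
      private final Configuration conf = new HdfsConfiguration();
      private MiniDFSCluster cluster;

      @Override
      public void setUp() throws IOException {
        // JUnit 3 runs setUp() before every test method.
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
      }

      @Override
      public void tearDown() throws IOException {
        // tearDown() runs even when a test fails, so the cluster is never leaked.
        if (cluster != null) {
          cluster.shutdown();
        }
      }

      public void testSomething() throws Exception {
        // Test bodies can assume a running cluster here.
      }
    }
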
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Thu Sep 29 00:42:47 2011
@@ -29,8 +29,7 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 
-import junit.framework.Assert;
-
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -40,8 +39,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
-
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 public class TestDFSUtil {
   /**
@@ -76,79 +74,141 @@ public class TestDFSUtil {
       }
     }
 
-    assertTrue("expected 1 corrupt files but got " + corruptCount, 
-               corruptCount == 1);
-    
+    assertTrue("expected 1 corrupt files but got " + corruptCount,
+        corruptCount == 1);
+
     // test an empty location
     bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
     assertEquals(0, bs.length);
   }
 
-  /** 
-   * Test for
-   * {@link DFSUtil#getNameServiceIds(Configuration)}
-   * {@link DFSUtil#getNameServiceId(Configuration)}
-   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
+
+  private Configuration setupAddress(String key) {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFSUtil.getNameServiceIdKey(key, "nn1"), "localhost:9000");
+    return conf;
+  }
+
+  /**
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
+   * the configured nameserviceId is returned
    */
   @Test
-  public void testMultipleNamenodes() throws IOException {
+  public void getNameServiceId() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+  }
+  
+  /**
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
+   * the nameserviceId for the namenode is determined by matching the
+   * configured address with the local node's address
+   */
+  @Test
+  public void getNameNodeNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
+   * the nameserviceId for the backup node is determined by matching the
+   * configured address with the local node's address
+   */
+  @Test
+  public void getBackupNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
+   * the nameserviceId for the secondary namenode is determined by matching
+   * the configured address with the local node's address
+   */
+  @Test
+  public void getSecondaryNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getNameServiceId(Configuration, String)} to ensure an
+   * exception is thrown when multiple rpc addresses match the local node's
+   * address
+   */
+  @Test(expected = HadoopIllegalArgumentException.class)
+  public void testGetNameServiceIdException() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+        "localhost:9000");
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+        "localhost:9001");
+    DFSUtil.getNamenodeNameServiceId(conf);
+    fail("Expected exception is not thrown");
+  }
+
+  /**
+   * Test {@link DFSUtil#getNameServiceIds(Configuration)}
+   */
+  @Test
+  public void testGetNameServiceIds() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
-    
-    // Test - The configured nameserviceIds are returned
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
     Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
     Iterator<String> it = nameserviceIds.iterator();
     assertEquals(2, nameserviceIds.size());
     assertEquals("nn1", it.next().toString());
     assertEquals("nn2", it.next().toString());
-    
-    // Tests default nameserviceId is returned
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    assertEquals("nn1", DFSUtil.getNameServiceId(conf));
-    
+  }
+
+  /**
+   * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)} and
+   * {@link DFSUtil#getNameServiceIdFromAddress(Configuration,
+   * InetSocketAddress, String...)}
+   */
+  @Test
+  public void testMultipleNamenodes() throws IOException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
     // Test - configured list of namenodes are returned
     final String NN1_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
     final String NN3_ADDRESS = "localhost:9002";
-    conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
-    conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
-    
-    Collection<InetSocketAddress> nnAddresses = 
-      DFSUtil.getNNServiceRpcAddresses(conf);
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+        NN1_ADDRESS);
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+        NN2_ADDRESS);
+
+    Collection<InetSocketAddress> nnAddresses = DFSUtil
+        .getNNServiceRpcAddresses(conf);
     assertEquals(2, nnAddresses.size());
     Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
-    assertEquals(2, nameserviceIds.size());
     InetSocketAddress addr = iterator.next();
     assertEquals("localhost", addr.getHostName());
     assertEquals(9000, addr.getPort());
     addr = iterator.next();
     assertEquals("localhost", addr.getHostName());
     assertEquals(9001, addr.getPort());
-    
+
     // Test - can look up nameservice ID from service address
-    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
-    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress1,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals("nn1", nameserviceId);
-    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
-    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress2,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals("nn2", nameserviceId);
-    InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
-    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress3,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertNull(nameserviceId);
+    checkNameServiceId(conf, NN1_ADDRESS, "nn1");
+    checkNameServiceId(conf, NN2_ADDRESS, "nn2");
+    checkNameServiceId(conf, NN3_ADDRESS, null);
   }
-  
-  /** 
+
+  public void checkNameServiceId(Configuration conf, String addr,
+      String expectedNameServiceId) {
+    InetSocketAddress s = NetUtils.createSocketAddr(addr);
+    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals(expectedNameServiceId, nameserviceId);
+  }
+
+  /**
    * Test for
    * {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
    */
@@ -157,27 +217,25 @@ public class TestDFSUtil {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String DEFAULT_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
-    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
-    
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
+
     InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
     boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertTrue(isDefault);
     InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
     isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertFalse(isDefault);
   }
-  
+
   /** Tests to ensure default namenode is used as fallback */
   @Test
   public void testDefaultNamenode() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String hdfs_default = "hdfs://localhost:9999/";
-    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
-    // If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that 
+    conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
+    // If DFS_FEDERATION_NAMESERVICES is not set, verify that
     // default namenode address is returned.
     List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
     assertEquals(1, addrList.size());
@@ -191,26 +249,26 @@ public class TestDFSUtil {
   @Test
   public void testConfModification() throws IOException {
     final HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    final String nameserviceId = DFSUtil.getNameServiceId(conf);
-    
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
+
     // Set the nameservice specific keys with nameserviceId in the config key
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
       // Note: value is same as the key
       conf.set(DFSUtil.getNameServiceIdKey(key, nameserviceId), key);
     }
-    
+
     // Initialize generic keys from specific keys
-    NameNode.initializeGenericKeys(conf);
-    
+    NameNode.initializeGenericKeys(conf, nameserviceId);
+
     // Retrieve the keys without nameserviceId and Ensure generic keys are set
     // to the correct value
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
       assertEquals(key, conf.get(key));
     }
   }
-  
+
   /**
    * Tests for empty configuration, an exception is thrown from
    * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
@@ -238,16 +296,16 @@ public class TestDFSUtil {
     } catch (IOException expected) {
     }
   }
-  
+
   @Test
-  public void testGetServerInfo(){
+  public void testGetServerInfo() {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
     String httpsport = DFSUtil.getInfoServer(null, conf, true);
-    Assert.assertEquals("0.0.0.0:50470", httpsport);
+    assertEquals("0.0.0.0:50470", httpsport);
     String httpport = DFSUtil.getInfoServer(null, conf, false);
-    Assert.assertEquals("0.0.0.0:50070", httpport);
+    assertEquals("0.0.0.0:50070", httpport);
   }
 
 }
\ No newline at end of file

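The TestDFSUtil changes above exercise HDFS federation's convention of deriving per-nameservice configuration keys by suffixing a generic key with the nameservice id. A minimal sketch of that convention, assuming the dot-joined suffix that DFSUtil.getNameServiceIdKey builds in this tree (the key and id values below are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class NameServiceKeyExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Generic key, and its per-nameservice variant for nameservice "nn1".
        String genericKey = "dfs.namenode.rpc-address";
        String nn1Key = genericKey + ".nn1"; // what getNameServiceIdKey(genericKey, "nn1") builds
        conf.set("dfs.federation.nameservices", "nn1");
        conf.set(nn1Key, "localhost:9000");
        // A node that knows its nameservice id reads the suffixed key,
        // falling back to the generic key when the suffixed one is absent.
        System.out.println(conf.get(nn1Key, conf.get(genericKey)));
      }
    }
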
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Thu Sep 29 00:42:47 2011
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
 
@@ -24,17 +28,15 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
-
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 /** A class for testing quota-related commands */
 public class TestQuota {
@@ -841,6 +843,14 @@ public class TestQuota {
     DFSAdmin admin = new DFSAdmin(conf);
 
     try {
+      
+      // Test for default namespace quota
+      long nsQuota = FSImageTestUtil.getNSQuota(cluster.getNameNode()
+          .getNamesystem());
+      assertTrue(
+          "Default namespace quota expected to be Long.MAX_VALUE, but was: "
+              + nsQuota, nsQuota == Long.MAX_VALUE);
+      
       Path dir = new Path("/test");
       boolean exceededQuota = false;
       ContentSummary c;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java Thu Sep 29 00:42:47 2011
@@ -23,12 +23,12 @@ package org.apache.hadoop.hdfs.security;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
+import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,12 +38,16 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
+import org.apache.log4j.Level;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -56,12 +60,13 @@ public class TestDelegationToken {
   @Before
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
+    config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
     config.set("hadoop.security.auth_to_local",
         "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
-    cluster = new MiniDFSCluster.Builder(config).build();
+    cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
     cluster.waitActive();
     dtSecretManager = NameNodeAdapter.getDtSecretManager(
         cluster.getNamesystem());
@@ -154,6 +159,31 @@ public class TestDelegationToken {
   }
   
   @Test
+  public void testDelegationTokenWebHdfsApi() throws Exception {
+    ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
+    final String uri = WebHdfsFileSystem.SCHEME  + "://"
+        + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    //get file system as JobTracker
+    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+        "JobTracker", new String[]{"user"});
+    final WebHdfsFileSystem webhdfs = ugi.doAs(
+        new PrivilegedExceptionAction<WebHdfsFileSystem>() {
+      @Override
+      public WebHdfsFileSystem run() throws Exception {
+        return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
+      }
+    });
+
+    final Token<DelegationTokenIdentifier> token = webhdfs.getDelegationToken("JobTracker");
+    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+    byte[] tokenId = token.getIdentifier();
+    identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
+    LOG.info("A valid token should have non-null password, and should be renewed successfully");
+    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+    dtSecretManager.renewToken(token, "JobTracker");
+  }
+
+  @Test
   public void testDelegationTokenWithDoAs() throws Exception {
     final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
     final Token<DelegationTokenIdentifier> token = 

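The new testDelegationTokenWebHdfsApi above decodes the token identifier through Hadoop's Writable interface: Token#getIdentifier() returns raw bytes, which are replayed into a fresh DelegationTokenIdentifier. A minimal sketch of that round-trip, pulled out as a helper (the class and method names below are hypothetical):

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
    import org.apache.hadoop.security.token.Token;

    public class TokenDecodeExample {
      static DelegationTokenIdentifier decode(Token<DelegationTokenIdentifier> token)
          throws IOException {
        DelegationTokenIdentifier id = new DelegationTokenIdentifier();
        // Token identifiers are Writables; deserialize from the identifier bytes.
        id.readFields(new DataInputStream(
            new ByteArrayInputStream(token.getIdentifier())));
        return id;
      }
    }
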
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java Thu Sep 29 00:42:47 2011
@@ -18,31 +18,34 @@
 
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap;
+import org.junit.Before;
+import org.junit.Test;
 
-public class TestHost2NodesMap extends TestCase {
-  static private Host2NodesMap map = new Host2NodesMap();
-  private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
+public class TestHost2NodesMap {
+  private Host2NodesMap map = new Host2NodesMap();
+  private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
     new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
     new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
   };
-  private final static DatanodeDescriptor NULL_NODE = null; 
-  private final static DatanodeDescriptor NODE = 
-    new DatanodeDescriptor(new DatanodeID("h3:5040"), "/d1/r4");
+  private final DatanodeDescriptor NULL_NODE = null; 
+  private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
+      "/d1/r4");
 
-  static {
+  @Before
+  public void setup() {
     for(DatanodeDescriptor node:dataNodes) {
       map.add(node);
     }
     map.add(NULL_NODE);
   }
   
+  @Test
   public void testContains() throws Exception {
     for(int i=0; i<dataNodes.length; i++) {
       assertTrue(map.contains(dataNodes[i]));
@@ -51,6 +54,7 @@ public class TestHost2NodesMap extends T
     assertFalse(map.contains(NODE));
   }
 
+  @Test
   public void testGetDatanodeByHost() throws Exception {
     assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
     assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
@@ -59,6 +63,7 @@ public class TestHost2NodesMap extends T
     assertTrue(null==map.getDatanodeByHost("h4"));
   }
 
+  @Test
   public void testGetDatanodeByName() throws Exception {
     assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
     assertTrue(map.getDatanodeByName("h1:5030")==null);
@@ -71,6 +76,7 @@ public class TestHost2NodesMap extends T
     assertTrue(map.getDatanodeByName(null)==null);
   }
 
+  @Test
   public void testRemove() throws Exception {
     assertFalse(map.remove(NODE));
     

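The TestHost2NodesMap rewrite above (and the TestReplicasMap rewrite further below) follow the same JUnit 3-to-4 migration: drop extends TestCase, convert static fixtures into per-instance state so tests cannot interfere, and annotate the lifecycle and test methods. A minimal sketch of the target shape, with hypothetical fixture contents:

    import static org.junit.Assert.assertTrue;

    import java.util.HashSet;
    import java.util.Set;

    import org.junit.Before;
    import org.junit.Test;

    public class ExampleJUnit4Test {
      // Instance state instead of static: rebuilt for every test method.
      private Set<String> hosts;

      @Before
      public void setup() {
        hosts = new HashSet<String>();
        hosts.add("h1:5020");
      }

      @Test
      public void testContains() {
        assertTrue(hosts.contains("h1:5020"));
      }
    }
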
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java Thu Sep 29 00:42:47 2011
@@ -96,7 +96,8 @@ public class TestMulitipleNNDataBlockSca
 
       String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
       for (int i = 0; i < 2; i++) {
-        String nsId = DFSUtil.getNameServiceId(cluster.getConfiguration(i));
+        String nsId = DFSUtil.getNamenodeNameServiceId(cluster
+            .getConfiguration(i));
         namenodesBuilder.append(nsId);
         namenodesBuilder.append(",");
       }
@@ -116,7 +117,7 @@ public class TestMulitipleNNDataBlockSca
         LOG.info(ex.getMessage());
       }
 
-      namenodesBuilder.append(DFSUtil.getNameServiceId(cluster
+      namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
           .getConfiguration(2)));
       conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
           .toString());

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java Thu Sep 29 00:42:47 2011
@@ -17,21 +17,24 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
 import org.apache.hadoop.hdfs.protocol.Block;
-import static org.junit.Assert.*;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
  * Unit test for ReplicasMap class
  */
 public class TestReplicasMap {
-  private static final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
-  private static final String bpid = "BP-TEST";
-  private static final  Block block = new Block(1234, 1234, 1234);
+  private final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
+  private final String bpid = "BP-TEST";
+  private final  Block block = new Block(1234, 1234, 1234);
   
-  @BeforeClass
-  public static void setup() {
+  @Before
+  public void setup() {
     map.add(bpid, new FinalizedReplica(block, null, null));
   }
   

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Thu Sep 29 00:42:47 2011
@@ -412,4 +412,11 @@ public abstract class FSImageTestUtil {
   public static FSImage getFSImage(NameNode node) {
     return node.getFSImage();
   }
+
+  /**
+   * Get the namespace quota.
+   */
+  public static long getNSQuota(FSNamesystem ns) {
+    return ns.dir.rootDir.getNsQuota();
+  }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java Thu Sep 29 00:42:47 2011
@@ -18,17 +18,23 @@
 
 package org.apache.hadoop.hdfs.web;
 
+import java.io.BufferedReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
 import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -114,4 +120,42 @@ public class TestWebHdfsFileSystemContra
       // also okay for HDFS.
     }    
   }
+  
+  public void testGetFileBlockLocations() throws IOException {
+    final String f = "/test/testGetFileBlockLocations";
+    createFile(path(f));
+    final BlockLocation[] computed = fs.getFileBlockLocations(new Path(f), 0L, 1L);
+    final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
+        new Path(f), 0L, 1L);
+    assertEquals(expected.length, computed.length);
+    for(int i = 0; i < computed.length; i++) {
+      assertEquals(expected[i].toString(), computed[i].toString());
+    }
+  }
+
+  public void testCaseInsensitive() throws IOException {
+    final Path p = new Path("/test/testCaseInsensitive");
+    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
+    final PutOpParam.Op op = PutOpParam.Op.MKDIRS;
+
+    // replace the op in the query with mixed-case letters
+    final URL url = webhdfs.toUrl(op, p);
+    WebHdfsFileSystem.LOG.info("url      = " + url);
+    final URL replaced = new URL(url.toString().replace(op.toQueryString(),
+        "Op=mkDIrs"));
+    WebHdfsFileSystem.LOG.info("replaced = " + replaced);
+
+    //connect with the replaced URL.
+    final HttpURLConnection conn = (HttpURLConnection)replaced.openConnection();
+    conn.setRequestMethod(op.getType().toString());
+    conn.connect();
+    final BufferedReader in = new BufferedReader(new InputStreamReader(
+        conn.getInputStream()));
+    for(String line; (line = in.readLine()) != null; ) {
+      WebHdfsFileSystem.LOG.info("> " + line);
+    }
+
+    // check that the command succeeds.
+    assertTrue(fs.getFileStatus(p).isDirectory());
+  }
 }


