hadoop-hdfs-commits mailing list archives

From sra...@apache.org
Subject svn commit: r1101753 - in /hadoop/hdfs/trunk: ./ src/test/hdfs/org/apache/hadoop/fs/viewfs/
Date Wed, 11 May 2011 05:48:19 GMT
Author: sradia
Date: Wed May 11 05:48:19 2011
New Revision: 1101753

URL: http://svn.apache.org/viewvc?rev=1101753&view=rev
Log:
    HDFS-1911. HDFS tests for the newly added viewfs. (sanjay Radia)

Added:
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1101753&r1=1101752&r2=1101753&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed May 11 05:48:19 2011
@@ -273,6 +273,9 @@ Trunk (unreleased changes)
     HDFS-1873. Federation: Add cluster management web console.
     (Tanping Wang via suresh)
 
+    HDFS-1911. HDFS tests for the newly added viewfs. (sanjay Radia)
+
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java?rev=1101753&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java Wed May 11 05:48:19 2011
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.List;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+
+public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
+
+  private static MiniDFSCluster cluster;
+  private static Path defaultWorkingDirectory;
+  private static Configuration CONF = new Configuration();
+  private static FileSystem fHdfs;
+  
+  @BeforeClass
+  public static void clusterSetupAtBegining() throws IOException,
+      LoginException, URISyntaxException {
+    SupportsBlocks = true;
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
+    cluster.waitClusterUp();
+    cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    fHdfs = cluster.getFileSystem();
+    defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" + 
+        UserGroupInformation.getCurrentUser().getShortUserName()));
+    fHdfs.mkdirs(defaultWorkingDirectory);
+  }
+
+      
+  @AfterClass
+  public static void ClusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();   
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // create the test root on hdfs
+    fsTarget = fHdfs;
+    super.setUp();
+    
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+  
+  /*
+   * This overrides the default implementation since hdfs does have delegation
+   * tokens.
+   */
+  @Override
+  @Test
+  public void testGetDelegationTokens() throws IOException {
+    List<Token<?>> delTokens = 
+        fsView.getDelegationTokens("sanjay");
+    Assert.assertEquals(7, delTokens.size()); 
+  }
+
+}
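
The testGetDelegationTokens override above assumes that ViewFileSystem aggregates delegation tokens from every HDFS namespace mounted in its mount table (the expected count of 7 presumably reflects the mount points that ViewFileSystemBaseTest configures). A minimal, hypothetical sketch of how a client could enumerate those tokens through a viewfs instance, using only calls that appear in this commit; the renewer name "someRenewer" and the variable names are illustrative only:

    // Illustrative sketch, not part of this commit. Uses the same imports as
    // the tests in this commit (Configuration, FileSystem, FsConstants, Token, List).
    // conf is assumed to carry a viewfs mount table pointing at HDFS.
    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    List<Token<?>> tokens = viewFs.getDelegationTokens("someRenewer");
    for (Token<?> token : tokens) {
      // each mounted HDFS namespace contributes its own token
      System.out.println(token.getKind() + " for " + token.getService());
    }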

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java?rev=1101753&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java Wed May 11 05:48:19 2011
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+
+/**
+ * FileStatus is serialized by MapReduce when jobs are submitted.
+ * Since viewfs overlays FileStatus with ViewFsFileStatus, we ran into
+ * serialization problems. This test verifies the fix.
+ */
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestViewFsFileStatusHdfs {
+  
+  static final String testfilename = "/tmp/testFileStatusSerialziation";
+
+  
+  
+  private static MiniDFSCluster cluster;
+  private static Path defaultWorkingDirectory;
+  private static Configuration CONF = new Configuration();
+  private static FileSystem fHdfs;
+  
+  @BeforeClass
+  public static void clusterSetupAtBegining() throws IOException,
+      LoginException, URISyntaxException {
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
+    cluster.waitClusterUp();
+    fHdfs = cluster.getFileSystem();
+    defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" + 
+        UserGroupInformation.getCurrentUser().getShortUserName()));
+    fHdfs.mkdirs(defaultWorkingDirectory);
+  }
+
+  @Test
+  public void testFileStatusSerialziation()
+      throws IOException, URISyntaxException {
+
+    long len = FileSystemTestHelper.createFile(fHdfs, testfilename);
+
+    Configuration conf = ViewFileSystemTestSetup.configWithViewfsScheme();
+    ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri().toString() + "/tmp"));
+    FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+    assertEquals(ViewFileSystem.class, vfs.getClass());
+    FileStatus stat = vfs.getFileStatus(new Path(testfilename));
+    assertEquals(len, stat.getLen());
+    // check serialization/deserialization
+    DataOutputBuffer dob = new DataOutputBuffer();
+    stat.write(dob);
+    DataInputBuffer dib = new DataInputBuffer();
+    dib.reset(dob.getData(), 0, dob.getLength());
+    FileStatus deSer = new FileStatus();
+    deSer.readFields(dib);
+    assertEquals(len, deSer.getLen());
+  }
+
+  @AfterClass
+  public static void cleanup() throws IOException {
+    fHdfs.delete(new Path(testfilename), true);
+  }
+
+}
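
The serialization check above is the standard Hadoop Writable round trip: write the FileStatus into a byte buffer, then read it back into a fresh instance, mirroring what happens when FileStatus objects are serialized at job submission. A hedged, generic sketch of the same pattern (the copyViaWritable helper name is hypothetical):

    // Hypothetical helper, not part of this commit: round-trip a FileStatus
    // through the Writable interface, mirroring the assertions in the test.
    static FileStatus copyViaWritable(FileStatus original) throws IOException {
      DataOutputBuffer out = new DataOutputBuffer();
      original.write(out);                           // serialize
      DataInputBuffer in = new DataInputBuffer();
      in.reset(out.getData(), 0, out.getLength());
      FileStatus copy = new FileStatus();
      copy.readFields(in);                           // deserialize
      return copy;
    }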

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java?rev=1101753&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java Wed May 11 05:48:19 2011
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.List;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+
+public class TestViewFsHdfs extends ViewFsBaseTest {
+
+  private static MiniDFSCluster cluster;
+  private static Path defaultWorkingDirectory;
+  private static HdfsConfiguration CONF = new HdfsConfiguration();
+  private static FileContext fc;
+  
+  @BeforeClass
+  public static void clusterSetupAtBegining() throws IOException,
+      LoginException, URISyntaxException {
+    SupportsBlocks = true;
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
+    cluster.waitClusterUp();
+    cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    fc = FileContext.getFileContext(cluster.getURI(0), CONF);
+    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
+        UserGroupInformation.getCurrentUser().getShortUserName()));
+    fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
+  }
+
+      
+  @AfterClass
+  public static void ClusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();   
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // create the test root on hdfs
+    fcTarget = fc;
+    super.setUp();
+    
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+  
+  
+  /*
+   * This overrides the default implementation since hdfs does have delegation
+   * tokens.
+   */
+  @Override
+  @Test
+  public void testGetDelegationTokens() throws IOException {
+    List<Token<?>> delTokens = 
+        fcView.getDelegationTokens(new Path("/"), "sanjay");
+    Assert.assertEquals(7, delTokens.size()); 
+  }
+ 
+}
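
All three tests rely on a viewfs mount table being in place before a ViewFileSystem or ViewFs client is created; the first and third tests presumably inherit that setup from their base classes, while TestViewFsFileStatusHdfs.java does it inline. A minimal sketch of that configuration step, built from the ConfigUtil.addLink and FsConstants.VIEWFS_URI calls used above (the /data mount point and the namenode URI are illustrative only):

    // Illustrative mount-table setup, not part of this commit: map the viewfs
    // path /data onto a directory in a real HDFS namespace, then obtain a
    // client-side file system that resolves paths through the mount table.
    Configuration conf = ViewFileSystemTestSetup.configWithViewfsScheme();
    ConfigUtil.addLink(conf, "/data", new URI("hdfs://namenode:8020/data"));
    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    FileStatus[] listing = viewFs.listStatus(new Path("/data"));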


