tajo-commits mailing list archives

From: jh...@apache.org
Subject: tajo git commit: TAJO-2037: tajo-storage-hdfs tests occasionally hangs.
Date: Wed, 06 Jan 2016 07:24:11 GMT
Repository: tajo
Updated Branches:
  refs/heads/master 62cc6f686 -> f9825e71b


TAJO-2037: tajo-storage-hdfs tests occasionally hangs.

closes #927


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/f9825e71
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/f9825e71
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/f9825e71

Branch: refs/heads/master
Commit: f9825e71bcfca9e187206756d369ee14bcb9be2e
Parents: 62cc6f6
Author: Jinho Kim <jhkim@apache.org>
Authored: Wed Jan 6 16:23:07 2016 +0900
Committer: Jinho Kim <jhkim@apache.org>
Committed: Wed Jan 6 16:23:07 2016 +0900

----------------------------------------------------------------------
 CHANGES                                         |  2 +
 .../org/apache/tajo/TajoTestingCluster.java     |  7 +-
 tajo-core/pom.xml                               |  2 +-
 tajo-project/pom.xml                            | 18 +++-
 .../org/apache/tajo/HttpFileServerHandler.java  | 12 +--
 .../tajo/storage/TestByteBufLineReader.java     | 13 +--
 .../apache/tajo/storage/TestFileTablespace.java | 99 ++++++++++----------
 .../tajo/storage/raw/TestDirectRawFile.java     | 73 +++++++--------
 8 files changed, 116 insertions(+), 110 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/tajo/blob/f9825e71/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index fea896c..c8dd652 100644
--- a/CHANGES
+++ b/CHANGES
@@ -71,6 +71,8 @@ Release 0.12.0 - unreleased
 
   BUG FIXES
 
+    TAJO-2037: tajo-storage-hdfs tests occasionally hangs. (jinho)
+
     TAJO-2025: HBASE_LIB/metrics-core-*.jar should be included in CLASSPATH.
     (Contributed by Dongkyu Hwangbo, committed by jinho)
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/f9825e71/tajo-cluster-tests/src/test/java/org/apache/tajo/TajoTestingCluster.java
----------------------------------------------------------------------
diff --git a/tajo-cluster-tests/src/test/java/org/apache/tajo/TajoTestingCluster.java b/tajo-cluster-tests/src/test/java/org/apache/tajo/TajoTestingCluster.java
index 6801ff4..c519a53 100644
--- a/tajo-cluster-tests/src/test/java/org/apache/tajo/TajoTestingCluster.java
+++ b/tajo-cluster-tests/src/test/java/org/apache/tajo/TajoTestingCluster.java
@@ -254,15 +254,16 @@ public class TajoTestingCluster {
                                             final String hosts[])
       throws IOException {
 
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dir.toString());
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dir.getAbsolutePath());
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
     conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, false);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, 0);
+
     MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new HdfsConfiguration(conf));
     builder.hosts(hosts);
     builder.numDataNodes(servers);
     builder.format(true);
-    builder.manageNameDfsDirs(true);
-    builder.manageDataDfsDirs(true);
+    builder.storagesPerDatanode(1);
     builder.waitSafeMode(true);
     this.dfsCluster = builder.build();
 

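For reference, the settings above converge on a fairly standard MiniDFSCluster recipe. The following is a minimal, standalone sketch of that recipe (illustrative only, not part of the commit; it assumes Hadoop's mini-cluster test API and a caller-supplied base directory):

  import java.io.File;
  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSConfigKeys;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
  import org.apache.hadoop.hdfs.MiniDFSCluster;

  public class MiniDfsSketch {
    public static MiniDFSCluster startCluster(File baseDir) throws IOException {
      Configuration conf = new HdfsConfiguration();
      // Absolute path: the mini cluster should never resolve its base dir
      // against a working directory that varies between surefire forks.
      conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
      conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
      conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, false);
      // No reserved NameNode disk space, so a nearly full CI disk does not
      // push the cluster into safe mode and stall the test.
      conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, 0);

      return new MiniDFSCluster.Builder(conf)
          .numDataNodes(1)
          .storagesPerDatanode(1)  // a single storage dir per datanode keeps startup small
          .format(true)
          .waitSafeMode(true)
          .build();
    }
  }

The commit applies this same combination in TajoTestingCluster and, via the shared helper introduced in TestFileTablespace below, in the storage tests.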
http://git-wip-us.apache.org/repos/asf/tajo/blob/f9825e71/tajo-core/pom.xml
----------------------------------------------------------------------
diff --git a/tajo-core/pom.xml b/tajo-core/pom.xml
index ed006a7..bea3486 100644
--- a/tajo-core/pom.xml
+++ b/tajo-core/pom.xml
@@ -280,7 +280,7 @@
     </dependency>
     <dependency>
       <groupId>org.mortbay.jetty</groupId>
-      <artifactId>jsp-2.1</artifactId>
+      <artifactId>jsp-2.1-jetty</artifactId>
     </dependency>
     <dependency>
       <groupId>io.dropwizard.metrics</groupId>

http://git-wip-us.apache.org/repos/asf/tajo/blob/f9825e71/tajo-project/pom.xml
----------------------------------------------------------------------
diff --git a/tajo-project/pom.xml b/tajo-project/pom.xml
index f8a82d0..d32f991 100644
--- a/tajo-project/pom.xml
+++ b/tajo-project/pom.xml
@@ -40,7 +40,7 @@
     <hive.version>1.1.0</hive.version>
     <netty.version>4.0.33.Final</netty.version>
     <jersey.version>2.6</jersey.version>
-    <jetty.version>6.1.14</jetty.version>
+    <jetty.version>6.1.26</jetty.version>
     <tajo.root>${project.parent.relativePath}/..</tajo.root>
     <extra.source.path>src/main/hadoop-${hadoop.version}</extra.source.path>
   </properties>
@@ -485,6 +485,9 @@
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-surefire-plugin</artifactId>
           <version>2.19</version>
+          <configuration>
+            <trimStackTrace>false</trimStackTrace>
+          </configuration>
         </plugin>
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
@@ -1190,7 +1193,7 @@
       </dependency>
       <dependency>
         <groupId>org.mortbay.jetty</groupId>
-        <artifactId>jsp-2.1</artifactId>
+        <artifactId>jsp-2.1-jetty</artifactId>
         <version>${jetty.version}</version>
         <exclusions>
           <exclusion>
@@ -1199,6 +1202,17 @@
           </exclusion>
         </exclusions>
       </dependency>
+      <dependency>
+        <groupId>org.mortbay.jetty</groupId>
+        <artifactId>jsp-2.1-glassfish</artifactId>
+        <version>2.1.v20091210</version>
+        <exclusions>
+          <exclusion>
+            <groupId>org.eclipse.jdt.core.compiler</groupId>
+            <artifactId>ecj</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
     </dependencies>
   </dependencyManagement>
   <profiles>

http://git-wip-us.apache.org/repos/asf/tajo/blob/f9825e71/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/HttpFileServerHandler.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/HttpFileServerHandler.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/HttpFileServerHandler.java
index 78902f3..a58cb83 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/HttpFileServerHandler.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/HttpFileServerHandler.java
@@ -25,6 +25,8 @@ import io.netty.handler.codec.http.*;
 import io.netty.handler.ssl.SslHandler;
 import io.netty.handler.stream.ChunkedFile;
 import io.netty.util.CharsetUtil;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -32,9 +34,6 @@ import java.io.RandomAccessFile;
 import java.io.UnsupportedEncodingException;
 import java.net.URLDecoder;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 public class HttpFileServerHandler extends SimpleChannelInboundHandler<FullHttpRequest> {
   
   private final Log LOG = LogFactory.getLog(HttpFileServerHandler.class);
@@ -84,11 +83,12 @@ public class HttpFileServerHandler extends SimpleChannelInboundHandler<FullHttpR
     ChannelFuture lastContentFuture;
     if (ctx.pipeline().get(SslHandler.class) != null) {
       // Cannot use zero-copy with HTTPS.
-      lastContentFuture = ctx.writeAndFlush(new HttpChunkedInput(new ChunkedFile(raf, 0, fileLength, 8192)));
+      lastContentFuture = ctx.writeAndFlush(new HttpChunkedInput(new ChunkedFile(raf, 0, fileLength, 8192)),
+          ctx.newProgressivePromise());
     } else {
       // No encryption - use zero-copy.
       final FileRegion region = new DefaultFileRegion(raf.getChannel(), 0, fileLength);
-      writeFuture = ctx.write(region);
+      writeFuture = ctx.write(region, ctx.newProgressivePromise());
       lastContentFuture = ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
       writeFuture.addListener(new ChannelProgressiveFutureListener() {
         @Override
@@ -99,7 +99,7 @@ public class HttpFileServerHandler extends SimpleChannelInboundHandler<FullHttpR
 
         @Override
         public void operationComplete(ChannelProgressiveFuture future) throws Exception {
-          region.release();
+          LOG.trace(future.channel() + " Transfer complete.");
         }
       });
     }

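For context, the non-SSL branch above follows Netty's usual zero-copy pattern: write a FileRegion with a progressive promise, attach a ChannelProgressiveFutureListener, and flush a trailing LastHttpContent. A condensed sketch of that pattern (illustrative only; it assumes Netty 4.x, matching the imports in this file):

  import java.io.RandomAccessFile;

  import io.netty.channel.ChannelFuture;
  import io.netty.channel.ChannelHandlerContext;
  import io.netty.channel.ChannelProgressiveFuture;
  import io.netty.channel.ChannelProgressiveFutureListener;
  import io.netty.channel.DefaultFileRegion;
  import io.netty.channel.FileRegion;
  import io.netty.handler.codec.http.LastHttpContent;

  public class ZeroCopySendSketch {
    // Sends an open file over the channel without copying it through user space.
    static ChannelFuture sendFile(ChannelHandlerContext ctx, RandomAccessFile raf,
                                  long fileLength) {
      FileRegion region = new DefaultFileRegion(raf.getChannel(), 0, fileLength);
      // A progressive promise lets the listener observe transfer progress,
      // not only completion.
      ChannelFuture writeFuture = ctx.write(region, ctx.newProgressivePromise());
      writeFuture.addListener(new ChannelProgressiveFutureListener() {
        @Override
        public void operationProgressed(ChannelProgressiveFuture future,
                                        long progress, long total) {
          System.out.println(future.channel() + " progress: " + progress + "/" + total);
        }

        @Override
        public void operationComplete(ChannelProgressiveFuture future) {
          // The transport releases the FileRegion after writing it, so no
          // explicit release() is needed here.
          System.out.println(future.channel() + " transfer complete");
        }
      });
      // The empty trailing content marks the end of the HTTP response body.
      return ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
    }
  }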
http://git-wip-us.apache.org/repos/asf/tajo/blob/f9825e71/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestByteBufLineReader.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestByteBufLineReader.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestByteBufLineReader.java
index 8472095..2cee3bb 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestByteBufLineReader.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestByteBufLineReader.java
@@ -21,9 +21,7 @@ package org.apache.tajo.storage;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.storage.text.ByteBufLineReader;
@@ -35,7 +33,6 @@ import org.junit.Test;
 import java.io.File;
 import java.io.FileInputStream;
 import java.nio.charset.Charset;
-import java.util.UUID;
 
 import static org.junit.Assert.*;
 
@@ -85,16 +82,12 @@ public class TestByteBufLineReader {
     fs.close();
   }
 
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testReaderWithDFS() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
-    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
-    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);
+    final Configuration conf = TestFileTablespace.getTestHdfsConfiguration();
 
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(2).waitSafeMode(true).build();
+        .numDataNodes(2).storagesPerDatanode(1).format(true).build();
 
     TajoConf tajoConf = new TajoConf(conf);
     tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

http://git-wip-us.apache.org/repos/asf/tajo/blob/f9825e71/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileTablespace.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileTablespace.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileTablespace.java
index c99e0dc..0370302 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileTablespace.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileTablespace.java
@@ -39,6 +39,7 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.util.List;
@@ -48,7 +49,7 @@ import static org.junit.Assert.*;
 
 public class TestFileTablespace {
 	private TajoConf conf;
-	private static String TEST_PATH = "target/test-data/TestFileTablespace";
+	private static String TEST_PATH = "target/test-data/hdfs";
   private Path testDir;
   private FileSystem localFs;
 
@@ -63,6 +64,21 @@ public class TestFileTablespace {
 	public void tearDown() throws Exception {
 	}
 
+  public static HdfsConfiguration getTestHdfsConfiguration() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    String testDataPath = new File(TEST_PATH + "/" + UUID.randomUUID().toString()).getAbsolutePath();
+
+    String namenodeDir = new File(testDataPath, "name").getAbsolutePath();
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, 0);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+
+    return conf;
+  }
+
   @Test
 	public final void testGetScannerAndAppender() throws IOException {
 		Schema schema = new Schema();
@@ -102,19 +118,11 @@ public class TestFileTablespace {
     localFs.delete(path, true);
 	}
 
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testGetSplit() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
-    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
-    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);
-
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(1).build();
-    TajoConf tajoConf = new TajoConf(conf);
-    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");
+    final Configuration hdfsConf = getTestHdfsConfiguration();
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(hdfsConf)
+        .numDataNodes(1).storagesPerDatanode(1).format(true).build();
 
     int testCount = 10;
     Path tablePath = new Path("/testGetSplit");
@@ -131,7 +139,9 @@ public class TestFileTablespace {
 
       assertTrue(fs.exists(tablePath));
       FileTablespace space = new FileTablespace("testGetSplit", fs.getUri(), null);
-      space.init(new TajoConf(conf));
+      space.init(conf);
+
+      TablespaceManager.addTableSpaceForTest(space);
       assertEquals(fs.getUri(), space.getUri());
 
       Schema schema = new Schema();
@@ -153,24 +163,18 @@ public class TestFileTablespace {
       assertEquals(testCount / 2, splits.size());
       assertEquals(1, splits.get(0).getHosts().length);
       assertEquals(-1, ((FileFragment)splits.get(0)).getDiskIds()[0]);
+
       fs.close();
     } finally {
       cluster.shutdown();
     }
   }
 
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testZeroLengthSplit() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
-    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
-
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(1).build();
-    TajoConf tajoConf = new TajoConf(conf);
-    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");
+    final Configuration hdfsConf = getTestHdfsConfiguration();
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(hdfsConf)
+        .numDataNodes(1).storagesPerDatanode(1).format(true).build();
 
     int testCount = 10;
     Path tablePath = new Path("/testZeroLengthSplit");
@@ -188,40 +192,37 @@ public class TestFileTablespace {
       }
 
       assertTrue(fs.exists(tablePath));
+
       FileTablespace space = new FileTablespace("testZeroLengthSplit", fs.getUri(), new JSONObject());
-      space.init(new TajoConf(conf));
+      space.init(conf);
+      TablespaceManager.addTableSpaceForTest(space);
       assertEquals(fs.getUri(), space.getUri());
 
       Schema schema = new Schema();
       schema.addColumn("id", Type.INT4);
       schema.addColumn("age",Type.INT4);
       schema.addColumn("name",Type.TEXT);
-      TableMeta meta = CatalogUtil.newTableMeta("TEXT");
+      TableMeta meta = CatalogUtil.newTableMeta(BuiltinStorages.TEXT);
 
       List<Fragment> splits = Lists.newArrayList();
       // Get FileFragments in partition batch
       splits.addAll(space.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
       assertEquals(0, splits.size());
+
       fs.close();
     } finally {
       cluster.shutdown();
     }
   }
 
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
-    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
-    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
-    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);
-
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(2).build();
+    final Configuration hdfsConf = getTestHdfsConfiguration();
 
-    TajoConf tajoConf = new TajoConf(conf);
-    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");
+    hdfsConf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
+    hdfsConf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(hdfsConf)
+        .numDataNodes(2).storagesPerDatanode(1).format(true).build();
 
     int testCount = 10;
     Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
@@ -236,15 +237,15 @@ public class TestFileTablespace {
       assertTrue(fs.exists(tablePath));
 
       FileTablespace sm = new FileTablespace("testGetSplitWithBlockStorageLocationsBatching", fs.getUri(), null);
-      sm.init(new TajoConf(conf));
-
+      sm.init(new TajoConf(hdfsConf));
+      TablespaceManager.addTableSpaceForTest(sm);
       assertEquals(fs.getUri(), sm.getUri());
 
       Schema schema = new Schema();
       schema.addColumn("id", Type.INT4);
       schema.addColumn("age", Type.INT4);
       schema.addColumn("name", Type.TEXT);
-      TableMeta meta = CatalogUtil.newTableMeta("TEXT");
+      TableMeta meta = CatalogUtil.newTableMeta(BuiltinStorages.TEXT);
 
       List<Fragment> splits = Lists.newArrayList();
       splits.addAll(sm.getSplits("data", meta, schema, tablePath));
@@ -253,23 +254,19 @@ public class TestFileTablespace {
       assertEquals(2, splits.get(0).getHosts().length);
       assertEquals(2, ((FileFragment)splits.get(0)).getDiskIds().length);
       assertNotEquals(-1, ((FileFragment)splits.get(0)).getDiskIds()[0]);
+
       fs.close();
     } finally {
       cluster.shutdown();
     }
   }
 
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testGetFileTablespace() throws Exception {
-    final Configuration hdfsConf = new HdfsConfiguration();
-    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
-    hdfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
-    hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
-    hdfsConf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    hdfsConf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);
+    final Configuration hdfsConf = getTestHdfsConfiguration();
 
-    final MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(hdfsConf).numDataNodes(1).build();
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(hdfsConf)
+            .numDataNodes(1).storagesPerDatanode(1).format(true).build();
     URI uri = URI.create(cluster.getFileSystem().getUri() + "/tajo");
 
     try {

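Taken together, the tests in this file now share one pattern: build a per-test HDFS configuration with getTestHdfsConfiguration(), start a small mini cluster, register the tablespace, and always shut the cluster down. A condensed sketch of that pattern (illustrative only; it assumes the sketch lives in the org.apache.tajo.storage test package so that FileTablespace, TablespaceManager, and TestFileTablespace resolve without imports):

  package org.apache.tajo.storage;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.hdfs.MiniDFSCluster;
  import org.apache.tajo.conf.TajoConf;
  import org.junit.Test;

  public class TablespaceMiniClusterSketch {
    @Test(timeout = 120000)
    public void testWithMiniCluster() throws Exception {
      // Shared helper: unique absolute base dir, replication 1, no reserved
      // NameNode disk space, no minimum block size.
      final Configuration conf = TestFileTablespace.getTestHdfsConfiguration();

      final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(1).storagesPerDatanode(1).format(true).build();
      try {
        FileSystem fs = cluster.getFileSystem();

        FileTablespace space = new FileTablespace("sketch", fs.getUri(), null);
        space.init(new TajoConf(conf));
        // Register the tablespace so code that looks it up by URI during the
        // test can find it.
        TablespaceManager.addTableSpaceForTest(space);

        // ... write table files under fs and exercise getSplits(), scanners, appenders ...
      } finally {
        cluster.shutdown();
      }
    }
  }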
http://git-wip-us.apache.org/repos/asf/tajo/blob/f9825e71/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/raw/TestDirectRawFile.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/raw/TestDirectRawFile.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/raw/TestDirectRawFile.java
index 78e3390..d027fb8 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/raw/TestDirectRawFile.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/raw/TestDirectRawFile.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.tajo.BuiltinStorages;
 import org.apache.tajo.catalog.CatalogUtil;
@@ -37,6 +35,7 @@ import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.DatumFactory;
 import org.apache.tajo.datum.ProtobufDatum;
 import org.apache.tajo.rpc.protocolrecords.PrimitiveProtos;
+import org.apache.tajo.storage.TestFileTablespace;
 import org.apache.tajo.storage.Tuple;
 import org.apache.tajo.storage.fragment.FileFragment;
 import org.apache.tajo.storage.rawfile.DirectRawFileScanner;
@@ -46,9 +45,11 @@ import org.apache.tajo.tuple.memory.RowWriter;
 import org.apache.tajo.unit.StorageUnit;
 import org.apache.tajo.util.FileUtil;
 import org.apache.tajo.util.ProtoUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
@@ -56,6 +57,7 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 
 import static org.junit.Assert.*;
 
@@ -66,13 +68,16 @@ public class TestDirectRawFile {
   public static Schema schema;
 
   private static String TEST_PATH = "target/test-data/TestDirectRawFile";
-  private static MiniDFSCluster cluster;
-  private static FileSystem dfs;
-  private static FileSystem localFs;
+  private MiniDFSCluster cluster;
+  private FileSystem fs;
+  private boolean isLocal;
 
   private TajoConf tajoConf;
   private Path testDir;
 
+  @Rule
+  public Timeout timeout = new Timeout(120, TimeUnit.SECONDS);
+
   @Parameterized.Parameters
   public static Collection<Object[]> generateParameters() throws IOException {
     return Arrays.asList(new Object[][]{
@@ -83,41 +88,35 @@ public class TestDirectRawFile {
 
 
   public TestDirectRawFile(boolean isLocal) throws IOException {
-    FileSystem fs;
+    this.isLocal = isLocal;
+  }
+
+  @Before
+  public void setup() throws IOException {
     if (isLocal) {
-      fs = localFs;
+      fs = FileSystem.getLocal(new TajoConf());
     } else {
-      fs = dfs;
+      final Configuration conf = TestFileTablespace.getTestHdfsConfiguration();
+
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(1)
+          .format(true)
+          .storagesPerDatanode(1).build();
+
+      fs = cluster.getFileSystem();
     }
 
     this.tajoConf = new TajoConf(fs.getConf());
     this.testDir = getTestDir(fs, TEST_PATH);
   }
 
-  @BeforeClass
-  public static void setUpClass() throws IOException, InterruptedException {
-    final Configuration conf = new HdfsConfiguration();
-    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
-    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
-    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);
-
-    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new HdfsConfiguration(conf));
-    builder.numDataNodes(1);
-    builder.format(true);
-    builder.manageNameDfsDirs(true);
-    builder.manageDataDfsDirs(true);
-    builder.waitSafeMode(true);
-    cluster = builder.build();
-
-    dfs = cluster.getFileSystem();
-    localFs = FileSystem.getLocal(new TajoConf());
-  }
-
-  @AfterClass
-  public static void tearDownClass() throws InterruptedException {
-    cluster.shutdown();
+  @After
+  public void tearDown() throws IOException {
+    if (isLocal) {
+      fs.delete(testDir, true);
+    } else {
+      cluster.shutdown();
+    }
   }
 
   public Path getTestDir(FileSystem fs, String dir) throws IOException {
@@ -168,7 +167,7 @@ public class TestDirectRawFile {
     return writeRowBlock(conf, meta, rowBlock, outputFile);
   }
 
-  @Test(timeout = 60000)
+  @Test
   public void testRWForAllTypesWithNextTuple() throws IOException {
     int rowNum = 10000;
 
@@ -198,7 +197,7 @@ public class TestDirectRawFile {
     assertEquals(rowNum, j);
   }
 
-  @Test(timeout = 60000)
+  @Test
   public void testRepeatedScan() throws IOException {
     int rowNum = 2;
 
@@ -226,7 +225,7 @@ public class TestDirectRawFile {
     reader.close();
   }
 
-  @Test(timeout = 60000)
+  @Test
   public void testReset() throws IOException {
     int rowNum = 2;
 

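A note on the timeout change in this file: instead of a per-method @Test(timeout = 60000), the class now uses a single JUnit Timeout rule, which also covers the new per-test @Before/@After mini-cluster setup and teardown. A minimal standalone sketch of that pattern (illustrative only, not part of the commit):

  import java.util.concurrent.TimeUnit;

  import org.junit.Rule;
  import org.junit.Test;
  import org.junit.rules.Timeout;

  public class TimeoutRuleSketch {
    // One rule applies to every test method in the class, including time
    // spent in @Before and @After methods.
    @Rule
    public Timeout timeout = new Timeout(120, TimeUnit.SECONDS);

    @Test
    public void runsUnderTheSharedTimeout() throws Exception {
      Thread.sleep(10);  // any test body; the rule fails it after 120 seconds
    }
  }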
