tajo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hyun...@apache.org
Subject [02/57] [abbrv] [partial] TAJO-752: Escalate sub modules in tajo-core into the top-level modules. (hyunsik)
Date Fri, 18 Apr 2014 11:44:05 GMT
http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestSortExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestSortExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestSortExec.java
new file mode 100644
index 0000000..87262e8
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestSortExec.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.planner.physical;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.tajo.LocalTajoTestingUtility;
+import org.apache.tajo.TajoConstants;
+import org.apache.tajo.TajoTestingCluster;
+import org.apache.tajo.TpchTestBase;
+import org.apache.tajo.algebra.Expr;
+import org.apache.tajo.catalog.*;
+import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
+import org.apache.tajo.common.TajoDataTypes.Type;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.datum.Datum;
+import org.apache.tajo.datum.DatumFactory;
+import org.apache.tajo.engine.parser.SQLAnalyzer;
+import org.apache.tajo.engine.planner.*;
+import org.apache.tajo.engine.planner.enforce.Enforcer;
+import org.apache.tajo.engine.planner.logical.LogicalNode;
+import org.apache.tajo.storage.*;
+import org.apache.tajo.storage.fragment.FileFragment;
+import org.apache.tajo.util.CommonTestingUtil;
+import org.apache.tajo.worker.TaskAttemptContext;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Random;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests for the physical sort executor.
+ *
+ * Builds a 100-row random "employee" table, runs an ORDER BY query through
+ * the parse -> logical plan -> optimize -> physical plan pipeline, and checks
+ * that the output arrives in sorted order. Also temporarily hosts a
+ * range-partition regression test for TAJO-946 (see that test's TODO).
+ */
+public class TestSortExec {
+  private static TajoConf conf;
+  private static final String TEST_PATH = "target/test-data/TestPhysicalPlanner";
+  private static TajoTestingCluster util;
+  private static CatalogService catalog;
+  private static SQLAnalyzer analyzer;
+  private static LogicalPlanner planner;
+  private static LogicalOptimizer optimizer;
+  private static AbstractStorageManager sm;
+  private static Path workDir;
+  private static Path tablePath;
+  private static TableMeta employeeMeta;
+
+  // Seeded from the clock, so each run sorts a different random data set.
+  private static Random rnd = new Random(System.currentTimeMillis());
+
+  /**
+   * Writes 100 random (managerid, empid, deptname) rows to a CSV table,
+   * registers it in the catalog as "employee", and prepares the
+   * analyzer/planner/optimizer shared by the tests.
+   */
+  @BeforeClass
+  public static void setUp() throws Exception {
+    conf = new TajoConf();
+    util = TpchTestBase.getInstance().getTestingCluster();
+    catalog = util.getMaster().getCatalog();
+    workDir = CommonTestingUtil.getTestDir(TEST_PATH);
+    sm = StorageManagerFactory.getStorageManager(conf, workDir);
+
+    Schema schema = new Schema();
+    schema.addColumn("managerid", Type.INT4);
+    schema.addColumn("empid", Type.INT4);
+    schema.addColumn("deptname", Type.TEXT);
+
+    employeeMeta = CatalogUtil.newTableMeta(StoreType.CSV);
+
+    tablePath = StorageUtil.concatPath(workDir, "employee", "table1");
+    sm.getFileSystem().mkdirs(tablePath.getParent());
+
+    // The same VTuple instance is reused; addTuple is expected to copy it.
+    Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(employeeMeta, schema, tablePath);
+    appender.init();
+    Tuple tuple = new VTuple(schema.size());
+    for (int i = 0; i < 100; i++) {
+      tuple.put(new Datum[] {
+          DatumFactory.createInt4(rnd.nextInt(5)),
+          DatumFactory.createInt4(rnd.nextInt(10)),
+          DatumFactory.createText("dept_" + rnd.nextInt(10))});
+      appender.addTuple(tuple);
+    }
+    appender.flush();
+    appender.close();
+
+    TableDesc desc = new TableDesc(
+        CatalogUtil.buildFQName(TajoConstants.DEFAULT_DATABASE_NAME, "employee"), schema, employeeMeta,
+        tablePath);
+    catalog.createTable(desc);
+
+    analyzer = new SQLAnalyzer();
+    planner = new LogicalPlanner(catalog);
+    optimizer = new LogicalOptimizer(conf);
+  }
+
+  // Query under test: two sort keys, the second one descending.
+  public static String[] QUERIES = {
+      "select managerId, empId, deptName from employee order by managerId, empId desc" };
+
+  /**
+   * Plans and executes the ORDER BY query, then verifies that consecutive
+   * output tuples are non-decreasing on the first sort key (managerid).
+   * The second, descending key is not checked here.
+   */
+  @Test
+  public final void testNext() throws IOException, PlanningException {
+    FileFragment[] frags = StorageManager.splitNG(conf, "default.employee", employeeMeta, tablePath, Integer.MAX_VALUE);
+    Path workDir = CommonTestingUtil.getTestDir("target/test-data/TestSortExec");
+    TaskAttemptContext ctx = new TaskAttemptContext(conf, LocalTajoTestingUtility
+        .newQueryUnitAttemptId(), new FileFragment[] { frags[0] }, workDir);
+    ctx.setEnforcer(new Enforcer());
+    Expr context = analyzer.parse(QUERIES[0]);
+    LogicalPlan plan = planner.createPlan(LocalTajoTestingUtility.createDummySession(), context);
+    LogicalNode rootNode = optimizer.optimize(plan);
+
+    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf, sm);
+    PhysicalExec exec = phyPlanner.createPlan(ctx, rootNode);
+
+    Tuple tuple;
+    Datum preVal = null;
+    Datum curVal;
+    exec.init();
+    while ((tuple = exec.next()) != null) {
+      curVal = tuple.get(0);
+      if (preVal != null) {
+        // Each first-key value must be >= the previous one.
+        assertTrue(preVal.lessThanEqual(curVal).asBool());
+      }
+
+      preVal = curVal;
+    }
+    exec.close();
+  }
+
+  /**
+   * TODO - Now, in FSM branch, TestUniformRangePartition is ported to Java.
+   * So, this test is located in here.
+   * Later it should be moved TestUniformPartitions.
+   */
+  @Test
+  public void testTAJO_946() {
+    Schema schema = new Schema();
+    schema.addColumn("l_orderkey", Type.INT8);
+    SortSpec [] sortSpecs = PlannerUtil.schemaToSortSpecs(schema);
+
+    // End key 6,000,000,000 exceeds Integer.MAX_VALUE, exercising INT8 keys.
+    // NOTE(review): prefer an uppercase 'L' literal suffix over lowercase 'l'.
+    Tuple s = new VTuple(1);
+    s.put(0, DatumFactory.createInt8(0));
+    Tuple e = new VTuple(1);
+    e.put(0, DatumFactory.createInt8(6000000000l));
+    TupleRange expected = new TupleRange(sortSpecs, s, e);
+    RangePartitionAlgorithm partitioner
+        = new UniformRangePartition(expected, sortSpecs, true);
+    TupleRange [] ranges = partitioner.partition(967);
+
+    // The produced ranges must be strictly increasing (no overlap, no repeat).
+    TupleRange prev = null;
+    for (TupleRange r : ranges) {
+      if (prev == null) {
+        prev = r;
+      } else {
+        assertTrue(prev.compareTo(r) < 0);
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/query/TestAlterTable.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestAlterTable.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestAlterTable.java
new file mode 100644
index 0000000..57d8b32
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestAlterTable.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.QueryTestCaseBase;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.List;
+
+/**
+ * Integration tests for ALTER TABLE DDL: renaming a table, renaming a
+ * column, and adding a new column. Each test first creates a table from
+ * table1_ddl.sql (parameterized with a table name) and then runs a single
+ * ALTER TABLE script against it.
+ */
+@Category(IntegrationTest.class)
+public class TestAlterTable extends QueryTestCaseBase {
+  @Test
+  public final void testAlterTableName() throws Exception {
+    List<String> createdNames = executeDDL("table1_ddl.sql", "table1.tbl", "ABC");
+    assertTableExists(createdNames.get(0));
+    executeDDL("alter_table_rename_table_ddl.sql", null);
+    // "DEF" is the rename target expected from alter_table_rename_table_ddl.sql
+    // — TODO confirm against that script if it changes.
+    assertTableExists("DEF");
+  }
+
+  @Test
+  public final void testAlterTableColumnName() throws Exception {
+    List<String> createdNames = executeDDL("table1_ddl.sql", "table1.tbl", "XYZ");
+    executeDDL("alter_table_rename_column_ddl.sql", null);
+    // "renum" is the new column name expected from the rename-column script.
+    assertColumnExists(createdNames.get(0),"renum");
+  }
+
+  @Test
+  public final void testAlterTableAddNewColumn() throws Exception {
+    List<String> createdNames = executeDDL("table1_ddl.sql", "table1.tbl", "EFG");
+    executeDDL("alter_table_add_new_column_ddl.sql", null);
+    // "cool" is the column name expected from the add-column script.
+    assertColumnExists(createdNames.get(0),"cool");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/query/TestAlterTablespace.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestAlterTablespace.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestAlterTablespace.java
new file mode 100644
index 0000000..47e98a9
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestAlterTablespace.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.QueryTestCaseBase;
+import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.*;
+
+/**
+ * Integration test for the ALTER TABLESPACE ... LOCATION statement.
+ * The whole body is guarded so it only runs when the HCatalog store is
+ * NOT in use.
+ */
+@Category(IntegrationTest.class)
+public class TestAlterTablespace extends QueryTestCaseBase {
+
+  @Test
+  public final void testAlterLocation() throws Exception {
+    if (!testingCluster.isHCatalogStoreRunning()) {
+      //////////////////////////////////////////////////////////////////////////////
+      // Create a tablespace to alter
+      //////////////////////////////////////////////////////////////////////////////
+
+      assertFalse(catalog.existTablespace("space1"));
+      assertTrue(catalog.createTablespace("space1", "hdfs://xxx.com/warehouse"));
+      assertTrue(catalog.existTablespace("space1"));
+
+      // Pre-verification: the tablespace starts at the original URI.
+      CatalogProtos.TablespaceProto space1 = catalog.getTablespace("space1");
+      assertEquals("space1", space1.getSpaceName());
+      assertEquals("hdfs://xxx.com/warehouse", space1.getUri());
+
+      executeString("ALTER TABLESPACE space1 LOCATION 'hdfs://yyy.com/warehouse';");
+
+      // Verify ALTER TABLESPACE space1 updated only the URI, not the name.
+      space1 = catalog.getTablespace("space1");
+      assertEquals("space1", space1.getSpaceName());
+      assertEquals("hdfs://yyy.com/warehouse", space1.getUri());
+
+      // Clean up so later tests do not see a leftover tablespace.
+      assertTrue(catalog.dropTablespace("space1"));
+      assertFalse(catalog.existTablespace("space1"));
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCTASQuery.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCTASQuery.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCTASQuery.java
new file mode 100644
index 0000000..a4e31e0
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCTASQuery.java
@@ -0,0 +1,239 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import com.google.common.collect.Maps;
+import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.QueryTestCaseBase;
+import org.apache.tajo.TajoConstants;
+import org.apache.tajo.TajoTestingCluster;
+import org.apache.tajo.catalog.CatalogService;
+import org.apache.tajo.catalog.CatalogUtil;
+import org.apache.tajo.catalog.Options;
+import org.apache.tajo.catalog.TableDesc;
+import org.apache.tajo.catalog.partition.PartitionMethodDesc;
+import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.storage.StorageConstants;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.sql.ResultSet;
+import java.util.Map;
+
+import static org.apache.tajo.TajoConstants.DEFAULT_DATABASE_NAME;
+import static org.apache.tajo.catalog.CatalogUtil.buildFQName;
+import static org.junit.Assert.*;
+
+
+/**
+ * Test CREATE TABLE AS SELECT statements
+ */
+@Category(IntegrationTest.class)
+public class TestCTASQuery extends QueryTestCaseBase {
+
+  public TestCTASQuery() {
+    super(TajoConstants.DEFAULT_DATABASE_NAME);
+  }
+
+  /**
+   * CTAS without an explicit target column list: the schema is derived from
+   * the SELECT. Verifies catalog registration, the column-partition layout
+   * on the file system, and the aggregated contents via check1.sql.
+   */
+  @Test
+  public final void testCtasWithoutTableDefinition() throws Exception {
+    ResultSet res = executeQuery();
+    res.close();
+
+    String tableName = CatalogUtil.normalizeIdentifier("testCtasWithoutTableDefinition");
+    CatalogService catalog = testBase.getTestingCluster().getMaster().getCatalog();
+    String qualifiedTableName = buildFQName(DEFAULT_DATABASE_NAME, tableName);
+    TableDesc desc = catalog.getTableDesc(qualifiedTableName);
+    assertTrue(catalog.existsTable(qualifiedTableName));
+
+    assertTrue(desc.getSchema().contains("default.testctaswithouttabledefinition.col1"));
+    PartitionMethodDesc partitionDesc = desc.getPartitionMethod();
+    assertEquals(partitionDesc.getPartitionType(), CatalogProtos.PartitionType.COLUMN);
+    assertEquals("key", partitionDesc.getExpressionSchema().getColumns().get(0).getSimpleName());
+
+    // Each distinct value of the partition column "key" must have its own
+    // key=<value> subdirectory under the table path.
+    FileSystem fs = FileSystem.get(testBase.getTestingCluster().getConfiguration());
+    Path path = desc.getPath();
+    assertTrue(fs.isDirectory(path));
+    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=17.0")));
+    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=36.0")));
+    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=38.0")));
+    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=45.0")));
+    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=49.0")));
+    // The row-count stats check is skipped when the HCatalog store is running.
+    if (!testingCluster.isHCatalogStoreRunning()) {
+      assertEquals(5, desc.getStats().getNumRows().intValue());
+    }
+
+    ResultSet res2 = executeFile("check1.sql");
+
+    // Expected values for result columns 1 and 2, keyed by the double in
+    // result column 3 of check1.sql's output.
+    Map<Double, int []> resultRows1 = Maps.newHashMap();
+    resultRows1.put(45.0d, new int[]{3, 2});
+    resultRows1.put(38.0d, new int[]{2, 2});
+
+    int i = 0;
+    while(res2.next()) {
+      assertEquals(resultRows1.get(res2.getDouble(3))[0], res2.getInt(1));
+      assertEquals(resultRows1.get(res2.getDouble(3))[1], res2.getInt(2));
+      i++;
+    }
+    res2.close();
+    // Exactly the two keyed rows must come back.
+    assertEquals(2, i);
+  }
+
+  /**
+   * Same scenario as {@link #testCtasWithoutTableDefinition()} but with an
+   * explicit column partition in the CTAS statement; verified via check2.sql.
+   */
+  @Test
+  public final void testCtasWithColumnedPartition() throws Exception {
+    ResultSet res = executeQuery();
+    res.close();
+
+    String tableName = CatalogUtil.normalizeIdentifier("testCtasWithColumnedPartition");
+
+    TajoTestingCluster cluster = testBase.getTestingCluster();
+    CatalogService catalog = cluster.getMaster().getCatalog();
+    TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
+    PartitionMethodDesc partitionDesc = desc.getPartitionMethod();
+    assertEquals(partitionDesc.getPartitionType(), CatalogProtos.PartitionType.COLUMN);
+    assertEquals("key", partitionDesc.getExpressionSchema().getColumns().get(0).getSimpleName());
+
+    // One key=<value> partition directory per distinct key value.
+    FileSystem fs = FileSystem.get(cluster.getConfiguration());
+    Path path = desc.getPath();
+    assertTrue(fs.isDirectory(path));
+    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=17.0")));
+    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=36.0")));
+    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=38.0")));
+    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=45.0")));
+    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=49.0")));
+    // Stats check skipped under HCatalog, as in the previous test.
+    if (!cluster.isHCatalogStoreRunning()) {
+      assertEquals(5, desc.getStats().getNumRows().intValue());
+    }
+
+    ResultSet res2 = executeFile("check2.sql");
+
+    // Expected columns 1 and 2, keyed by the double in column 3.
+    Map<Double, int []> resultRows1 = Maps.newHashMap();
+    resultRows1.put(45.0d, new int[]{3, 2});
+    resultRows1.put(38.0d, new int[]{2, 2});
+
+    int i = 0;
+    while(res2.next()) {
+      assertEquals(resultRows1.get(res2.getDouble(3))[0], res2.getInt(1));
+      assertEquals(resultRows1.get(res2.getDouble(3))[1], res2.getInt(2));
+      i++;
+    }
+    res2.close();
+    assertEquals(2, i);
+  }
+
+  /** CTAS over a GROUP BY query; the stored table must match the golden result. */
+  @Test
+  public final void testCtasWithGroupby() throws Exception {
+    ResultSet res = executeFile("CtasWithGroupby.sql");
+    res.close();
+
+    ResultSet res2 = executeQuery();
+    assertResultSet(res2);
+    res2.close();
+  }
+
+  /** CTAS over an ORDER BY query; the stored table must match the golden result. */
+  @Test
+  public final void testCtasWithOrderby() throws Exception {
+    ResultSet res = executeFile("CtasWithOrderby.sql");
+    res.close();
+
+    ResultSet res2 = executeQuery();
+    assertResultSet(res2);
+    res2.close();
+  }
+
+  /** CTAS over a LIMIT query; the stored table must match the golden result. */
+  @Test
+  public final void testCtasWithLimit() throws Exception {
+    ResultSet res = executeFile("CtasWithLimit.sql");
+    res.close();
+
+    ResultSet res2 = executeQuery();
+    assertResultSet(res2);
+    res2.close();
+  }
+
+  /** CTAS over a UNION query. */
+  @Test
+  public final void testCtasWithUnion() throws Exception {
+    ResultSet res = executeFile("CtasWithUnion.sql");
+    res.close();
+
+    ResultSet res2 = executeQuery();
+    // NOTE(review): the result is materialized but never compared — unlike the
+    // Groupby/Orderby/Limit tests there is no assertResultSet here. Confirm
+    // whether this is an intentional smoke test or a missing assertion.
+    resultSetToString(res2);
+    res2.close();
+  }
+
+  /** CTAS with an explicit USING RCFILE store type. */
+  @Test
+  public final void testCtasWithStoreType() throws Exception {
+    ResultSet res = executeFile("CtasWithStoreType.sql");
+    res.close();
+
+    ResultSet res2 = executeQuery();
+    resultSetToString(res2);
+    res2.close();
+
+    // NOTE(review): res2.getMetaData() is called AFTER res2.close(); JDBC
+    // permits a driver to throw SQLException on a closed ResultSet. The table
+    // name should be fetched before closing — TODO confirm Tajo's behavior.
+    TableDesc desc =  client.getTableDesc(CatalogUtil.normalizeIdentifier(res2.getMetaData().getTableName(1)));
+    assertNotNull(desc);
+    assertEquals(CatalogProtos.StoreType.RCFILE, desc.getMeta().getStoreType());
+  }
+
+  /** CTAS with WITH-clause options; checks the stored CSV delimiter option. */
+  @Test
+  public final void testCtasWithOptions() throws Exception {
+    ResultSet res = executeFile("CtasWithOptions.sql");
+    res.close();
+
+    ResultSet res2 = executeQuery();
+    resultSetToString(res2);
+    res2.close();
+
+    // NOTE(review): same ResultSet-after-close access as testCtasWithStoreType.
+    TableDesc desc =  client.getTableDesc(CatalogUtil.normalizeIdentifier(res2.getMetaData().getTableName(1)));
+    assertNotNull(desc);
+    assertEquals(CatalogProtos.StoreType.CSV, desc.getMeta().getStoreType());
+
+
+    // The stored delimiter option must equal the Java-escaped form of \u0001.
+    Options options = desc.getMeta().getOptions();
+    assertNotNull(options);
+    assertEquals(StringEscapeUtils.escapeJava("\u0001"), options.get(StorageConstants.CSVFILE_DELIMITER));
+  }
+
+  /**
+   * Checks the name under which a CTAS "managed table" is registered.
+   * The asserts show HCatalog keeps "managed_table1" while Tajo's own store
+   * registers "MANAGED_TABLE1" — presumably driven by identifier quoting in
+   * CtasWithManagedTable.sql; verify against that script.
+   */
+  @Test
+  public final void testCtasWithManagedTable() throws Exception {
+    ResultSet res = executeFile("CtasWithManagedTable.sql");
+    res.close();
+
+    if (testingCluster.isHCatalogStoreRunning()) {
+      assertTrue(client.existTable("managed_table1"));
+
+      TableDesc desc =  client.getTableDesc("managed_table1");
+
+      assertNotNull(desc);
+      assertEquals("managed_table1", desc.getPath().getName());
+    } else {
+      assertFalse(client.existTable("managed_Table1"));
+      assertTrue(client.existTable("MANAGED_TABLE1"));
+
+      TableDesc desc =  client.getTableDesc("MANAGED_TABLE1");
+
+      assertNotNull(desc);
+      assertEquals("MANAGED_TABLE1", desc.getPath().getName());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCaseByCases.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCaseByCases.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCaseByCases.java
new file mode 100644
index 0000000..9836a57
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCaseByCases.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import org.apache.tajo.QueryTestCaseBase;
+import org.apache.tajo.TajoConstants;
+import org.junit.Test;
+
+import java.sql.ResultSet;
+
+/**
+ * Regression tests named after the JIRA issues they reproduce (TAJO-415,
+ * TAJO-418, TAJO-619, TAJO-718, TAJO-739). Each test executes its associated
+ * query via executeQuery() and compares the output against the stored
+ * expected result set.
+ */
+public class TestCaseByCases extends QueryTestCaseBase {
+
+  public TestCaseByCases() {
+    super(TajoConstants.DEFAULT_DATABASE_NAME);
+  }
+
+  @Test
+  public final void testTAJO415Case() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testTAJO418Case() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  /**
+   * It's an unit test to reproduce TAJO-619 (https://issues.apache.org/jira/browse/TAJO-619).
+   */
+  @Test
+  public final void testTAJO619Case() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testTAJO718Case() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testTAJO739Case() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCreateDatabase.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCreateDatabase.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCreateDatabase.java
new file mode 100644
index 0000000..453c174
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCreateDatabase.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.QueryTestCaseBase;
+import org.apache.tajo.catalog.CatalogUtil;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.sql.ResultSet;
+
+/**
+ * Integration tests for CREATE DATABASE / DROP DATABASE, including the
+ * IF NOT EXISTS and IF EXISTS variants.
+ */
+@Category(IntegrationTest.class)
+public class TestCreateDatabase extends QueryTestCaseBase {
+
+  /** Plain create-then-drop round trip. */
+  @Test
+  public final void testCreateAndDropDatabase() throws Exception {
+    String databaseName = CatalogUtil.normalizeIdentifier("testCreateAndDropDatabase");
+
+    ResultSet res = null;
+    try {
+      res = executeString("CREATE DATABASE testCreateAndDropDatabase;");
+      assertDatabaseExists(databaseName);
+      executeString("DROP DATABASE testCreateAndDropDatabase;");
+      assertDatabaseNotExists(databaseName);
+    } finally {
+      // Release the CREATE's result set even if an assertion above fails.
+      cleanupQuery(res);
+    }
+  }
+
+  /** CREATE DATABASE IF NOT EXISTS must be a no-op on an existing database. */
+  @Test
+  public final void testCreateIfNotExists() throws Exception {
+    String databaseName = CatalogUtil.normalizeIdentifier("testCreateIfNotExists");
+
+    assertDatabaseNotExists(databaseName);
+    executeString("CREATE DATABASE " + databaseName + ";").close();
+    assertDatabaseExists(databaseName);
+
+    executeString("CREATE DATABASE IF NOT EXISTS " + databaseName + ";").close();
+    assertDatabaseExists(databaseName);
+
+    executeString("DROP DATABASE " + databaseName + ";").close();
+    assertDatabaseNotExists(databaseName);
+  }
+
+  /** DROP DATABASE IF EXISTS must succeed even when the database is gone. */
+  @Test
+  public final void testDropIfExists() throws Exception {
+    String databaseName = CatalogUtil.normalizeIdentifier("testDropIfExists");
+    assertDatabaseNotExists(databaseName);
+    executeString("CREATE DATABASE " + databaseName + ";").close();
+    assertDatabaseExists(databaseName);
+
+    executeString("DROP DATABASE " + databaseName + ";").close();
+    assertDatabaseNotExists(databaseName);
+
+    // NOTE(review): unlike the calls above, this ResultSet is never closed.
+    executeString("DROP DATABASE IF EXISTS " + databaseName + ";");
+    assertDatabaseNotExists(databaseName);
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCreateTable.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCreateTable.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCreateTable.java
new file mode 100644
index 0000000..2d289ba
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestCreateTable.java
@@ -0,0 +1,360 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.QueryTestCaseBase;
+import org.apache.tajo.catalog.CatalogUtil;
+import org.apache.tajo.catalog.TableDesc;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.storage.StorageUtil;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.sql.ResultSet;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Integration tests for CREATE TABLE / DROP TABLE variants, identifier
+ * quoting (case sensitivity, non-ASCII names), and the on-disk location
+ * of created and renamed tables under the warehouse directory.
+ */
+@Category(IntegrationTest.class)
+public class TestCreateTable extends QueryTestCaseBase {
+
+  /** CREATE TABLE must accept every supported column type. */
+  @Test
+  public final void testVariousTypes() throws Exception {
+    List<String> createdNames;
+    if (testingCluster.isHCatalogStoreRunning()) {
+      // The HCatalog store supports a reduced type set, so it has its own DDL file.
+      createdNames = executeDDL("create_table_various_types_for_hcatalog.sql", null);
+    } else {
+      createdNames = executeDDL("create_table_various_types.sql", null);
+    }
+    assertTableExists(createdNames.get(0));
+  }
+
+  /** Basic CREATE TABLE driven by an external DDL template file. */
+  @Test
+  public final void testCreateTable1() throws Exception {
+    List<String> createdNames = executeDDL("table1_ddl.sql", "table1", "table1");
+    assertTableExists(createdNames.get(0));
+  }
+
+  /**
+   * Tables must be registered under the database they were created in, and
+   * unquoted identifiers must be case-insensitive (D1 resolves to d1).
+   */
+  @Test
+  public final void testCreateTable2() throws Exception {
+    executeString("CREATE DATABASE D1;").close();
+    executeString("CREATE DATABASE D2;").close();
+
+    executeString("CREATE TABLE D1.table1 (age int);").close();
+    executeString("CREATE TABLE D1.table2 (age int);").close();
+    executeString("CREATE TABLE d2.table3 (age int);").close();
+    executeString("CREATE TABLE d2.table4 (age int);").close();
+
+    assertTableExists("d1.table1");
+    assertTableExists("d1.table2");
+    assertTableNotExists("d2.table1");
+    assertTableNotExists("d2.table2");
+
+    assertTableExists("d2.table3");
+    assertTableExists("d2.table4");
+    assertTableNotExists("d1.table3");
+    assertTableNotExists("d1.table4");
+
+    // Close the DROP results too; the original ignored these ResultSets.
+    executeString("DROP TABLE D1.table1").close();
+    executeString("DROP TABLE D1.table2").close();
+    executeString("DROP TABLE D2.table3").close();
+    executeString("DROP TABLE D2.table4").close();
+
+    assertDatabaseExists("d1");
+    assertDatabaseExists("d2");
+    executeString("DROP DATABASE D1").close();
+    executeString("DROP DATABASE D2").close();
+    assertDatabaseNotExists("d1");
+    assertDatabaseNotExists("d2");
+  }
+
+  /**
+   * Creates {@code databaseName} and a table via {@code createTableStmt},
+   * verifies the table directory is {@code <warehouse>/<database>/<table>},
+   * then renames the table and verifies the directory moved accordingly.
+   *
+   * @param databaseName      database to create (normalized form)
+   * @param originalTableName table name the DDL statement creates
+   * @param newTableName      name the table is renamed to
+   * @param createTableStmt   CREATE TABLE statement to execute
+   */
+  private final void assertPathOfCreatedTable(final String databaseName,
+                                              final String originalTableName,
+                                              final String newTableName,
+                                              String createTableStmt) throws Exception {
+    // create one table
+    executeString("CREATE DATABASE " + CatalogUtil.denormalizeIdentifier(databaseName)).close();
+    // The original discarded this boolean; assert it so a failed CREATE is caught here.
+    assertTrue(getClient().existDatabase(CatalogUtil.denormalizeIdentifier(databaseName)));
+    final String oldFQTableName = CatalogUtil.buildFQName(databaseName, originalTableName);
+
+    ResultSet res = executeString(createTableStmt);
+    res.close();
+    assertTableExists(oldFQTableName);
+    TableDesc oldTableDesc = client.getTableDesc(oldFQTableName);
+
+    // checking the existence of the table directory and validating the path
+    FileSystem fs = testingCluster.getMaster().getStorageManager().getFileSystem();
+    Path warehouseDir = TajoConf.getWarehouseDir(testingCluster.getConfiguration());
+    assertTrue(fs.exists(oldTableDesc.getPath()));
+    assertEquals(StorageUtil.concatPath(warehouseDir, databaseName, originalTableName), oldTableDesc.getPath());
+
+    // Rename
+    client.executeQuery("ALTER TABLE " + CatalogUtil.denormalizeIdentifier(oldFQTableName)
+        + " RENAME to " + CatalogUtil.denormalizeIdentifier(newTableName));
+
+    // checking the existence of the new table directory and validating the path
+    final String newFQTableName = CatalogUtil.buildFQName(databaseName, newTableName);
+    TableDesc newTableDesc = client.getTableDesc(newFQTableName);
+    assertTrue(fs.exists(newTableDesc.getPath()));
+    assertEquals(StorageUtil.concatPath(warehouseDir, databaseName, newTableName), newTableDesc.getPath());
+  }
+
+  @Test
+  public final void testCreatedTableViaCTASAndVerifyPath() throws Exception {
+    assertPathOfCreatedTable("d4", "old_table", "new_mgmt_table",
+        "CREATE TABLE d4.old_table AS SELECT * FROM default.lineitem;");
+  }
+
+  @Test
+  public final void testCreatedTableJustCreatedAndVerifyPath() throws Exception {
+    assertPathOfCreatedTable("d5", "old_table", "new_mgmt_table", "CREATE TABLE d5.old_table (age integer);");
+  }
+
+  @Test
+  public final void testCreatedTableWithQuotedIdentifierAndVerifyPath() throws Exception {
+    // Quoted identifiers preserve case, which the HCatalog store cannot represent.
+    if (!testingCluster.isHCatalogStoreRunning()) {
+      assertPathOfCreatedTable("D6", "OldTable", "NewMgmtTable", "CREATE TABLE \"D6\".\"OldTable\" (age integer);");
+    }
+  }
+
+  /** CREATE TABLE IF NOT EXISTS must be a no-op when the table already exists. */
+  @Test
+  public final void testCreateTableIfNotExists() throws Exception {
+    executeString("CREATE DATABASE D3;").close();
+
+    assertTableNotExists("d3.table1");
+    executeString("CREATE TABLE D3.table1 (age int);").close();
+    assertTableExists("d3.table1");
+
+    executeString("CREATE TABLE IF NOT EXISTS D3.table1 (age int);").close();
+    assertTableExists("d3.table1");
+
+    executeString("DROP TABLE D3.table1").close();
+    // Drop the database too; the original leaked D3 into later tests.
+    executeString("DROP DATABASE D3").close();
+  }
+
+  /** DROP TABLE IF EXISTS must succeed silently when the table is absent. */
+  @Test
+  public final void testDropTableIfExists() throws Exception {
+    executeString("CREATE DATABASE D4;").close();
+
+    assertTableNotExists("d4.table1");
+    executeString("CREATE TABLE d4.table1 (age int);").close();
+    assertTableExists("d4.table1");
+
+    executeString("DROP TABLE d4.table1;").close();
+    assertTableNotExists("d4.table1");
+
+    executeString("DROP TABLE IF EXISTS d4.table1").close();
+    assertTableNotExists("d4.table1");
+
+    // Drop d4: testCreatedTableViaCTASAndVerifyPath also creates a database
+    // named "d4" (without IF NOT EXISTS), and JUnit does not guarantee test
+    // method execution order, so leaking d4 here could fail that test.
+    executeString("DROP DATABASE D4").close();
+  }
+
+  /** Quoted identifiers may contain non-ASCII (Korean) characters. */
+  @Test
+  public final void testDelimitedIdentifierWithNonAsciiCharacters() throws Exception {
+
+    if (!testingCluster.isHCatalogStoreRunning()) {
+      ResultSet res = null;
+      try {
+        List<String> tableNames = executeDDL("quoted_identifier_non_ascii_ddl.sql", "table1", "\"테이블1\"");
+        assertTableExists(tableNames.get(0));
+
+        // SELECT "아이디", "텍스트", "숫자" FROM "테이블1";
+        res = executeFile("quoted_identifier_non_ascii_1.sql");
+        assertResultSet(res, "quoted_identifier_non_ascii_1.result");
+      } finally {
+        cleanupQuery(res);
+        res = null;  // avoid re-cleaning a stale result if the next executeFile fails
+      }
+
+      // SELECT "아이디" as "진짜아이디", "텍스트" as text, "숫자" FROM "테이블1" as "테이블 별명"
+      try {
+        res = executeFile("quoted_identifier_non_ascii_2.sql");
+        assertResultSet(res, "quoted_identifier_non_ascii_2.result");
+      } finally {
+        cleanupQuery(res);
+        res = null;  // avoid re-cleaning a stale result if the next executeFile fails
+      }
+
+      // SELECT "아이디" "진짜아이디", char_length("텍스트") as "길이", "숫자" * 2 FROM "테이블1" "테이블 별명"
+      try {
+        res = executeFile("quoted_identifier_non_ascii_3.sql");
+        assertResultSet(res, "quoted_identifier_non_ascii_3.result");
+      } finally {
+        cleanupQuery(res);
+      }
+    }
+  }
+
+  /** Quoted identifiers must preserve mixed-case ASCII names. */
+  @Test
+  public final void testDelimitedIdentifierWithMixedCharacters() throws Exception {
+    if (!testingCluster.isHCatalogStoreRunning()) {
+      ResultSet res = null;
+
+      try {
+        List<String> tableNames = executeDDL("quoted_identifier_mixed_chars_ddl_1.sql", "table1", "\"TABLE1\"");
+        assertTableExists(tableNames.get(0));
+
+        // "TABLE1" and "tablE1" are distinct tables when quoted.
+        tableNames = executeDDL("quoted_identifier_mixed_chars_ddl_1.sql", "table2", "\"tablE1\"");
+        assertTableExists(tableNames.get(0));
+
+        // SELECT "aGe", "tExt", "Number" FROM "TABLE1";
+        res = executeFile("quoted_identifier_mixed_chars_1.sql");
+        assertResultSet(res, "quoted_identifier_mixed_chars_1.result");
+      } finally {
+        cleanupQuery(res);
+        res = null;  // avoid re-cleaning a stale result if the next executeFile fails
+      }
+
+      try {
+        res = executeFile("quoted_identifier_mixed_chars_2.sql");
+        assertResultSet(res, "quoted_identifier_mixed_chars_2.result");
+      } finally {
+        cleanupQuery(res);
+        res = null;  // avoid re-cleaning a stale result if the next executeFile fails
+      }
+
+      try {
+        res = executeFile("quoted_identifier_mixed_chars_3.sql");
+        assertResultSet(res, "quoted_identifier_mixed_chars_3.result");
+      } finally {
+        cleanupQuery(res);
+      }
+    }
+  }
+
+  /**
+   * Non-reserved keywords and built-in type names must be usable as table
+   * names. The original repeated the same two statements for every keyword;
+   * a data-driven loop keeps the keyword list in one place.
+   */
+  @Test
+  public final void testNonreservedKeywordTableNames() throws Exception {
+    final String [] nonreservedKeywords = {
+        "filter", "first", "format", "grouping", "hash", "index", "insert", "last",
+        "location", "max", "min", "national", "nullif", "overwrite", "precision",
+        "range", "regexp", "rlike", "set", "unknown", "var_pop", "var_samp",
+        "varying", "zone",
+        // built-in type names
+        "bigint", "bit", "blob", "bool", "boolean", "bytea", "char", "date",
+        "decimal", "double", "float", "float4", "float8", "inet4", "int", "int1",
+        "int2", "int4", "int8", "integer", "nchar", "numeric", "nvarchar", "real",
+        "smallint", "text", "time", "timestamp", "timestamptz", "timetz",
+        "tinyint", "varbinary", "varbit", "varchar"
+    };
+
+    for (String tableName : nonreservedKeywords) {
+      List<String> createdNames = executeDDL("table1_ddl.sql", "table1", tableName);
+      assertTableExists(createdNames.get(0));
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/query/TestDropTable.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestDropTable.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestDropTable.java
new file mode 100644
index 0000000..0020156
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestDropTable.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.QueryTestCaseBase;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.List;
+
+/** Integration test covering DROP TABLE on a managed (warehouse-resident) table. */
+@Category(IntegrationTest.class)
+public class TestDropTable extends QueryTestCaseBase {
+
+  /** A managed table must disappear from the catalog once dropped. */
+  @Test
+  public final void testDropManagedTable() throws Exception {
+    // Create table "abc" from the template DDL, then drop it via a DDL file.
+    final List<String> created = executeDDL("table1_ddl.sql", "table1.tbl", "abc");
+    assertTableExists(created.get(0));
+    executeDDL("drop_table_ddl.sql", null);
+    assertTableNotExists("abc");
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/query/TestGroupByQuery.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestGroupByQuery.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestGroupByQuery.java
new file mode 100644
index 0000000..9e3c375
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestGroupByQuery.java
@@ -0,0 +1,246 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.QueryTestCaseBase;
+import org.apache.tajo.TajoConstants;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.sql.ResultSet;
+
+/**
+ * Integration tests for GROUP BY, HAVING, and (distinct) aggregation queries
+ * over the TPC-H lineitem table. The SQL text and expected rows appear to be
+ * resolved from external fixture files by the no-argument executeQuery() /
+ * assertResultSet() helpers — presumably keyed by the test method name in
+ * QueryTestCaseBase; confirm against that base class.
+ */
+@Category(IntegrationTest.class)
+public class TestGroupByQuery extends QueryTestCaseBase {
+
+  public TestGroupByQuery() {
+    // Run every query against the default database.
+    super(TajoConstants.DEFAULT_DATABASE_NAME);
+  }
+
+  @Test
+  public final void testGroupBy() throws Exception {
+    // select count(1) as unique_key from lineitem;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testGroupBy2() throws Exception {
+    // select count(1) as unique_key from lineitem group by l_linenumber;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testGroupBy3() throws Exception {
+    // select l_orderkey as gkey from lineitem group by gkey order by gkey;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testGroupBy4() throws Exception {
+    // select l_orderkey as gkey, count(1) as unique_key from lineitem group by lineitem.l_orderkey;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testGroupBy5() throws Exception {
+    // select l_orderkey as gkey, '00' as num from lineitem group by lineitem.l_orderkey order by gkey
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testGroupByNested1() throws Exception {
+    // select l_orderkey + l_partkey as unique_key from lineitem group by l_orderkey + l_partkey;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testGroupByNested2() throws Exception {
+    // select sum(l_orderkey) + sum(l_partkey) as total from lineitem group by l_orderkey + l_partkey;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testGroupByWithSameExprs1() throws Exception {
+    // select sum(l_orderkey) + sum(l_orderkey) as total from lineitem group by l_orderkey + l_partkey;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testGroupByWithSameExprs2() throws Exception {
+    // select sum(l_orderkey) as total1, sum(l_orderkey) as total2 from lineitem group by l_orderkey + l_partkey;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testGroupByWithExpressionKeys1() throws Exception {
+    // select upper(lower(l_orderkey::text)) as key, count(1) as total from lineitem
+    // group by key order by upper(lower(l_orderkey::text)), total;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testGroupByWithExpressionKeys2() throws Exception {
+    // select upper(lower(l_orderkey::text)) as key, count(1) as total from lineitem
+    // group by upper(lower(l_orderkey::text)) order by upper(l_orderkey::text), total;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testGroupByWithConstantKeys1() throws Exception {
+    ResultSet res = executeQuery();
+    // NOTE(review): this test prints the result instead of asserting against a
+    // .result fixture — presumably a placeholder; confirm whether an expected
+    // result file should exist before relying on this test for coverage.
+    System.out.println(resultSetToString(res));
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testDistinctAggregation1() throws Exception {
+    // select l_orderkey, max(l_orderkey) as maximum, count(distinct l_linenumber) as unique_key from lineitem
+    // group by l_orderkey;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  /**
+   * This is a unit test for a combination of aggregation and distinct aggregation functions.
+   */
+  @Test
+  public final void testDistinctAggregation2() throws Exception {
+    // select l_orderkey, count(*) as cnt, count(distinct l_linenumber) as unique_key from lineitem group by l_orderkey;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testDistinctAggregation3() throws Exception {
+    // select count(*), count(distinct l_orderkey), sum(distinct l_orderkey) from lineitem;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testDistinctAggregation4() throws Exception {
+    // select l_linenumber, count(*), count(distinct l_orderkey), sum(distinct l_orderkey)
+    // from lineitem group by l_linenumber;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testDistinctAggregation5() throws Exception {
+    // select sum(distinct l_orderkey), l_linenumber, count(distinct l_orderkey), count(*) as total
+    // from lineitem group by l_linenumber;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testDistinctAggregation6() throws Exception {
+    // select count(distinct l_orderkey), sum(l_orderkey), sum(l_linenumber), count(*) as v4 from lineitem
+    // group by l_orderkey;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testDistinctAggregationWithHaving1() throws Exception {
+    // select l_linenumber, count(*), count(distinct l_orderkey), sum(distinct l_orderkey) from lineitem
+    // group by l_linenumber having sum(distinct l_orderkey) >= 6;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testDistinctAggregationWithUnion1() throws Exception {
+    // select sum(distinct l_orderkey), l_linenumber, count(distinct l_orderkey), count(*) as total
+    // from (select * from lineitem union select * from lineitem) group by l_linenumber;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testComplexParameter() throws Exception {
+    // select sum(l_extendedprice*l_discount) as revenue from lineitem;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testComplexParameterWithSubQuery() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testComplexParameter2() throws Exception {
+    // select count(*) + max(l_orderkey) as merged from lineitem;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testHavingWithNamedTarget() throws Exception {
+    // select l_orderkey, avg(l_partkey) total, sum(l_linenumber) as num from lineitem group by l_orderkey
+    // having total >= 2 or num = 3;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testHavingWithAggFunction() throws Exception {
+    // select l_orderkey, avg(l_partkey) total, sum(l_linenumber) as num from lineitem group by l_orderkey
+    // having avg(l_partkey) = 2.5 or num = 1;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/query/TestInsertQuery.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestInsertQuery.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestInsertQuery.java
new file mode 100644
index 0000000..e058943
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestInsertQuery.java
@@ -0,0 +1,262 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionCodecFactory;
+import org.apache.hadoop.io.compress.DeflateCodec;
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.TajoTestingCluster;
+import org.apache.tajo.TpchTestBase;
+import org.apache.tajo.catalog.CatalogService;
+import org.apache.tajo.catalog.CatalogUtil;
+import org.apache.tajo.catalog.TableDesc;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.sql.ResultSet;
+
+import static org.apache.tajo.TajoConstants.DEFAULT_DATABASE_NAME;
+import static org.junit.Assert.*;
+
+/**
+ * Integration tests for INSERT [OVERWRITE] INTO statements: into managed
+ * tables, with explicit target columns, into bare HDFS locations, and with
+ * output compression. All tests run against the shared TPC-H test cluster.
+ */
+@Category(IntegrationTest.class)
+public class TestInsertQuery {
+  // Shared TPC-H harness (cluster + pre-loaded TPC-H tables), created once.
+  private static TpchTestBase tpch;
+
+  public TestInsertQuery() throws IOException {
+    super();
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    tpch = TpchTestBase.getInstance();
+  }
+
+  @Test
+  public final void testInsertOverwrite() throws Exception {
+    String tableName = CatalogUtil.normalizeIdentifier("InsertOverwrite");
+    ResultSet res = tpch.execute("create table " + tableName +" (col1 int4, col2 int4, col3 float8)");
+    res.close();
+    TajoTestingCluster cluster = tpch.getTestingCluster();
+    CatalogService catalog = cluster.getMaster().getCatalog();
+    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
+
+    res = tpch.execute("insert overwrite into " + tableName
+        + " select l_orderkey, l_partkey, l_quantity from lineitem");
+    res.close();
+
+    TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+    // The HCatalog store does not maintain Tajo's table stats, so only check
+    // row counts against the native catalog.
+    if (!cluster.isHCatalogStoreRunning()) {
+      assertEquals(5, desc.getStats().getNumRows().intValue());
+    }
+  }
+
+  @Test
+  public final void testInsertOverwriteSmallerColumns() throws Exception {
+    // Inserting fewer columns than the table defines must keep the schema intact.
+    String tableName = CatalogUtil.normalizeIdentifier("insertoverwritesmallercolumns");
+    ResultSet res = tpch.execute("create table " + tableName + " (col1 int4, col2 int4, col3 float8)");
+    res.close();
+    TajoTestingCluster cluster = tpch.getTestingCluster();
+    CatalogService catalog = cluster.getMaster().getCatalog();
+    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
+    TableDesc originalDesc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+
+    res = tpch.execute("insert overwrite into " + tableName + " select l_orderkey from lineitem");
+    res.close();
+    TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+    if (!cluster.isHCatalogStoreRunning()) {
+      assertEquals(5, desc.getStats().getNumRows().intValue());
+    }
+    assertEquals(originalDesc.getSchema(), desc.getSchema());
+  }
+
+  @Test
+  public final void testInsertOverwriteWithTargetColumns() throws Exception {
+    String tableName = CatalogUtil.normalizeIdentifier("InsertOverwriteWithTargetColumns");
+    ResultSet res = tpch.execute("create table " + tableName + " (col1 int4, col2 int4, col3 float8)");
+    res.close();
+    TajoTestingCluster cluster = tpch.getTestingCluster();
+    CatalogService catalog = cluster.getMaster().getCatalog();
+    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
+    TableDesc originalDesc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+
+    res = tpch.execute(
+        "insert overwrite into " + tableName + " (col1, col3) select l_orderkey, l_quantity from lineitem");
+    res.close();
+    TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+    if (!cluster.isHCatalogStoreRunning()) {
+      assertEquals(5, desc.getStats().getNumRows().intValue());
+    }
+
+    res = tpch.execute("select * from " + tableName);
+
+    // col2 was not a target column, so it must come back as SQL NULL
+    // (getFloat returns 0 and wasNull() reports true) in every row.
+    assertTrue(res.next());
+    assertEquals(1, res.getLong(1));
+    assertTrue(0f == res.getFloat(2));
+    assertTrue(res.wasNull());
+    assertTrue(17.0 == res.getFloat(3));
+
+    assertTrue(res.next());
+    assertEquals(1, res.getLong(1));
+    assertTrue(0f == res.getFloat(2));
+    assertTrue(res.wasNull());
+    assertTrue(36.0 == res.getFloat(3));
+
+    assertTrue(res.next());
+    assertEquals(2, res.getLong(1));
+    assertTrue(0f == res.getFloat(2));
+    assertTrue(res.wasNull());
+    assertTrue(38.0 == res.getFloat(3));
+
+    assertTrue(res.next());
+    // This orderkey check was missing; every other row asserts column 1 and
+    // the rows arrive in source order (1, 1, 2, 3, 3).
+    assertEquals(3, res.getLong(1));
+    assertTrue(0f == res.getFloat(2));
+    assertTrue(res.wasNull());
+    assertTrue(45.0 == res.getFloat(3));
+
+    assertTrue(res.next());
+    assertEquals(3, res.getLong(1));
+    assertTrue(0f == res.getFloat(2));
+    assertTrue(res.wasNull());
+    assertTrue(49.0 == res.getFloat(3));
+
+    assertFalse(res.next());
+    res.close();
+
+    assertEquals(originalDesc.getSchema(), desc.getSchema());
+  }
+
+  @Test
+  public final void testInsertOverwriteWithAsterisk() throws Exception {
+    String tableName = CatalogUtil.normalizeIdentifier("testinsertoverwritewithasterisk");
+    ResultSet res = tpch.execute("create table " + tableName + " as select * from lineitem");
+    res.close();
+    TajoTestingCluster cluster = tpch.getTestingCluster();
+    CatalogService catalog = cluster.getMaster().getCatalog();
+    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
+
+    res = tpch.execute("insert overwrite into " + tableName + " select * from lineitem where l_orderkey = 3");
+    res.close();
+    TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+    if (!cluster.isHCatalogStoreRunning()) {
+      assertEquals(2, desc.getStats().getNumRows().intValue());
+    }
+  }
+
+  @Test
+  public final void testInsertOverwriteIntoSelect() throws Exception {
+    String tableName = CatalogUtil.normalizeIdentifier("insertoverwriteintoselect");
+    ResultSet res = tpch.execute(
+        "create table " + tableName + " as select l_orderkey from lineitem");
+    assertFalse(res.next());
+    res.close();
+
+    TajoTestingCluster cluster = tpch.getTestingCluster();
+    CatalogService catalog = cluster.getMaster().getCatalog();
+    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
+    TableDesc orderKeys = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+    if (!cluster.isHCatalogStoreRunning()) {
+      assertEquals(5, orderKeys.getStats().getNumRows().intValue());
+    }
+
+    // this query results in two rows, replacing the previous five.
+    res = tpch.execute(
+        "insert overwrite into " + tableName + " select l_orderkey from lineitem where l_orderkey = 3");
+    assertFalse(res.next());
+    res.close();
+
+    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
+    orderKeys = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+    if (!cluster.isHCatalogStoreRunning()) {
+      assertEquals(2, orderKeys.getStats().getNumRows().intValue());
+    }
+  }
+
+  @Test
+  public final void testInsertOverwriteCapitalTableName() throws Exception {
+    String tableName = CatalogUtil.normalizeIdentifier("testInsertOverwriteCapitalTableName");
+    ResultSet res = tpch.execute("create table " + tableName + " as select * from lineitem");
+    res.close();
+    TajoTestingCluster cluster = tpch.getTestingCluster();
+    CatalogService catalog = cluster.getMaster().getCatalog();
+    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
+
+    res = tpch.execute("insert overwrite into " + tableName + " select * from lineitem where l_orderkey = 3");
+    res.close();
+    TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+    if (!cluster.isHCatalogStoreRunning()) {
+      assertEquals(2, desc.getStats().getNumRows().intValue());
+    }
+  }
+
+  @Test
+  public final void testInsertOverwriteLocation() throws Exception {
+    // Path fixed from the copy-pasted '/tajo-data/testInsertOverwriteCapitalTableName'
+    // so this test writes to a directory named after itself.
+    ResultSet res =
+        tpch.execute("insert overwrite into location '/tajo-data/testInsertOverwriteLocation' select * from lineitem where l_orderkey = 3");
+    res.close();
+    FileSystem fs = FileSystem.get(tpch.getTestingCluster().getConfiguration());
+    assertTrue(fs.exists(new Path("/tajo-data/testInsertOverwriteLocation")));
+    assertEquals(1, fs.listStatus(new Path("/tajo-data/testInsertOverwriteLocation")).length);
+  }
+
+  @Test
+  public final void testInsertOverwriteWithCompression() throws Exception {
+    String tableName = CatalogUtil.normalizeIdentifier("testInsertOverwriteWithCompression");
+    ResultSet res = tpch.execute("create table " + tableName + " (col1 int4, col2 int4, col3 float8) USING csv WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec')");
+    res.close();
+    TajoTestingCluster cluster = tpch.getTestingCluster();
+    CatalogService catalog = cluster.getMaster().getCatalog();
+    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
+
+    res = tpch.execute("insert overwrite into " + tableName + " select  l_orderkey, l_partkey, l_quantity from lineitem where l_orderkey = 3");
+    res.close();
+    TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+    if (!cluster.isHCatalogStoreRunning()) {
+      assertEquals(2, desc.getStats().getNumRows().intValue());
+    }
+
+    // Every output file must have been written through the configured codec.
+    FileSystem fs = FileSystem.get(tpch.getTestingCluster().getConfiguration());
+    assertTrue(fs.exists(desc.getPath()));
+    CompressionCodecFactory factory = new CompressionCodecFactory(tpch.getTestingCluster().getConfiguration());
+
+    for (FileStatus file : fs.listStatus(desc.getPath())) {
+      CompressionCodec codec = factory.getCodec(file.getPath());
+      assertTrue(codec instanceof DeflateCodec);
+    }
+  }
+
+  @Test
+  public final void testInsertOverwriteLocationWithCompression() throws Exception {
+    ResultSet res = tpch.execute("insert overwrite into location '/tajo-data/testInsertOverwriteLocationWithCompression' USING csv WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') select * from lineitem where l_orderkey = 3");
+    res.close();
+    FileSystem fs = FileSystem.get(tpch.getTestingCluster().getConfiguration());
+    Path path = new Path("/tajo-data/testInsertOverwriteLocationWithCompression");
+    assertTrue(fs.exists(path));
+    assertEquals(1, fs.listStatus(path).length);
+
+    // As above: every file written to the location must be codec-compressed.
+    CompressionCodecFactory factory = new CompressionCodecFactory(tpch.getTestingCluster().getConfiguration());
+    for (FileStatus file : fs.listStatus(path)){
+      CompressionCodec codec = factory.getCodec(file.getPath());
+      assertTrue(codec instanceof DeflateCodec);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinBroadcast.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinBroadcast.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinBroadcast.java
new file mode 100644
index 0000000..89519ef
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinBroadcast.java
@@ -0,0 +1,377 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.QueryId;
+import org.apache.tajo.QueryTestCaseBase;
+import org.apache.tajo.TajoConstants;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.engine.planner.global.ExecutionBlock;
+import org.apache.tajo.engine.planner.global.MasterPlan;
+import org.apache.tajo.engine.planner.logical.NodeType;
+import org.apache.tajo.jdbc.TajoResultSet;
+import org.apache.tajo.master.querymaster.QueryMasterTask;
+import org.apache.tajo.worker.TajoWorker;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.sql.ResultSet;
+
+import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.assertTrue;
+import static junit.framework.TestCase.fail;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Integration tests for joins executed with the automatic broadcast-join
+ * optimization enabled (the size threshold is lowered to 5KB so the small
+ * TPC-H test tables qualify for broadcasting).
+ *
+ * Most tests follow the QueryTestCaseBase convention: executeQuery()
+ * presumably loads the SQL resource named after the calling test method and
+ * assertResultSet() compares against the matching .result resource --
+ * confirm in QueryTestCaseBase.
+ */
+@Category(IntegrationTest.class)
+public class TestJoinBroadcast extends QueryTestCaseBase {
+  public TestJoinBroadcast() throws Exception {
+    super(TajoConstants.DEFAULT_DATABASE_NAME);
+    // Force broadcast joins on and shrink the threshold so test tables qualify.
+    testingCluster.setAllTajoDaemonConfValue(TajoConf.ConfVars.DIST_QUERY_BROADCAST_JOIN_AUTO.varname, "true");
+    testingCluster.setAllTajoDaemonConfValue(
+        TajoConf.ConfVars.DIST_QUERY_BROADCAST_JOIN_THRESHOLD.varname, "" + (5 * 1024));
+
+    // Extra tables large enough to stay on the non-broadcast side of joins.
+    executeDDL("create_lineitem_large_ddl.sql", "lineitem_large");
+    executeDDL("create_customer_large_ddl.sql", "customer_large");
+  }
+
+  @Test
+  public final void testCrossJoin() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWhereClauseJoin1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWhereClauseJoin2() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWhereClauseJoin3() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWhereClauseJoin4() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWhereClauseJoin5() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWhereClauseJoin6() throws Exception {
+    ResultSet res = executeQuery();
+    // NOTE(review): this test only prints the result and asserts nothing.
+    // Consider adding a .result resource and calling assertResultSet(res).
+    System.out.println(resultSetToString(res));
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testTPCHQ2Join() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLeftOuterJoin1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLeftOuterJoinWithConstantExpr1() throws Exception {
+    // outer join with constant projections
+    //
+    // select c_custkey, orders.o_orderkey, 'val' as val from customer
+    // left outer join orders on c_custkey = o_orderkey;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLeftOuterJoinWithConstantExpr2() throws Exception {
+    // outer join with constant projections
+    //
+    // select c_custkey, o.o_orderkey, 'val' as val from customer left outer join
+    // (select * from orders) o on c_custkey = o.o_orderkey
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLeftOuterJoinWithConstantExpr3() throws Exception {
+    // outer join with constant projections
+    //
+    // select a.c_custkey, 123::INT8 as const_val, b.min_name from customer a
+    // left outer join ( select c_custkey, min(c_name) as min_name from customer group by c_custkey) b
+    // on a.c_custkey = b.c_custkey;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testRightOuterJoin1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testFullOuterJoin1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public void testJoinCoReferredEvals1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public void testJoinCoReferredEvalsWithSameExprs1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public void testJoinCoReferredEvalsWithSameExprs2() throws Exception {
+    // including grouping operator
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public void testCrossJoinAndCaseWhen() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public void testCrossJoinWithAsterisk1() throws Exception {
+    // select region.*, customer.* from region, customer;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public void testCrossJoinWithAsterisk2() throws Exception {
+    // select region.*, customer.* from customer, region;
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public void testCrossJoinWithAsterisk3() throws Exception {
+    // select * from customer, region
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public void testCrossJoinWithAsterisk4() throws Exception {
+    // select length(r_regionkey), *, c_custkey*10 from customer, region
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testInnerJoinWithEmptyTable() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLeftOuterJoinWithEmptyTable1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLeftOuterJoinWithEmptyTable2() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLeftOuterJoinWithEmptyTable3() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLeftOuterJoinWithEmptyTable4() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testRightOuterJoinWithEmptyTable1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testFullOuterJoinWithEmptyTable1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testCrossJoinWithEmptyTable1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testJoinOnMultipleDatabases() throws Exception {
+    // Joins across two databases: copies of part/supplier live in "joins".
+    executeString("CREATE DATABASE JOINS");
+    assertDatabaseExists("joins");
+    executeString("CREATE TABLE JOINS.part_ as SELECT * FROM part");
+    assertTableExists("joins.part_");
+    executeString("CREATE TABLE JOINS.supplier_ as SELECT * FROM supplier");
+    assertTableExists("joins.supplier_");
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  /**
+   * Finds the master plan of the given query by asking every worker's
+   * query master; fails the test if no worker knows the query.
+   */
+  private MasterPlan getQueryPlan(QueryId queryId) {
+    for (TajoWorker eachWorker: testingCluster.getTajoWorkers()) {
+      QueryMasterTask queryMasterTask = eachWorker.getWorkerContext().getQueryMaster().getQueryMasterTask(queryId, true);
+      if (queryMasterTask != null) {
+        return queryMasterTask.getQuery().getPlan();
+      }
+    }
+
+    // Separator added so the id is not glued to the message text.
+    fail("Can't find query from workers: " + queryId);
+    return null;
+  }
+
+  @Test
+  public final void testBroadcastBasicJoin() throws Exception {
+    ResultSet res = executeQuery();
+    TajoResultSet ts = (TajoResultSet)res;
+    assertResultSet(res);
+    // Read the query id before cleanup so the closed result set is not
+    // dereferenced afterwards.
+    QueryId queryId = ts.getQueryId();
+    cleanupQuery(res);
+
+    MasterPlan plan = getQueryPlan(queryId);
+    ExecutionBlock rootEB = plan.getRoot();
+
+    /*
+    |-eb_1395998037360_0001_000006
+       |-eb_1395998037360_0001_000005
+     */
+    assertEquals(1, plan.getChildCount(rootEB.getId()));
+
+    ExecutionBlock firstEB = plan.getChild(rootEB.getId(), 0);
+
+    // Both small tables should be broadcast into the single child block.
+    assertNotNull(firstEB);
+    assertEquals(2, firstEB.getBroadcastTables().size());
+    assertTrue(firstEB.getBroadcastTables().contains("default.supplier"));
+    assertTrue(firstEB.getBroadcastTables().contains("default.part"));
+  }
+
+  @Test
+  public final void testBroadcastTwoPartJoin() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    // Capture the query id before cleanup rather than using the closed
+    // result set afterwards.
+    QueryId queryId = ((TajoResultSet) res).getQueryId();
+    cleanupQuery(res);
+
+    MasterPlan plan = getQueryPlan(queryId);
+    ExecutionBlock rootEB = plan.getRoot();
+
+    /*
+    |-eb_1395996354406_0001_000010
+       |-eb_1395996354406_0001_000009
+          |-eb_1395996354406_0001_000008
+          |-eb_1395996354406_0001_000005
+     */
+    assertEquals(1, plan.getChildCount(rootEB.getId()));
+
+    ExecutionBlock firstJoinEB = plan.getChild(rootEB.getId(), 0);
+    assertNotNull(firstJoinEB);
+    assertEquals(NodeType.JOIN, firstJoinEB.getPlan().getType());
+    assertEquals(0, firstJoinEB.getBroadcastTables().size());
+
+    ExecutionBlock leafEB1 = plan.getChild(firstJoinEB.getId(), 0);
+    assertTrue(leafEB1.getBroadcastTables().contains("default.orders"));
+    assertTrue(leafEB1.getBroadcastTables().contains("default.part"));
+
+    ExecutionBlock leafEB2 = plan.getChild(firstJoinEB.getId(), 1);
+    assertTrue(leafEB2.getBroadcastTables().contains("default.nation"));
+  }
+
+  @Test
+  public final void testBroadcastSubquery() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  // It doesn't run as expected because of TAJO-747 bug.
+  // Thus, we need to block this method until resolving this bug.
+//  @Test
+//  public final void testBroadcastSubquery2() throws Exception {
+//    ResultSet res = executeQuery();
+//    assertResultSet(res);
+//    cleanupQuery(res);
+//  }
+
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/6594ac1c/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinOnPartitionedTables.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinOnPartitionedTables.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinOnPartitionedTables.java
new file mode 100644
index 0000000..3e28f9e
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinOnPartitionedTables.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.QueryTestCaseBase;
+import org.apache.tajo.TajoConstants;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.sql.ResultSet;
+
+/**
+ * Integration tests for joins whose inputs include partitioned tables.
+ */
+// Category added for consistency with the other integration join tests in
+// this module, so this class is included in/excluded from the same test runs.
+@Category(IntegrationTest.class)
+public class TestJoinOnPartitionedTables extends QueryTestCaseBase {
+
+  public TestJoinOnPartitionedTables() {
+    super(TajoConstants.DEFAULT_DATABASE_NAME);
+  }
+
+  @Test
+  public void testPartitionTableJoinSmallTable() throws Exception {
+
+    // Create and populate the partitioned customer table used by every
+    // query below.
+    executeDDL("customer_ddl.sql", null);
+    ResultSet res = executeFile("insert_into_customer.sql");
+    res.close();
+
+    // Default query/result pair named after this test method.
+    res = executeQuery();
+    assertResultSet(res);
+    res.close();
+
+    res = executeFile("selfJoinOfPartitionedTable.sql");
+    assertResultSet(res, "selfJoinOfPartitionedTable.result");
+    res.close();
+
+    res = executeFile("testNoProjectionJoinQual.sql");
+    assertResultSet(res, "testNoProjectionJoinQual.result");
+    res.close();
+
+    res = executeFile("testPartialFilterPushDown.sql");
+    assertResultSet(res, "testPartialFilterPushDown.result");
+    res.close();
+  }
+}


Mime
View raw message