tajo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hyun...@apache.org
Subject [31/51] [partial] tajo git commit: TAJO-1761: Separate an integration unit test kit into an independent module.
Date Fri, 14 Aug 2015 14:30:10 GMT
http://git-wip-us.apache.org/repos/asf/tajo/blob/a4106883/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestWindowQuery.java
----------------------------------------------------------------------
diff --git a/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestWindowQuery.java b/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestWindowQuery.java
new file mode 100644
index 0000000..9993992
--- /dev/null
+++ b/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestWindowQuery.java
@@ -0,0 +1,465 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.query;
+
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.QueryTestCaseBase;
+import org.apache.tajo.TajoConstants;
+import org.apache.tajo.TajoTestingCluster;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.common.TajoDataTypes;
+import org.apache.tajo.storage.StorageConstants;
+import org.apache.tajo.util.KeyValueSet;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.sql.ResultSet;
+
+import static org.junit.Assert.assertEquals;
+
+@Category(IntegrationTest.class)
+public class TestWindowQuery extends QueryTestCaseBase {
+
+  public TestWindowQuery() {
+    super(TajoConstants.DEFAULT_DATABASE_NAME);
+  }
+
+  @Test
+  public final void testWindow1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindow2() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindow3() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindow4() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindow5() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindow6() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindow7() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindow8() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithOrderBy1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithOrderBy2() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithOrderBy3() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithOrderBy4() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithOrderBy5() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowBeforeLimit() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithSubQuery() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithSubQuery2() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithSubQuery3() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithSubQuery4() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithSubQuery5() throws Exception {
+    // filter push down test
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithSubQuery6() throws Exception {
+    // filter push down test
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithAggregation1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithAggregation2() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithAggregation3() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithAggregation4() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithAggregation5() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testWindowWithAggregation6() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testComplexOrderBy1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testRowNumber1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testRowNumber2() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testRowNumber3() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testFirstValue1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testFirstValueTime() throws Exception {
+    KeyValueSet tableOptions = new KeyValueSet();
+    tableOptions.set(StorageConstants.TEXT_DELIMITER, StorageConstants.DEFAULT_FIELD_DELIMITER);
+    tableOptions.set(StorageConstants.TEXT_NULL, "\\\\N");
+
+    Schema schema = new Schema();
+    schema.addColumn("id", TajoDataTypes.Type.INT4);
+    schema.addColumn("time", TajoDataTypes.Type.TIME);
+    String[] data = new String[]{ "1|12:11:12", "2|10:11:13", "2|05:42:41" };
+    TajoTestingCluster.createTable("firstvaluetime", schema, tableOptions, data, 1);
+
+    try {
+      ResultSet res = executeString(
+          "select id, first_value(time) over ( partition by id order by time ) as time_first from firstvaluetime");
+      String ascExpected = "id,time_first\n" +
+          "-------------------------------\n" +
+          "1,12:11:12\n" +
+          "2,05:42:41\n" +
+          "2,05:42:41\n";
+
+      assertEquals(ascExpected, resultSetToString(res));
+      res.close();
+    } finally {
+      executeString("DROP TABLE firstvaluetime PURGE");
+    }
+  }
+
+  @Test
+  public final void testLastValue1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLastValueTime() throws Exception {
+    KeyValueSet tableOptions = new KeyValueSet();
+    tableOptions.set(StorageConstants.TEXT_DELIMITER, StorageConstants.DEFAULT_FIELD_DELIMITER);
+    tableOptions.set(StorageConstants.TEXT_NULL, "\\\\N");
+
+    Schema schema = new Schema();
+    schema.addColumn("id", TajoDataTypes.Type.INT4);
+    schema.addColumn("time", TajoDataTypes.Type.TIME);
+    String[] data = new String[]{ "1|12:11:12", "2|10:11:13", "2|05:42:41" };
+    TajoTestingCluster.createTable("lastvaluetime", schema, tableOptions, data, 1);
+
+    try {
+      ResultSet res = executeString(
+          "select id, last_value(time) over ( partition by id order by time ) as time_last from lastvaluetime");
+      String ascExpected = "id,time_last\n" +
+          "-------------------------------\n" +
+          "1,12:11:12\n" +
+          "2,10:11:13\n" +
+          "2,10:11:13\n";
+
+      assertEquals(ascExpected, resultSetToString(res));
+      res.close();
+    } finally {
+      executeString("DROP TABLE lastvaluetime PURGE");
+    }
+  }
+
+  @Test
+  public final void testLag1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLagTime() throws Exception {
+    KeyValueSet tableOptions = new KeyValueSet();
+    tableOptions.set(StorageConstants.TEXT_DELIMITER, StorageConstants.DEFAULT_FIELD_DELIMITER);
+    tableOptions.set(StorageConstants.TEXT_NULL, "\\\\N");
+
+    Schema schema = new Schema();
+    schema.addColumn("id", TajoDataTypes.Type.INT4);
+    schema.addColumn("time", TajoDataTypes.Type.TIME);
+    String[] data = new String[]{ "1|12:11:12", "2|10:11:13", "2|05:42:41" };
+    TajoTestingCluster.createTable("lagtime", schema, tableOptions, data, 1);
+
+    try {
+      ResultSet res = executeString(
+          "select id, lag(time, 1) over ( partition by id order by time ) as time_lag from lagtime");
+      String ascExpected = "id,time_lag\n" +
+          "-------------------------------\n" +
+          "1,null\n" +
+          "2,null\n" +
+          "2,05:42:41\n";
+
+      assertEquals(ascExpected, resultSetToString(res));
+      res.close();
+    } finally {
+      executeString("DROP TABLE lagtime PURGE");
+    }
+  }
+
+  @Test
+  public final void testLagWithNoArgs() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLagWithDefault() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLead1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLeadTime() throws Exception {
+    KeyValueSet tableOptions = new KeyValueSet();
+    tableOptions.set(StorageConstants.TEXT_DELIMITER, StorageConstants.DEFAULT_FIELD_DELIMITER);
+    tableOptions.set(StorageConstants.TEXT_NULL, "\\\\N");
+
+    Schema schema = new Schema();
+    schema.addColumn("id", TajoDataTypes.Type.INT4);
+    schema.addColumn("time", TajoDataTypes.Type.TIME);
+    String[] data = new String[]{ "1|12:11:12", "2|10:11:13", "2|05:42:41" };
+    TajoTestingCluster.createTable("leadtime", schema, tableOptions, data, 1);
+
+    try {
+      ResultSet res = executeString(
+          "select id, lead(time, 1) over ( partition by id order by time ) as time_lead from leadtime");
+      String ascExpected = "id,time_lead\n" +
+          "-------------------------------\n" +
+          "1,null\n" +
+          "2,10:11:13\n" +
+          "2,null\n";
+
+      assertEquals(ascExpected, resultSetToString(res));
+      res.close();
+    } finally {
+      executeString("DROP TABLE leadtime PURGE");
+    }
+  }
+
+  @Test
+  public final void testLeadWithNoArgs() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testLeadWithDefault() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testStdDevSamp1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testStdDevPop1() throws Exception {
+    ResultSet res = executeQuery();
+    assertResultSet(res);
+    cleanupQuery(res);
+  }
+
+  @Test
+  public final void testMultipleWindow() throws Exception {
+    KeyValueSet tableOptions = new KeyValueSet();
+    tableOptions.set(StorageConstants.TEXT_DELIMITER, StorageConstants.DEFAULT_FIELD_DELIMITER);
+    tableOptions.set(StorageConstants.TEXT_NULL, "\\\\N");
+
+    Schema schema = new Schema();
+    schema.addColumn("id", TajoDataTypes.Type.INT4);
+    schema.addColumn("time", TajoDataTypes.Type.TIME);
+    schema.addColumn("name", TajoDataTypes.Type.TEXT);
+    String[] data = new String[]{ "1|12:11:12|abc", "2|10:11:13|def", "2|05:42:41|ghi" };
+    TajoTestingCluster.createTable("multiwindow", schema, tableOptions, data, 1);
+
+    try {
+      ResultSet res = executeString(
+          "select id, last_value(time) over ( partition by id order by time ) as time_last, last_value(name) over ( partition by id order by time ) as name_last from multiwindow");
+      String ascExpected = "id,time_last,name_last\n" +
+          "-------------------------------\n" +
+          "1,12:11:12,abc\n" +
+          "2,10:11:13,def\n" +
+          "2,10:11:13,def\n";
+
+      assertEquals(ascExpected, resultSetToString(res));
+      res.close();
+    } finally {
+      executeString("DROP TABLE multiwindow PURGE");
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/a4106883/tajo-core-tests/src/test/java/org/apache/tajo/engine/util/TestTableCache.java
----------------------------------------------------------------------
diff --git a/tajo-core-tests/src/test/java/org/apache/tajo/engine/util/TestTableCache.java b/tajo-core-tests/src/test/java/org/apache/tajo/engine/util/TestTableCache.java
new file mode 100644
index 0000000..f10f2a1
--- /dev/null
+++ b/tajo-core-tests/src/test/java/org/apache/tajo/engine/util/TestTableCache.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.util;
+
+import org.apache.tajo.ExecutionBlockId;
+import org.apache.tajo.QueryIdFactory;
+import org.apache.tajo.catalog.statistics.TableStats;
+import org.apache.tajo.engine.utils.CacheHolder;
+import org.apache.tajo.engine.utils.TableCache;
+import org.apache.tajo.engine.utils.TableCacheKey;
+import org.apache.tajo.worker.ExecutionBlockSharedResource;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+public class TestTableCache {
+
+  /**
+   * Verifies that many concurrent lookups of the same broadcast-table cache
+   * key all observe a single cached value (the one created by whichever task
+   * won the install race), and that releasing the cache for the execution
+   * block removes the entry.
+   */
+  @Test
+  public void testBroadcastTableCache() throws Exception {
+
+    ExecutionBlockId ebId = QueryIdFactory.newExecutionBlockId(
+        QueryIdFactory.newQueryId(System.currentTimeMillis(), 0));
+
+    final TableCacheKey key = new TableCacheKey(ebId.toString(), "testBroadcastTableCache", "path");
+    final ExecutionBlockSharedResource resource = new ExecutionBlockSharedResource();
+
+    final int parallelCount = 30;
+    ExecutorService executor = Executors.newFixedThreadPool(parallelCount);
+    try {
+      List<Future<CacheHolder<Long>>> tasks = new ArrayList<Future<CacheHolder<Long>>>();
+      for (int i = 0; i < parallelCount; i++) {
+        tasks.add(executor.submit(createTask(key, resource)));
+      }
+
+      // All tasks must return the value cached by the first installer.
+      long expected = tasks.get(0).get().getData().longValue();
+
+      for (Future<CacheHolder<Long>> future : tasks) {
+        assertEquals(expected, future.get().getData().longValue());
+      }
+
+      resource.releaseBroadcastCache(ebId);
+      assertFalse(resource.hasBroadcastCache(key));
+    } finally {
+      // Shut the pool down even when an assertion or Future.get fails,
+      // so the test JVM does not leak worker threads.
+      executor.shutdown();
+    }
+  }
+
+  /**
+   * Builds a task that installs a {@code CacheHolder<Long>} for {@code key}
+   * exactly once (guarded by the resource's lock) and then returns whatever
+   * holder the shared cache contains.
+   */
+  private Callable<CacheHolder<Long>> createTask(final TableCacheKey key, final ExecutionBlockSharedResource resource) {
+    return new Callable<CacheHolder<Long>>() {
+      @Override
+      @SuppressWarnings("unchecked") // the cache stores CacheHolder<?>; this test only ever inserts CacheHolder<Long>
+      public CacheHolder<Long> call() throws Exception {
+        synchronized (resource.getLock()) {
+          if (!TableCache.getInstance().hasCache(key)) {
+            // nanoTime serves as a unique marker value for the cache entry
+            final long nanoTime = System.nanoTime();
+            final TableStats tableStats = new TableStats();
+            tableStats.setNumRows(100);
+            tableStats.setNumBytes(1000);
+
+            final CacheHolder<Long> cacheHolder = new CacheHolder<Long>() {
+
+              @Override
+              public Long getData() {
+                return nanoTime;
+              }
+
+              @Override
+              public TableStats getTableStats() {
+                return tableStats;
+              }
+
+              @Override
+              public void release() {
+                // nothing to release for this in-memory test holder
+              }
+            };
+
+            resource.addBroadcastCache(key, cacheHolder);
+          }
+        }
+
+        return (CacheHolder<Long>) resource.getBroadcastCache(key);
+      }
+    };
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/a4106883/tajo-core-tests/src/test/java/org/apache/tajo/engine/util/TestTupleUtil.java
----------------------------------------------------------------------
diff --git a/tajo-core-tests/src/test/java/org/apache/tajo/engine/util/TestTupleUtil.java b/tajo-core-tests/src/test/java/org/apache/tajo/engine/util/TestTupleUtil.java
new file mode 100644
index 0000000..4a3565e
--- /dev/null
+++ b/tajo-core-tests/src/test/java/org/apache/tajo/engine/util/TestTupleUtil.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.util;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.catalog.SortSpec;
+import org.apache.tajo.common.TajoDataTypes.Type;
+import org.apache.tajo.datum.Datum;
+import org.apache.tajo.datum.DatumFactory;
+import org.apache.tajo.plan.util.PlannerUtil;
+import org.apache.tajo.engine.planner.RangePartitionAlgorithm;
+import org.apache.tajo.engine.planner.UniformRangePartition;
+import org.apache.tajo.plan.rewrite.rules.PartitionedTableRewriter;
+import org.apache.tajo.storage.*;
+import org.apache.tajo.storage.RowStoreUtil.RowStoreDecoder;
+import org.apache.tajo.storage.RowStoreUtil.RowStoreEncoder;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestTupleUtil {
+  /** Round-trips a fixed-size CHAR(5) tuple through the row-store encoder/decoder. */
+  @Test
+  public final void testFixedSizeChar() {
+    Schema schema = new Schema();
+    schema.addColumn("col1", Type.CHAR, 5);
+
+    Tuple tuple = new VTuple(1);
+    tuple.put(new Datum[] {
+      DatumFactory.createChar("abc\0\0")
+    });
+
+    RowStoreEncoder encoder = RowStoreUtil.createEncoder(schema);
+    RowStoreDecoder decoder = RowStoreUtil.createDecoder(schema);
+    byte [] bytes = encoder.toBytes(tuple);
+    Tuple tuple2 = decoder.toTuple(bytes);
+
+    assertEquals(tuple, tuple2);
+  }
+
+  /** Round-trips a tuple covering every primitive-like Tajo type through encode/decode. */
+  @Test
+  public final void testToBytesAndToTuple() {
+    Schema schema = new Schema();
+    schema.addColumn("col1", Type.BOOLEAN);
+    schema.addColumn("col2", Type.BIT);
+    schema.addColumn("col3", Type.CHAR);
+    schema.addColumn("col4", Type.INT2);
+    schema.addColumn("col5", Type.INT4);
+    schema.addColumn("col6", Type.INT8);
+    schema.addColumn("col7", Type.FLOAT4);
+    schema.addColumn("col8", Type.FLOAT8);
+    schema.addColumn("col9", Type.TEXT);
+    schema.addColumn("col10", Type.BLOB);
+    schema.addColumn("col11", Type.INET4);
+    //schema.addColumn("col11", DataType.IPv6);
+    
+    Tuple tuple = new VTuple(new Datum[] {
+        DatumFactory.createBool(true),
+        DatumFactory.createBit((byte) 0x99),
+        DatumFactory.createChar('7'),
+        DatumFactory.createInt2((short) 17),
+        DatumFactory.createInt4(59),
+        DatumFactory.createInt8(23L),
+        DatumFactory.createFloat4(77.9f),
+        // NOTE(review): float literal is widened to double here (271.8999...);
+        // presumably 271.9d was intended — harmless for this round-trip test.
+        DatumFactory.createFloat8(271.9f),
+        DatumFactory.createText("hyunsik"),
+        DatumFactory.createBlob("hyunsik".getBytes()),
+        DatumFactory.createInet4("192.168.0.1")
+    });
+
+    RowStoreEncoder encoder = RowStoreUtil.createEncoder(schema);
+    RowStoreDecoder decoder = RowStoreUtil.createDecoder(schema);
+    byte [] bytes = encoder.toBytes(tuple);
+    Tuple tuple2 = decoder.toTuple(bytes);
+    
+    assertEquals(tuple, tuple2);
+  }
+
+  /**
+   * Partitions a multi-column tuple range uniformly and checks that the
+   * resulting sub-ranges are strictly increasing in both start and end keys.
+   */
+  @Test
+  public final void testGetPartitions() {
+    VTuple sTuple = new VTuple(7);
+    VTuple eTuple = new VTuple(7);
+
+    Schema schema = new Schema();
+
+    schema.addColumn("numByte", Type.BIT);
+    schema.addColumn("numChar", Type.CHAR);
+    schema.addColumn("numShort", Type.INT2);
+    schema.addColumn("numInt", Type.INT4);
+    schema.addColumn("numLong", Type.INT8);
+    schema.addColumn("numFloat", Type.FLOAT4);
+    // NOTE(review): declared FLOAT4 but named "numDouble" and filled with
+    // createFloat8 below — looks like Type.FLOAT8 was intended; confirm
+    // against RangePartitionAlgorithm behavior before changing.
+    schema.addColumn("numDouble", Type.FLOAT4);
+
+    SortSpec[] sortSpecs = PlannerUtil.schemaToSortSpecs(schema);
+
+    sTuple.put(0, DatumFactory.createBit((byte) 44));
+    sTuple.put(1, DatumFactory.createChar('a'));
+    sTuple.put(2, DatumFactory.createInt2((short) 10));
+    sTuple.put(3, DatumFactory.createInt4(5));
+    sTuple.put(4, DatumFactory.createInt8(100));
+    sTuple.put(5, DatumFactory.createFloat4(100));
+    sTuple.put(6, DatumFactory.createFloat8(100));
+
+    eTuple.put(0, DatumFactory.createBit((byte) 99));
+    eTuple.put(1, DatumFactory.createChar('p'));
+    eTuple.put(2, DatumFactory.createInt2((short) 70));
+    eTuple.put(3, DatumFactory.createInt4(70));
+    eTuple.put(4, DatumFactory.createInt8(10000));
+    eTuple.put(5, DatumFactory.createFloat4(150));
+    eTuple.put(6, DatumFactory.createFloat8(170));
+
+    RangePartitionAlgorithm partitioner = new UniformRangePartition(new TupleRange(sortSpecs, sTuple, eTuple),
+        sortSpecs);
+    TupleRange [] ranges = partitioner.partition(5);
+    // the partitioner may emit more than the requested number of ranges
+    assertTrue(5 <= ranges.length);
+    BaseTupleComparator comp = new BaseTupleComparator(schema, PlannerUtil.schemaToSortSpecs(schema));
+    TupleRange prev = ranges[0];
+    for (int i = 1; i < ranges.length; i++) {
+      assertTrue(comp.compare(prev.getStart(), ranges[i].getStart()) < 0);
+      assertTrue(comp.compare(prev.getEnd(), ranges[i].getEnd()) < 0);
+      prev = ranges[i];
+    }
+  }
+
+  /**
+   * Checks tuple reconstruction from Hive-style partition paths
+   * (key1=v1/key2=v2/...), both with and without the hierarchy flag, and
+   * that paths containing trailing data files yield null in hierarchy mode.
+   */
+  @Test
+  public void testBuildTupleFromPartitionPath() {
+
+    Schema schema = new Schema();
+    schema.addColumn("key1", Type.INT8);
+    schema.addColumn("key2", Type.TEXT);
+
+    Path path = new Path("hdfs://tajo/warehouse/partition_test/");
+    Tuple tuple = PartitionedTableRewriter.buildTupleFromPartitionPath(schema, path, true);
+    assertNull(tuple);
+    tuple = PartitionedTableRewriter.buildTupleFromPartitionPath(schema, path, false);
+    assertNull(tuple);
+
+    path = new Path("hdfs://tajo/warehouse/partition_test/key1=123");
+    tuple = PartitionedTableRewriter.buildTupleFromPartitionPath(schema, path, true);
+    assertNotNull(tuple);
+    assertEquals(DatumFactory.createInt8(123), tuple.asDatum(0));
+    tuple = PartitionedTableRewriter.buildTupleFromPartitionPath(schema, path, false);
+    assertNotNull(tuple);
+    assertEquals(DatumFactory.createInt8(123), tuple.asDatum(0));
+
+    path = new Path("hdfs://tajo/warehouse/partition_test/key1=123/part-0000"); // wrong cases;
+    tuple = PartitionedTableRewriter.buildTupleFromPartitionPath(schema, path, true);
+    assertNull(tuple);
+    tuple = PartitionedTableRewriter.buildTupleFromPartitionPath(schema, path, false);
+    assertNull(tuple);
+
+    path = new Path("hdfs://tajo/warehouse/partition_test/key1=123/key2=abc");
+    tuple = PartitionedTableRewriter.buildTupleFromPartitionPath(schema, path, true);
+    assertNotNull(tuple);
+    assertEquals(DatumFactory.createInt8(123), tuple.asDatum(0));
+    assertEquals(DatumFactory.createText("abc"), tuple.asDatum(1));
+    tuple = PartitionedTableRewriter.buildTupleFromPartitionPath(schema, path, false);
+    assertNotNull(tuple);
+    assertEquals(DatumFactory.createInt8(123), tuple.asDatum(0));
+    assertEquals(DatumFactory.createText("abc"), tuple.asDatum(1));
+
+
+    path = new Path("hdfs://tajo/warehouse/partition_test/key1=123/key2=abc/part-0001");
+    tuple = PartitionedTableRewriter.buildTupleFromPartitionPath(schema, path, true);
+    assertNull(tuple);
+
+    tuple = PartitionedTableRewriter.buildTupleFromPartitionPath(schema, path, false);
+    assertNotNull(tuple);
+    assertEquals(DatumFactory.createInt8(123), tuple.asDatum(0));
+    assertEquals(DatumFactory.createText("abc"), tuple.asDatum(1));
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/a4106883/tajo-core-tests/src/test/java/org/apache/tajo/ha/TestHAServiceHDFSImpl.java
----------------------------------------------------------------------
diff --git a/tajo-core-tests/src/test/java/org/apache/tajo/ha/TestHAServiceHDFSImpl.java b/tajo-core-tests/src/test/java/org/apache/tajo/ha/TestHAServiceHDFSImpl.java
new file mode 100644
index 0000000..265f075
--- /dev/null
+++ b/tajo-core-tests/src/test/java/org/apache/tajo/ha/TestHAServiceHDFSImpl.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.ha;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.tajo.TajoConstants;
+import org.apache.tajo.TajoTestingCluster;
+import org.apache.tajo.TpchTestBase;
+import org.apache.tajo.client.TajoClient;
+import org.apache.tajo.client.TajoClientImpl;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.master.TajoMaster;
+import org.apache.tajo.service.ServiceTracker;
+import org.apache.tajo.service.ServiceTrackerFactory;
+import org.junit.Test;
+
+import static junit.framework.Assert.assertTrue;
+import static junit.framework.TestCase.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
+public class TestHAServiceHDFSImpl  {
+  private TajoTestingCluster cluster;
+
+  private TajoMaster primaryMaster;
+  private TajoMaster backupMaster;
+
+  private Path haPath, activePath, backupPath;
+
+  /**
+   * Starts two HA-enabled masters, verifies the HDFS-based active/backup
+   * directory layout, then stops the primary and checks that the backup has
+   * taken over (lock file plus the backup master's entry under active/).
+   */
+  @Test
+  public final void testAutoFailOver() throws Exception {
+    cluster = TpchTestBase.getInstance().getTestingCluster();
+
+    try {
+      FileSystem fs = cluster.getDefaultFileSystem();
+
+      TajoConf primaryConf = setConfigForHAMaster();
+      primaryMaster = new TajoMaster();
+      primaryMaster.init(primaryConf);
+      primaryMaster.start();
+
+      TajoConf backupConf = setConfigForHAMaster();
+      backupMaster = new TajoMaster();
+      backupMaster.init(backupConf);
+      backupMaster.start();
+
+      ServiceTracker tracker = ServiceTrackerFactory.get(primaryConf);
+
+      assertNotEquals(primaryMaster.getMasterName(), backupMaster.getMasterName());
+      verifySystemDirectories(fs);
+
+      // active/ holds the lock file plus the primary's entry; backup/ holds the backup's entry
+      assertEquals(2, fs.listStatus(activePath).length);
+      assertEquals(1, fs.listStatus(backupPath).length);
+
+      assertTrue(fs.exists(new Path(activePath, HAConstants.ACTIVE_LOCK_FILE)));
+      assertTrue(fs.exists(new Path(activePath, primaryMaster.getMasterName().replaceAll(":", "_"))));
+      assertTrue(fs.exists(new Path(backupPath, backupMaster.getMasterName().replaceAll(":", "_"))));
+
+      createDatabaseAndTable(tracker);
+      verifyDataBaseAndTable(tracker);
+
+      // NOTE(review): if an assertion above fails, the primary master is never
+      // stopped here; consider idempotent cleanup if TajoMaster.stop allows it.
+      primaryMaster.stop();
+
+      // the catalog must remain reachable through the backup after failover
+      verifyDataBaseAndTable(tracker);
+
+      assertEquals(2, fs.listStatus(activePath).length);
+      assertEquals(0, fs.listStatus(backupPath).length);
+
+      assertTrue(fs.exists(new Path(activePath, HAConstants.ACTIVE_LOCK_FILE)));
+      assertTrue(fs.exists(new Path(activePath, backupMaster.getMasterName().replaceAll(":", "_"))));
+    } finally {
+      // Guard against NPE when startup failed before backupMaster was
+      // constructed, so the original failure is not masked by this block.
+      if (backupMaster != null) {
+        backupMaster.stop();
+      }
+    }
+  }
+
+  /** Builds an HA-enabled master configuration with all RPC ports chosen fresh. */
+  private TajoConf setConfigForHAMaster() {
+    TajoConf conf = new TajoConf(cluster.getConfiguration());
+
+    conf.setVar(TajoConf.ConfVars.TAJO_MASTER_CLIENT_RPC_ADDRESS,
+        "localhost:" + NetUtils.getFreeSocketPort());
+    conf.setVar(TajoConf.ConfVars.TAJO_MASTER_UMBILICAL_RPC_ADDRESS,
+        "localhost:" + NetUtils.getFreeSocketPort());
+    conf.setVar(TajoConf.ConfVars.RESOURCE_TRACKER_RPC_ADDRESS,
+        "localhost:" + NetUtils.getFreeSocketPort());
+    conf.setVar(TajoConf.ConfVars.CATALOG_ADDRESS,
+        "localhost:" + NetUtils.getFreeSocketPort());
+    conf.setVar(TajoConf.ConfVars.TAJO_MASTER_INFO_ADDRESS,
+        "localhost:" + NetUtils.getFreeSocketPort());
+    conf.setIntVar(TajoConf.ConfVars.REST_SERVICE_PORT,
+        NetUtils.getFreeSocketPort());
+
+    conf.setBoolVar(TajoConf.ConfVars.TAJO_MASTER_HA_ENABLE, true);
+    conf.setIntVar(TajoConf.ConfVars.TAJO_MASTER_HA_MONITOR_INTERVAL, 1000);
+
+    //Client API service RPC Server
+    conf.setIntVar(TajoConf.ConfVars.MASTER_SERVICE_RPC_SERVER_WORKER_THREAD_NUM, 2);
+    conf.setIntVar(TajoConf.ConfVars.WORKER_SERVICE_RPC_SERVER_WORKER_THREAD_NUM, 2);
+
+    // Internal RPC Server
+    conf.setIntVar(TajoConf.ConfVars.MASTER_RPC_SERVER_WORKER_THREAD_NUM, 2);
+    conf.setIntVar(TajoConf.ConfVars.QUERY_MASTER_RPC_SERVER_WORKER_THREAD_NUM, 2);
+    conf.setIntVar(TajoConf.ConfVars.WORKER_RPC_SERVER_WORKER_THREAD_NUM, 2);
+    conf.setIntVar(TajoConf.ConfVars.CATALOG_RPC_SERVER_WORKER_THREAD_NUM, 2);
+    conf.setIntVar(TajoConf.ConfVars.SHUFFLE_RPC_SERVER_WORKER_THREAD_NUM, 2);
+
+    return conf;
+  }
+
+  /** Asserts the HA root, active, and backup system directories exist; caches their paths. */
+  private void verifySystemDirectories(FileSystem fs) throws Exception {
+    haPath = TajoConf.getSystemHADir(cluster.getConfiguration());
+    assertTrue(fs.exists(haPath));
+
+    activePath = new Path(haPath, TajoConstants.SYSTEM_HA_ACTIVE_DIR_NAME);
+    assertTrue(fs.exists(activePath));
+
+    backupPath = new Path(haPath, TajoConstants.SYSTEM_HA_BACKUP_DIR_NAME);
+    assertTrue(fs.exists(backupPath));
+  }
+
+  /** Creates two test tables through whichever master the tracker resolves to. */
+  private void createDatabaseAndTable(ServiceTracker tracker) throws Exception {
+    TajoClient client = null;
+    try {
+      client = new TajoClientImpl(tracker);
+      client.executeQuery("CREATE TABLE default.ha_test1 (age int);");
+      client.executeQuery("CREATE TABLE default.ha_test2 (age int);");
+    } finally {
+      IOUtils.cleanup(null, client);
+    }
+  }
+
+  /** Probes the default database and both test tables via the tracker-resolved master. */
+  private void verifyDataBaseAndTable(ServiceTracker tracker) throws Exception {
+    TajoClient client = null;
+    try {
+      client = new TajoClientImpl(tracker);
+      client.existDatabase("default");
+      client.existTable("default.ha_test1");
+      client.existTable("default.ha_test2");
+    } finally {
+      IOUtils.cleanup(null, client);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/a4106883/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestResultSet.java
----------------------------------------------------------------------
diff --git a/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestResultSet.java b/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestResultSet.java
new file mode 100644
index 0000000..0c83fd0
--- /dev/null
+++ b/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestResultSet.java
@@ -0,0 +1,224 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * 
+ */
+package org.apache.tajo.jdbc;
+
+import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.fs.Path;
+import org.apache.tajo.IntegrationTest;
+import org.apache.tajo.TajoConstants;
+import org.apache.tajo.TajoTestingCluster;
+import org.apache.tajo.TpchTestBase;
+import org.apache.tajo.catalog.CatalogUtil;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.catalog.TableDesc;
+import org.apache.tajo.catalog.TableMeta;
+import org.apache.tajo.catalog.statistics.TableStats;
+import org.apache.tajo.client.TajoClient;
+import org.apache.tajo.common.TajoDataTypes.Type;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.datum.DatumFactory;
+import org.apache.tajo.storage.*;
+import org.apache.tajo.util.KeyValueSet;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.sql.*;
+import java.util.Calendar;
+import java.util.List;
+import java.util.TimeZone;
+
+import static org.junit.Assert.*;
+
+@Category(IntegrationTest.class)
+public class TestResultSet {
+  private static TajoTestingCluster util;
+  private static TajoConf conf;
+  private static TableDesc desc;
+  private static FileTablespace sm;
+  private static TableMeta scoreMeta;
+  private static Schema scoreSchema;
+  private static List<ByteString> serializedData;
+
+  /**
+   * Creates a "default.score" table with 10,000 rows spread over 100 distinct
+   * department keys, keeping a row-encoded copy of every tuple for the
+   * in-memory result set test, and builds a TableDesc with synthetic stats.
+   */
+  @BeforeClass
+  public static void setup() throws Exception {
+    util = TpchTestBase.getInstance().getTestingCluster();
+    conf = util.getConfiguration();
+    sm = TablespaceManager.getDefault();
+
+    scoreSchema = new Schema();
+    scoreSchema.addColumn("deptname", Type.TEXT);
+    scoreSchema.addColumn("score", Type.INT4);
+    scoreMeta = CatalogUtil.newTableMeta("TEXT");
+    TableStats stats = new TableStats();
+
+    Path p = new Path(sm.getTableUri("default", "score"));
+    sm.getFileSystem().mkdirs(p);
+    Appender appender = sm.getAppender(scoreMeta, scoreSchema, new Path(p, "score"));
+    RowStoreUtil.RowStoreEncoder encoder = RowStoreUtil.createEncoder(scoreSchema);
+    serializedData = Lists.newArrayList();
+    appender.init();
+
+    int deptSize = 100;
+    int tupleNum = 10000;
+    Tuple tuple;
+    long written = 0;
+    for (int i = 0; i < tupleNum; i++) {
+      tuple = new VTuple(2);
+      String key = "test" + (i % deptSize);
+      tuple.put(0, DatumFactory.createText(key));
+      tuple.put(1, DatumFactory.createInt4(i + 1));
+      // BUG FIX: Integer.SIZE is 32 *bits*; the byte tally previously
+      // over-counted each INT4 column by a factor of 8.
+      written += key.length() + Integer.SIZE / 8;
+      appender.addTuple(tuple);
+      serializedData.add(ByteString.copyFrom(encoder.toBytes(tuple)));
+    }
+    appender.close();
+    stats.setNumRows(tupleNum);
+    stats.setNumBytes(written);
+    stats.setAvgRows(tupleNum);
+    stats.setNumBlocks(1000);
+    stats.setNumShuffleOutputs(100);
+    desc = new TableDesc(CatalogUtil.buildFQName(TajoConstants.DEFAULT_DATABASE_NAME, "score"),
+        scoreSchema, scoreMeta, p.toUri());
+    desc.setStats(stats);
+  }
+
+  @AfterClass
+  public static void terminate() throws IOException {
+    // Intentionally empty: the cluster is shared via TpchTestBase, and the test
+    // table lives in the cluster-managed warehouse directory.
+  }
+
+  /**
+   * Verifies TajoMemoryResultSet over the rows serialized in setup():
+   * column metadata must match the schema, and iteration must reproduce
+   * every row's values in insertion order.
+   */
+  @Test
+  public void testMemoryResultSet() throws Exception {
+    TajoMemoryResultSet rs = new TajoMemoryResultSet(null, desc.getSchema(),
+        serializedData, desc.getStats().getNumRows().intValue(), null);
+
+    ResultSetMetaData meta = rs.getMetaData();
+    assertNotNull(meta);
+    Schema schema = scoreSchema;
+    assertEquals(schema.size(), meta.getColumnCount());
+    // JDBC metadata/column accessors are 1-based, hence the i + 1 below.
+    for (int i = 0; i < meta.getColumnCount(); i++) {
+      assertEquals(schema.getColumn(i).getSimpleName(), meta.getColumnName(i + 1));
+      assertEquals(schema.getColumn(i).getQualifier(), meta.getTableName(i + 1));
+    }
+
+    int i = 0;
+    assertTrue(rs.isBeforeFirst());
+    // Row i was written as ("test" + i % 100, i + 1) in setup().
+    for (; rs.next(); i++) {
+      assertEquals("test"+i%100, rs.getString(1));
+      assertEquals("test"+i%100, rs.getString("deptname"));
+      assertEquals(i+1, rs.getInt(2));
+      assertEquals(i+1, rs.getInt("score"));
+    }
+    assertEquals(10000, i);
+    assertTrue(rs.isAfterLast());
+  }
+
+  /**
+   * Round-trips DATE, TIME and TIMESTAMP columns through a query and verifies
+   * the JDBC getters, both with the default calendar and with an explicit
+   * GMT+9 calendar.
+   */
+  @Test
+  public void testDateTimeType() throws Exception {
+    // HiveCatalog does not support date type, time type in hive-0.12.0
+    if(util.isHiveCatalogStoreRunning()) return;
+
+    ResultSet res = null;
+    TajoClient client = util.newTajoClient();
+    try {
+      String tableName = "datetimetable";
+      String query = "select col1, col2, col3 from " + tableName;
+
+      String [] table = new String[] {tableName};
+      Schema schema = new Schema();
+      schema.addColumn("col1", Type.DATE);
+      schema.addColumn("col2", Type.TIME);
+      schema.addColumn("col3", Type.TIMESTAMP);
+      Schema [] schemas = new Schema[] {schema};
+      String [] data = {
+          "2014-01-01|01:00:00|2014-01-01 01:00:00"
+      };
+      KeyValueSet tableOptions = new KeyValueSet();
+      tableOptions.set(StorageConstants.TEXT_DELIMITER, StorageConstants.DEFAULT_FIELD_DELIMITER);
+
+      res = TajoTestingCluster
+          .run(table, schemas, tableOptions, new String[][]{data}, query, client);
+
+      assertTrue(res.next());
+
+      // Default-calendar accessors must return the stored values unchanged.
+      Date date = res.getDate(1);
+      assertNotNull(date);
+      assertEquals(Date.valueOf("2014-01-01"), date);
+
+      date = res.getDate("col1");
+      assertNotNull(date);
+      assertEquals(Date.valueOf("2014-01-01"), date);
+
+      Time time = res.getTime(2);
+      assertNotNull(time);
+      assertEquals(Time.valueOf("01:00:00"), time);
+
+      time = res.getTime("col2");
+      assertNotNull(time);
+      assertEquals(Time.valueOf("01:00:00"), time);
+
+      Timestamp timestamp = res.getTimestamp(3);
+      assertNotNull(timestamp);
+      assertEquals(Timestamp.valueOf("2014-01-01 01:00:00"), timestamp);
+
+      timestamp = res.getTimestamp("col3");
+      assertNotNull(timestamp);
+      assertEquals(Timestamp.valueOf("2014-01-01 01:00:00"), timestamp);
+
+      // assert with timezone
+      // With a GMT+9 calendar the time-of-day values shift by +9 hours
+      // (01:00 -> 10:00) while the date-only value is unaffected here.
+      Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT+9"));
+      date = res.getDate(1, cal);
+      assertNotNull(date);
+      assertEquals("2014-01-01", date.toString());
+
+      date = res.getDate("col1", cal);
+      assertNotNull(date);
+      assertEquals("2014-01-01", date.toString());
+
+      time = res.getTime(2, cal);
+      assertNotNull(time);
+      assertEquals("10:00:00", time.toString());
+
+      time = res.getTime("col2", cal);
+      assertNotNull(time);
+      assertEquals("10:00:00", time.toString());
+
+      timestamp = res.getTimestamp(3, cal);
+      assertNotNull(timestamp);
+      assertEquals("2014-01-01 10:00:00.0", timestamp.toString());
+
+      timestamp = res.getTimestamp("col3", cal);
+      assertNotNull(timestamp);
+      assertEquals("2014-01-01 10:00:00.0", timestamp.toString());
+    } finally {
+      if (res != null) {
+        res.close();
+      }
+
+      client.close();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/a4106883/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestSQLState.java
----------------------------------------------------------------------
diff --git a/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestSQLState.java b/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestSQLState.java
new file mode 100644
index 0000000..e711524
--- /dev/null
+++ b/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestSQLState.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.jdbc;
+
+import com.google.common.collect.Maps;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.tajo.*;
+import org.apache.tajo.catalog.CatalogUtil;
+import org.apache.tajo.catalog.Column;
+import org.apache.tajo.catalog.TableDesc;
+import org.apache.tajo.client.QueryClient;
+import org.apache.tajo.client.QueryStatus;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.net.InetSocketAddress;
+import java.sql.*;
+import java.util.*;
+
+import static org.apache.tajo.TajoConstants.DEFAULT_DATABASE_NAME;
+import static org.junit.Assert.*;
+
+@Category(IntegrationTest.class)
+public class TestSQLState extends QueryTestCaseBase {
+  private static InetSocketAddress tajoMasterAddress;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    tajoMasterAddress = testingCluster.getMaster().getTajoMasterClientService().getBindAddress();
+    Class.forName("org.apache.tajo.jdbc.TajoDriver").newInstance();
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+  }
+
+  static String buildConnectionUri(String hostName, int port, String databaseName) {
+    return "jdbc:tajo://" + hostName + ":" + port + "/" + databaseName;
+  }
+
+  private Connection makeConnection() throws SQLException {
+    String connUri = buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+        DEFAULT_DATABASE_NAME);
+    Connection conn = DriverManager.getConnection(connUri);
+    assertTrue(conn.isValid(100));
+
+    return conn;
+  }
+
+  public void assertSQLState(String sql, String sqlState) throws SQLException {
+    Connection conn = null;
+    Statement stmt = null;
+    ResultSet res = null;
+
+    try {
+      conn = makeConnection();
+      stmt = conn.createStatement();
+      res = stmt.executeQuery(sql);
+    } catch (SQLException se) {
+      assertEquals(sqlState, se.getSQLState());
+    } catch (Throwable t) {
+      fail(t.getMessage());
+    } finally {
+      CatalogUtil.closeQuietly(stmt, res);
+      CatalogUtil.closeQuietly(conn);
+    }
+  }
+
+  @Test
+  public void testSyntaxError() throws Exception {
+    assertSQLState("selec x,y,x from lineitem", "42601");
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/a4106883/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestTajoDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestTajoDatabaseMetaData.java b/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestTajoDatabaseMetaData.java
new file mode 100644
index 0000000..8ee6755
--- /dev/null
+++ b/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestTajoDatabaseMetaData.java
@@ -0,0 +1,504 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.jdbc;
+
+import com.google.common.collect.Sets;
+import org.apache.tajo.QueryTestCaseBase;
+import org.apache.tajo.TajoConstants;
+import org.apache.tajo.catalog.CatalogUtil;
+import org.apache.tajo.common.type.TajoTypeUtil;
+import org.apache.tajo.util.TUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.net.InetSocketAddress;
+import java.sql.*;
+import java.util.*;
+
+import static org.junit.Assert.*;
+
+public class TestTajoDatabaseMetaData extends QueryTestCaseBase {
+  private static InetSocketAddress tajoMasterAddress;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    // Resolve the master's client RPC address and register the Tajo JDBC
+    // driver once for all tests in this class.
+    tajoMasterAddress = testingCluster.getMaster().getTajoMasterClientService().getBindAddress();
+    Class.forName("org.apache.tajo.jdbc.TajoDriver").newInstance();
+  }
+
+  /**
+   * Drains the result set, collecting the value of the given column from every
+   * row.  The result set is left positioned after the last row (not closed).
+   */
+  public static List<String> getListFromResultSet(ResultSet resultSet, String columnName) throws SQLException {
+    List<String> list = new ArrayList<String>();
+    while(resultSet.next()) {
+      list.add(resultSet.getString(columnName));
+    }
+    return list;
+  }
+
+  /**
+   * Exercises Connection.setCatalog/getCatalog against freshly created
+   * databases, plus DatabaseMetaData.getSchemas/getTableTypes, and cleans
+   * them up afterwards.  The mixed-case "Jdbc_Test2" branch checks quoted,
+   * case-sensitive identifiers, which HiveCatalog does not support.
+   *
+   * NOTE(review): conn/pstmt are not closed in a finally block, so an
+   * assertion failure leaks them — acceptable in a test but worth confirming.
+   */
+  @Test
+  public void testSetAndGetCatalogAndSchema() throws Exception {
+    String connUri = TestTajoJdbc.buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+        TajoConstants.DEFAULT_DATABASE_NAME);
+    Connection conn = DriverManager.getConnection(connUri);
+
+    assertDatabaseNotExists("jdbc_test1");
+    PreparedStatement pstmt = conn.prepareStatement("CREATE DATABASE jdbc_test1");
+    pstmt.executeUpdate();
+    assertDatabaseExists("jdbc_test1");
+    pstmt.close();
+
+    if (!testingCluster.isHiveCatalogStoreRunning()) {
+      assertDatabaseNotExists("Jdbc_Test2");
+      pstmt = conn.prepareStatement("CREATE DATABASE \"Jdbc_Test2\"");
+      pstmt.executeUpdate();
+      assertDatabaseExists("Jdbc_Test2");
+      pstmt.close();
+    }
+
+    // Switch catalogs back and forth and verify getCatalog() tracks it.
+    conn.setCatalog("jdbc_test1");
+    assertEquals("jdbc_test1", conn.getCatalog());
+    if (!testingCluster.isHiveCatalogStoreRunning()) {
+      conn.setCatalog("Jdbc_Test2");
+      assertEquals("Jdbc_Test2", conn.getCatalog());
+    }
+    conn.setCatalog("jdbc_test1");
+    assertEquals("jdbc_test1", conn.getCatalog());
+
+    ResultSet resultSet = conn.getMetaData().getSchemas();
+    assertResultSet(resultSet, "getSchemas1.result");
+    resultSet.close();
+
+    resultSet = conn.getMetaData().getSchemas("jdbc_test1", "%");
+    assertResultSet(resultSet, "getSchemas2.result");
+    resultSet.close();
+
+    resultSet = conn.getMetaData().getTableTypes();
+    assertResultSet(resultSet, "getTableTypes.result");
+    resultSet.close();
+
+    // Clean up: return to the default catalog before dropping.
+    conn.setCatalog(TajoConstants.DEFAULT_DATABASE_NAME);
+    pstmt = conn.prepareStatement("DROP DATABASE jdbc_test1");
+    pstmt.executeUpdate();
+    pstmt.close();
+    if (!testingCluster.isHiveCatalogStoreRunning()) {
+      pstmt = conn.prepareStatement("DROP DATABASE \"Jdbc_Test2\"");
+      pstmt.executeUpdate();
+      pstmt.close();
+    }
+
+    conn.close();
+  }
+
+  /**
+   * Verifies DatabaseMetaData.getCatalogs()/getTables() by creating databases
+   * jdbc_test3 (and case-sensitive Jdbc_Test4 when not on HiveCatalog), each
+   * with two tables, then diffing the catalog list against the pre-existing one.
+   */
+  @Test
+  public void testGetCatalogsAndTables() throws Exception {
+    String connUri = TestTajoJdbc.buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+        TajoConstants.DEFAULT_DATABASE_NAME);
+    Connection defaultConnect = DriverManager.getConnection(connUri);
+
+    DatabaseMetaData dbmd = defaultConnect.getMetaData();
+    List<String> existingDatabases = getListFromResultSet(dbmd.getCatalogs(), "TABLE_CAT");
+
+    // create database "jdbc_test3" and its tables
+    assertDatabaseNotExists("jdbc_test3");
+    PreparedStatement pstmt = defaultConnect.prepareStatement("CREATE DATABASE jdbc_test3");
+    pstmt.executeUpdate();
+    assertDatabaseExists("jdbc_test3");
+    pstmt.close();
+    pstmt = defaultConnect.prepareStatement("CREATE TABLE jdbc_test3.table1 (age int)");
+    pstmt.executeUpdate();
+    pstmt.close();
+    pstmt = defaultConnect.prepareStatement("CREATE TABLE jdbc_test3.table2 (age int)");
+    pstmt.executeUpdate();
+    pstmt.close();
+
+    if (!testingCluster.isHiveCatalogStoreRunning()) {
+      // create case-sensitive database "Jdbc_Test4" and its tables
+      assertDatabaseNotExists("Jdbc_Test4");
+      pstmt = defaultConnect.prepareStatement("CREATE DATABASE \"Jdbc_Test4\"");
+      pstmt.executeUpdate();
+      assertDatabaseExists("Jdbc_Test4");
+      pstmt.close();
+
+      pstmt = defaultConnect.prepareStatement("CREATE TABLE \"Jdbc_Test4\".table3 (age int)");
+      pstmt.executeUpdate();
+      pstmt.close();
+      pstmt = defaultConnect.prepareStatement("CREATE TABLE \"Jdbc_Test4\".table4 (age int)");
+      pstmt.executeUpdate();
+      pstmt.close();
+    }
+
+    // verify getCatalogs(): only the newly created databases should remain
+    // after removing the pre-existing ones.
+    dbmd = defaultConnect.getMetaData();
+    List<String> newDatabases = getListFromResultSet(dbmd.getCatalogs(), "TABLE_CAT");
+
+    newDatabases.removeAll(existingDatabases);
+    if (!testingCluster.isHiveCatalogStoreRunning()) {
+      assertEquals(2, newDatabases.size());
+    } else {
+      assertEquals(1, newDatabases.size());
+    }
+    assertTrue(newDatabases.contains("jdbc_test3"));
+    if (!testingCluster.isHiveCatalogStoreRunning()) {
+      assertTrue(newDatabases.contains("Jdbc_Test4"));
+    }
+
+    // verify getTables()
+    ResultSet res = defaultConnect.getMetaData().getTables("jdbc_test3", null, null, null);
+    assertResultSet(res, "getTables1.result");
+    res.close();
+
+    if (!testingCluster.isHiveCatalogStoreRunning()) {
+      res = defaultConnect.getMetaData().getTables("Jdbc_Test4", null, null, null);
+      assertResultSet(res, "getTables2.result");
+      res.close();
+    }
+
+    defaultConnect.close();
+
+    // jdbc_test3 database connection test: a URI naming the database should
+    // connect with that database as the current catalog.
+    String jdbcTest1ConnUri =
+        TestTajoJdbc.buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(), "jdbc_test3");
+    Connection jdbcTest1Conn = DriverManager.getConnection(jdbcTest1ConnUri);
+    assertEquals("jdbc_test3", jdbcTest1Conn.getCatalog());
+    jdbcTest1Conn.close();
+
+    client.selectDatabase("default");
+    executeString("DROP TABLE jdbc_test3.table1");
+    executeString("DROP TABLE jdbc_test3.table2");
+    executeString("DROP DATABASE jdbc_test3");
+
+    if (!testingCluster.isHiveCatalogStoreRunning()) {
+      String jdbcTest2ConnUri =
+          TestTajoJdbc.buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(), "Jdbc_Test4");
+      Connection jdbcTest2Conn = DriverManager.getConnection(jdbcTest2ConnUri);
+      assertEquals("Jdbc_Test4", jdbcTest2Conn.getCatalog());
+      jdbcTest2Conn.close();
+
+      client.selectDatabase("default");
+      executeString("DROP TABLE \"Jdbc_Test4\".table3");
+      executeString("DROP TABLE \"Jdbc_Test4\".table4");
+      executeString("DROP DATABASE \"Jdbc_Test4\"");
+    }
+  }
+
+  /**
+   * Verifies LIKE-pattern matching in DatabaseMetaData.getTables() across two
+   * databases, each holding tb_0..tb_2 and table_0_ptn..table_2_ptn:
+   * full wildcard, leading wildcard, trailing wildcard, and an escaped
+   * underscore pattern spanning all catalogs.
+   */
+  @Test
+  public void testGetTablesWithPattern() throws Exception {
+    String connUri = TestTajoJdbc.buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+        TajoConstants.DEFAULT_DATABASE_NAME);
+    Connection conn = DriverManager.getConnection(connUri);
+
+    Map<String,List<String>> tables = new HashMap<String,List<String>>();
+    assertDatabaseNotExists("db_1");
+    executeString("CREATE DATABASE db_1");
+    assertDatabaseExists("db_1");
+    for (int i = 0; i < 3; i++) {
+      String tableName = "tb_" + i;
+      TUtil.putToNestedList(tables, "db_1", tableName);
+      executeString("CREATE TABLE db_1." + tableName + " (age int)");
+    }
+    for (int i = 0; i < 3; i++) {
+      String tableName = "table_" + i + "_ptn";
+      TUtil.putToNestedList(tables, "db_1", tableName);
+      executeString("CREATE TABLE db_1." + tableName + " (age int)");
+    }
+
+    assertDatabaseNotExists("db_2");
+    executeString("CREATE DATABASE db_2");
+    assertDatabaseExists("db_2");
+    for (int i = 0; i < 3; i++) {
+      String tableName = "tb_" + i;
+      TUtil.putToNestedList(tables, "db_2", tableName);
+      executeString("CREATE TABLE db_2." + tableName + " (age int)");
+    }
+    for (int i = 0; i < 3; i++) {
+      String tableName = "table_" + i + "_ptn";
+      TUtil.putToNestedList(tables, "db_2", tableName);
+      executeString("CREATE TABLE db_2." + tableName + " (age int)");
+    }
+
+    // all wildcard test
+    Set<String> tableList =
+        Sets.newHashSet(getListFromResultSet(conn.getMetaData().getTables("db_2", null, "%", null), "TABLE_NAME"));
+    assertEquals(Sets.newHashSet(tables.get("db_2")), tableList);
+
+    // leading wildcard test
+    tableList =
+        Sets.newHashSet(getListFromResultSet(conn.getMetaData().getTables("db_2", null, "%_ptn", null), "TABLE_NAME"));
+    assertEquals(Sets.newHashSet("table_0_ptn", "table_1_ptn", "table_2_ptn"), tableList);
+
+    // tailing wildcard test
+    tableList =
+        Sets.newHashSet(getListFromResultSet(conn.getMetaData().getTables("db_2", null, "tb_%", null), "TABLE_NAME"));
+    assertEquals(Sets.newHashSet("tb_0", "tb_1", "tb_2"), tableList);
+
+    // escaped-underscore pattern across all catalogs: must match exactly the
+    // six tb_<i> tables.
+    ResultSet resultSet = conn.getMetaData().getTables(null, null, "tb\\_%", null);
+    int i = 0;
+    while(resultSet.next()) {
+      // BUG FIX: the contains() result was previously discarded, so a wrong
+      // TABLE_NAME could never fail the test.
+      assertTrue(tables.get(resultSet.getString("TABLE_CAT")).contains(resultSet.getString("TABLE_NAME")));
+      i++;
+    }
+    assertEquals(6, i);
+
+    executeString("DROP DATABASE db_1");
+    executeString("DROP DATABASE db_2");
+
+    // BUG FIX: the connection was previously leaked.
+    conn.close();
+  }
+
+  /**
+   * Builds a column DDL fragment "<db>_<table>_col int" for odd i and
+   * "<db>_<table>_COL int" for even i, exercising case-sensitive identifiers.
+   */
+  private static String getTestColName(String dbName, String tableName, int i) {
+    String suffix = (i % 2 == 1) ? "_col" : "_COL";
+    return CatalogUtil.denormalizeIdentifier(dbName + "_" + tableName + suffix) + " int";
+  }
+
+  /**
+   * Verifies LIKE-pattern matching in DatabaseMetaData.getColumns() over
+   * catalog, table and column patterns, including case-sensitive matches
+   * and escaped underscores.  Skipped on HiveCatalog (no case-sensitive
+   * identifiers).
+   */
+  @Test
+  public void testGetColumnsWithPattern() throws Exception {
+    if (!testingCluster.isHiveCatalogStoreRunning()) {
+      String connUri = TestTajoJdbc.buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+          TajoConstants.DEFAULT_DATABASE_NAME);
+      Connection conn = DriverManager.getConnection(connUri);
+
+      // Below creates the following 12 tables (name uppercased when i is even):
+      // db<j>.tb<i>,    j = {1,2}, 3 <= i < 6
+      // db<j>.table<i>, j = {1,2}, 3 <= i < 6
+      // Each has a regular "<db>_<table>_col" column and a
+      // "<db>_<table>_COL" partition column (see getTestColName).
+
+      Map<String,List<String>> tables = new HashMap<String,List<String>>();
+      for (int j = 1; j <= 2; j++) {
+        String dbName = "db" + j;
+        assertDatabaseNotExists(dbName);
+        executeString("CREATE DATABASE " + dbName).close();
+        assertDatabaseExists(dbName);
+        for (int i = 3; i < 6; i++) {
+          String tableName = "tb" + i;
+
+
+          if (i % 2 == 0) {
+            tableName = tableName.toUpperCase();
+          }
+
+          TUtil.putToNestedList(tables, dbName, tableName);
+
+          executeString("CREATE TABLE " + dbName + "." + CatalogUtil.denormalizeIdentifier(tableName) +
+              " (" + getTestColName(dbName, tableName, 1) +
+              ") PARTITION BY COLUMN (" + getTestColName(dbName, tableName, 2) + ")").close();
+          assertTableExists(dbName + "." + tableName);
+        }
+        for (int i = 3; i < 6; i++) {
+          String tableName = "table" + i;
+
+
+          if (i % 2 == 0) {
+            tableName = tableName.toUpperCase();
+          }
+
+          TUtil.putToNestedList(tables, dbName, tableName);
+
+          executeString("CREATE TABLE " + dbName + "." + CatalogUtil.denormalizeIdentifier(tableName) +
+              " (" + getTestColName(dbName, tableName, 1) +
+              ") PARTITION BY COLUMN (" + getTestColName(dbName, tableName, 2) + ")").close();
+          assertTableExists(dbName + "." + tableName);
+        }
+      }
+
+      // all wildcard test on columns
+      Set<String> columnList =
+          Sets.newHashSet(getListFromResultSet(conn.getMetaData().getColumns("db2", null, "tb3", "%"),
+              "COLUMN_NAME"));
+      assertEquals(Sets.newHashSet("db2_tb3_col", "db2_tb3_COL"), columnList);
+
+      // tailing wildcard + case sensitive test on columns
+      columnList = Sets.newHashSet(getListFromResultSet(conn.getMetaData().getColumns("db2", null, "tb3", "%col"),
+          "COLUMN_NAME"));
+      assertEquals(Sets.newHashSet("db2_tb3_col"), columnList);
+      columnList =
+          Sets.newHashSet(getListFromResultSet(conn.getMetaData().getColumns("db2", null, "tb3", "%COL"),
+              "COLUMN_NAME"));
+      assertEquals(Sets.newHashSet("db2_tb3_COL"), columnList);
+
+      // tailing wildcard test on columns (backslash escapes the underscores)
+      columnList =
+          Sets.newHashSet(getListFromResultSet(conn.getMetaData().getColumns("db2", null, "tb3", "db2\\_tb3\\_%"),
+              "COLUMN_NAME"));
+      assertEquals(Sets.newHashSet("db2_tb3_col", "db2_tb3_COL"), columnList);
+      columnList =
+          Sets.newHashSet(getListFromResultSet(conn.getMetaData().getColumns("db2", null, "%", "db2\\_tb3\\_%"),
+              "COLUMN_NAME"));
+      assertEquals(Sets.newHashSet("db2_tb3_col", "db2_tb3_COL"), columnList);
+
+      // leading wildcard test on tables
+      columnList =
+          Sets.newHashSet(getListFromResultSet(conn.getMetaData().getColumns("db1", null, "%3", "%"),
+              "COLUMN_NAME"));
+      assertEquals(
+          Sets.newHashSet(
+              "db1_tb3_col", "db1_tb3_COL",
+              "db1_table3_col", "db1_table3_COL"),
+          columnList);
+      columnList =
+          Sets.newHashSet(getListFromResultSet(conn.getMetaData().getColumns("db2", null, "%3", "%"),
+              "COLUMN_NAME"));
+      assertEquals(
+          Sets.newHashSet(
+              "db2_tb3_col", "db2_tb3_COL",
+              "db2_table3_col", "db2_table3_COL"),
+          columnList);
+
+      // tailing wildcard + case sensitive test on tables
+      columnList =
+          Sets.newHashSet(getListFromResultSet(conn.getMetaData().getColumns("db2", null, "TABLE%", "%"),
+              "COLUMN_NAME"));
+      assertEquals(
+          Sets.newHashSet(
+              "db2_TABLE4_col", "db2_TABLE4_COL"), columnList);
+
+      columnList =
+          Sets.newHashSet(getListFromResultSet(conn.getMetaData().getColumns("db2", null, "TABLE4", "%"),
+              "COLUMN_NAME"));
+      assertEquals(
+          Sets.newHashSet(
+              "db2_TABLE4_col", "db2_TABLE4_COL"),
+          columnList);
+
+      // tailing wildcard test on tables
+      columnList =
+          Sets.newHashSet(getListFromResultSet(conn.getMetaData().getColumns("db2", null, "table%", "%"),
+              "COLUMN_NAME"));
+      assertEquals(
+          Sets.newHashSet(
+              "db2_table3_col", "db2_table3_COL",
+              "db2_table5_col", "db2_table5_COL"),
+          columnList);
+
+      // wildcard test on database
+      columnList =
+          Sets.newHashSet(getListFromResultSet(conn.getMetaData().getColumns(null, null, "%3", "db1_tb3%"),
+              "COLUMN_NAME"));
+      assertEquals(Sets.newHashSet("db1_tb3_col", "db1_tb3_COL"), columnList);
+
+      executeString("DROP DATABASE db1");
+      executeString("DROP DATABASE db2");
+    }
+  }
+
+  /** Asserts that a metadata call returned a non-null but empty result set. */
+  private static void assertEmptyResultSet(ResultSet res) throws SQLException {
+    assertNotNull(res);
+    assertFalse(res.next());
+  }
+
+  /**
+   * Metadata features Tajo does not implement (procedures, UDTs, privileges,
+   * keys, indexes, ...) must each return an empty — never null — result set.
+   */
+  @Test
+  public void testEmptyMetaInfo() throws Exception {
+    if (!testingCluster.isHiveCatalogStoreRunning()) {
+      String connUri = TestTajoJdbc.buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+          TajoConstants.DEFAULT_DATABASE_NAME);
+      Connection conn = DriverManager.getConnection(connUri);
+
+      try {
+        DatabaseMetaData meta = conn.getMetaData();
+
+        // Refactor: the repeated assertNotNull/assertFalse pairs are folded
+        // into assertEmptyResultSet; the calls and their order are unchanged.
+        assertEmptyResultSet(meta.getProcedures(null, null, null));
+        assertEmptyResultSet(meta.getProcedureColumns(null, null, null, null));
+        assertEmptyResultSet(meta.getUDTs(null, null, null, null));
+        assertEmptyResultSet(meta.getColumnPrivileges(null, null, null, null));
+        assertEmptyResultSet(meta.getTablePrivileges(null, null, null));
+        assertEmptyResultSet(meta.getBestRowIdentifier(null, null, null, 0, false));
+        assertEmptyResultSet(meta.getVersionColumns(null, null, null));
+        assertEmptyResultSet(meta.getPrimaryKeys(null, null, null));
+        assertEmptyResultSet(meta.getImportedKeys(null, null, null));
+        assertEmptyResultSet(meta.getExportedKeys(null, null, null));
+        assertEmptyResultSet(meta.getCrossReference(null, null, null, null, null, null));
+        assertEmptyResultSet(meta.getIndexInfo(null, null, null, false, false));
+        assertEmptyResultSet(meta.getClientInfoProperties());
+      } finally {
+        conn.close();
+      }
+    }
+  }
+
+  /**
+   * Checks DatabaseMetaData.getTypeInfo(): every advertised type row must
+   * populate all standard columns except SQL_DATA_TYPE and SQL_DATETIME_SUB,
+   * and the row count must match TajoTypeUtil's type list.
+   */
+  @Test
+  public void testGetTypeInfo() throws Exception {
+    if (!testingCluster.isHiveCatalogStoreRunning()) {
+      String connUri = TestTajoJdbc.buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+          TajoConstants.DEFAULT_DATABASE_NAME);
+      Connection conn = DriverManager.getConnection(connUri);
+
+      try {
+        DatabaseMetaData meta = conn.getMetaData();
+
+        ResultSet res = meta.getTypeInfo();
+
+        assertNotNull(res);
+
+        int numTypes = 0;
+
+        String[] columnNames = {"TYPE_NAME", "DATA_TYPE", "PRECISION", "LITERAL_PREFIX", "LITERAL_SUFFIX",
+            "CREATE_PARAMS", "NULLABLE", "CASE_SENSITIVE", "SEARCHABLE", "UNSIGNED_ATTRIBUTE",
+            "FIXED_PREC_SCALE", "AUTO_INCREMENT", "LOCAL_TYPE_NAME", "MINIMUM_SCALE", "MAXIMUM_SCALE",
+            "SQL_DATA_TYPE", "SQL_DATETIME_SUB", "NUM_PREC_RADIX"};
+
+        while (res.next()) {
+          for (int i = 0; i < columnNames.length; i++) {
+            Object value = res.getObject(columnNames[i]);
+            // Indices 15 and 16 are SQL_DATA_TYPE and SQL_DATETIME_SUB, which
+            // Tajo leaves unset; every other column must be non-null.
+            if (i == 15 || i == 16) {
+              assertNull(value);
+            } else {
+              assertNotNull(value);
+            }
+          }
+          numTypes++;
+        }
+
+        assertEquals(numTypes, TajoTypeUtil.getTypeInfos().size());
+      } finally {
+        conn.close();
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/a4106883/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestTajoJdbc.java
----------------------------------------------------------------------
diff --git a/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestTajoJdbc.java b/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestTajoJdbc.java
new file mode 100644
index 0000000..acbc1a8
--- /dev/null
+++ b/tajo-core-tests/src/test/java/org/apache/tajo/jdbc/TestTajoJdbc.java
@@ -0,0 +1,608 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.jdbc;
+
+import com.google.common.collect.Maps;
+import org.apache.tajo.*;
+import org.apache.tajo.catalog.CatalogUtil;
+import org.apache.tajo.catalog.Column;
+import org.apache.tajo.catalog.TableDesc;
+import org.apache.tajo.client.QueryClient;
+import org.apache.tajo.client.QueryStatus;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.net.InetSocketAddress;
+import java.sql.*;
+import java.util.*;
+
+import static org.apache.tajo.TajoConstants.DEFAULT_DATABASE_NAME;
+import static org.junit.Assert.*;
+
+@Category(IntegrationTest.class)
+public class TestTajoJdbc extends QueryTestCaseBase {
+  private static InetSocketAddress tajoMasterAddress;
+
  @BeforeClass
  public static void setUp() throws Exception {
    // Resolve the live client-service address of the in-process test cluster so
    // every test can build a JDBC URI against it.
    tajoMasterAddress = testingCluster.getMaster().getTajoMasterClientService().getBindAddress();
    // Force-load the Tajo JDBC driver so it registers itself with DriverManager.
    Class.forName("org.apache.tajo.jdbc.TajoDriver").newInstance();
  }
+
  @AfterClass
  public static void tearDown() throws Exception {
    // Nothing to clean up: connections are closed per-test and the shared
    // cluster lifecycle is managed by QueryTestCaseBase.
  }
+
+  public static String buildConnectionUri(String hostName, int port, String databaseName) {
+    return "jdbc:tajo://" + hostName + ":" + port + "/" + databaseName;
+  }
+
+  @Test
+  public void testAcceptURL() throws SQLException {
+    TajoDriver driver = new TajoDriver();
+    assertTrue(driver.acceptsURL("jdbc:tajo:"));
+    assertFalse(driver.acceptsURL("jdbc:taju:"));
+  }
+
+  @Test(expected = SQLException.class)
+  public void testGetConnection() throws SQLException {
+    DriverManager.getConnection("jdbc:taju://" + tajoMasterAddress.getHostName() + ":" + tajoMasterAddress.getPort()
+      + "/default");
+  }
+
+  @Test
+  public void testStatement() throws Exception {
+    String connUri = buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+      DEFAULT_DATABASE_NAME);
+    Connection conn = DriverManager.getConnection(connUri);
+    assertTrue(conn.isValid(100));
+
+    Statement stmt = null;
+    ResultSet res = null;
+    try {
+      stmt = conn.createStatement();
+
+      res = stmt.executeQuery("select l_returnflag, l_linestatus, count(*) as count_order from lineitem " +
+        "group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus");
+
+      try {
+        Map<String, Integer> result = Maps.newHashMap();
+        result.put("NO", 3);
+        result.put("RF", 2);
+
+        assertNotNull(res);
+        assertTrue(res.next());
+        assertTrue(result.get(res.getString(1) + res.getString(2)) == res.getInt(3));
+        assertTrue(res.next());
+        assertTrue(result.get(res.getString(1) + res.getString(2)) == res.getInt(3));
+        assertFalse(res.next());
+
+        ResultSetMetaData rsmd = res.getMetaData();
+        assertEquals(3, rsmd.getColumnCount());
+        assertEquals("l_returnflag", rsmd.getColumnName(1));
+        assertEquals("l_linestatus", rsmd.getColumnName(2));
+        assertEquals("count_order", rsmd.getColumnName(3));
+      } finally {
+        res.close();
+      }
+    } finally {
+      if (res != null) {
+        res.close();
+      }
+      if (stmt != null) {
+        stmt.close();
+      }
+      if (conn != null) {
+        conn.close();
+      }
+    }
+  }
+
+  @Test
+  public void testPreparedStatement() throws Exception {
+    String connUri = buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+      TajoConstants.DEFAULT_DATABASE_NAME);
+    Connection conn = DriverManager.getConnection(connUri);
+    assertTrue(conn.isValid(100));
+
+    PreparedStatement stmt = null;
+    ResultSet res = null;
+    try {
+      /*
+      test data set
+      1,17.0,N
+      1,36.0,N
+      2,38.0,N
+      3,45.0,R
+      3,49.0,R
+      */
+
+      String sql =
+        "select l_orderkey, l_quantity, l_returnflag from lineitem where l_quantity > ? and l_returnflag = ?";
+
+      stmt = conn.prepareStatement(sql);
+
+      stmt.setInt(1, 20);
+      stmt.setString(2, "N");
+
+      res = stmt.executeQuery();
+
+      ResultSetMetaData rsmd = res.getMetaData();
+      assertEquals(3, rsmd.getColumnCount());
+      assertEquals("l_orderkey", rsmd.getColumnName(1));
+      assertEquals("l_quantity", rsmd.getColumnName(2));
+      assertEquals("l_returnflag", rsmd.getColumnName(3));
+
+      try {
+        int numRows = 0;
+        String[] resultData = {"136.0N", "238.0N"};
+        while (res.next()) {
+          assertEquals(resultData[numRows],
+            ("" + res.getObject(1).toString() + res.getObject(2).toString() + res.getObject(3).toString()));
+          numRows++;
+        }
+        assertEquals(2, numRows);
+      } finally {
+        res.close();
+      }
+
+      stmt.setInt(1, 20);
+      stmt.setString(2, "R");
+
+      res = stmt.executeQuery();
+
+      rsmd = res.getMetaData();
+      assertEquals(3, rsmd.getColumnCount());
+      assertEquals("l_orderkey", rsmd.getColumnName(1));
+      assertEquals("l_quantity", rsmd.getColumnName(2));
+      assertEquals("l_returnflag", rsmd.getColumnName(3));
+
+      try {
+        int numRows = 0;
+        String[] resultData = {"345.0R", "349.0R"};
+        while (res.next()) {
+          assertEquals(resultData[numRows],
+            ("" + res.getObject(1).toString() + res.getObject(2).toString() + res.getObject(3).toString()));
+          numRows++;
+        }
+        assertEquals(2, numRows);
+      } finally {
+        res.close();
+      }
+    } finally {
+      if (res != null) {
+        res.close();
+      }
+      if (stmt != null) {
+        stmt.close();
+      }
+      if (conn != null) {
+        conn.close();
+      }
+    }
+  }
+
+  @Test
+  public void testDatabaseMetaDataGetTable() throws Exception {
+    String connUri = buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+      TajoConstants.DEFAULT_DATABASE_NAME);
+    Connection conn = DriverManager.getConnection(connUri);
+    assertTrue(conn.isValid(100));
+
+    DatabaseMetaData dbmd = conn.getMetaData();
+
+    ResultSet rs = null;
+
+    try {
+      rs = dbmd.getTables("default", null, null, null);
+
+      ResultSetMetaData rsmd = rs.getMetaData();
+      int numCols = rsmd.getColumnCount();
+      assertEquals(5, numCols);
+
+      Set<String> retrivedViaJavaAPI = new HashSet<String>(client.getTableList("default"));
+
+      Set<String> retrievedViaJDBC = new HashSet<String>();
+      while (rs.next()) {
+        retrievedViaJDBC.add(rs.getString("TABLE_NAME"));
+      }
+      assertEquals(retrievedViaJDBC, retrivedViaJavaAPI);
+    } finally {
+      if (rs != null) {
+        rs.close();
+      }
+    }
+
+    assertTrue(conn.isValid(100));
+    conn.close();
+  }
+
+  @Test
+  public void testDatabaseMetaDataGetColumns() throws Exception {
+    String connUri = buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+      TajoConstants.DEFAULT_DATABASE_NAME);
+    Connection conn = DriverManager.getConnection(connUri);
+    assertTrue(conn.isValid(100));
+
+    DatabaseMetaData dbmd = conn.getMetaData();
+    ResultSet rs = null;
+
+    try {
+      String tableName = "lineitem";
+      rs = dbmd.getColumns(null, null, tableName, null);
+
+      ResultSetMetaData rsmd = rs.getMetaData();
+      int numCols = rsmd.getColumnCount();
+
+      assertEquals(22, numCols);
+      int numColumns = 0;
+
+      TableDesc tableDesc = client.getTableDesc(CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, tableName));
+      assertNotNull(tableDesc);
+
+      List<Column> columns = tableDesc.getSchema().getRootColumns();
+
+      while (rs.next()) {
+        assertEquals(tableName, rs.getString("TABLE_NAME"));
+        assertEquals(columns.get(numColumns).getSimpleName(), rs.getString("COLUMN_NAME"));
+        // TODO assert type
+        numColumns++;
+      }
+
+      assertEquals(16, numColumns);
+    } finally {
+      if (rs != null) {
+        rs.close();
+      }
+    }
+
+    assertTrue(conn.isValid(100));
+    conn.close();
+    assertFalse(conn.isValid(100));
+  }
+
  // Runs the same grouped-aggregate query over two simultaneously open
  // connections, then closes both at the end. Exercises connection isolation:
  // each connection must produce the full, correct result independently.
  @Test
  public void testMultipleConnections() throws Exception {
    String connUri = buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
      TajoConstants.DEFAULT_DATABASE_NAME);

    Connection[] conns = new Connection[2];
    conns[0] = DriverManager.getConnection(connUri);
    conns[1] = DriverManager.getConnection(connUri);

    try {
      for (int i = 0; i < conns.length; i++) {
        Statement stmt = null;
        ResultSet res = null;
        try {
          stmt = conns[i].createStatement();

          res = stmt.executeQuery("select l_returnflag, l_linestatus, count(*) as count_order from lineitem " +
            "group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus");

          try {
            // Expected per-group counts keyed by l_returnflag + l_linestatus.
            Map<String, Integer> result = Maps.newHashMap();
            result.put("NO", 3);
            result.put("RF", 2);

            assertNotNull(res);
            assertTrue(res.next());
            assertTrue(result.get(res.getString(1) + res.getString(2)) == res.getInt(3));
            assertTrue(res.next());
            assertTrue(result.get(res.getString(1) + res.getString(2)) == res.getInt(3));
            assertFalse(res.next());

            ResultSetMetaData rsmd = res.getMetaData();
            assertEquals(3, rsmd.getColumnCount());
            assertEquals("l_returnflag", rsmd.getColumnName(1));
            assertEquals("l_linestatus", rsmd.getColumnName(2));
            assertEquals("count_order", rsmd.getColumnName(3));
          } finally {
            res.close();
          }
        } finally {
          // Closing an already-closed ResultSet is a no-op per the JDBC spec,
          // so this second close after the inner finally is harmless.
          if (res != null) {
            res.close();
          }
          if (stmt != null) {
            stmt.close();
          }
        }
      }
    } finally {
      // Both connections stay open until here; isValid must flip from true to
      // false across each close.
      assertTrue(conns[0].isValid(100));
      conns[0].close();
      assertFalse(conns[0].isValid(100));
      assertTrue(conns[1].isValid(100));
      conns[1].close();
      assertFalse(conns[1].isValid(100));
    }
  }
+
  // Same scenario as testMultipleConnections, except each connection is closed
  // immediately after its query finishes: closing connection 0 must not affect
  // connection 1, which runs afterwards.
  @Test
  public void testMultipleConnectionsSequentialClose() throws Exception {
    String connUri = buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
      DEFAULT_DATABASE_NAME);

    Connection[] conns = new Connection[2];
    conns[0] = DriverManager.getConnection(connUri);
    conns[1] = DriverManager.getConnection(connUri);

    try {
      for (int i = 0; i < conns.length; i++) {
        Statement stmt = null;
        ResultSet res = null;
        try {
          stmt = conns[i].createStatement();

          res = stmt.executeQuery("select l_returnflag, l_linestatus, count(*) as count_order from lineitem " +
            "group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus");

          try {
            // Expected per-group counts keyed by l_returnflag + l_linestatus.
            Map<String, Integer> result = Maps.newHashMap();
            result.put("NO", 3);
            result.put("RF", 2);

            assertNotNull(res);
            assertTrue(res.next());
            assertTrue(result.get(res.getString(1) + res.getString(2)) == res.getInt(3));
            assertTrue(res.next());
            assertTrue(result.get(res.getString(1) + res.getString(2)) == res.getInt(3));
            assertFalse(res.next());

            ResultSetMetaData rsmd = res.getMetaData();
            assertEquals(3, rsmd.getColumnCount());
            assertEquals("l_returnflag", rsmd.getColumnName(1));
            assertEquals("l_linestatus", rsmd.getColumnName(2));
            assertEquals("count_order", rsmd.getColumnName(3));
          } finally {
            res.close();
          }
        } finally {
          if (res != null) {
            res.close();
          }
          if (stmt != null) {
            stmt.close();
          }
          // Sequential close: this connection is released before the next
          // iteration opens its statement.
          conns[i].close();
        }
      }
    } finally {
      // Safety net: only close (and assert validity of) connections the loop
      // did not already close, e.g. when an earlier assertion failed.
      if (!conns[0].isClosed()) {
        assertTrue(conns[0].isValid(100));
        conns[0].close();
        assertFalse(conns[0].isValid(100));
      }
      if (!conns[1].isClosed()) {
        assertTrue(conns[1].isValid(100));
        conns[1].close();
        assertFalse(conns[1].isValid(100));
      }
    }
  }
+
+  @Test
+  public void testCreateTableWithDateAndTimestamp() throws Exception {
+    String tableName = CatalogUtil.normalizeIdentifier("testCreateTableWithDateAndTimestamp");
+
+    int result;
+    Statement stmt = null;
+    ResultSet res = null;
+    Connection conn = null;
+    try {
+      String connUri = buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+        DEFAULT_DATABASE_NAME);
+      conn = DriverManager.getConnection(connUri);
+      assertTrue(conn.isValid(100));
+
+      stmt = conn.createStatement();
+      result = stmt.executeUpdate("create table " + tableName + " (id int, name text, score double"
+        + ", register_date timestamp, update_date date, send_date time)");
+      assertEquals(result, 1);
+
+      res = stmt.executeQuery("select * from " + tableName);
+      assertFalse(res.next());
+
+      ResultSetMetaData rsmd = res.getMetaData();
+      assertNotNull(rsmd);
+      assertEquals(6, rsmd.getColumnCount());
+
+      assertEquals("id", rsmd.getColumnName(1));
+      assertEquals("name", rsmd.getColumnName(2));
+      assertEquals("score", rsmd.getColumnName(3));
+      assertEquals("register_date", rsmd.getColumnName(4));
+      assertEquals("update_date", rsmd.getColumnName(5));
+      assertEquals("send_date", rsmd.getColumnName(6));
+
+      assertEquals("integer", rsmd.getColumnTypeName(1));
+      assertEquals("varchar", rsmd.getColumnTypeName(2));
+      assertEquals("float8", rsmd.getColumnTypeName(3));
+      assertEquals("timestamp", rsmd.getColumnTypeName(4));
+      assertEquals("date", rsmd.getColumnTypeName(5));
+      assertEquals("time", rsmd.getColumnTypeName(6));
+
+    } finally {
+      cleanupQuery(res);
+      if (stmt != null) {
+        stmt.close();
+      }
+
+      if(conn != null) {
+        conn.close();
+      }
+    }
+  }
+
+  @Test
+  public void testSortWithDateTime() throws Exception {
+    Statement stmt = null;
+    ResultSet res = null;
+    Connection conn = null;
+    int result;
+
+    // skip this test if catalog uses HiveCatalogStore.
+    // It is because HiveCatalogStore does not support Time data type.
+    try {
+      if (!testingCluster.isHiveCatalogStoreRunning()) {
+        executeDDL("create_table_with_date_ddl.sql", "table1");
+
+        String connUri = buildConnectionUri(tajoMasterAddress.getHostName(),
+          tajoMasterAddress.getPort(), "TestTajoJdbc");
+
+        conn = DriverManager.getConnection(connUri);
+        assertTrue(conn.isValid(100));
+
+        stmt = conn.createStatement();
+        res = stmt.executeQuery("select col1, col2, col3 from table1 order by col1, col2, col3");
+
+        ResultSetMetaData rsmd = res.getMetaData();
+        assertNotNull(rsmd);
+        assertEquals(3, rsmd.getColumnCount());
+
+        assertEquals("timestamp", rsmd.getColumnTypeName(1));
+        assertEquals("date", rsmd.getColumnTypeName(2));
+        assertEquals("time", rsmd.getColumnTypeName(3));
+
+        assertResultSet(res);
+
+        result = stmt.executeUpdate("drop table table1");
+        assertEquals(result, 1);
+
+      }
+    } finally {
+      cleanupQuery(res);
+      if (stmt != null) {
+        stmt.close();
+      }
+
+      if(conn != null) {
+        conn.close();
+      }
+    }
+  }
+
+
+  @Test
+  public void testAlterTableAddPartition() throws Exception {
+    Statement stmt = null;
+    ResultSet resultSet = null;
+    int retCode = 0;
+    Connection conn = null;
+    int result;
+    String errorMessage = null;
+
+    // skip this test if catalog uses HiveCatalogStore.
+    // It is because HiveCatalogStore does not support Time data type.
+    try {
+      if (!testingCluster.isHiveCatalogStoreRunning()) {
+        String connUri = buildConnectionUri(tajoMasterAddress.getHostName(),
+          tajoMasterAddress.getPort(), "TestTajoJdbc");
+
+        conn = DriverManager.getConnection(connUri);
+        assertTrue(conn.isValid(100));
+
+        String tableName = CatalogUtil.normalizeIdentifier("testAlterTablePartition");
+        resultSet = executeString(
+          "create table " + tableName + " (col1 int4, col2 int4) partition by column(key float8) ");
+        resultSet.close();
+
+        stmt = conn.createStatement();
+        result  = stmt.executeUpdate("alter table " + tableName + " add partition (key = 0.1)");
+        assertEquals(result, 1);
+     }
+    } finally {      
+      cleanupQuery(resultSet);
+      if (stmt != null) {
+        stmt.close();
+      }
+
+      if(conn != null) {
+        conn.close();
+      }
+    }
+  }
+
+  @Test
+  public void testMaxRows() throws Exception {
+    String connUri = buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
+      DEFAULT_DATABASE_NAME);
+    Connection conn = DriverManager.getConnection(connUri);
+    assertTrue(conn.isValid(100));
+    Statement stmt = null;
+    ResultSet res = null;
+    //Parameter value setting for test.
+    final int maxRowsNum = 3;
+    int resultRowsNum = 0, returnMaxRows = 0;
+    try {
+      stmt = conn.createStatement();
+      //set maxRows(3)
+      stmt.setMaxRows(maxRowsNum);
+      //get MaxRows
+      returnMaxRows = stmt.getMaxRows();
+      res = stmt.executeQuery("select * from lineitem");
+      assertNotNull(res);
+      while (res.next()) {
+        //Actuality result Rows.
+        resultRowsNum++;	
+      }
+      //The test success, if maxRowsNum and resultRowsNum and returnMaxRows is same.
+      assertTrue(maxRowsNum == resultRowsNum && maxRowsNum == returnMaxRows);
+    } finally {
+      if (res != null) {
+        cleanupQuery(res);
+      }
+      if (stmt != null) {
+        stmt.close();
+      }
+      if (conn != null) {
+        conn.close();
+      }
+    }
+  }
+
  // Starts a long-running query without blocking on its result, cancels it via
  // Statement.cancel(), and verifies the master reports the query as killed.
  @Test
  public final void testCancel() throws Exception {
    String connUri = buildConnectionUri(tajoMasterAddress.getHostName(), tajoMasterAddress.getPort(),
        DEFAULT_DATABASE_NAME);
    // BLOCK_ON_RESULT=false makes execute() return before the query completes,
    // leaving a running query that can still be cancelled.
    Properties props = new Properties();
    props.setProperty(SessionVars.BLOCK_ON_RESULT.keyname(), "false");

    Connection conn = new JdbcConnection(connUri, props);
    PreparedStatement statement = conn.prepareStatement("select sleep(1) from lineitem");
    try {
      assertTrue("should have result set", statement.execute());
      TajoResultSetBase result = (TajoResultSetBase) statement.getResultSet();
      // Give the query time to pass compilation before cancelling; see the
      // inline todo about query-master kill behavior during compilation.
      Thread.sleep(1000);   // todo query master is not killed properly if it's compiling the query (use 100, if you want see)
      statement.cancel();

      // Cancellation must surface on the master as QUERY_KILLED.
      QueryStatus status = client.getQueryStatus(result.getQueryId());
      assertEquals(TajoProtos.QueryState.QUERY_KILLED, status.getState());
    } finally {
      if (statement != null) {
        statement.close();
      }
      if (conn != null) {
        conn.close();
      }
    }
  }
+}


Mime
View raw message