From: hashutosh@apache.org
To: hcatalog-commits@incubator.apache.org
Subject: svn commit: r1303627 - in /incubator/hcatalog/trunk: ./ src/test/org/apache/hcatalog/ src/test/org/apache/hcatalog/mapreduce/ src/test/org/apache/hcatalog/pig/
Date: Thu, 22 Mar 2012 00:08:18 -0000
Message-Id: <20120322000818.DBE102388847@eris.apache.org>

Author: hashutosh
Date: Thu Mar 22 00:08:18 2012
New Revision: 1303627

URL: http://svn.apache.org/viewvc?rev=1303627&view=rev
Log:
HCATALOG-329 : HCatalog build fails with pig 0.9 (traviscrawford via hashutosh)

Modified:
    incubator/hcatalog/trunk/CHANGES.txt
    incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java
    incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java
    incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java
    incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatLoader.java
    incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatStorer.java
    incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatStorerMulti.java

Modified: incubator/hcatalog/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/CHANGES.txt?rev=1303627&r1=1303626&r2=1303627&view=diff
==============================================================================
--- incubator/hcatalog/trunk/CHANGES.txt (original)
+++ incubator/hcatalog/trunk/CHANGES.txt Thu Mar 22 00:08:18 2012
@@ -26,6 +26,8 @@ Trunk (unreleased changes)
   HCAT-287 Add data api to HCatalog (hashutosh)

   IMPROVEMENTS
+  HCAT-329 HCatalog build fails with pig 0.9 (traviscrawford via hashutosh)
+
   HCAT-233 gitignore file (enis via gates)

   OPTIMIZATIONS
Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java?rev=1303627&r1=1303626&r2=1303627&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java Thu Mar 22 00:08:18 2012
@@ -18,8 +18,12 @@
 package org.apache.hcatalog;

+import java.io.File;
+import java.io.FileWriter;
 import java.io.IOException;

+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -28,11 +32,14 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hcatalog.data.Pair;
+import org.apache.pig.PigServer;

 /**
  * Utility methods for tests
  */
 public class HcatTestUtils {
+  private static final Log LOG = LogFactory.getLog(HcatTestUtils.class);

   public static FsPermission perm007 = FsPermission.createImmutable((short) 0007); // -------rwx
   public static FsPermission perm070 = FsPermission.createImmutable((short) 0070); // ----rwx---
@@ -74,5 +81,21 @@ public class HcatTestUtils {
       hive.dropTable("default", tablename, true, true);
     }
   }
-
+
+  public static void createTestDataFile(String filename, String[] lines) throws IOException {
+    FileWriter writer = null;
+    try {
+      File file = new File(filename);
+      file.deleteOnExit();
+      writer = new FileWriter(file);
+      for (String line : lines) {
+        writer.write(line + "\n");
+      }
+    } finally {
+      if (writer != null) {
+        writer.close();
+      }
+    }
+
+  }
 }
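For context, a minimal sketch of how a test can call the new HcatTestUtils.createTestDataFile() helper. The path and row layout below are illustrative, not part of this commit; note the helper does not create parent directories, so the caller makes them first:

    // Illustrative usage of the helper added above.
    String testDataDir = System.getProperty("user.dir") + "/build/test/data/example";
    new File(testDataDir).mkdirs();             // createTestDataFile() assumes the directory exists

    String[] rows = new String[3];
    for (int i = 0; i < rows.length; i++) {
      rows[i] = i + "\tvalue" + i;              // tab-separated, Pig's default field delimiter
    }
    HcatTestUtils.createTestDataFile(testDataDir + "/input.data", rows);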
Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java?rev=1303627&r1=1303626&r2=1303627&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java Thu Mar 22 00:08:18 2012
@@ -18,13 +18,15 @@
 package org.apache.hcatalog.mapreduce;

+import java.io.File;
+import java.io.FileWriter;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Iterator;
-import java.util.Properties;

 import junit.framework.TestCase;

+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
@@ -32,28 +34,29 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hcatalog.MiniCluster;
 import org.apache.hcatalog.common.HCatConstants;
 import org.apache.hcatalog.pig.HCatLoader;
 import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.impl.util.UDFContext;
-

 public class TestHCatHiveCompatibility extends TestCase {
+  private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+      "/build/test/data/" + TestHCatHiveCompatibility.class.getCanonicalName();
+  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";

-  MiniCluster cluster = MiniCluster.buildCluster();
   private Driver driver;
-  Properties props;
-
   private HiveMetaStoreClient client;
-  String fileName = "/tmp/input.data";
-  String fullFileName;
-
   @Override
   protected void setUp() throws Exception {
+    File f = new File(TEST_WAREHOUSE_DIR);
+    if (f.exists()) {
+      FileUtil.fullyDelete(f);
+    }
+
+    new File(TEST_WAREHOUSE_DIR).mkdirs();

     HiveConf hiveConf = new HiveConf(this.getClass());
     hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
@@ -62,22 +65,15 @@ public class TestHCatHiveCompatibility e
     driver = new Driver(hiveConf);
     client = new HiveMetaStoreClient(hiveConf);
     SessionState.start(new CliSessionState(hiveConf));
-    props = new Properties();
-    props.setProperty("fs.default.name", cluster.getProperties().getProperty("fs.default.name"));
-    fullFileName = cluster.getProperties().getProperty("fs.default.name") + fileName;

-    MiniCluster.deleteFile(cluster, fileName);
     int LOOP_SIZE = 11;
-    String[] input = new String[LOOP_SIZE];
+    File file = new File(INPUT_FILE_NAME);
+    file.deleteOnExit();
+    FileWriter writer = new FileWriter(file);
     for(int i = 0; i < LOOP_SIZE; i++) {
-      input[i] = i + "\t1";
+      writer.write(i + "\t1\n");
     }
-    MiniCluster.createInputFile(cluster, fileName, input);
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    MiniCluster.deleteFile(cluster, fileName);
+    writer.close();
   }

   public void testUnpartedReadWrite() throws Exception{
@@ -93,9 +89,8 @@ public class TestHCatHiveCompatibility e
     Table table = client.getTable("default", "junit_unparted_noisd");
     assertTrue(table.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));

-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
-    server.registerQuery("A = load '"+fullFileName+"' as (a:int);");
+    PigServer server = new PigServer(ExecType.LOCAL);
+    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int);");
     server.registerQuery("store A into 'default.junit_unparted_noisd' using org.apache.hcatalog.pig.HCatStorer();");
     server.registerQuery("B = load 'default.junit_unparted_noisd' using "+HCatLoader.class.getName()+"();");
     Iterator<Tuple> itr = server.openIterator("B");
@@ -133,9 +128,8 @@ public class TestHCatHiveCompatibility e

     assertTrue(table.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));

-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
-    server.registerQuery("A = load '"+fullFileName+"' as (a:int);");
+    PigServer server = new PigServer(ExecType.LOCAL);
+    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int);");
     server.registerQuery("store A into 'default.junit_parted_noisd' using org.apache.hcatalog.pig.HCatStorer('b=42');");
     server.registerQuery("B = load 'default.junit_parted_noisd' using "+HCatLoader.class.getName()+"();");
     Iterator<Tuple> itr = server.openIterator("B");
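The setUp() rewrite above is the heart of the MiniCluster removal: each test owns a throwaway warehouse directory under build/test/data, wipes it, and points the embedded metastore at it. A condensed sketch of the pattern (testWarehouseDir stands in for the per-class TEST_WAREHOUSE_DIR constant; this is a sketch, not the committed code):

    // Condensed from the setUp() changes in this patch.
    File warehouse = new File(testWarehouseDir);
    if (warehouse.exists()) {
      FileUtil.fullyDelete(warehouse);          // clean slate for every run
    }
    warehouse.mkdirs();

    HiveConf hiveConf = new HiveConf(getClass());
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, testWarehouseDir);
    Driver driver = new Driver(hiveConf);
    SessionState.start(new CliSessionState(hiveConf));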
Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java?rev=1303627&r1=1303626&r2=1303627&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java Thu Mar 22 00:08:18 2012
@@ -21,10 +21,10 @@ package org.apache.hcatalog.mapreduce;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;

+import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Properties;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.cli.CliSessionState;
@@ -38,7 +38,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
-import org.apache.hcatalog.MiniCluster;
+import org.apache.hcatalog.HcatTestUtils;
 import org.apache.hcatalog.common.HCatConstants;
 import org.apache.hcatalog.common.HCatException;
 import org.apache.hcatalog.common.HCatUtil;
@@ -48,17 +48,16 @@ import org.apache.hcatalog.data.schema.H
 import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.impl.util.UDFContext;
 import org.junit.Test;

 public class TestSequenceFileReadWrite {
+  private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+      "/build/test/data/" + TestSequenceFileReadWrite.class.getCanonicalName();
+  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";

-  private static MiniCluster cluster = MiniCluster.buildCluster();
   private static Driver driver;
-  private static Properties props;
   private static PigServer server;
-  private static final String basicFile = "/tmp/basic.input.data";
-  private static String fullFileNameBasic;
   private static String[] input;
   private static HiveConf hiveConf;

@@ -66,16 +65,12 @@ public class TestSequenceFileReadWrite {
     hiveConf = new HiveConf(this.getClass());
     hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
     hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname,
-        "false");
+    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
     driver = new Driver(hiveConf);
     SessionState.start(new CliSessionState(hiveConf));
-    props = new Properties();
-    props.setProperty("fs.default.name", cluster.getProperties()
-        .getProperty("fs.default.name"));
-    fullFileNameBasic = cluster.getProperties().getProperty(
-        "fs.default.name")
-        + basicFile;
+
+    new File(TEST_WAREHOUSE_DIR).mkdirs();

     int numRows = 3;
     input = new String[numRows];
@@ -84,9 +79,8 @@ public class TestSequenceFileReadWrite {
       String col2 = "b" + i;
       input[i] = i + "," + col1 + "," + col2;
     }
-    MiniCluster.deleteFile(cluster, basicFile);
-    MiniCluster.createInputFile(cluster, basicFile, input);
-    server = new PigServer(ExecType.LOCAL, props);
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+    server = new PigServer(ExecType.LOCAL);
   }

   @Test
@@ -97,10 +91,9 @@
     int retCode1 = driver.run(createTable).getResponseCode();
     assertTrue(retCode1 == 0);

-    UDFContext.getUDFContext().setClientSystemProps();
     server.setBatchOn();
     server.registerQuery("A = load '"
-        + fullFileNameBasic
+        + INPUT_FILE_NAME
         + "' using PigStorage(',') as (a0:int,a1:chararray,a2:chararray);");
     server.registerQuery("store A into 'demo_table' using org.apache.hcatalog.pig.HCatStorer();");
     server.executeBatch();
@@ -127,10 +120,9 @@
     int retCode1 = driver.run(createTable).getResponseCode();
     assertTrue(retCode1 == 0);

-    UDFContext.getUDFContext().setClientSystemProps();
     server.setBatchOn();
     server.registerQuery("A = load '"
-        + fullFileNameBasic
+        + INPUT_FILE_NAME
         + "' using PigStorage(',') as (a0:int,a1:chararray,a2:chararray);");
     server.registerQuery("store A into 'demo_table_1' using org.apache.hcatalog.pig.HCatStorer();");
     server.executeBatch();
@@ -167,7 +159,7 @@
     job.setOutputKeyClass(NullWritable.class);
     job.setOutputValueClass(DefaultHCatRecord.class);
     job.setInputFormatClass(TextInputFormat.class);
-    TextInputFormat.setInputPaths(job, this.fullFileNameBasic);
+    TextInputFormat.setInputPaths(job, INPUT_FILE_NAME);

     HCatOutputFormat.setOutput(job, OutputJobInfo.create(
         MetaStoreUtils.DEFAULT_DATABASE_NAME, "demo_table_2", null));
@@ -178,7 +170,6 @@
     new FileOutputCommitterContainer(job, null).cleanupJob(job);
     assertTrue(job.isSuccessful());

-    UDFContext.getUDFContext().setClientSystemProps();
     server.setBatchOn();
     server.registerQuery("C = load 'default.demo_table_2' using org.apache.hcatalog.pig.HCatLoader();");
     server.executeBatch();
@@ -213,7 +204,7 @@
     job.setOutputKeyClass(NullWritable.class);
     job.setOutputValueClass(DefaultHCatRecord.class);
     job.setInputFormatClass(TextInputFormat.class);
-    TextInputFormat.setInputPaths(job, this.fullFileNameBasic);
+    TextInputFormat.setInputPaths(job, INPUT_FILE_NAME);

     HCatOutputFormat.setOutput(job, OutputJobInfo.create(
         MetaStoreUtils.DEFAULT_DATABASE_NAME, "demo_table_3", null));
@@ -223,7 +214,6 @@
     new FileOutputCommitterContainer(job, null).cleanupJob(job);
     assertTrue(job.isSuccessful());

-    UDFContext.getUDFContext().setClientSystemProps();
     server.setBatchOn();
     server.registerQuery("D = load 'default.demo_table_3' using org.apache.hcatalog.pig.HCatLoader();");
     server.executeBatch();
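The PigServer change repeated throughout this patch is mechanical: local-mode Pig reads the local filesystem directly, so the fs.default.name properties and the UDFContext client-properties workaround both go away. Before and after, side by side (inputFile stands in for the test's input path; illustrative only):

    // Before: local Pig wired to the MiniCluster's filesystem.
    //   Properties props = new Properties();
    //   props.setProperty("fs.default.name", cluster.getProperties().getProperty("fs.default.name"));
    //   PigServer server = new PigServer(ExecType.LOCAL, props);
    //   UDFContext.getUDFContext().setClientSystemProps();

    // After: plain local mode.
    PigServer server = new PigServer(ExecType.LOCAL);
    server.setBatchOn();
    server.registerQuery("A = load '" + inputFile + "' using PigStorage(',') as (a0:int, a1:chararray, a2:chararray);");
    server.registerQuery("store A into 'demo_table' using org.apache.hcatalog.pig.HCatStorer();");
    server.executeBatch();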
Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatLoader.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatLoader.java?rev=1303627&r1=1303626&r2=1303627&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatLoader.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatLoader.java Thu Mar 22 00:08:18 2012
@@ -17,6 +17,7 @@
  */
 package org.apache.hcatalog.pig;

+import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -24,16 +25,16 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;

 import junit.framework.TestCase;

+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hcatalog.MiniCluster;
+import org.apache.hcatalog.HcatTestUtils;
 import org.apache.hcatalog.data.Pair;
 import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
@@ -41,21 +42,18 @@ import org.apache.pig.data.DataType;
 import org.apache.pig.data.Tuple;
 import org.apache.pig.impl.logicalLayer.schema.Schema;
 import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
-import org.apache.pig.impl.util.UDFContext;

 public class TestHCatLoader extends TestCase {
+  private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+      "/build/test/data/" + TestHCatLoader.class.getCanonicalName();
+  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+  private static final String BASIC_FILE_NAME = TEST_DATA_DIR + "/basic.input.data";
+  private static final String COMPLEX_FILE_NAME = TEST_DATA_DIR + "/complex.input.data";

   private static final String BASIC_TABLE = "junit_unparted_basic";
   private static final String COMPLEX_TABLE = "junit_unparted_complex";
   private static final String PARTITIONED_TABLE = "junit_parted_basic";
-  private static MiniCluster cluster = MiniCluster.buildCluster();
   private static Driver driver;
-  private static Properties props;
-
-  private static final String basicFile = "/tmp/basic.input.data";
-  private static final String complexFile = "/tmp/complex.input.data";
-  private static String fullFileNameBasic;
-  private static String fullFileNameComplex;

   private static int guardTestCount = 5; // ugh, instantiate using introspection in guardedSetupBeforeClass
   private static boolean setupHasRun = false;
@@ -90,16 +88,19 @@ public class TestHCatLoader extends Test
       return;
     }

+    File f = new File(TEST_WAREHOUSE_DIR);
+    if (f.exists()) {
+      FileUtil.fullyDelete(f);
+    }
+    new File(TEST_WAREHOUSE_DIR).mkdirs();
+
     HiveConf hiveConf = new HiveConf(this.getClass());
     hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
     hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
     hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
     driver = new Driver(hiveConf);
     SessionState.start(new CliSessionState(hiveConf));
-    props = new Properties();
-    props.setProperty("fs.default.name", cluster.getProperties().getProperty("fs.default.name"));
-    fullFileNameBasic = cluster.getProperties().getProperty("fs.default.name") + basicFile;
-    fullFileNameComplex = cluster.getProperties().getProperty("fs.default.name") + complexFile;

     cleanup();

@@ -127,19 +128,17 @@ public class TestHCatLoader extends Test
         k++;
       }
     }
-    MiniCluster.createInputFile(cluster, basicFile, input);
-
-    MiniCluster.createInputFile(cluster, complexFile,
+    HcatTestUtils.createTestDataFile(BASIC_FILE_NAME, input);
+    HcatTestUtils.createTestDataFile(COMPLEX_FILE_NAME,
         new String[]{
-          //"Henry Jekyll\t42\t(415-253-6367,hjekyll@contemporary.edu.uk)\t{(PHARMACOLOGY),(PSYCHIATRY)},[PHARMACOLOGY#A-,PSYCHIATRY#B+],{(415-253-6367,cell),(408-253-6367,landline)}",
-          //"Edward Hyde\t1337\t(415-253-6367,anonymous@b44chan.org)\t{(CREATIVE_WRITING),(COPYRIGHT_LAW)},[CREATIVE_WRITING#A+,COPYRIGHT_LAW#D],{(415-253-6367,cell),(408-253-6367,landline)}",
+          //"Henry Jekyll\t42\t(415-253-6367,hjekyll@contemporary.edu.uk)\t{(PHARMACOLOGY),(PSYCHIATRY)},[PHARMACOLOGY#A-,PSYCHIATRY#B+],{(415-253-6367,cell),(408-253-6367,landline)}",
+          //"Edward Hyde\t1337\t(415-253-6367,anonymous@b44chan.org)\t{(CREATIVE_WRITING),(COPYRIGHT_LAW)},[CREATIVE_WRITING#A+,COPYRIGHT_LAW#D],{(415-253-6367,cell),(408-253-6367,landline)}",
        }
     );

-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    PigServer server = new PigServer(ExecType.LOCAL);
     server.setBatchOn();
-    server.registerQuery("A = load '"+fullFileNameBasic+"' as (a:int, b:chararray);");
+    server.registerQuery("A = load '"+BASIC_FILE_NAME+"' as (a:int, b:chararray);");
     server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.hcatalog.pig.HCatStorer();");

     server.registerQuery("B = foreach A generate a,b;");
@@ -150,14 +149,12 @@ public class TestHCatLoader extends Test
     server.registerQuery("C2 = filter C by a >= 2;");
     server.registerQuery("store C2 into '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatStorer('bkt=1');");

-    server.registerQuery("D = load '"+fullFileNameComplex+"' as (name:chararray, studentid:int, contact:tuple(phno:chararray,email:chararray), currently_registered_courses:bag{innertup:tuple(course:chararray)}, current_grades:map[ ] , phnos :bag{innertup:tuple(phno:chararray,type:chararray)});");
+    server.registerQuery("D = load '"+COMPLEX_FILE_NAME+"' as (name:chararray, studentid:int, contact:tuple(phno:chararray,email:chararray), currently_registered_courses:bag{innertup:tuple(course:chararray)}, current_grades:map[ ] , phnos :bag{innertup:tuple(phno:chararray,type:chararray)});");
     server.registerQuery("store D into '"+COMPLEX_TABLE+"' using org.apache.hcatalog.pig.HCatStorer();");
     server.executeBatch();
   }

   private void cleanup() throws IOException, CommandNeedRetryException {
-    MiniCluster.deleteFile(cluster, basicFile);
-    MiniCluster.deleteFile(cluster, complexFile);
     dropTable(BASIC_TABLE);
     dropTable(COMPLEX_TABLE);
     dropTable(PARTITIONED_TABLE);
@@ -183,7 +180,7 @@ public class TestHCatLoader extends Test

   public void testSchemaLoadBasic() throws IOException{

-    PigServer server = new PigServer(ExecType.LOCAL, props);
+    PigServer server = new PigServer(ExecType.LOCAL);

     // test that schema was loaded correctly
     server.registerQuery("X = load '"+BASIC_TABLE+"' using org.apache.hcatalog.pig.HCatLoader();");
@@ -198,7 +195,7 @@ public class TestHCatLoader extends Test
   }

   public void testReadDataBasic() throws IOException {
-    PigServer server = new PigServer(ExecType.LOCAL, props);
+    PigServer server = new PigServer(ExecType.LOCAL);

     server.registerQuery("X = load '"+BASIC_TABLE+"' using org.apache.hcatalog.pig.HCatLoader();");
     Iterator<Tuple> XIter = server.openIterator("X");
@@ -217,22 +214,22 @@ public class TestHCatLoader extends Test

   public void testSchemaLoadComplex() throws IOException{

-    PigServer server = new PigServer(ExecType.LOCAL, props);
+    PigServer server = new PigServer(ExecType.LOCAL);

     // test that schema was loaded correctly
     server.registerQuery("K = load '"+COMPLEX_TABLE+"' using org.apache.hcatalog.pig.HCatLoader();");
     Schema dumpedKSchema = server.dumpSchema("K");
     List<FieldSchema> Kfields = dumpedKSchema.getFields();
-    assertEquals(6,Kfields.size());
+    assertEquals(6, Kfields.size());

     assertEquals(DataType.CHARARRAY,Kfields.get(0).type);
     assertEquals("name",Kfields.get(0).alias.toLowerCase());

     assertEquals( DataType.INTEGER,Kfields.get(1).type);
-    assertEquals("studentid",Kfields.get(1).alias.toLowerCase());
+    assertEquals("studentid", Kfields.get(1).alias.toLowerCase());

-    assertEquals(DataType.TUPLE,Kfields.get(2).type);
-    assertEquals("contact",Kfields.get(2).alias.toLowerCase());
+    assertEquals(DataType.TUPLE, Kfields.get(2).type);
+    assertEquals("contact", Kfields.get(2).alias.toLowerCase());
     {
       assertNotNull(Kfields.get(2).schema);
       assertTrue(Kfields.get(2).schema.getFields().size() == 2);
@@ -241,8 +238,8 @@ public class TestHCatLoader extends Test
       assertTrue(Kfields.get(2).schema.getFields().get(1).type == DataType.CHARARRAY);
       assertTrue(Kfields.get(2).schema.getFields().get(1).alias.equalsIgnoreCase("email"));
     }
-    assertEquals(DataType.BAG,Kfields.get(3).type);
-    assertEquals("currently_registered_courses",Kfields.get(3).alias.toLowerCase());
+    assertEquals(DataType.BAG, Kfields.get(3).type);
+    assertEquals("currently_registered_courses", Kfields.get(3).alias.toLowerCase());
     {
       assertNotNull(Kfields.get(3).schema);
       assertEquals(1,Kfields.get(3).schema.getFields().size());
@@ -257,7 +254,7 @@ public class TestHCatLoader extends Test
     assertEquals(DataType.MAP,Kfields.get(4).type);
     assertEquals("current_grades",Kfields.get(4).alias.toLowerCase());
     assertEquals(DataType.BAG,Kfields.get(5).type);
-    assertEquals("phnos",Kfields.get(5).alias.toLowerCase());
+    assertEquals("phnos", Kfields.get(5).alias.toLowerCase());
     {
       assertNotNull(Kfields.get(5).schema);
       assertEquals(1,Kfields.get(5).schema.getFields().size());
@@ -273,12 +270,12 @@ public class TestHCatLoader extends Test
   }

   public void testReadPartitionedBasic() throws IOException, CommandNeedRetryException {
-    PigServer server = new PigServer(ExecType.LOCAL, props);
+    PigServer server = new PigServer(ExecType.LOCAL);

     driver.run("select * from "+PARTITIONED_TABLE);
     ArrayList<String> valuesReadFromHiveDriver = new ArrayList<String>();
     driver.getResults(valuesReadFromHiveDriver);
-    assertEquals(basicInputData.size(),valuesReadFromHiveDriver.size());
+    assertEquals(basicInputData.size(), valuesReadFromHiveDriver.size());

     server.registerQuery("W = load '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatLoader();");
     Schema dumpedWSchema = server.dumpSchema("W");
@@ -337,7 +334,7 @@ public class TestHCatLoader extends Test

   public void testProjectionsBasic() throws IOException {

-    PigServer server = new PigServer(ExecType.LOCAL, props);
+    PigServer server = new PigServer(ExecType.LOCAL);

     // projections are handled by using generate, not "as" on the Load
Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatStorer.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatStorer.java?rev=1303627&r1=1303626&r2=1303627&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatStorer.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatStorer.java Thu Mar 22 00:08:18 2012
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
-import java.util.Properties;

 import junit.framework.TestCase;

@@ -30,10 +29,7 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hcatalog.MiniCluster;
-import org.apache.hcatalog.common.HCatConstants;
-import org.apache.hcatalog.rcfile.RCFileInputDriver;
-import org.apache.hcatalog.rcfile.RCFileOutputDriver;
+import org.apache.hcatalog.HcatTestUtils;
 import org.apache.pig.ExecType;
 import org.apache.pig.PigException;
 import org.apache.pig.PigServer;
@@ -41,32 +37,28 @@ import org.apache.pig.data.DataByteArray
 import org.apache.pig.data.Tuple;
 import org.apache.pig.impl.logicalLayer.FrontendException;
 import org.apache.pig.impl.util.LogUtils;
-import org.apache.pig.impl.util.UDFContext;

 public class TestHCatStorer extends TestCase {
+  private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+      "/build/test/data/" + TestHCatStorer.class.getCanonicalName();
+  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";

-  MiniCluster cluster = MiniCluster.buildCluster();
   private Driver driver;
-  Properties props;

   @Override
   protected void setUp() throws Exception {
-
-    HiveConf hiveConf = new HiveConf(this.getClass());
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    driver = new Driver(hiveConf);
-    SessionState.start(new CliSessionState(hiveConf));
-    props = new Properties();
-    props.setProperty("fs.default.name", cluster.getProperties().getProperty("fs.default.name"));
-    fullFileName = cluster.getProperties().getProperty("fs.default.name") + fileName;
+    if (driver == null) {
+      HiveConf hiveConf = new HiveConf(this.getClass());
+      hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+      hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+      hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+      hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
+      driver = new Driver(hiveConf);
+      SessionState.start(new CliSessionState(hiveConf));
+    }
   }

-  String fileName = "/tmp/input.data";
-  String fullFileName;
-
-
 //  public void testStoreFuncMap() throws IOException{
 //
 //    driver.run("drop table junit_unparted");
@@ -81,8 +73,7 @@ public class TestHCatStorer extends Test
 //    MiniCluster.deleteFile(cluster, fileName);
 //    MiniCluster.createInputFile(cluster, fileName, new String[]{"test\t{([a#haddop,b#pig])}","data\t{([b#hive,a#hcat])}"});
 //
-//    PigServer server = new PigServer(ExecType.LOCAL, props);
-//    UDFContext.getUDFContext().setClientSystemProps();
+//    PigServer server = new PigServer(ExecType.LOCAL);
 //    server.setBatchOn();
 //    server.registerQuery("A = load '"+ fullFileName +"' as (b:chararray,arr_of_maps:bag{mytup:tuple ( mymap:map[ ])});");
 //    server.registerQuery("store A into 'default.junit_unparted' using org.apache.hadoop.hive.hCatalog.pig.HCatStorer('','b:chararray,arr_of_maps:bag{mytup:tuple ( mymap:map[ ])}');");
@@ -111,16 +102,14 @@ public class TestHCatStorer extends Test
     if(retCode != 0) {
       throw new IOException("Failed to create table.");
     }
-    MiniCluster.deleteFile(cluster, fileName);
     int LOOP_SIZE = 11;
     String[] input = new String[LOOP_SIZE];
     for(int i = 0; i < LOOP_SIZE; i++) {
       input[i] = i + "\t1";
     }
-    MiniCluster.createInputFile(cluster, fileName, input);
-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
-    server.registerQuery("A = load '"+fullFileName+"' as (a:int, b:chararray);");
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+    PigServer server = new PigServer(ExecType.LOCAL);
+    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
     server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('b=1');");
     server.registerQuery("B = load 'default.junit_unparted' using "+HCatLoader.class.getName()+"();");
     Iterator<Tuple> itr = server.openIterator("B");
@@ -137,7 +126,6 @@ public class TestHCatStorer extends Test

     assertFalse(itr.hasNext());
     assertEquals(11, i);
-    MiniCluster.deleteFile(cluster, fileName);
   }

   public void testMultiPartColsInData() throws IOException, CommandNeedRetryException{
@@ -151,17 +139,15 @@ public class TestHCatStorer extends Test
       throw new IOException("Failed to create table.");
     }

-    MiniCluster.deleteFile(cluster, fullFileName);
     String[] inputData = {"111237\tKrishna\t01/01/1990\tM\tIN\tTN",
                           "111238\tKalpana\t01/01/2000\tF\tIN\tKA",
                           "111239\tSatya\t01/01/2001\tM\tIN\tKL",
                           "111240\tKavya\t01/01/2002\tF\tIN\tAP"};

-    MiniCluster.createInputFile(cluster, fullFileName, inputData);
-    PigServer pig = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+    PigServer pig = new PigServer(ExecType.LOCAL);
     pig.setBatchOn();
-    pig.registerQuery("A = LOAD '"+fullFileName+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
+    pig.registerQuery("A = LOAD '"+INPUT_FILE_NAME+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
         "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
     pig.registerQuery("TN = FILTER A BY emp_state == 'TN';");
     pig.registerQuery("KA = FILTER A BY emp_state == 'KA';");
@@ -181,7 +167,6 @@
     assertEquals(inputData[1], results.get(1));
     assertEquals(inputData[2], results.get(2));
     assertEquals(inputData[3], results.get(3));
-    MiniCluster.deleteFile(cluster, fullFileName);
     driver.run("drop table employee");
   }

@@ -193,16 +178,14 @@ public class TestHCatStorer extends Test
     if(retCode != 0) {
       throw new IOException("Failed to create table.");
     }
-    MiniCluster.deleteFile(cluster, fileName);
     int LOOP_SIZE = 11;
     String[] input = new String[LOOP_SIZE];
     for(int i = 0; i < LOOP_SIZE; i++) {
       input[i] = i+"";
     }
-    MiniCluster.createInputFile(cluster, fileName, input);
-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
-    server.registerQuery("A = load '"+fullFileName+"' as (a:int);");
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+    PigServer server = new PigServer(ExecType.LOCAL);
+    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int);");
     server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('b=1');");
     server.registerQuery("B = load 'default.junit_unparted' using "+HCatLoader.class.getName()+"();");
     Iterator<Tuple> itr = server.openIterator("B");
@@ -219,7 +202,6 @@ public class TestHCatStorer extends Test

     assertFalse(itr.hasNext());
     assertEquals(11, i);
-    MiniCluster.deleteFile(cluster, fileName);
   }

   public void testNoAlias() throws IOException, CommandNeedRetryException{
@@ -229,12 +211,11 @@ public class TestHCatStorer extends Test
     if(retCode != 0) {
       throw new IOException("Failed to create table.");
     }
-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    PigServer server = new PigServer(ExecType.LOCAL);
     boolean errCaught = false;
     try{
       server.setBatchOn();
-      server.registerQuery("A = load '"+ fullFileName +"' as (a:int, b:chararray);");
+      server.registerQuery("A = load '"+ INPUT_FILE_NAME +"' as (a:int, b:chararray);");
       server.registerQuery("B = foreach A generate a+10, b;");
       server.registerQuery("store B into 'junit_parted' using "+HCatStorer.class.getName()+"('ds=20100101');");
       server.executeBatch();
@@ -250,7 +231,7 @@ public class TestHCatStorer extends Test
     errCaught = false;
     try{
       server.setBatchOn();
-      server.registerQuery("A = load '"+ fullFileName +"' as (a:int, B:chararray);");
+      server.registerQuery("A = load '"+ INPUT_FILE_NAME +"' as (a:int, B:chararray);");
       server.registerQuery("B = foreach A generate a, B;");
       server.registerQuery("store B into 'junit_parted' using "+HCatStorer.class.getName()+"('ds=20100101');");
       server.executeBatch();
@@ -281,7 +262,6 @@ public class TestHCatStorer extends Test
       throw new IOException("Failed to create table.");
     }

-    MiniCluster.deleteFile(cluster, fileName);
     int LOOP_SIZE = 3;
     String[] input = new String[LOOP_SIZE*LOOP_SIZE];
     int k = 0;
@@ -291,17 +271,15 @@
       input[k++] = si + "\t"+j;
       }
     }
-    MiniCluster.createInputFile(cluster, fileName, input);
-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+    PigServer server = new PigServer(ExecType.LOCAL);
     server.setBatchOn();
-    server.registerQuery("A = load '"+ fullFileName +"' as (a:int, b:chararray);");
+    server.registerQuery("A = load '"+ INPUT_FILE_NAME +"' as (a:int, b:chararray);");
     server.registerQuery("B = filter A by a < 2;");
     server.registerQuery("store B into 'junit_unparted' using "+HCatStorer.class.getName()+"();");
     server.registerQuery("C = filter A by a >= 2;");
     server.registerQuery("store C into 'junit_unparted2' using "+HCatStorer.class.getName()+"();");
     server.executeBatch();
-    MiniCluster.deleteFile(cluster, fileName);

     driver.run("select * from junit_unparted");
     ArrayList<String> res = new ArrayList<String>();
@@ -331,7 +309,7 @@ public class TestHCatStorer extends Test
     if(retCode != 0) {
       throw new IOException("Failed to create table.");
     }
-    MiniCluster.deleteFile(cluster, fileName);
+
     int LOOP_SIZE = 3;
     String[] input = new String[LOOP_SIZE*LOOP_SIZE];
     int k = 0;
@@ -341,14 +319,12 @@
       input[k++] = si + "\t"+j;
       }
     }
-    MiniCluster.createInputFile(cluster, fileName, input);
-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+    PigServer server = new PigServer(ExecType.LOCAL);
     server.setBatchOn();
-    server.registerQuery("A = load '"+ fullFileName +"' as (a:int, b:chararray);");
+    server.registerQuery("A = load '"+ INPUT_FILE_NAME +"' as (a:int, b:chararray);");
     server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('');");
     server.executeBatch();
-    MiniCluster.deleteFile(cluster, fileName);

     driver.run("select * from junit_unparted");
     ArrayList<String> res = new ArrayList<String>();
@@ -371,7 +347,7 @@ public class TestHCatStorer extends Test
     if(retCode != 0) {
       throw new IOException("Failed to create table.");
     }
-    MiniCluster.deleteFile(cluster, fileName);
+
     int LOOP_SIZE = 3;
     String[] input = new String[LOOP_SIZE*LOOP_SIZE];
     int k = 0;
@@ -381,14 +357,12 @@
       input[k++] = si + "\t"+j;
       }
     }
-    MiniCluster.createInputFile(cluster, fileName, input);
-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+    PigServer server = new PigServer(ExecType.LOCAL);
     server.setBatchOn();
-    server.registerQuery("A = load '"+ fullFileName +"' as (a:int, b:chararray);");
+    server.registerQuery("A = load '"+ INPUT_FILE_NAME +"' as (a:int, b:chararray);");
     server.registerQuery("store A into 'junit_unparted' using "+HCatStorer.class.getName()+"();");
     server.executeBatch();
-    MiniCluster.deleteFile(cluster, fileName);

     driver.run("select * from junit_unparted");
     ArrayList<String> res = new ArrayList<String>();
@@ -411,7 +385,7 @@ public class TestHCatStorer extends Test
     if(retCode != 0) {
       throw new IOException("Failed to create table.");
     }
-    MiniCluster.deleteFile(cluster, fileName);
+
    int LOOP_SIZE = 3;
     String[] input = new String[LOOP_SIZE*LOOP_SIZE];
     int k = 0;
@@ -421,15 +395,13 @@
       input[k++] = si + "\t"+j;
       }
     }
-    MiniCluster.createInputFile(cluster, fileName, input);
-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+    PigServer server = new PigServer(ExecType.LOCAL);
     server.setBatchOn();
-    server.registerQuery("A = load '"+fullFileName+"' as (a:int, b:chararray);");
+    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
     server.registerQuery("B = filter A by a > 100;");
     server.registerQuery("store B into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('','a:int,b:chararray');");
     server.executeBatch();
-    MiniCluster.deleteFile(cluster, fileName);

     driver.run("select * from junit_unparted");
     ArrayList<String> res = new ArrayList<String>();
@@ -449,22 +421,18 @@
       throw new IOException("Failed to create table.");
     }

-    MiniCluster.deleteFile(cluster, fileName);
-    MiniCluster.createInputFile(cluster, fileName, new String[]{"zookeeper\t(2)\t{(pig)}\t{(pnuts,hdfs)}\t{(hadoop),(hcat)}",
-        "chubby\t(2)\t{(sawzall)}\t{(bigtable,gfs)}\t{(mapreduce),(hcat)}"});
+    String[] inputData = new String[]{"zookeeper\t(2)\t{(pig)}\t{(pnuts,hdfs)}\t{(hadoop),(hcat)}",
+        "chubby\t(2)\t{(sawzall)}\t{(bigtable,gfs)}\t{(mapreduce),(hcat)}"};

-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+
+    PigServer server = new PigServer(ExecType.LOCAL);
     server.setBatchOn();
-    server.registerQuery("A = load '"+fullFileName+"' as (b:chararray, a:tuple(a1:int), arr_of_struct:bag{mytup:tuple(s1:chararray)}, arr_of_struct2:bag{mytup:tuple(s1:chararray,s2:chararray)}, arr_of_struct3:bag{t3:tuple(s3:chararray)});");
+    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (b:chararray, a:tuple(a1:int), arr_of_struct:bag{mytup:tuple(s1:chararray)}, arr_of_struct2:bag{mytup:tuple(s1:chararray,s2:chararray)}, arr_of_struct3:bag{t3:tuple(s3:chararray)});");
     server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('','b:chararray, a:tuple(a1:int)," +
         " arr_of_struct:bag{mytup:tuple(s1:chararray)}, arr_of_struct2:bag{mytup:tuple(s1:chararray,s2:chararray)}, arr_of_struct3:bag{t3:tuple(s3:chararray)}');");
     server.executeBatch();
-
-
-    MiniCluster.deleteFile(cluster, fileName);
-
     driver.run("select * from junit_unparted");
     ArrayList<String> res = new ArrayList<String>();
     driver.getResults(res);
@@ -484,18 +452,17 @@
     if(retCode != 0) {
       throw new IOException("Failed to create table.");
     }
-    MiniCluster.deleteFile(cluster, fileName);
+
     int LOOP_SIZE = 3;
     String[] input = new String[LOOP_SIZE*LOOP_SIZE];
     for(int i = 0; i < LOOP_SIZE*LOOP_SIZE; i++) {
       input[i] = i + "\t" + i * 2.1f +"\t"+ i*1.1d + "\t" + i * 2L +"\t"+"lets hcat"+"\tbinary-data";
     }

-    MiniCluster.createInputFile(cluster, fileName, input);
-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+    PigServer server = new PigServer(ExecType.LOCAL);
     server.setBatchOn();
-    server.registerQuery("A = load '"+fullFileName+"' as (a:int, b:float, c:double, d:long, e:chararray, f:bytearray);");
+    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:float, c:double, d:long, e:chararray, f:bytearray);");
     server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('','a:int, b:float, c:double, d:long, e:chararray,f:bytearray');");
     server.executeBatch();

@@ -519,14 +486,12 @@
       count++;
     }
     assertEquals(LOOP_SIZE * LOOP_SIZE, count);
-    MiniCluster.deleteFile(cluster, fileName);
     driver.run("drop table junit_unparted");
   }

   @Override
   protected void tearDown() throws Exception {
     super.tearDown();
-    MiniCluster.deleteFile(cluster, fileName);
   }

@@ -540,24 +505,22 @@
     if(retCode != 0) {
       throw new IOException("Failed to create table.");
     }
-    MiniCluster.deleteFile(cluster, fileName);
+
     int LOOP_SIZE = 3;
-    String[] input = new String[LOOP_SIZE*LOOP_SIZE];
+    String[] inputData = new String[LOOP_SIZE*LOOP_SIZE];
     int k = 0;
     for(int i = 1; i <= LOOP_SIZE; i++) {
       String si = i + "";
       for(int j=1;j<=LOOP_SIZE;j++) {
-        input[k++] = si + "\t"+j;
+        inputData[k++] = si + "\t"+j;
       }
     }
-    MiniCluster.createInputFile(cluster, fileName, input);
-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+    PigServer server = new PigServer(ExecType.LOCAL);
     server.setBatchOn();
-    server.registerQuery("A = load '"+fullFileName+"' as (a:int, b:chararray);");
+    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
     server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('','a:int,b:chararray');");
     server.executeBatch();
-    MiniCluster.deleteFile(cluster, fileName);

     driver.run("select * from junit_unparted");
     ArrayList<String> res = new ArrayList<String>();
@@ -586,17 +549,15 @@
       throw new IOException("Failed to create table.");
     }

-    MiniCluster.deleteFile(cluster, fullFileName);
     String[] inputData = {"111237\tKrishna\t01/01/1990\tM\tIN\tTN",
                           "111238\tKalpana\t01/01/2000\tF\tIN\tKA",
                           "111239\tSatya\t01/01/2001\tM\tIN\tKL",
                           "111240\tKavya\t01/01/2002\tF\tIN\tAP"};

-    MiniCluster.createInputFile(cluster, fullFileName, inputData);
-    PigServer pig = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+    PigServer pig = new PigServer(ExecType.LOCAL);
     pig.setBatchOn();
-    pig.registerQuery("A = LOAD '"+fullFileName+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
+    pig.registerQuery("A = LOAD '"+INPUT_FILE_NAME+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
         "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
     pig.registerQuery("IN = FILTER A BY emp_country == 'IN';");
     pig.registerQuery("STORE IN INTO 'employee' USING "+HCatStorer.class.getName()+"('emp_country=IN');");
@@ -610,7 +571,6 @@
     assertEquals(inputData[1], results.get(1));
     assertEquals(inputData[2], results.get(2));
     assertEquals(inputData[3], results.get(3));
-    MiniCluster.deleteFile(cluster, fullFileName);
     driver.run("drop table employee");
   }

@@ -625,17 +585,15 @@
       throw new IOException("Failed to create table.");
     }

-    MiniCluster.deleteFile(cluster, fullFileName);
     String[] inputData = {"111237\tKrishna\t01/01/1990\tM\tIN\tTN",
                           "111238\tKalpana\t01/01/2000\tF\tIN\tKA",
                           "111239\tSatya\t01/01/2001\tM\tIN\tKL",
                           "111240\tKavya\t01/01/2002\tF\tIN\tAP"};

-    MiniCluster.createInputFile(cluster, fullFileName, inputData);
-    PigServer pig = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+    PigServer pig = new PigServer(ExecType.LOCAL);
     pig.setBatchOn();
-    pig.registerQuery("A = LOAD '"+fullFileName+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
+    pig.registerQuery("A = LOAD '"+INPUT_FILE_NAME+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
         "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
     pig.registerQuery("IN = FILTER A BY emp_country == 'IN';");
     pig.registerQuery("STORE IN INTO 'employee' USING "+HCatStorer.class.getName()+"();");
@@ -649,7 +607,6 @@
     assertEquals(inputData[1], results.get(1));
     assertEquals(inputData[2], results.get(2));
     assertEquals(inputData[3], results.get(3));
-    MiniCluster.deleteFile(cluster, fullFileName);
     driver.run("drop table employee");
   }

@@ -664,14 +621,12 @@
       throw new IOException("Failed to create table.");
     }

-    MiniCluster.deleteFile(cluster, fullFileName);
     String[] inputData = {};
+    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);

-    MiniCluster.createInputFile(cluster, fullFileName, inputData);
-    PigServer pig = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    PigServer pig = new PigServer(ExecType.LOCAL);
     pig.setBatchOn();
-    pig.registerQuery("A = LOAD '"+fullFileName+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
+    pig.registerQuery("A = LOAD '"+INPUT_FILE_NAME+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
         "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
     pig.registerQuery("IN = FILTER A BY emp_country == 'IN';");
     pig.registerQuery("STORE IN INTO 'employee' USING "+HCatStorer.class.getName()+"();");
@@ -680,9 +635,6 @@
     ArrayList<String> results = new ArrayList<String>();
     driver.getResults(results);
     assertEquals(0, results.size());
-    MiniCluster.deleteFile(cluster, fullFileName);
     driver.run("drop table employee");
   }
-
-
 }
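The other half of each storer test, reading the stored rows back through the embedded Hive Driver, is unchanged by this patch; for reference (expectedRowCount is a placeholder):

    driver.run("select * from junit_unparted");
    ArrayList<String> results = new ArrayList<String>();
    driver.getResults(results);             // each row arrives as one tab-separated string
    assertEquals(expectedRowCount, results.size());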
Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatStorerMulti.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatStorerMulti.java?rev=1303627&r1=1303626&r2=1303627&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatStorerMulti.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/pig/TestHCatStorerMulti.java Thu Mar 22 00:08:18 2012
@@ -17,36 +17,35 @@
  */
 package org.apache.hcatalog.pig;

+import java.io.File;
+import java.io.FileWriter;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Properties;

 import junit.framework.TestCase;

+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hcatalog.MiniCluster;
 import org.apache.hcatalog.data.Pair;
 import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
-import org.apache.pig.impl.util.UDFContext;

 public class TestHCatStorerMulti extends TestCase {
+  private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+      "/build/test/data/" + TestHCatStorerMulti.class.getCanonicalName();
+  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";

   private static final String BASIC_TABLE = "junit_unparted_basic";
   private static final String PARTITIONED_TABLE = "junit_parted_basic";
-  private static MiniCluster cluster = MiniCluster.buildCluster();
   private static Driver driver;

-  private static final String basicFile = "/tmp/basic.input.data";
-  private static String basicFileFullName;
-  private static Properties props;
-
   private static Map<Integer, Pair<Integer, String>> basicInputData;

   private void dropTable(String tablename) throws IOException, CommandNeedRetryException{
@@ -77,14 +76,11 @@ public class TestHCatStorerMulti extends
       hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
       hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
       hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+      hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
       driver = new Driver(hiveConf);
       SessionState.start(new CliSessionState(hiveConf));
     }

-    props = new Properties();
-    props.setProperty("fs.default.name", cluster.getProperties().getProperty("fs.default.name"));
-    basicFileFullName = cluster.getProperties().getProperty("fs.default.name") + basicFile;
-
     cleanup();
   }

@@ -100,10 +96,9 @@ public class TestHCatStorerMulti extends

     populateBasicFile();

-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    PigServer server = new PigServer(ExecType.LOCAL);
     server.setBatchOn();
-    server.registerQuery("A = load '"+basicFileFullName+"' as (a:int, b:chararray);");
+    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
     server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.hcatalog.pig.HCatStorer();");
     server.executeBatch();

@@ -119,10 +114,9 @@ public class TestHCatStorerMulti extends

     populateBasicFile();

-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    PigServer server = new PigServer(ExecType.LOCAL);
     server.setBatchOn();
-    server.registerQuery("A = load '"+basicFileFullName+"' as (a:int, b:chararray);");
+    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
     server.registerQuery("B2 = filter A by a < 2;");
     server.registerQuery("store B2 into '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatStorer('bkt=0');");
@@ -145,10 +139,9 @@ public class TestHCatStorerMulti extends

     populateBasicFile();

-    PigServer server = new PigServer(ExecType.LOCAL, props);
-    UDFContext.getUDFContext().setClientSystemProps();
+    PigServer server = new PigServer(ExecType.LOCAL);
     server.setBatchOn();
-    server.registerQuery("A = load '"+basicFileFullName+"' as (a:int, b:chararray);");
+    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
     server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.hcatalog.pig.HCatStorer();");

     server.registerQuery("B2 = filter A by a < 2;");
@@ -173,20 +166,29 @@ public class TestHCatStorerMulti extends
     String[] input = new String[LOOP_SIZE*LOOP_SIZE];
     basicInputData = new HashMap<Integer, Pair<Integer, String>>();
     int k = 0;
+    File file = new File(INPUT_FILE_NAME);
+    file.deleteOnExit();
+    FileWriter writer = new FileWriter(file);
     for(int i = 1; i <= LOOP_SIZE; i++) {
       String si = i + "";
       for(int j=1;j<=LOOP_SIZE;j++) {
         String sj = "S"+j+"S";
         input[k] = si + "\t" + sj;
         basicInputData.put(k, new Pair<Integer, String>(i, sj));
+        writer.write(input[k] + "\n");
         k++;
       }
     }
-    MiniCluster.createInputFile(cluster, basicFile, input);
+    writer.close();
   }

   private void cleanup() throws IOException, CommandNeedRetryException {
-    MiniCluster.deleteFile(cluster, basicFile);
+    File f = new File(TEST_WAREHOUSE_DIR);
+    if (f.exists()) {
+      FileUtil.fullyDelete(f);
+    }
+    new File(TEST_WAREHOUSE_DIR).mkdirs();
+
     dropTable(BASIC_TABLE);
     dropTable(PARTITIONED_TABLE);
   }