hive-commits mailing list archives

From: pras...@apache.org
Subject: svn commit: r771017 - in /hadoop/hive/trunk: ./ cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/conf/ metastore/src/java/org/apache/hadoop/hive/metastore/ ql/ ql/src/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache...
Date: Sun, 03 May 2009 02:38:00 GMT
Author: prasadc
Date: Sun May  3 02:37:59 2009
New Revision: 771017

URL: http://svn.apache.org/viewvc?rev=771017&view=rev
Log:
HIVE-453 Use FileSystem derived from user supplied path when present instead of default
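The recurring change across the files below is to stop using the process-wide default
FileSystem and instead ask each user-supplied Path which FileSystem it belongs to. A minimal
before/after sketch of that pattern, using only standard Hadoop API; the class and method
names are illustrative and not part of this commit:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FsResolutionSketch {
      // Old pattern: always resolves to fs.default.name, so a path such as
      // file:///tmp/data or one on another cluster ends up on the wrong FS.
      static FileSystem oldStyle(Configuration conf) throws IOException {
        return FileSystem.get(conf);
      }

      // New pattern: the FileSystem is derived from the path itself; paths
      // without a scheme still fall back to the configured default.
      static FileSystem newStyle(Path userPath, Configuration conf) throws IOException {
        return userPath.getFileSystem(conf);
      }
    }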

Modified:
    hadoop/hive/trunk/CHANGES.txt
    hadoop/hive/trunk/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
    hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
    hadoop/hive/trunk/ql/build.xml
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Context.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
    hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java

Modified: hadoop/hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/CHANGES.txt?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/CHANGES.txt (original)
+++ hadoop/hive/trunk/CHANGES.txt Sun May  3 02:37:59 2009
@@ -3,7 +3,7 @@
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES
-  
+
     HIVE-143. Remove the old file based metastore, in favour of the
     RDBMS based metastore. (prasadc via johan)
 
@@ -53,13 +53,13 @@
 
     HIVE-381. Fix JDBC HiveResultSet's next function.
     (Kim P via namit)
-    
+
     HIVE-387. Use URI from FileSystem Object instead of from HADOOPFS
     directly. (jssarma)
 
     HIVE-385. Split Driver's run into compile and execute.
     (Neil Conway via namit)
-  
+
     HIVE-393. Remove unnecessary checks in movetask for file type.
     (Zheng Shao via namit)
 
@@ -101,7 +101,7 @@
 
     HIVE-366. testParse should not depend on a static field
     (Zheng Shao via namit)
-    
+
     HIVE-433. Fixed union18 and union19 tests.
     (athusoo via johan)
 
@@ -111,13 +111,16 @@
     HIVE-395. Fix build problems with eclipse and 0.19 hadoop.
     (Neil Conway via athusoo)
 
-    HIVE-456. Add option to run junit tests in quiet mode. 
+    HIVE-456. Add option to run junit tests in quiet mode.
     (Zheng Shao via prasadc)
 
+    HIVE-453. Use FileSystem derived from user supplied path when present
+    instead of default. (Joydeep Sen Sarma via prasadc)
+
 Release 0.3.1 - Unreleased
 
   INCOMPATIBLE CHANGES
-  
+
   NEW FEATURES
 
     HIVE-61. Implement "ORDER BY". (Namit Jain via zshao)
@@ -183,9 +186,9 @@
 
     HIVE-271. Move test-udfs creation to build-common in order to
     fix broken 0.17 build. (athusoo)
-    
+
     HIVE-286. Use round(xxx,12) to make sure there is no precision
-    matching problem in testCliDriver_udf7. (zshao via johan)    
+    matching problem in testCliDriver_udf7. (zshao via johan)
 
     HIVE-327. Fix row counts printed in hive history to reflect the
     count of rows from the query in the session. (Suresh Anthony via athusoo)
@@ -199,11 +202,11 @@
     HIVE-124. Fix empty aggregations to return 1 row instead of
     nothing. (namit)
 
-    HIVE-347. Fix lot of partition scans in strict mode in case of error in 
+    HIVE-347. Fix lot of partition scans in strict mode in case of error in
     partition specification error. (namit)
 
     HIVE-251. Throw an error to the CLI if a user defined script fails in
-    TRANSFORM, MAP or REDUCE commands. (athusoo) 
+    TRANSFORM, MAP or REDUCE commands. (athusoo)
 
     HIVE-317. Fix HiveServer to accept 1 argument for port.
     (Min Zhou via athusoo)
@@ -217,13 +220,13 @@
     HIVE-350. Fix wrong order of aggregations in explain plan.
     (namit)
 
-    HIVE-347. undo for Fix lot of partition scans in strict mode in case of error in 
+    HIVE-347. undo for Fix lot of partition scans in strict mode in case of error in
     partition specification error. (namit)
 
-    HIVE-319. Add UDF for unix timestamp. 
+    HIVE-319. Add UDF for unix timestamp.
     (Hao Liu via namit)
 
-    HIVE-363. Use 1 reducer in case of aggregations with no grouping keys. 
+    HIVE-363. Use 1 reducer in case of aggregations with no grouping keys.
     (namit)
 
     HIVE-366. Make id a non-bean attribute so that TestParse does not depend on that.
@@ -235,15 +238,15 @@
     HIVE-318. Fix union all queries. (Namit Jain via zshao)
 
     HIVE-367. Null Pointer when some of the files in the table are
-    empty. (Namit Jain via athusoo) 
+    empty. (Namit Jain via athusoo)
 
-    HIVE-278. Add support for Hive History to 
+    HIVE-278. Add support for Hive History to
     Hive Web UI. (Edward Capriolo via athusoo)
 
     HIVE-356. Treat Object[] and List<Object> differently.
     (Zheng Shao via namit)
 
-    HIVE-373. 1 reducer should be used if no grouping key is present in all 
+    HIVE-373. 1 reducer should be used if no grouping key is present in all
     scenarios. (Namit Jain via zshao)
 
     HIVE-375. LazySimpleSerDe directly creates a UTF8 buffer for primitive types.
@@ -308,7 +311,7 @@
     HIVE-147. Add a tool for extracting lineage info from hive sql.
     (Suresh Antony via zshao)
 
-    HIVE-48.  Support JDBC connections for interoperability between 
+    HIVE-48.  Support JDBC connections for interoperability between
     Hive and RDBMS.  (Raghotham Murthy and Michi Mutsuzaki via dhruba)
 
     HIVE-140. Event Based Infrastructure for Syntax Trees in the compiler.
@@ -379,7 +382,7 @@
 
     HIVE-69. genMapRedTasks uses tree walker. (Namit through zshao)
 
-    HIVE-249. Give execute permissions to the hive binary 
+    HIVE-249. Give execute permissions to the hive binary
     (Jeff Hammerbacher via athusoo)
 
     HIVE-231. Create junit reports for tests. (Johan Oskarsson via athusoo)
@@ -444,16 +447,16 @@
 
     HIVE-168. Fixed join on a subquery with a group by. (Namit Jain via zshao)
 
-    HIVE-169. Fixed configuration parameter used for determining join interval 
+    HIVE-169. Fixed configuration parameter used for determining join interval
     in JoinOperator. (Namit Jain via zshao)
 
     HIVE-114. Drop partition does not delete data for external tables now.
     (Johan Oskarsson via zshao)
 
-    HIVE-144. Hive/ql java source copied to build/ql/java to make it work 
+    HIVE-144. Hive/ql java source copied to build/ql/java to make it work
     with eclipse. (Johan Oskarsson via zshao)
 
-    HIVE-129. Fix aux.jar packaging to work properly with 0.17 and 0.18 
+    HIVE-129. Fix aux.jar packaging to work properly with 0.17 and 0.18
     versions of hadoop. (Joydeep Sarma via zshao)
 
     HIVE-162. Fix join0.q test failure with hadoop 0.17. (zshao)
@@ -490,7 +493,7 @@
     HIVE-60. Fix the build scripts for the new hive tree.
     (Ashish through zshao)
 
-    HIVE-54. Create this file to record the patches going into a 
+    HIVE-54. Create this file to record the patches going into a
     particular release. (dhruba)
 
     HIVE-12. Improve README for Hive.
@@ -501,19 +504,19 @@
 
     HIVE-70. A configuration variable that limits the amount of data
     writen by a task to its standard error.
-    (Joydeep Sen Sarma via dhruba) 
+    (Joydeep Sen Sarma via dhruba)
 
     HIVE-17. Remove duplicate entries for hive.metastore.uris in config file.
-    (Jeff Hammerbacher via dhruba) 
+    (Jeff Hammerbacher via dhruba)
 
     HIVE-72. Generate correct results when partition pruning is not strict
-    and no map-reduce jobs are needed.  (Namim Jain via dhruba) 
+    and no map-reduce jobs are needed.  (Namim Jain via dhruba)
 
     HIVE-75. Make limit work when applied to outermost block that is not
-    a query.  (Namim Jain via dhruba) 
+    a query.  (Namim Jain via dhruba)
 
     HIVE-76. Fix for column number mismatch between query and destination
-    tables when alias.* expressions are present in the select list of a 
+    tables when alias.* expressions are present in the select list of a
     join. (Ashish Thusoo via dhruba)
 
     HIVE-196. Two test runs can run simultaneously on the same machine.
@@ -530,10 +533,10 @@
     HIVE-220. Incorrect log directory in TestMTQueries causing null pointer
     exception.  (Prasad Chakka via dhruba)
 
-    HIVE-230.  Fix for a Null Pointer Exception that occured while loading a 
+    HIVE-230.  Fix for a Null Pointer Exception that occured while loading a
     table from a query that returns empty data.  (Prasad Chakka via dhruba)
 
-    HIVE-232. Metastore.warehouse configuration should use inherited 
+    HIVE-232. Metastore.warehouse configuration should use inherited
     hadoop configuration. (Prasad Chakka via dhruba)
 
     HIVE-239. Check that replace columns in alter table does not have names
@@ -566,7 +569,7 @@
 
     HIVE-256. Fix in map side aggregation wherein we were not including
     private members of the records in calculating the avg size of the
-    rows and we were also not estimating variable length columns 
+    rows and we were also not estimating variable length columns
     properly. (Namit Jain via athusoo)
 
     HIVE-262. Fix for joins wherein a merged join having and outer or

Modified: hadoop/hive/trunk/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java (original)
+++ hadoop/hive/trunk/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java Sun May  3 02:37:59 2009
@@ -44,12 +44,14 @@
   private Driver qp;
   private FsShell dfs;
   private LogHelper console;
+  private Configuration conf;
 
   public CliDriver() {
     SessionState ss = SessionState.get();
     sp = new SetProcessor();
     qp = new Driver();
-    dfs = new FsShell(ss != null ? ss.getConf() : new Configuration ());
+    conf = (ss != null) ? ss.getConf() : new Configuration ();
+    dfs = new FsShell(conf);
     Log LOG = LogFactory.getLog("CliDriver");
     console = new LogHelper(LOG);
   }
@@ -166,7 +168,7 @@
         ss.delete_resource(t);
       }
 
-    } else if (!cmd_trimmed.equals("")) {
+    } else if (!StringUtils.isBlank(cmd_trimmed)) {
       PrintStream out = ss.out;
 
       long start = System.currentTimeMillis();
@@ -199,18 +201,21 @@
   }
 
   public int processLine(String line) {
-    int ret = 0;
+    int lastRet = 0, ret = 0;
+
     for(String oneCmd: line.split(";")) {
-      if(oneCmd.equals(""))
+
+      if(StringUtils.isBlank(oneCmd))
         continue;
       
       ret = processCmd(removeComments(oneCmd));
-      if(ret != 0) {
-        // ignore anything after the first failed command
+      lastRet = ret;
+      boolean ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS);
+      if(ret != 0 && !ignoreErrors) {
         return ret;
       }
     }
-    return 0;
+    return lastRet;
   }
 
   private String removeComments(String oneCmd) {
@@ -220,18 +225,12 @@
   public int processReader(BufferedReader r) throws IOException {
     String line;
     StringBuffer qsb = new StringBuffer();
-    int ret = 0;
 
     while((line = r.readLine()) != null) {
       qsb.append(line + "\n");
     }
 
-    ret = processLine(qsb.toString());
-    if (ret != 0) {
-      return ret;
-    }
-
-    return 0;
+    return (processLine(qsb.toString()));
   }
 
   public static void main(String[] args) throws IOException {

Modified: hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Sun May  3 02:37:59 2009
@@ -59,7 +59,7 @@
     SCRIPTWRAPPER("hive.exec.script.wrapper", null),
     PLAN("hive.exec.plan", null),
     SCRATCHDIR("hive.exec.scratchdir", "/tmp/"+System.getProperty("user.name")+"/hive"),
-    SUBMITVIACHILD("hive.exec.submitviachild", "false"),
+    SUBMITVIACHILD("hive.exec.submitviachild", false),
     SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000),
     COMPRESSRESULT("hive.exec.compress.output", false),
     COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false),
@@ -81,6 +81,9 @@
     METASTOREURIS("hive.metastore.uris", ""),
     METASTOREPWD("javax.jdo.option.ConnectionPassword", ""),
 
+    // CLI
+    CLIIGNOREERRORS("hive.cli.errors.ignore", false),
+
     // Things we log in the jobconf
 
     // session identifier
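Alongside the FileSystem changes, the commit adds a CLI option, hive.cli.errors.ignore. When
it is true, CliDriver.processLine (see the diff above) keeps executing the remaining
semicolon-separated commands after a failure and returns the status of the last one. A small
sketch of reading the flag, mirroring the call CliDriver makes; the surrounding class is
illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;

    public class IgnoreErrorsSketch {
      // True when the CLI should continue past failed commands (default: false).
      static boolean shouldIgnoreErrors(Configuration conf) {
        return HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS);
      }
    }

From a running CLI session the same flag can be toggled with "set hive.cli.errors.ignore=true;".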

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Sun May  3 02:37:59 2009
@@ -269,33 +269,49 @@
       public void create_table(Table tbl) throws AlreadyExistsException, MetaException, InvalidObjectException {
         this.incrementCounter("create_table");
         logStartFunction("create_table: db=" + tbl.getDbName() + " tbl=" + tbl.getTableName());
-        boolean success = false;
+
         if(!MetaStoreUtils.validateName(tbl.getTableName()) ||
             !MetaStoreUtils.validateColNames(tbl.getSd().getCols()) ||
-             (tbl.getPartitionKeys() != null && !MetaStoreUtils.validateColNames(tbl.getPartitionKeys()))) {
+             (tbl.getPartitionKeys() != null &&
+              !MetaStoreUtils.validateColNames(tbl.getPartitionKeys()))) {
             throw new InvalidObjectException(tbl.getTableName() + " is not a valid object name");
         }
+        
+        Path tblPath = null;
+        boolean success = false, madeDir = false;
         try {
           getMS().openTransaction();
-          Path tblPath = null;
           if(tbl.getSd().getLocation() == null || tbl.getSd().getLocation().isEmpty()) {
             tblPath = wh.getDefaultTablePath(tbl.getDbName(), tbl.getTableName());
             tbl.getSd().setLocation(tblPath.toString());
           } else {
+            if (!isExternal(tbl)) {
+              LOG.warn("Location: " + tbl.getSd().getLocation() +
+                       "specified for non-external table:" + tbl.getTableName());
+            }
             tblPath = new Path(tbl.getSd().getLocation());
           }
           // get_table checks whether database exists, it should be moved here
           if(is_table_exists(tbl.getDbName(), tbl.getTableName())) {
             throw new AlreadyExistsException("Table " + tbl.getTableName() + " already exists");
           }
-          getMS().createTable(tbl);
-          if(wh.mkdirs(tblPath)) {
-            success = getMS().commitTransaction();
+
+          if(!wh.isDir(tblPath)) {
+            if(!wh.mkdirs(tblPath)) {
+              throw new MetaException (tblPath + " is not a directory or unable to create one");
+            }
+            madeDir = true;
           }
+
+          getMS().createTable(tbl);
+          success = getMS().commitTransaction();
       
         } finally {
           if(!success) {
             getMS().rollbackTransaction();
+            if(madeDir) {
+              wh.deleteDir(tblPath, true);
+            }
           }
         }
       }
@@ -385,7 +401,8 @@
           }
         }
         Partition part = new Partition();
-        boolean success = false;
+        boolean success = false, madeDir = false;
+        Path partLocation = null;
         try {
           getMS().openTransaction();
           part = new Partition();
@@ -399,14 +416,23 @@
           }
 
           part.setSd(tbl.getSd());
-          Path partLocation = new Path(tbl.getSd().getLocation(), Warehouse.makePartName(tbl.getPartitionKeys(), part_vals));
+          partLocation = new Path(tbl.getSd().getLocation(),
+                                  Warehouse.makePartName(tbl.getPartitionKeys(), part_vals));
           part.getSd().setLocation(partLocation.toString());
 
-          Partition old_part = this.get_partition(part.getDbName(), part.getTableName(), part.getValues());
+          Partition old_part = this.get_partition(part.getDbName(),
+                                                  part.getTableName(), part.getValues());
           if( old_part != null) {
             throw new AlreadyExistsException("Partition already exists:" + part);
           }
           
+          if(!wh.isDir(partLocation)) {
+            if(!wh.mkdirs(partLocation)) {
+              throw new MetaException (partLocation + " is not a directory or unable to create one");
+            }
+            madeDir = true;
+          }
+
           success = getMS().addPartition(part);
           if(success) {
             success = getMS().commitTransaction();
@@ -414,9 +440,9 @@
         } finally {
           if(!success) {
             getMS().rollbackTransaction();
-          } else {
-            Path path = new Path(part.getSd().getLocation());
-            wh.mkdirs(path);
+            if(madeDir) {
+              wh.deleteDir(partLocation, true);
+            }
           }
         }
         return part;
@@ -450,7 +476,8 @@
           AlreadyExistsException, MetaException {
         this.incrementCounter("add_partition");
         logStartFunction("add_partition", part.getDbName(), part.getTableName());
-        boolean success = false;
+        boolean success = false, madeDir = false;
+        Path partLocation = null;
         try {
           getMS().openTransaction();
           Partition old_part = this.get_partition(part.getDbName(), part.getTableName(), part.getValues());
@@ -461,21 +488,29 @@
           if(tbl == null) {
             throw new InvalidObjectException("Unable to add partition because table or database do not exist");
           }
-          if (part.getSd().getLocation() == null) {
+          partLocation = new Path(part.getSd().getLocation());
+          if (partLocation == null) {
             // set default location if not specified
-            part.getSd().setLocation(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues()));
+            String partLocStr = Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues());
+            partLocation = new Path(partLocStr);
+            part.getSd().setLocation(partLocStr);
           }
-          // add partition
-          success = getMS().addPartition(part);
-          if(success) {
-            success = getMS().commitTransaction();
+
+          if(!wh.isDir(partLocation)) {
+            if(!wh.mkdirs(partLocation)) {
+              throw new MetaException (partLocation + " is not a directory or unable to create one");
+            }
+            madeDir = true;
           }
+
+          success = getMS().addPartition(part) && getMS().commitTransaction();
+
         } finally {
           if(!success) {
             getMS().rollbackTransaction();
-          } else {
-            Path path = new Path(part.getSd().getLocation());
-            wh.mkdirs(path);
+            if(madeDir) {
+              wh.deleteDir(partLocation, true);
+            }
           }
         }
         return part;
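The metastore changes above share one pattern: the table or partition directory is now created
before the metastore transaction commits, and is deleted again if the transaction rolls back,
instead of being created only after a successful commit. A self-contained sketch of that
pattern using plain FileSystem calls; the metadata commit is stubbed out and the names are
illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CreateWithRollbackSketch {
      static void createWithRollback(Path location, Configuration conf) throws IOException {
        FileSystem fs = location.getFileSystem(conf); // FS derived from the supplied location
        boolean success = false, madeDir = false;
        try {
          if (!fs.exists(location)) {
            if (!fs.mkdirs(location)) {
              throw new IOException("unable to create " + location);
            }
            madeDir = true;                           // remember that we created it
          }
          success = commitMetadata();                 // stand-in for getMS().commitTransaction()
        } finally {
          if (!success && madeDir) {
            fs.delete(location, true);                // undo only what this call created
          }
        }
      }

      private static boolean commitMetadata() { return true; } // hypothetical stub
    }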

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java Sun May  3 02:37:59 2009
@@ -43,11 +43,12 @@
  */
 public class Warehouse {
   private Path whRoot;
-  private FileSystem fs;
+  private Configuration conf;
 
   public static final Log LOG = LogFactory.getLog("hive.metastore.warehouse");
 
   public Warehouse(Configuration conf) throws MetaException {
+    this.conf = conf;
     String whRootString =  HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE);
     if(StringUtils.isBlank(whRootString)) {
       throw new MetaException(HiveConf.ConfVars.METASTOREWAREHOUSE.varname + " is not set in the config or blank");
@@ -59,19 +60,13 @@
     // then use the default file system as specified by the Configuration
     try {
       if ((uri.getScheme() == null) && (uri.getAuthority() == null)) {
-        fs = FileSystem.get(conf);
+        FileSystem fs = FileSystem.get(conf);
         whRoot = new Path(fs.getUri().toString(), whRootString);
-      } else {
-        fs  = whRoot.getFileSystem(conf);
       }
     } catch (IOException e) {
       MetaStoreUtils.logAndThrowMetaException(e);
     }
   }
-  
-  public FileSystem getFs() {
-    return fs;
-  }
 
   public Path getDefaultDatabasePath(String dbName) {
     if(dbName.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
@@ -84,8 +79,18 @@
     return new Path(getDefaultDatabasePath(dbName), tableName.toLowerCase());
   }
 
+  private FileSystem getFs(Path f) throws MetaException {
+    try {
+      return f.getFileSystem(conf);
+    } catch (IOException e) {
+      MetaStoreUtils.logAndThrowMetaException(e);
+    }
+    return null;
+  }
+
   public boolean mkdirs(Path f) throws MetaException {
     try {
+      FileSystem fs = getFs(f);
       LOG.debug("Creating directory if it doesn't exist: " + f);
       return (fs.mkdirs(f) || fs.getFileStatus(f).isDir());
     } catch (IOException e) {
@@ -97,6 +102,7 @@
   public boolean deleteDir(Path f, boolean recursive) throws MetaException {
     LOG.info("deleting  " + f);
     try {
+      FileSystem fs = getFs(f);
       if(!fs.exists(f)) {
         return false;
       }
@@ -141,6 +147,7 @@
   
   public boolean isDir(Path f) throws MetaException {
     try {
+      FileSystem fs = getFs(f);
       FileStatus fstatus = fs.getFileStatus(f);
       if(!fstatus.isDir()) {
         return false;

Modified: hadoop/hive/trunk/ql/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/build.xml?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/build.xml (original)
+++ hadoop/hive/trunk/ql/build.xml Sun May  3 02:37:59 2009
@@ -39,6 +39,7 @@
     <pathelement location="${hive.conf.dir}"/>
     <pathelement location="${hive.root}/cli/lib/jline-0.9.94.jar"/>
     <fileset dir="${hive.root}" includes="testlibs/*.jar"/>
+    <fileset dir="${hadoop.root}/lib" includes="*.jar"/>
     <path refid="classpath"/>
   </path>
 

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Context.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Context.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Context.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Context.java Sun May  3 02:37:59 2009
@@ -35,7 +35,7 @@
 public class Context {
   private Path resFile;
   private Path resDir;
-  private FileSystem fs;
+  private FileSystem resFs;
   static final private Log LOG = LogFactory.getLog("hive.ql.Context");
   private Path[] resDirPaths;
   private int    resDirFilesNum;
@@ -44,15 +44,10 @@
   private HiveConf conf;
   
   public Context(HiveConf conf) {
-    try {
-      this.conf = conf;
-      fs = FileSystem.get(conf);
-      initialized = false;
-      resDir = null;
-      resFile = null;
-    } catch (IOException e) {
-      LOG.info("Context creation error: " + StringUtils.stringifyException(e));
-    }
+    this.conf = conf;
+    initialized = false;
+    resDir = null;
+    resFile = null;
   }
 
   public void makeScratchDir() throws Exception {
@@ -60,6 +55,7 @@
     int randomid = Math.abs(rand.nextInt()%rand.nextInt());
     scratchDir = conf.getVar(HiveConf.ConfVars.SCRATCHDIR) + File.separator + randomid;
     Path tmpdir = new Path(scratchDir);
+    FileSystem fs = tmpdir.getFileSystem(conf);
     fs.mkdirs(tmpdir);
   }
 
@@ -69,6 +65,7 @@
 
   public void removeScratchDir() throws Exception {
     Path tmpdir = new Path(scratchDir);
+    FileSystem fs = tmpdir.getFileSystem(conf);
     fs.delete(tmpdir, true);
   }
 
@@ -113,6 +110,7 @@
     {
       try
       {
+        FileSystem fs = resDir.getFileSystem(conf);
         fs.delete(resDir, true);
       } catch (IOException e) {
         LOG.info("Context clear error: " + StringUtils.stringifyException(e));
@@ -123,6 +121,7 @@
     {
       try
       {
+        FileSystem fs = resFile.getFileSystem(conf);
       	fs.delete(resFile, false);
       } catch (IOException e) {
         LOG.info("Context clear error: " + StringUtils.stringifyException(e));
@@ -142,12 +141,14 @@
         initialized = true;
         if ((resFile == null) && (resDir == null)) return null;
       
-        if (resFile != null)
-          return (DataInput)fs.open(resFile);
+        if (resFile != null) {
+          return (DataInput)resFile.getFileSystem(conf).open(resFile);
+        }
         
-        FileStatus status = fs.getFileStatus(resDir);
+        resFs = resDir.getFileSystem(conf);
+        FileStatus status = resFs.getFileStatus(resDir);
         assert status.isDir();
-        FileStatus[] resDirFS = fs.globStatus(new Path(resDir + "/*"));
+        FileStatus[] resDirFS = resFs.globStatus(new Path(resDir + "/*"));
         resDirPaths = new Path[resDirFS.length];
         int pos = 0;
         for (FileStatus resFS: resDirFS)
@@ -155,7 +156,7 @@
             resDirPaths[pos++] = resFS.getPath();
         if (pos == 0) return null;
         
-        return (DataInput)fs.open(resDirPaths[resDirFilesNum++]);
+        return (DataInput)resFs.open(resDirPaths[resDirFilesNum++]);
       }
       else {
         return getNextStream();
@@ -174,7 +175,7 @@
     {
       if (resDir != null && resDirFilesNum < resDirPaths.length && 
           (resDirPaths[resDirFilesNum] != null))
-        return (DataInput)fs.open(resDirPaths[resDirFilesNum++]);
+        return (DataInput)resFs.open(resDirPaths[resDirFilesNum++]);
     } catch (FileNotFoundException e) {
       LOG.info("getNextStream error: " + StringUtils.stringifyException(e));
       return null;

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Sun May  3 02:37:59 2009
@@ -87,10 +87,8 @@
 
     // Create the db
     Hive db;
-    FileSystem fs;
     try {
       db = Hive.get(conf);
-      fs = FileSystem.get(conf);
 
       createTableDesc crtTbl = work.getCreateTblDesc();
       if (crtTbl != null) {
@@ -114,22 +112,22 @@
       
       MsckDesc msckDesc = work.getMsckDesc();
       if (msckDesc != null) {
-        return msck(db, fs, msckDesc);
+        return msck(db, msckDesc);
       }      
 
       descTableDesc descTbl = work.getDescTblDesc();
       if (descTbl != null) {
-        return describeTable(db, fs, descTbl);
+        return describeTable(db, descTbl);
       }
 
       showTablesDesc showTbls = work.getShowTblsDesc();
       if (showTbls != null) {
-        return showTables(db, fs, showTbls);
+        return showTables(db, showTbls);
       }
 
       showPartitionsDesc showParts = work.getShowPartsDesc();
       if (showParts != null) {
-        return showPartitions(db, fs, showParts);
+        return showPartitions(db, showParts);
       }
 
     } catch (InvalidTableException e) {
@@ -179,16 +177,15 @@
    * are either missing on disk on in the metastore.
    * 
    * @param db The database in question.
-   * @param fs FileSystem that will contain the file written.
    * @param msckDesc Information about the tables and partitions
    * we want to check for.
    * @return Returns 0 when execution succeeds and above 0 if it fails.
    */
-  private int msck(Hive db, FileSystem fs, MsckDesc msckDesc) {
+  private int msck(Hive db, MsckDesc msckDesc) {
     
     CheckResult result = new CheckResult();
     try {
-      HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db, fs);
+      HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db);
       checker.checkMetastore(
         MetaStoreUtils.DEFAULT_DATABASE_NAME, msckDesc.getTableName(), 
         msckDesc.getPartitionSpec(),
@@ -203,6 +200,7 @@
             
       BufferedWriter resultOut = null;
       try {
+        FileSystem fs = msckDesc.getResFile().getFileSystem(conf);
         resultOut = new BufferedWriter(
             new OutputStreamWriter(fs.create(msckDesc.getResFile())));
         
@@ -265,12 +263,11 @@
    * Write a list of partitions to a file.
    * 
    * @param db The database in question.
-   * @param fs FileSystem that will contain the file written.
    * @param showParts These are the partitions we're interested in.
    * @return Returns 0 when execution succeeds and above 0 if it fails.
    * @throws HiveException Throws this exception if an unexpected error occurs.
    */
-  private int showPartitions(Hive db, FileSystem fs,
+  private int showPartitions(Hive db,
       showPartitionsDesc showParts) throws HiveException {
     // get the partitions for the table and populate the output
     String tabName = showParts.getTabName();
@@ -289,6 +286,7 @@
 
     // write the results in the file
     try {
+      FileSystem fs = showParts.getResFile().getFileSystem(conf);
       DataOutput outStream = (DataOutput) fs.create(showParts.getResFile());
       Iterator<String> iterParts = parts.iterator();
 
@@ -315,12 +313,11 @@
    * Write a list of the tables in the database to a file.
    * 
    * @param db The database in question.
-   * @param fs FileSystem that will contain the file written.
    * @param showTbls These are the tables we're interested in.
    * @return Returns 0 when execution succeeds and above 0 if it fails.
    * @throws HiveException Throws this exception if an unexpected error occurs.
    */
-  private int showTables(Hive db, FileSystem fs, showTablesDesc showTbls)
+  private int showTables(Hive db, showTablesDesc showTbls)
       throws HiveException {
     // get the tables for the desired pattenn - populate the output stream
     List<String> tbls = null;
@@ -333,6 +330,7 @@
 
     // write the results in the file
     try {
+      FileSystem fs = showTbls.getResFile().getFileSystem(conf);
       DataOutput outStream = (DataOutput)fs.create(showTbls.getResFile());
       SortedSet<String> sortedTbls = new TreeSet<String>(tbls);
       Iterator<String> iterTbls = sortedTbls.iterator();
@@ -359,12 +357,11 @@
    * Write the description of a table to a file.
    * 
    * @param db The database in question.
-   * @param fs FileSystem that will contain the file written.
    * @param descTbl This is the table we're interested in.
    * @return Returns 0 when execution succeeds and above 0 if it fails.
    * @throws HiveException Throws this exception if an unexpected error occurs.
    */
-  private int describeTable(Hive db, FileSystem fs, descTableDesc descTbl)
+  private int describeTable(Hive db, descTableDesc descTbl)
       throws HiveException {
     String colPath = descTbl.getTableName();
     String tableName = colPath.substring(0,
@@ -375,6 +372,7 @@
     Partition part = null;
     try {
       if (tbl == null) {
+        FileSystem fs = descTbl.getResFile().getFileSystem(conf);
         DataOutput outStream = (DataOutput) fs.open(descTbl.getResFile());
         String errMsg = "Table " + tableName + " does not exist";
         outStream.write(errMsg.getBytes("UTF-8"));
@@ -384,6 +382,7 @@
       if (descTbl.getPartSpec() != null) {
         part = db.getPartition(tbl, descTbl.getPartSpec(), false);
         if (part == null) {
+          FileSystem fs = descTbl.getResFile().getFileSystem(conf);
           DataOutput outStream = (DataOutput) fs.open(descTbl.getResFile());
           String errMsg = "Partition " + descTbl.getPartSpec() + " for table "
               + tableName + " does not exist";
@@ -414,6 +413,7 @@
       } else {
         cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
       }
+      FileSystem fs = descTbl.getResFile().getFileSystem(conf);
       DataOutput outStream = (DataOutput)fs.create(descTbl.getResFile());
       Iterator<FieldSchema> iterCols = cols.iterator();
       while (iterCols.hasNext()) {

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java Sun May  3 02:37:59 2009
@@ -295,11 +295,12 @@
    */
   public long getTotalInputFileSize(JobConf job, mapredWork work) throws IOException {
     long r = 0;
-    FileSystem fs = FileSystem.get(job);
     // For each input path, calculate the total size.
     for (String path: work.getPathToAliases().keySet()) {
       try {
-        ContentSummary cs = fs.getContentSummary(new Path(path));
+        Path p = new Path(path);
+        FileSystem fs = p.getFileSystem(job);
+        ContentSummary cs = fs.getContentSummary(p);
         r += cs.getLength();
       } catch (IOException e) {
         LOG.info("Cannot get size of " + path + ". Safely ignored.");
@@ -336,8 +337,8 @@
     }
 
     String hiveScratchDir = HiveConf.getVar(job, HiveConf.ConfVars.SCRATCHDIR);
-    String jobScratchDir = hiveScratchDir + Utilities.randGen.nextInt();
-    FileOutputFormat.setOutputPath(job, new Path(jobScratchDir));
+    Path jobScratchDir = new Path(hiveScratchDir + Utilities.randGen.nextInt());
+    FileOutputFormat.setOutputPath(job, jobScratchDir);
     job.setMapperClass(ExecMapper.class);
 
     job.setMapOutputKeyClass(HiveKey.class);
@@ -361,21 +362,19 @@
     }
 
     int returnVal = 0;
-    FileSystem fs = null;
     RunningJob rj = null, orig_rj = null;
     boolean success = false;
 
     try {
-      fs = FileSystem.get(job);
-
       // if the input is empty exit gracefully
       Path[] inputPaths = FileInputFormat.getInputPaths(job);
       boolean emptyInput = true;
       for (Path inputP : inputPaths) {
-        if (!fs.exists(inputP))
+        FileSystem inputFs = inputP.getFileSystem(job);
+        if (!inputFs.exists(inputP))
           continue;
 
-        FileStatus[] fStats = fs.listStatus(inputP);
+        FileStatus[] fStats = inputFs.listStatus(inputP);
         for (FileStatus fStat : fStats) {
           if (fStat.getLen() > 0) {
             emptyInput = false;
@@ -445,7 +444,8 @@
     } finally {
       Utilities.clearMapRedWork(job);
       try {
-        fs.delete(new Path(jobScratchDir), true);
+        FileSystem fs = jobScratchDir.getFileSystem(job);
+        fs.delete(jobScratchDir, true);
         if (returnVal != 0 && rj != null) {
           rj.killJob();
         }
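In ExecDriver the same idea is applied when summing input sizes: a FileSystem is resolved per
input path, so inputs that live on a non-default file system are still counted. A
self-contained sketch of the resulting loop; the method name and signature are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class InputSizeSketch {
      static long totalInputSize(Iterable<String> inputPaths, Configuration conf) {
        long total = 0;
        for (String s : inputPaths) {
          try {
            Path p = new Path(s);
            FileSystem fs = p.getFileSystem(conf);    // per-path FS, not FileSystem.get(conf)
            ContentSummary cs = fs.getContentSummary(p);
            total += cs.getLength();
          } catch (IOException e) {
            // as in the commit: a path whose size cannot be determined is skipped
          }
        }
        return total;
      }
    }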

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java Sun May  3 02:37:59 2009
@@ -67,28 +67,26 @@
   private int maxRows = 100;
   
   public void initialize (HiveConf conf) {
-   	super.initialize(conf);
+    super.initialize(conf);
     currRecReader = null;
     
-   	try {
-       // Create a file system handle
-       fs = FileSystem.get(conf);   
-       job = new JobConf(conf, ExecDriver.class);
+    try {
+      job = new JobConf(conf, ExecDriver.class);
        
-	 	   mSerde = new LazySimpleSerDe();
-       Properties mSerdeProp = new Properties();
-       mSerdeProp.put(Constants.SERIALIZATION_FORMAT, "" + Utilities.tabCode);
-       mSerdeProp.put(Constants.SERIALIZATION_NULL_FORMAT, ((fetchWork)work).getSerializationNullFormat());
-       mSerde.initialize(job, mSerdeProp);
+      mSerde = new LazySimpleSerDe();
+      Properties mSerdeProp = new Properties();
+      mSerdeProp.put(Constants.SERIALIZATION_FORMAT, "" + Utilities.tabCode);
+      mSerdeProp.put(Constants.SERIALIZATION_NULL_FORMAT, ((fetchWork)work).getSerializationNullFormat());
+      mSerde.initialize(job, mSerdeProp);
        
-       currPath = null;
-       currTbl = null;
-       currPart = null;
-       iterPath = null;
-       iterPartDesc = null;
-       totalRows = 0;
-       tblDataDone = false;
-       rowWithPart = new Object[2];
+      currPath = null;
+      currTbl = null;
+      currPart = null;
+      iterPath = null;
+      iterPartDesc = null;
+      totalRows = 0;
+      tblDataDone = false;
+      rowWithPart = new Object[2];
     } catch (Exception e) {
       // Bail out ungracefully - we should never hit
       // this here - but would have hit it in SemanticAnalyzer
@@ -98,8 +96,8 @@
   }
   
   public int execute() {
-  	assert false;
-  	return 0;
+    assert false;
+    return 0;
   }
   /**
    * Return the tableDesc of the fetchWork
@@ -122,23 +120,22 @@
         inputFormats.put(inputFormatClass, newInstance);
       } catch (Exception e) {
         throw new IOException("Cannot create an instance of InputFormat class " + inputFormatClass.getName()
-                               + " as specified in mapredWork!");
+                              + " as specified in mapredWork!");
       }
     }
     return inputFormats.get(inputFormatClass);
   }
   
   private int splitNum;
-  private FileSystem fs;  
   private RecordReader<WritableComparable, Writable> currRecReader;
   private InputSplit[] inputSplits;
   private InputFormat  inputFormat;
   private JobConf      job;
-	private WritableComparable key; 
-	private Writable value;
-	private Deserializer  serde;
-	private LazySimpleSerDe mSerde;
-	private int totalRows;
+  private WritableComparable key; 
+  private Writable value;
+  private Deserializer  serde;
+  private LazySimpleSerDe mSerde;
+  private int totalRows;
   private Iterator<Path> iterPath;
   private Iterator<partitionDesc> iterPartDesc; 
   private Path currPath;
@@ -167,7 +164,7 @@
     
     rowWithPart[1] = partValues;
     rowObjectInspector = ObjectInspectorFactory.getUnionStructObjectInspector(Arrays.asList(new StructObjectInspector[]{
-                                                                                              rowObjectInspector, partObjectInspector}));
+          rowObjectInspector, partObjectInspector}));
   }
 
   private void getNextPath() throws Exception {
@@ -177,16 +174,17 @@
         if (!tblDataDone) {
           currPath = work.getTblDir();
           currTbl = work.getTblDesc();
+          FileSystem fs = currPath.getFileSystem(conf);
           if (fs.exists(currPath)) 
-          {
-            FileStatus[] fStats = fs.listStatus(currPath);
-            for (FileStatus fStat:fStats) {
-              if (fStat.getLen() > 0) {
-                tblDataDone = true;
-                break;
+            {
+              FileStatus[] fStats = fs.listStatus(currPath);
+              for (FileStatus fStat:fStats) {
+                if (fStat.getLen() > 0) {
+                  tblDataDone = true;
+                  break;
+                }
               }
             }
-          }
 
           if (!tblDataDone) currPath = null;
           return;
@@ -195,65 +193,65 @@
           currPath = null;
         }
         return;
-      }
-      else {
+      } else {
         iterPath = work.getPartDir().iterator();
         iterPartDesc = work.getPartDesc().iterator();
       }
     }
 
-		while (iterPath.hasNext()) {
-			Path nxt = iterPath.next();
+    while (iterPath.hasNext()) {
+      Path nxt = iterPath.next();
       partitionDesc prt = iterPartDesc.next();
-		  if (fs.exists(nxt)) 
-      {
-        FileStatus[] fStats = fs.listStatus(nxt);
-        for (FileStatus fStat:fStats) {
-          if (fStat.getLen() > 0) {
-            currPath = nxt;
-            currPart = prt;
-            return;
+      FileSystem fs = nxt.getFileSystem(conf);
+      if (fs.exists(nxt)) 
+        {
+          FileStatus[] fStats = fs.listStatus(nxt);
+          for (FileStatus fStat:fStats) {
+            if (fStat.getLen() > 0) {
+              currPath = nxt;
+              currPart = prt;
+              return;
+            }
           }
         }
-      }
-		}
-	}
+    }
+  }
   
- 	private RecordReader<WritableComparable, Writable> getRecordReader() throws Exception {
- 		if (currPath == null) {
- 			getNextPath();
- 			if (currPath == null)
- 				return null;
+  private RecordReader<WritableComparable, Writable> getRecordReader() throws Exception {
+    if (currPath == null) {
+      getNextPath();
+      if (currPath == null)
+        return null;
 
- 			FileInputFormat.setInputPaths(job, currPath);
+      FileInputFormat.setInputPaths(job, currPath);
       tableDesc tmp = currTbl;
       if (tmp == null)
         tmp = currPart.getTableDesc();
- 			inputFormat = getInputFormatFromCache(tmp.getInputFileFormatClass(), job);
- 			inputSplits = inputFormat.getSplits(job, 1); 		
- 			splitNum = 0;
+      inputFormat = getInputFormatFromCache(tmp.getInputFileFormatClass(), job);
+      inputSplits = inputFormat.getSplits(job, 1); 		
+      splitNum = 0;
       serde = tmp.getDeserializerClass().newInstance();
       serde.initialize(job, tmp.getProperties());
       LOG.debug("Creating fetchTask with deserializer typeinfo: " + serde.getObjectInspector().getTypeName());
       LOG.debug("deserializer properties: " + tmp.getProperties());
       if (!tblDataDone)
         setPrtnDesc();
- 		}
+    }
  		
- 		if (splitNum >= inputSplits.length) {
- 			if (currRecReader != null) {
- 				currRecReader.close();
+    if (splitNum >= inputSplits.length) {
+      if (currRecReader != null) {
+        currRecReader.close();
         currRecReader = null;
       }
- 			currPath = null;
- 			return getRecordReader();
- 		}
+      currPath = null;
+      return getRecordReader();
+    }
  		
-		currRecReader = inputFormat.getRecordReader(inputSplits[splitNum++], job, Reporter.NULL);
-		key = currRecReader.createKey();
-		value = currRecReader.createValue();
-		return currRecReader;
-	}
+    currRecReader = inputFormat.getRecordReader(inputSplits[splitNum++], job, Reporter.NULL);
+    key = currRecReader.createKey();
+    value = currRecReader.createValue();
+    return currRecReader;
+  }
  
   /**
    * Return the maximum number of rows returned by fetch
@@ -270,7 +268,7 @@
   }
 	
   public boolean fetch(Vector<String> res) {
-  	try {
+    try {
       int numRows = 0;
       int rowsRet = maxRows;
       if ((work.getLimit() >= 0) && ((work.getLimit() - totalRows) < rowsRet))
@@ -283,18 +281,18 @@
         return false;
       }
 
-    	while (numRows < rowsRet) {
-  	    if (currRecReader == null) {
-  	  	  currRecReader = getRecordReader();
-  	  		if (currRecReader == null) {
+      while (numRows < rowsRet) {
+        if (currRecReader == null) {
+          currRecReader = getRecordReader();
+          if (currRecReader == null) {
             if (numRows == 0) 
-            	return false;
+              return false;
             totalRows += numRows;
             return true;
-    	    }
-  	    }
+          }
+        }
       	boolean ret = currRecReader.next(key, value);
-   	  	if (ret) {
+        if (ret) {
           if (tblDataDone) {
             Object obj = serde.deserialize(value);
             res.add(((Text)mSerde.serialize(obj, serde.getObjectInspector())).toString());
@@ -302,25 +300,25 @@
             rowWithPart[0] = serde.deserialize(value);
             res.add(((Text)mSerde.serialize(rowWithPart, rowObjectInspector)).toString());
           }
-   	  		numRows++;
-   	  	}
-   	  	else {
+          numRows++;
+        }
+        else {
           currRecReader.close();
           currRecReader = null;
-   	  		currRecReader = getRecordReader();
-   	  		if (currRecReader == null) {
+          currRecReader = getRecordReader();
+          if (currRecReader == null) {
             if (numRows == 0) 
-            	return false;
+              return false;
             totalRows += numRows;
             return true;
-    	    }
+          }
           else {
-        		key = currRecReader.createKey();
-        		value = currRecReader.createValue();
+            key = currRecReader.createKey();
+            value = currRecReader.createValue();
           }
       	}
       }
-    	totalRows += numRows;
+      totalRows += numRows;
       return true;
     }
     catch (Exception e) {

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java Sun May  3 02:37:59 2009
@@ -87,15 +87,14 @@
     
     if(workClass == mapredWork.class) {
 
-      String viachild = conf.getVar(HiveConf.ConfVars.SUBMITVIACHILD);
+      boolean viachild = conf.getBoolVar(HiveConf.ConfVars.SUBMITVIACHILD);
       
       try {
 
         // in local mode - or if otherwise so configured - always submit
         // jobs via separate jvm
         Task<T> ret = null;
-        if(conf.getVar(HiveConf.ConfVars.HADOOPJT).equals("local") ||
-           viachild.equals("true")) {
+        if(conf.getVar(HiveConf.ConfVars.HADOOPJT).equals("local") || viachild) {
           ret = (Task<T>)MapRedTask.class.newInstance();
         } else {
           ret = (Task<T>)ExecDriver.class.newInstance();

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java Sun May  3 02:37:59 2009
@@ -17,6 +17,7 @@
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.metadata.CheckResult.PartitionResult;
+import org.apache.hadoop.hive.conf.HiveConf;
 
 import com.facebook.thrift.TException;
 
@@ -30,12 +31,12 @@
   public static final Log LOG = LogFactory.getLog(HiveMetaStoreChecker.class);
 
   private Hive hive;
-  private FileSystem fs;
+  private HiveConf conf;
 
-  public HiveMetaStoreChecker(Hive hive, FileSystem fs) {
+  public HiveMetaStoreChecker(Hive hive) {
     super();
     this.hive = hive;
-    this.fs = fs;
+    this.conf = hive.getConf();
   }
 
   /**
@@ -116,6 +117,7 @@
     }
 
     for (Path dbPath : dbPaths) {
+      FileSystem fs = dbPath.getFileSystem(conf);
       FileStatus[] statuses = fs.listStatus(dbPath);
       for (FileStatus status : statuses) {
         
@@ -200,6 +202,7 @@
     throws IOException, HiveException {
 
     Path tablePath = table.getPath();
+    FileSystem fs = tablePath.getFileSystem(conf);
     if (!fs.exists(tablePath)) {
       result.getTablesNotOnFs().add(table.getName());
       return;
@@ -214,6 +217,7 @@
         continue;
       }
       Path partPath = partition.getPartitionPath();
+      fs = partPath.getFileSystem(conf);
       if (!fs.exists(partPath)) {
         PartitionResult pr = new PartitionResult();
         pr.setPartitionName(partition.getName());
@@ -257,6 +261,7 @@
     
     // we should now only have the unexpected folders left
     for (Path partPath : allPartDirs) {
+      FileSystem fs = partPath.getFileSystem(conf);
       String partitionName = getPartitionName(fs.makeQualified(tablePath), 
           partPath);
       
@@ -304,8 +309,14 @@
    * @param allDirs This set will contain the leaf paths at the end.
    * @throws IOException Thrown if we can't get lists from the fs.
    */
+
   private void getAllLeafDirs(Path basePath, Set<Path> allDirs)
       throws IOException {
+    getAllLeafDirs(basePath, allDirs, basePath.getFileSystem(conf));
+  }
+
+  private void getAllLeafDirs(Path basePath, Set<Path> allDirs, FileSystem fs)
+      throws IOException {
     
     FileStatus[] statuses = fs.listStatus(basePath);
 
@@ -315,7 +326,7 @@
 
     for (FileStatus status : statuses) {
       if (status.isDir()) {
-        getAllLeafDirs(status.getPath(), allDirs);
+        getAllLeafDirs(status.getPath(), allDirs, fs);
       }
     }
   }
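HiveMetaStoreChecker now resolves a FileSystem from each path it inspects and threads that
handle through the leaf-directory recursion instead of keeping a FileSystem field. A hedged,
self-contained sketch of that recursion shape; the real checker's logic differs in detail:

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class LeafDirsSketch {
      // Resolve the FileSystem once from the base path, then pass it down.
      static Set<Path> leafDirs(Path base, Configuration conf) throws IOException {
        Set<Path> dirs = new HashSet<Path>();
        collect(base, base.getFileSystem(conf), dirs);
        return dirs;
      }

      private static void collect(Path p, FileSystem fs, Set<Path> dirs) throws IOException {
        FileStatus[] children = fs.listStatus(p);
        if (children == null) {
          return;                                     // path missing or not listable
        }
        boolean hasSubDir = false;
        for (FileStatus c : children) {
          if (c.isDir()) {
            hasSubDir = true;
            collect(c.getPath(), fs, dirs);
          }
        }
        if (!hasSubDir) {
          dirs.add(p);                                // no sub-directories: a leaf
        }
      }
    }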

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java Sun May  3 02:37:59 2009
@@ -191,8 +191,10 @@
       // that the hive warehouse is also located in the local file system - but that's just a test case.
       URI copyURI;
       try {
+        // extract out the path name only from the scratchdir configuration
+        String scratchPath = (new Path(conf.getVar(HiveConf.ConfVars.SCRATCHDIR))).toUri().getPath();
         copyURI = new URI(toURI.getScheme(), toURI.getAuthority(),
-                          conf.getVar(HiveConf.ConfVars.SCRATCHDIR) + "/" + Utilities.randGen.nextInt(),
+                          scratchPath + "/" + Utilities.randGen.nextInt(),
                           null, null);                          
       } catch (URISyntaxException e) {
         // Has to use full name to make sure it does not conflict with org.apache.commons.lang.StringUtils
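The LoadSemanticAnalyzer fix keeps only the path component of the configured scratch directory
and builds the copy target on the destination URI's scheme and authority, so a fully qualified
hive.exec.scratchdir no longer leaks its own scheme into the target. A minimal sketch of that
URI construction; the names are illustrative:

    import java.net.URI;
    import java.net.URISyntaxException;
    import org.apache.hadoop.fs.Path;

    public class CopyTargetSketch {
      static URI copyTarget(URI toURI, String scratchDirConf, int randomId) throws URISyntaxException {
        // keep only the path portion, dropping any scheme/authority on the scratch dir
        String scratchPath = new Path(scratchDirConf).toUri().getPath();
        return new URI(toURI.getScheme(), toURI.getAuthority(),
                       scratchPath + "/" + randomId, null, null);
      }
    }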

Modified: hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java?rev=771017&r1=771016&r2=771017&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java (original)
+++ hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java Sun May  3 02:37:59 2009
@@ -39,8 +39,7 @@
   protected void setUp() throws Exception {
     super.setUp();
     hive = Hive.get();
-    fs = FileSystem.getLocal(hive.getConf());
-    checker = new HiveMetaStoreChecker(hive, fs);
+    checker = new HiveMetaStoreChecker(hive);
 
     partCols = new ArrayList<FieldSchema>();
     partCols.add(new FieldSchema(partDateName, Constants.STRING_TYPE_NAME, 
@@ -115,6 +114,7 @@
     assertTrue(result.getPartitionsNotInMs().isEmpty());
 
     // remove the table folder
+    fs = table.getPath().getFileSystem(hive.getConf());
     fs.delete(table.getPath(), true);
 
     // now this shouldn't find the path on the fs
@@ -185,6 +185,7 @@
     assertEquals(2, partitions.size());
     Partition partToRemove = partitions.get(0);
     Path partToRemovePath = new Path(partToRemove.getDataLocation().toString());
+    fs = partToRemovePath.getFileSystem(hive.getConf());
     fs.delete(partToRemovePath, true);
 
     result = new CheckResult();    


