hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r822548 - in /hadoop/hbase/trunk/src: contrib/ contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/ java/org/apache/hadoop/hbase/io/hfile/ java/org/apache/hadoop/hbase/mapreduce/ java/org/apache/hadoop/hbase/master/ java/org/apache/h...
Date Wed, 07 Oct 2009 01:16:16 GMT
Author: stack
Date: Wed Oct  7 01:16:15 2009
New Revision: 822548

URL: http://svn.apache.org/viewvc?rev=822548&view=rev
Log:
HBASE-1887 Update hbase trunk to latest on hadoop 0.21 branch so we can all test sync/append

Modified:
    hadoop/hbase/trunk/src/contrib/build-contrib.xml
    hadoop/hbase/trunk/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java

Modified: hadoop/hbase/trunk/src/contrib/build-contrib.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/contrib/build-contrib.xml?rev=822548&r1=822547&r2=822548&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/contrib/build-contrib.xml (original)
+++ hadoop/hbase/trunk/src/contrib/build-contrib.xml Wed Oct  7 01:16:15 2009
@@ -267,7 +267,7 @@
       -->
       <sysproperty key="user.dir" value="${build.test}/data"/>
       
-      <sysproperty key="fs.default.name" value="${fs.default.name}"/>
+      <sysproperty key="fs.defaultFS" value="${fs.default.name}"/>
       <sysproperty key="hbase.test.localoutputfile" value="${hbase.test.localoutputfile}"/>
       <sysproperty key="hbase.log.dir" value="${hbase.log.dir}"/> 
       <classpath refid="test.classpath"/>
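
This hunk tracks the Hadoop 0.21 rename of fs.default.name to fs.defaultFS.
A minimal sketch of setting the new key from client code while keeping the
deprecated key populated for pre-0.21 readers (the fallback set() and the
namenode URI are illustrative, not part of this commit):

    import org.apache.hadoop.conf.Configuration;

    public class FsKeyExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        String fsUri = "hdfs://localhost:8020"; // hypothetical namenode URI
        conf.set("fs.defaultFS", fsUri);        // 0.21-era key
        conf.set("fs.default.name", fsUri);     // back-compat for old readers
      }
    }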

Modified: hadoop/hbase/trunk/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java?rev=822548&r1=822547&r2=822548&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java (original)
+++ hadoop/hbase/trunk/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java Wed Oct  7 01:16:15 2009
@@ -86,7 +86,7 @@
     testDir = new File(path.toString());
     dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
     FileSystem filesystem = dfsCluster.getFileSystem();
-    conf.set("fs.default.name", filesystem.getUri().toString());      
+    conf.set("fs.defaultFS", filesystem.getUri().toString());      
     Path parentdir = filesystem.getHomeDirectory();
     conf.set(HConstants.HBASE_DIR, parentdir.toString());
     filesystem.mkdirs(parentdir);

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=822548&r1=822547&r2=822548&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Wed Oct  7 01:16:15 2009
@@ -1663,7 +1663,7 @@
       boolean checkFamily = cmd.hasOption("a");
       // get configuration, file system and get list of files
       HBaseConfiguration conf = new HBaseConfiguration();
-      conf.set("fs.default.name",
+      conf.set("fs.defaultFS",
         conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR));
       FileSystem fs = FileSystem.get(conf);
       ArrayList<Path> files = new ArrayList<Path>();

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java?rev=822548&r1=822547&r2=822548&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java Wed Oct  7 01:16:15 2009
@@ -136,12 +136,8 @@
      */
     @Override
     public void run() {
-        try {
-            context.setStatus("Closing");
-        } catch (IOException e) {
-            return;
-        }
-        while (!closed) {
+      context.setStatus("Closing");
+      while (!closed) {
         try {
           context.progress();            
           Thread.sleep(1000);
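
The try/catch around setStatus() could be dropped because setStatus() on the
new-API org.apache.hadoop.mapreduce.TaskAttemptContext no longer declares
IOException. The keep-alive pattern this thread implements, as a standalone
sketch (class and field names here are illustrative):

    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    // Pings the MR framework so a long-running close() is not
    // killed as a hung task.
    class KeepAliveThread extends Thread {
      private final TaskAttemptContext context;
      private volatile boolean closed = false;

      KeepAliveThread(final TaskAttemptContext context) {
        this.context = context;
      }

      @Override
      public void run() {
        context.setStatus("Closing");
        while (!closed) {
          try {
            context.progress();  // resets the task timeout clock
            Thread.sleep(1000);
          } catch (InterruptedException e) {
            break;               // interrupted; stop pinging
          }
        }
      }

      void halt() {
        closed = true;
        interrupt();
      }
    }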

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=822548&r1=822547&r2=822548&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Wed Oct  7 01:16:15 2009
@@ -189,8 +189,8 @@
     }
     this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
     // The filesystem hbase wants to use is probably not what is set into
-    // fs.default.name; its value is probably the default.
-    this.conf.set("fs.default.name", this.rootdir.toString());
+    // fs.defaultFS; its value is probably the default.
+    this.conf.set("fs.defaultFS", this.rootdir.toString());
     this.fs = FileSystem.get(conf);
     if (this.fs instanceof DistributedFileSystem) {
       // Make sure dfs is not in safe mode
@@ -594,6 +594,10 @@
   private void splitLogAfterStartup() throws IOException {
     Path logsDirPath =
       new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
+    if (!this.fs.exists(logsDirPath)) {
+      if (!this.fs.mkdirs(logsDirPath))
+        throw new IOException("Failed create of " + logsDirPath);
+    }
     FileStatus [] logFolders = this.fs.listStatus(logsDirPath);
     if (logFolders == null || logFolders.length == 0) {
       LOG.debug("No log files to split, proceeding...");
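
The new guard creates the log directory on a fresh filesystem instead of
letting the listStatus() below fail on first startup. The check-then-create
pattern as a standalone sketch (the helper name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class FsDirs {
      /** Ensure dir exists; fail loudly if it cannot be created. */
      static void ensureDir(final FileSystem fs, final Path dir)
      throws IOException {
        if (!fs.exists(dir) && !fs.mkdirs(dir)) {
          throw new IOException("Failed create of " + dir);
        }
      }
    }
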
@@ -706,7 +710,7 @@
       mw.put(new Text("hbase.regionserver.address"), new Text(rsAddress));
     }
     
-    return addConfig(mw, "fs.default.name");
+    return addConfig(mw, "fs.defaultFS");
   }
 
   private MapWritable addConfig(final MapWritable mw, final String key) {

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=822548&r1=822547&r2=822548&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Wed Oct  7 01:16:15 2009
@@ -25,7 +25,6 @@
 import java.lang.management.MemoryUsage;
 import java.lang.management.RuntimeMXBean;
 import java.lang.reflect.Constructor;
-import java.lang.reflect.Field;
 import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
@@ -759,18 +758,18 @@
         this.serverInfo.setServerAddress(hsa);
       }
       // Master sent us hbase.rootdir to use. Should be fully qualified
-      // path with file system specification included.  Set 'fs.default.name'
+      // path with file system specification included.  Set 'fs.defaultFS'
       // to match the filesystem on hbase.rootdir else underlying hadoop hdfs
       // accessors will be going against wrong filesystem (unless all is set
       // to defaults).
-      this.conf.set("fs.default.name", this.conf.get("hbase.rootdir"));
+      this.conf.set("fs.defaultFS", this.conf.get("hbase.rootdir"));
       this.fs = FileSystem.get(this.conf);
 
       // Register shutdown hook for HRegionServer, runs an orderly shutdown
      // when a kill signal is received
       Runtime.getRuntime().addShutdownHook(new ShutdownThread(this,
           Thread.currentThread()));
-      this.hdfsShutdownThread = suppressHdfsShutdownHook();
+      this.conf.setBoolean("fs.automatic.close", false);
 
       this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
       this.hlog = setupHLog();
@@ -983,16 +982,21 @@
 
     @Override
     public void run() {
-      LOG.info("Starting shutdown thread.");
+      LOG.info("Starting shutdown thread");
       
       // tell the region server to stop
-      instance.stop();
+      this.instance.stop();
 
       // Wait for main thread to exit.
-      Threads.shutdown(mainThread);
+      Threads.shutdown(this.mainThread);
+      try {
+        FileSystem.closeAll();
+      } catch (IOException e) {
+        e.printStackTrace();
+      }
 
       LOG.info("Shutdown thread complete");
-    }    
+    }
   }
 
   // We need to call HDFS shutdown when we are done shutting down
@@ -1029,43 +1033,6 @@
       }
     }
   }
-  
-  /**
-   * So, HDFS caches FileSystems so when you call FileSystem.get it's fast. In
-   * order to make sure things are cleaned up, it also creates a shutdown hook
-   * so that all filesystems can be closed when the process is terminated. This
-   * conveniently runs concurrently with our own shutdown handler, and
-   * therefore causes all the filesystems to be closed before the server can do
-   * all its necessary cleanup.
-   *
-   * The crazy dirty reflection in this method sneaks into the FileSystem cache
-   * and grabs the shutdown hook, removes it from the list of active shutdown
-   * hooks, and hangs onto it until later. Then, after we're properly done with
-   * our graceful shutdown, we can execute the hdfs hook manually to make sure
-   * loose ends are tied up.
-   *
-   * This seems quite fragile and susceptible to breaking if Hadoop changes
-   * anything about the way this cleanup is managed. Keep an eye on things.
-   */
-  private Thread suppressHdfsShutdownHook() {
-    try {
-      Field field = FileSystem.class.getDeclaredField ("clientFinalizer");
-      field.setAccessible(true);
-      Thread hdfsClientFinalizer = (Thread)field.get(null);
-      if (hdfsClientFinalizer == null) {
-        throw new RuntimeException("client finalizer is null, can't suppress!");
-      }
-      Runtime.getRuntime().removeShutdownHook(hdfsClientFinalizer);
-      return hdfsClientFinalizer;
-      
-    } catch (NoSuchFieldException nsfe) {
-      LOG.fatal("Couldn't find field 'clientFinalizer' in FileSystem!", nsfe);
-      throw new RuntimeException("Failed to suppress HDFS shutdown hook");
-    } catch (IllegalAccessException iae) {
-      LOG.fatal("Couldn't access field 'clientFinalizer' in FileSystem!", iae);
-      throw new RuntimeException("Failed to suppress HDFS shutdown hook");
-    }
-  }
 
   /**
    * Report the status of the server. A server is online once all the startup 
@@ -2540,4 +2507,4 @@
     doMain(args, regionServerClass);
   }
 
-}
\ No newline at end of file
+}
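
The reflection hack removed above (suppressHdfsShutdownHook) is superseded by
the fs.automatic.close flag set during startup: with automatic close disabled,
the FileSystem cache registers no shutdown hook of its own, so the region
server's ShutdownThread controls ordering and releases cached filesystems
itself via FileSystem.closeAll() once cleanup is done. A sketch of that
ordering, assuming a hypothetical Stoppable server handle:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    final class ShutdownOrdering {
      interface Stoppable { void stop(); }

      // Wire shutdown so filesystems close only after our own cleanup.
      static void install(final Configuration conf, final Stoppable server) {
        // Keep HDFS from closing cached filesystems behind our back.
        conf.setBoolean("fs.automatic.close", false);
        Runtime.getRuntime().addShutdownHook(new Thread() {
          @Override
          public void run() {
            server.stop();           // orderly server shutdown first
            try {
              FileSystem.closeAll(); // now safe; cleanup is complete
            } catch (IOException e) {
              e.printStackTrace();
            }
          }
        });
      }
    }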

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=822548&r1=822547&r2=822548&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java Wed Oct  7 01:16:15 2009
@@ -162,11 +162,15 @@
     final Progressable reporter)
   throws IOException {
     HRegionInfo info = region.regionInfo;
+    this.fs = fs;
     this.homedir = getStoreHomedir(basedir, info.getEncodedName(),
       family.getName());
+    if (!this.fs.exists(this.homedir)) {
+      if (!this.fs.mkdirs(this.homedir))
+        throw new IOException("Failed create of: " + this.homedir.toString());
+    }
     this.region = region;
     this.family = family;
-    this.fs = fs;
     this.conf = conf;
     this.blockcache = family.isBlockCacheEnabled();
     this.blocksize = family.getBlocksize();

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java?rev=822548&r1=822547&r2=822548&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java Wed Oct  7 01:16:15 2009
@@ -115,17 +115,37 @@
     // continue
   } 
 
+  /*
+   * Create dir and set its value into configuration.
+   * @param key Create dir under test for this key.  Set its fully-qualified
+   * value into the conf.
+   * @throws IOException
+   */
+  private void setupDFSConfig(final String key) throws IOException {
+    Path basedir =
+      new Path(this.conf.get(TEST_DIRECTORY_KEY, "test/build/data"));
+    FileSystem fs = FileSystem.get(this.conf);
+    Path dir = fs.makeQualified(new Path(basedir, key));
+    // Delete if exists.  May contain data from old tests.
+    if (fs.exists(dir)) if (!fs.delete(dir, true)) throw new IOException("Delete: " + dir);
+    if (!fs.mkdirs(dir)) throw new IOException("Create: " + dir);
+    this.conf.set(key, dir.toString());
+  }
+
   @Override
   protected void setUp() throws Exception {
     try {
-      if (startDfs) {
-        // start up the dfs
-        dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+      if (this.startDfs) {
+        /*
+        setupDFSConfig("dfs.namenode.name.dir");
+        setupDFSConfig("dfs.datanode.data.dir");
+        */
+        this.dfsCluster = new MiniDFSCluster(this.conf, 2, true, null);
 
         // mangle the conf so that the fs parameter points to the minidfs we
         // just started up
         FileSystem filesystem = dfsCluster.getFileSystem();
-        conf.set("fs.default.name", filesystem.getUri().toString());      
+        conf.set("fs.defaultFS", filesystem.getUri().toString());
         Path parentdir = filesystem.getHomeDirectory();
         conf.set(HConstants.HBASE_DIR, parentdir.toString());
         filesystem.mkdirs(parentdir);
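
The setup mangles the conf so HBase mounts the MiniDFSCluster it just booted.
The same wiring as a standalone sketch (a test-only pattern; the two-datanode
count matches the code above):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    final class MiniDfsBootstrap {
      // Boot a 2-datanode mini DFS and point the conf (and HBase) at it.
      static MiniDFSCluster start(final Configuration conf)
      throws IOException {
        MiniDFSCluster dfsCluster =
          new MiniDFSCluster(conf, 2, true, (String[]) null);
        FileSystem fs = dfsCluster.getFileSystem();
        conf.set("fs.defaultFS", fs.getUri().toString());
        Path homedir = fs.getHomeDirectory();
        conf.set(HConstants.HBASE_DIR, homedir.toString());
        fs.mkdirs(homedir);
        return dfsCluster;
      }
    }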

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java?rev=822548&r1=822547&r2=822548&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java Wed Oct  7 01:16:15 2009
@@ -862,7 +862,7 @@
       // mangle the conf so that the fs parameter points to the minidfs we
       // just started up
       FileSystem fs = dfsCluster.getFileSystem();
-      conf.set("fs.default.name", fs.getUri().toString());      
+      conf.set("fs.defaultFS", fs.getUri().toString());      
       Path parentdir = fs.getHomeDirectory();
       conf.set(HConstants.HBASE_DIR, parentdir.toString());
       fs.mkdirs(parentdir);

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java?rev=822548&r1=822547&r2=822548&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java Wed Oct  7 01:16:15 2009
@@ -112,7 +112,7 @@
     // Start up dfs
     this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
     this.fs = this.dfsCluster.getFileSystem();
-    conf.set("fs.default.name", fs.getUri().toString());
+    conf.set("fs.defaultFS", fs.getUri().toString());
     Path parentdir = fs.getHomeDirectory();
     conf.set(HConstants.HBASE_DIR, parentdir.toString());
     fs.mkdirs(parentdir);


