hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r1196114 - in /hbase/trunk: ./ conf/ src/docbkx/ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/master/ src/main/java/org/apache/hadoop/hbase/master/handler/ src/main/java/org/apache/hadoop/hbase/util/ src/test...
Date Tue, 01 Nov 2011 16:44:39 GMT
Author: stack
Date: Tue Nov  1 16:44:38 2011
New Revision: 1196114

URL: http://svn.apache.org/viewvc?rev=1196114&view=rev
Log:
HBASE-4714 Don't ship w/ icms enabled by default

Modified:
    hbase/trunk/CHANGES.txt
    hbase/trunk/conf/hbase-env.sh
    hbase/trunk/src/docbkx/performance.xml
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/HConstants.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/Merge.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java

Modified: hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/trunk/CHANGES.txt?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/CHANGES.txt (original)
+++ hbase/trunk/CHANGES.txt Tue Nov  1 16:44:38 2011
@@ -731,6 +731,7 @@ Release 0.92.0 - Unreleased
    HBASE-4699  Cleanup the UIs
    HBASE-4552  Remove trivial 0.90 deprecated code from 0.92 and trunk.
                (Jonathan Hsieh)
+   HBASE-4714  Don't ship w/ icms enabled by default
 
   NEW FEATURES
    HBASE-2001  Coprocessors: Colocate user code with regions (Mingjie Lai via

Modified: hbase/trunk/conf/hbase-env.sh
URL: http://svn.apache.org/viewvc/hbase/trunk/conf/hbase-env.sh?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/conf/hbase-env.sh (original)
+++ hbase/trunk/conf/hbase-env.sh Tue Nov  1 16:44:38 2011
@@ -34,7 +34,7 @@
 # Below are what we set by default.  May only work with SUN JVM.
 # For more on why as well as other possible settings,
 # see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-ea -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode"
+export HBASE_OPTS="-ea -XX:+UseConcMarkSweepGC"
 
 # Uncomment below to enable java garbage collection logging in the .out file.
 # export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps" 
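
To confirm which collector a running JVM actually picked up after editing HBASE_OPTS, here is a small sketch using the standard JMX beans (nothing HBase-specific; class name is illustrative):

    import java.lang.management.GarbageCollectorMXBean;
    import java.lang.management.ManagementFactory;

    public class GcCheck {
      public static void main(String[] args) {
        // With -XX:+UseConcMarkSweepGC and icms removed, expect to see
        // "ParNew" and "ConcurrentMarkSweep" listed here.
        for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) {
          System.out.println(gc.getName());
        }
      }
    }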

Modified: hbase/trunk/src/docbkx/performance.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/src/docbkx/performance.xml?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/docbkx/performance.xml (original)
+++ hbase/trunk/src/docbkx/performance.xml Tue Nov  1 16:44:38 2011
@@ -119,7 +119,10 @@
        must be explicitly enabled in HBase 0.90.x (it is on by default in
        0.92.x HBase). Set <code>hbase.hregion.memstore.mslab.enabled</code>
        to true in your <classname>Configuration</classname>. See the cited
-        slides for background and detail.</para>
+        slides for background and detail<footnote><para>The latest JVMs do
+        better with regard to fragmentation, so make sure you are running a
+        recent release. Read down in the thread at
+        <link xlink:href="http://osdir.com/ml/hotspot-gc-use/2011-11/msg00002.html">Identifying concurrent mode failures caused by fragmentation</link>.</para></footnote>.</para>
         <para>For more information about GC logs, see <xref linkend="trouble.log.gc" />.
         </para>
       </section>
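
For anyone on 0.90.x following the doc change above, a minimal sketch of setting the MSLAB flag programmatically, assuming the stock HBaseConfiguration API (class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableMslab {
      public static void main(String[] args) {
        // MSLAB is off by default in 0.90.x and on by default in 0.92.x;
        // set it explicitly when running 0.90.x.
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.hregion.memstore.mslab.enabled", true);
        System.out.println(conf.getBoolean("hbase.hregion.memstore.mslab.enabled", false));
      }
    }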

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/HConstants.java Tue Nov  1 16:44:38 2011
@@ -182,9 +182,6 @@ public final class HConstants {
   /** Used to construct the name of the compaction directory during compaction */
   public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir";
 
-  /** The file name used to store HTD in HDFS  */
-  public static final String TABLEINFO_NAME = ".tableinfo";
-
   /** Default maximum file size */
   public static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
 

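The constant does not disappear; it relocates to FSTableDescriptors (see below). External code referencing it would migrate roughly like this sketch (class name is illustrative):

    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class TableInfoNameLookup {
      public static void main(String[] args) {
        // Formerly HConstants.TABLEINFO_NAME; relocated by this commit.
        System.out.println(FSTableDescriptors.TABLEINFO_NAME); // prints ".tableinfo"
      }
    }
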
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java Tue Nov  1 16:44:38 2011
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
@@ -335,9 +336,9 @@ public class MasterFileSystem {
 
   private void createRootTableInfo(Path rd) throws IOException {
     // Create ROOT tableInfo if required.
-    if (!FSUtils.tableInfoExists(fs, rd,
+    if (!FSTableDescriptors.isTableInfoExists(fs, rd,
         Bytes.toString(HRegionInfo.ROOT_REGIONINFO.getTableName()))) {
-      FSUtils.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
+      FSTableDescriptors.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
     }
   }
 
@@ -420,7 +421,7 @@ public class MasterFileSystem {
    */
   public void createTableDescriptor(HTableDescriptor htableDescriptor)
       throws IOException {
-    FSUtils.createTableDescriptor(htableDescriptor, conf);
+    FSTableDescriptors.createTableDescriptor(htableDescriptor, conf);
   }
 
   /**

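The call-site change is mechanical: the same static helpers now live on FSTableDescriptors. A minimal sketch of the new entry point, assuming a reachable cluster configuration and an illustrative table name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class CreateDescriptorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTableDescriptor htd = new HTableDescriptor("example_table");
        // Was FSUtils.createTableDescriptor(htd, conf) before this commit.
        boolean created = FSTableDescriptors.createTableDescriptor(htd, conf);
        System.out.println("wrote .tableinfo: " + created);
      }
    }
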
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java Tue Nov  1 16:44:38 2011
@@ -29,21 +29,21 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.zookeeper.KeeperException;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.zookeeper.KeeperException;
 
 /**
  * Handler to create a table.
@@ -137,7 +137,7 @@ public class CreateTableHandler extends 
     // tableDir is created.  Should we change below method to be createTable
     // where we create table in tmp dir with its table descriptor file and then
     // do rename to move it into place?
-    FSUtils.createTableDescriptor(this.hTableDescriptor, this.conf);
+    FSTableDescriptors.createTableDescriptor(this.hTableDescriptor, this.conf);
 
     List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
     final int batchSize =

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java Tue Nov  1 16:44:38 2011
@@ -19,20 +19,29 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
 
 /**
@@ -42,7 +51,6 @@ import org.apache.commons.logging.LogFac
 * the filesystem or can be read and written.
  */
 public class FSTableDescriptors implements TableDescriptors {
-
   private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class);
   private final FileSystem fs;
   private final Path rootdir;
@@ -50,6 +58,9 @@ public class FSTableDescriptors implemen
   long cachehits = 0;
   long invocations = 0;
 
+  /** The file name used to store HTD in HDFS  */
+  public static final String TABLEINFO_NAME = ".tableinfo";
+
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
@@ -130,7 +141,7 @@ public class FSTableDescriptors implemen
 
     // Check mod time has not changed (this is trip to NN).
     long modtime =
-      FSUtils.getTableInfoModtime(this.fs, this.rootdir, tablename);
+      FSTableDescriptors.getTableInfoModtime(this.fs, this.rootdir, tablename);
     if (tdm != null) {
       if (modtime <= tdm.getModtime()) {
         cachehits++;
@@ -138,7 +149,7 @@ public class FSTableDescriptors implemen
       }
     }
     HTableDescriptor htd =
-      FSUtils.getTableDescriptor(this.fs, this.rootdir, tablename);
+      FSTableDescriptors.getTableDescriptor(this.fs, this.rootdir, tablename);
     if (htd == null) {
       // More likely is above will throw a FileNotFoundException
       throw new TableExistsException("No descriptor for " + tablename);
@@ -181,9 +192,9 @@ public class FSTableDescriptors implemen
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
       throw new NotImplementedException();
     }
-    if (!this.fsreadonly) FSUtils.updateHTableDescriptor(this.fs, this.rootdir, htd);
+    if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd);
     long modtime =
-      FSUtils.getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
+      FSTableDescriptors.getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
     this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
   }
 
@@ -201,4 +212,317 @@ public class FSTableDescriptors implemen
     TableDescriptorModtime tdm = this.cache.remove(tablename);
     return tdm == null? null: tdm.getTableDescriptor();
   }
-}
+
+  /**
+   * Checks if <code>.tableinfo</code> exists for a given table.
+   * 
+   * @param fs file system
+   * @param rootdir root directory of HBase installation
+   * @param tableName name of table
+   * @return true if exists
+   * @throws IOException
+   */
+  public static boolean isTableInfoExists(FileSystem fs, Path rootdir,
+      String tableName) throws IOException {
+    FileStatus status =
+      FSTableDescriptors.getTableInfoPath(fs, rootdir, tableName);
+    return status == null? false: fs.exists(status.getPath());
+  }
+
+  private static FileStatus getTableInfoPath(final FileSystem fs,
+      final Path rootdir, final String tableName)
+  throws IOException {
+    Path tabledir = FSUtils.getTablePath(rootdir, tableName);
+    return getTableInfoPath(fs, tabledir);
+  }
+
+  private static FileStatus getTableInfoPath(final FileSystem fs,
+      final Path tabledir)
+  throws IOException {
+    FileStatus [] status = fs.listStatus(tabledir, new PathFilter() {
+      @Override
+      public boolean accept(Path p) {
+        // Accept any file that starts with TABLEINFO_NAME
+        return p.getName().startsWith(TABLEINFO_NAME);
+      }
+    });
+    if (status == null || status.length < 1) return null;
+    Arrays.sort(status, new TableInfoFileStatusComparator());
+    if (status.length > 1) {
+      // Clean away old versions of .tableinfo
+      for (int i = 1; i < status.length; i++) {
+        if (!fs.delete(status[i].getPath(), false)) {
+          LOG.warn("Failed cleanup of " + status[i].getPath());
+        }
+      }
+    }
+    return status[0];
+  }
+
+  /**
+   * Compares {@link FileStatus} instances in reverse order of their paths,
+   * so the tableinfo file with the highest sequenceid sorts first.
+   */
+  static class TableInfoFileStatusComparator
+  implements Comparator<FileStatus> {
+    @Override
+    public int compare(FileStatus left, FileStatus right) {
+      return -left.compareTo(right);
+    }
+  }
+
+  /**
+   * Width of the sequenceid that is suffix on tableinfo.
+   */
+  static final int WIDTH_OF_SEQUENCE_ID = 10;
+
+  /**
+   * Regex to eat up sequenceid suffix on a .tableinfo file.
+   */
+  private static final Pattern SUFFIX =
+    Pattern.compile(TABLEINFO_NAME + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
+
+  /*
+   * @param number
+   * @return Returns zero-prefixed decimal version of passed number,
+   * WIDTH_OF_SEQUENCE_ID (10) digits wide (does absolute in case number is
+   * negative).
+   */
+  static String formatTableInfoSequenceId(final int number) {
+    byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
+    int d = Math.abs(number);
+    for (int i = b.length - 1; i >= 0; i--) {
+      b[i] = (byte)((d % 10) + '0');
+      d /= 10;
+    }
+    return Bytes.toString(b);
+  }
+
+  /**
+   * @param p Path to a <code>.tableinfo</code> file.
+   * @return The current sequenceid, or 0 if none found.
+   */
+  static int getTableInfoSequenceid(final Path p) {
+    if (p == null) return 0;
+    Matcher m = SUFFIX.matcher(p.getName());
+    return m.matches() && m.group(2) != null? Integer.parseInt(m.group(2)): 0;
+  }
+
+  /**
+   * @param tabledir
+   * @param sequenceid
+   * @return Name of tableinfo file.
+   */
+  static Path getTableInfoFileName(final Path tabledir, final int sequenceid) {
+    return new Path(tabledir,
+      TABLEINFO_NAME + "." + formatTableInfoSequenceId(sequenceid));
+  }
+
+  /**
+   * @param fs
+   * @param rootdir
+   * @param tableName
+   * @return Modification time for the table {@link #TABLEINFO_NAME} file
+   * or <code>0</code> if no tableinfo file found.
+   * @throws IOException
+   */
+  static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
+      final String tableName)
+  throws IOException {
+    FileStatus status = getTableInfoPath(fs, rootdir, tableName);
+    return status == null? 0: status.getModificationTime();
+  }
+
+  /**
+   * Get HTD from HDFS.
+   * @param fs
+   * @param hbaseRootDir
+   * @param tableName
+   * @return Descriptor or null if none found.
+   * @throws IOException
+   */
+  public static HTableDescriptor getTableDescriptor(FileSystem fs,
+      Path hbaseRootDir, byte[] tableName)
+  throws IOException {
+     return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
+  }
+
+  static HTableDescriptor getTableDescriptor(FileSystem fs,
+      Path hbaseRootDir, String tableName) {
+    HTableDescriptor htd = null;
+    try {
+      htd = getTableDescriptor(fs, FSUtils.getTablePath(hbaseRootDir, tableName));
+    } catch (NullPointerException e) {
+      LOG.debug("Exception during readTableDescriptor. Current table name = " +
+        tableName, e);
+    } catch (IOException ioe) {
+      LOG.debug("Exception during readTableDescriptor. Current table name = " +
+        tableName, ioe);
+    }
+    return htd;
+  }
+
+  public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
+  throws IOException, NullPointerException {
+    if (tableDir == null) throw new NullPointerException();
+    FileStatus status = getTableInfoPath(fs, tableDir);
+    if (status == null) return null;
+    FSDataInputStream fsDataInputStream = fs.open(status.getPath());
+    HTableDescriptor hTableDescriptor = null;
+    try {
+      hTableDescriptor = new HTableDescriptor();
+      hTableDescriptor.readFields(fsDataInputStream);
+    } finally {
+      fsDataInputStream.close();
+    }
+    return hTableDescriptor;
+  }
+
+  /**
+   * Update table descriptor
+   * @param fs
+   * @param rootdir
+   * @param hTableDescriptor
+   * @return New tableinfo
+   * @throws IOException
+   */
+  static Path updateHTableDescriptor(FileSystem fs, Path rootdir,
+      HTableDescriptor hTableDescriptor)
+  throws IOException {
+    Path tabledir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName());
+    Path p = writeTableDescriptor(fs, hTableDescriptor, tabledir, true);
+    LOG.info("Updated tableinfo=" + p);
+    return p;
+  }
+
+  private static void writeHTD(final FileSystem fs, final Path p,
+      final HTableDescriptor htd)
+  throws IOException {
+    FSDataOutputStream out = fs.create(p, true);
+    try {
+      htd.write(out);
+      out.write('\n');
+      out.write('\n');
+      out.write(Bytes.toBytes(htd.toString()));
+    } finally {
+      out.close();
+    }
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating a table.
+   * 
+   * @param fs
+   * @param htableDescriptor
+   * @param rootdir
+   */
+  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
+      HTableDescriptor htableDescriptor) throws IOException {
+    return createTableDescriptor(fs, rootdir, htableDescriptor, false);
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating a table.
+   * If forceCreation is true, any existing table descriptor is overwritten.
+   * 
+   * @param fs
+   * @param htableDescriptor
+   * @param rootdir
+   * @param forceCreation
+   */
+  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
+      HTableDescriptor htableDescriptor, boolean forceCreation)
+      throws IOException {
+    FileStatus status = getTableInfoPath(fs, rootdir, htableDescriptor.getNameAsString());
+    if (status != null) {
+      LOG.info("Current tableInfoPath = " + status.getPath());
+      if (!forceCreation) {
+        if (fs.exists(status.getPath()) && status.getLen() > 0) {
+          LOG.info("TableInfo already exists; skipping creation");
+          return false;
+        }
+      }
+    }
+    writeTableDescriptor(fs, htableDescriptor,
+      FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString()),
+      forceCreation);
+    return true;
+  }
+
+  /**
+   * Deletes a table's .tableinfo file from the file system if it exists.
+   * Used in unit tests.
+   */
+  public static void deleteTableDescriptorIfExists(String tableName,
+      Configuration conf) throws IOException {
+    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+    FileStatus status =
+      FSTableDescriptors.getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
+    if (status == null) return;
+    // The below deleteDirectory works for either file or directory.
+    if (fs.exists(status.getPath())) FSUtils.deleteDirectory(fs, status.getPath());
+  }
+
+  /**
+   * Called when we are creating a table to write out the table's descriptor.
+   * @param fs
+   * @param hTableDescriptor
+   * @param tableDir
+   * @param forceCreation True if we are to force creation
+   * @throws IOException
+   */
+  private static Path writeTableDescriptor(FileSystem fs,
+      HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation)
+  throws IOException {
+    FileStatus status = getTableInfoPath(fs, tableDir);
+    int sequenceid = getTableInfoSequenceid(status == null? null: status.getPath());
+    Path tableInfoPath = null;
+    do {
+      sequenceid += 1;
+      tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
+    } while (fs.exists(tableInfoPath));
+    try {
+      writeHTD(fs, tableInfoPath, hTableDescriptor);
+      if (status != null) {
+        if (!fs.delete(status.getPath(), false)) {
+          LOG.warn("Failed delete of " + status.getPath());
+        }
+      }
+    } catch (IOException e) {
+      LOG.error("Unable to write the table descriptor at " + tableInfoPath, e);
+      fs.delete(tableInfoPath, true);
+      throw e;
+    }
+    return tableInfoPath;
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating a table.
+   * 
+   * @param htableDescriptor
+   * @param conf
+   */
+  public static boolean createTableDescriptor(
+      HTableDescriptor htableDescriptor, Configuration conf) throws IOException {
+    return createTableDescriptor(htableDescriptor, conf, false);
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating a table.
+   * If forceCreation is true, any existing table descriptor is overwritten.
+   * 
+   * @param htableDescriptor
+   * @param conf
+   * @param forceCreation
+   */
+  public static boolean createTableDescriptor(
+      HTableDescriptor htableDescriptor, Configuration conf,
+      boolean forceCreation) throws IOException {
+    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+    return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor,
+        forceCreation);
+  }
+}
\ No newline at end of file
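
The heart of the change above is the new naming scheme: every write of a descriptor produces .tableinfo.<sequenceid>, zero-padded to ten digits; the file with the highest sequenceid is the live copy and older ones are lazily deleted. A standalone sketch of the padding and parsing (mirroring formatTableInfoSequenceId and getTableInfoSequenceid, not the shipped class itself):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class TableInfoNameSketch {
      static final String TABLEINFO_NAME = ".tableinfo";
      static final int WIDTH = 10; // WIDTH_OF_SEQUENCE_ID
      static final Pattern SUFFIX =
        Pattern.compile(Pattern.quote(TABLEINFO_NAME) + "(\\.([0-9]{" + WIDTH + "}))?$");

      // Zero-pad a sequenceid to WIDTH digits, as the commit does byte-by-byte.
      static String format(int number) {
        return String.format("%0" + WIDTH + "d", Math.abs(number));
      }

      // Pull the sequenceid back out of a file name; 0 when no suffix present.
      static int parse(String fileName) {
        Matcher m = SUFFIX.matcher(fileName);
        return m.matches() && m.group(2) != null ? Integer.parseInt(m.group(2)) : 0;
      }

      public static void main(String[] args) {
        String name = TABLEINFO_NAME + "." + format(42); // .tableinfo.0000000042
        System.out.println(name + " -> " + parse(name)); // -> 42
      }
    }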

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java Tue Nov  1 16:44:38 2011
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.DataInputStream;
 import java.io.EOFException;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -63,7 +62,7 @@ public abstract class FSUtils {
   protected FSUtils() {
     super();
   }
-  
+
   public static FSUtils getInstance(FileSystem fs, Configuration conf) {
     String scheme = fs.getUri().getScheme();
     if (scheme == null) {
@@ -156,7 +155,8 @@ public abstract class FSUtils {
    * @return true if dfs is in safemode, false otherwise.
    *
    */
-  private static boolean isInSafeMode(FileSystem fs) throws IOException {
+  private static boolean isInSafeMode(FileSystem fs)
+  throws IOException {
     // Refactored safe-mode check for HBASE-4510
     if (fs instanceof DistributedFileSystem) {
       Path rootPath = new Path("/");
@@ -180,8 +180,9 @@ public abstract class FSUtils {
    */
   public static void checkDfsSafeMode(final Configuration conf) 
   throws IOException {
+    Path rootDir = getRootDir(conf);
     FileSystem fs = FileSystem.get(conf);
-    if (isInSafeMode(fs)) {
+    if (isInSafeMode(fs, rootDir)) {
       throw new IOException("File system is in safemode, it can't be written now");
     }
   }
@@ -451,9 +452,10 @@ public abstract class FSUtils {
   public static void waitOnSafeMode(final Configuration conf,
     final long wait)
   throws IOException {
+    Path rootDir = getRootDir(conf);
     FileSystem fs = FileSystem.get(conf);
     // Make sure dfs is not in safe mode
-    while (isInSafeMode(fs)) {
+    while (isInSafeMode(fs, rootDir)) {
       LOG.info("Waiting for dfs to exit safe mode...");
       try {
         Thread.sleep(wait);
@@ -505,21 +507,6 @@ public abstract class FSUtils {
   }
 
   /**
-   * Checks if .tableinfo exists for given table
-   * 
-   * @param fs file system
-   * @param rootdir root directory of HBase installation
-   * @param tableName name of table
-   * @return true if exists
-   * @throws IOException
-   */
-  public static boolean tableInfoExists(FileSystem fs, Path rootdir,
-      String tableName) throws IOException {
-    Path tablePath = getTableInfoPath(rootdir, tableName);
-    return fs.exists(tablePath);
-  }
-
-  /**
    * Compute HDFS blocks distribution of a given file, or a portion of the file
    * @param fs file system
    * @param status file status of the file
@@ -864,35 +851,6 @@ public abstract class FSUtils {
     return tabledirs;
   }
 
-  /**
-   * Get table info path for a table.
-   * @param rootdir
-   * @param tableName
-   * @return Table info path
-   */
-  private static Path getTableInfoPath(Path rootdir, String tablename) {
-    Path tablePath = getTablePath(rootdir, tablename);
-    return new Path(tablePath, HConstants.TABLEINFO_NAME);
-  }
-
-  /**
-   * @param fs
-   * @param rootdir
-   * @param tablename
-   * @return Modification time for the table {@link HConstants#TABLEINFO_NAME} file.
-   * @throws IOException
-   */
-  public static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
-      final String tablename)
-  throws IOException {
-    Path p = getTableInfoPath(rootdir, tablename);
-    FileStatus [] status = fs.listStatus(p);
-    if (status == null || status.length < 1) {
-        throw new FileNotFoundException("No status for " + p.toString());
-    }
-    return status[0].getModificationTime();
-  }
-
   public static Path getTablePath(Path rootdir, byte [] tableName) {
     return getTablePath(rootdir, Bytes.toString(tableName));
   }
@@ -901,234 +859,14 @@ public abstract class FSUtils {
     return new Path(rootdir, tableName);
   }
 
-  private static FileSystem getCurrentFileSystem(Configuration conf)
-  throws IOException {
-    return getRootDir(conf).getFileSystem(conf);
-  }
-
   /**
-   * Get HTableDescriptor
-   * @param config
-   * @param tableName
-   * @return HTableDescriptor for table
-   * @throws IOException
-   */
-  public static HTableDescriptor getHTableDescriptor(Configuration config,
-      String tableName)
-  throws IOException {
-    Path path = getRootDir(config);
-    FileSystem fs = path.getFileSystem(config);
-    return getTableDescriptor(fs, path, tableName);
-  }
-
-  /**
-   * Get HTD from HDFS.
-   * @param fs
-   * @param hbaseRootDir
-   * @param tableName
-   * @return Descriptor or null if none found.
-   * @throws IOException
-   */
-  public static HTableDescriptor getTableDescriptor(FileSystem fs,
-      Path hbaseRootDir, byte[] tableName)
-  throws IOException {
-     return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
-  }
-
-  public static HTableDescriptor getTableDescriptor(FileSystem fs,
-      Path hbaseRootDir, String tableName) {
-    HTableDescriptor htd = null;
-    try {
-      htd = getTableDescriptor(fs, getTablePath(hbaseRootDir, tableName));
-    } catch (NullPointerException e) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = " +
-        tableName , e);
-    } catch (IOException ioe) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = " +
-        tableName , ioe);
-    }
-    return htd;
-  }
-
-  public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
-  throws IOException, NullPointerException {
-    if (tableDir == null) throw new NullPointerException();
-    Path tableinfo = new Path(tableDir, HConstants.TABLEINFO_NAME);
-    FSDataInputStream fsDataInputStream = fs.open(tableinfo);
-    HTableDescriptor hTableDescriptor = null;
-    try {
-      hTableDescriptor = new HTableDescriptor();
-      hTableDescriptor.readFields(fsDataInputStream);
-    } finally {
-      fsDataInputStream.close();
-    }
-    return hTableDescriptor;
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
-   * 
-   * @param htableDescriptor
    * @param conf
-   */
-  public static boolean createTableDescriptor(
-      HTableDescriptor htableDescriptor, Configuration conf) throws IOException {
-    return createTableDescriptor(htableDescriptor, conf, false);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
-   * forceCreation is true then even if previous table descriptor is present it
-   * will be overwritten
-   * 
-   * @param htableDescriptor
-   * @param conf
-   * @param forceCreation
-   */
-  public static boolean createTableDescriptor(
-      HTableDescriptor htableDescriptor, Configuration conf,
-      boolean forceCreation) throws IOException {
-    FileSystem fs = getCurrentFileSystem(conf);
-    return createTableDescriptor(fs, getRootDir(conf), htableDescriptor,
-        forceCreation);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
-   * 
-   * @param fs
-   * @param htableDescriptor
-   * @param rootdir
-   */
-  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor htableDescriptor) throws IOException {
-    return createTableDescriptor(fs, rootdir, htableDescriptor, false);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
-   * forceCreation is true then even if previous table descriptor is present it
-   * will be overwritten
-   * 
-   * @param fs
-   * @param htableDescriptor
-   * @param rootdir
-   * @param forceCreation
-   */
-  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor htableDescriptor, boolean forceCreation)
-      throws IOException {
-    Path tableInfoPath = getTableInfoPath(rootdir, htableDescriptor
-        .getNameAsString());
-    LOG.info("Current tableInfoPath = " + tableInfoPath);
-    if (!forceCreation) {
-      if (fs.exists(tableInfoPath)
-          && fs.getFileStatus(tableInfoPath).getLen() > 0) {
-        LOG.info("TableInfo already exists.. Skipping creation");
-        return false;
-      }
-    }
-    writeTableDescriptor(fs, htableDescriptor, getTablePath(rootdir,
-        htableDescriptor.getNameAsString()), forceCreation);
-
-    return true;
-  }
-
-  /**
-   * Deletes a table's directory from the file system if exists. Used in unit
-   * tests.
-   */
-  public static void deleteTableDescriptorIfExists(String tableName,
-      Configuration conf) throws IOException {
-    FileSystem fs = getCurrentFileSystem(conf);
-    Path tableInfoPath = getTableInfoPath(getRootDir(conf), tableName);
-    if (fs.exists(tableInfoPath))
-      deleteDirectory(fs, tableInfoPath);
-  }
-
-  /**
-   * Called when we are creating a table to write out the tables' descriptor.
-   * @param fs
-   * @param hTableDescriptor
-   * @param tableDir
-   * @param forceCreation True if we are to force creation
-   * @throws IOException
-   */
-  private static void writeTableDescriptor(FileSystem fs,
-      HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation)
-  throws IOException {
-    // Create in tmpdir and then move into place in case we crash after
-    // create but before close. If we don't successfully close the file,
-    // subsequent region reopens will fail the below because create is
-    // registered in NN.
-    Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
-    Path tmpPath = new Path(new Path(tableDir, ".tmp"),
-      HConstants.TABLEINFO_NAME + "." + System.currentTimeMillis());
-    LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath);
-    try {
-      writeHTD(fs, tmpPath, hTableDescriptor);
-    } catch (IOException e) {
-      LOG.error("Unable to write the tabledescriptor in the path" + tmpPath
-          + ".", e);
-      fs.delete(tmpPath, true);
-      throw e;
-    }
-    // TODO: The below is less than ideal and likely error prone.  There is a
-    // better rename in hadoops after 0.20 that takes rename options (this has
-    // its own issues according to mighty Todd in that old readers may fail
-    // as we cross the renme transition) but until then, we have this
-    // forceCreation flag which does a delete and then we rename so there is a
-    // hole.  Need to fix.
-    try {
-      if (forceCreation) {
-        if (fs.exists(tableInfoPath) && !fs.delete(tableInfoPath, false)) {
-          String errMsg = "Unable to delete " + tableInfoPath
-              + " while forcefully writing the table descriptor.";
-          LOG.error(errMsg);
-          throw new IOException(errMsg);
-        }
-      }
-      if (!fs.rename(tmpPath, tableInfoPath)) {
-        String errMsg = "Unable to rename " + tmpPath + " to " + tableInfoPath;
-        LOG.error(errMsg);
-        throw new IOException(errMsg);
-      } else {
-        LOG.info("TableDescriptor stored. TableInfoPath = " + tableInfoPath);
-      }
-    } finally {
-      fs.delete(tmpPath, true);
-    }
-  }
-
-  /**
-   * Update table descriptor
-   * @param fs
-   * @param rootdir
-   * @param hTableDescriptor
+   * @return Returns the filesystem of the hbase rootdir.
    * @throws IOException
    */
-  public static void updateHTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor hTableDescriptor)
+  public static FileSystem getCurrentFileSystem(Configuration conf)
   throws IOException {
-    Path tableInfoPath =
-      getTableInfoPath(rootdir, hTableDescriptor.getNameAsString());
-    writeTableDescriptor(fs, hTableDescriptor, tableInfoPath.getParent(), true);
-    LOG.info("Updated tableinfo=" + tableInfoPath + " to " +
-      hTableDescriptor.toString());
-  }
-
-  private static void writeHTD(final FileSystem fs, final Path p,
-      final HTableDescriptor htd)
-  throws IOException {
-    FSDataOutputStream out = fs.create(p, true);
-    try {
-      htd.write(out);
-      out.write('\n');
-      out.write('\n');
-      out.write(Bytes.toBytes(htd.toString()));
-    } finally {
-      out.close();
-    }
+    return getRootDir(conf).getFileSystem(conf);
   }
   
   /**

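With the descriptor helpers gone, FSUtils keeps only generic filesystem plumbing, and getCurrentFileSystem becomes public. A minimal usage sketch, assuming only the APIs visible in the diff (class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class RootFsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Resolves hbase.rootdir from the configuration and returns its filesystem.
        FileSystem fs = FSUtils.getCurrentFileSystem(conf);
        System.out.println("hbase root filesystem: " + fs.getUri());
      }
    }
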
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java Tue Nov  1 16:44:38 2011
@@ -152,7 +152,7 @@ class HMerge {
           fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))),
           Bytes.toString(tableName)
       );
-      this.htd = FSUtils.getTableDescriptor(this.fs, this.tabledir);
+      this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir);
       Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
           HConstants.HREGION_LOGDIR_NAME);
       Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME);

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/Merge.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/Merge.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/Merge.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/Merge.java Tue Nov  1 16:44:38 2011
@@ -237,7 +237,7 @@ public class Merge extends Configured im
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta2);
     }
-    HTableDescriptor htd = FSUtils.getTableDescriptor(FileSystem.get(getConf()),
+    HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(FileSystem.get(getConf()),
       this.rootdir, this.tableName);
     HRegion merged = merge(htd, info1, metaRegion1, info2, metaRegion2);
 

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java Tue Nov  1 16:44:38 2011
@@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.master.As
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
@@ -357,7 +357,7 @@ public class TestMasterFailover {
     Path rootdir = filesystem.makeQualified(
         new Path(conf.get(HConstants.HBASE_DIR)));
     // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled);
+    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
 
     HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);
     HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
@@ -369,7 +369,7 @@ public class TestMasterFailover {
     HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
     htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
     // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled);
+    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
     HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
     HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
     List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
@@ -681,7 +681,7 @@ public class TestMasterFailover {
     Path rootdir = filesystem.makeQualified(
            new Path(conf.get(HConstants.HBASE_DIR)));
     // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled);
+    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
     HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
         null, null);
     HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
@@ -693,7 +693,7 @@ public class TestMasterFailover {
     HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
     htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
     // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled);
+    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
     HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
     HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
 

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java Tue Nov  1 16:44:38 2011
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Test;
@@ -68,11 +69,11 @@ public class TestHRegionInfo {
 
     // Delete the temporary table directory that might still be there from the
     // previous test run.
-    FSUtils.deleteTableDescriptorIfExists(tablename,
+    FSTableDescriptors.deleteTableDescriptorIfExists(tablename,
         HTU.getConfiguration());
 
     HTableDescriptor htd = new HTableDescriptor(tablename);
-    FSUtils.createTableDescriptor(htd, HTU.getConfiguration());
+    FSTableDescriptors.createTableDescriptor(htd, HTU.getConfiguration());
     HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testGetSetOfHTD"),
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
     HTableDescriptor htd2 = hri.getTableDesc();

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java Tue Nov  1 16:44:38 2011
@@ -21,9 +21,11 @@ import static org.junit.Assert.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -43,6 +45,58 @@ public class TestFSTableDescriptors {
   private static final Log LOG = LogFactory.getLog(TestFSTableDescriptors.class);
 
   @Test
+  public void testSequenceidAdvancesOnTableInfo() throws IOException {
+    Path testdir = UTIL.getDataTestDir();
+    HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo");
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
+    int i0 = FSTableDescriptors.getTableInfoSequenceid(p0);
+    Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
+    // Assert we cleaned up the old file.
+    assertTrue(!fs.exists(p0));
+    int i1 = FSTableDescriptors.getTableInfoSequenceid(p1);
+    assertTrue(i1 == i0 + 1);
+    Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
+    // Assert we cleaned up the old file.
+    assertTrue(!fs.exists(p1));
+    int i2 = FSTableDescriptors.getTableInfoSequenceid(p2);
+    assertTrue(i2 == i1 + 1);
+  }
+
+  @Test
+  public void testFormatTableInfoSequenceId() {
+    Path p0 = assertWriteAndReadSequenceid(0);
+    // Assert p0 has format we expect.
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
+      sb.append("0");
+    }
+    assertEquals(FSTableDescriptors.TABLEINFO_NAME + "." + sb.toString(),
+      p0.getName());
+    // Check a few more.
+    Path p2 = assertWriteAndReadSequenceid(2);
+    Path p10000 = assertWriteAndReadSequenceid(10000);
+    // Get a .tableinfo that has no sequenceid suffix.
+    Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_NAME);
+    FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
+    FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
+    FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
+    FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
+    FSTableDescriptors.TableInfoFileStatusComparator comparator =
+      new FSTableDescriptors.TableInfoFileStatusComparator();
+    assertTrue(comparator.compare(fs, fs0) > 0);
+    assertTrue(comparator.compare(fs0, fs2) > 0);
+    assertTrue(comparator.compare(fs2, fs10000) > 0);
+  }
+
+  private Path assertWriteAndReadSequenceid(final int i) {
+    Path p = FSTableDescriptors.getTableInfoFileName(new Path("/tmp"), i);
+    int ii = FSTableDescriptors.getTableInfoSequenceid(p);
+    assertEquals(i, ii);
+    return p;
+  }
+
+  @Test
   public void testRemoves() throws IOException {
     final String name = "testRemoves";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@@ -62,14 +116,14 @@ public class TestFSTableDescriptors {
     Path rootdir = UTIL.getDataTestDir(name);
     createHTDInFS(fs, rootdir, htd);
     HTableDescriptor htd2 =
-      FSUtils.getTableDescriptor(fs, rootdir, htd.getNameAsString());
+      FSTableDescriptors.getTableDescriptor(fs, rootdir, htd.getNameAsString());
     assertTrue(htd.equals(htd2));
   }
 
   private void createHTDInFS(final FileSystem fs, Path rootdir,
       final HTableDescriptor htd)
   throws IOException {
-    FSUtils.createTableDescriptor(fs, rootdir, htd);
+    FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);
   }
 
   @Test public void testHTableDescriptors()
@@ -102,7 +156,7 @@ public class TestFSTableDescriptors {
     for (int i = 0; i < count; i++) {
       HTableDescriptor htd = new HTableDescriptor(name + i);
       htd.addFamily(new HColumnDescriptor("" + i));
-      FSUtils.updateHTableDescriptor(fs, rootdir, htd);
+      FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd);
     }
     // Wait a while so mod time we write is for sure different.
     Thread.sleep(100);
@@ -121,7 +175,7 @@ public class TestFSTableDescriptors {
       htds.cachehits >= ((count * 2) + 1));
   }
 
-  @Test (expected=java.io.FileNotFoundException.class)
+  @Test (expected=org.apache.hadoop.hbase.TableExistsException.class)
   public void testNoSuchTable() throws IOException {
     final String name = "testNoSuchTable";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@@ -143,4 +197,30 @@ public class TestFSTableDescriptors {
     htds.add(htd);
     htds.add(htd);
   }
-}
+
+  @Test
+  public void testTableInfoFileStatusComparator() {
+    FileStatus bare =
+      new FileStatus(0, false, 0, 0, -1, new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME));
+    FileStatus future =
+      new FileStatus(0, false, 0, 0, -1,
+        new Path("/tmp/.tableinfo." + System.currentTimeMillis()));
+    FileStatus farFuture =
+      new FileStatus(0, false, 0, 0, -1,
+        new Path("/tmp/.tableinfo." + (System.currentTimeMillis() + 1000)));
+    FileStatus [] alist = {bare, future, farFuture};
+    FileStatus [] blist = {bare, farFuture, future};
+    FileStatus [] clist = {farFuture, bare, future};
+    FSTableDescriptors.TableInfoFileStatusComparator c =
+      new FSTableDescriptors.TableInfoFileStatusComparator();
+    Arrays.sort(alist, c);
+    Arrays.sort(blist, c);
+    Arrays.sort(clist, c);
+    // Now assert all sorted same in way we want.
+    for (int i = 0; i < alist.length; i++) {
+      assertTrue(alist[i].equals(blist[i]));
+      assertTrue(blist[i].equals(clist[i]));
+      assertTrue(clist[i].equals(i == 0? farFuture: i == 1? future: bare));
+    }
+  }
+}
\ No newline at end of file

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java Tue Nov  1 16:44:38 2011
@@ -35,7 +35,6 @@ import org.junit.Test;
  * Test {@link FSUtils}.
  */
 public class TestFSUtils {
-  
   @Test public void testIsHDFS() throws Exception {
     HBaseTestingUtility htu = new HBaseTestingUtility();
     htu.getConfiguration().setBoolean("dfs.support.append", false);

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java Tue Nov  1 16:44:38 2011
@@ -96,7 +96,7 @@ public class TestMergeTable {
 
     // Create regions and populate them at same time.  Create the tabledir
     // for them first.
-    FSUtils.createTableDescriptor(fs, rootdir, desc);
+    FSTableDescriptors.createTableDescriptor(fs, rootdir, desc);
     HRegion [] regions = {
       createRegion(desc, null, row_70001, 1, 70000, rootdir),
       createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java?rev=1196114&r1=1196113&r2=1196114&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java Tue Nov  1 16:44:38 2011
@@ -136,7 +136,7 @@ public class TestMergeTool extends HBase
     try {
       // Create root and meta regions
       createRootAndMetaRegions();
-      FSUtils.createTableDescriptor(this.fs, this.testDir, this.desc);
+      FSTableDescriptors.createTableDescriptor(this.fs, this.testDir, this.desc);
       /*
        * Create the regions we will merge
        */


