hbase-commits mailing list archives

From j...@apache.org
Subject svn commit: r699527 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/util/ src/java/org/apache/hadoop/hbase/util/migration/ src/java/org/apache/hadoop/hbase/util/migration/v5/ src/test/org/apache/hadoop/hbase/util/
Date Fri, 26 Sep 2008 22:55:47 GMT
Author: jimk
Date: Fri Sep 26 15:55:46 2008
New Revision: 699527

URL: http://svn.apache.org/viewvc?rev=699527&view=rev
Log:
HBASE-905   Remove V5 migration classes from 0.19.0 (Jean-Daniel Cryans via Jim Kellerman)

Added:
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/package.html
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/MigrationTest.java
      - copied, changed from r699497, hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java
Removed:
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java
Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=699527&r1=699526&r2=699527&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Sep 26 15:55:46 2008
@@ -3,6 +3,8 @@
   INCOMPATIBLE CHANGES
    HBASE-885   TableMap and TableReduce should be interfaces
                (Doğacan Güney via Stack)
+   HBASE-905   Remove V5 migration classes from 0.19.0 (Jean-Daniel Cryans via
+               Jim Kellerman)
 
   BUG FIXES
    HBASE-891   HRS.validateValuesLength throws IOE, gets caught in the retries

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java?rev=699527&r1=699526&r2=699527&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java Fri Sep 26 15:55:46 2008
@@ -90,6 +90,12 @@
   // Filesystem version of hbase 0.1.x.
   private static final float HBASE_0_1_VERSION = 0.1f;
   
+  // Filesystem version we can migrate from
+  private static final int PREVIOUS_VERSION = 4;
+  
+  private static final String MIGRATION_LINK = 
+    " See http://wiki.apache.org/hadoop/Hbase/HowToMigrate for more information.";
+
   /** default constructor */
   public Migrate() {
     this(new HBaseConfiguration());
@@ -170,20 +176,28 @@
 
       // See if there is a file system version file
       String versionStr = FSUtils.getVersion(fs, FSUtils.getRootDir(this.conf));
-      if (versionStr != null &&
-          versionStr.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) {
+      if (versionStr == null) {
+        throw new IOException("File system version file " +
+            HConstants.VERSION_FILE_NAME +
+            " does not exist. No upgrade possible." + MIGRATION_LINK);
+      }
+      if (versionStr.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) {
         LOG.info("No upgrade necessary.");
         return 0;
       }
       float version = Float.parseFloat(versionStr);
-      if (version == HBASE_0_1_VERSION) {
-        checkForUnrecoveredLogFiles(getRootDirFiles());
-        migrateToV5();
-      } else {
-        throw new IOException("Unrecognized or non-migratable version: " +
-          version);
+      if (version == HBASE_0_1_VERSION ||
+          Integer.valueOf(versionStr) < PREVIOUS_VERSION) {
+        String msg = "Cannot upgrade from " + versionStr + " to " +
+        HConstants.FILE_SYSTEM_VERSION + ". You must install hbase-0.2.x, run " +
+        "the upgrade tool, reinstall this version, and run this utility again." +
+        MIGRATION_LINK;
+        System.out.println(msg);
+        throw new IOException(msg);
       }
 
+      // insert call to new migration method here.
+      
       if (!readOnly) {
         // Set file system version
         LOG.info("Setting file system version.");
@@ -199,93 +213,6 @@
     }
   }
   
-  private void migrateToV5() throws IOException {
-    rewriteMetaHRegionInfo();
-    addHistorianFamilyToMeta();
-    updateBloomFilters();
-  }
-  
-  /**
-   * Rewrite the meta tables so that HRI is versioned and so we move to the
-   * new HTD and HCD.
-   * @throws IOException 
-   */
-  private void rewriteMetaHRegionInfo() throws IOException {
-    if (this.readOnly && this.migrationNeeded) {
-      return;
-    }
-    // Read using old classes.
-    final org.apache.hadoop.hbase.util.migration.v5.MetaUtils utils =
-      new org.apache.hadoop.hbase.util.migration.v5.MetaUtils(this.conf);
-    try {
-      // Scan the root region
-      utils.scanRootRegion(new org.apache.hadoop.hbase.util.migration.v5.MetaUtils.ScannerListener() {
-        public boolean processRow(org.apache.hadoop.hbase.util.migration.v5.HRegionInfo info)
-        throws IOException {
-          // Scan every meta region
-          final org.apache.hadoop.hbase.util.migration.v5.HRegion metaRegion =
-            utils.getMetaRegion(info);
-          // If here, we were able to read with old classes.  If readOnly, then
-          // needs migration.
-          if (readOnly && !migrationNeeded) {
-            migrationNeeded = true;
-            return false;
-          }
-          updateHRegionInfo(utils.getRootRegion(), info);
-          utils.scanMetaRegion(info, new org.apache.hadoop.hbase.util.migration.v5.MetaUtils.ScannerListener() {
-            public boolean processRow(org.apache.hadoop.hbase.util.migration.v5.HRegionInfo hri)
-            throws IOException {
-              updateHRegionInfo(metaRegion, hri);
-              return true;
-            }
-          });
-          return true;
-        }
-      });
-    } finally {
-      utils.shutdown();
-    }
-  }
-  
-  /*
-   * Move from old pre-v5 hregioninfo to current HRegionInfo
-   * Persist back into <code>r</code>
-   * @param mr
-   * @param oldHri
-   */
-  void updateHRegionInfo(org.apache.hadoop.hbase.util.migration.v5.HRegion mr,
-    org.apache.hadoop.hbase.util.migration.v5.HRegionInfo oldHri)
-  throws IOException {
-    byte [] oldHriTableName = oldHri.getTableDesc().getName();
-    HTableDescriptor newHtd =
-      Bytes.equals(HConstants.ROOT_TABLE_NAME, oldHriTableName)?
-        HTableDescriptor.ROOT_TABLEDESC:
-        Bytes.equals(HConstants.META_TABLE_NAME, oldHriTableName)?
-          HTableDescriptor.META_TABLEDESC:
-          new HTableDescriptor(oldHri.getTableDesc().getName());
-    for (org.apache.hadoop.hbase.util.migration.v5.HColumnDescriptor oldHcd:
-        oldHri.getTableDesc().getFamilies()) {
-      HColumnDescriptor newHcd = new HColumnDescriptor(
-        HStoreKey.addDelimiter(oldHcd.getName()),
-        oldHcd.getMaxValueLength(),
-        HColumnDescriptor.CompressionType.valueOf(oldHcd.getCompressionType().toString()),
-        oldHcd.isInMemory(), oldHcd.isBlockCacheEnabled(),
-        oldHcd.getMaxValueLength(), oldHcd.getTimeToLive(),
-        oldHcd.isBloomFilterEnabled());
-      newHtd.addFamily(newHcd);
-    }
-    HRegionInfo newHri = new HRegionInfo(newHtd, oldHri.getStartKey(),
-      oldHri.getEndKey(), oldHri.isSplit(), oldHri.getRegionId());
-    BatchUpdate b = new BatchUpdate(newHri.getRegionName());
-    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(newHri));
-    mr.batchUpdate(b);
-    if (LOG.isDebugEnabled()) {
-        LOG.debug("New " + Bytes.toString(HConstants.COL_REGIONINFO) +
-          " for " + oldHri.toString() + " in " + mr.toString() + " is: " +
-          newHri.toString());
-    }
-  }
-
   private FileStatus[] getRootDirFiles() throws IOException {
     FileStatus[] stats = fs.listStatus(FSUtils.getRootDir(this.conf));
     if (stats == null || stats.length == 0) {
@@ -316,90 +243,6 @@
     }
   }
 
-  private void addHistorianFamilyToMeta() throws IOException {
-    if (this.migrationNeeded) {
-      // Be careful. We cannot use MetaUtils if the current hbase in the
-      // filesystem has not been migrated.
-      return;
-    }
-    boolean needed = false;
-    MetaUtils utils = new MetaUtils(this.conf);
-    try {
-      List<HRegionInfo> metas = utils.getMETARows(HConstants.META_TABLE_NAME);
-      for (HRegionInfo meta : metas) {
-        if (meta.getTableDesc().
-            getFamily(HConstants.COLUMN_FAMILY_HISTORIAN) == null) {
-          needed = true;
-          break;
-        }
-      }
-      if (needed && this.readOnly) {
-        this.migrationNeeded = true;
-      } else {
-        utils.addColumn(HConstants.META_TABLE_NAME,
-          new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN,
-            HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE,
-            false, false, Integer.MAX_VALUE, HConstants.FOREVER, false));
-        LOG.info("Historian family added to .META.");
-        // Flush out the meta edits.
-      }
-    } finally {
-      utils.shutdown();
-    }
-  }
-  
-  private void updateBloomFilters() throws IOException {
-    if (this.migrationNeeded && this.readOnly) {
-      return;
-    }
-    final Path rootDir = FSUtils.getRootDir(conf);
-    final MetaUtils utils = new MetaUtils(this.conf);
-    try {
-      // Scan the root region
-      utils.scanRootRegion(new MetaUtils.ScannerListener() {
-        public boolean processRow(HRegionInfo info) throws IOException {
-          // Scan every meta region
-          final HRegion metaRegion = utils.getMetaRegion(info);
-          utils.scanMetaRegion(info, new MetaUtils.ScannerListener() {
-            public boolean processRow(HRegionInfo hri) throws IOException {
-              HTableDescriptor desc = hri.getTableDesc();
-              Path tableDir =
-                HTableDescriptor.getTableDir(rootDir, desc.getName()); 
-              for (HColumnDescriptor column: desc.getFamilies()) {
-                if (column.isBloomfilter()) {
-                  // Column has a bloom filter
-                  migrationNeeded = true;
-                  Path filterDir = HStoreFile.getFilterDir(tableDir,
-                      hri.getEncodedName(), column.getName());
-                  if (fs.exists(filterDir)) {
-                    // Filter dir exists
-                    if (readOnly) {
-                      // And if we are only checking to see if a migration is
-                      // needed - it is. We're done.
-                      return false;
-                    }
-                    // Delete the filter
-                    fs.delete(filterDir, true);
-                    // Update the HRegionInfo in meta setting the bloomfilter
-                    // to be disabled.
-                    column.setBloomfilter(false);
-                    utils.updateMETARegionInfo(metaRegion, hri);
-                  }
-                }
-              }
-              return true;
-            }
-          });
-          // Stop scanning if only doing a check and we've determined that a
-          // migration is needed. Otherwise continue by returning true
-          return readOnly && migrationNeeded ? false : true;
-        }
-      });
-    } finally {
-      utils.shutdown();
-    }
-  }
-
   @SuppressWarnings("static-access")
   private int parseArgs(String[] args) {
     Options opts = new Options();

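For readers skimming the Migrate.java hunks above: the commit collapses the old per-version migration paths into a single version gate, which reads the version file, returns early when it already matches, and refuses to proceed when the on-filesystem version predates PREVIOUS_VERSION. Below is a minimal, self-contained sketch of that gate pattern; the class name, constants, and messages are illustrative stand-ins, not HBase API.

import java.io.IOException;

public class VersionGateSketch {

  // Illustrative constants; the real values live in HConstants and Migrate.
  private static final String CURRENT_VERSION = "5";
  private static final int PREVIOUS_VERSION = 4;

  /** Returns 0 when no migration is needed, 1 when one is possible. */
  static int checkVersion(String versionStr) throws IOException {
    if (versionStr == null) {
      // Mirrors the new "version file does not exist" branch above.
      throw new IOException("No version file found; no upgrade possible.");
    }
    if (versionStr.compareTo(CURRENT_VERSION) == 0) {
      return 0; // Already current: nothing to migrate.
    }
    if (Float.parseFloat(versionStr) < PREVIOUS_VERSION) {
      // Too old to migrate directly; step through an intermediate release.
      throw new IOException("Cannot upgrade from " + versionStr + " to " +
          CURRENT_VERSION + "; run the previous release's upgrade tool first.");
    }
    return 1; // Migratable: a real tool would run the migration here.
  }

  public static void main(String[] args) throws IOException {
    System.out.println(checkVersion("5")); // prints 0
    System.out.println(checkVersion("4")); // prints 1
  }
}

Run standalone, main() prints 0 then 1 for a current and a migratable version; anything older throws, mirroring the "install hbase-0.2.x" error message in the hunk above.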
Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/package.html
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/package.html?rev=699527&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/package.html (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/package.html Fri Sep 26 15:55:46 2008
@@ -0,0 +1,35 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<head />
+<body bgcolor="white">
+Package of classes used to instantiate objects written with an older
+version of HBase.
+
+Under the <code>hbase.rootdir</code>, a file named
+<code>hbase.version</code> holds the version number of the data
+persisted by HBase.  The version number is bumped every time the
+HBase on-filesystem format changes.  Versions 0.2.0 and 0.18.0 of
+HBase shipped with an on-filesystem version of <code>4</code>.  This
+package holds the version 4 classes used when migrating an HBase
+instance up to version 5.  See <a
+href="http://wiki.apache.org/hadoop/Hbase/HowToMigrate">How To
+Migrate</a> for more on migrating HBase across versions and for
+notes on the design of the HBase migration system.  </body> </html>

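The package.html added above documents the hbase.version marker kept under hbase.rootdir. The sketch below illustrates that mechanism against a local filesystem only; HBase itself reads and writes the file through Hadoop's FileSystem API (see FSUtils.getVersion in the Migrate.java hunk above), and the class and helper names here are hypothetical.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

public class VersionFileSketch {

  // Stand-in for HConstants.VERSION_FILE_NAME.
  private static final String VERSION_FILE_NAME = "hbase.version";

  /** Writes the on-filesystem format version under the root directory. */
  static void setVersion(Path rootDir, String version) throws IOException {
    Files.createDirectories(rootDir);
    Files.write(rootDir.resolve(VERSION_FILE_NAME),
        version.getBytes(StandardCharsets.UTF_8));
  }

  /** Reads it back; null means "no version file", as in Migrate.run() above. */
  static String getVersion(Path rootDir) throws IOException {
    Path f = rootDir.resolve(VERSION_FILE_NAME);
    if (!Files.exists(f)) {
      return null;
    }
    return new String(Files.readAllBytes(f), StandardCharsets.UTF_8).trim();
  }

  public static void main(String[] args) throws IOException {
    Path root = Files.createTempDirectory("hbase-rootdir");
    setVersion(root, "4");
    System.out.println("on-filesystem version: " + getVersion(root)); // prints 4
  }
}
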
Copied: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/MigrationTest.java (from r699497, hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java)
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/MigrationTest.java?p2=hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/MigrationTest.java&p1=hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java&r1=699497&r2=699527&rev=699527&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMigrate.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/MigrationTest.java Fri Sep 26 15:55:46 2008
@@ -27,7 +27,6 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -44,10 +43,10 @@
 import org.apache.hadoop.hbase.io.RowResult;
 
 /**
- * Runs migration of filesystem from hbase 0.1 to 0.2.
+ * Runs migration of filesystem from one hbase 0.x version to another.
  */
-public class TestMigrate extends HBaseTestCase {
-  private static final Log LOG = LogFactory.getLog(TestMigrate.class);
+public class MigrationTest extends HBaseTestCase {
+  private static final Log LOG = LogFactory.getLog(MigrationTest.class);
   
   // This is the name of the table that is in the data file.
   private static final String TABLENAME = "TestUpgrade";
@@ -60,44 +59,10 @@
   private static final int EXPECTED_COUNT = 17576;
 
   /**
-   * Test migration
+   * Test migration. To be used in future migrations.
    * @throws IOException 
    */
   public void testUpgrade() throws IOException {
-    MiniDFSCluster dfsCluster = null;
-    try {
-      dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
-      // Set the hbase.rootdir to be the home directory in mini dfs.
-      this.conf.set(HConstants.HBASE_DIR, new Path(
-        dfsCluster.getFileSystem().getHomeDirectory(), "hbase").toString());
-      FileSystem dfs = dfsCluster.getFileSystem();
-      Path rootDir =
-        dfs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
-      dfs.mkdirs(rootDir);
-      loadTestData(dfs, rootDir);
-      listPaths(dfs, rootDir, rootDir.toString().length() + 1);
-      
-      Migrate u = new Migrate(conf);
-      u.run(new String[] {"check"});
-      listPaths(dfs, rootDir, rootDir.toString().length() + 1);
-      
-      u = new Migrate(conf);
-      u.run(new String[] {"upgrade"});
-      listPaths(dfs, rootDir, rootDir.toString().length() + 1);
-      
-      // Try again. No upgrade should be necessary
-      u = new Migrate(conf);
-      u.run(new String[] {"check"});
-      u = new Migrate(conf);
-      u.run(new String[] {"upgrade"});
-      
-      // Now verify that can read contents.
-      verify();
-    } finally {
-      if (dfsCluster != null) {
-        shutdownDfs(dfsCluster);
-      }
-    }
   }
   
   /*


