hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r1138120 [1/2] - in /hbase/trunk/src: main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/master/ main/java/org/apache/hadoop/hbase/master/handler/ main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/hadoop...
Date Tue, 21 Jun 2011 18:31:27 GMT
Author: stack
Date: Tue Jun 21 18:31:26 2011
New Revision: 1138120

URL: http://svn.apache.org/viewvc?rev=1138120&view=rev
Log:
HBASE-451 Remove HTableDescriptor from HRegionInfo -- part 2, some cleanup

Added:
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
Modified:
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/Merge.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
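
[Editor's sketch] In broad strokes, this commit retires the per-component descriptor caching (see the deleted tableDescMap machinery in AssignmentManager below) in favor of a single TableDescriptors service hung off MasterServices and HRegionServer. A minimal sketch of the resulting call pattern, with the wiring of services assumed:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.master.MasterServices;

    class DescriptorLookupExample {
      // Post-commit pattern: master-side components resolve a table
      // descriptor through the shared TableDescriptors service instead
      // of AssignmentManager's now-removed private tableDescMap.
      static HTableDescriptor lookup(MasterServices services, String tableName)
      throws IOException {
        return services.getTableDescriptors().get(tableName);
      }
    }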

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java?rev=1138120&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java Tue Jun 21 18:31:26 2011
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Get, remove and modify table descriptors.
+ * Used by servers to host descriptors.
+ */
+public interface TableDescriptors {
+  /**
+   * @param tablename
+   * @return HTableDescriptor for tablename
+   * @throws TableExistsException
+   * @throws FileNotFoundException
+   * @throws IOException
+   */
+  public HTableDescriptor get(final String tablename)
+  throws TableExistsException, FileNotFoundException, IOException;
+
+  /**
+   * @param tablename
+   * @return HTableDescriptor for tablename
+   * @throws TableExistsException
+   * @throws FileNotFoundException
+   * @throws IOException
+   */
+  public HTableDescriptor get(final byte[] tablename)
+  throws TableExistsException, FileNotFoundException, IOException;
+
+  /**
+   * Get Map of all HTableDescriptors. Populates the descriptor cache as a
+   * side effect.
+   * @param fs
+   * @param rootdir
+   * @return Map of all descriptors.
+   * @throws IOException
+   */
+  public Map<String, HTableDescriptor> getAll()
+  throws IOException;
+
+  /**
+   * Add or update descriptor
+   * @param htd Descriptor to set into TableDescriptors
+   * @throws IOException
+   */
+  public void add(final HTableDescriptor htd)
+  throws IOException;
+
+  /**
+   * @param tablename
+   * @return Instance of table descriptor or null if none found.
+   * @throws IOException
+   */
+  public HTableDescriptor remove(final String tablename)
+  throws IOException;
+}
\ No newline at end of file
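
[Editor's sketch] For orientation, here is a minimal sketch of how a caller might exercise this new interface through FSTableDescriptors, the filesystem-backed implementation added in this commit; the two-argument constructor is taken from the HMaster hunk further down, everything else is illustrative:

    import java.io.IOException;
    import java.util.Map;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptors;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    class TableDescriptorsUsage {
      static void dump(FileSystem fs, Path rootdir) throws IOException {
        TableDescriptors tds = new FSTableDescriptors(fs, rootdir);
        // getAll() populates the descriptor cache as a side effect (per
        // the javadoc above) and returns every known descriptor.
        Map<String, HTableDescriptor> all = tds.getAll();
        for (Map.Entry<String, HTableDescriptor> e : all.entrySet()) {
          System.out.println(e.getKey() + " => " + e.getValue());
        }
      }
    }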

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java Tue Jun 21 18:31:26 2011
@@ -25,7 +25,6 @@ import java.io.IOException;
 import java.lang.Thread.UncaughtExceptionHandler;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -46,7 +45,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
@@ -64,7 +62,6 @@ import org.apache.hadoop.hbase.master.ha
 import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
 import org.apache.hadoop.hbase.master.handler.SplitRegionHandler;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;
@@ -141,10 +138,6 @@ public class AssignmentManager extends Z
 
   private final ExecutorService executorService;
 
-  private Map<String, HTableDescriptor> tableDescMap =
-      new HashMap<String, HTableDescriptor>();
-
-
   /**
    * Constructs a new assignment manager.
    *
@@ -153,10 +146,11 @@ public class AssignmentManager extends Z
    * @param catalogTracker
    * @param service
    * @throws KeeperException
+   * @throws IOException 
    */
   public AssignmentManager(Server master, ServerManager serverManager,
       CatalogTracker catalogTracker, final ExecutorService service)
-  throws KeeperException {
+  throws KeeperException, IOException {
     super(master.getZooKeeper());
     this.master = master;
     this.serverManager = serverManager;
@@ -172,7 +166,6 @@ public class AssignmentManager extends Z
     this.zkTable = new ZKTable(this.master.getZooKeeper());
     this.maximumAssignmentAttempts =
       this.master.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10);
-    initHTableDescriptorMap();
   }
 
   /**
@@ -1070,10 +1063,6 @@ public class AssignmentManager extends Z
     }
     // Move on to open regions.
     try {
-      // Update the tableDesc map.
-      for (HRegionInfo region : regions) {
-        updateDescMap(region.getTableNameAsString());
-      }
       // Send OPEN RPC. This can fail if the server on other end is is not up.
       // If we fail, fail the startup by aborting the server.  There is one
       // exception we will tolerate: ServerNotRunningException.  This is thrown
@@ -2257,140 +2246,6 @@ public class AssignmentManager extends Z
     LOG.info("Bulk assigning done");
   }
 
-
-  private void initHTableDescriptorMap() {
-    try {
-      synchronized (this.tableDescMap) {
-        this.tableDescMap =
-            FSUtils.getTableDescriptors(this.master.getConfiguration());
-      }
-    } catch (IOException e) {
-      LOG.info("IOException while initializing HTableDescriptor Map");
-    }
-  }
-
-  private HTableDescriptor readTableDescriptor(String tableName)
-      throws IOException {
-    return FSUtils.getHTableDescriptor(
-        this.master.getConfiguration(), tableName);
-  }
-
-  private boolean isRootOrMetaRegion(String tableName) {
-    return (
-        tableName.equals(
-            HRegionInfo.ROOT_REGIONINFO.getTableNameAsString())
-        ||
-        tableName.equals(
-            HRegionInfo.FIRST_META_REGIONINFO.getTableNameAsString()));
-  }
-
-  private void updateDescMap(String tableName) throws IOException {
-
-    if (this.tableDescMap == null) {
-      LOG.error("Table Descriptor cache is null. " +
-          "Skipping desc map update for table = " + tableName);
-      return;
-    }
-
-    if (tableName == null || isRootOrMetaRegion(tableName))
-      return;
-    if (!this.tableDescMap.containsKey(tableName)) {
-      HTableDescriptor htd = readTableDescriptor(tableName);
-      if (htd != null) {
-        LOG.info("Updating TableDesc Map for tablename = " + tableName
-        + "htd == " + htd);
-        synchronized (this.tableDescMap) {
-        this.tableDescMap.put(tableName, htd);
-        }
-      } else {
-        LOG.info("HTable Descriptor is NULL for table = " + tableName);
-      }
-    }
-  }
-
-  public void updateTableDesc(String tableName, HTableDescriptor htd) {
-    if (this.tableDescMap == null) {
-      LOG.error("Table Descriptor cache is null. " +
-          "Skipping desc map update for table = " + tableName);
-      return;
-    }
-    if (tableName == null || isRootOrMetaRegion(tableName))
-      return;
-    if (!this.tableDescMap.containsKey(tableName)) {
-      LOG.error("Table descriptor missing in DescMap. for tablename = " + tableName);
-    }
-    synchronized (this.tableDescMap) {
-      this.tableDescMap.put(tableName, htd);
-    }
-    LOG.info("TableDesc updated successfully for table = " + tableName);
-  }
-
-  public void deleteTableDesc(String tableName) {
-    if (this.tableDescMap == null) {
-      LOG.error("Table Descriptor cache is null. " +
-          "Skipping desc map update for table = " + tableName);
-      return;
-    }
-    if (tableName == null || isRootOrMetaRegion(tableName))
-      return;
-    if (!this.tableDescMap.containsKey(tableName)) {
-      LOG.error("Table descriptor missing in DescMap. for tablename = " + tableName);
-    }
-    synchronized (this.tableDescMap) {
-      this.tableDescMap.remove(tableName);
-    }
-    LOG.info("TableDesc removed successfully for table = " + tableName);
-  }
-
-  public HTableDescriptor[] getHTableDescriptors(List<String> tableNames) {
-    List htdList = null;
-    HTableDescriptor[] htd = null;
-    if (tableNames != null && tableNames.size() > 0) {
-      if (this.tableDescMap != null) {
-        htd = new HTableDescriptor[tableNames.size()];
-        htdList = new ArrayList();
-        synchronized (this.tableDescMap) {
-          int index = 0;
-          for (String tableName : tableNames) {
-            HTableDescriptor htdesc = this.tableDescMap.get(tableName);
-            htd[index++] = this.tableDescMap.get(tableName);
-            if (htdesc != null) {
-              htdList.add(htdesc);
-            }
-
-          }
-        }
-      }
-    }
-    if (htdList != null && htdList.size() > 0 ) {
-      return (HTableDescriptor[]) htdList.toArray(new HTableDescriptor[htdList.size()]);
-    }
-    return null;
-  }
-
-  public HTableDescriptor[] getHTableDescriptors() {
-    if (this.tableDescMap != null) {
-      synchronized (this.tableDescMap) {
-        Collection<HTableDescriptor> htdc = this.tableDescMap.values();
-        if (htdc != null) {
-          return htdc.toArray(new HTableDescriptor[htdc.size()]);
-        }
-      }
-    }
-    return null;
-  }
-
-  public HTableDescriptor getTableDescriptor(String tableName) {
-    HTableDescriptor htd = null;
-    if (tableName != null) {
-      synchronized (this.tableDescMap) {
-        htd = this.tableDescMap.get(tableName);
-      }
-    }
-    return htd;
-  }
-
-
   /**
    * State of a Region while undergoing transitions.
    */
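
[Editor's sketch] One reason the deleted cache above was worth retiring: containsKey was tested outside the synchronized block, so the check and the subsequent put could interleave across threads. Below is a race-free version of that idiom, purely illustrative; this diff does not show FSTableDescriptors' internals, and readDescriptorFromFs is a hypothetical placeholder:

    import java.io.IOException;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    import org.apache.hadoop.hbase.HTableDescriptor;

    class DescriptorCacheSketch {
      private final ConcurrentMap<String, HTableDescriptor> cache =
        new ConcurrentHashMap<String, HTableDescriptor>();

      HTableDescriptor getOrLoad(String tableName) throws IOException {
        HTableDescriptor htd = cache.get(tableName);
        if (htd == null) {
          htd = readDescriptorFromFs(tableName); // load outside any lock
          if (htd != null) {
            // putIfAbsent makes check-and-insert atomic; the removed
            // code's separate containsKey/put pair could race between
            // the two calls.
            HTableDescriptor prev = cache.putIfAbsent(tableName, htd);
            if (prev != null) htd = prev;
          }
        }
        return htd;
      }

      private HTableDescriptor readDescriptorFromFs(String tableName)
      throws IOException {
        return null; // placeholder; stands in for a .tableinfo read
      }
    }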

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java Tue Jun 21 18:31:26 2011
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Map;
 import java.util.TreeMap;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.HTableDes
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Result;
@@ -283,9 +285,8 @@ class CatalogJanitor extends Chore {
     return result;
   }
 
-  private HTableDescriptor getTableDescriptor(byte[] tableName) {
-    return this.services.getAssignmentManager().getTableDescriptor(
-        Bytes.toString(tableName));
+  private HTableDescriptor getTableDescriptor(byte[] tableName)
+  throws TableExistsException, FileNotFoundException, IOException {
+    return this.services.getTableDescriptors().get(Bytes.toString(tableName));
   }
-
-}
+}
\ No newline at end of file

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Tue Jun 21 18:31:26 2011
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.MasterNot
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
@@ -78,6 +79,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.InfoServer;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Sleeper;
@@ -178,6 +180,8 @@ implements HMasterInterface, HMasterRegi
   private MasterCoprocessorHost cpHost;
   private final ServerName serverName;
 
+  private TableDescriptors tableDescriptors;
+
   /**
    * Initializes the HMaster. The steps are as follows:
    * <p>
@@ -410,7 +414,11 @@ implements HMasterInterface, HMasterRegi
 
     status.setStatus("Initializing Master file system");
     // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
-    this.fileSystemManager = new MasterFileSystem(this, metrics);
+    this.fileSystemManager = new MasterFileSystem(this, this, metrics);
+
+    this.tableDescriptors =
+      new FSTableDescriptors(this.fileSystemManager.getFileSystem(),
+      this.fileSystemManager.getRootDir());
 
     // publish cluster ID
     status.setStatus("Publishing Cluster ID in ZooKeeper");
@@ -579,6 +587,11 @@ implements HMasterInterface, HMasterRegi
     return -1;
   }
 
+  @Override
+  public TableDescriptors getTableDescriptors() {
+    return this.tableDescriptors;
+  }
+
   /** @return InfoServer object. Maybe null.*/
   public InfoServer getInfoServer() {
     return this.infoServer;
@@ -948,20 +961,19 @@ implements HMasterInterface, HMasterRegi
   return hRegionInfos;
 }
 
-  private void storeTableDescriptor(HTableDescriptor hTableDescriptor)
-      throws IOException {
-    FSUtils.createTableDescriptor(hTableDescriptor, conf);
-  }
-
   private synchronized void createTable(final HTableDescriptor hTableDescriptor,
                                         final HRegionInfo [] newRegions,
       final boolean sync)
   throws IOException {
     String tableName = newRegions[0].getTableNameAsString();
-    if(MetaReader.tableExists(catalogTracker, tableName)) {
+    if (MetaReader.tableExists(catalogTracker, tableName)) {
       throw new TableExistsException(tableName);
     }
-    storeTableDescriptor(hTableDescriptor);
+    // TODO: Currently we make the table descriptor and as side-effect the
+    // tableDir is created.  Should we change below method to be createTable
+    // where we create table in tmp dir with its table descriptor file and then
+    // do rename to move it into place?
+    FSUtils.createTableDescriptor(hTableDescriptor, conf);
 
     for (HRegionInfo newRegion : newRegions) {
       // 1. Set table enabling flag up in zk.
@@ -1373,37 +1385,40 @@ implements HMasterInterface, HMasterRegi
   }
 
   /**
-   * Get HTD array for given tables
+   * Get HTD array for given tables 
    * @param tableNames
    * @return HTableDescriptor[]
    */
   public HTableDescriptor[] getHTableDescriptors(List<String> tableNames) {
-    return this.assignmentManager.getHTableDescriptors(tableNames);
+    List<HTableDescriptor> list =
+      new ArrayList<HTableDescriptor>(tableNames.size());
+    for (String s: tableNames) {
+      HTableDescriptor htd = null;
+      try {
+        htd = this.tableDescriptors.get(s);
+      } catch (IOException e) {
+        LOG.warn("Failed getting descriptor for " + s, e);
+      }
+      if (htd == null) continue;
+      list.add(htd);
+    }
+    return list.toArray(new HTableDescriptor [] {});
   }
 
   /**
    * Get all table descriptors
-   * @return HTableDescriptor[]
-   */
-  public HTableDescriptor[] getHTableDescriptors() {
-    return this.assignmentManager.getHTableDescriptors();
-  }
-
-  /**
-   * Get a HTD for a given table name
-   * @param tableName
-   * @return HTableDescriptor
+   * @return All descriptors or null if none.
    */
-/*
-  public HTableDescriptor getHTableDescriptor(byte[] tableName) {
-    if (tableName != null && tableName.length > 0) {
-      return this.assignmentManager.getTableDescriptor(
-          Bytes.toString(tableName));
+  public HTableDescriptor [] getHTableDescriptors() {
+    Map<String, HTableDescriptor> descriptors = null;
+    try {
+      descriptors = this.tableDescriptors.getAll();
+    } catch (IOException e) {
+      LOG.warn("Failed getting all descriptors", e);
     }
-    return null;
+    return descriptors == null?
+      null: descriptors.values().toArray(new HTableDescriptor [] {});
   }
-*/
-
 
   /**
    * Compute the average load across all region servers.
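
[Editor's sketch] On the TODO in createTable above (stage the table under a tmp dir, then rename into place): a hypothetical sketch of that flow using only standard FileSystem calls. None of these paths or steps are part of this commit:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class StagedTableCreateSketch {
      // Hypothetical: build the table dir (descriptor file included)
      // under a tmp location, then rename the whole directory into
      // place so readers never observe a half-created table.
      static void createTableDir(FileSystem fs, Path rootdir, String table)
      throws IOException {
        Path tmp = new Path(rootdir, ".tmp/" + table);
        fs.mkdirs(tmp);
        // ... write the .tableinfo descriptor under tmp here ...
        Path dest = new Path(rootdir, table);
        if (!fs.rename(tmp, dest)) {
          throw new IOException("Rename failed: " + tmp + " -> " + dest);
        }
      }
    }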

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java Tue Jun 21 18:31:26 2011
@@ -33,19 +33,16 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException;
@@ -80,11 +77,14 @@ public class MasterFileSystem {
   final Lock splitLogLock = new ReentrantLock();
   final boolean distributedLogSplitting;
   final SplitLogManager splitLogManager;
+  private final MasterServices services;
 
-  public MasterFileSystem(Server master, MasterMetrics metrics)
+  public MasterFileSystem(Server master, MasterServices services,
+      MasterMetrics metrics)
   throws IOException {
     this.conf = master.getConfiguration();
     this.master = master;
+    this.services = services;
     this.metrics = metrics;
     // Set filesystem to be that of this.rootdir else we get complaints about
     // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
@@ -411,28 +411,6 @@ public class MasterFileSystem {
   }
 
     /**
-   * Get table info path for a table.
-   * @param tableName
-   * @return Table info path
-   */
-  private Path getTablePath(byte[] tableName) {
-    return new Path(this.rootdir, Bytes.toString(tableName));
-  }
-  /**
-   * Get a HTableDescriptor of a table.
-   * @param tableName
-   * @return HTableDescriptor
-   */
-  public HTableDescriptor getTableDescriptor(byte[] tableName) {
-    try {
-      return FSUtils.getTableDescriptor(fs, this.rootdir, tableName);
-    } catch (IOException ioe) {
-      LOG.info("Exception during readTableDecriptor ", ioe);
-    }
-    return null;
-  }
-
-    /**
    * Create new HTableDescriptor in HDFS.
    * @param htableDescriptor
    */
@@ -441,19 +419,6 @@ public class MasterFileSystem {
   }
 
   /**
-   * Update a table descriptor.
-   * @param htableDescriptor
-   * @return updated HTableDescriptor
-   * @throws IOException
-   */
-  public HTableDescriptor updateTableDescriptor(HTableDescriptor htableDescriptor)
-      throws IOException {
-    LOG.info("Update Table Descriptor.  Current HTD = " + htableDescriptor);
-    FSUtils.updateHTableDescriptor(fs, conf, htableDescriptor);
-    return htableDescriptor;
-  }
-
-  /**
    * Delete column of a table
    * @param tableName
    * @param familyName
@@ -464,9 +429,9 @@ public class MasterFileSystem {
       throws IOException {
     LOG.info("DeleteColumn. Table = " + Bytes.toString(tableName)
         + " family = " + Bytes.toString(familyName));
-    HTableDescriptor htd = getTableDescriptor(tableName);
+    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
     htd.removeFamily(familyName);
-    updateTableDescriptor(htd);
+    this.services.getTableDescriptors().add(htd);
     return htd;
   }
 
@@ -482,14 +447,14 @@ public class MasterFileSystem {
     LOG.info("AddModifyColumn. Table = " + Bytes.toString(tableName)
         + " HCD = " + hcd.toString());
 
-    HTableDescriptor htd = getTableDescriptor(tableName);
+    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
     byte [] familyName = hcd.getName();
     if(!htd.hasFamily(familyName)) {
       throw new InvalidFamilyOperationException("Family '" +
         Bytes.toString(familyName) + "' doesn't exists so cannot be modified");
     }
     htd.addFamily(hcd);
-    updateTableDescriptor(htd);
+    this.services.getTableDescriptors().add(htd);
     return htd;
   }
 
@@ -502,17 +467,15 @@ public class MasterFileSystem {
    */
   public HTableDescriptor addColumn(byte[] tableName, HColumnDescriptor hcd)
       throws IOException {
-    LOG.info("AddColumn. Table = " + Bytes.toString(tableName)
-        + " HCD = " + hcd.toString());
-
-    HTableDescriptor htd = getTableDescriptor(tableName);
-    if(htd == null) {
+    LOG.info("AddColumn. Table = " + Bytes.toString(tableName) + " HCD = " +
+      hcd.toString());
+    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
+    if (htd == null) {
       throw new InvalidFamilyOperationException("Family '" +
         hcd.getNameAsString() + "' cannot be modified as HTD is null");
     }
     htd.addFamily(hcd);
-    updateTableDescriptor(htd);
+    this.services.getTableDescriptors().add(htd);
     return htd;
   }
-
-}
+}
\ No newline at end of file

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java Tue Jun 21 18:31:26 2011
@@ -22,11 +22,10 @@ package org.apache.hadoop.hbase.master;
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.executor.ExecutorService;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 
 /**
  * Services Master supplies
@@ -60,4 +59,8 @@ public interface MasterServices extends 
    */
   public void checkTableModifiable(final byte [] tableName) throws IOException;
 
+  /**
+   * @return Return table descriptors implementation.
+   */
+  public TableDescriptors getTableDescriptors();
 }
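
[Editor's sketch] Because MasterServices now exposes getTableDescriptors(), test doubles (e.g. the mock in TestCatalogJanitor, also touched by this commit) need something to return. A minimal in-memory stub, offered purely as a sketch:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptors;
    import org.apache.hadoop.hbase.util.Bytes;

    class MapBackedTableDescriptors implements TableDescriptors {
      private final Map<String, HTableDescriptor> map =
        new HashMap<String, HTableDescriptor>();

      public HTableDescriptor get(String tablename) throws IOException {
        return map.get(tablename);
      }

      public HTableDescriptor get(byte[] tablename) throws IOException {
        return get(Bytes.toString(tablename));
      }

      public Map<String, HTableDescriptor> getAll() throws IOException {
        return new HashMap<String, HTableDescriptor>(map);
      }

      public void add(HTableDescriptor htd) throws IOException {
        map.put(htd.getNameAsString(), htd);
      }

      public HTableDescriptor remove(String tablename) throws IOException {
        return map.remove(tablename);
      }
    }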

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java Tue Jun 21 18:31:26 2011
@@ -231,7 +231,7 @@ public class SplitLogManager extends Zoo
     for (FileStatus lf : logfiles) {
       // TODO If the log file is still being written to - which is most likely
       // the case for the last log file - then its length will show up here
-      // as zero. The size of such a file can only be retrieved after after
+      // as zero. The size of such a file can only be retrieved after
       // recover-lease is done. totalSize will be under in most cases and the
       // metrics that it drives will also be under-reported.
       totalSize += lf.getLen();

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java Tue Jun 21 18:31:26 2011
@@ -71,7 +71,7 @@ public class DeleteTableHandler extends 
     // Delete table from FS
     this.masterServices.getMasterFileSystem().deleteTable(tableName);
     // Update table descriptor cache
-    am.deleteTableDesc(Bytes.toString(tableName));
+    this.masterServices.getTableDescriptors().remove(Bytes.toString(tableName));
 
     // If entry for this table in zk, and up in AssignmentManager, remove it.
     // Call to undisableTable does this. TODO: Make a more formal purge table.

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java Tue Jun 21 18:31:26 2011
@@ -25,10 +25,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.util.Bytes;
 
 public class ModifyTableHandler extends TableEventHandler {
   private final HTableDescriptor htd;
@@ -43,20 +40,10 @@ public class ModifyTableHandler extends 
   @Override
   protected void handleTableOperation(List<HRegionInfo> hris)
   throws IOException {
-    AssignmentManager am = this.masterServices.getAssignmentManager();
-    HTableDescriptor htd = am.getTableDescriptor(Bytes.toString(tableName));
-    if (htd == null) {
-      throw new IOException("Modify Table operation could not be completed as " +
-          "HTableDescritor is missing for table = "
-          + Bytes.toString(tableName));
-    }
-    // Update table descriptor in HDFS
-
-    HTableDescriptor updatedHTD = this.masterServices.getMasterFileSystem()
-        .updateTableDescriptor(this.htd);
-    // Update in-memory descriptor cache
-    am.updateTableDesc(Bytes.toString(tableName), updatedHTD);
+    // Update descriptor
+    this.masterServices.getTableDescriptors().add(this.htd);
   }
+
   @Override
   public String toString() {
     String name = "UnknownServerName";

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java Tue Jun 21 18:31:26 2011
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -49,7 +48,7 @@ public class TableAddFamilyHandler exten
   protected void handleTableOperation(List<HRegionInfo> hris)
   throws IOException {
     AssignmentManager am = this.masterServices.getAssignmentManager();
-    HTableDescriptor htd = am.getTableDescriptor(Bytes.toString(tableName));
+    HTableDescriptor htd = this.masterServices.getTableDescriptors().get(Bytes.toString(tableName));
     byte [] familyName = familyDesc.getName();
     if (htd == null) {
       throw new IOException("Add Family operation could not be completed as " +
@@ -65,7 +64,7 @@ public class TableAddFamilyHandler exten
     htd = this.masterServices.getMasterFileSystem()
         .addColumn(tableName, familyDesc);
     // Update in-memory descriptor cache
-    am.updateTableDesc(Bytes.toString(tableName), htd);
+    this.masterServices.getTableDescriptors().add(htd);
   }
   @Override
   public String toString() {

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java Tue Jun 21 18:31:26 2011
@@ -26,9 +26,7 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -48,7 +46,7 @@ public class TableDeleteFamilyHandler ex
   @Override
   protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
     AssignmentManager am = this.masterServices.getAssignmentManager();
-    HTableDescriptor htd = am.getTableDescriptor(Bytes.toString(tableName));
+    HTableDescriptor htd = this.masterServices.getTableDescriptors().get(Bytes.toString(tableName));
     if (htd == null) {
       throw new IOException("Add Family operation could not be completed as " +
           "HTableDescritor is missing for table = "
@@ -63,7 +61,7 @@ public class TableDeleteFamilyHandler ex
     htd = this.masterServices.getMasterFileSystem()
         .deleteColumn(tableName, familyName);
     // Update in-memory descriptor cache
-    am.updateTableDesc(Bytes.toString(tableName), htd);
+    this.masterServices.getTableDescriptors().add(htd);
   }
 
   @Override

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java Tue Jun 21 18:31:26 2011
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -49,7 +48,7 @@ public class TableModifyFamilyHandler ex
   @Override
   protected void handleTableOperation(List<HRegionInfo> regions) throws IOException {
     AssignmentManager am = this.masterServices.getAssignmentManager();
-    HTableDescriptor htd = am.getTableDescriptor(Bytes.toString(tableName));
+    HTableDescriptor htd = this.masterServices.getTableDescriptors().get(Bytes.toString(tableName));
     byte [] familyName = familyDesc.getName();
     if (htd == null) {
       throw new IOException("Modify Family operation could not be completed as " +
@@ -61,10 +60,9 @@ public class TableModifyFamilyHandler ex
         Bytes.toString(familyName) + "' doesn't exists so cannot be modified");
     }
     // Update table descriptor in HDFS
-    htd = this.masterServices.getMasterFileSystem()
-        .modifyColumn(tableName, familyDesc);
+    htd = this.masterServices.getMasterFileSystem().modifyColumn(tableName, familyDesc);
     // Update in-memory descriptor cache
-    am.updateTableDesc(Bytes.toString(tableName), htd);
+    this.masterServices.getTableDescriptors().add(htd);
   }
   @Override
   public String toString() {

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Tue Jun 21 18:31:26 2011
@@ -199,8 +199,6 @@ public class HRegion implements HeapSize
   final Path regiondir;
   KeyValue.KVComparator comparator;
 
-  private Pair<Long,Long> lastCompactInfo = null;
-
   /*
    * Data structure of write state flags used coordinating flushes,
    * compactions and closes.
@@ -282,6 +280,7 @@ public class HRegion implements HeapSize
     this.log = null;
     this.regiondir = null;
     this.regionInfo = null;
+    this.htableDescriptor = null;
     this.threadWakeFrequency = 0L;
     this.coprocessorHost = null;
   }
@@ -310,26 +309,22 @@ public class HRegion implements HeapSize
    * @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)
    */
   public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf,
-      HRegionInfo regionInfo, RegionServerServices rsServices) {
+      HRegionInfo regionInfo, final HTableDescriptor htd,
+      RegionServerServices rsServices) {
     this.tableDir = tableDir;
     this.comparator = regionInfo.getComparator();
     this.log = log;
     this.fs = fs;
     this.conf = conf;
     this.regionInfo = regionInfo;
+    this.htableDescriptor = htd;
     this.rsServices = rsServices;
     this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY,
         10 * 1000);
     String encodedNameStr = this.regionInfo.getEncodedName();
+    setHTableSpecificConf();
     this.regiondir = getRegionDir(this.tableDir, encodedNameStr);
-    try {
-      LOG.info("Setting table desc from HDFS. Region = "
-        + this.regionInfo.getTableNameAsString());
-      loadHTableDescriptor(tableDir);
-      LOG.info(" This HTD from HDFS  == " + this.htableDescriptor);
-    } catch (IOException ioe) {
-      LOG.error("Could not instantiate region as error loading HTableDescriptor");
-    }
+
     // don't initialize coprocessors if not running within a regionserver
     // TODO: revisit if coprocessors should load in other cases
     if (rsServices != null) {
@@ -341,38 +336,17 @@ public class HRegion implements HeapSize
     }
   }
 
-  private void loadHTableDescriptor(Path tableDir) throws IOException {
-    LOG.debug("Assigning tabledesc from .tableinfo for region = "
-        + this.regionInfo.getRegionNameAsString());
-    // load HTableDescriptor
-    this.htableDescriptor = FSUtils.getTableDescriptor(tableDir, fs);
-
-    if (this.htableDescriptor != null) {
-      setHTableSpecificConf();
-    } else {
-      throw new IOException("Table description missing in " +
-          ".tableinfo. Cannot create new region."
-          + " current region is == " + this.regionInfo.toString());
-    }
-
-  }
-
-  private void setHTableSpecificConf() {
-    if (this.htableDescriptor != null) {
-      LOG.info("Setting up tabledescriptor config now ...");
-      long flushSize = this.htableDescriptor.getMemStoreFlushSize();
-      if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) {
-        flushSize = conf.getLong("hbase.hregion.memstore.flush.size",
-            HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
-      }
-      this.memstoreFlushSize = flushSize;
-      this.blockingMemStoreSize = this.memstoreFlushSize *
-          conf.getLong("hbase.hregion.memstore.block.multiplier", 2);
-    }
-  }
-
-  public void setHtableDescriptor(HTableDescriptor htableDescriptor) {
-    this.htableDescriptor = htableDescriptor;
+  void setHTableSpecificConf() {
+    if (this.htableDescriptor == null) return;
+    LOG.info("Setting up tabledescriptor config now ...");
+    long flushSize = this.htableDescriptor.getMemStoreFlushSize();
+    if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) {
+      flushSize = conf.getLong("hbase.hregion.memstore.flush.size",
+         HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
+    }
+    this.memstoreFlushSize = flushSize;
+    this.blockingMemStoreSize = this.memstoreFlushSize *
+        conf.getLong("hbase.hregion.memstore.block.multiplier", 2);
   }
 
   /**
@@ -2763,11 +2737,12 @@ public class HRegion implements HeapSize
    * @param conf is global configuration settings.
    * @param regionInfo - HRegionInfo that describes the region
    * is new), then read them from the supplied path.
+   * @param htd
    * @param rsServices
    * @return the new instance
    */
   public static HRegion newHRegion(Path tableDir, HLog log, FileSystem fs,
-      Configuration conf, HRegionInfo regionInfo,
+      Configuration conf, HRegionInfo regionInfo, final HTableDescriptor htd,
       RegionServerServices rsServices) {
     try {
       @SuppressWarnings("unchecked")
@@ -2776,9 +2751,10 @@ public class HRegion implements HeapSize
 
       Constructor<? extends HRegion> c =
           regionClass.getConstructor(Path.class, HLog.class, FileSystem.class,
-              Configuration.class, HRegionInfo.class, RegionServerServices.class);
+              Configuration.class, HRegionInfo.class, HTableDescriptor.class,
+              RegionServerServices.class);
 
-      return c.newInstance(tableDir, log, fs, conf, regionInfo, rsServices);
+      return c.newInstance(tableDir, log, fs, conf, regionInfo, htd, rsServices);
     } catch (Throwable e) {
       // todo: what should I throw here?
       throw new IllegalStateException("Could not instantiate a region instance.", e);
@@ -2800,9 +2776,8 @@ public class HRegion implements HeapSize
    * @throws IOException
    */
   public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
-                                      final Configuration conf,
-                                      final HTableDescriptor hTableDescriptor)
-      throws IOException {
+      final Configuration conf, final HTableDescriptor hTableDescriptor)
+  throws IOException {
     LOG.info("creating HRegion " + info.getTableNameAsString()
     + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
     " Table name == " + info.getTableNameAsString());
@@ -2812,11 +2787,10 @@ public class HRegion implements HeapSize
     Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
     FileSystem fs = FileSystem.get(conf);
     fs.mkdirs(regionDir);
-    FSUtils.createTableDescriptor(fs, hTableDescriptor, tableDir);
     HRegion region = HRegion.newHRegion(tableDir,
         new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
             new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf),
-        fs, conf, info, null);
+        fs, conf, info, hTableDescriptor, null);
     region.initialize();
     return region;
   }
@@ -2833,10 +2807,11 @@ public class HRegion implements HeapSize
    *
    * @throws IOException
    */
-  public static HRegion openHRegion(final HRegionInfo info, final HLog wal,
+  public static HRegion openHRegion(final HRegionInfo info,
+      final HTableDescriptor htd, final HLog wal,
       final Configuration conf)
   throws IOException {
-    return openHRegion(info, wal, conf, null, null);
+    return openHRegion(info, htd, wal, conf, null, null);
   }
 
   /**
@@ -2853,8 +2828,9 @@ public class HRegion implements HeapSize
    *
    * @throws IOException
    */
-  public static HRegion openHRegion(final HRegionInfo info, final HLog wal,
-    final Configuration conf, final RegionServerServices rsServices,
+  public static HRegion openHRegion(final HRegionInfo info,
+    final HTableDescriptor htd, final HLog wal, final Configuration conf,
+    final RegionServerServices rsServices,
     final CancelableProgressable reporter)
   throws IOException {
     if (LOG.isDebugEnabled()) {
@@ -2866,14 +2842,14 @@ public class HRegion implements HeapSize
     Path dir = HTableDescriptor.getTableDir(FSUtils.getRootDir(conf),
       info.getTableName());
     HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info,
-      rsServices);
+      htd, rsServices);
     return r.openHRegion(reporter);
   }
 
   public static HRegion openHRegion(Path tableDir, final HRegionInfo info,
-                                    final HLog wal, final Configuration conf)
-      throws IOException {
-    return openHRegion(tableDir, info, wal, conf, null, null);
+      final HTableDescriptor htd, final HLog wal, final Configuration conf)
+  throws IOException {
+    return openHRegion(tableDir, info, htd, wal, conf, null, null);
   }
 
   /**
@@ -2891,21 +2867,19 @@ public class HRegion implements HeapSize
    * @throws IOException
    */
   public static HRegion openHRegion(final Path tableDir, final HRegionInfo info,
-                                    final HLog wal, final Configuration conf,
-                                    final RegionServerServices rsServices,
-                                    final CancelableProgressable reporter)
-      throws IOException {
+      final HTableDescriptor htd, final HLog wal, final Configuration conf,
+      final RegionServerServices rsServices,
+      final CancelableProgressable reporter)
+  throws IOException {
+    if (info == null) throw new NullPointerException("Passed region info is null");
     LOG.info("HRegion.openHRegion Region name ==" + info.getRegionNameAsString());
     if (LOG.isDebugEnabled()) {
       LOG.debug("Opening region: " + info);
     }
-    if (info == null) {
-      throw new NullPointerException("Passed region info is null");
-    }
     Path dir = HTableDescriptor.getTableDir(tableDir,
         info.getTableName());
     HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info,
-        rsServices);
+        htd, rsServices);
     return r.openHRegion(reporter);
   }
 
@@ -3077,7 +3051,8 @@ public class HRegion implements HeapSize
    * @return new merged region
    * @throws IOException
    */
-  public static HRegion merge(HRegion a, HRegion b) throws IOException {
+  public static HRegion merge(HRegion a, HRegion b)
+  throws IOException {
     if (!a.getRegionInfo().getTableNameAsString().equals(
         b.getRegionInfo().getTableNameAsString())) {
       throw new IOException("Regions do not belong to the same table");
@@ -3179,7 +3154,8 @@ public class HRegion implements HeapSize
       LOG.debug("Files for new region");
       listPaths(fs, newRegionDir);
     }
-    HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf, newRegionInfo, null);
+    HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf,
+        newRegionInfo, a.getTableDesc(), null);
     dstRegion.readRequestsCount.set(a.readRequestsCount.get() + b.readRequestsCount.get());
     dstRegion.writeRequestsCount.set(a.writeRequestsCount.get() + b.writeRequestsCount.get());
     dstRegion.initialize();
@@ -3592,7 +3568,7 @@ public class HRegion implements HeapSize
 
   public static final long FIXED_OVERHEAD = ClassSize.align(
       (4 * Bytes.SIZEOF_LONG) + ClassSize.ARRAY +
-      ClassSize.align(28 * ClassSize.REFERENCE) + ClassSize.OBJECT +
+      ClassSize.align(27 * ClassSize.REFERENCE) + ClassSize.OBJECT +
       ClassSize.align(Bytes.SIZEOF_INT));
 
   public static final long DEEP_OVERHEAD = FIXED_OVERHEAD +
@@ -3745,10 +3721,11 @@ public class HRegion implements HeapSize
     String metaStr = Bytes.toString(HConstants.META_TABLE_NAME);
     // Currently expects tables have one region only.
     if (p.getName().startsWith(rootStr)) {
-      region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO, null);
+      region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO,
+        HTableDescriptor.ROOT_TABLEDESC, null);
     } else if (p.getName().startsWith(metaStr)) {
-      region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.FIRST_META_REGIONINFO,
-          null);
+      region = HRegion.newHRegion(p, log, fs, c,
+        HRegionInfo.FIRST_META_REGIONINFO, HTableDescriptor.META_TABLEDESC, null);
     } else {
       throw new IOException("Not a known catalog table: " + p.toString());
     }
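
[Editor's sketch] With the constructor and factory changes above, the descriptor now travels with the open call rather than being loaded inside HRegion. A sketch of a call site against the new openHRegion signature; resolving the descriptor via TableDescriptors is an assumption here, though HRegionServer below does the equivalent:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptors;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;

    class OpenRegionCallSite {
      // Post-commit pattern: the caller resolves the descriptor first
      // and hands it to openHRegion, which no longer reads .tableinfo
      // on its own.
      static HRegion open(TableDescriptors tds, HRegionInfo info, HLog wal,
          Configuration conf) throws IOException {
        HTableDescriptor htd = tds.get(info.getTableName());
        return HRegion.openHRegion(info, htd, wal, conf);
      }
    }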

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Tue Jun 21 18:31:26 2011
@@ -63,12 +63,14 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MasterAddressTracker;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.UnknownRowLockException;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.YouAreDeadException;
@@ -120,6 +122,7 @@ import org.apache.hadoop.hbase.security.
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.InfoServer;
 import org.apache.hadoop.hbase.util.Pair;
@@ -294,6 +297,11 @@ public class HRegionServer implements HR
   private final long startcode;
 
   /**
+   * Go here to get table descriptors.
+   */
+  private TableDescriptors tableDescriptors;
+
+  /**
    * Starts a HRegionServer at the default location
    *
    * @param conf
@@ -863,6 +871,7 @@ public class HRegionServer implements HR
       // Get fs instance used by this RS
       this.fs = FileSystem.get(this.conf);
       this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
+      this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true);
       this.hlog = setupWALAndReplication();
       // Init in here rather than in constructor after thread name has been set
       this.metrics = new RegionServerMetrics();
@@ -2268,12 +2277,13 @@ public class HRegionServer implements HR
     LOG.info("Received request to open region: " +
       region.getRegionNameAsString());
     if (this.stopped) throw new RegionServerStoppedException();
+    HTableDescriptor htd = this.tableDescriptors.get(region.getTableName());
     if (region.isRootRegion()) {
-      this.service.submit(new OpenRootHandler(this, this, region));
+      this.service.submit(new OpenRootHandler(this, this, region, htd));
     } else if(region.isMetaRegion()) {
-      this.service.submit(new OpenMetaHandler(this, this, region));
+      this.service.submit(new OpenMetaHandler(this, this, region, htd));
     } else {
-      this.service.submit(new OpenRegionHandler(this, this, region));
+      this.service.submit(new OpenRegionHandler(this, this, region, htd));
     }
   }
 

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java Tue Jun 21 18:31:26 2011
@@ -567,7 +567,7 @@ public class SplitTransaction {
       this.splitdir, hri);
     HRegion r = HRegion.newHRegion(this.parent.getTableDir(),
       this.parent.getLog(), fs, this.parent.getConf(),
-      hri, rsServices);
+      hri, this.parent.getTableDesc(), rsServices);
     r.readRequestsCount.set(this.parent.getReadRequestsCount() / 2);
     r.writeRequestsCount.set(this.parent.getWriteRequestsCount() / 2);
     HRegion.moveInitialFilesIntoPlace(fs, regionDir, r.getRegionDir());

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java Tue Jun 21 18:31:26 2011
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.regionserver.handler;
 
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 
@@ -30,7 +31,8 @@ import org.apache.hadoop.hbase.regionser
  */
 public class OpenMetaHandler extends OpenRegionHandler {
   public OpenMetaHandler(final Server server,
-      final RegionServerServices rsServices, HRegionInfo regionInfo) {
-    super(server,rsServices,  regionInfo, EventType.M_RS_OPEN_META);
+      final RegionServerServices rsServices, HRegionInfo regionInfo,
+      final HTableDescriptor htd) {
+    super(server,rsServices, regionInfo, htd, EventType.M_RS_OPEN_META);
   }
-}
+}
\ No newline at end of file

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java Tue Jun 21 18:31:26 2011
@@ -26,6 +26,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -45,6 +46,7 @@ public class OpenRegionHandler extends E
   private final RegionServerServices rsServices;
 
   private final HRegionInfo regionInfo;
+  private final HTableDescriptor htd;
 
   // We get version of our znode at start of open process and monitor it across
   // the total open. We'll fail the open if someone hijacks our znode; we can
@@ -52,16 +54,18 @@ public class OpenRegionHandler extends E
   private volatile int version = -1;
 
   public OpenRegionHandler(final Server server,
-      final RegionServerServices rsServices, HRegionInfo regionInfo) {
-    this(server, rsServices, regionInfo, EventType.M_RS_OPEN_REGION);
+      final RegionServerServices rsServices, HRegionInfo regionInfo,
+      HTableDescriptor htd) {
+    this(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_REGION);
   }
 
   protected OpenRegionHandler(final Server server,
       final RegionServerServices rsServices, final HRegionInfo regionInfo,
-      EventType eventType) {
+      final HTableDescriptor htd, EventType eventType) {
     super(server, eventType);
     this.rsServices = rsServices;
     this.regionInfo = regionInfo;
+    this.htd = htd;
   }
 
   public HRegionInfo getRegionInfo() {
@@ -184,7 +188,7 @@ public class OpenRegionHandler extends E
 
     // Was there an exception opening the region?  This should trigger on
     // InterruptedException too.  If so, we failed.
-    return !t.interrupted() && t.getException() == null;
+    return !Thread.interrupted() && t.getException() == null;
   }
 
   /**
@@ -269,8 +273,9 @@ public class OpenRegionHandler extends E
     try {
       // Instantiate the region.  This also periodically tickles our zk OPENING
       // state so master doesn't timeout this region in transition.
-      region = HRegion.openHRegion(tableDir, this.regionInfo, this.rsServices.getWAL(),
-        this.server.getConfiguration(), this.rsServices,
+      region = HRegion.openHRegion(tableDir, this.regionInfo, this.htd,
+          this.rsServices.getWAL(), this.server.getConfiguration(),
+          this.rsServices,
         new CancelableProgressable() {
           public boolean progress() {
             // We may lose the znode ownership during the open.  Currently its
@@ -296,8 +301,9 @@ public class OpenRegionHandler extends E
     try {
       // Instantiate the region.  This also periodically tickles our zk OPENING
       // state so master doesn't timeout this region in transition.
-      region = HRegion.openHRegion(this.regionInfo, this.rsServices.getWAL(),
-        this.server.getConfiguration(), this.rsServices,
+      region = HRegion.openHRegion(this.regionInfo, this.htd,
+          this.rsServices.getWAL(), this.server.getConfiguration(),
+          this.rsServices,
         new CancelableProgressable() {
           public boolean progress() {
             // We may lose the znode ownership during the open.  Currently its
@@ -375,4 +381,4 @@ public class OpenRegionHandler extends E
   private boolean isGoodVersion() {
     return this.version != -1;
   }
-}
+}
\ No newline at end of file
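
One fix above is easy to miss: the old code asked !t.interrupted(), which compiles but does not do what it looks like. Thread.interrupted() is static, so calling it through the instance reference t tests -- and clears -- the interrupt flag of the calling thread, never t's; the rewrite makes the current-thread semantics explicit. A self-contained demonstration of the trap:

    public class InterruptedGotcha {
      public static void main(String[] args) {
        Thread t = new Thread();                // never started, never interrupted
        Thread.currentThread().interrupt();     // set the MAIN thread's flag
        // Resolves to the static Thread.interrupted(): it tests and
        // clears the CURRENT thread's flag, not t's.
        System.out.println(t.interrupted());    // true -- and the flag is now cleared
        System.out.println(t.interrupted());    // false -- it was cleared above
        System.out.println(t.isInterrupted());  // false -- t itself was never interrupted
      }
    }

To inspect another thread's status without side effects, t.isInterrupted() is the instance method to reach for.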

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java Tue Jun 21 18:31:26 2011
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.regionserver.handler;
 
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 
@@ -30,7 +31,8 @@ import org.apache.hadoop.hbase.regionser
  */
 public class OpenRootHandler extends OpenRegionHandler {
   public OpenRootHandler(final Server server,
-      final RegionServerServices rsServices, HRegionInfo regionInfo) {
-    super(server, rsServices, regionInfo, EventType.M_RS_OPEN_ROOT);
+      final RegionServerServices rsServices, HRegionInfo regionInfo,
+      final HTableDescriptor htd) {
+    super(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_ROOT);
   }
 }

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java Tue Jun 21 18:31:26 2011
@@ -173,7 +173,10 @@ public class SequenceFileLogWriter imple
 
   @Override
   public void close() throws IOException {
-    this.writer.close();
+    if (this.writer != null) {
+      this.writer.close();
+      this.writer = null;
+    }
   }
 
   @Override
@@ -205,4 +208,4 @@ public class SequenceFileLogWriter imple
   public FSDataOutputStream getWriterFSDataOutputStream() {
     return this.writer_out;
   }
-}
\ No newline at end of file
+}
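
The close() guard above makes the log writer idempotent: nulling the field after the first successful close turns any later close() into a no-op instead of a double-close of the underlying SequenceFile writer. The same pattern in isolation, with a hypothetical wrapped resource:

    import java.io.Closeable;
    import java.io.IOException;
    import java.io.Writer;

    public final class GuardedWriter implements Closeable {
      private Writer writer;  // hypothetical wrapped resource

      public GuardedWriter(Writer w) { this.writer = w; }

      @Override
      public void close() throws IOException {
        if (this.writer != null) {  // second and later calls are no-ops
          this.writer.close();
          this.writer = null;
        }
      }
    }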

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java?rev=1138120&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java Tue Jun 21 18:31:26 2011
@@ -0,0 +1,184 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableExistsException;
+
+/**
+ * Implementation of {@link TableDescriptors} that reads descriptors from the
+ * passed filesystem.  It expects descriptors to be in a file under the
+ * table's directory in FS.  Can be read-only -- i.e. it does not modify
+ * the filesystem -- or read-write.
+ */
+public class FSTableDescriptors implements TableDescriptors {
+  private final FileSystem fs;
+  private final Path rootdir;
+  private final boolean fsreadonly;
+  long cachehits = 0;
+  long invocations = 0;
+
+  // This cache does not age out the old stuff.  Thinking is that the amount
+  // of data we keep up in here is so small, no need to do occasional purge.
+  // TODO.
+  private final Map<String, TableDescriptorModtime> cache =
+    new ConcurrentHashMap<String, TableDescriptorModtime>();
+
+  /**
+   * Data structure to hold modification time and table descriptor.
+   */
+  static class TableDescriptorModtime {
+    private final HTableDescriptor descriptor;
+    private final long modtime;
+
+    TableDescriptorModtime(final long modtime, final HTableDescriptor htd) {
+      this.descriptor = htd;
+      this.modtime = modtime;
+    }
+
+    long getModtime() {
+      return this.modtime;
+    }
+
+    HTableDescriptor getTableDescriptor() {
+      return this.descriptor;
+    }
+  }
+
+  public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
+    this(fs, rootdir, false);
+  }
+
+  /**
+   * @param fs
+   * @param rootdir
+   * @param fsreadOnly True if we are read-only when it comes to filesystem
+   * operations; i.e. on remove, we do not delete in the fs.
+   */
+  public FSTableDescriptors(final FileSystem fs, final Path rootdir,
+      final boolean fsreadOnly) {
+    super();
+    this.fs = fs;
+    this.rootdir = rootdir;
+    this.fsreadonly = fsreadOnly;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.TableDescriptors#get(byte[])
+   */
+  @Override
+  public HTableDescriptor get(final byte [] tablename)
+  throws TableExistsException, FileNotFoundException, IOException {
+    return get(Bytes.toString(tablename));
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.TableDescriptors#get(java.lang.String)
+   */
+  @Override
+  public HTableDescriptor get(final String tablename)
+  throws TableExistsException, FileNotFoundException, IOException {
+    invocations++;
+    if (HTableDescriptor.ROOT_TABLEDESC.getNameAsString().equals(tablename)) {
+      cachehits++;
+      return HTableDescriptor.ROOT_TABLEDESC;
+    }
+    if (HTableDescriptor.META_TABLEDESC.getNameAsString().equals(tablename)) {
+      cachehits++;
+      return HTableDescriptor.META_TABLEDESC;
+    }
+
+    // Look in cache of descriptors.
+    TableDescriptorModtime tdm = this.cache.get(tablename);
+
+    // Check mod time has not changed (this is trip to NN).
+    long modtime =
+      FSUtils.getTableInfoModtime(this.fs, this.rootdir, tablename);
+    if (tdm != null) {
+      if (modtime <= tdm.getModtime()) {
+        cachehits++;
+        return tdm.getTableDescriptor();
+      }
+    }
+    HTableDescriptor htd =
+      FSUtils.getTableDescriptor(this.fs, this.rootdir, tablename);
+    if (htd == null) {
+      // More likely, the above will have thrown a FileNotFoundException
+      throw new TableExistsException("No descriptor for " + tablename);
+    }
+    this.cache.put(tablename, new TableDescriptorModtime(modtime, htd));
+    return htd;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)
+   */
+  @Override
+  public Map<String, HTableDescriptor> getAll()
+  throws IOException {
+    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
+    List<Path> tableDirs = FSUtils.getTableDirs(fs, rootdir);
+    for (Path d: tableDirs) {
+      HTableDescriptor htd = get(d.getName());
+      if (htd == null) continue;
+      htds.put(d.getName(), htd);
+    }
+    return htds;
+  }
+
+  @Override
+  public void add(HTableDescriptor htd) throws IOException {
+    if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) {
+      throw new NotImplementedException();
+    }
+    if (Bytes.equals(HConstants.META_TABLE_NAME, htd.getName())) {
+      throw new NotImplementedException();
+    }
+    if (!this.fsreadonly) FSUtils.updateHTableDescriptor(this.fs, this.rootdir, htd);
+    long modtime =
+      FSUtils.getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
+    this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
+  }
+
+  @Override
+  public HTableDescriptor remove(final String tablename)
+  throws IOException {
+    if (!this.fsreadonly) {
+      Path tabledir = FSUtils.getTablePath(this.rootdir, tablename);
+      if (this.fs.exists(tabledir)) {
+        if (!this.fs.delete(tabledir, true)) {
+          throw new IOException("Failed delete of " + tabledir.toString());
+        }
+      }
+    }
+    TableDescriptorModtime tdm = this.cache.remove(tablename);
+    return tdm == null? null: tdm.getTableDescriptor();
+  }
+}
\ No newline at end of file
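
In get(), ROOT and META short-circuit to their static descriptors; every other table is served from the in-memory cache for as long as the .tableinfo modification time reported by the NameNode has not advanced, so a warm lookup costs one modtime round trip and no descriptor read. A hedged usage sketch -- the table name and the schema edit are hypothetical:

    // Assumes an hbase-site.xml on the classpath pointing at hbase.rootdir.
    Configuration conf = HBaseConfiguration.create();
    Path rootdir = new Path(conf.get(HConstants.HBASE_DIR));
    FileSystem fs = rootdir.getFileSystem(conf);

    TableDescriptors tds = new FSTableDescriptors(fs, rootdir);  // read-write flavor
    HTableDescriptor htd = tds.get("usertable");  // modtime check + descriptor read, then cached
    HTableDescriptor hit = tds.get("usertable");  // cache hit while the modtime is unchanged
    htd.setMaxFileSize(1L << 30);                 // hypothetical schema change
    tds.add(htd);                                 // rewrites .tableinfo and refreshes the cache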

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java Tue Jun 21 18:31:26 2011
@@ -49,7 +49,9 @@ import java.io.InterruptedIOException;
 import java.lang.reflect.InvocationTargetException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -835,68 +837,68 @@ public class FSUtils {
     LOG.info("Finished lease recover attempt for " + p);
   }
 
-
-  public static Map<String, HTableDescriptor> getTableDescriptors(
-    final Configuration config)
-  throws IOException {
-    Path path = getRootDir(config);
-    // since HMaster.getFileSystem() is package private
-    FileSystem fs = path.getFileSystem(config);
-    return getTableDescriptors(fs, path);
-  }
-
-  public static Map<String, HTableDescriptor> getTableDescriptors(
-    final FileSystem fs, final Path hbaseRootDir)
+  /**
+   * @param fs
+   * @param rootdir
+   * @return All the table directories under <code>rootdir</code>
+   * @throws IOException
+   */
+  public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
   throws IOException {
-    Map<String, HTableDescriptor> desc =
-        new HashMap<String, HTableDescriptor>();
-    DirFilter df = new DirFilter(fs);
     // presumes any directory under hbase.rootdir is a table
-    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
-    for (FileStatus tableDir : tableDirs) {
-      Path d = tableDir.getPath();
-      String tableName = d.getName();
-
-      if (tableName.equals(HConstants.HREGION_LOGDIR_NAME)
-          || tableName.equals(Bytes.toString(HConstants.ROOT_TABLE_NAME))
-          || tableName.equals(Bytes.toString(HConstants.META_TABLE_NAME))
-          || tableName.equals(HConstants.HREGION_OLDLOGDIR_NAME)
-          ) {
+    FileStatus [] dirs = fs.listStatus(rootdir, new DirFilter(fs));
+    List<Path> tabledirs = new ArrayList<Path>(dirs.length);
+    for (FileStatus dir: dirs) {
+      Path p = dir.getPath();
+      String tableName = p.getName();
+      if (tableName.equals(HConstants.HREGION_LOGDIR_NAME) ||
+          tableName.equals(Bytes.toString(HConstants.ROOT_TABLE_NAME)) ||
+          tableName.equals(Bytes.toString(HConstants.META_TABLE_NAME)) ||
+          tableName.equals(HConstants.HREGION_OLDLOGDIR_NAME)) {
         continue;
       }
-      LOG.info("Adding tabledescriptor for table = " + tableName);
-      HTableDescriptor htd = readTableDescriptor(fs, hbaseRootDir,
-          tableName);
-      if (htd != null) {
-        if (!desc.containsKey(tableName)) {
-          desc.put(tableName, htd);
-        }
-      }
+      tabledirs.add(p);
     }
-    return desc;
-  }
-
-  private static Path getTableInfoPath(Path hbaseRootDir, String tableName) {
-    Path tablePath = new Path(hbaseRootDir, tableName);
-    return new Path(tablePath, HConstants.TABLEINFO_NAME);
+    return tabledirs;
   }
 
   /**
    * Get table info path for a table.
+   * @param rootdir
    * @param tableName
    * @return Table info path
    */
-  private static Path getTableInfoPath(byte[] tableName, Configuration conf) throws IOException {
-    Path tablePath = new Path(getRootDir(conf), Bytes.toString(tableName));
-    Path tableInfoPath = new Path(tablePath, HConstants.TABLEINFO_NAME);
-    return tableInfoPath;
+  private static Path getTableInfoPath(Path rootdir, String tablename) {
+    Path tablePath = getTablePath(rootdir, tablename);
+    return new Path(tablePath, HConstants.TABLEINFO_NAME);
+  }
+
+  /**
+   * @param fs
+   * @param rootdir
+   * @param tablename
+   * @return Modification time for the table {@link HConstants#TABLEINFO_NAME} file.
+   * @throws IOException
+   */
+  public static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
+      final String tablename)
+  throws IOException {
+    Path p = getTablePath(rootdir, tablename);
+    FileStatus [] status = fs.listStatus(p);
+    if (status.length < 1) throw new FileNotFoundException("No status for " + p.toString());
+    return status[0].getModificationTime();
+  }
+
+  public static Path getTablePath(Path rootdir, byte [] tableName) {
+    return getTablePath(rootdir, Bytes.toString(tableName));
   }
 
-  private static Path getTablePath(byte[] tableName, Configuration conf) throws IOException {
-    return new Path(getRootDir(conf), Bytes.toString(tableName));
+  public static Path getTablePath(Path rootdir, final String tableName) {
+    return new Path(rootdir, tableName);
   }
 
-  private static FileSystem getCurrentFileSystem(Configuration conf) throws IOException {
+  private static FileSystem getCurrentFileSystem(Configuration conf)
+  throws IOException {
     return getRootDir(conf).getFileSystem(conf);
   }
 
@@ -908,27 +910,11 @@ public class FSUtils {
    * @throws IOException
    */
   public static HTableDescriptor getHTableDescriptor(Configuration config,
-                                              String tableName)
-      throws IOException {
+      String tableName)
+  throws IOException {
     Path path = getRootDir(config);
     FileSystem fs = path.getFileSystem(config);
-    return readTableDescriptor(fs, path, tableName);
-  }
-
-  private static HTableDescriptor readTableDescriptor(FileSystem fs,
-                                              Path hbaseRootDir,
-                                              String tableName) {
-    try {
-      FSDataInputStream fsDataInputStream =
-          fs.open(getTableInfoPath(hbaseRootDir, tableName));
-      HTableDescriptor hTableDescriptor = new HTableDescriptor();
-      hTableDescriptor.readFields(fsDataInputStream);
-      fsDataInputStream.close();
-      return hTableDescriptor;
-    } catch (IOException ioe) {
-      LOG.info("Exception during readTableDecriptor. Current table name = " + tableName , ioe);
-    }
-    return null;
+    return getTableDescriptor(fs, path, tableName);
   }
 
   /**
@@ -936,92 +922,99 @@ public class FSUtils {
    * @param fs
    * @param hbaseRootDir
    * @param tableName
-   * @return
+   * @return Descriptor or null if none found.
    * @throws IOException
    */
   public static HTableDescriptor getTableDescriptor(FileSystem fs,
-                                                    Path hbaseRootDir,
-                                                    byte[] tableName)
-      throws IOException {
-     return readTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
+      Path hbaseRootDir, byte[] tableName)
+  throws IOException {
+     return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
   }
 
-
-  public static HTableDescriptor getTableDescriptor(Path tableDir, FileSystem fs) {
+  public static HTableDescriptor getTableDescriptor(FileSystem fs,
+      Path hbaseRootDir, String tableName) {
+    HTableDescriptor htd = null;
     try {
-      LOG.info("Reading table descriptor from .tableinfo. current path = "
-          + tableDir);
-      if (tableDir == null) {
-        LOG.info("Reading table descriptor from .tableinfo current tablename is NULL ");
-        return null;
-      }
+      htd = getTableDescriptor(fs, getTablePath(hbaseRootDir, tableName));
+    } catch (IOException ioe) {
+      LOG.debug("Exception during readTableDecriptor. Current table name = " +
+        tableName , ioe);
+    }
+    return htd;
+  }
 
-      FSDataInputStream fsDataInputStream =
-          fs.open(new Path(tableDir, HConstants.TABLEINFO_NAME));
-      HTableDescriptor hTableDescriptor = new HTableDescriptor();
+  public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
+  throws IOException {
+    if (tableDir == null) throw new NullPointerException();
+    FSDataInputStream fsDataInputStream =
+      fs.open(new Path(tableDir, HConstants.TABLEINFO_NAME));
+    HTableDescriptor hTableDescriptor = null;
+    try {
+      hTableDescriptor = new HTableDescriptor();
       hTableDescriptor.readFields(fsDataInputStream);
-      LOG.info("Current tabledescriptor from .tableinfo is " + hTableDescriptor.toString());
+    } finally {
       fsDataInputStream.close();
-      return hTableDescriptor;
-    } catch (IOException ioe) {
-      LOG.info("Exception during getTableDescriptor ", ioe);
     }
-    return null;
+    return hTableDescriptor;
   }
 
-    /**
-   * Create new HTableDescriptor in HDFS.
+  /**
+   * Create new HTableDescriptor in HDFS.  Happens when we are creating a table.
    * @param htableDescriptor
+   * @param conf
    */
   public static void createTableDescriptor(HTableDescriptor htableDescriptor,
-                                           Configuration conf) {
+      Configuration conf) {
     try {
-      Path tableDir = getTablePath(htableDescriptor.getName(), conf);
       FileSystem fs = getCurrentFileSystem(conf);
-      createTableDescriptor(fs, htableDescriptor, tableDir);
+      createTableDescriptor(fs, getRootDir(conf), htableDescriptor);
     } catch(IOException ioe) {
       LOG.info("IOException while trying to create tableInfo in HDFS", ioe);
     }
   }
 
+  /**
+   * @param fs
+   * @param htableDescriptor
+   * @param rootdir
+   */
   public static void createTableDescriptor(FileSystem fs,
-                                           HTableDescriptor htableDescriptor,
-                                           Path tableDir) {
+      Path rootdir, HTableDescriptor htableDescriptor) {
     try {
-      Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
-      LOG.info("Current tableInfoPath = " + tableInfoPath
-          + " tableDir = " + tableDir) ;
+      Path tableInfoPath =
+        getTableInfoPath(rootdir, htableDescriptor.getNameAsString());
+      LOG.info("Current tableInfoPath = " + tableInfoPath) ;
       if (fs.exists(tableInfoPath) &&
           fs.getFileStatus(tableInfoPath).getLen() > 0) {
         LOG.info("TableInfo already exists.. Skipping creation");
         return;
       }
-      writeTableDescriptor(fs, htableDescriptor, tableDir);
+      writeTableDescriptor(fs, htableDescriptor,
+        getTablePath(rootdir, htableDescriptor.getNameAsString()));
     } catch(IOException ioe) {
       LOG.info("IOException while trying to create tableInfo in HDFS", ioe);
     }
   }
 
+  /**
+   * Called when we are creating a table to write out the table's descriptor.
+   * @param fs
+   * @param hTableDescriptor
+   * @param tableDir
+   * @throws IOException
+   */
   private static void writeTableDescriptor(FileSystem fs,
-                                           HTableDescriptor hTableDescriptor,
-                                           Path tableDir) throws IOException {
+      HTableDescriptor hTableDescriptor, Path tableDir)
+  throws IOException {
     // Create in tmpdir and then move into place in case we crash after
     // create but before close.  If we don't successfully close the file,
     // subsequent region reopens will fail the below because create is
     // registered in NN.
     Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
-    Path tmpPath = new Path(new Path(tableDir,".tmp"),
-        HConstants.TABLEINFO_NAME);
+    Path tmpPath = new Path(new Path(tableDir,".tmp"), HConstants.TABLEINFO_NAME);
     LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath);
-    FSDataOutputStream out = fs.create(tmpPath, true);
-    try {
-      hTableDescriptor.write(out);
-      out.write('\n');
-      out.write('\n');
-      out.write(Bytes.toBytes(hTableDescriptor.toString()));
-    } finally {
-      out.close();
-    }
+    writeHTD(fs, tmpPath, hTableDescriptor);
     if (!fs.rename(tmpPath, tableInfoPath)) {
       throw new IOException("Unable to rename " + tmpPath + " to " +
         tableInfoPath);
@@ -1030,29 +1023,34 @@ public class FSUtils {
     }
   }
 
+  /**
+   * Update table descriptor
+   * @param fs
+   * @param rootdir
+   * @param hTableDescriptor
+   * @throws IOException
+   */
+  public static void updateHTableDescriptor(FileSystem fs, Path rootdir,
+      HTableDescriptor hTableDescriptor)
+  throws IOException {
+    Path tableInfoPath =
+      getTableInfoPath(rootdir, hTableDescriptor.getNameAsString());
+    writeHTD(fs, tableInfoPath, hTableDescriptor);
+    LOG.info("updateHTableDescriptor. Updated tableinfo in HDFS under " +
+      tableInfoPath + " For HTD => " + hTableDescriptor.toString());
+  }
 
-  public static void updateHTableDescriptor(FileSystem fs,
-                                            Configuration conf,
-                                            HTableDescriptor hTableDescriptor) throws IOException
-  {
-    Path tableInfoPath = getTableInfoPath(hTableDescriptor.getName(), conf);
-    FSDataOutputStream out = fs.create(tableInfoPath, true);
+  private static void writeHTD(final FileSystem fs, final Path p,
+      final HTableDescriptor htd)
+  throws IOException {
+    FSDataOutputStream out = fs.create(p, true);
     try {
-      hTableDescriptor.write(out);
+      htd.write(out);
       out.write('\n');
       out.write('\n');
-      out.write(Bytes.toBytes(hTableDescriptor.toString()));
-      LOG.info("updateHTableDescriptor. Updated tableinfo in HDFS under "
-        + tableInfoPath + " For HTD => "
-        + hTableDescriptor.toString());
+      out.write(Bytes.toBytes(htd.toString()));
     } finally {
       out.close();
     }
   }
-
-  private static Path getTmpDir(HTableDescriptor htableDescriptor, Configuration configuration)
-      throws IOException {
-    return new Path(getTablePath(htableDescriptor.getName(), configuration), ".tmp");
-  }
-
-}
+}
\ No newline at end of file
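
The comment inside writeTableDescriptor explains the trick: create the file under the table's .tmp directory, close it, and only then rename it into place, so a crash between create and close can never leave a truncated .tableinfo at the path readers consult. The pattern reduced to its essentials -- names are illustrative, and like the caller above it assumes the destination does not already exist:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AtomicWriteSketch {
      static void atomicWrite(FileSystem fs, Path dir, String name, byte[] payload)
      throws IOException {
        Path tmp = new Path(new Path(dir, ".tmp"), name);
        Path dst = new Path(dir, name);
        FSDataOutputStream out = fs.create(tmp, true);
        try {
          out.write(payload);
        } finally {
          out.close();
        }
        // Readers at dst see the old complete file or the new complete
        // file, never a partial write.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("Unable to rename " + tmp + " to " + dst);
        }
      }
    }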

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java?rev=1138120&r1=1138119&r2=1138120&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java Tue Jun 21 18:31:26 2011
@@ -55,6 +55,7 @@ import java.util.Random;
  * a table by merging adjacent regions.
  */
 class HMerge {
+  // TODO: Where is this class used?  How does it relate to Merge in same package?
   static final Log LOG = LogFactory.getLog(HMerge.class);
   static final Random rand = new Random();
 
@@ -135,12 +136,12 @@ class HMerge {
     protected final Configuration conf;
     protected final FileSystem fs;
     protected final Path tabledir;
+    protected final HTableDescriptor htd;
     protected final HLog hlog;
     private final long maxFilesize;
 
 
-    protected Merger(Configuration conf, FileSystem fs,
-      final byte [] tableName)
+    protected Merger(Configuration conf, FileSystem fs, final byte [] tableName)
     throws IOException {
       this.conf = conf;
       this.fs = fs;
@@ -151,6 +152,7 @@ class HMerge {
           fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))),
           Bytes.toString(tableName)
       );
+      this.htd = FSUtils.getTableDescriptor(this.fs, this.tabledir);
       Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
           HConstants.HREGION_LOGDIR_NAME);
       Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME);
@@ -188,13 +190,13 @@ class HMerge {
       long nextSize = 0;
       for (int i = 0; i < info.length - 1; i++) {
         if (currentRegion == null) {
-          currentRegion =
-            HRegion.newHRegion(tabledir, hlog, fs, conf, info[i], null);
+          currentRegion = HRegion.newHRegion(tabledir, hlog, fs, conf, info[i],
+            this.htd, null);
           currentRegion.initialize();
           currentSize = currentRegion.getLargestHStoreSize();
         }
-        nextRegion =
-          HRegion.newHRegion(tabledir, hlog, fs, conf, info[i + 1], null);
+        nextRegion = HRegion.newHRegion(tabledir, hlog, fs, conf, info[i + 1],
+          this.htd, null);
         nextRegion.initialize();
         nextSize = nextRegion.getLargestHStoreSize();
 
@@ -357,7 +359,7 @@ class HMerge {
       // Scan root region to find all the meta regions
 
       root = HRegion.newHRegion(rootTableDir, hlog, fs, conf,
-          HRegionInfo.ROOT_REGIONINFO, null);
+          HRegionInfo.ROOT_REGIONINFO, HTableDescriptor.ROOT_TABLEDESC, null);
       root.initialize();
 
       Scan scan = new Scan();
@@ -431,4 +433,4 @@ class HMerge {
       }
     }
   }
-}
+}
\ No newline at end of file
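
Merger now reads the table's descriptor a single time, straight off the table directory, and shares that one instance across every region it instantiates -- the same no-descriptor-in-HRegionInfo discipline as the rest of this commit. A minimal fragment of that one-time read (table name hypothetical):

    // Fragment: fs, rootdir, hlog, conf and hri come from the enclosing Merger.
    Path tabledir = FSUtils.getTablePath(rootdir, "usertable");
    HTableDescriptor htd = FSUtils.getTableDescriptor(fs, tabledir);
    HRegion r = HRegion.newHRegion(tabledir, hlog, fs, conf, hri, htd, null);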


