hbase-commits mailing list archives

From: te...@apache.org
Subject: svn commit: r1455730 - in /hbase/branches/0.94/src: main/java/org/apache/hadoop/hbase/master/ main/java/org/apache/hadoop/hbase/master/handler/ test/java/org/apache/hadoop/hbase/master/handler/
Date: Tue, 12 Mar 2013 22:01:28 GMT
Author: tedyu
Date: Tue Mar 12 22:01:28 2013
New Revision: 1455730

URL: http://svn.apache.org/r1455730
Log:
HBASE-7624 Backport HBASE-5359 and HBASE-7596 to 0.94 (Jeffrey Zhong)


Added:
    hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
Modified:
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java?rev=1455730&r1=1455729&r2=1455730&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java Tue Mar 12 22:01:28 2013
@@ -341,7 +341,8 @@ public class AssignmentManager extends Z
       MetaReader.getTableRegions(this.master.getCatalogTracker(), tableName);
     Integer pending = 0;
     for(HRegionInfo hri : hris) {
-      if(regionsToReopen.get(hri.getEncodedName()) != null) {
+      String name = hri.getEncodedName();
+      if (regionsToReopen.containsKey(name) || regionsInTransition.containsKey(name)) {
         pending++;
       }
     }

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1455730&r1=1455729&r2=1455730&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Tue Mar 12 22:01:28 2013
@@ -94,6 +94,7 @@ import org.apache.hadoop.hbase.master.ha
 import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
 import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
 import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
+import org.apache.hadoop.hbase.master.handler.TableEventHandler;
 import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
 import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
@@ -1387,7 +1388,10 @@ Server {
     if (cpHost != null) {
       cpHost.preModifyTable(tableName, htd);
     }
-    this.executorService.submit(new ModifyTableHandler(tableName, htd, this, this));
+    TableEventHandler tblHandler = new ModifyTableHandler(tableName, htd, this, this);
+    this.executorService.submit(tblHandler);
+    // Prevent clients from querying alter status before the event has started being handled.
+    tblHandler.waitForEventBeingHandled();
     if (cpHost != null) {
       cpHost.postModifyTable(tableName, htd);
     }

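The waitForEventBeingHandled() call above keeps modifyTable from returning before the submitted handler has actually started, so a client that asks for alter status immediately afterwards cannot observe a state from before the event was picked up. A minimal client-side sketch of such status polling, assuming the 0.94 HBaseAdmin.getAlterStatus(byte[]) API (the table name, sleep interval, and output below are illustrative, not part of this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class AlterStatusPoller {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    byte[] tableName = Bytes.toBytes("table");   // illustrative table name
    try {
      // getFirst() = regions still waiting for the updated schema, getSecond() = total regions.
      Pair<Integer, Integer> status = admin.getAlterStatus(tableName);
      while (status.getFirst() > 0) {
        System.out.println(status.getFirst() + "/" + status.getSecond() + " regions pending");
        Thread.sleep(1000);                      // poll once a second
        status = admin.getAlterStatus(tableName);
      }
    } finally {
      admin.close();
    }
  }
}
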
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java?rev=1455730&r1=1455729&r2=1455730&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java Tue Mar 12 22:01:28 2013
@@ -50,11 +50,8 @@ public class TableAddFamilyHandler exten
   @Override
   protected void handleTableOperation(List<HRegionInfo> hris)
   throws IOException {
-    // Update table descriptor in HDFS
-    HTableDescriptor htd = this.masterServices.getMasterFileSystem()
-        .addColumn(tableName, familyDesc);
-    // Update in-memory descriptor cache
-    this.masterServices.getTableDescriptors().add(htd);
+    // Update table descriptor
+    this.masterServices.getMasterFileSystem().addColumn(tableName, familyDesc);
   }
   @Override
   public String toString() {

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java?rev=1455730&r1=1455729&r2=1455730&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java Tue Mar 12 22:01:28 2013
@@ -45,13 +45,10 @@ public class TableDeleteFamilyHandler ex
 
   @Override
   protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
-    // Update table descriptor in HDFS
-    HTableDescriptor htd =
-      this.masterServices.getMasterFileSystem().deleteColumn(tableName, familyName);
-    // Update in-memory descriptor cache
-    this.masterServices.getTableDescriptors().add(htd);
-    // Remove the column family from the file system
     MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
+    // Update table descriptor
+    mfs.deleteColumn(tableName, familyName);
+    // Remove the column family from the file system
     for (HRegionInfo hri : hris) {
       // Delete the family directory in FS for all the regions one by one
       mfs.deleteFamilyFromFS(hri, familyName);

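Both handler changes above drop the separate in-memory descriptor update and rely on the MasterFileSystem addColumn/deleteColumn calls to keep the on-disk table descriptor and the master's cached descriptors consistent in one place. A hedged sketch of what such a consolidated update is assumed to look like (class, helper names, and structure below are illustrative, not the actual MasterFileSystem source):

import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch only; not the actual MasterFileSystem implementation.
public abstract class DescriptorUpdateSketch {
  // Assumed accessors; stand-ins for the master's descriptor cache and filesystem I/O.
  protected abstract TableDescriptors getTableDescriptors();
  protected abstract HTableDescriptor readDescriptorFromFs(String tableName) throws IOException;
  protected abstract void writeDescriptorToFs(HTableDescriptor htd) throws IOException;

  public HTableDescriptor addColumn(byte[] tableName, HColumnDescriptor hcd) throws IOException {
    HTableDescriptor htd = readDescriptorFromFs(Bytes.toString(tableName)); // current descriptor
    htd.addFamily(hcd);               // apply the schema change
    writeDescriptorToFs(htd);         // persist the updated descriptor
    getTableDescriptors().add(htd);   // refresh the in-memory cache as well
    return htd;
  }
}
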
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java?rev=1455730&r1=1455729&r2=1455730&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java Tue Mar 12 22:01:28 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.master.h
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
@@ -59,6 +60,7 @@ public abstract class TableEventHandler 
   protected final MasterServices masterServices;
   protected final byte [] tableName;
   protected final String tableNameStr;
+  protected boolean isEventBeingHandled = false;
 
   public TableEventHandler(EventType eventType, byte [] tableName, Server server,
       MasterServices masterServices)
@@ -108,16 +110,27 @@ public abstract class TableEventHandler 
       LOG.error("Error manipulating table " + Bytes.toString(tableName), e);
     } catch (KeeperException e) {
       LOG.error("Error manipulating table " + Bytes.toString(tableName), e);
+    } finally {
+      notifyEventBeingHandled();
     }
   }
 
   public boolean reOpenAllRegions(List<HRegionInfo> regions) throws IOException {
     boolean done = false;
+    HTable table = null;
+    TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
+    NavigableMap<HRegionInfo, ServerName> hriHserverMapping;
+
     LOG.info("Bucketing regions by region server...");
-    HTable table = new HTable(masterServices.getConfiguration(), tableName);
-    TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps
-        .newTreeMap();
-    NavigableMap<HRegionInfo, ServerName> hriHserverMapping = table.getRegionLocations();
+
+    try {
+      table = new HTable(masterServices.getConfiguration(), tableName);
+      hriHserverMapping = table.getRegionLocations();
+    } finally {
+      if (table != null) {
+        table.close();
+      }
+    }
     List<HRegionInfo> reRegions = new ArrayList<HRegionInfo>();
     for (HRegionInfo hri : regions) {
       ServerName rsLocation = hriHserverMapping.get(hri);
@@ -139,6 +152,7 @@ public abstract class TableEventHandler 
     LOG.info("Reopening " + reRegions.size() + " regions on "
         + serverToRegions.size() + " region servers.");
     this.masterServices.getAssignmentManager().setRegionsToReopen(reRegions);
+    notifyEventBeingHandled();
     BulkReOpen bulkReopen = new BulkReOpen(this.server, serverToRegions,
         this.masterServices.getAssignmentManager());
     while (true) {
@@ -189,4 +203,26 @@ public abstract class TableEventHandler 
 
   protected abstract void handleTableOperation(List<HRegionInfo> regions)
   throws IOException, KeeperException;
+
+  /**
+   * Table modifications are processed asynchronously; this method blocks until the submitted
+   * event has started being handled, so callers can then reliably query its status.
+   * @throws IOException
+   */
+  public synchronized void waitForEventBeingHandled() throws IOException {
+    if (!this.isEventBeingHandled) {
+      try {
+        wait();
+      } catch (InterruptedException ie) {
+        throw (IOException) new InterruptedIOException().initCause(ie);
+      }
+    }
+  }
+
+  private synchronized void notifyEventBeingHandled() {
+    if (!this.isEventBeingHandled) {
+      isEventBeingHandled = true;
+      notify();
+    }
+  }
 }

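The waitForEventBeingHandled()/notifyEventBeingHandled() pair added above is an instance of the standard guarded wait/notify handshake: the submitting thread blocks on a flag, and the handler flips the flag and notifies, from a finally block so waiters are released even if handling fails early. A self-contained sketch of that pattern, using hypothetical class and method names rather than the HBase ones:

// Hypothetical illustration of the guarded wait/notify handshake; not HBase code.
public class StartedLatch {
  private boolean started = false;

  // Submitting thread: block until the worker signals that handling has begun.
  public synchronized void awaitStarted() throws InterruptedException {
    while (!started) {
      wait();
    }
  }

  // Worker thread: flip the flag once and wake any waiters; calling this from a
  // finally block guarantees waiters are released even if the work fails early.
  public synchronized void markStarted() {
    if (!started) {
      started = true;
      notifyAll();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    final StartedLatch latch = new StartedLatch();
    Thread worker = new Thread(new Runnable() {
      public void run() {
        try {
          // ... start handling the event here ...
        } finally {
          latch.markStarted();
        }
      }
    });
    worker.start();
    latch.awaitStarted();   // returns only after the worker has begun
    System.out.println("worker has started handling the event");
  }
}
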
Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java?rev=1455730&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java Tue Mar 12 22:01:28 2013
@@ -0,0 +1,160 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Verify that the HTableDescriptor is updated after
+ * addColumn(), deleteColumn() and modifyTable() operations.
+ */
+@Category(LargeTests.class)
+public class TestTableDescriptorModification {
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final byte[] TABLE_NAME = Bytes.toBytes("table");
+  private static final byte[] FAMILY_0 = Bytes.toBytes("cf0");
+  private static final byte[] FAMILY_1 = Bytes.toBytes("cf1");
+
+  /**
+   * Start up a mini cluster for the schema-modification tests.
+   *
+   * @throws Exception
+   */
+  @BeforeClass
+  public static void beforeAllTests() throws Exception {
+    TEST_UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void afterAllTests() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testModifyTable() throws IOException {
+    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    // Create a table with one family
+    HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
+    baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
+    admin.createTable(baseHtd);
+    admin.disableTable(TABLE_NAME);
+    try {
+      // Verify the table descriptor
+      verifyTableDescriptor(TABLE_NAME, FAMILY_0);
+
+      // Modify the table adding another family and verify the descriptor
+      HTableDescriptor modifiedHtd = new HTableDescriptor(TABLE_NAME);
+      modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_0));
+      modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_1));
+      admin.modifyTable(TABLE_NAME, modifiedHtd);
+      verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1);
+    } finally {
+      admin.deleteTable(TABLE_NAME);
+    }
+  }
+
+  @Test
+  public void testAddColumn() throws IOException {
+    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    // Create a table with one family
+    HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
+    baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
+    admin.createTable(baseHtd);
+    admin.disableTable(TABLE_NAME);
+    try {
+      // Verify the table descriptor
+      verifyTableDescriptor(TABLE_NAME, FAMILY_0);
+
+      // Modify the table adding one family and verify the descriptor
+      admin.addColumn(TABLE_NAME, new HColumnDescriptor(FAMILY_1));
+      verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1);
+    } finally {
+      admin.deleteTable(TABLE_NAME);
+    }
+  }
+
+  @Test
+  public void testDeleteColumn() throws IOException {
+    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    // Create a table with two families
+    HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
+    baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
+    baseHtd.addFamily(new HColumnDescriptor(FAMILY_1));
+    admin.createTable(baseHtd);
+    admin.disableTable(TABLE_NAME);
+    try {
+      // Verify the table descriptor
+      verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1);
+
+      // Modify the table removing one family and verify the descriptor
+      admin.deleteColumn(TABLE_NAME, FAMILY_1);
+      verifyTableDescriptor(TABLE_NAME, FAMILY_0);
+    } finally {
+      admin.deleteTable(TABLE_NAME);
+    }
+  }
+
+  private void verifyTableDescriptor(final byte[] tableName, final byte[]... families)
+      throws IOException {
+    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+
+    // Verify descriptor from master
+    HTableDescriptor htd = admin.getTableDescriptor(tableName);
+    verifyTableDescriptor(htd, tableName, families);
+
+    // Verify descriptor from HDFS
+    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    Path tableDir = HTableDescriptor.getTableDir(mfs.getRootDir(), tableName);
+    htd = FSTableDescriptors.getTableDescriptor(mfs.getFileSystem(), tableDir);
+    verifyTableDescriptor(htd, tableName, families);
+  }
+
+  private void verifyTableDescriptor(final HTableDescriptor htd,
+      final byte[] tableName, final byte[]... families) {
+    Set<byte[]> htdFamilies = htd.getFamiliesKeys();
+    assertTrue(Bytes.equals(tableName, htd.getName()));
+    assertEquals(families.length, htdFamilies.size());
+    for (byte[] familyName: families) {
+      assertTrue("Expected family " + Bytes.toString(familyName), htdFamilies.contains(familyName));
+    }
+  }
+}
\ No newline at end of file


