hbase-commits mailing list archives

From: st...@apache.org
Subject: svn commit: r991397 [7/15] - in /hbase/trunk: ./ bin/ conf/ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/avro/ src/main/java/org/apache/hadoop/hbase/catalog/ src/main/java/org/apache/hadoop/hbase/client/ src/main/java/or...
Date: Tue, 31 Aug 2010 23:51:50 GMT
Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java?rev=991397&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
(added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
Tue Aug 31 23:51:44 2010
@@ -0,0 +1,160 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.executor.EventHandler;
+import org.apache.hadoop.hbase.master.DeadServer;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.zookeeper.KeeperException;
+
+
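+/**
+ * Process a dead region server: record it with DeadServer, split its logs,
+ * clean up its regions-in-transition, reassign -ROOT- and .META. if it was
+ * carrying them, and then reassign the rest of the regions it was serving.
+ */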
+public class ServerShutdownHandler extends EventHandler {
+  private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class);
+  private final HServerInfo hsi;
+  private final Server server;
+  private final MasterServices services;
+  private final DeadServer deadServers;
+
+  public ServerShutdownHandler(final Server server, final MasterServices services,
+      final DeadServer deadServers, final HServerInfo hsi) {
+    super(server, EventType.M_SERVER_SHUTDOWN);
+    this.hsi = hsi;
+    this.server = server;
+    this.services = services;
+    this.deadServers = deadServers;
+    // Add to dead servers.
+    this.deadServers.add(hsi.getServerName());
+  }
+
+  @Override
+  public void process() throws IOException {
+    Pair<Boolean, Boolean> carryingCatalog = null;
+    try {
+      carryingCatalog =
+        this.server.getCatalogTracker().processServerShutdown(this.hsi);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new IOException("Interrupted", e);
+    } catch (KeeperException e) {
+      this.server.abort("In server shutdown processing", e);
+      throw new IOException("Aborting", e);
+    }
+    final String serverName = this.hsi.getServerName();
+
+    LOG.info("Splitting logs for " + serverName);
+    this.services.getMasterFileSystem().splitLog(serverName);
+
+    // Clean out anything in regions in transition.  Being conservative, we
+    // do this after log splitting.  Some states -- OPENING? OFFLINE? --
+    // could be processed before, with others that depend on log splitting,
+    // like CLOSING, handled after.
+    this.services.getAssignmentManager().processServerShutdown(this.hsi);
+
+    // Assign root and meta if we were carrying them.
+    if (carryingCatalog.getFirst()) { // -ROOT-
+      try {
+        this.services.getAssignmentManager().assignRoot();
+      } catch (KeeperException e) {
+        this.server.abort("In server shutdown processing, assigning root", e);
+        throw new IOException("Aborting", e);
+      }
+    }
+    if (carryingCatalog.getSecond()) { // .META.
+      this.services.getAssignmentManager().assignMeta();
+    }
+
+    // Wait on meta to come online; we need it to progress.
+    try {
+      this.server.getCatalogTracker().waitForMeta();
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new IOException("Interrupted", e);
+    }
+
+    NavigableMap<HRegionInfo, Result> hris =
+      MetaReader.getServerRegions(this.server.getCatalogTracker(), this.hsi);
+    LOG.info("Reassigning the " + hris.size() + " region(s) that " + serverName +
+      " was carrying.");
+
+    // We should encounter -ROOT- and .META. first in the Set given that
+    // it is a sorted set.
+    for (Map.Entry<HRegionInfo, Result> e: hris.entrySet()) {
+      // Skip regions of disabled tables.  If a region is an offlined
+      // split parent, fix up its daughters rather than assigning it.
+      HRegionInfo hri = e.getKey();
+      boolean disabled = this.services.getAssignmentManager().
+        isTableDisabled(hri.getTableDesc().getNameAsString());
+      if (disabled) continue;
+      if (hri.isOffline() && hri.isSplit()) {
+        fixupDaughters(hris, e.getValue());
+        continue;
+      }
+      this.services.getAssignmentManager().assign(hri);
+    }
+    this.deadServers.remove(serverName);
+    LOG.info("Finished processing of shutdown of " + serverName);
+  }
+
+  /**
+   * Check that daughter regions are up in .META. and if not, add them.
+   * @param hris All regions for this server in meta.
+   * @param result The contents of the parent row in .META.
+   * @throws IOException
+   */
+  void fixupDaughters(final NavigableMap<HRegionInfo, Result> hris,
+      final Result result) throws IOException {
+    fixupDaughter(hris, result, HConstants.SPLITA_QUALIFIER);
+    fixupDaughter(hris, result, HConstants.SPLITB_QUALIFIER);
+  }
+
+  /**
+   * Check an individual daughter is up in .META.; fix it up if it's not.
+   * @param hris All regions for this server in meta.
+   * @param result The contents of the parent row in .META.
+   * @param qualifier Which daughter to check for.
+   * @throws IOException
+   */
+  void fixupDaughter(final NavigableMap<HRegionInfo, Result> hris,
+      final Result result, final byte [] qualifier)
+  throws IOException {
+    byte [] bytes = result.getValue(HConstants.CATALOG_FAMILY, qualifier);
+    if (bytes == null || bytes.length <= 0) return;
+    HRegionInfo hri = Writables.getHRegionInfo(bytes);
+    if (!hris.containsKey(hri)) {
+      LOG.info("Fixup; missing daughter " + hri.getEncodedNameAsBytes());
+      MetaEditor.addDaughter(this.server.getCatalogTracker(), hri, null);
+      this.services.getAssignmentManager().assign(hri);
+    }
+  }
+}
\ No newline at end of file
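
A minimal sketch of the calling side, for orientation: the handler does all
its work in process(), so the master only has to construct it and hand it to
whatever executor drains master events. The expireServer method, the
executorService field, and its submit call are assumptions for illustration,
not part of this commit.

    // Hypothetical master-side hook; field names are illustrative only.
    void expireServer(final HServerInfo hsi) {
      // Constructing the handler also registers hsi with the DeadServer set.
      ServerShutdownHandler handler =
        new ServerShutdownHandler(this.server, this.services, this.deadServers, hsi);
      this.executorService.submit(handler);  // assumed submission mechanism
    }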

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java?rev=991397&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
(added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
Tue Aug 31 23:51:44 2010
@@ -0,0 +1,66 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Handles adding a new family to an existing table.
+ */
+public class TableAddFamilyHandler extends TableEventHandler {
+
+  private final HColumnDescriptor familyDesc;
+
+  public TableAddFamilyHandler(byte[] tableName, HColumnDescriptor familyDesc,
+      Server server, final MasterServices masterServices) throws IOException {
+    super(EventType.C2M_ADD_FAMILY, tableName, server, masterServices);
+    this.familyDesc = familyDesc;
+  }
+
+  @Override
+  protected void handleTableOperation(List<HRegionInfo> hris)
+  throws IOException {
+    HTableDescriptor htd = hris.get(0).getTableDesc();
+    byte [] familyName = familyDesc.getName();
+    if(htd.hasFamily(familyName)) {
+      throw new InvalidFamilyOperationException(
+          "Family '" + Bytes.toString(familyName) + "' already exists so " +
+          "cannot be added");
+    }
+    for(HRegionInfo hri : hris) {
+      // Update the HTD
+      hri.getTableDesc().addFamily(familyDesc);
+      // Update region in META
+      MetaEditor.updateRegionInfo(this.server.getCatalogTracker(), hri);
+      // Update region info in FS
+      this.masterServices.getMasterFileSystem().updateRegionInfo(hri);
+    }
+  }
+}
\ No newline at end of file

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java?rev=991397&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
(added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
Tue Aug 31 23:51:44 2010
@@ -0,0 +1,69 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Handles deleting a column family from an existing table.
+ */
+public class TableDeleteFamilyHandler extends TableEventHandler {
+
+  private final byte [] familyName;
+
+  public TableDeleteFamilyHandler(byte[] tableName, byte [] familyName,
+      Server server, final MasterServices masterServices) throws IOException {
+    super(EventType.C2M_DELETE_FAMILY, tableName, server, masterServices);
+    this.familyName = familyName;
+  }
+
+  @Override
+  protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
+    HTableDescriptor htd = hris.get(0).getTableDesc();
+    if(!htd.hasFamily(familyName)) {
+      throw new InvalidFamilyOperationException(
+          "Family '" + Bytes.toString(familyName) + "' does not exist so " +
+          "cannot be deleted");
+    }
+    for (HRegionInfo hri : hris) {
+      // Update the HTD
+      hri.getTableDesc().removeFamily(familyName);
+      // Update region in META
+      MetaEditor.updateRegionInfo(this.server.getCatalogTracker(), hri);
+      MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
+      // Update region info in FS
+      mfs.updateRegionInfo(hri);
+      // Delete directory in FS
+      mfs.deleteFamily(hri, familyName);
+    }
+  }
+}
\ No newline at end of file

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java?rev=991397&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
(added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
Tue Aug 31 23:51:44 2010
@@ -0,0 +1,72 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.executor.EventHandler;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Base class for performing operations against tables.
+ * Checks on whether the process can go forward are done in constructor rather
+ * than later on in {@link #process()}.  The idea is to fail fast rather than
+ * later down in an async invocation of {@link #process()} (which currently has
+ * no means of reporting back issues once started).
+ */
+public abstract class TableEventHandler extends EventHandler {
+  private static final Log LOG = LogFactory.getLog(TableEventHandler.class);
+  protected final MasterServices masterServices;
+  protected final byte [] tableName;
+
+  public TableEventHandler(EventType eventType, byte [] tableName, Server server,
+      MasterServices masterServices)
+  throws IOException {
+    super(server, eventType);
+    this.masterServices = masterServices;
+    this.tableName = tableName;
+    this.masterServices.checkTableModifiable(tableName);
+  }
+
+  @Override
+  public void process() {
+    try {
+      LOG.info("Handling table operation " + eventType + " on table " +
+          Bytes.toString(tableName));
+      List<HRegionInfo> hris =
+        MetaReader.getTableRegions(this.server.getCatalogTracker(),
+          tableName);
+      handleTableOperation(hris);
+    } catch (IOException e) {
+      LOG.error("Error handling table operation " + eventType + " on table " +
+          Bytes.toString(tableName), e);
+    }
+  }
+
+  protected abstract void handleTableOperation(List<HRegionInfo> regions)
+  throws IOException;
+}
\ No newline at end of file
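
The fail-fast contract described in the class comment means subclass
constructors throw before a handler is ever queued. A hypothetical subclass
makes the shape concrete (the class name is illustrative and the event type
is borrowed from TableModifyFamilyHandler; imports as in the files above):

    public class TableNoopHandler extends TableEventHandler {
      public TableNoopHandler(byte [] tableName, Server server,
          MasterServices masterServices) throws IOException {
        // checkTableModifiable in the super constructor throws here,
        // synchronously, if the table is missing or cannot be modified.
        super(EventType.C2M_MODIFY_FAMILY, tableName, server, masterServices);
      }
      @Override
      protected void handleTableOperation(List<HRegionInfo> regions) {
        // A real handler edits META and filesystem state per region here.
      }
    }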

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java?rev=991397&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
(added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
Tue Aug 31 23:51:44 2010
@@ -0,0 +1,65 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Handles modifying a column family on an existing table.
+ */
+public class TableModifyFamilyHandler extends TableEventHandler {
+
+  private final HColumnDescriptor familyDesc;
+
+  public TableModifyFamilyHandler(byte[] tableName,
+      HColumnDescriptor familyDesc, Server server,
+      final MasterServices masterServices) throws IOException {
+    super(EventType.C2M_MODIFY_FAMILY, tableName, server, masterServices);
+    this.familyDesc = familyDesc;
+  }
+
+  @Override
+  protected void handleTableOperation(List<HRegionInfo> regions)
+  throws IOException {
+    HTableDescriptor htd = regions.get(0).getTableDesc();
+    byte [] familyName = familyDesc.getName();
+    if(!htd.hasFamily(familyName)) {
+      throw new InvalidFamilyOperationException("Family '" +
+        Bytes.toString(familyName) + "' does not exist so cannot be modified");
+    }
+    for(HRegionInfo hri : regions) {
+      // Update the HTD
+      hri.getTableDesc().addFamily(familyDesc);
+      // Update region in META
+      MetaEditor.updateRegionInfo(this.server.getCatalogTracker(), hri);
+      // Update region info in FS
+      this.masterServices.getMasterFileSystem().updateRegionInfo(hri);
+    }
+  }
+}
\ No newline at end of file

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java?rev=991397&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
(added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
Tue Aug 31 23:51:44 2010
@@ -0,0 +1,36 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import org.apache.hadoop.hbase.executor.EventHandler;
+
+import org.apache.hadoop.hbase.HRegionInfo;
+
+/**
+ * Implementors tote an HRegionInfo instance.
+ * This is a marker interface that can be put on {@link EventHandler}s that
+ * have an {@link HRegionInfo}.
+ */
+public interface TotesHRegionInfo {
+  /**
+   * @return HRegionInfo instance.
+   */
+  public HRegionInfo getHRegionInfo();
+}
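
A sketch of the intended consumer pattern: code holding a generic
EventHandler tests for the marker and extracts the region. The method and
LOG field below are illustrative, not part of this commit.

    // Log which region a handler is carrying, if any.
    void logTotedRegion(final EventHandler handler) {
      if (handler instanceof TotesHRegionInfo) {
        HRegionInfo hri = ((TotesHRegionInfo) handler).getHRegionInfo();
        LOG.debug(handler + " totes " + hri.getRegionNameAsString());
      }
    }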

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=991397&r1=991396&r2=991397&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
(original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
Tue Aug 31 23:51:44 2010
@@ -35,9 +35,8 @@ import org.apache.hadoop.util.StringUtil
 /**
  * Compact region on request and then run split if appropriate
  */
-class CompactSplitThread extends Thread {
+public class CompactSplitThread extends Thread implements CompactionRequestor {
   static final Log LOG = LogFactory.getLog(CompactSplitThread.class);
-
   private final long frequency;
   private final ReentrantLock lock = new ReentrantLock();
 
@@ -60,7 +59,7 @@ class CompactSplitThread extends Thread 
   public CompactSplitThread(HRegionServer server) {
     super();
     this.server = server;
-    this.conf = server.conf;
+    this.conf = server.getConfiguration();
     this.regionSplitLimit = conf.getInt("hbase.regionserver.regionSplitLimit",
         Integer.MAX_VALUE);
     this.frequency =
@@ -70,11 +69,11 @@ class CompactSplitThread extends Thread 
 
   @Override
   public void run() {
-    while (!this.server.isStopRequested()) {
+    while (!this.server.isStopped()) {
       HRegion r = null;
       try {
         r = compactionQueue.poll(this.frequency, TimeUnit.MILLISECONDS);
-        if (r != null && !this.server.isStopRequested()) {
+        if (r != null && !this.server.isStopped()) {
           synchronized (regionsInQueue) {
             regionsInQueue.remove(r);
           }
@@ -83,7 +82,7 @@ class CompactSplitThread extends Thread 
             // Don't interrupt us while we are working
             byte [] midKey = r.compactStores();
             if (shouldSplitRegion() && midKey != null &&
-                !this.server.isStopRequested()) {
+                !this.server.isStopped()) {
               split(r, midKey);
             }
           } finally {
@@ -113,13 +112,9 @@ class CompactSplitThread extends Thread 
     LOG.info(getName() + " exiting");
   }
 
-  /**
-   * @param r HRegion store belongs to
-   * @param why Why compaction requested -- used in debug messages
-   */
-  public synchronized void compactionRequested(final HRegion r,
+  public synchronized void requestCompaction(final HRegion r,
       final String why) {
-    compactionRequested(r, false, why);
+    requestCompaction(r, false, why);
   }
 
   /**
@@ -127,9 +122,9 @@ class CompactSplitThread extends Thread 
    * @param force Whether next compaction should be major
    * @param why Why compaction requested -- used in debug messages
    */
-  public synchronized void compactionRequested(final HRegion r,
+  public synchronized void requestCompaction(final HRegion r,
       final boolean force, final String why) {
-    if (this.server.stopRequested.get()) {
+    if (this.server.isStopped()) {
       return;
     }
     r.setForceMajorCompaction(force);
@@ -154,7 +149,7 @@ class CompactSplitThread extends Thread 
     // the prepare call -- we are not ready to split just now.  Just return.
     if (!st.prepare()) return;
     try {
-      st.execute(this.server);
+      st.execute(this.server, this.server);
     } catch (IOException ioe) {
       try {
         LOG.info("Running rollback of failed split of " +
@@ -177,8 +172,9 @@ class CompactSplitThread extends Thread 
     this.server.reportSplit(parent.getRegionInfo(), st.getFirstDaughter(),
       st.getSecondDaughter());
     LOG.info("Region split, META updated, and report to master. Parent=" +
-      parent.getRegionInfo() + ", new regions: " +
-      st.getFirstDaughter() + ", " + st.getSecondDaughter() + ". Split took " +
+      parent.getRegionInfo().getRegionNameAsString() + ", new regions: " +
+      st.getFirstDaughter().getRegionNameAsString() + ", " +
+      st.getSecondDaughter().getRegionNameAsString() + ". Split took " +
       StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
   }
 

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java?rev=991397&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
(added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
Tue Aug 31 23:51:44 2010
@@ -0,0 +1,28 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+public interface CompactionRequestor {
+  /**
+   * @param r Region to compact
+   * @param why Why compaction was requested -- used in debug messages
+   */
+  public void requestCompaction(final HRegion r, final String why);
+}
\ No newline at end of file
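
With CompactSplitThread implementing this interface (see the diff above),
callers can request compactions against the abstraction instead of the
thread class. A minimal sketch, method name illustrative:

    // `why` surfaces in the compaction thread's debug logging.
    void onTooManyStoreFiles(final CompactionRequestor requestor, final HRegion r) {
      requestor.requestCompaction(r, "too many store files");
    }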

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java?rev=991397&r1=991396&r2=991397&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
Tue Aug 31 23:51:44 2010
@@ -21,10 +21,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 /**
- * Implementors of this interface want to be notified when an HRegion
- * determines that a cache flush is needed. A FlushRequester (or null)
- * must be passed to the HRegion constructor so it knows who to call when it
- * has a filled memstore.
+ * Request a flush.
  */
 public interface FlushRequester {
   /**
@@ -32,5 +29,5 @@ public interface FlushRequester {
    *
    * @param region the HRegion requesting the cache flush
    */
-  void request(HRegion region);
+  void requestFlush(HRegion region);
 }
\ No newline at end of file
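
Call sites change from request(region) to requestFlush(region); the HRegion
diff below shows the renamed invocation (this.flushRequester.requestFlush(this)).
A one-line caller sketch, names illustrative:

    void memstoreOverLimit(final FlushRequester requester, final HRegion region) {
      requester.requestFlush(region);  // flush happens asynchronously via the requester
    }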

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=991397&r1=991396&r2=991397&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
Tue Aug 31 23:51:44 2010
@@ -211,7 +211,7 @@ public class HRegion implements HeapSize
 
   final long memstoreFlushSize;
   private volatile long lastFlushTime;
-  final FlushRequester flushListener;
+  final FlushRequester flushRequester;
   private final long blockingMemStoreSize;
   final long threadWakeFrequency;
   // Used to guard splits and closes
@@ -238,7 +238,7 @@ public class HRegion implements HeapSize
     this.tableDir = null;
     this.blockingMemStoreSize = 0L;
     this.conf = null;
-    this.flushListener = null;
+    this.flushRequester = null;
     this.fs = null;
     this.memstoreFlushSize = 0L;
     this.log = null;
@@ -266,22 +266,19 @@ public class HRegion implements HeapSize
    * @param conf is global configuration settings.
    * @param regionInfo - HRegionInfo that describes the region
    * is new), then read them from the supplied path.
-   * @param flushListener an object that implements CacheFlushListener or null
-   * making progress to master -- otherwise master might think region deploy
-   * failed.  Can be null.
+   * @param flushRequester an object that implements {@link FlushRequester} or null
    *
   * @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)
-
    */
   public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf,
-      HRegionInfo regionInfo, FlushRequester flushListener) {
+      HRegionInfo regionInfo, FlushRequester flushRequester) {
     this.tableDir = tableDir;
     this.comparator = regionInfo.getComparator();
     this.log = log;
     this.fs = fs;
     this.conf = conf;
     this.regionInfo = regionInfo;
-    this.flushListener = flushListener;
+    this.flushRequester = flushRequester;
     this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY,
         10 * 1000);
     String encodedNameStr = this.regionInfo.getEncodedName();
@@ -378,7 +375,7 @@ public class HRegion implements HeapSize
   /**
    * @return True if this region has references.
    */
-  boolean hasReferences() {
+  public boolean hasReferences() {
     for (Store store : this.stores.values()) {
       for (StoreFile sf : store.getStorefiles()) {
         // Found a reference, return.
@@ -943,7 +940,7 @@ public class HRegion implements HeapSize
     //     and that all updates to the log for this regionName that have lower
     //     log-sequence-ids can be safely ignored.
     if (wal != null) {
-      wal.completeCacheFlush(getRegionName(),
+      wal.completeCacheFlush(this.regionInfo.getEncodedNameAsBytes(),
         regionInfo.getTableDesc().getName(), completeSequenceId,
         this.getRegionInfo().isMetaRegion());
     }
@@ -1517,28 +1514,6 @@ public class HRegion implements HeapSize
     }
   }
 
-//  /*
-//   * Utility method to verify values length.
-//   * @param batchUpdate The update to verify
-//   * @throws IOException Thrown if a value is too long
-//   */
-//  private void validateValuesLength(Put put)
-//  throws IOException {
-//    Map<byte[], List<KeyValue>> families = put.getFamilyMap();
-//    for(Map.Entry<byte[], List<KeyValue>> entry : families.entrySet()) {
-//      HColumnDescriptor hcd =
-//        this.regionInfo.getTableDesc().getFamily(entry.getKey());
-//      int maxLen = hcd.getMaxValueLength();
-//      for(KeyValue kv : entry.getValue()) {
-//        if(kv.getValueLength() > maxLen) {
-//          throw new ValueOverMaxLengthException("Value in column "
-//            + Bytes.toString(kv.getColumn()) + " is too long. "
-//            + kv.getValueLength() + " > " + maxLen);
-//        }
-//      }
-//    }
-//  }
-
   /*
    * Check if resources to support an update.
    *
@@ -1700,7 +1675,7 @@ public class HRegion implements HeapSize
   }
 
   private void requestFlush() {
-    if (this.flushListener == null) {
+    if (this.flushRequester == null) {
       return;
     }
     synchronized (writestate) {
@@ -1710,7 +1685,7 @@ public class HRegion implements HeapSize
       writestate.flushRequested = true;
     }
     // Make request outside of synchronize block; HBASE-818.
-    this.flushListener.request(this);
+    this.flushRequester.requestFlush(this);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Flush requested on " + this);
     }
@@ -1856,7 +1831,7 @@ public class HRegion implements HeapSize
         // Check this edit is for me. Also, guard against writing the special
         // METACOLUMN info such as HBASE::CACHEFLUSH entries
         if (kv.matchingFamily(HLog.METAFAMILY) ||
-            !Bytes.equals(key.getRegionName(), this.regionInfo.getRegionName())) {
+            !Bytes.equals(key.getEncodedRegionName(), this.regionInfo.getEncodedNameAsBytes())) {
           skippedEdits++;
           continue;
         }
@@ -2394,27 +2369,48 @@ public class HRegion implements HeapSize
     fs.mkdirs(regionDir);
     HRegion region = HRegion.newHRegion(tableDir,
       new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
-          new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf, null),
+          new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf),
       fs, conf, info, null);
     region.initialize();
     return region;
   }
 
   /**
-   * Convenience method to open a HRegion outside of an HRegionServer context.
+   * Open a Region.
    * @param info Info for region to be opened.
    * @param rootDir Root directory for HBase instance
-   * @param log HLog for region to use. This method will call
+   * @param wal HLog for region to use. This method will call
+   * HLog#setSequenceNumber(long) passing the result of the call to
+   * HRegion#getMinSequenceId() to keep the log sequence id up to
+   * date.  The region server does this every time it opens a new region.
+   * @param conf
+   * @return new HRegion
+   *
+   * @throws IOException
+   */
+  public static HRegion openHRegion(final HRegionInfo info, final HLog wal,
+      final Configuration conf)
+  throws IOException {
+    return openHRegion(info, wal, conf, null, null);
+  }
+
+  /**
+   * Open a Region.
+   * @param info Info for region to be opened.
+   * @param wal HLog for region to use. This method will call
    * HLog#setSequenceNumber(long) passing the result of the call to
    * HRegion#getMinSequenceId() to ensure the log id is properly kept
    * up.  HRegionStore does this every time it opens a new region.
    * @param conf
+   * @param flusher An interface we can request flushes against.
+   * @param reporter An interface we can report progress against.
    * @return new HRegion
    *
    * @throws IOException
    */
-  public static HRegion openHRegion(final HRegionInfo info, final Path rootDir,
-    final HLog log, final Configuration conf)
+  public static HRegion openHRegion(final HRegionInfo info, final HLog wal,
+    final Configuration conf, final FlushRequester flusher,
+    final Progressable reporter)
   throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Opening region: " + info);
@@ -2422,13 +2418,27 @@ public class HRegion implements HeapSize
     if (info == null) {
       throw new NullPointerException("Passed region info is null");
     }
-    HRegion r = HRegion.newHRegion(
-        HTableDescriptor.getTableDir(rootDir, info.getTableDesc().getName()),
-        log, FileSystem.get(conf), conf, info, null);
-    long seqid = r.initialize();
-    // If seqid  > current wal seqid, the wal seqid is updated.
-    if (log != null) log.setSequenceNumber(seqid);
-    return r;
+    Path dir = HTableDescriptor.getTableDir(FSUtils.getRootDir(conf),
+      info.getTableDesc().getName());
+    HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info,
+      flusher);
+    return r.openHRegion(reporter);
+  }
+
+  /**
+   * Open HRegion.
+   * Calls initialize and sets sequenceid.
+   * @param reporter
+   * @return Returns <code>this</code>
+   * @throws IOException
+   */
+  HRegion openHRegion(final Progressable reporter)
+  throws IOException {
+    long seqid = initialize(reporter);
+    if (this.log != null) {
+      this.log.setSequenceNumber(seqid);
+    }
+    return this;
   }
 
   /**
@@ -3134,7 +3144,7 @@ public class HRegion implements HeapSize
         + EnvironmentEdgeManager.currentTimeMillis());
     final Path oldLogDir = new Path(c.get("hbase.tmp.dir"),
         HConstants.HREGION_OLDLOGDIR_NAME);
-    final HLog log = new HLog(fs, logdir, oldLogDir, c, null);
+    final HLog log = new HLog(fs, logdir, oldLogDir, c);
     try {
       processTable(fs, tableDir, log, c, majorCompact);
      } finally {
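
The openHRegion rework above drops the old rootDir parameter (the table
directory is now derived from the Configuration via FSUtils.getRootDir) and
adds an overload that threads through flush and progress callbacks. A
hypothetical helper showing both call shapes:

    static HRegion openRegion(final HRegionInfo info, final HLog wal,
        final Configuration conf, final FlushRequester flusher,
        final Progressable reporter) throws IOException {
      // Simple form: no callbacks wired in.  Full form: flusher receives
      // flush requests, reporter is pinged during potentially long
      // region initialization.
      return (flusher == null && reporter == null)
        ? HRegion.openHRegion(info, wal, conf)
        : HRegion.openHRegion(info, wal, conf, flusher, reporter);
    }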


