hbase-commits mailing list archives

From: st...@apache.org
Subject: svn commit: r1166524 - in /hbase/trunk: ./ src/main/java/org/apache/hadoop/hbase/client/ src/main/java/org/apache/hadoop/hbase/ipc/ src/main/java/org/apache/hadoop/hbase/regionserver/ src/main/ruby/ src/main/ruby/hbase/ src/main/ruby/shell/commands/ sr...
Date: Thu, 08 Sep 2011 04:22:42 GMT
Author: stack
Date: Thu Sep  8 04:22:42 2011
New Revision: 1166524

URL: http://svn.apache.org/viewvc?rev=1166524&view=rev
Log:
HBASE-4260 Expose a command to manually trigger an HLog roll

Added:
    hbase/trunk/src/main/ruby/shell/commands/hlog_roll.rb
Modified:
    hbase/trunk/CHANGES.txt
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hbase/trunk/src/main/ruby/hbase/admin.rb
    hbase/trunk/src/main/ruby/shell.rb
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java

Modified: hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/trunk/CHANGES.txt?rev=1166524&r1=1166523&r2=1166524&view=diff
==============================================================================
--- hbase/trunk/CHANGES.txt (original)
+++ hbase/trunk/CHANGES.txt Thu Sep  8 04:22:42 2011
@@ -453,6 +453,8 @@ Release 0.91.0 - Unreleased
    HBASE-4339  Improve eclipse documentation and project file generation
                (Eric Charles)
    HBASE-4342  Update Thrift to 0.7.0 (Moaz Reyad)
+   HBASE-4260  Expose a command to manually trigger an HLog roll
+               (ramkrishna.s.vasudevan)
 
   TASKS
    HBASE-3559  Move report of split to master OFF the heartbeat channel

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java?rev=1166524&r1=1166523&r2=1166524&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java Thu Sep  8 04:22:42 2011
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.catalog.M
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.ipc.HMasterInterface;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -1581,4 +1582,24 @@ public class HBaseAdmin implements Abort
     return this.connection.getHTableDescriptors(tableNames);
   }
 
+  /**
+   * Roll the log writer. That is, start writing log messages to a new file.
+   * 
+   * @param serverName
+   *          The servername of the regionserver. A server name is made of host,
+   *          port and startcode. This is mandatory. Here is an example:
+   *          <code> host187.example.com,60020,1289493121758</code>
+   * @return If lots of logs, flush the returned regions so next time through
+   * we can clean logs. Returns null if nothing to flush.  Names are actual
+   * region names as returned by {@link HRegionInfo#getEncodedName()}  
+   * @throws IOException if a remote or network exception occurs
+   * @throws FailedLogCloseException
+   */
+  public synchronized byte[][] rollHLogWriter(String serverName)
+      throws IOException, FailedLogCloseException {
+    ServerName sn = new ServerName(serverName);
+    HRegionInterface rs = this.connection.getHRegionConnection(
+        sn.getHostname(), sn.getPort());
+    return rs.rollHLogWriter();
+  }
 }
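
As a usage illustration (not part of this commit), a client might drive the new HBaseAdmin#rollHLogWriter roughly as sketched below. The example class name, the server name string, and the choice to simply print the returned encoded region names are assumptions for illustration only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RollHLogExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        // Server name is host,port,startcode, e.g. taken from the master UI.
        byte[][] regionsToFlush =
            admin.rollHLogWriter("host187.example.com,60020,1289493121758");
        if (regionsToFlush != null) {
          // Per the javadoc, the returned encoded region names are candidates
          // for flushing so older HLog files can be cleaned on a later roll.
          for (byte[] encodedName : regionsToFlush) {
            System.out.println("Candidate for flush: " + Bytes.toString(encodedName));
          }
        }
      }
    }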

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java?rev=1166524&r1=1166523&r2=1166524&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java Thu Sep  8 04:22:42 2011
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.filter.Co
 import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
+import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hbase.ipc.VersionedProtocol;
@@ -513,4 +514,14 @@ public interface HRegionInterface extend
    * @throws IOException exception
    */
   public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries() throws IOException;
+  /**
+   * Roll the log writer. That is, start writing log messages to a new file.
+   * 
+   * @throws IOException
+   * @throws FailedLogCloseException
+   * @return If lots of logs, flush the returned regions so next time through
+   * we can clean logs. Returns null if nothing to flush.  Names are actual
+   * region names as returned by {@link HRegionInfo#getEncodedName()} 
+   */
+  public byte[][] rollHLogWriter() throws IOException, FailedLogCloseException;
 }

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1166524&r1=1166523&r2=1166524&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Thu Sep  8 04:22:42 2011
@@ -119,6 +119,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
 import org.apache.hadoop.hbase.regionserver.handler.OpenRootHandler;
 import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
+import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -3088,6 +3089,12 @@ public class HRegionServer implements HR
     return c.getBlockCacheColumnFamilySummaries(this.conf);
   }
 
+  @Override
+  public byte[][] rollHLogWriter() throws IOException, FailedLogCloseException {
+    HLog wal = this.getWAL();
+    return wal.rollWriter(true);
+  }
+
 
 
 }

Modified: hbase/trunk/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/ruby/hbase/admin.rb?rev=1166524&r1=1166523&r2=1166524&view=diff
==============================================================================
--- hbase/trunk/src/main/ruby/hbase/admin.rb (original)
+++ hbase/trunk/src/main/ruby/hbase/admin.rb Thu Sep  8 04:22:42 2011
@@ -61,6 +61,12 @@ module Hbase
     end
 
     #----------------------------------------------------------------------------------------------
+    # Requests a regionserver's HLog roll
+    def hlog_roll(server_name)
+      @admin.rollHLogWriter(server_name)
+    end
+
+    #----------------------------------------------------------------------------------------------
     # Requests a table or region split
     def split(table_or_region_name, split_point)
       if split_point == nil

Modified: hbase/trunk/src/main/ruby/shell.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/ruby/shell.rb?rev=1166524&r1=1166523&r2=1166524&view=diff
==============================================================================
--- hbase/trunk/src/main/ruby/shell.rb (original)
+++ hbase/trunk/src/main/ruby/shell.rb Thu Sep  8 04:22:42 2011
@@ -265,6 +265,7 @@ Shell.load_command_group(
     split
     unassign
     zk_dump
+    hlog_roll
   ]
 )
 

Added: hbase/trunk/src/main/ruby/shell/commands/hlog_roll.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/ruby/shell/commands/hlog_roll.rb?rev=1166524&view=auto
==============================================================================
--- hbase/trunk/src/main/ruby/shell/commands/hlog_roll.rb (added)
+++ hbase/trunk/src/main/ruby/shell/commands/hlog_roll.rb Thu Sep  8 04:22:42 2011
@@ -0,0 +1,40 @@
+#
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+module Shell
+  module Commands
+    class HlogRoll < Command
+      def help
+        return <<-EOF
+Roll the log writer. That is, start writing log messages to a new file.
+The name of the regionserver should be given as the parameter.  A
+'server_name' is the host, port plus startcode of a regionserver. For
+example: host187.example.com,60020,1289493121758 (find servername in
+master ui or when you do detailed status in shell)
+EOF
+      end
+
+      def command(server_name)
+        format_simple_command do
+          admin.hlog_roll(server_name)
+        end
+      end
+    end
+  end
+end
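
For completeness, once this command is registered in shell.rb it would be invoked from the HBase shell roughly as below; the server name is the illustrative one from the help text above:

    hbase> hlog_roll 'host187.example.com,60020,1289493121758'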

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java?rev=1166524&r1=1166523&r2=1166524&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java Thu Sep  8 04:22:42 2011
@@ -55,7 +55,10 @@ import org.apache.hadoop.hbase.executor.
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.TestHLogUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -1160,5 +1163,109 @@ public class TestAdmin {
         expectedRegions, RegionInfos.size());
     
  }
+
+  @Test
+  public void testHLogRollWriting() throws Exception {
+    setUpforLogRolling();
+    String className = this.getClass().getName();
+    StringBuilder v = new StringBuilder(className);
+    while (v.length() < 1000) {
+      v.append(className);
+    }
+    byte[] value = Bytes.toBytes(v.toString());
+    HRegionServer regionServer = startAndWriteData("TestLogRolling", value);
+    LOG.info("after writing there are "
+        + TestHLogUtils.getNumLogFiles(regionServer.getWAL()) + " log files");
+
+    // flush all regions
+
+    List<HRegion> regions = new ArrayList<HRegion>(regionServer
+        .getOnlineRegionsLocalContext());
+    for (HRegion r : regions) {
+      r.flushcache();
+    }
+    admin.rollHLogWriter(regionServer.getServerName().getServerName());
+    int count = TestHLogUtils.getNumLogFiles(regionServer.getWAL());
+    LOG.info("after flushing all regions and rolling logs there are " +
+        count + " log files");
+    assertTrue(("actual count: " + count), count <= 2);
+  }
+
+  private void setUpforLogRolling() {
+    // Force a region split after every 768KB
+    TEST_UTIL.getConfiguration().setLong("hbase.hregion.max.filesize",
+        768L * 1024L);
+
+    // We roll the log after every 32 writes
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.maxlogentries", 32);
+
+    TEST_UTIL.getConfiguration().setInt(
+        "hbase.regionserver.logroll.errors.tolerated", 2);
+    TEST_UTIL.getConfiguration().setInt("ipc.ping.interval", 10 * 1000);
+    TEST_UTIL.getConfiguration().setInt("ipc.socket.timeout", 10 * 1000);
+    TEST_UTIL.getConfiguration().setInt("hbase.rpc.timeout", 10 * 1000);
+
+    // For less frequently updated regions flush after every 2 flushes
+    TEST_UTIL.getConfiguration().setInt(
+        "hbase.hregion.memstore.optionalflushcount", 2);
+
+    // We flush the cache after every 8192 bytes
+    TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.flush.size",
+        8192);
+
+    // Increase the amount of time between client retries
+    TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 10 * 1000);
+
+    // Reduce thread wake frequency so that other threads can get
+    // a chance to run.
+    TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY,
+        2 * 1000);
+
+    /**** configuration for testLogRollOnDatanodeDeath ****/
+    // make sure log.hflush() calls syncFs() to open a pipeline
+    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
+    // lower the namenode & datanode heartbeat so the namenode
+    // quickly detects datanode failures
+    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
+    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
+    // the namenode might still try to choose the recently-dead datanode
+    // for a pipeline, so try to a new pipeline multiple times
+    TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 30);
+    TEST_UTIL.getConfiguration().setInt(
+        "hbase.regionserver.hlog.tolerable.lowreplication", 2);
+    TEST_UTIL.getConfiguration().setInt(
+        "hbase.regionserver.hlog.lowreplication.rolllimit", 3);
+  }
+  
+  private HRegionServer startAndWriteData(String tableName, byte[] value)
+      throws IOException {
+    // When the META table can be opened, the region servers are running
+    new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
+    HRegionServer regionServer = TEST_UTIL.getHbaseCluster()
+        .getRegionServerThreads().get(0).getRegionServer();
+
+    // Create the test table and open it
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
+    admin.createTable(desc);
+    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
+
+    regionServer = TEST_UTIL.getRSForFirstRegionInTable(Bytes
+        .toBytes(tableName));
+    for (int i = 1; i <= 256; i++) { // 256 writes should cause 8 log rolls
+      Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", i)));
+      put.add(HConstants.CATALOG_FAMILY, null, value);
+      table.put(put);
+      if (i % 32 == 0) {
+        // After every 32 writes sleep to let the log roller run
+        try {
+          Thread.sleep(2000);
+        } catch (InterruptedException e) {
+          // continue
+        }
+      }
+    }
+    return regionServer;
+  }
   
 }


