hbase-commits mailing list archives

From: te...@apache.org
Subject: svn commit: r1378752 [2/2] - in /hbase/trunk/hbase-server/src: main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/master/ main/java/org/apache/hadoop/hbase/protobuf/ main/java/org/apache/hadoop/hbase/protobuf/generated/ main/java/org/...
Date: Wed, 29 Aug 2012 21:58:52 GMT
Modified: hbase/trunk/hbase-server/src/main/protobuf/RegionServerStatus.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/protobuf/RegionServerStatus.proto?rev=1378752&r1=1378751&r2=1378752&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/protobuf/RegionServerStatus.proto (original)
+++ hbase/trunk/hbase-server/src/main/protobuf/RegionServerStatus.proto Wed Aug 29 21:58:51 2012
@@ -67,16 +67,6 @@ message ReportRSFatalErrorRequest {
 message ReportRSFatalErrorResponse {
 }
 
-message GetLastFlushedSequenceIdRequest {
-  /** region name */
-  required bytes regionName = 1;
-}
-
-message GetLastFlushedSequenceIdResponse {
-  /** the last HLog sequence id flushed from MemStore to HFile for the region */
-  required uint64 lastFlushedSequenceId = 1;
-}
-
 service RegionServerStatusService {
   /** Called when a region server first starts. */
   rpc regionServerStartup(RegionServerStartupRequest)
@@ -92,10 +82,4 @@ service RegionServerStatusService {
    */
   rpc reportRSFatalError(ReportRSFatalErrorRequest)
     returns(ReportRSFatalErrorResponse);
-
-  /** Called to get the sequence id of the last MemStore entry flushed to an
-   * HFile for a specified region. Used by the region server to speed up
-   * log splitting. */
-  rpc getLastFlushedSequenceId(GetLastFlushedSequenceIdRequest)
-    returns(GetLastFlushedSequenceIdResponse);
 }

Modified: hbase/trunk/hbase-server/src/main/protobuf/hbase.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/protobuf/hbase.proto?rev=1378752&r1=1378751&r2=1378752&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/protobuf/hbase.proto (original)
+++ hbase/trunk/hbase-server/src/main/protobuf/hbase.proto Wed Aug 29 21:58:51 2012
@@ -132,9 +132,6 @@ message RegionLoad {
 
   /** Region-level coprocessors. */
   repeated Coprocessor coprocessors = 15;
-
-  /** the most recent sequence Id from cache flush */
-  optional uint64 completeSequenceId = 16;
 }
 
 /* Server-level protobufs */

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java?rev=1378752&r1=1378751&r2=1378752&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java Wed Aug 29 21:58:51 2012
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.TableDescriptors;
@@ -176,10 +175,6 @@ public class TestCatalogJanitor {
     }
 
     @Override
-    public void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
-      // no-op
-    }
-    @Override
     public void checkTableModifiable(byte[] tableName) throws IOException {
       //no-op
     }

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java?rev=1378752&r1=1378751&r2=1378752&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java Wed Aug 29 21:58:51 2012
@@ -666,12 +666,13 @@ public class TestHLogSplit {
     fs.initialize(fs.getUri(), conf);
     // Set up a splitter that will throw an IOE on the output side
     HLogSplitter logSplitter = new HLogSplitter(
-        conf, hbaseDir, hlogDir, oldLogDir, fs, null) {
+        conf, hbaseDir, hlogDir, oldLogDir, fs) {
       protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
       throws IOException {
         HLog.Writer mockWriter = Mockito.mock(HLog.Writer.class);
         Mockito.doThrow(new IOException("Injected")).when(mockWriter).append(Mockito.<HLog.Entry>any());
         return mockWriter;
+
       }
     };
     try {
@@ -698,7 +699,7 @@ public class TestHLogSplit {
         when(spiedFs).append(Mockito.<Path>any());
 
     HLogSplitter logSplitter = new HLogSplitter(
-        conf, hbaseDir, hlogDir, oldLogDir, spiedFs, null);
+        conf, hbaseDir, hlogDir, oldLogDir, spiedFs);
 
     try {
       logSplitter.splitLog();
@@ -756,7 +757,7 @@ public class TestHLogSplit {
 
     // Create a splitter that reads and writes the data without touching disk
     HLogSplitter logSplitter = new HLogSplitter(
-        localConf, hbaseDir, hlogDir, oldLogDir, fs, null) {
+        localConf, hbaseDir, hlogDir, oldLogDir, fs) {
 
       /* Produce a mock writer that doesn't write anywhere */
       protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
@@ -1015,7 +1016,7 @@ public class TestHLogSplit {
     generateHLogs(1, 10, -1);
     FileStatus logfile = fs.listStatus(hlogDir)[0];
     fs.initialize(fs.getUri(), conf);
-    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter, null);
+    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter);
     HLogSplitter.finishSplitLogFile(hbaseDir, oldLogDir, logfile.getPath()
         .toString(), conf);
 
@@ -1044,7 +1045,7 @@ public class TestHLogSplit {
     LOG.info("Region directory is" + regiondir);
     fs.delete(regiondir, true);
     
-    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter, null);
+    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter);
     HLogSplitter.finishSplitLogFile(hbaseDir, oldLogDir, logfile.getPath()
         .toString(), conf);
     
@@ -1062,7 +1063,7 @@ public class TestHLogSplit {
 
     fs.initialize(fs.getUri(), conf);
 
-    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter, null);
+    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter);
     HLogSplitter.finishSplitLogFile(hbaseDir, oldLogDir, logfile.getPath()
         .toString(), conf);
     Path tdir = HTableDescriptor.getTableDir(hbaseDir, TABLE_NAME);
@@ -1078,7 +1079,7 @@ public class TestHLogSplit {
     FileStatus logfile = fs.listStatus(hlogDir)[0];
     fs.initialize(fs.getUri(), conf);
 
-    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter, null);
+    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter);
     HLogSplitter.finishSplitLogFile(hbaseDir, oldLogDir, logfile.getPath()
         .toString(), conf);
     for (String region : regions) {
@@ -1098,7 +1099,7 @@ public class TestHLogSplit {
         Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true, fs);
 
     fs.initialize(fs.getUri(), conf);
-    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter, null);
+    HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter);
     HLogSplitter.finishSplitLogFile(hbaseDir, oldLogDir, logfile.getPath()
         .toString(), conf);
 
@@ -1122,7 +1123,7 @@ public class TestHLogSplit {
     generateHLogs(-1);
 
     HLogSplitter logSplitter = new HLogSplitter(
-        conf, hbaseDir, hlogDir, oldLogDir, fs, null) {
+        conf, hbaseDir, hlogDir, oldLogDir, fs) {
       protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
       throws IOException {
         HLog.Writer writer = HLog.createWriter(fs, logfile, conf);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java?rev=1378752&r1=1378751&r2=1378752&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java Wed Aug 29 21:58:51 2012
@@ -664,7 +664,7 @@ public class TestWALReplay {
     wal.close();
     FileStatus[] listStatus = this.fs.listStatus(wal.getDir());
     HLogSplitter.splitLogFile(hbaseRootDir, listStatus[0], this.fs, this.conf,
-        null, null);
+        null);
     FileStatus[] listStatus1 = this.fs.listStatus(new Path(hbaseRootDir + "/"
         + tableNameStr + "/" + hri.getEncodedName() + "/recovered.edits"));
     int editCount = 0;


