hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jmhs...@apache.org
Subject svn commit: r1445918 [5/29] - in /hbase/branches/hbase-7290: ./ bin/ conf/ dev-support/ hbase-client/ hbase-common/ hbase-common/src/main/java/org/apache/hadoop/hbase/ hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/ hbase-common/src/mai...
Date Wed, 13 Feb 2013 20:58:32 GMT
Modified: hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon Wed Feb 13 20:58:23 2013
@@ -48,27 +48,27 @@ Arrays.sort(serverNames);
 </%java>
 
 <div class="tabbable">
-    <ul class="nav nav-tabs">
-        <li class="active"><a href="#baseStats" data-toggle="tab">Base Stats</a></li>
-        <li class=""><a href="#memoryStats" data-toggle="tab">Memory</a></li>
-        <li class=""><a href="#requestStats" data-toggle="tab">Requests</a></li>
-        <li class=""><a href="#storeStats" data-toggle="tab">Storefiles</a></li>
-        <li class=""><a href="#compactStas" data-toggle="tab">Compactions</a></li>
+    <ul class="nav nav-pills">
+        <li class="active"><a href="#tab_baseStats" data-toggle="tab">Base Stats</a></li>
+        <li class=""><a href="#tab_memoryStats" data-toggle="tab">Memory</a></li>
+        <li class=""><a href="#tab_requestStats" data-toggle="tab">Requests</a></li>
+        <li class=""><a href="#tab_storeStats" data-toggle="tab">Storefiles</a></li>
+        <li class=""><a href="#tab_compactStas" data-toggle="tab">Compactions</a></li>
     </ul>
     <div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
-        <div class="tab-pane active" id="baseStats">
+        <div class="tab-pane active" id="tab_baseStats">
             <& baseStats; serverNames = serverNames; &>
         </div>
-        <div class="tab-pane" id="memoryStats">
+        <div class="tab-pane" id="tab_memoryStats">
             <& memoryStats; serverNames = serverNames; &>
         </div>
-        <div class="tab-pane" id="requestStats">
+        <div class="tab-pane" id="tab_requestStats">
             <& requestStats; serverNames = serverNames; &>
         </div>
-        <div class="tab-pane" id="storeStats">
+        <div class="tab-pane" id="tab_storeStats">
             <& storeStats; serverNames = serverNames; &>
         </div>
-        <div class="tab-pane" id="compactStas">
+        <div class="tab-pane" id="tab_compactStas">
             <& compactionStats; serverNames = serverNames; &>
         </div>
     </div>

Modified: hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon Wed Feb 13 20:58:23 2013
@@ -68,7 +68,7 @@ org.apache.hadoop.hbase.protobuf.generat
             <span class="icon-bar"></span>
             <span class="icon-bar"></span>
           </a>
-          <a class="brand" href="/rs-status">HBase Region Server</a>
+          <a class="brand" href="/rs-status"><img src="/static/hbase_logo_small.png" alt="HBase Logo"/></a>
           <div class="nav-collapse">
             <ul class="nav">
                 <li class="active"><a href="/">Home</a></li>
@@ -87,22 +87,27 @@ org.apache.hadoop.hbase.protobuf.generat
 
 <div class="container">
     <div class="row inner_header">
-        <div class="span8">
-            <h1>RegionServer: <% serverName.getHostname() %></h1>
-        </div>
-        <div class="span4 logo">
-            <img src="/static/hbase_logo.png" height="66" width="266" alt="HBase logo"/>
+        <div class="page-header">
+            <h1>RegionServer <small><% serverName.getHostname() %></small></h1>
         </div>
     </div>
+    <div class="row">
 
+    <section>
     <h2>Server Metrics</h2>
     <& ServerMetricsTmpl; mWrap = regionServer.getMetrics().getRegionServerWrapper(); &>
+    </section>
 
+    <section>
     <& ../common/TaskMonitorTmpl; filter = filter &>
+    </section>
 
+    <section>
     <h2>Regions</h2>
     <& RegionListTmpl; regionServer = regionServer; onlineRegions = onlineRegions; &>
+    </section>
 
+    <section>
     <h2>Software Attributes</h2>
     <table id="attributes_table" class="table table-striped">
         <tr>
@@ -151,6 +156,8 @@ org.apache.hadoop.hbase.protobuf.generat
             <td>Address of HBase Master</td>
         </tr>
     </table>
+    </section>
+    </div>
 </div>
 <script src="/static/js/jquery.min.js" type="text/javascript"></script>
 <script src="/static/js/bootstrap.min.js" type="text/javascript"></script>

Modified: hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon Wed Feb 13 20:58:23 2013
@@ -38,23 +38,23 @@
     </%java>
 
     <div class="tabbable">
-        <ul class="nav nav-tabs">
-            <li class="active"><a href="#regionBaseInfo" data-toggle="tab">Base Info</a> </li>
-            <li><a href="#regionRequestStats" data-toggle="tab">Request metrics</a></li>
-            <li class=""><a href="#regionStoreStats" data-toggle="tab">Storefile Metrics</a></li>
-            <li class=""><a href="#regionCompactStas" data-toggle="tab">Compaction Metrics</a></li>
+        <ul class="nav nav-pills">
+            <li class="active"><a href="#tab_regionBaseInfo" data-toggle="tab">Base Info</a> </li>
+            <li><a href="#tab_regionRequestStats" data-toggle="tab">Request metrics</a></li>
+            <li class=""><a href="#tab_regionStoreStats" data-toggle="tab">Storefile Metrics</a></li>
+            <li class=""><a href="#tab_regionCompactStas" data-toggle="tab">Compaction Metrics</a></li>
         </ul>
         <div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
-            <div class="tab-pane active" id="regionBaseInfo">
+            <div class="tab-pane active" id="tab_regionBaseInfo">
                 <& baseInfo; onlineRegions = onlineRegions; &>
             </div>
-            <div class="tab-pane" id="regionRequestStats">
+            <div class="tab-pane" id="tab_regionRequestStats">
                 <& requestStats; onlineRegions = onlineRegions; &>
             </div>
-            <div class="tab-pane" id="regionStoreStats">
+            <div class="tab-pane" id="tab_regionStoreStats">
                 <& storeStats; onlineRegions = onlineRegions; &>
             </div>
-            <div class="tab-pane" id="regionCompactStas">
+            <div class="tab-pane" id="tab_regionCompactStas">
                 <& compactStats; onlineRegions = onlineRegions; &>
             </div>
         </div>

Modified: hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon Wed Feb 13 20:58:23 2013
@@ -35,31 +35,31 @@ com.yammer.metrics.stats.Snapshot;
 java.lang.management.ManagementFactory;
 </%import>
 <div class="tabbable">
-    <ul class="nav nav-tabs">
-        <li class="active"><a href="#baseStats" data-toggle="tab">Base Stats</a></li>
-        <li class=""><a href="#memoryStats" data-toggle="tab">Memory</a></li>
-        <li class=""><a href="#requestStats" data-toggle="tab">Requests</a></li>
-        <li class=""><a href="#storeStats" data-toggle="tab">Storefiles</a></li>
-        <li class=""><a href="#queueStats" data-toggle="tab">Queues</a></li>
-        <li class=""><a href="#blockCacheStats" data-toggle="tab">Block Cache</a></li>
+    <ul class="nav nav-pills">
+        <li class="active"><a href="#tab_baseStats" data-toggle="tab">Base Stats</a></li>
+        <li class=""><a href="#tab_memoryStats" data-toggle="tab">Memory</a></li>
+        <li class=""><a href="#tab_requestStats" data-toggle="tab">Requests</a></li>
+        <li class=""><a href="#tab_storeStats" data-toggle="tab">Storefiles</a></li>
+        <li class=""><a href="#tab_queueStats" data-toggle="tab">Queues</a></li>
+        <li class=""><a href="#tab_blockCacheStats" data-toggle="tab">Block Cache</a></li>
     </ul>
     <div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
-        <div class="tab-pane active" id="baseStats">
+        <div class="tab-pane active" id="tab_baseStats">
             <& baseStats; mWrap = mWrap &>
         </div>
-        <div class="tab-pane" id="memoryStats">
+        <div class="tab-pane" id="tab_memoryStats">
             <& memoryStats; mWrap = mWrap &>
         </div>
-        <div class="tab-pane" id="requestStats">
+        <div class="tab-pane" id="tab_requestStats">
             <& requestStats; mWrap = mWrap &>
         </div>
-        <div class="tab-pane" id="storeStats">
+        <div class="tab-pane" id="tab_storeStats">
             <& storeStats; mWrap = mWrap &>
         </div>
-        <div class="tab-pane" id="queueStats">
+        <div class="tab-pane" id="tab_queueStats">
             <& queueStats; mWrap = mWrap  &>
         </div>
-        <div class="tab-pane" id="blockCacheStats">
+        <div class="tab-pane" id="tab_blockCacheStats">
             <& blockCacheStats; mWrap = mWrap &>
         </div>
     </div>

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java Wed Feb 13 20:58:23 2013
@@ -29,14 +29,17 @@ import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.DeserializationException;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Text;
@@ -60,14 +63,16 @@ import com.google.protobuf.InvalidProtoc
 public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
   // For future backward compatibility
 
-  // Version 3 was when column names become byte arrays and when we picked up
+  // Version  3 was when column names become byte arrays and when we picked up
   // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
-  // Version 5 was when bloom filter descriptors were removed.
-  // Version 6 adds metadata as a map where keys and values are byte[].
-  // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
-  // Version 8 -- reintroduction of bloom filters, changed from boolean to enum
-  // Version 9 -- add data block encoding
-  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 9;
+  // Version  5 was when bloom filter descriptors were removed.
+  // Version  6 adds metadata as a map where keys and values are byte[].
+  // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
+  // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
+  // Version  9 -- add data block encoding
+  // Version 10 -- change metadata to standard type.
+  // Version 11 -- add column family level configuration.
+  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
 
   // These constants are used as FileInfo keys
   public static final String COMPRESSION = "COMPRESSION";
@@ -165,7 +170,7 @@ public class HColumnDescriptor implement
   /**
    * Default setting for whether or not to use bloomfilters.
    */
-  public static final String DEFAULT_BLOOMFILTER = StoreFile.BloomType.NONE.toString();
+  public static final String DEFAULT_BLOOMFILTER = BloomType.NONE.toString();
 
   /**
    * Default setting for whether to cache bloom filter blocks on write if block
@@ -221,9 +226,16 @@ public class HColumnDescriptor implement
   private byte [] name;
 
   // Column metadata
-  protected final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
+  private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
     new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
 
+  /**
+   * A map which holds the configuration specific to the column family.
+   * The keys of the map have the same names as config keys and override the defaults with
+   * cf-specific settings. Example usage may be for compactions, etc.
+   */
+  private final Map<String, String> configuration = new HashMap<String, String>();
+
   /*
    * Cache the max versions rather than calculate it every time.
    */
@@ -278,6 +290,9 @@ public class HColumnDescriptor implement
         desc.values.entrySet()) {
       this.values.put(e.getKey(), e.getValue());
     }
+    for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
+      this.configuration.put(e.getKey(), e.getValue());
+    }
     setMaxVersions(desc.getMaxVersions());
   }
 
@@ -409,7 +424,7 @@ public class HColumnDescriptor implement
     setEncodeOnDisk(encodeOnDisk);
     setDataBlockEncoding(DataBlockEncoding.
         valueOf(dataBlockEncoding.toUpperCase()));
-    setBloomFilterType(StoreFile.BloomType.
+    setBloomFilterType(BloomType.
       valueOf(bloomFilter.toUpperCase()));
     setBlocksize(blocksize);
     setScope(scope);
@@ -766,19 +781,19 @@ public class HColumnDescriptor implement
   /**
    * @return bloom filter type used for new StoreFiles in ColumnFamily
    */
-  public StoreFile.BloomType getBloomFilterType() {
+  public BloomType getBloomFilterType() {
     String n = getValue(BLOOMFILTER);
     if (n == null) {
       n = DEFAULT_BLOOMFILTER;
     }
-    return StoreFile.BloomType.valueOf(n.toUpperCase());
+    return BloomType.valueOf(n.toUpperCase());
   }
 
   /**
    * @param bt bloom filter type
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setBloomFilterType(final StoreFile.BloomType bt) {
+  public HColumnDescriptor setBloomFilterType(final BloomType bt) {
     return setValue(BLOOMFILTER, bt.toString());
   }
 
@@ -936,7 +951,7 @@ public class HColumnDescriptor implement
     // print all non-reserved, advanced config keys as a separate subset
     if (hasConfigKeys) {
       s.append(", ");
-      s.append(HConstants.CONFIG).append(" => ");
+      s.append(HConstants.METADATA).append(" => ");
       s.append('{');
       boolean printComma = false;
       for (ImmutableBytesWritable k : values.keySet()) {
@@ -955,6 +970,21 @@ public class HColumnDescriptor implement
       }
       s.append('}');
     }
+
+    if (!configuration.isEmpty()) {
+      s.append(", ");
+      s.append(HConstants.CONFIGURATION).append(" => ");
+      s.append('{');
+      boolean printCommaForConfiguration = false;
+      for (Map.Entry<String, String> e : configuration.entrySet()) {
+        if (printCommaForConfiguration) s.append(", ");
+        printCommaForConfiguration = true;
+        s.append('\'').append(e.getKey()).append('\'');
+        s.append(" => ");
+        s.append('\'').append(e.getValue()).append('\'');
+      }
+      s.append("}");
+    }
     return s;
   }
 
@@ -987,6 +1017,7 @@ public class HColumnDescriptor implement
     int result = Bytes.hashCode(this.name);
     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
     result ^= values.hashCode();
+    result ^= configuration.hashCode();
     return result;
   }
 
@@ -1057,6 +1088,19 @@ public class HColumnDescriptor implement
       String value = getValue(HConstants.VERSIONS);
       this.cachedMaxVersions = (value != null)?
           Integer.valueOf(value).intValue(): DEFAULT_VERSIONS;
+      if (version > 10) {
+        configuration.clear();
+        int numConfigs = in.readInt();
+        for (int i = 0; i < numConfigs; i++) {
+          ImmutableBytesWritable key = new ImmutableBytesWritable();
+          ImmutableBytesWritable val = new ImmutableBytesWritable();
+          key.readFields(in);
+          val.readFields(in);
+          configuration.put(
+            Bytes.toString(key.get(), key.getOffset(), key.getLength()),
+            Bytes.toString(val.get(), val.getOffset(), val.getLength()));
+        }
+      }
     }
   }
 
@@ -1073,6 +1117,11 @@ public class HColumnDescriptor implement
       e.getKey().write(out);
       e.getValue().write(out);
     }
+    out.writeInt(configuration.size());
+    for (Map.Entry<String, String> e : configuration.entrySet()) {
+      new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
+      new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
+    }
   }
 
   // Comparable
@@ -1087,6 +1136,13 @@ public class HColumnDescriptor implement
       else if (result > 0)
         result = 1;
     }
+    if (result == 0) {
+      result = this.configuration.hashCode() - o.configuration.hashCode();
+      if (result < 0)
+        result = -1;
+      else if (result > 0)
+        result = 1;
+    }
     return result;
   }
 
@@ -1127,8 +1183,11 @@ public class HColumnDescriptor implement
     // unrelated-looking test failures that are hard to trace back to here.
     HColumnDescriptor hcd = new HColumnDescriptor();
     hcd.name = cfs.getName().toByteArray();
-    for (ColumnFamilySchema.Attribute a: cfs.getAttributesList()) {
-      hcd.setValue(a.getName().toByteArray(), a.getValue().toByteArray());
+    for (BytesBytesPair a: cfs.getAttributesList()) {
+      hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
+    }
+    for (NameStringPair a: cfs.getConfigurationList()) {
+      hcd.setConfiguration(a.getName(), a.getValue());
     }
     return hcd;
   }
@@ -1140,11 +1199,52 @@ public class HColumnDescriptor implement
     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
     builder.setName(ByteString.copyFrom(getName()));
     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
-      ColumnFamilySchema.Attribute.Builder aBuilder = ColumnFamilySchema.Attribute.newBuilder();
-      aBuilder.setName(ByteString.copyFrom(e.getKey().get()));
-      aBuilder.setValue(ByteString.copyFrom(e.getValue().get()));
+      BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
+      aBuilder.setFirst(ByteString.copyFrom(e.getKey().get()));
+      aBuilder.setSecond(ByteString.copyFrom(e.getValue().get()));
       builder.addAttributes(aBuilder.build());
     }
+    for (Map.Entry<String, String> e : this.configuration.entrySet()) {
+      NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
+      aBuilder.setName(e.getKey());
+      aBuilder.setValue(e.getValue());
+      builder.addConfiguration(aBuilder.build());
+    }
     return builder.build();
   }
+
+  /**
+   * Getter for accessing the configuration value by key.
+   */
+  public String getConfigurationValue(String key) {
+    return configuration.get(key);
+  }
+
+  /**
+   * Getter for fetching an unmodifiable {@link #configuration} map.
+   */
+  public Map<String, String> getConfiguration() {
+    // shallow pointer copy
+    return Collections.unmodifiableMap(configuration);
+  }
+
+  /**
+   * Setter for storing a configuration setting in {@link #configuration} map.
+   * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
+   * @param value String value. If null, removes the configuration.
+   */
+  public void setConfiguration(String key, String value) {
+    if (value == null) {
+      removeConfiguration(key);
+    } else {
+      configuration.put(key, value);
+    }
+  }
+
+  /**
+   * Remove a configuration setting represented by the key from the {@link #configuration} map.
+   */
+  public void removeConfiguration(final String key) {
+    configuration.remove(key);
+  }
 }

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java Wed Feb 13 20:58:23 2013
@@ -30,7 +30,9 @@ import org.apache.hadoop.classification.
 
 
 /**
- * Data structure to describe the distribution of HDFS blocks amount hosts
+ * Data structure to describe the distribution of HDFS blocks amount hosts.
+ *
+ * Adding erroneous data will be ignored silently.
  */
 @InterfaceAudience.Private
 public class HDFSBlocksDistribution {
@@ -122,8 +124,10 @@ public class HDFSBlocksDistribution {
    */
   public void addHostsAndBlockWeight(String[] hosts, long weight) {
     if (hosts == null || hosts.length == 0) {
-      throw new NullPointerException("empty hosts");
+      // erroneous data
+      return;
     }
+
     addUniqueWeight(weight);
     for (String hostname : hosts) {
       addHostAndBlockWeight(hostname, weight);
@@ -146,7 +150,8 @@ public class HDFSBlocksDistribution {
    */
   private void addHostAndBlockWeight(String host, long weight) {
     if (host == null) {
-      throw new NullPointerException("Passed hostname is null");
+      // erroneous data
+      return;
     }
 
     HostAndWeight hostAndWeight = this.hostAndWeights.get(host);

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java Wed Feb 13 20:58:23 2013
@@ -33,16 +33,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.JenkinsHash;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.apache.hadoop.hbase.util.Pair;
@@ -171,14 +167,6 @@ public class HRegionInfo implements Comp
     return encodedRegionName;
   }
 
-  /** HRegionInfo for root region */
-  public static final HRegionInfo ROOT_REGIONINFO =
-    new HRegionInfo(0L, Bytes.toBytes("-ROOT-"));
-
-  /** HRegionInfo for first meta region */
-  public static final HRegionInfo FIRST_META_REGIONINFO =
-    new HRegionInfo(1L, Bytes.toBytes(".META."));
-
   private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY;
   // This flag is in the parent of a split while the parent is still referenced
   // by daughter regions.  We USED to set this flag when we disabled a table
@@ -198,6 +186,14 @@ public class HRegionInfo implements Comp
   // Current TableName
   private byte[] tableName = null;
 
+  /** HRegionInfo for root region */
+  public static final HRegionInfo ROOT_REGIONINFO =
+      new HRegionInfo(0L, Bytes.toBytes("-ROOT-"));
+
+  /** HRegionInfo for first meta region */
+  public static final HRegionInfo FIRST_META_REGIONINFO =
+      new HRegionInfo(1L, Bytes.toBytes(".META."));
+
   private void setHashCode() {
     int result = Arrays.hashCode(this.regionName);
     result ^= this.regionId;
@@ -420,6 +416,15 @@ public class HRegionInfo implements Comp
   }
 
   /**
+   * Gets the start key from the specified region name.
+   * @param regionName
+   * @return Start key.
+   */
+  public static byte[] getStartKey(final byte[] regionName) throws IOException {
+    return parseRegionName(regionName)[1];
+  }
+
+  /**
    * Separate elements of a regionName.
    * @param regionName
    * @return Array of byte[] containing tableName, startKey and id
@@ -563,54 +568,6 @@ public class HRegionInfo implements Comp
        Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY));
   }
 
-  /**
-   * @return the tableDesc
-   * @deprecated Do not use; expensive call
-   *         use HRegionInfo.getTableNameAsString() in place of
-   *         HRegionInfo.getTableDesc().getNameAsString()
-   */
-   @Deprecated
-  public HTableDescriptor getTableDesc() {
-    Configuration c = HBaseConfiguration.create();
-    c.set("fs.defaultFS", c.get(HConstants.HBASE_DIR));
-    c.set("fs.default.name", c.get(HConstants.HBASE_DIR));
-    FileSystem fs;
-    try {
-      fs = FileSystem.get(c);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-    FSTableDescriptors fstd =
-      new FSTableDescriptors(fs, new Path(c.get(HConstants.HBASE_DIR)));
-    try {
-      return fstd.get(this.tableName);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  /**
-   * @param newDesc new table descriptor to use
-   * @deprecated Do not use; expensive call
-   */
-  @Deprecated
-  public void setTableDesc(HTableDescriptor newDesc) {
-    Configuration c = HBaseConfiguration.create();
-    FileSystem fs;
-    try {
-      fs = FileSystem.get(c);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-    FSTableDescriptors fstd =
-      new FSTableDescriptors(fs, new Path(c.get(HConstants.HBASE_DIR)));
-    try {
-      fstd.add(newDesc);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
   /** @return true if this is the root region */
   public boolean isRootRegion() {
     return Bytes.equals(tableName, HRegionInfo.ROOT_REGIONINFO.getTableName());
@@ -1056,6 +1013,20 @@ public class HRegionInfo implements Comp
   }
 
   /**
+   * The latest seqnum that the server writing to meta observed when opening the region.
+   * E.g. the seqNum when the result of {@link #getServerName(Result)} was written.
+   * @param r Result to pull the seqNum from
+   * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
+   */
+  public static long getSeqNumDuringOpen(final Result r) {
+    byte[] value = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER);
+    if (value == null || value.length == 0) return HConstants.NO_SEQNUM;
+    Long result = Bytes.toLong(value);
+    if (result == null) return HConstants.NO_SEQNUM;
+    return result.longValue();
+  }
+
+  /**
    * Parses an HRegionInfo instance from the passed in stream.  Presumes the HRegionInfo was
    * serialized to the stream with {@link #toDelimitedByteArray()}
    * @param in

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java Wed Feb 13 20:58:23 2013
@@ -35,6 +35,7 @@ public class HRegionLocation implements 
   private final HRegionInfo regionInfo;
   private final String hostname;
   private final int port;
+  private final long seqNum;
   // Cache of the 'toString' result.
   private String cachedString = null;
   // Cache of the hostname + port
@@ -43,14 +44,20 @@ public class HRegionLocation implements 
   /**
    * Constructor
    * @param regionInfo the HRegionInfo for the region
-   * @param hostname Hostname
-   * @param port port
    */
   public HRegionLocation(HRegionInfo regionInfo, final String hostname,
-      final int port) {
+      final int port, final long seqNum) {
     this.regionInfo = regionInfo;
     this.hostname = hostname;
     this.port = port;
+    this.seqNum = seqNum;
+  }
+
+  /**
+   * Test constructor w/o seqNum.
+   */
+  public HRegionLocation(HRegionInfo regionInfo, final String hostname, final int port) {
+    this(regionInfo, hostname, port, 0);
   }
 
   /**
@@ -60,7 +67,8 @@ public class HRegionLocation implements 
   public synchronized String toString() {
     if (this.cachedString == null) {
       this.cachedString = "region=" + this.regionInfo.getRegionNameAsString() +
-      ", hostname=" + this.hostname + ", port=" + this.port;
+      ", hostname=" + this.hostname + ", port=" + this.port
+      + ", seqNum=" + seqNum;
     }
     return this.cachedString;
   }
@@ -105,6 +113,10 @@ public class HRegionLocation implements 
     return this.port;
   }
 
+  public long getSeqNum() {
+    return seqNum;
+  }
+
   /**
    * @return String made of hostname and port formatted as per {@link Addressing#createHostAndPortStr(String, int)}
    */

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java Wed Feb 13 20:58:23 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -38,7 +39,9 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -63,8 +66,10 @@ public class HTableDescriptor implements
    *  Version 3 adds metadata as a map where keys and values are byte[].
    *  Version 4 adds indexes
    *  Version 5 removed transactional pollution -- e.g. indexes
+   *  Version 6 changed metadata to BytesBytesPair in PB
+   *  Version 7 adds table-level configuration
    */
-  private static final byte TABLE_DESCRIPTOR_VERSION = 5;
+  private static final byte TABLE_DESCRIPTOR_VERSION = 7;
 
   private byte [] name = HConstants.EMPTY_BYTE_ARRAY;
 
@@ -75,9 +80,16 @@ public class HTableDescriptor implements
    * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
    * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
    */
-  protected final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
+  private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
     new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
 
+  /**
+   * A map which holds the configuration specific to the table.
+   * The keys of the map have the same names as config keys and override the defaults with
+   * table-specific settings. Example usage may be for compactions, etc.
+   */
+  private final Map<String, String> configuration = new HashMap<String, String>();
+
   public static final String SPLIT_POLICY = "SPLIT_POLICY";
 
   /**
@@ -234,7 +246,7 @@ public class HTableDescriptor implements
     }
     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
         values.entrySet()) {
-      this.values.put(entry.getKey(), entry.getValue());
+      setValue(entry.getKey(), entry.getValue());
     }
   }
 
@@ -293,7 +305,10 @@ public class HTableDescriptor implements
     }
     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
         desc.values.entrySet()) {
-      this.values.put(e.getKey(), e.getValue());
+      setValue(e.getKey(), e.getValue());
+    }
+    for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
+      this.configuration.put(e.getKey(), e.getValue());
     }
   }
 
@@ -331,7 +346,7 @@ public class HTableDescriptor implements
    */
   protected void setRootRegion(boolean isRoot) {
     // TODO: Make the value a boolean rather than String of boolean.
-    values.put(IS_ROOT_KEY, isRoot? TRUE: FALSE);
+    setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
   }
 
   /**
@@ -358,7 +373,7 @@ public class HTableDescriptor implements
     byte [] value = getValue(key);
     if (value != null) {
       // TODO: Make value be a boolean rather than String of boolean.
-      return Boolean.valueOf(Bytes.toString(value)).booleanValue();
+      return Boolean.valueOf(Bytes.toString(value));
     }
     return valueIfNull;
   }
@@ -372,7 +387,7 @@ public class HTableDescriptor implements
    * <code> .META. </code> region 
    */
   protected void setMetaRegion(boolean isMeta) {
-    values.put(IS_META_KEY, isMeta? TRUE: FALSE);
+    setValue(IS_META_KEY, isMeta? TRUE: FALSE);
   }
 
   /** 
@@ -488,7 +503,7 @@ public class HTableDescriptor implements
    * @see #values
    */
   public void setValue(byte[] key, byte[] value) {
-    setValue(new ImmutableBytesWritable(key), value);
+    setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
   }
 
   /*
@@ -496,8 +511,8 @@ public class HTableDescriptor implements
    * @param value The value.
    */
   private void setValue(final ImmutableBytesWritable key,
-      final byte[] value) {
-    values.put(key, new ImmutableBytesWritable(value));
+      final String value) {
+    setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
   }
 
   /*
@@ -518,30 +533,30 @@ public class HTableDescriptor implements
    */
   public void setValue(String key, String value) {
     if (value == null) {
-      remove(Bytes.toBytes(key));
+      remove(key);
     } else {
       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
     }
   }
-
+  
   /**
    * Remove metadata represented by the key from the {@link #values} map
    * 
    * @param key Key whose key and value we're to remove from HTableDescriptor
    * parameters.
    */
-  public void remove(final byte [] key) {
-    values.remove(new ImmutableBytesWritable(key));
+  public void remove(final String key) {
+    remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
   }
-  
+
   /**
    * Remove metadata represented by the key from the {@link #values} map
-   * 
+   *
    * @param key Key whose key and value we're to remove from HTableDescriptor
    * parameters.
    */
-  public void remove(final String key) {
-    remove(Bytes.toBytes(key));
+  public void remove(ImmutableBytesWritable key) {
+    values.remove(key);
   }
 
   /**
@@ -595,7 +610,7 @@ public class HTableDescriptor implements
    * 
    * @param isDeferredLogFlush
    */
-  public void setDeferredLogFlush(final boolean isDeferredLogFlush) {
+  public synchronized void setDeferredLogFlush(final boolean isDeferredLogFlush) {
     setValue(DEFERRED_LOG_FLUSH_KEY, isDeferredLogFlush? TRUE: FALSE);
     this.deferredLog = isDeferredLogFlush;
   }
@@ -621,12 +636,10 @@ public class HTableDescriptor implements
   /**
    * This get the class associated with the region split policy which 
    * determines when a region split should occur.  The class used by
-   * default is {@link org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy}
-   * which split the region base on a constant {@link #getMaxFileSize()}
+   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
    * 
    * @return the class name of the region split policy for this table.
-   * If this returns null, the default constant size based split policy
-   * is used.
+   * If this returns null, the default split policy is used.
    */
    public String getRegionSplitPolicyClassName() {
     return getValue(SPLIT_POLICY);
@@ -645,18 +658,19 @@ public class HTableDescriptor implements
 
   /** 
   * Returns the maximum size up to which a region can grow, after which a region
-   * split is triggered. The region size is represented by the size of the biggest 
+   * split is triggered. The region size is represented by the size of the biggest
    * store file in that region.
-   * 
-   * @return max hregion size for table
-   * 
+   *
+   * @return max hregion size for table, -1 if not set.
+   *
    * @see #setMaxFileSize(long)
    */
   public long getMaxFileSize() {
     byte [] value = getValue(MAX_FILESIZE_KEY);
-    if (value != null)
-      return Long.valueOf(Bytes.toString(value)).longValue();
-    return HConstants.DEFAULT_MAX_FILE_SIZE;
+    if (value != null) {
+      return Long.parseLong(Bytes.toString(value));
+    }
+    return -1;
   }
   
   /**
@@ -675,21 +689,22 @@ public class HTableDescriptor implements
    * before a split is triggered.
    */
   public void setMaxFileSize(long maxFileSize) {
-    setValue(MAX_FILESIZE_KEY, Bytes.toBytes(Long.toString(maxFileSize)));
+    setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
   }
 
   /**
    * Returns the size of the memstore after which a flush to filesystem is triggered.
-   * 
-   * @return memory cache flush size for each hregion
-   * 
+   *
+   * @return memory cache flush size for each hregion, -1 if not set.
+   *
    * @see #setMemStoreFlushSize(long)
    */
   public long getMemStoreFlushSize() {
     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
-    if (value != null)
-      return Long.valueOf(Bytes.toString(value)).longValue();
-    return DEFAULT_MEMSTORE_FLUSH_SIZE;
+    if (value != null) {
+      return Long.parseLong(Bytes.toString(value));
+    }
+    return -1;
   }
 
   /**
@@ -699,8 +714,7 @@ public class HTableDescriptor implements
    * @param memstoreFlushSize memory cache flush size for each hregion
    */
   public void setMemStoreFlushSize(long memstoreFlushSize) {
-    setValue(MEMSTORE_FLUSHSIZE_KEY,
-      Bytes.toBytes(Long.toString(memstoreFlushSize)));
+    setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
   }
 
   /**
@@ -758,13 +772,13 @@ public class HTableDescriptor implements
 
     // step 1: set partitioning and pruning
     Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
-    Set<ImmutableBytesWritable> configKeys = new TreeSet<ImmutableBytesWritable>();
+    Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
     for (ImmutableBytesWritable k : values.keySet()) {
       if (k == null || k.get() == null) continue;
       String key = Bytes.toString(k.get());
       // in this section, print out reserved keywords + coprocessor info
       if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
-        configKeys.add(k);
+        userKeys.add(k);
         continue;
       }
       // only print out IS_ROOT/IS_META if true
@@ -781,50 +795,67 @@ public class HTableDescriptor implements
     }
 
     // early exit optimization
-    if (reservedKeys.isEmpty() && configKeys.isEmpty()) return s;
+    boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
+    if (!hasAttributes && configuration.isEmpty()) return s;
 
-    // step 2: printing
-    s.append(", {TABLE_ATTRIBUTES => {");
-
-    // print all reserved keys first
-    boolean printCommaForAttr = false;
-    for (ImmutableBytesWritable k : reservedKeys) {
-      String key = Bytes.toString(k.get());
-      String value = Bytes.toString(values.get(k).get());
-      if (printCommaForAttr) s.append(", ");
-      printCommaForAttr = true;
-      s.append(key);
-      s.append(" => ");
-      s.append('\'').append(value).append('\'');
-    }
-
-    if (!configKeys.isEmpty()) {
-      // print all non-reserved, advanced config keys as a separate subset
-      if (printCommaForAttr) s.append(", ");
-      printCommaForAttr = true;
-      s.append(HConstants.CONFIG).append(" => ");
-      s.append("{");
-      boolean printCommaForCfg = false;
-      for (ImmutableBytesWritable k : configKeys) {
+    s.append(", {");
+    // step 2: printing attributes
+    if (hasAttributes) {
+      s.append("TABLE_ATTRIBUTES => {");
+
+      // print all reserved keys first
+      boolean printCommaForAttr = false;
+      for (ImmutableBytesWritable k : reservedKeys) {
         String key = Bytes.toString(k.get());
         String value = Bytes.toString(values.get(k).get());
-        if (printCommaForCfg) s.append(", ");
-        printCommaForCfg = true;
-        s.append('\'').append(key).append('\'');
+        if (printCommaForAttr) s.append(", ");
+        printCommaForAttr = true;
+        s.append(key);
         s.append(" => ");
         s.append('\'').append(value).append('\'');
       }
-      s.append("}");
+
+      if (!userKeys.isEmpty()) {
+        // print all non-reserved, advanced config keys as a separate subset
+        if (printCommaForAttr) s.append(", ");
+        printCommaForAttr = true;
+        s.append(HConstants.METADATA).append(" => ");
+        s.append("{");
+        boolean printCommaForCfg = false;
+        for (ImmutableBytesWritable k : userKeys) {
+          String key = Bytes.toString(k.get());
+          String value = Bytes.toString(values.get(k).get());
+          if (printCommaForCfg) s.append(", ");
+          printCommaForCfg = true;
+          s.append('\'').append(key).append('\'');
+          s.append(" => ");
+          s.append('\'').append(value).append('\'');
+        }
+        s.append("}");
+      }
     }
 
-    s.append("}}"); // end METHOD
+    // step 3: printing all configuration:
+    if (!configuration.isEmpty()) {
+      if (hasAttributes) {
+        s.append(", ");
+      }
+      s.append(HConstants.CONFIGURATION).append(" => ");
+      s.append('{');
+      boolean printCommaForConfig = false;
+      for (Map.Entry<String, String> e : configuration.entrySet()) {
+        if (printCommaForConfig) s.append(", ");
+        printCommaForConfig = true;
+        s.append('\'').append(e.getKey()).append('\'');
+        s.append(" => ");
+        s.append('\'').append(e.getValue()).append('\'');
+      }
+      s.append("}");
+    }
+    s.append("}"); // end METHOD
     return s;
   }
 
-  public static Map<String, String> getDefaultValues() {
-    return Collections.unmodifiableMap(DEFAULT_VALUES);
-  }
-
   /**
    * Compare the contents of the descriptor with another one passed as a parameter. 
    * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
@@ -861,6 +892,7 @@ public class HTableDescriptor implements
       }
     }
     result ^= values.hashCode();
+    result ^= configuration.hashCode();
     return result;
   }
 
@@ -881,13 +913,14 @@ public class HTableDescriptor implements
     setRootRegion(in.readBoolean());
     setMetaRegion(in.readBoolean());
     values.clear();
+    configuration.clear();
     int numVals = in.readInt();
     for (int i = 0; i < numVals; i++) {
       ImmutableBytesWritable key = new ImmutableBytesWritable();
       ImmutableBytesWritable value = new ImmutableBytesWritable();
       key.readFields(in);
       value.readFields(in);
-      values.put(key, value);
+      setValue(key, value);
     }
     families.clear();
     int numFamilies = in.readInt();
@@ -896,8 +929,17 @@ public class HTableDescriptor implements
       c.readFields(in);
       families.put(c.getName(), c);
     }
-    if (version < 4) {
-      return;
+    if (version >= 7) {
+      int numConfigs = in.readInt();
+      for (int i = 0; i < numConfigs; i++) {
+        ImmutableBytesWritable key = new ImmutableBytesWritable();
+        ImmutableBytesWritable value = new ImmutableBytesWritable();
+        key.readFields(in);
+        value.readFields(in);
+        configuration.put(
+          Bytes.toString(key.get(), key.getOffset(), key.getLength()),
+          Bytes.toString(value.get(), value.getOffset(), value.getLength()));
+      }
     }
   }
 
@@ -926,6 +968,11 @@ public class HTableDescriptor implements
       HColumnDescriptor family = it.next();
       family.write(out);
     }
+    out.writeInt(configuration.size());
+    for (Map.Entry<String, String> e : configuration.entrySet()) {
+      new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
+      new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
+    }
   }
 
   // Comparable
@@ -964,6 +1011,13 @@ public class HTableDescriptor implements
       else if (result > 0)
         result = 1;
     }
+    if (result == 0) {
+      result = this.configuration.hashCode() - other.configuration.hashCode();
+      if (result < 0)
+        result = -1;
+      else if (result > 0)
+        result = 1;
+    }
     return result;
   }
 
@@ -999,7 +1053,8 @@ public class HTableDescriptor implements
    * @see #getFamilies()
    */
   public HColumnDescriptor[] getColumnFamilies() {
-    return getFamilies().toArray(new HColumnDescriptor[0]);
+    Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
+    return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
   }
   
 
@@ -1107,10 +1162,10 @@ public class HTableDescriptor implements
     setValue(key, value);
   }
 
-  
+
   /**
    * Check if the table has an attached co-processor represented by the name className
-   * 
+   *
    * @param className - Class name of the co-processor
   * @return true if the table has a co-processor className
    */
@@ -1141,6 +1196,30 @@ public class HTableDescriptor implements
   }
 
   /**
+   * Return the list of attached co-processors, represented by their class names
+   *
+   * @return The list of co-processor class names
+   */
+  public List<String> getCoprocessors() {
+    List<String> result = new ArrayList<String>();
+    Matcher keyMatcher;
+    Matcher valueMatcher;
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
+      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
+      if (!keyMatcher.matches()) {
+        continue;
+      }
+      valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
+          .toString(e.getValue().get()));
+      if (!valueMatcher.matches()) {
+        continue;
+      }
+      result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
+    }
+    return result;
+  }
+
+  /**
    * Remove a coprocessor from those set on the table
    * @param className Class name of the co-processor
    */
@@ -1170,7 +1249,7 @@ public class HTableDescriptor implements
     }
     // if we found a match, remove it
     if (match != null)
-      this.values.remove(match);
+      remove(match);
   }
   
   /**
@@ -1218,9 +1297,9 @@ public class HTableDescriptor implements
   @Deprecated
   public void setOwnerString(String ownerString) {
     if (ownerString != null) {
-      setValue(OWNER_KEY, Bytes.toBytes(ownerString));
+      setValue(OWNER_KEY, ownerString);
     } else {
-      values.remove(OWNER_KEY);
+      remove(OWNER_KEY);
     }
   }
 
@@ -1257,7 +1336,7 @@ public class HTableDescriptor implements
     }
     int pblen = ProtobufUtil.lengthOfPBMagic();
     TableSchema.Builder builder = TableSchema.newBuilder();
-    TableSchema ts = null;
+    TableSchema ts;
     try {
       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
     } catch (InvalidProtocolBufferException e) {
@@ -1273,14 +1352,20 @@ public class HTableDescriptor implements
     TableSchema.Builder builder = TableSchema.newBuilder();
     builder.setName(ByteString.copyFrom(getName()));
     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
-      TableSchema.Attribute.Builder aBuilder = TableSchema.Attribute.newBuilder();
-      aBuilder.setName(ByteString.copyFrom(e.getKey().get()));
-      aBuilder.setValue(ByteString.copyFrom(e.getValue().get()));
+      BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
+      aBuilder.setFirst(ByteString.copyFrom(e.getKey().get()));
+      aBuilder.setSecond(ByteString.copyFrom(e.getValue().get()));
       builder.addAttributes(aBuilder.build());
     }
     for (HColumnDescriptor hcd: getColumnFamilies()) {
       builder.addColumnFamilies(hcd.convert());
     }
+    for (Map.Entry<String, String> e : this.configuration.entrySet()) {
+      NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
+      aBuilder.setName(e.getKey());
+      aBuilder.setValue(e.getValue());
+      builder.addConfiguration(aBuilder.build());
+    }
     return builder.build();
   }
 
@@ -1296,9 +1381,47 @@ public class HTableDescriptor implements
       hcds[index++] = HColumnDescriptor.convert(cfs);
     }
     HTableDescriptor htd = new HTableDescriptor(ts.getName().toByteArray(), hcds);
-    for (TableSchema.Attribute a: ts.getAttributesList()) {
-      htd.setValue(a.getName().toByteArray(), a.getValue().toByteArray());
+    for (BytesBytesPair a: ts.getAttributesList()) {
+      htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
+    }
+    for (NameStringPair a: ts.getConfigurationList()) {
+      htd.setConfiguration(a.getName(), a.getValue());
     }
     return htd;
   }
+
+  /**
+   * Getter for accessing the configuration value by key
+   */
+  public String getConfigurationValue(String key) {
+    return configuration.get(key);
+  }
+
+  /**
+   * Getter for fetching an unmodifiable {@link #configuration} map.
+   */
+  public Map<String, String> getConfiguration() {
+    // shallow pointer copy
+    return Collections.unmodifiableMap(configuration);
+  }
+
+  /**
+   * Setter for storing a configuration setting in {@link #configuration} map.
+   * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
+   * @param value String value. If null, removes the setting.
+   */
+  public void setConfiguration(String key, String value) {
+    if (value == null) {
+      removeConfiguration(key);
+    } else {
+      configuration.put(key, value);
+    }
+  }
+
+  /**
+   * Remove a config setting represented by the key from the {@link #configuration} map
+   */
+  public void removeConfiguration(final String key) {
+    configuration.remove(key);
+  }
 }

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java Wed Feb 13 20:58:23 2013
@@ -21,19 +21,8 @@ package org.apache.hadoop.hbase;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService;
-import org.apache.hadoop.hbase.security.TokenInfo;
 import org.apache.hadoop.hbase.security.KerberosInfo;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.security.TokenInfo;
 
 /**
  * Protocol that a client uses to communicate with the Master (for monitoring purposes).
@@ -43,57 +32,5 @@ import com.google.protobuf.ServiceExcept
 @TokenInfo("HBASE_AUTH_TOKEN")
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public interface MasterMonitorProtocol extends
-    MasterMonitorService.BlockingInterface, MasterProtocol {
-  public static final long VERSION = 1L;
-
-  /**
-   * Used by the client to get the number of regions that have received the
-   * updated schema
-   *
-   * @param controller Unused (set to null).
-   * @param req GetSchemaAlterStatusRequest that contains:<br>
-   * - tableName
-   * @return GetSchemaAlterStatusResponse indicating the number of regions updated.
-   *         yetToUpdateRegions is the regions that are yet to be updated totalRegions
-   *         is the total number of regions of the table
-   * @throws ServiceException
-   */
-  @Override
-  public GetSchemaAlterStatusResponse getSchemaAlterStatus(
-    RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException;
-
-  /**
-   * Get list of TableDescriptors for requested tables.
-   * @param controller Unused (set to null).
-   * @param req GetTableDescriptorsRequest that contains:<br>
-   * - tableNames: requested tables, or if empty, all are requested
-   * @return GetTableDescriptorsResponse
-   * @throws ServiceException
-   */
-  @Override
-  public GetTableDescriptorsResponse getTableDescriptors(
-      RpcController controller, GetTableDescriptorsRequest req) throws ServiceException;
-
-  /**
-   * Return cluster status.
-   * @param controller Unused (set to null).
-   * @param req GetClusterStatusRequest
-   * @return status object
-   * @throws ServiceException
-   */
-  @Override
-  public GetClusterStatusResponse getClusterStatus(RpcController controller, GetClusterStatusRequest req)
-  throws ServiceException;
-
-  /**
-   * @param c Unused (set to null).
-   * @param req IsMasterRunningRequest
-   * @return IsMasterRunningRequest that contains:<br>
-   * isMasterRunning: true if master is available
-   * @throws ServiceException
-   */
-  @Override
-  public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req)
-  throws ServiceException;
-}
+public interface MasterMonitorProtocol
+extends MasterMonitorService.BlockingInterface, MasterProtocol {}
\ No newline at end of file

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java Wed Feb 13 20:58:23 2013
@@ -16,29 +16,14 @@
  * limitations under the License.
  */
 
-// Functions implemented by all the master protocols (e.g. MasterAdminProtocol,
-// MasterMonitorProtocol).  Currently, this is only isMasterRunning, which is used,
-// on proxy creation, to check if the master has been stopped.  If it has,
-// a MasterNotRunningException is thrown back to the client, and the client retries.
-
 package org.apache.hadoop.hbase;
 
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
-import org.apache.hadoop.hbase.ipc.VersionedProtocol;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-public interface MasterProtocol extends VersionedProtocol, MasterService.BlockingInterface {
 
-  /**
-   * @param c Unused (set to null).
-   * @param req IsMasterRunningRequest
-   * @return IsMasterRunningRequest that contains:<br>
-   * isMasterRunning: true if master is available
-   * @throws ServiceException
-   */
-  public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req)
-  throws ServiceException;
-}
+/**
+ * Functions implemented by all the master protocols: e.g. {@link MasterAdminProtocol}
+ * and {@link MasterMonitorProtocol}. Currently, the only shared method is
+ * {@link #isMasterRunning(com.google.protobuf.RpcController, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)},
+ * which is used on connection setup to check if the master has been stopped.
+ */
+public interface MasterProtocol extends IpcProtocol, MasterService.BlockingInterface {}
\ No newline at end of file

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java Wed Feb 13 20:58:23 2013
@@ -31,18 +31,22 @@ import org.apache.hadoop.ipc.RemoteExcep
 @InterfaceStability.Evolving
 public class RegionMovedException extends NotServingRegionException {
   private static final Log LOG = LogFactory.getLog(RegionMovedException.class);
-  private static final long serialVersionUID = -7232903522310558397L;
+  private static final long serialVersionUID = -7232903522310558396L;
 
   private final String hostname;
   private final int port;
+  private final long locationSeqNum;
 
   private static final String HOST_FIELD = "hostname=";
   private static final String PORT_FIELD = "port=";
+  private static final String LOCATIONSEQNUM_FIELD = "locationSeqNum=";
 
-  public RegionMovedException(final String hostname, final int port) {
+  public RegionMovedException(final String hostname, final int port,
+    final long locationSeqNum) {
     super();
     this.hostname = hostname;
     this.port = port;
+    this.locationSeqNum = locationSeqNum;
   }
 
   public String getHostname() {
@@ -53,6 +57,10 @@ public class RegionMovedException extend
     return port;
   }
 
+  public long getLocationSeqNum() {
+    return locationSeqNum;
+  }
+
   /**
    * For hadoop.ipc internal call. Do NOT use.
    * We have to parse the hostname to recreate the exception.
@@ -61,24 +69,31 @@ public class RegionMovedException extend
   public RegionMovedException(String s) {
     int posHostname = s.indexOf(HOST_FIELD) + HOST_FIELD.length();
     int posPort = s.indexOf(PORT_FIELD) + PORT_FIELD.length();
+    int posSeqNum = s.indexOf(LOCATIONSEQNUM_FIELD) + LOCATIONSEQNUM_FIELD.length();
 
     String tmpHostname = null;
     int tmpPort = -1;
+    long tmpSeqNum = HConstants.NO_SEQNUM;
     try {
+      // TODO: this whole thing is extremely brittle.
       tmpHostname = s.substring(posHostname, s.indexOf(' ', posHostname));
       tmpPort = Integer.parseInt(s.substring(posPort, s.indexOf('.', posPort)));
+      tmpSeqNum = Long.parseLong(s.substring(posSeqNum, s.indexOf('.', posSeqNum)));
     } catch (Exception ignored) {
-      LOG.warn("Can't parse the hostname and the port from this string: " + s + ", "+
-        "Continuing");
+      LOG.warn("Can't parse the hostname and the port from this string: " + s + ", continuing");
     }
 
     hostname = tmpHostname;
     port = tmpPort;
+    locationSeqNum = tmpSeqNum;
   }
 
   @Override
   public String getMessage() {
-    return "Region moved to: " + HOST_FIELD + hostname + " " + PORT_FIELD + port + ".";
+    // TODO: deserialization above depends on this. That is bad, but also means this
+    // should be modified carefully.
+    return "Region moved to: " + HOST_FIELD + hostname + " " + PORT_FIELD + port + ". As of "
+      + LOCATIONSEQNUM_FIELD + locationSeqNum + ".";
   }
 
   /**

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java Wed Feb 13 20:58:23 2013
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.ipc.VersionedProtocol;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
 import org.apache.hadoop.hbase.security.TokenInfo;
 import org.apache.hadoop.hbase.security.KerberosInfo;
@@ -33,7 +32,5 @@ import org.apache.hadoop.hbase.security.
 @TokenInfo("HBASE_AUTH_TOKEN")
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public interface RegionServerStatusProtocol extends
-    RegionServerStatusService.BlockingInterface, VersionedProtocol {
-  public static final long VERSION = 1L;
-}
+public interface RegionServerStatusProtocol
+extends RegionServerStatusService.BlockingInterface, IpcProtocol {}
\ No newline at end of file

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java Wed Feb 13 20:58:23 2013
@@ -71,7 +71,7 @@ public class ServerName implements Compa
    */
   public static final String SERVERNAME_SEPARATOR = ",";
 
-  public static Pattern SERVERNAME_PATTERN =
+  public static final Pattern SERVERNAME_PATTERN =
     Pattern.compile("[^" + SERVERNAME_SEPARATOR + "]+" +
       SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX +
       SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX + "$");

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java Wed Feb 13 20:58:23 2013
@@ -29,73 +29,63 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.Private
 public class SplitLogCounters {
   //SplitLogManager counters
-  public static AtomicLong tot_mgr_log_split_batch_start = new AtomicLong(0);
-  public static AtomicLong tot_mgr_log_split_batch_success =
-    new AtomicLong(0);
-  public static AtomicLong tot_mgr_log_split_batch_err = new AtomicLong(0);
-  public static AtomicLong tot_mgr_new_unexpected_hlogs = new AtomicLong(0);
-  public static AtomicLong tot_mgr_log_split_start = new AtomicLong(0);
-  public static AtomicLong tot_mgr_log_split_success = new AtomicLong(0);
-  public static AtomicLong tot_mgr_log_split_err = new AtomicLong(0);
-  public static AtomicLong tot_mgr_node_create_queued = new AtomicLong(0);
-  public static AtomicLong tot_mgr_node_create_result = new AtomicLong(0);
-  public static AtomicLong tot_mgr_node_already_exists = new AtomicLong(0);
-  public static AtomicLong tot_mgr_node_create_err = new AtomicLong(0);
-  public static AtomicLong tot_mgr_node_create_retry = new AtomicLong(0);
-  public static AtomicLong tot_mgr_get_data_queued = new AtomicLong(0);
-  public static AtomicLong tot_mgr_get_data_result = new AtomicLong(0);
-  public static AtomicLong tot_mgr_get_data_nonode = new AtomicLong(0);
-  public static AtomicLong tot_mgr_get_data_err = new AtomicLong(0);
-  public static AtomicLong tot_mgr_get_data_retry = new AtomicLong(0);
-  public static AtomicLong tot_mgr_node_delete_queued = new AtomicLong(0);
-  public static AtomicLong tot_mgr_node_delete_result = new AtomicLong(0);
-  public static AtomicLong tot_mgr_node_delete_err = new AtomicLong(0);
-  public static AtomicLong tot_mgr_resubmit = new AtomicLong(0);
-  public static AtomicLong tot_mgr_resubmit_failed = new AtomicLong(0);
-  public static AtomicLong tot_mgr_null_data = new AtomicLong(0);
-  public static AtomicLong tot_mgr_orphan_task_acquired = new AtomicLong(0);
-  public static AtomicLong tot_mgr_wait_for_zk_delete = new AtomicLong(0);
-  public static AtomicLong tot_mgr_unacquired_orphan_done = new AtomicLong(0);
-  public static AtomicLong tot_mgr_resubmit_threshold_reached =
-    new AtomicLong(0);
-  public static AtomicLong tot_mgr_missing_state_in_delete =
-    new AtomicLong(0);
-  public static AtomicLong tot_mgr_heartbeat = new AtomicLong(0);
-  public static AtomicLong tot_mgr_rescan = new AtomicLong(0);
-  public static AtomicLong tot_mgr_rescan_deleted = new AtomicLong(0);
-  public static AtomicLong tot_mgr_task_deleted = new AtomicLong(0);
-  public static AtomicLong tot_mgr_resubmit_unassigned = new AtomicLong(0);
-  public static AtomicLong tot_mgr_relist_logdir = new AtomicLong(0);
-  public static AtomicLong tot_mgr_resubmit_dead_server_task =
-    new AtomicLong(0);
+  public final static AtomicLong tot_mgr_log_split_batch_start = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_log_split_batch_success = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_log_split_batch_err = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_new_unexpected_hlogs = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_log_split_start = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_log_split_success = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_log_split_err = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_node_create_queued = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_node_create_result = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_node_already_exists = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_node_create_err = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_node_create_retry = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_get_data_queued = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_get_data_result = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_get_data_nonode = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_get_data_err = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_get_data_retry = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_node_delete_queued = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_node_delete_result = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_node_delete_err = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_resubmit = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_resubmit_failed = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_null_data = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_orphan_task_acquired = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_wait_for_zk_delete = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_unacquired_orphan_done = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_resubmit_threshold_reached = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_missing_state_in_delete = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_heartbeat = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_rescan = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_rescan_deleted = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_task_deleted = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_resubmit_unassigned = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_relist_logdir = new AtomicLong(0);
+  public final static AtomicLong tot_mgr_resubmit_dead_server_task = new AtomicLong(0);
 
   // SplitLogWorker counters
-  public static AtomicLong tot_wkr_failed_to_grab_task_no_data =
-    new AtomicLong(0);
-  public static AtomicLong tot_wkr_failed_to_grab_task_exception =
-    new AtomicLong(0);
-  public static AtomicLong tot_wkr_failed_to_grab_task_owned =
-    new AtomicLong(0);
-  public static AtomicLong tot_wkr_failed_to_grab_task_lost_race =
-    new AtomicLong(0);
-  public static AtomicLong tot_wkr_task_acquired = new AtomicLong(0);
-  public static AtomicLong tot_wkr_task_resigned = new AtomicLong(0);
-  public static AtomicLong tot_wkr_task_done = new AtomicLong(0);
-  public static AtomicLong tot_wkr_task_err = new AtomicLong(0);
-  public static AtomicLong tot_wkr_task_heartbeat = new AtomicLong(0);
-  public static AtomicLong tot_wkr_task_acquired_rescan = new AtomicLong(0);
-  public static AtomicLong tot_wkr_get_data_queued = new AtomicLong(0);
-  public static AtomicLong tot_wkr_get_data_result = new AtomicLong(0);
-  public static AtomicLong tot_wkr_get_data_retry = new AtomicLong(0);
-  public static AtomicLong tot_wkr_preempt_task = new AtomicLong(0);
-  public static AtomicLong tot_wkr_task_heartbeat_failed = new AtomicLong(0);
-  public static AtomicLong tot_wkr_final_transistion_failed =
-    new AtomicLong(0);
+  public final static AtomicLong tot_wkr_failed_to_grab_task_no_data = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_failed_to_grab_task_exception = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_failed_to_grab_task_owned = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_failed_to_grab_task_lost_race = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_task_acquired = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_task_resigned = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_task_done = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_task_err = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_task_heartbeat = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_task_acquired_rescan = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_get_data_queued = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_get_data_result = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_get_data_retry = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_preempt_task = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_task_heartbeat_failed = new AtomicLong(0);
+  public final static AtomicLong tot_wkr_final_transition_failed = new AtomicLong(0);
 
   public static void resetCounters() throws Exception {
     Class<?> cl = (new SplitLogCounters()).getClass();
-    Field[] flds = cl.getDeclaredFields();
-    for (Field fld : flds) {
+    for (Field fld : cl.getDeclaredFields()) {
       ((AtomicLong)fld.get(null)).set(0);
     }
   }

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java Wed Feb 13 20:58:23 2013
@@ -122,11 +122,6 @@ public class CatalogTracker {
    */
   private ServerName metaLocation;
 
-  /*
-   * Timeout waiting on root or meta to be set.
-   */
-  private final int defaultTimeout;
-
   private boolean stopped = false;
 
   static final byte [] ROOT_REGION_NAME =
@@ -162,33 +157,13 @@ public class CatalogTracker {
    * @throws IOException 
    */
   public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
-      final Abortable abortable)
+      Abortable abortable)
   throws IOException {
-    this(zk, conf, abortable,
-      conf.getInt("hbase.catalogtracker.default.timeout", 1000));
-  }
-
-  /**
-   * Constructs the catalog tracker.  Find current state of catalog tables.
-   * Begin active tracking by executing {@link #start()} post construction.
-   * @param zk If zk is null, we'll create an instance (and shut it down
-   * when {@link #stop()} is called) else we'll use what is passed.
-   * @param conf
-   * @param abortable If fatal exception we'll call abort on this.  May be null.
-   * If it is we'll use the Connection associated with the passed
-   * {@link Configuration} as our Abortable.
-   * @param defaultTimeout Timeout to use.  Pass zero for no timeout
-   * ({@link Object#wait(long)} when passed a <code>0</code> waits for ever).
-   * @throws IOException
-   */
-  public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
-      Abortable abortable, final int defaultTimeout)
-  throws IOException {
-    this(zk, conf, HConnectionManager.getConnection(conf), abortable, defaultTimeout);
+    this(zk, conf, HConnectionManager.getConnection(conf), abortable);
   }
 
   public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
-      HConnection connection, Abortable abortable, final int defaultTimeout)
+      HConnection connection, Abortable abortable)
   throws IOException {
     this.connection = connection;
     if (abortable == null) {
@@ -226,7 +201,6 @@ public class CatalogTracker {
         ct.resetMetaLocation();
       }
     };
-    this.defaultTimeout = defaultTimeout;
   }
 
   /**
@@ -364,24 +338,6 @@ public class CatalogTracker {
   }
 
   /**
-   * Gets a connection to the server hosting root, as reported by ZooKeeper,
-   * waiting for the default timeout specified on instantiation.
-   * @see #waitForRoot(long) for additional information
-   * @return connection to server hosting root
-   * @throws NotAllMetaRegionsOnlineException if timed out waiting
-   * @throws IOException
-   * @deprecated Use #getRootServerConnection(long)
-   */
-  public AdminProtocol waitForRootServerConnectionDefault()
-  throws NotAllMetaRegionsOnlineException, IOException {
-    try {
-      return getRootServerConnection(this.defaultTimeout);
-    } catch (InterruptedException e) {
-      throw new NotAllMetaRegionsOnlineException("Interrupted");
-    }
-  }
-
-  /**
    * Gets a connection to the server currently hosting <code>.META.</code> or
    * null if location is not currently available.
    * <p>
@@ -470,10 +426,10 @@ public class CatalogTracker {
    */
   public ServerName waitForMeta(long timeout)
   throws InterruptedException, IOException, NotAllMetaRegionsOnlineException {
-    long stop = System.currentTimeMillis() + timeout;
+    long stop = timeout == 0 ? Long.MAX_VALUE : System.currentTimeMillis() + timeout;
     long waitTime = Math.min(50, timeout);
     synchronized (metaAvailable) {
-      while(!stopped && (timeout == 0 || System.currentTimeMillis() < stop)) {
+      while(!stopped && System.currentTimeMillis() < stop) {
         if (getMetaServerConnection() != null) {
           return metaLocation;
         }
@@ -503,25 +459,6 @@ public class CatalogTracker {
   }
 
   /**
-   * Gets a connection to the server hosting meta, as reported by ZooKeeper,
-   * waiting up to the specified timeout for availability.
-   * Used in tests.
-   * @see #waitForMeta(long) for additional information
-   * @return connection to server hosting meta
-   * @throws NotAllMetaRegionsOnlineException if timed out or interrupted
-   * @throws IOException
-   * @deprecated Does not retry; use an HTable instance instead.
-   */
-  public AdminProtocol waitForMetaServerConnectionDefault()
-  throws NotAllMetaRegionsOnlineException, IOException {
-    try {
-      return getCachedConnection(waitForMeta(defaultTimeout));
-    } catch (InterruptedException e) {
-      throw new NotAllMetaRegionsOnlineException("Interrupted");
-    }
-  }
-
-  /**
    * Called when we figure current meta is off (called from zk callback).
    */
   public void resetMetaLocation() {

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java Wed Feb 13 20:58:23 2013
@@ -211,8 +211,8 @@ public class MetaEditor {
    * Adds a (single) META row for the specified new region and its daughters. Note that this does
    * not add its daughter's as different rows, but adds information about the daughters
    * in the same row as the parent. Use
-   * {@link #offlineParentInMeta(CatalogTracker, HRegionInfo, HRegionInfo, HRegionInfo)}
-   * and {@link #addDaughter(CatalogTracker, HRegionInfo, ServerName)}  if you want to do that.
+   * {@link #offlineParentInMeta(CatalogTracker, HRegionInfo, HRegionInfo, HRegionInfo)} and
+   * {@link #addDaughter(CatalogTracker, HRegionInfo, ServerName, long)}  if you want to do that.
    * @param meta the HTable for META
    * @param regionInfo region information
    * @param splitA first split daughter of the parent regionInfo
@@ -272,12 +272,20 @@ public class MetaEditor {
     }
   }
 
+  /**
+   * Adds a daughter region entry to meta.
+   * @param regionInfo the region to put
+   * @param sn the location of the region
+   * @param openSeqNum the latest sequence number obtained when the region was open
+   */
   public static void addDaughter(final CatalogTracker catalogTracker,
-      final HRegionInfo regionInfo, final ServerName sn)
+      final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
   throws NotAllMetaRegionsOnlineException, IOException {
     Put put = new Put(regionInfo.getRegionName());
     addRegionInfo(put, regionInfo);
-    if (sn != null) addLocation(put, sn);
+    if (sn != null) {
+      addLocation(put, sn, openSeqNum);
+    }
     putToMetaTable(catalogTracker, put);
     LOG.info("Added daughter " + regionInfo.getRegionNameAsString() +
       (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
@@ -293,15 +301,16 @@ public class MetaEditor {
    * @param catalogTracker catalog tracker
    * @param regionInfo region to update location of
    * @param sn Server name
+   * @param openSeqNum the latest sequence number obtained when the region was open
    * @throws IOException
    * @throws ConnectException Usually because the regionserver carrying .META.
    * is down.
    * @throws NullPointerException Because no -ROOT- server connection
    */
   public static void updateMetaLocation(CatalogTracker catalogTracker,
-      HRegionInfo regionInfo, ServerName sn)
+      HRegionInfo regionInfo, ServerName sn, long openSeqNum)
   throws IOException, ConnectException {
-    updateLocation(catalogTracker, regionInfo, sn);
+    updateLocation(catalogTracker, regionInfo, sn, openSeqNum);
   }
 
   /**
@@ -317,9 +326,9 @@ public class MetaEditor {
    * @throws IOException
    */
   public static void updateRegionLocation(CatalogTracker catalogTracker,
-      HRegionInfo regionInfo, ServerName sn)
+      HRegionInfo regionInfo, ServerName sn, long updateSeqNum)
   throws IOException {
-    updateLocation(catalogTracker, regionInfo, sn);
+    updateLocation(catalogTracker, regionInfo, sn, updateSeqNum);
   }
 
   /**
@@ -331,14 +340,15 @@ public class MetaEditor {
    * @param catalogTracker
    * @param regionInfo region to update location of
    * @param sn Server name
+   * @param openSeqNum the latest sequence number obtained when the region was open
    * @throws IOException In particular could throw {@link java.net.ConnectException}
    * if the server is down on other end.
    */
   private static void updateLocation(final CatalogTracker catalogTracker,
-      HRegionInfo regionInfo, ServerName sn)
+      HRegionInfo regionInfo, ServerName sn, long openSeqNum)
   throws IOException {
     Put put = new Put(regionInfo.getRegionName());
-    addLocation(put, sn);
+    addLocation(put, sn, openSeqNum);
     putToCatalogTable(catalogTracker, put);
     LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
       " with server=" + sn);
@@ -430,11 +440,13 @@ public class MetaEditor {
     return p;
   }
 
-  private static Put addLocation(final Put p, final ServerName sn) {
+  private static Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
     p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
       Bytes.toBytes(sn.getHostAndPort()));
     p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
       Bytes.toBytes(sn.getStartcode()));
+    p.add(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER,
+        Bytes.toBytes(openSeqNum));
     return p;
   }
 }

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java Wed Feb 13 20:58:23 2013
@@ -115,7 +115,6 @@ public class MetaReader {
       public boolean visit(Result r) throws IOException {
         if (r ==  null || r.isEmpty()) return true;
         Pair<HRegionInfo, ServerName> region = HRegionInfo.getHRegionInfoAndServerName(r);
-        if (region == null) return true;
         HRegionInfo hri = region.getFirst();
         if (hri  == null) return true;
         if (hri.getTableNameAsString() == null) return true;

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Action.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Action.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Action.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Action.java Wed Feb 13 20:58:23 2013
@@ -28,7 +28,7 @@ import org.apache.hadoop.classification.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class Action<R> implements Comparable {
+public class Action<R> implements Comparable<R> {
 
   private Row action;
   private int originalIndex;
@@ -77,4 +77,12 @@ public class Action<R> implements Compar
   public int compareTo(Object o) {
     return action.compareTo(((Action) o).getAction());
   }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (obj == null || getClass() != obj.getClass()) return false;
+    Action<?> other = (Action<?>) obj;
+    return compareTo(other) == 0;
+  }
 }

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java Wed Feb 13 20:58:23 2013
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hbase.client;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.IpcProtocol;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.security.TokenInfo;
 import org.apache.hadoop.hbase.security.KerberosInfo;
@@ -31,7 +31,5 @@ import org.apache.hadoop.hbase.security.
   serverPrincipal = "hbase.regionserver.kerberos.principal")
 @TokenInfo("HBASE_AUTH_TOKEN")
 @InterfaceAudience.Private
-public interface AdminProtocol extends
-    AdminService.BlockingInterface, VersionedProtocol {
-  public static final long VERSION = 1L;
-}
+public interface AdminProtocol
+extends AdminService.BlockingInterface, IpcProtocol {}
\ No newline at end of file



Mime
View raw message