hbase-commits mailing list archives

From: st...@apache.org
Subject: svn commit: r677517 [1/6] - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/ipc/ src/java/org/apache/hadoop/hbase/master/ src/java/org/apache/hadoop/hbase/regionserv...
Date: Thu, 17 Jul 2008 07:17:28 GMT
Author: stack
Date: Thu Jul 17 00:17:26 2008
New Revision: 677517

URL: http://svn.apache.org/viewvc?rev=677517&view=rev
Log:
HBASE-62 Allow user to add arbitrary key/value pairs to table and column descriptors, and HBASE-34, 42, 43, and 700

Added:
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/FlushRequester.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/HColumnDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/HConstants.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/HLog.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/HLogEdit.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/HLogKey.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/HRegion.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/HRegionInfo.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/HStore.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/HStoreFile.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/HStoreScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/HTableDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/LogRollListener.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/Memcache.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/MetaUtils.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/RegionHistorian.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/StoreFileScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/package.html
Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/BloomFilterDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TableHandler.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestTable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java
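
Before the per-file diffs, a note on what HBASE-62 looks like from the client side: both descriptor classes now keep their settings in a map of byte[] keys and values, exposed through the getValue/setValue pairs added below. A minimal sketch of attaching arbitrary metadata; the table name, family name, and attribute keys here are illustrative, not part of this commit:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorMetadataSketch {
      public static void main(String[] args) {
        HTableDescriptor table = new HTableDescriptor(Bytes.toBytes("webtable"));
        // Full constructor, as used by HTableDescriptor's static initializer below.
        HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("contents:"),
            3, HColumnDescriptor.CompressionType.NONE, false, false,
            Integer.MAX_VALUE, HConstants.FOREVER, false);

        // HBASE-62: arbitrary key/value pairs, stored in the descriptors'
        // metadata maps and carried through Writable serialization.
        table.setValue("OWNER", "analytics-team");
        family.setValue("SOURCE", "crawler");
        table.addFamily(family);

        System.out.println(table.getValue("OWNER"));    // analytics-team
        System.out.println(family.getValue("SOURCE"));  // crawler
        System.out.println(table);  // toString() now prints every metadata pair
      }
    }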

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Thu Jul 17 00:17:26 2008
@@ -186,6 +186,13 @@
    HBASE-744   BloomFilter serialization/deserialization broken
    HBASE-742   Column length limit is not enforced (Jean-Daniel Cryans via Stack)
    HBASE-737   Scanner: every cell in a row has the same timestamp
+   HBASE-700   hbase.io.index.interval need be configuratable in column family
+               (Andrew Purtell via Stack)
+   HBASE-62    Allow user add arbitrary key/value pairs to table and column
+               descriptors (Andrew Purtell via Stack)
+   HBASE-34    Set memcache flush size per column (Andrew Purtell via Stack)
+   HBASE-42    Set region split size on table creation (Andrew Purtell via Stack)
+   HBASE-43    Add a read-only attribute to columns (Andrew Purtell via Stack)
 
   IMPROVEMENTS
    HBASE-559   MR example job to count table rows

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/BloomFilterDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/BloomFilterDescriptor.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/BloomFilterDescriptor.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/BloomFilterDescriptor.java Thu Jul 17 00:17:26 2008
@@ -73,7 +73,19 @@
   public BloomFilterDescriptor() {
     super();
   }
-  
+
+  /*
+   * Constructor.
+   * <p>
+   * Creates a deep copy of the supplied BloomFilterDescriptor.
+   */
+  public BloomFilterDescriptor(BloomFilterDescriptor desc) {
+    super();
+    this.filterType = desc.filterType;
+    this.nbHash = desc.nbHash;
+    this.vectorSize = desc.vectorSize;
+  }
+
   /**
    * Creates a BloomFilterDescriptor for the specified type of filter, fixes
    * the number of hash functions to 4 and computes a vector size using:

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java Thu Jul 17 00:17:26 2008
@@ -22,7 +22,10 @@
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
@@ -39,10 +42,11 @@
 public class HColumnDescriptor implements WritableComparable {
   // For future backward compatibility
 
-  // Version 3 was when column names becaome byte arrays and when we picked up
+  // Version 3 was when column names become byte arrays and when we picked up
   // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
   // Version 5 was when bloom filter descriptors were removed.
-  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)5;
+  // Version 6 adds metadata as a map where keys and values are byte[].
+  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)6;
 
   /** 
    * The type of compression.
@@ -57,14 +61,17 @@
     BLOCK
   }
 
-  // Defines for jruby/shell
   public static final String COMPRESSION = "COMPRESSION";
   public static final String IN_MEMORY = "IN_MEMORY";
   public static final String BLOCKCACHE = "BLOCKCACHE";
   public static final String LENGTH = "LENGTH";
   public static final String TTL = "TTL";
+  public static final String VERSIONS = "VERSIONS";
   public static final String BLOOMFILTER = "BLOOMFILTER";
   public static final String FOREVER = "FOREVER";
+  public static final String MAPFILE_INDEX_INTERVAL =
+      "MAPFILE_INDEX_INTERVAL";
+  public static final String MEMCACHE_FLUSHSIZE = "MEMCACHE_FLUSHSIZE";
 
   /**
    * Default compression type.
@@ -104,20 +111,16 @@
 
   // Column family name
   private byte [] name;
-  // Number of versions to keep
-  private int maxVersions = DEFAULT_VERSIONS;
-  // Compression setting if any
-  private CompressionType compressionType = DEFAULT_COMPRESSION;
-  // Serve reads from in-memory cache
-  private boolean inMemory = DEFAULT_IN_MEMORY;
-  // Serve reads from in-memory block cache
-  private boolean blockCacheEnabled = DEFAULT_BLOCKCACHE;
-  // Maximum value size
-  private int maxValueLength = DEFAULT_LENGTH;
-  // Time to live of cell contents, in seconds from last timestamp
-  private int timeToLive = DEFAULT_TTL;
-  // True if bloom filter was specified
-  private boolean bloomFilter = false;
+
+  /**
+   * Default mapfile index interval.
+   */
+  public static final int DEFAULT_MAPFILE_INDEX_INTERVAL = 128;
+
+  // Column metadata
+  protected Map<ImmutableBytesWritable,ImmutableBytesWritable> values =
+    new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
+
 
   /**
    * Default constructor. Must be present for Writable.
@@ -160,6 +163,21 @@
   }
 
   /**
+   * Constructor.
+   * Makes a deep copy of the supplied descriptor. 
+   * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
+   * @param desc The descriptor.
+   */
+  public HColumnDescriptor(HColumnDescriptor desc) {
+    super();
+    this.name = desc.name.clone();
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+        desc.values.entrySet()) {
+      this.values.put(e.getKey(), e.getValue());
+    }
+  }
+
+  /**
    * Constructor
    * @param columnName Column family name.  Must have the ':' ending.
    * @param maxVersions Maximum number of versions to keep
@@ -188,13 +206,13 @@
       // Until there is support, consider 0 or < 0 -- a configuration error.
       throw new IllegalArgumentException("Maximum versions must be positive");
     }
-    this.maxVersions = maxVersions;
-    this.inMemory = inMemory;
-    this.blockCacheEnabled = blockCacheEnabled;
-    this.maxValueLength = maxValueLength;
-    this.timeToLive = timeToLive;
-    this.bloomFilter = bloomFilter;
-    this.compressionType = compression;
+    setMaxVersions(maxVersions);
+    setInMemory(inMemory);
+    setBlockCacheEnabled(blockCacheEnabled);
+    setMaxValueLength(maxValueLength);
+    setTimeToLive(timeToLive);
+    setCompressionType(compression);
+    setBloomfilter(bloomFilter);
   }
   
   private static byte [] stripColon(final byte [] n) {
@@ -203,7 +221,7 @@
     System.arraycopy(n, 0, result, 0, n.length - 1);
     return result;
   }
-  
+
   /**
    * @param b Family name.
    * @return <code>b</code>
@@ -238,77 +256,229 @@
   }
 
   /**
+   * @return Name of this column family with colon as required by client API
+   */
+  public byte [] getNameWithColon() {
+    return HStoreKey.addDelimiter(this.name);
+  }
+
+  /**
    * @return Name of this column family
    */
   public String getNameAsString() {
     return Bytes.toString(this.name);
   }
 
+  /**
+   * @param key The key.
+   * @return The value.
+   */
+  public byte[] getValue(byte[] key) {
+    ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
+    if (ibw == null)
+      return null;
+    return ibw.get();
+  }
+
+  /**
+   * @param key The key.
+   * @return The value as a string.
+   */
+  public String getValue(String key) {
+    byte[] value = getValue(Bytes.toBytes(key));
+    if (value == null)
+      return null;
+    return Bytes.toString(value);
+  }
+
+  /**
+   * @param key The key.
+   * @param value The value.
+   */
+  public void setValue(byte[] key, byte[] value) {
+    values.put(new ImmutableBytesWritable(key),
+      new ImmutableBytesWritable(value));
+  }
+
+  /**
+   * @param key The key.
+   * @param value The value.
+   */
+  public void setValue(String key, String value) {
+    setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+  }
+
   /** @return compression type being used for the column family */
   public CompressionType getCompression() {
-    return this.compressionType;
+    String value = getValue(COMPRESSION);
+    if (value != null) {
+      if (value.equalsIgnoreCase("BLOCK"))
+        return CompressionType.BLOCK;
+      else if (value.equalsIgnoreCase("RECORD"))
+        return CompressionType.RECORD;
+    }
+    return CompressionType.NONE;
   }
   
   /** @return maximum number of versions */
   public int getMaxVersions() {
-    return this.maxVersions;
+    String value = getValue(VERSIONS);
+    if (value != null)
+      return Integer.valueOf(value);
+    return DEFAULT_VERSIONS;
+  }
+
+  /**
+   * @param maxVersions maximum number of versions
+   */
+  public void setMaxVersions(int maxVersions) {
+    setValue(VERSIONS, Integer.toString(maxVersions));
   }
   
   /**
    * @return Compression type setting.
    */
   public CompressionType getCompressionType() {
-    return this.compressionType;
+    return getCompression();
+  }
+
+  /**
+   * @param type Compression type setting.
+   */
+  public void setCompressionType(CompressionType type) {
+    String compressionType;
+    switch (type) {
+      case BLOCK:  compressionType = "BLOCK";   break;
+      case RECORD: compressionType = "RECORD";  break;
+      default:     compressionType = "NONE";    break;
+    }
+    setValue(COMPRESSION, compressionType);
   }
 
   /**
    * @return True if we are to keep all in use HRegionServer cache.
    */
   public boolean isInMemory() {
-    return this.inMemory;
+    String value = getValue(IN_MEMORY);
+    if (value != null)
+      return Boolean.valueOf(value);
+    return DEFAULT_IN_MEMORY;
   }
   
   /**
+   * @param inMemory True if we are to keep all values in the HRegionServer
+   * cache
+   */
+  public void setInMemory(boolean inMemory) {
+    setValue(IN_MEMORY, Boolean.toString(inMemory));
+  }
+
+  /**
    * @return Maximum value length.
    */
   public int getMaxValueLength() {
-    return this.maxValueLength;
+    String value = getValue(LENGTH);
+    if (value != null)
+      return Integer.valueOf(value);
+    return DEFAULT_LENGTH;
+  }
+
+  /**
+   * @param maxLength Maximum value length.
+   */
+  public void setMaxValueLength(int maxLength) {
+    setValue(LENGTH, Integer.toString(maxLength));
   }
 
   /**
    * @return Time to live.
    */
   public int getTimeToLive() {
-    return this.timeToLive;
+    String value = getValue(TTL);
+    if (value != null)
+      return Integer.valueOf(value);
+    return DEFAULT_TTL;
+  }
+
+  /**
+   * @param timeToLive
+   */
+  public void setTimeToLive(int timeToLive) {
+    setValue(TTL, Integer.toString(timeToLive));
   }
 
   /**
    * @return True if MapFile blocks should be cached.
    */
   public boolean isBlockCacheEnabled() {
-    return blockCacheEnabled;
+    String value = getValue(BLOCKCACHE);
+    if (value != null)
+      return Boolean.valueOf(value);
+    return DEFAULT_BLOCKCACHE;
+  }
+
+  /**
+   * @param blockCacheEnabled True if MapFile blocks should be cached.
+   */
+  public void setBlockCacheEnabled(boolean blockCacheEnabled) {
+    setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
   }
 
   /**
    * @return true if a bloom filter is enabled
    */
-  public boolean isBloomFilterEnabled() {
-    return this.bloomFilter;
+  public boolean isBloomfilter() {
+    String value = getValue(BLOOMFILTER);
+    if (value != null)
+      return Boolean.valueOf(value);
+    return DEFAULT_BLOOMFILTER;
+  }
+
+  /**
+   * @param onOff Enable/Disable bloom filter
+   */
+  public void setBloomfilter(final boolean onOff) {
+    setValue(BLOOMFILTER, Boolean.toString(onOff));
+  }
+
+  /**
+   * @return The number of entries that are added to the store MapFile before
+   * an index entry is added.
+   */
+  public int getMapFileIndexInterval() {
+    String value = getValue(MAPFILE_INDEX_INTERVAL);
+    if (value != null)
+      return Integer.valueOf(value);
+    return DEFAULT_MAPFILE_INDEX_INTERVAL;
+  }
+
+  /**
+   * @param interval The number of entries that are added to the store MapFile before
+   * an index entry is added.
+   */
+  public void setMapFileIndexInterval(int interval) {
+    setValue(MAPFILE_INDEX_INTERVAL, Integer.toString(interval));
   }
 
   /** {@inheritDoc} */
   @Override
   public String toString() {
-    return "{" + HConstants.NAME + " => '" + Bytes.toString(name) +
-      "', " + HConstants.VERSIONS + " => " + maxVersions +
-      ", " + COMPRESSION + " => '" + this.compressionType +
-      "', " + IN_MEMORY + " => " + inMemory +
-      ", " + BLOCKCACHE + " => " + blockCacheEnabled +
-      ", " + LENGTH + " => " + maxValueLength +
-      ", " + TTL + " => " +
-          (timeToLive == HConstants.FOREVER ? "FOREVER" : 
-              Integer.toString(timeToLive)) +
-      ", " + BLOOMFILTER + " => " + bloomFilter + "}";
+    StringBuffer s = new StringBuffer();
+    s.append('{');
+    s.append(HConstants.NAME);
+    s.append(" => '");
+    s.append(Bytes.toString(name));
+    s.append("'");
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+        values.entrySet()) {
+      s.append(", ");
+      s.append(Bytes.toString(e.getKey().get()));
+      s.append(" => '");
+      s.append(Bytes.toString(e.getValue().get()));
+      s.append("'");
+    }
+    s.append('}');
+    return s.toString();
   }
   
   /** {@inheritDoc} */
@@ -321,14 +491,8 @@
   @Override
   public int hashCode() {
     int result = Bytes.hashCode(this.name);
-    result ^= Integer.valueOf(this.maxVersions).hashCode();
-    result ^= this.compressionType.hashCode();
-    result ^= Boolean.valueOf(this.inMemory).hashCode();
-    result ^= Boolean.valueOf(this.blockCacheEnabled).hashCode();
-    result ^= Integer.valueOf(this.maxValueLength).hashCode();
-    result ^= Integer.valueOf(this.timeToLive).hashCode();
-    result ^= Boolean.valueOf(this.bloomFilter).hashCode();
     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
+    result ^= values.hashCode();
     return result;
   }
   
@@ -336,37 +500,51 @@
 
   /** {@inheritDoc} */
   public void readFields(DataInput in) throws IOException {
-    int versionNumber = in.readByte();
-    if (versionNumber <= 2) {
-      Text t = new Text();
-      t.readFields(in);
-      this.name = t.getBytes();
-      if (HStoreKey.getFamilyDelimiterIndex(this.name) > 0) {
-        this.name = stripColon(this.name);
+    int version = in.readByte();
+    if (version < 6) {
+      if (version <= 2) {
+        Text t = new Text();
+        t.readFields(in);
+        this.name = t.getBytes();
+        if (HStoreKey.getFamilyDelimiterIndex(this.name) > 0) {
+          this.name = stripColon(this.name);
+        }
+      } else {
+        this.name = Bytes.readByteArray(in);
+      }
+      this.values.clear();
+      setMaxVersions(in.readInt());
+      int ordinal = in.readInt();
+      setCompressionType(CompressionType.values()[ordinal]);
+      setInMemory(in.readBoolean());
+      setMaxValueLength(in.readInt());
+      setBloomfilter(in.readBoolean());
+      if (isBloomfilter() && version < 5) {
+        // If a bloomFilter is enabled and the column descriptor is less than
+        // version 5, we need to skip over it to read the rest of the column
+        // descriptor. There are no BloomFilterDescriptors written to disk for
+        // column descriptors with a version number >= 5
+        BloomFilterDescriptor junk = new BloomFilterDescriptor();
+        junk.readFields(in);
+      }
+      if (version > 1) {
+        setBlockCacheEnabled(in.readBoolean());
+      }
+      if (version > 2) {
+       setTimeToLive(in.readInt());
       }
     } else {
+      // version 6+
       this.name = Bytes.readByteArray(in);
-    }
-    this.maxVersions = in.readInt();
-    int ordinal = in.readInt();
-    this.compressionType = CompressionType.values()[ordinal];
-    this.inMemory = in.readBoolean();
-    this.maxValueLength = in.readInt();
-    this.bloomFilter = in.readBoolean();
-    if (this.bloomFilter && versionNumber < 5) {
-      // If a bloomFilter is enabled and the column descriptor is less than
-      // version 5, we need to skip over it to read the rest of the column
-      // descriptor. There are no BloomFilterDescriptors written to disk for
-      // column descriptors with a version number >= 5
-      BloomFilterDescriptor junk = new BloomFilterDescriptor();
-      junk.readFields(in);
-    }
-    if (versionNumber > 1) {
-      this.blockCacheEnabled = in.readBoolean();
-    }
-
-    if (versionNumber > 2) {
-      this.timeToLive = in.readInt();
+      this.values.clear();
+      int numValues = in.readInt();
+      for (int i = 0; i < numValues; i++) {
+        ImmutableBytesWritable key = new ImmutableBytesWritable();
+        ImmutableBytesWritable value = new ImmutableBytesWritable();
+        key.readFields(in);
+        value.readFields(in);
+        values.put(key, value);
+      }
     }
   }
 
@@ -374,13 +552,12 @@
   public void write(DataOutput out) throws IOException {
     out.writeByte(COLUMN_DESCRIPTOR_VERSION);
     Bytes.writeByteArray(out, this.name);
-    out.writeInt(this.maxVersions);
-    out.writeInt(this.compressionType.ordinal());
-    out.writeBoolean(this.inMemory);
-    out.writeInt(this.maxValueLength);
-    out.writeBoolean(this.bloomFilter);
-    out.writeBoolean(this.blockCacheEnabled);
-    out.writeInt(this.timeToLive);
+    out.writeInt(values.size());
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+        values.entrySet()) {
+      e.getKey().write(out);
+      e.getValue().write(out);
+    }
   }
 
   // Comparable
@@ -389,57 +566,13 @@
   public int compareTo(Object o) {
     HColumnDescriptor other = (HColumnDescriptor)o;
     int result = Bytes.compareTo(this.name, other.getName());
-    if(result == 0) {
-      result = Integer.valueOf(this.maxVersions).compareTo(
-          Integer.valueOf(other.maxVersions));
-    }
-    
-    if(result == 0) {
-      result = this.compressionType.compareTo(other.compressionType);
-    }
-    
-    if(result == 0) {
-      if(this.inMemory == other.inMemory) {
-        result = 0;
-        
-      } else if(this.inMemory) {
-        result = -1;
-        
-      } else {
-        result = 1;
-      }
-    }
-    
-    if(result == 0) {
-      if(this.blockCacheEnabled == other.blockCacheEnabled) {
-        result = 0;
-        
-      } else if(this.blockCacheEnabled) {
-        result = -1;
-        
-      } else {
-        result = 1;
-      }
-    }
-    
-    if(result == 0) {
-      result = other.maxValueLength - this.maxValueLength;
-    }
-
-    if(result == 0) {
-      result = other.timeToLive - this.timeToLive;
-    }
-
-    if(result == 0) {
-      if(this.bloomFilter == other.bloomFilter) {
-        result = 0;
-        
-      } else if(this.bloomFilter) {
+    if (result == 0) {
+      // punt on comparison for ordering, just calculate difference
+      result = this.values.hashCode() - other.values.hashCode();
+      if (result < 0)
         result = -1;
-        
-      } else {
+      else if (result > 0)
         result = 1;
-      }
     }
     return result;
   }
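
With the column descriptor now backed by that metadata map, the per-family tunables land in the same place. HBASE-700 exposes the MapFile index interval, previously only the global hbase.io.index.interval setting, through setMapFileIndexInterval; the HStore.java hunk below hands it to the MapFile writer at flush time. A small hedged sketch, with an illustrative family name and values:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnFamilyTuningSketch {
      public static void main(String[] args) {
        // Illustrative family; constructor arguments as in the earlier sketch.
        HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("anchor:"),
            1, HColumnDescriptor.CompressionType.BLOCK, false, true,
            Integer.MAX_VALUE, HConstants.FOREVER, false);

        // HBASE-700: per-family MapFile index interval instead of one global
        // hbase.io.index.interval value for every store.
        family.setMapFileIndexInterval(32);

        // The setting is just another string in the metadata map.
        System.out.println(family.getValue(HColumnDescriptor.MAPFILE_INDEX_INTERVAL)); // 32
        System.out.println(family.getMapFileIndexInterval());                          // 32
      }
    }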

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java Thu Jul 17 00:17:26 2008
@@ -27,6 +27,7 @@
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JenkinsHash;
+import org.apache.hadoop.io.VersionedWritable;
 import org.apache.hadoop.io.WritableComparable;
 
 /**
@@ -34,7 +35,9 @@
  * Contains HRegion id, start and end keys, a reference to this
  * HRegions' table descriptor, etc.
  */
-public class HRegionInfo implements WritableComparable {
+public class HRegionInfo extends VersionedWritable implements WritableComparable {
+  private final byte VERSION = 0;
+
   /**
    * @param regionName
    * @return the encodedName
@@ -137,6 +140,7 @@
    * first meta regions
    */
   private HRegionInfo(long regionId, HTableDescriptor tableDesc) {
+    super();
     this.regionId = regionId;
     this.tableDesc = tableDesc;
     this.regionName = createRegionName(tableDesc.getName(), null, regionId);
@@ -146,6 +150,7 @@
 
   /** Default constructor - creates empty object */
   public HRegionInfo() {
+    super();
     this.tableDesc = new HTableDescriptor();
   }
   
@@ -193,6 +198,7 @@
   public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey,
     final byte [] endKey, final boolean split, final long regionid)
   throws IllegalArgumentException {
+    super();
     if (tableDesc == null) {
       throw new IllegalArgumentException("tableDesc cannot be null");
     }
@@ -214,6 +220,7 @@
    * @param other
    */
   public HRegionInfo(HRegionInfo other) {
+    super();
     this.endKey = other.getEndKey();
     this.offLine = other.isOffline();
     this.regionId = other.getRegionId();
@@ -307,7 +314,14 @@
   public HTableDescriptor getTableDesc(){
     return tableDesc;
   }
-  
+
+  /**
+   * @param newDesc new table descriptor to use
+   */
+  public void setTableDesc(HTableDescriptor newDesc) {
+    this.tableDesc = newDesc;
+  }
+
   /** @return true if this is the root region */
   public boolean isRootRegion() {
     return this.tableDesc.isRootRegion();
@@ -381,6 +395,10 @@
   public int hashCode() {
     return this.hashCode;
   }
+  
+  public byte getVersion() {
+    return VERSION;
+  }
 
   //
   // Writable
@@ -390,6 +408,7 @@
    * {@inheritDoc}
    */
   public void write(DataOutput out) throws IOException {
+    super.write(out);
     Bytes.writeByteArray(out, endKey);
     out.writeBoolean(offLine);
     out.writeLong(regionId);
@@ -404,6 +423,7 @@
    * {@inheritDoc}
    */
   public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
     this.endKey = Bytes.readByteArray(in);
     this.offLine = in.readBoolean();
     this.regionId = in.readLong();

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java Thu Jul 17 00:17:26 2008
@@ -29,6 +29,7 @@
 import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableComparable;
 
@@ -53,13 +54,32 @@
           new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN,
               HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE,
               false, false, Integer.MAX_VALUE, HConstants.FOREVER, false) });
-  
-  private boolean rootregion = false;
-  private boolean metaregion = false;
+
+  // Changes prior to version 3 were not recorded here.
+  // Version 3 adds metadata as a map where keys and values are byte[].
+  public static final byte TABLE_DESCRIPTOR_VERSION = 3;
+
   private byte [] name = HConstants.EMPTY_BYTE_ARRAY;
   private String nameAsString = "";
+
+  // Table metadata
+  protected Map<ImmutableBytesWritable,ImmutableBytesWritable> values =
+    new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
   
   public static final String FAMILIES = "FAMILIES";
+
+  public static final String MAX_FILESIZE = "MAX_FILESIZE";
+  public static final String IN_MEMORY = "IN_MEMORY";
+  public static final String READONLY = "READONLY";
+  public static final String MEMCACHE_FLUSHSIZE = "MEMCACHE_FLUSHSIZE";
+  public static final String IS_ROOT = "IS_ROOT";
+  public static final String IS_META = "IS_META";
+
+  public static final boolean DEFAULT_IN_MEMORY = false;
+
+  public static final boolean DEFAULT_READONLY = false;
+
+  public static final int DEFAULT_MEMCACHE_FLUSH_SIZE = 1024*1024*64;
   
   // Key is hash of the family name.
   private final Map<Integer, HColumnDescriptor> families =
@@ -107,9 +127,32 @@
    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
    */
   public HTableDescriptor(final byte [] name) {
-    setMetaFlags(name);
-    this.name = this.metaregion? name: isLegalTableName(name);
+    super();
+    this.name = this.isMetaRegion() ? name: isLegalTableName(name);
+    this.nameAsString = Bytes.toString(this.name);
+    setMetaFlags(this.name);
+  }
+
+  /**
+   * Constructor.
+   * <p>
+   * Makes a deep copy of the supplied descriptor. 
+   * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
+   * @param desc The descriptor.
+   */
+  public HTableDescriptor(final HTableDescriptor desc)
+  {
+    super();
+    this.name = desc.name.clone();
     this.nameAsString = Bytes.toString(this.name);
+    setMetaFlags(this.name);
+    for (HColumnDescriptor c: desc.families.values()) {
+      this.families.put(Bytes.mapKey(c.getName()), new HColumnDescriptor(c));
+    }
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+        desc.values.entrySet()) {
+      this.values.put(e.getKey(), e.getValue());
+    }
   }
 
   /*
@@ -118,11 +161,46 @@
    * @param name
    */
   private void setMetaFlags(final byte [] name) {
-    this.rootregion = Bytes.equals(name, HConstants.ROOT_TABLE_NAME);
-    this.metaregion =
-      this.rootregion? true: Bytes.equals(name, HConstants.META_TABLE_NAME);
+    setRootRegion(Bytes.equals(name, HConstants.ROOT_TABLE_NAME));
+    setMetaRegion(isRootRegion() ||
+      Bytes.equals(name, HConstants.META_TABLE_NAME));
   }
-  
+
+  /** @return true if this is the root region */
+  public boolean isRootRegion() {
+    String value = getValue(IS_ROOT);
+    if (value != null)
+      return Boolean.valueOf(value);
+    return false;
+  }
+
+  /** @param isRoot true if this is the root region */
+  protected void setRootRegion(boolean isRoot) {
+    values.put(new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT)),
+      new ImmutableBytesWritable(Bytes.toBytes(Boolean.toString(isRoot))));
+  }
+
+  /** @return true if this is a meta region (part of the root or meta tables) */
+  public boolean isMetaRegion() {
+    String value = getValue(IS_META);
+    if (value != null)
+      return Boolean.valueOf(value);
+    return false;
+  }
+
+  /**
+   * @param isMeta true if this is a meta region (part of the root or meta
+   * tables) */
+  protected void setMetaRegion(boolean isMeta) {
+    values.put(new ImmutableBytesWritable(Bytes.toBytes(IS_META)),
+      new ImmutableBytesWritable(Bytes.toBytes(Boolean.toString(isMeta))));
+  }
+
+  /** @return true if table is the meta table */
+  public boolean isMetaTable() {
+    return isMetaRegion() && !isRootRegion();
+  }
+
   /**
    * Check passed buffer is legal user-space table name.
    * @param b Table name.
@@ -147,19 +225,80 @@
     return b;
   }
 
-  /** @return true if this is the root region */
-  public boolean isRootRegion() {
-    return rootregion;
+  /**
+   * @param key The key.
+   * @return The value.
+   */
+  public byte[] getValue(byte[] key) {
+    ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
+    if (ibw == null)
+      return null;
+    return ibw.get();
   }
-  
-  /** @return true if table is the meta table */
-  public boolean isMetaTable() {
-    return metaregion && !rootregion;
+
+  /**
+   * @param key The key.
+   * @return The value as a string.
+   */
+  public String getValue(String key) {
+    byte[] value = getValue(Bytes.toBytes(key));
+    if (value == null)
+      return null;
+    return Bytes.toString(value);
   }
-  
-  /** @return true if this is a meta region (part of the root or meta tables) */
-  public boolean isMetaRegion() {
-    return metaregion;
+
+  /**
+   * @param key The key.
+   * @param value The value.
+   */
+  public void setValue(byte[] key, byte[] value) {
+    values.put(new ImmutableBytesWritable(key),
+      new ImmutableBytesWritable(value));
+  }
+
+  /**
+   * @param key The key.
+   * @param value The value.
+   */
+  public void setValue(String key, String value) {
+    setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+  }
+
+  /**
+   * @return true if all columns in the table should be kept in the 
+   * HRegionServer cache only
+   */
+  public boolean isInMemory() {
+    String value = getValue(IN_MEMORY);
+    if (value != null)
+      return Boolean.valueOf(value);
+    return DEFAULT_IN_MEMORY;
+  }
+
+  /**
+   * @param inMemory True if all of the columns in the table should be kept in
+   * the HRegionServer cache only.
+   */
+  public void setInMemory(boolean inMemory) {
+    setValue(IN_MEMORY, Boolean.toString(inMemory));
+  }
+
+  /**
+   * @return true if all columns in the table should be read only
+   */
+  public boolean isReadOnly() {
+    String value = getValue(READONLY);
+    if (value != null)
+      return Boolean.valueOf(value);
+    return DEFAULT_READONLY;
+  }
+
+  /**
+   * @param readOnly True if all of the columns in the table should be read
+   * only.
+   */
+  public void setReadOnly(boolean readOnly) {
+    setValue(READONLY, Boolean.toString(readOnly));
   }
 
   /** @return name of table */
@@ -172,6 +311,39 @@
     return this.nameAsString;
   }
 
+  /** @return max hregion size for table */
+  public long getMaxFileSize() {
+    String value = getValue(MAX_FILESIZE);
+    if (value != null)
+      return Long.valueOf(value);
+    return HConstants.DEFAULT_MAX_FILE_SIZE;
+  }
+
+  /**
+   * @param maxFileSize The maximum file size that a store file can grow to
+   * before a split is triggered.
+   */
+  public void setMaxFileSize(long maxFileSize) {
+    setValue(MAX_FILESIZE, Long.toString(maxFileSize));
+  }
+
+  /**
+   * @return memory cache flush size for each hregion
+   */
+  public int getMemcacheFlushSize() {
+    String value = getValue(MEMCACHE_FLUSHSIZE);
+    if (value != null)
+      return Integer.valueOf(value);
+    return DEFAULT_MEMCACHE_FLUSH_SIZE;
+  }
+
+  /**
+   * @param memcacheFlushSize memory cache flush size for each hregion
+   */
+  public void setMemcacheFlushSize(int memcacheFlushSize) {
+    setValue(MEMCACHE_FLUSHSIZE, Integer.toString(memcacheFlushSize));
+  }
+
   /**
    * Adds a column family.
    * @param family HColumnDescriptor of familyto add.
@@ -211,10 +383,28 @@
    */
   @Override
   public String toString() {
-    return HConstants.NAME + " => '" + Bytes.toString(this.name) +
-      "', " + FAMILIES + " => " + this.families.values();
+    StringBuffer s = new StringBuffer();
+    s.append('{');
+    s.append(HConstants.NAME);
+    s.append(" => '");
+    s.append(Bytes.toString(name));
+    s.append("'");
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+        values.entrySet()) {
+      s.append(", ");
+      s.append(Bytes.toString(e.getKey().get()));
+      s.append(" => '");
+      s.append(Bytes.toString(e.getValue().get()));
+      s.append("'");
+    }
+    s.append(", ");
+    s.append(FAMILIES);
+    s.append(" => ");
+    s.append(families.values());
+    s.append('}');
+    return s.toString();
   }
-  
+
   /** {@inheritDoc} */
   @Override
   public boolean equals(Object obj) {
@@ -224,42 +414,64 @@
   /** {@inheritDoc} */
   @Override
   public int hashCode() {
-    // TODO: Cache.
     int result = Bytes.hashCode(this.name);
+    result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
     if (this.families != null && this.families.size() > 0) {
       for (HColumnDescriptor e: this.families.values()) {
         result ^= e.hashCode();
       }
     }
+    result ^= values.hashCode();
     return result;
   }
-  
+
   // Writable
 
   /** {@inheritDoc} */
+  public void readFields(DataInput in) throws IOException {
+    int version = in.readInt();
+    if (version < 3)
+      throw new IOException("versions < 3 are not supported (and never existed!?)");
+    // version 3+
+    name = Bytes.readByteArray(in);
+    nameAsString = Bytes.toString(this.name);
+    setRootRegion(in.readBoolean());
+    setMetaRegion(in.readBoolean());
+    values.clear();
+    int numVals = in.readInt();
+    for (int i = 0; i < numVals; i++) {
+      ImmutableBytesWritable key = new ImmutableBytesWritable();
+      ImmutableBytesWritable value = new ImmutableBytesWritable();
+      key.readFields(in);
+      value.readFields(in);
+      values.put(key, value);
+    }
+    families.clear();
+    int numFamilies = in.readInt();
+    for (int i = 0; i < numFamilies; i++) {
+      HColumnDescriptor c = new HColumnDescriptor();
+      c.readFields(in);
+      families.put(Bytes.mapKey(c.getName()), c);
+    }
+  }
+
+  /** {@inheritDoc} */
   public void write(DataOutput out) throws IOException {
-    out.writeBoolean(rootregion);
-    out.writeBoolean(metaregion);
+	out.writeInt(TABLE_DESCRIPTOR_VERSION);
     Bytes.writeByteArray(out, name);
+    out.writeBoolean(isRootRegion());
+    out.writeBoolean(isMetaRegion());
+    out.writeInt(values.size());
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+        values.entrySet()) {
+      e.getKey().write(out);
+      e.getValue().write(out);
+    }
     out.writeInt(families.size());
     for(Iterator<HColumnDescriptor> it = families.values().iterator();
         it.hasNext(); ) {
-      it.next().write(out);
-    }
-  }
-
-  /** {@inheritDoc} */
-  public void readFields(DataInput in) throws IOException {
-    this.rootregion = in.readBoolean();
-    this.metaregion = in.readBoolean();
-    this.name = Bytes.readByteArray(in);
-    this.nameAsString = Bytes.toString(this.name);
-    int numCols = in.readInt();
-    this.families.clear();
-    for (int i = 0; i < numCols; i++) {
-      HColumnDescriptor c = new HColumnDescriptor();
-      c.readFields(in);
-      this.families.put(Bytes.mapKey(c.getName()), c);
+      HColumnDescriptor family = it.next();
+      family.write(out);
     }
   }
 
@@ -272,12 +484,10 @@
     if (result == 0) {
       result = families.size() - other.families.size();
     }
-    
     if (result == 0 && families.size() != other.families.size()) {
       result = Integer.valueOf(families.size()).compareTo(
           Integer.valueOf(other.families.size()));
     }
-    
     if (result == 0) {
       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
           it2 = other.families.values().iterator(); it.hasNext(); ) {
@@ -287,6 +497,14 @@
         }
       }
     }
+    if (result == 0) {
+      // punt on comparison for ordering, just calculate difference
+      result = this.values.hashCode() - other.values.hashCode();
+      if (result < 0)
+        result = -1;
+      else if (result > 0)
+        result = 1;
+    }
     return result;
   }
 
@@ -323,4 +541,4 @@
   public static Path getTableDir(Path rootdir, final byte [] tableName) {
     return new Path(rootdir, Bytes.toString(tableName));
   }
-}
\ No newline at end of file
+}
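
HTableDescriptor gets the same string-backed accessors, and this is where HBASE-34, HBASE-42 and HBASE-43 surface: memcache flush size, region split size and a read-only flag become per-table attributes that the HRegion.java and HStore.java hunks below consult, falling back to hbase.hregion.memcache.flush.size and hbase.hregion.max.filesize when left at their defaults. A hedged sketch; the table name and sizes are made up:

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableTuningSketch {
      public static void main(String[] args) {
        HTableDescriptor table = new HTableDescriptor(Bytes.toBytes("logs"));

        // HBASE-42: use 512 MB as the split threshold for this table's regions,
        // overriding hbase.hregion.max.filesize (see the HStore.java hunk below).
        table.setMaxFileSize(512L * 1024 * 1024);

        // HBASE-34: flush each region's memcache at 32 MB instead of the
        // hbase.hregion.memcache.flush.size default (see the HRegion.java hunk below).
        table.setMemcacheFlushSize(32 * 1024 * 1024);

        // HBASE-43: mark the table read-only; HRegion then rejects updates and
        // deletes with "region is read only".
        table.setReadOnly(true);

        System.out.println(table);  // all attributes appear in toString()
      }
    }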

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java Thu Jul 17 00:17:26 2008
@@ -563,7 +563,26 @@
     }
   }
 
-  
+  /**
+   * Modify a table's HTableDescriptor
+   * 
+   * @param tableName name of table
+   * @param desc the updated descriptor
+   * @throws IOException
+   */
+  public void modifyTableMeta(final byte [] tableName, HTableDescriptor desc)
+  throws IOException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    HTableDescriptor.isLegalTableName(tableName);
+    try {
+      this.master.modifyTableMeta(tableName, desc);
+    } catch (RemoteException e) {
+      throw RemoteExceptionHandler.decodeRemoteException(e);
+    }
+  }
+
   /** 
    * Shuts down the HBase instance 
    * @throws IOException
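
modifyTableMeta pairs with the new master.ModifyTableMeta operation further down: the master rewrites the HRegionInfo of every region of the table in .META. with the supplied descriptor, and insists the table is disabled first (TableNotDisabledException otherwise). A hedged end-to-end sketch; it assumes the pre-existing HTable/HBaseAdmin constructors and disableTable/enableTable calls, and the table name is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ModifyTableMetaSketch {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();
        byte[] tableName = Bytes.toBytes("logs");

        // getTableDescriptor() now returns an UnmodifyableHTableDescriptor; the
        // copy constructor turns it back into something editable.
        HTableDescriptor current = new HTable(conf, tableName).getTableDescriptor();
        HTableDescriptor desc = new HTableDescriptor(current);
        desc.setReadOnly(true);
        desc.setValue("OWNER", "analytics-team");

        HBaseAdmin admin = new HBaseAdmin(conf);
        admin.disableTable(tableName);          // ModifyTableMeta requires this
        admin.modifyTableMeta(tableName, desc); // master rewrites .META. rows
        admin.enableTable(tableName);
      }
    }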

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java Thu Jul 17 00:17:26 2008
@@ -243,7 +243,8 @@
    * @throws IOException
    */
   public HTableDescriptor getTableDescriptor() throws IOException {
-    return this.connection.getHTableDescriptor(this.tableName);
+    return new UnmodifyableHTableDescriptor(
+      this.connection.getHTableDescriptor(this.tableName));
   }
 
   /**

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java Thu Jul 17 00:17:26 2008
@@ -82,8 +82,7 @@
   /**
    * Visitor class called to process each row of the .META. table
    */
-  protected interface MetaScannerVisitor {
-
+  interface MetaScannerVisitor {
     /**
      * Visitor method that accepts a RowResult and the meta region location.
      * Implementations can return false to stop the region's loop if it becomes

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java?rev=677517&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java Thu Jul 17 00:17:26 2008
@@ -0,0 +1,55 @@
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+
+public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
+
+  public UnmodifyableHColumnDescriptor (final HColumnDescriptor desc) {
+    super(desc);
+  }
+
+  @Override
+  public void setValue(byte[] key, byte[] value) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setValue(String key, String value) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setMaxVersions(int maxVersions) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setInMemory(boolean inMemory) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setBlockCacheEnabled(boolean blockCacheEnabled) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setMaxValueLength(int maxLength) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setTimeToLive(int timeToLive) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setCompressionType(CompressionType type) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setMapFileIndexInterval(int interval) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+}

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java Thu Jul 17 00:17:26 2008
@@ -27,7 +27,6 @@
  * Read-only table descriptor.
  */
 public class UnmodifyableHTableDescriptor extends HTableDescriptor {
-
   public UnmodifyableHTableDescriptor() {
 	  super();
   }
@@ -39,7 +38,7 @@
   UnmodifyableHTableDescriptor(final HTableDescriptor desc) {
     super(desc.getName());
     for (HColumnDescriptor c: desc.getFamilies()) {
-      super.addFamily(c);
+      super.addFamily(new UnmodifyableHColumnDescriptor(c));
     }
   }
 
@@ -61,4 +60,34 @@
   public HColumnDescriptor removeFamily(final byte [] column) {
     throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
+
+  @Override
+  public void setInMemory(boolean inMemory) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  @Override
+  public void setReadOnly(boolean readOnly) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  @Override
+  public void setValue(byte[] key, byte[] value) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  @Override
+  public void setValue(String key, String value) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  @Override
+  public void setMaxFileSize(long maxFileSize) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  @Override
+  public void setMemcacheFlushSize(int memcacheFlushSize) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
 }
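
Both read-only wrappers exist so descriptors handed out by the client API cannot be mutated in place: every setter throws UnsupportedOperationException, and the deep-copy constructors on HTableDescriptor and HColumnDescriptor are the intended escape hatch. A short hedged sketch of that behavior (family name and settings are illustrative):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.UnmodifyableHColumnDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadOnlyDescriptorSketch {
      public static void main(String[] args) {
        HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("contents:"),
            3, HColumnDescriptor.CompressionType.NONE, false, false,
            Integer.MAX_VALUE, HConstants.FOREVER, false);
        HColumnDescriptor readOnly = new UnmodifyableHColumnDescriptor(family);

        try {
          readOnly.setMaxVersions(1);       // read-only view: throws
        } catch (UnsupportedOperationException expected) {
          // A deep copy via the new copy constructor is editable again.
          HColumnDescriptor editable = new HColumnDescriptor(readOnly);
          editable.setMaxVersions(1);
        }
      }
    }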

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java Thu Jul 17 00:17:26 2008
@@ -104,7 +104,16 @@
    * @throws IOException
    */
   public void disableTable(final byte [] tableName) throws IOException;
-  
+
+  /**
+   * Modify a table's metadata
+   * 
+   * @param tableName
+   * @param desc
+   */
+  public void modifyTableMeta(byte[] tableName, HTableDescriptor desc)
+    throws IOException;
+
   /**
    * Shutdown an HBase cluster.
    * @throws IOException

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Thu Jul 17 00:17:26 2008
@@ -689,6 +689,13 @@
   }
 
   /** {@inheritDoc} */
+  public void modifyTableMeta(final byte[] tableName, HTableDescriptor desc)
+    throws IOException
+  {
+    new ModifyTableMeta(this, tableName, desc).process();
+  }
+
+  /** {@inheritDoc} */
   public HServerAddress findRootRegion() {
     return regionManager.getRootRegionLocation();
   }

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java?rev=677517&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java Thu Jul 17 00:17:26 2008
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
+
+/** Instantiated to modify table descriptor metadata */
+class ModifyTableMeta extends TableOperation {
+
+  private static Log LOG = LogFactory.getLog(ModifyTableMeta.class);
+
+  private HTableDescriptor desc;
+
+  ModifyTableMeta(final HMaster master, final byte [] tableName, 
+    HTableDescriptor desc) 
+  throws IOException {
+    super(master, tableName);
+    this.desc = desc;
+    LOG.debug("modifying " + Bytes.toString(tableName) + ": " +
+        desc.toString());
+  }
+
+  protected void updateRegionInfo(HRegionInterface server, byte [] regionName,
+    HRegionInfo i)
+  throws IOException {
+    BatchUpdate b = new BatchUpdate(i.getRegionName());
+    b.put(COL_REGIONINFO, Writables.getBytes(i));
+    server.batchUpdate(regionName, b);
+    LOG.debug("updated HTableDescriptor for region " + i.getRegionNameAsString());
+  }
+
+  @Override
+  protected void processScanItem(
+    @SuppressWarnings("unused") String serverName,
+    @SuppressWarnings("unused") long startCode, final HRegionInfo info) 
+      throws IOException {
+    if (isEnabled(info)) {
+      throw new TableNotDisabledException(tableName.toString());
+    }
+  }
+
+  @Override
+  protected void postProcessMeta(MetaRegion m, HRegionInterface server)
+  throws IOException {
+    for (HRegionInfo i: unservedRegions) {
+      i.setTableDesc(desc);
+      updateRegionInfo(server, m.getRegionName(), i);
+    }
+    // kick off a meta scan right away
+    master.regionManager.metaScannerThread.interrupt();
+  }
+}

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java Thu Jul 17 00:17:26 2008
@@ -125,7 +125,7 @@
   private boolean multipleMatchers;
 
   /** Constructor for abstract base class */
-  HAbstractScanner(long timestamp, byte [][] targetCols) throws IOException {
+  protected HAbstractScanner(long timestamp, byte [][] targetCols) throws IOException {
     this.timestamp = timestamp;
     this.wildcardMatch = false;
     this.multipleMatchers = false;

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Thu Jul 17 00:17:26 2008
@@ -156,6 +156,7 @@
         b.getRegionInfo().getTableDesc().getNameAsString())) {
       throw new IOException("Regions do not belong to the same table");
     }
+
     FileSystem fs = a.getFilesystem();
 
     // Make sure each region's cache is empty
@@ -483,13 +484,19 @@
       fs.delete(merges, true);
     }
 
-    // By default, we flush the cache when 64M.
-    this.memcacheFlushSize = conf.getInt("hbase.hregion.memcache.flush.size",
-      1024*1024*64);
+    int flushSize = regionInfo.getTableDesc().getMemcacheFlushSize();
+    if (flushSize == HTableDescriptor.DEFAULT_MEMCACHE_FLUSH_SIZE) {
+      flushSize = conf.getInt("hbase.hregion.memcache.flush.size",
+                      HTableDescriptor.DEFAULT_MEMCACHE_FLUSH_SIZE);
+    }
+    this.memcacheFlushSize = flushSize;
 
     this.blockingMemcacheSize = this.memcacheFlushSize *
       conf.getInt("hbase.hregion.memcache.block.multiplier", 1);
 
+    if (this.regionInfo.getTableDesc().isReadOnly())
+      this.writestate.writesEnabled = false;
+
     // HRegion is ready to go!
     this.writestate.compacting = false;
     this.lastFlushTime = System.currentTimeMillis();
@@ -1311,6 +1318,10 @@
   public void batchUpdate(BatchUpdate b)
   throws IOException {
 
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
+
     // Do a rough check that we have resources to accept a write.  The check is
     // 'rough' in that between the resource check and the call to obtain a 
     // read lock, resources may run out.  For now, the thought is that this
@@ -1418,6 +1429,9 @@
   public void deleteAll(final byte [] row, final byte [] column, final long ts)
   throws IOException {
     checkColumn(column);
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     Integer lid = obtainRowLock(row);
     try {
       deleteMultiple(row, column, ts, ALL_VERSIONS);
@@ -1434,6 +1448,9 @@
    */
   public void deleteAll(final byte [] row, final long ts)
   throws IOException {
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     Integer lid = obtainRowLock(row);    
     try {
       for (HStore store : stores.values()){
@@ -1461,6 +1478,9 @@
    */
   public void deleteFamily(byte [] row, byte [] family, long timestamp)
   throws IOException{
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     Integer lid = obtainRowLock(row);    
     try {
       // find the HStore for the column family
@@ -1493,6 +1513,9 @@
   private void deleteMultiple(final byte [] row, final byte [] column,
       final long ts, final int versions)
   throws IOException {
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     HStoreKey origin = new HStoreKey(row, column, ts);
     Set<HStoreKey> keys = getKeys(origin, versions);
     if (keys.size() > 0) {
@@ -1520,6 +1543,9 @@
       final byte [] val)
   throws IOException {
     checkColumn(key.getColumn());
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     TreeMap<HStoreKey, byte []> targets = this.targetColumns.get(lockid);
     if (targets == null) {
       targets = new TreeMap<HStoreKey, byte []>();
@@ -1541,6 +1567,9 @@
     if (updatesByColumn == null || updatesByColumn.size() <= 0) {
       return;
     }
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     boolean flush = false;
     this.updatesLock.readLock().lock();
     try {

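Taken together, the HRegion hunks above make two table-descriptor properties effective at the region level: a per-table memcache flush size that overrides hbase.hregion.memcache.flush.size when it differs from the default, and a read-only flag that clears writestate.writesEnabled so batchUpdate() and the delete methods fail with "region is read only". Below is a minimal client-side sketch of how a table might opt into this behaviour; the setter names (setMemcacheFlushSize, setReadOnly) are assumed counterparts to the getters this patch calls and are not shown in the diff.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class ReadOnlyTableSketch {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();
        HTableDescriptor desc = new HTableDescriptor("example");
        desc.addFamily(new HColumnDescriptor("contents:"));
        // Assumed setters matching the getters used by HRegion above:
        // flush this table's memcache at 32MB instead of the site-wide value,
        // and mark the table read only so writes throw "region is read only".
        desc.setMemcacheFlushSize(32 * 1024 * 1024);
        desc.setReadOnly(true);
        HBaseAdmin admin = new HBaseAdmin(conf);
        admin.createTable(desc);
      }
    }
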
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java Thu Jul 17 00:17:26 2008
@@ -172,15 +172,20 @@
     this.storeName = Bytes.toBytes(this.info.getEncodedName() + "/" +
       Bytes.toString(this.family.getName()));
     this.storeNameStr = Bytes.toString(this.storeName);
-    
+
     // By default, we compact if an HStore has more than
     // MIN_COMMITS_FOR_COMPACTION map files
     this.compactionThreshold =
       conf.getInt("hbase.hstore.compactionThreshold", 3);
     
     // By default we split region if a file > DEFAULT_MAX_FILE_SIZE.
-    this.desiredMaxFileSize =
-      conf.getLong("hbase.hregion.max.filesize", DEFAULT_MAX_FILE_SIZE);
+    long maxFileSize = info.getTableDesc().getMaxFileSize();
+    if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) {
+      maxFileSize = conf.getLong("hbase.hregion.max.filesize",
+        HConstants.DEFAULT_MAX_FILE_SIZE);
+    }
+    this.desiredMaxFileSize = maxFileSize;
+
     this.storeSize = 0L;
 
     if (family.getCompression() == HColumnDescriptor.CompressionType.BLOCK) {
@@ -242,11 +247,11 @@
       if (first) {
         // Use a block cache (if configured) for the first reader only
         // so as to control memory usage.
-        r = e.getValue().getReader(this.fs, this.family.isBloomFilterEnabled(),
+        r = e.getValue().getReader(this.fs, this.family.isBloomfilter(),
           family.isBlockCacheEnabled());
         first = false;
       } else {
-        r = e.getValue().getReader(this.fs, this.family.isBloomFilterEnabled(),
+        r = e.getValue().getReader(this.fs, this.family.isBloomfilter(),
             false);
       }
       this.readers.put(e.getKey(), r);
@@ -582,7 +587,8 @@
       HStoreFile flushedFile = new HStoreFile(conf, fs, basedir,
         info.getEncodedName(),  family.getName(), -1L, null);
       MapFile.Writer out = flushedFile.getWriter(this.fs, this.compression,
-        this.family.isBloomFilterEnabled(), cache.size());
+        this.family.isBloomfilter(), cache.size());
+       out.setIndexInterval(family.getMapFileIndexInterval());
       
       // Here we tried picking up an existing HStoreFile from disk and
       // interlacing the memcache flush compacting as we go.  The notion was
@@ -651,7 +657,7 @@
       Long flushid = Long.valueOf(logCacheFlushId);
       // Open the map file reader.
       this.readers.put(flushid,
-        flushedFile.getReader(this.fs, this.family.isBloomFilterEnabled(),
+        flushedFile.getReader(this.fs, this.family.isBloomfilter(),
         this.family.isBlockCacheEnabled()));
       this.storefiles.put(flushid, flushedFile);
       // Tell listeners of the change in readers.
@@ -737,9 +743,9 @@
         return checkSplit();
       }
       /*
-       * We create a new list of MapFile.Reader objects so we don't screw up the
-       * caching associated with the currently-loaded ones. Our iteration-based
-       * access pattern is practically designed to ruin the cache.
+       * We create a new list of MapFile.Reader objects so we don't screw up
+       * the caching associated with the currently-loaded ones. Our iteration-
+       * based access pattern is practically designed to ruin the cache.
        */
       List<MapFile.Reader> readers = new ArrayList<MapFile.Reader>();
       for (HStoreFile file: filesToCompact) {
@@ -749,7 +755,7 @@
           readers.add(reader);
           
           // Compute the size of the new bloomfilter if needed
-          if (this.family.isBloomFilterEnabled()) {
+          if (this.family.isBloomfilter()) {
             nrows += reader.getBloomFilterSize();
           }
         } catch (IOException e) {
@@ -775,7 +781,8 @@
           FSUtils.getPath(compactedOutputFile.getMapFilePath()));
       }
       MapFile.Writer writer = compactedOutputFile.getWriter(this.fs,
-        this.compression, this.family.isBloomFilterEnabled(), nrows);
+        this.compression, this.family.isBloomfilter(), nrows);
+      writer.setIndexInterval(family.getMapFileIndexInterval());
       try {
         compactHStoreFiles(writer, readers);
       } finally {
@@ -1029,7 +1036,7 @@
               // Use a block cache (if configured) for this reader since
               // it is the only one.
               finalCompactedFile.getReader(this.fs,
-                  this.family.isBloomFilterEnabled(),
+                  this.family.isBloomfilter(),
                   this.family.isBlockCacheEnabled()));
           this.storefiles.put(orderVal, finalCompactedFile);
           // Tell observers that list of Readers has changed.
@@ -1814,4 +1821,4 @@
       return key;
     }
   }
-}
\ No newline at end of file
+}

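HStore applies the same override pattern to the split threshold: a table-level max file size wins, and only when it still equals the default does hbase.hregion.max.filesize from the site configuration apply. The fallback logic, pulled out as a stand-alone helper for illustration (a sketch; the 256MB constant is an assumption standing in for HConstants.DEFAULT_MAX_FILE_SIZE):

    import org.apache.hadoop.conf.Configuration;

    public class MaxFileSizeFallback {
      // Assumed stand-in for HConstants.DEFAULT_MAX_FILE_SIZE.
      static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024L;

      /**
       * Table-level override wins; otherwise fall back to the site
       * configuration; otherwise use the built-in default.
       */
      static long desiredMaxFileSize(long tableLevelMaxFileSize, Configuration conf) {
        if (tableLevelMaxFileSize != DEFAULT_MAX_FILE_SIZE) {
          return tableLevelMaxFileSize;
        }
        return conf.getLong("hbase.hregion.max.filesize", DEFAULT_MAX_FILE_SIZE);
      }
    }
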
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TableHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TableHandler.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TableHandler.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TableHandler.java Thu Jul 17 00:17:26 2008
@@ -413,7 +413,7 @@
           doElement(outputter, "name", Bytes.toString(e.getName()));
           doElement(outputter, "compression", e.getCompression().toString());
           doElement(outputter, "bloomfilter",
-              Boolean.toString(e.isBloomFilterEnabled()));
+              Boolean.toString(e.isBloomfilter()));
           doElement(outputter, "max-versions",
             Integer.toString(e.getMaxVersions()));
           doElement(outputter, "maximum-cell-size",

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java Thu Jul 17 00:17:26 2008
@@ -67,7 +67,7 @@
     col.inMemory = in.isInMemory();
     col.blockCacheEnabled = in.isBlockCacheEnabled();
     col.maxValueLength = in.getMaxValueLength();
-    col.bloomFilterType = Boolean.toString(in.isBloomFilterEnabled());
+    col.bloomFilterType = Boolean.toString(in.isBloomfilter());
     return col;
   }
   

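The TableHandler and ThriftUtilities edits exist only because HColumnDescriptor's accessor was renamed from isBloomFilterEnabled() to isBloomfilter(); the matching mutator, setBloomfilter(boolean), appears in the Migrate change below. A sketch of the renamed pair in use (the single-String HColumnDescriptor constructor is assumed here):

    import org.apache.hadoop.hbase.HColumnDescriptor;

    public class BloomfilterRenameSketch {
      public static void main(String[] args) {
        // Column family name with trailing delimiter, as families were named then.
        HColumnDescriptor family = new HColumnDescriptor("info:");
        family.setBloomfilter(true);                 // renamed mutator
        System.out.println(family.isBloomfilter());  // renamed accessor -> true
      }
    }
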
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java Thu Jul 17 00:17:26 2008
@@ -37,9 +37,11 @@
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
@@ -188,7 +190,7 @@
       float version = Float.parseFloat(versionStr);
       if (version == 0.1f) {
         checkForUnrecoveredLogFiles(getRootDirFiles());
-        migrate();
+        migrateToV5();
       } else {
         throw new IOException("Unrecognized or non-migratable version: " +
           version);
@@ -209,10 +211,94 @@
     }
   }
   
-  private void migrate() throws IOException {
+  private void migrateToV5() throws IOException {
+    rewriteMetaHRegionInfo();
     addHistorianFamilyToMeta();
     updateBloomFilters();
   }
+  
+  /**
+   * Rewrite the meta tables so that HRI is versioned and so we move to the
+   * new HTD and HCD.
+   * @throws IOException 
+   */
+  private void rewriteMetaHRegionInfo() throws IOException {
+    if (this.readOnly && this.migrationNeeded) {
+      return;
+    }
+    // Read using old classes.
+    final org.apache.hadoop.hbase.util.migration.v5.MetaUtils utils =
+      new org.apache.hadoop.hbase.util.migration.v5.MetaUtils(this.conf);
+    try {
+      // Scan the root region
+      utils.scanRootRegion(new org.apache.hadoop.hbase.util.migration.v5.MetaUtils.ScannerListener() {
+        public boolean processRow(org.apache.hadoop.hbase.util.migration.v5.HRegionInfo info)
+        throws IOException {
+          // Scan every meta region
+          final org.apache.hadoop.hbase.util.migration.v5.HRegion metaRegion =
+            utils.getMetaRegion(info);
+          // If here, we were able to read with old classes.  If readOnly, then
+          // needs migration.
+          if (readOnly && !migrationNeeded) {
+            migrationNeeded = true;
+            return false;
+          }
+          updateHRegionInfo(utils.getRootRegion(), info);
+          utils.scanMetaRegion(info, new org.apache.hadoop.hbase.util.migration.v5.MetaUtils.ScannerListener() {
+            public boolean processRow(org.apache.hadoop.hbase.util.migration.v5.HRegionInfo hri)
+            throws IOException {
+              updateHRegionInfo(metaRegion, hri);
+              return true;
+            }
+          });
+          return true;
+        }
+      });
+    } catch (Exception e) {
+      LOG.error("", e);
+    } finally {
+      utils.shutdown();
+    }
+  }
+  
+  /*
+   * Move from the old pre-v5 HRegionInfo to the current HRegionInfo and
+   * persist it back into <code>mr</code>.
+   * @param mr
+   * @param oldHri
+   */
+  void updateHRegionInfo(org.apache.hadoop.hbase.util.migration.v5.HRegion mr,
+    org.apache.hadoop.hbase.util.migration.v5.HRegionInfo oldHri)
+  throws IOException {
+    byte [] oldHriTableName = oldHri.getTableDesc().getName();
+    HTableDescriptor newHtd =
+      Bytes.equals(HConstants.ROOT_TABLE_NAME, oldHriTableName)?
+        HTableDescriptor.ROOT_TABLEDESC:
+        Bytes.equals(HConstants.META_TABLE_NAME, oldHriTableName)?
+          HTableDescriptor.META_TABLEDESC:
+          new HTableDescriptor(oldHri.getTableDesc().getName());
+    for (org.apache.hadoop.hbase.util.migration.v5.HColumnDescriptor oldHcd:
+        oldHri.getTableDesc().getFamilies()) {
+      HColumnDescriptor newHcd = new HColumnDescriptor(
+        HStoreKey.addDelimiter(oldHcd.getName()),
+        oldHcd.getMaxValueLength(),
+        HColumnDescriptor.CompressionType.valueOf(oldHcd.getCompressionType().toString()),
+        oldHcd.isInMemory(), oldHcd.isBlockCacheEnabled(),
+        oldHcd.getMaxValueLength(), oldHcd.getTimeToLive(),
+        oldHcd.isBloomFilterEnabled());
+      newHtd.addFamily(newHcd);
+    }
+    HRegionInfo newHri = new HRegionInfo(newHtd, oldHri.getStartKey(),
+      oldHri.getEndKey(), oldHri.isSplit(), oldHri.getRegionId());
+    BatchUpdate b = new BatchUpdate(newHri.getRegionName());
+    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(newHri));
+    mr.batchUpdate(b);
+    if (LOG.isDebugEnabled()) {
+        LOG.debug("New " + Bytes.toString(HConstants.COL_REGIONINFO) +
+          " for " + oldHri.toString() + " in " + mr.toString() + " is: " +
+          newHri.toString());
+    }
+  }
 
   private FileStatus[] getRootDirFiles() throws IOException {
     FileStatus[] stats = fs.listStatus(FSUtils.getRootDir(this.conf));
@@ -243,77 +329,6 @@
     }
   }
 
-  void migrateRegionDir(final byte [] tableName, String oldPath)
-  throws IOException {
-    // Create directory where table will live
-    Path rootdir = FSUtils.getRootDir(this.conf);
-    Path tableDir = new Path(rootdir, Bytes.toString(tableName));
-    fs.mkdirs(tableDir);
-
-    // Move the old region directory under the table directory
-
-    Path newPath = new Path(tableDir,
-        oldPath.substring(OLD_PREFIX.length()));
-    fs.rename(new Path(rootdir, oldPath), newPath);
-
-    processRegionSubDirs(fs, newPath);
-  }
-  
-  private void processRegionSubDirs(FileSystem fs, Path newPath)
-  throws IOException {
-    String newName = newPath.getName();
-    FileStatus[] children = fs.listStatus(newPath);
-    for (int i = 0; i < children.length; i++) {
-      String child = children[i].getPath().getName();
-      if (children[i].isDir()) {
-        processRegionSubDirs(fs, children[i].getPath());
-
-        // Rename old compaction directories
-
-        if (child.startsWith(OLD_PREFIX)) {
-          fs.rename(children[i].getPath(),
-              new Path(newPath, child.substring(OLD_PREFIX.length())));
-        }
-      } else {
-        if (newName.compareTo("mapfiles") == 0) {
-          // Check to see if this mapfile is a reference
-
-          if (HStore.isReference(children[i].getPath())) {
-            // Keep track of references in case we come across a region
-            // that we can't otherwise account for.
-            references.add(child.substring(child.indexOf(".") + 1));
-          }
-        }
-      }
-    }
-  }
-  
-  private void scanRootRegion() throws IOException {
-    final MetaUtils utils = new MetaUtils(this.conf);
-    try {
-      utils.scanRootRegion(new MetaUtils.ScannerListener() {
-        public boolean processRow(HRegionInfo info) throws IOException {
-          // First move the meta region to where it should be and rename
-          // subdirectories as necessary
-          migrateRegionDir(HConstants.META_TABLE_NAME, OLD_PREFIX
-              + info.getEncodedName());
-          utils.scanMetaRegion(info, new MetaUtils.ScannerListener() {
-            public boolean processRow(HRegionInfo tableInfo) throws IOException {
-              // Move the region to where it should be and rename
-              // subdirectories as necessary
-              migrateRegionDir(tableInfo.getTableDesc().getName(), OLD_PREFIX
-                  + tableInfo.getEncodedName());
-              return true;
-            }
-          });
-          return true;
-        }
-      });
-    } finally {
-      utils.shutdown();
-    }
-  }
-
   private void addHistorianFamilyToMeta() throws IOException {
     if (this.migrationNeeded) {
       // Be careful. We cannot use MetaUtils if current hbase in the
@@ -359,17 +374,16 @@
           // Scan every meta region
           final HRegion metaRegion = utils.getMetaRegion(info);
           utils.scanMetaRegion(info, new MetaUtils.ScannerListener() {
-            public boolean processRow(HRegionInfo tableInfo) throws IOException {
-              HTableDescriptor desc = tableInfo.getTableDesc();
+            public boolean processRow(HRegionInfo hri) throws IOException {
+              HTableDescriptor desc = hri.getTableDesc();
               Path tableDir =
                 HTableDescriptor.getTableDir(rootDir, desc.getName()); 
               for (HColumnDescriptor column: desc.getFamilies()) {
-                if (column.isBloomFilterEnabled()) {
+                if (column.isBloomfilter()) {
                   // Column has a bloom filter
                   migrationNeeded = true;
-
                   Path filterDir = HStoreFile.getFilterDir(tableDir,
-                      tableInfo.getEncodedName(), column.getName());
+                      hri.getEncodedName(), column.getName());
                   if (fs.exists(filterDir)) {
                     // Filter dir exists
                     if (readOnly) {
@@ -379,8 +393,10 @@
                     }
                     // Delete the filter
                     fs.delete(filterDir, true);
-                    // Update the HRegionInfo in meta
-                    utils.updateMETARegionInfo(metaRegion, tableInfo);
+                    // Update the HRegionInfo in meta setting the bloomfilter
+                    // to be disabled.
+                    column.setBloomfilter(false);
+                    utils.updateMETARegionInfo(metaRegion, hri);
                   }
                 }
               }

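The new migrateToV5() path leans on the meta-walking idiom used throughout Migrate.java: MetaUtils hands every HRegionInfo it reads to a ScannerListener callback, nesting a root-region scan around per-.META.-region scans, and a processRow() that returns false stops the walk. A stripped-down sketch of that idiom using the current (non-migration) classes against a running instance:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.MetaUtils;

    public class MetaWalkSketch {
      public static void main(String[] args) throws IOException {
        final MetaUtils utils = new MetaUtils(new HBaseConfiguration());
        try {
          // Visit every .META. region named in -ROOT-, then every region row
          // inside it.  Returning false from processRow would halt the scan.
          utils.scanRootRegion(new MetaUtils.ScannerListener() {
            public boolean processRow(HRegionInfo metaInfo) throws IOException {
              utils.scanMetaRegion(metaInfo, new MetaUtils.ScannerListener() {
                public boolean processRow(HRegionInfo hri) throws IOException {
                  System.out.println("Found region: " + hri.toString());
                  return true;
                }
              });
              return true;
            }
          });
        } finally {
          utils.shutdown();
        }
      }
    }
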
Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/FlushRequester.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/FlushRequester.java?rev=677517&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/FlushRequester.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/FlushRequester.java Thu Jul 17 00:17:26 2008
@@ -0,0 +1,36 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util.migration.v5;
+
+/**
+ * Implementors of this interface want to be notified when an HRegion
+ * determines that a cache flush is needed. A FlushRequester (or null)
+ * must be passed to the HRegion constructor so it knows who to call when it
+ * has a filled memcache.
+ */
+public interface FlushRequester {
+  /**
+   * Tell the listener the cache needs to be flushed.
+   * 
+   * @param region the HRegion requesting the cache flush
+   */
+  void request(HRegion region);
+}
\ No newline at end of file

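FlushRequester is a one-method callback, so a caller that merely wants to observe flush requests can implement it trivially; for instance (a sketch, not part of the patch):

    package org.apache.hadoop.hbase.util.migration.v5;

    /** Example FlushRequester that only records which region asked to be flushed. */
    public class LoggingFlushRequester implements FlushRequester {
      public void request(HRegion region) {
        // A real region server would queue a flush of the region's memcache
        // here; this sketch just logs the request.
        System.out.println("Flush requested for " + region.toString());
      }
    }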

