hbase-commits mailing list archives

From apurt...@apache.org
Subject svn commit: r884310 [1/7] - in /hadoop/hbase/branches/0.20_on_hadoop-0.18.3: ./ bin/ conf/ lib/ src/contrib/ src/contrib/ec2/ src/contrib/ec2/bin/ src/contrib/ec2/bin/image/ src/contrib/transactional/src/java/org/apache/hadoop/hbase/client/tableindexed...
Date Wed, 25 Nov 2009 22:30:39 GMT
Author: apurtell
Date: Wed Nov 25 22:30:29 2009
New Revision: 884310

URL: http://svn.apache.org/viewvc?rev=884310&view=rev
Log:
merge up to 0.20.2 release

Added:
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/add_table.rb
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/loadtable.rb
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/lib/zookeeper-3.2.1.jar   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/README.txt
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/cmd-hbase-cluster   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/create-hbase-image   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/delete-hbase-cluster   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-env.sh
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-remote.sh
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-zookeeper-remote.sh
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/create-hbase-image-remote   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/ec2-run-user-data
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/init-hbase-cluster-secgroups   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/launch-hbase-cluster   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/launch-hbase-master   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/launch-hbase-slaves   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/launch-hbase-zookeeper   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/list-hbase-clusters   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/revoke-hbase-cluster-secgroups   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/terminate-hbase-cluster   (with props)
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/docs/src/documentation/content/xdocs/cygwin.xml
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Row.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/CompressionTest.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestHMsg.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestFilterAcrossRegions.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/master/TestRegionManager.java
Removed:
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/lib/zookeeper-r785019-hbase-1329.jar
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/RegionHistorian.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/webapps/master/regionhistorian.jsp
Modified:
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/CHANGES.txt
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/Formatter.rb
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/HBase.rb
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/hbase
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/hirb.rb
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/build.xml
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/hbase-default.xml
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/log4j.properties
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/build-contrib.xml
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/client/tableindexed/IndexSpecification.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/client/tableindexed/IndexedTableAdmin.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/client/transactional/HBaseBackedTransactionLogger.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/client/transactional/TransactionLogger.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/client/transactional/TransactionManager.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/client/transactional/package.html
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexedRegion.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/THLog.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/THLogKey.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/THLogRecoveryManager.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionState.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegion.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/test/org/apache/hadoop/hbase/client/tableindexed/TestIndexedTable.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/test/org/apache/hadoop/hbase/client/transactional/TestTransactions.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/test/org/apache/hadoop/hbase/regionserver/transactional/TestTHLog.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/test/org/apache/hadoop/hbase/regionserver/transactional/TestTHLogRecovery.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/docs/forrest.properties
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/docs/src/documentation/content/xdocs/metrics.xml
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/docs/src/documentation/content/xdocs/site.xml
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/docs/src/documentation/content/xdocs/tabs.xml
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HConstants.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/HServerInfo.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/KeyValue.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Delete.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Get.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnection.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HTable.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HTablePool.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/MetaScanner.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Put.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Result.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Scan.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/ScannerCallable.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/BinaryComparator.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/CompareFilter.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/FilterList.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/filter/package-info.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/RowResult.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/MetaRegion.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/RegionManager.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/ServerManager.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/TableOperation.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/migration/nineteen/HStoreKey.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/CountingBloomFilter.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/DynamicBloomFilter.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/MemStore.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/Bytes.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/FSUtils.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/Migrate.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/overview.html
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestSerialization.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestClient.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestFilter.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java
    hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/webapps/master/table.jsp

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/CHANGES.txt?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/CHANGES.txt Wed Nov 25 22:30:29 2009
@@ -1,5 +1,143 @@
 HBase Change Log
 
+Release 0.20.2 - Unreleased
+  INCOMPATIBLE CHANGES
+
+  BUG FIXES
+   HBASE-1905  Remove unused config. hbase.hstore.blockCache.blockSize
+   HBASE-1906  FilterList of prefix and columnvalue not working properly
+               with deletes and multiple values
+   HBASE-1896  WhileMatchFilter.reset should call encapsulated filter reset
+   HBASE-1912  When adding a secondary index to an existing table, it will
+               cause NPE during re-indexing (Mingjui Ray Liao via Andrew
+               Purtell)
+   HBASE-1917  TestScanner.testFilters failing
+   HBASE-1908  ROOT not reassigned if only one regionserver left
+   HBASE-1916  FindBugs and javac warnings cleanup
+   HBASE-1924  MapReduce Driver lost hsf2sf backporting hbase-1684
+   HBASE-1777  column length is not checked before saved to memstore
+   HBASE-1895  HConstants.MAX_ROW_LENGTH is incorrectly 64k, should be 32k
+   HBASE-1925  IllegalAccessError: Has not been initialized (getMaxSequenceId)
+   HBASE-1929  If hbase-default.xml is not in CP, zk session timeout is 10 secs!
+   HBASE-1927  Scanners not closed properly in certain circumstances
+   HBASE-1934  NullPointerException in ClientScanner (Andrew Purtell via Stack)
+   HBASE-1946  Unhandled exception at regionserver (Dmitriy Lyfar via Stack)
+   HBASE-1941  Put's copy feature has a bug (Dave Latham via Stack) 
+   HBASE-1682  IndexedRegion does not properly handle deletes
+               (Andrew McCall via Clint Morgan and Stack)
+   HBASE-1953  Overhaul of overview.html (html fixes, typos, consistency) -
+               no content changes (Lars Francke via Stack)
+   HBASE-1954  Transactional scans do not see newest put (Clint Morgan via Stack)
+   HBASE-1919  code: HRS.delete seems to ignore exceptions it shouldn't
+   HBASE-1951  Stack overflow when calling HTable.checkAndPut() 
+               when deleting a lot of values
+   HBASE-1781  Weird behavior of WildcardColumnTracker.checkColumn(), 
+               looks like recursive loop
+   HBASE-1949  KeyValue expiration by Time-to-Live during major compaction is
+               broken (Gary Helmling via Stack)
+   HBASE-1957  Get-s can't set a Filter (Roman Kalyakin via Stack)
+   HBASE-1959  Compress tables during 0.19 to 0.20 migration (Dave Latham via Stack)
+   HBASE-1928  ROOT and META tables stay in transition state (making the system
+               not usable) if the designated regionServer dies before the
+               assignment is complete (Yannis Pavlidis via Stack)
+   HBASE-1962  Bulk loading script makes regions incorrectly (loadtable.rb)
+   HBASE-1966  Apply the fix from site/ to remove the forrest dependency on
+               java5
+   HBASE-1967  [Transactional] client.TestTransactions.testPutPutScan fails
+               sometimes - Temporary fix
+   HBASE-1965  On restart of branch, master complains about not being able
+               to set safe mode
+
+  IMPROVEMENTS
+   HBASE-1899  Use scanner caching in shell count
+   HBASE-1903  Enable DEBUG by default
+   HBASE-1918  Don't do DNS resolving in .META. scanner for each row
+   HBASE-1921  When the Master's session times out and there's only one,
+               cluster is wedged
+   HBASE-1947  If HBase starts/stops often in less than 24 hours, 
+               you end up with lots of store files
+   HBASE-1867  Tool to regenerate an hbase table from the data files
+   HBASE-1829  Make use of start/stop row in TableInputFormat
+   HBASE-1904  Add tutorial for installing HBase on Windows using Cygwin as a
+               test and development environment (Wim Van Leuven via Stack)
+   HBASE-1968  Give clients access to the write buffer
+
+Release 0.20.1 - Released October 12th, 2009
+  INCOMPATIBLE CHANGES
+   HBASE-1854  Remove the Region Historian
+
+  BUG FIXES
+   HBASE-1824  [stargate] default timestamp should be LATEST_TIMESTAMP
+   HBASE-1795  log recovery doesn't reset the max sequence id, new logfiles can
+               get tossed as 'duplicates'
+   HBASE-1794  recovered log files are not inserted into the storefile map
+   HBASE-1740  ICV has a subtle race condition only visible under high load
+   HBASE-1808  [stargate] fix how columns are specified for scanners
+   HBASE-1828  CompareFilters are broken from client-side
+   HBASE-1836  test of indexed hbase broken
+   HBASE-1838  [javadoc] Add javadoc to Delete explaining behavior when no
+               timestamp provided
+   HBASE-1821  Filtering by SingleColumnValueFilter bug
+   HBASE-1840  RowLock fails when used with IndexTable
+   HBASE-1818  HFile code review and refinement (Schubert Zhang via Stack)
+   HBASE-1830  HbaseObjectWritable methods should allow null HBCs
+               for when Writable is not Configurable (Stack via jgray)
+   HBASE-1847  Delete latest of a null qualifier when non-null qualifiers
+               exist throws a RuntimeException 
+   HBASE-1853  Each time around the regionserver core loop, we clear the
+               messages to pass master, even if we failed to deliver them
+   HBASE-1815  HBaseClient can get stuck in an infinite loop while attempting
+               to contact a failed regionserver
+   HBASE-1856  HBASE-1765 broke MapReduce when using Result.list()
+               (Lars George via Stack)
+   HBASE-1857  WrongRegionException when setting region online after .META.
+               split (Cosmin Lehane via Stack)
+   HBASE-1809  NPE thrown in BoundedRangeFileInputStream
+   HBASE-1859  Misc shell fixes patch (Kyle Oba via Stack)
+   HBASE-1865  0.20.0 TableInputFormatBase NPE
+   HBASE-1866  Scan(Scan) copy constructor does not copy value of
+               cacheBlocks
+   HBASE-1858  Master can't split logs created by THBase (Clint Morgan via Stack)
+   HBASE-1871  Wrong type used in TableMapReduceUtil.initTableReduceJob()
+               (Lars George via Stack)
+   HBASE-1869  IndexedTable delete fails when used in conjunction with RowLock()
+               (Keith Thomas via Stack)
+   HBASE-1883  HRegion passes the wrong minSequenceNumber to doReconstructionLog
+               (Clint Morgan via Stack)
+   HBASE-1878  BaseScanner results can't be trusted at all (Related to
+               hbase-1784)
+   HBASE-1831  Scanning API must be reworked to allow for fully functional
+               Filters client-side
+   HBASE-1890  hbase-1506 where assignment is done at regionserver doesn't work
+
+  IMPROVEMENTS
+   HBASE-1819  Update to 0.20.1 hadoop and zk 3.2.1
+   HBASE-1765  Delay Result deserialization until asked for and permit
+               access to the raw binary to prevent forced deserialization
+   HBASE-1687  bin/hbase script doesn't allow for different memory settings for
+               each daemon type
+   HBASE-1823  Ability for Scanners to bypass the block cache
+   HBASE-1827  Add disabling block cache scanner flag to the shell
+   HBASE-1574  Client and server APIs to do batch deletes
+   HBASE-1833  hfile.main fixes
+   HBASE-1684  Backup (Export/Import) contrib tool for 0.20
+   HBASE-1860  Change HTablePool#createHTable from private to protected
+   HBASE-48    Bulk load tools
+   HBASE-1855  HMaster web application doesn't show the region end key in the
+               table detail page (Andrei Dragomir via Stack)
+   HBASE-1870  Bytes.toFloat(bytes[],int) is marked private
+   HBASE-1874  Client Scanner mechanism that is used for HbaseAdmin methods
+               (listTables, tableExists), is very slow if the client is far
+               away from the HBase cluster (Andrei Dragomir via Stack)
+   HBASE-1879  ReadOnly transactions generate WAL activity (Clint Morgan via Stack)
+   HBASE-1875  Compression test utility (elsif via Stack)
+   HBASE-1832  Faster enable/disable/delete
+   HBASE-1481  Add fast row key only scanning
+   HBASE-1506  [performance] Make splits faster
+   HBASE-1722  Add support for exporting HBase metrics via JMX
+               (Gary Helmling via Stack)
+
+
 Release 0.20.0 - Tue Sep  8 12:48:41 PDT 2009
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/Formatter.rb
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/Formatter.rb?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/Formatter.rb (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/Formatter.rb Wed Nov 25 22:30:29 2009
@@ -2,12 +2,20 @@
 module Formatter
   # Base abstract class for results formatting.
   class Formatter
+    def is_kernel?(obj)
+      obj.kind_of?(Module) and obj.name == "Kernel"
+    end
+
     # Takes an output stream and a print width.
-    def initialize(o, w = 100)
-      raise TypeError.new("Type %s of parameter %s is not IO" % [o.class, o]) \
-        unless o.instance_of? IO
-      @out = o
-      @maxWidth = w
+    def initialize(opts={})
+      defaults = {:output_stream => Kernel, :format_width => 100}
+      options = defaults.merge(opts)
+
+      @out = options[:output_stream]
+      raise TypeError.new("Type %s of parameter %s is not IO" % [@out.class, @out]) \
+        unless @out.instance_of? IO or is_kernel?(@out)
+
+      @maxWidth = options[:format_width]
       @rowCount = 0
     end
 
@@ -27,7 +35,7 @@
       end
       if args.class == String
         output(@maxWidth, args)
-        puts
+        @out.puts
         return
       end
       # TODO: Look at the type.  Is it RowResult?
@@ -35,7 +43,7 @@
         splits = split(@maxWidth, dump(args[0]))
         for l in splits
           output(@maxWidth, l)
-          puts
+          @out.puts
         end
       elsif args.length == 2
         col1width = (not widths or widths.length == 0) ? @maxWidth / 4 : @maxWidth * widths[0] / 100
@@ -57,7 +65,7 @@
           @out.print(" ")
           output(col2width, splits2[index])
           index += 1
-          puts
+          @out.puts
         end
       else
         # Print a space to set off multi-column rows
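
A note on the Formatter change above: the constructor now takes an options
hash instead of positional arguments, with defaults of :output_stream =>
Kernel and :format_width => 100. A minimal sketch of caller usage (JRuby),
assuming Formatter::Console passes its options through to this base
initializer; STDERR and the 80-column width are illustrative only:

  # Sketch only: build a console formatter that writes to STDERR at 80 columns.
  @formatter = Formatter::Console.new(:output_stream => STDERR, :format_width => 80)
  # Any omitted option falls back to the defaults above (Kernel output, width 100).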

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/HBase.rb
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/HBase.rb?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/HBase.rb (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/HBase.rb Wed Nov 25 22:30:29 2009
@@ -11,11 +11,14 @@
 include_class('java.lang.Long') {|package,name| "J#{name}" }
 include_class('java.lang.Boolean') {|package,name| "J#{name}" }
 
+import org.apache.hadoop.hbase.KeyValue
 import org.apache.hadoop.hbase.client.HBaseAdmin
 import org.apache.hadoop.hbase.client.HTable
 import org.apache.hadoop.hbase.client.Get
 import org.apache.hadoop.hbase.client.Put
+import org.apache.hadoop.hbase.client.Scan
 import org.apache.hadoop.hbase.client.Delete
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter
 import org.apache.hadoop.hbase.HConstants
 import org.apache.hadoop.hbase.io.BatchUpdate
 import org.apache.hadoop.hbase.io.RowResult
@@ -43,6 +46,7 @@
   LIMIT = "LIMIT"
   METHOD = "METHOD"
   MAXLENGTH = "MAXLENGTH"
+  CACHE_BLOCKS = "CACHE_BLOCKS"
 
   # Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin
   class Admin
@@ -392,42 +396,56 @@
         filter = args["FILTER"] || nil
         startrow = args["STARTROW"] || ""
         stoprow = args["STOPROW"] || nil
-        timestamp = args["TIMESTAMP"] || HConstants::LATEST_TIMESTAMP
+        timestamp = args["TIMESTAMP"] || nil
         columns = args["COLUMNS"] || getAllColumns()
+        cache = args.include?("CACHE_BLOCKS") ? args["CACHE_BLOCKS"] : true
         
         if columns.class == String
           columns = [columns]
         elsif columns.class != Array
           raise ArgumentError.new("COLUMNS must be specified as a String or an Array")
         end
-        cs = columns.to_java(java.lang.String)
-        
         if stoprow
-          s = @table.getScanner(cs, startrow, stoprow, timestamp)
+          scan = Scan.new(startrow.to_java_bytes, stoprow.to_java_bytes)
         else
-          s = @table.getScanner(cs, startrow, timestamp, filter) 
+          scan = Scan.new(startrow.to_java_bytes)
+        end
+        for c in columns
+          split = KeyValue.parseColumn(c.to_java_bytes)
+          if split[1] != nil
+            scan.addColumn(split[0], split[1])
+          else
+            scan.addFamily(split[0])
+          end
+        end
+        if filter != nil
+          scan.setFilter(filter)
         end
+        if timestamp != nil
+          scan.setTimeStamp(timestamp)
+        end
+        scan.setCacheBlocks(cache)
       else
-        columns = getAllColumns()
-        s = @table.getScanner(columns.to_java(java.lang.String))
+        scan = Scan.new()
       end
+      s = @table.getScanner(scan)
       count = 0
       @formatter.header(["ROW", "COLUMN+CELL"])
       i = s.iterator()
       while i.hasNext()
-        r = i.next()
+        r = i.next().getRowResult()
         row = String.from_java_bytes r.getRow()
+        count += 1
+        if limit != -1 and count >= limit
+          break
+        end
         for k, v in r
           column = String.from_java_bytes k
           cell = toString(column, v, maxlength)
           @formatter.row([row, "column=%s, %s" % [column, cell]])
         end
-        count += 1
-        if limit != -1 and count >= limit
-          break
-        end
       end
-      @formatter.footer(now)
+      @formatter.footer(now, count)
     end
 
     def put(row, column, value, timestamp = nil)
@@ -529,9 +547,12 @@
     
     def count(interval = 1000)
       now = Time.now
-      columns = getAllColumns()
-      cs = columns.to_java(java.lang.String)
-      s = @table.getScanner(cs)
+      scan = Scan.new()
+      scan.setCacheBlocks(false)
+      # We can safely set scanner caching with the first key only filter
+      scan.setCaching(10)
+      scan.setFilter(FirstKeyOnlyFilter.new())
+      s = @table.getScanner(scan)
       count = 0
       i = s.iterator()
       @formatter.header()
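
A note on the shell count change above: count() now drives a Scan with block
caching off, client-side caching of 10 rows per round trip, and a
FirstKeyOnlyFilter so only the first KeyValue of each row is returned
(HBASE-1899, HBASE-1481). A minimal sketch of the same row-counting pattern
as a standalone JRuby script, assuming a reachable cluster with its
configuration on the classpath; the table name "mytable" is illustrative only:

  include Java
  import org.apache.hadoop.hbase.HBaseConfiguration
  import org.apache.hadoop.hbase.client.HTable
  import org.apache.hadoop.hbase.client.Scan
  import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter

  table = HTable.new(HBaseConfiguration.new(), "mytable")  # hypothetical table name
  scan = Scan.new()
  scan.setCacheBlocks(false)                 # don't churn the block cache on a full scan
  scan.setCaching(10)                        # safe: FirstKeyOnlyFilter yields one KeyValue per row
  scan.setFilter(FirstKeyOnlyFilter.new())
  scanner = table.getScanner(scan)
  count = 0
  count += 1 while scanner.next()            # next() returns nil once the scan is exhausted
  scanner.close()
  puts "#{count} row(s)"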

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/add_table.rb
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/add_table.rb?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/add_table.rb (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/add_table.rb Wed Nov 25 22:30:29 2009
@@ -0,0 +1,123 @@
+# Script that adds a table back to a running HBase.
+# Currently it only works on a table that has been copied aside.
+# You cannot pass an arbitrary table name yet.
+# 
+# To see usage for this script, run: 
+#
+#  ${HBASE_HOME}/bin/hbase org.jruby.Main add_table.rb
+#
+include Java
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.hadoop.hbase.HConstants
+import org.apache.hadoop.hbase.HRegionInfo
+import org.apache.hadoop.hbase.client.HTable
+import org.apache.hadoop.hbase.client.Delete
+import org.apache.hadoop.hbase.client.Put
+import org.apache.hadoop.hbase.client.Scan
+import org.apache.hadoop.hbase.HTableDescriptor
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.util.FSUtils
+import org.apache.hadoop.hbase.util.Writables
+import org.apache.hadoop.fs.Path
+import org.apache.hadoop.fs.FileSystem
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "add_table"
+
+# Print usage for this script
+def usage
+  puts 'Usage: %s.rb TABLE_DIR [alternate_tablename]' % NAME
+  exit!
+end
+
+# Get configuration to use.
+c = HBaseConfiguration.new()
+
+# Set the hadoop filesystem configuration using the hbase.rootdir.
+# Otherwise we'd always use localhost even though the hbase.rootdir
+# might be pointing at an hdfs location.
+c.set("fs.default.name", c.get(HConstants::HBASE_DIR))
+fs = FileSystem.get(c)
+
+# Get a logger and a metautils instance.
+LOG = LogFactory.getLog(NAME)
+
+# Check arguments
+if ARGV.size < 1 || ARGV.size > 2
+  usage
+end
+
+# Get cmdline args.
+srcdir = fs.makeQualified(Path.new(java.lang.String.new(ARGV[0])))
+
+# Get table name
+tableName = nil
+if ARGV.size > 1
+  tableName = ARGV[1]
+  raise IOError.new("Not supported yet")
+else
+  # If none provided use dirname
+  tableName = srcdir.getName()
+end
+HTableDescriptor.isLegalTableName(tableName.to_java_bytes)
+
+# Figure locations under hbase.rootdir 
+# Move directories into place; be careful not to overwrite.
+rootdir = FSUtils.getRootDir(c)
+tableDir = fs.makeQualified(Path.new(rootdir, tableName))
+
+# If a directory is currently in place, move it aside.
+if srcdir.equals(tableDir)
+  LOG.info("Source directory is in place under hbase.rootdir: " + srcdir.toString());
+elsif fs.exists(tableDir)
+  movedTableName = tableName + "." + java.lang.System.currentTimeMillis().to_s
+  movedTableDir = Path.new(rootdir, java.lang.String.new(movedTableName))
+  LOG.warn("Moving " + tableDir.toString() + " aside as " + movedTableDir.toString());
+  raise IOError.new("Failed move of " + tableDir.toString()) unless fs.rename(tableDir, movedTableDir)
+  LOG.info("Moving " + srcdir.toString() + " to " + tableDir.toString());
+  raise IOError.new("Failed move of " + srcdir.toString()) unless fs.rename(srcdir, tableDir)
+end
+
+# Clean mentions of table from .META.
+# Scan the .META. and remove all lines that begin with tablename
+LOG.info("Deleting mention of " + tableName + " from .META.")
+metaTable = HTable.new(c, HConstants::META_TABLE_NAME)
+scan = Scan.new(tableName.to_java_bytes)
+scanner = metaTable.getScanner(scan)
+# Use java.lang.String for comparisons; Ruby's String behaves differently here.
+tableNameStr = java.lang.String.new(tableName)
+while (result = scanner.next())
+  rowid = Bytes.toString(result.getRow())
+  rowidStr = java.lang.String.new(rowid)
+  if not rowidStr.startsWith(tableNameStr)
+    # Gone too far, break
+    break
+  end
+  LOG.info("Deleting row from catalog: " + rowid);
+  d = Delete.new(result.getRow())
+  metaTable.delete(d)
+end
+scanner.close()
+
+# Now, walk the table and per region, add an entry
+LOG.info("Walking " + srcdir.toString() + " adding regions to catalog table")
+statuses = fs.listStatus(srcdir)
+for status in statuses
+  next unless status.isDir()
+  next if status.getPath().getName() == "compaction.dir"
+  regioninfofile =  Path.new(status.getPath(), ".regioninfo")
+  unless fs.exists(regioninfofile)
+    LOG.warn("Missing .regioninfo: " + regioninfofile.toString())
+    next
+  end
+  is = fs.open(regioninfofile) 
+  hri = HRegionInfo.new()
+  hri.readFields(is)
+  is.close() 
+  # TODO: Need to redo table descriptor with passed table name and then recalculate the region encoded names.
+  p = Put.new(hri.getRegionName())
+  p.add(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER, Writables.getBytes(hri))
+  metaTable.put(p)
+  LOG.info("Added to catalog: " + hri.toString())
+end

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/hbase
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/hbase?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/hbase (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/hbase Wed Nov 25 22:30:29 2009
@@ -188,16 +188,20 @@
   CLASS="org.jruby.Main ${HBASE_HOME}/bin/hirb.rb"
 elif [ "$COMMAND" = "master" ] ; then
   CLASS='org.apache.hadoop.hbase.master.HMaster'
+  HBASE_OPTS="$HBASE_OPTS $HBASE_MASTER_OPTS"
 elif [ "$COMMAND" = "regionserver" ] ; then
   CLASS='org.apache.hadoop.hbase.regionserver.HRegionServer'
+  HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS"
 elif [ "$COMMAND" = "rest" ] ; then
   CLASS='org.apache.hadoop.hbase.rest.Dispatcher'
 elif [ "$COMMAND" = "thrift" ] ; then
   CLASS='org.apache.hadoop.hbase.thrift.ThriftServer'
+  HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT_OPTS"
 elif [ "$COMMAND" = "migrate" ] ; then
   CLASS='org.apache.hadoop.hbase.util.Migrate'
 elif [ "$COMMAND" = "zookeeper" ] ; then
   CLASS='org.apache.hadoop.hbase.zookeeper.HQuorumPeer'
+  HBASE_OPTS="$HBASE_OPTS $HBASE_ZOOKEEPER_OPTS"
 else
   CLASS=$COMMAND
 fi

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/hirb.rb
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/hirb.rb?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/hirb.rb (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/hirb.rb Wed Nov 25 22:30:29 2009
@@ -71,7 +71,8 @@
   ARGV.delete(arg)
 end
 # Presume console format.
-@formatter = Formatter::Console.new(STDOUT, format_width)
+# Formatter takes an :output_stream parameter, if you don't want STDOUT.
+@formatter = Formatter::Console.new(:format_width => format_width)
 # TODO, etc.  @formatter = Formatter::XHTML.new(STDOUT)
 
 # Setup the HBase module.  Create a configuration.
@@ -100,9 +101,6 @@
 promoteConstants(org.apache.hadoop.hbase.HTableDescriptor.constants)
 promoteConstants(HBase.constants)
 
-# If script2run, try running it.  Will go on to run the shell unless
-# script calls 'exit' or 'exit 0' or 'exit errcode'.
-load(script2run) if script2run
 
 # Start of the hbase shell commands.
 
@@ -248,6 +246,12 @@
            hbase> scan '.META.', {COLUMNS => 'info:regioninfo'}
            hbase> scan 't1', {COLUMNS => ['c1', 'c2'], LIMIT => 10, \\
              STARTROW => 'xyz'}
+           
+           For experts, there is an additional option -- CACHE_BLOCKS -- which
+           switches block caching for the scanner on (true) or off (false).  By
+           default it is enabled.  Examples:
+           
+           hbase> scan 't1', {COLUMNS => ['c1', 'c2'], CACHE_BLOCKS => false}
 
  status    Show cluster status. Can be 'summary', 'simple', or 'detailed'. The
            default is 'summary'. Examples:
@@ -422,6 +426,12 @@
   admin().split(tableNameOrRegionName)
 end
 
+
+# If script2run, try running it.  Will go on to run the shell unless
+# script calls 'exit' or 'exit 0' or 'exit errcode'.
+load(script2run) if script2run
+
+
 # Output a banner message that tells users where to go for help
 puts <<HERE
 HBase Shell; enter 'help<RETURN>' for list of supported commands.

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/loadtable.rb
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/loadtable.rb?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/loadtable.rb (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/bin/loadtable.rb Wed Nov 25 22:30:29 2009
@@ -0,0 +1,131 @@
+# Script that takes over from org.apache.hadoop.hbase.mapreduce.HFileOutputFormat.
+# Pass it the output directory of HFileOutputFormat. It will read the passed files,
+# move them into place, and update the catalog table appropriately.  Warning:
+# it will overwrite anything that already exists for the passed table.
+# It expects HBase to be up and running so it can insert table info.
+#
+# To see usage for this script, run: 
+#
+#  ${HBASE_HOME}/bin/hbase org.jruby.Main loadtable.rb
+#
+include Java
+import java.util.TreeMap
+import org.apache.hadoop.hbase.client.HTable
+import org.apache.hadoop.hbase.client.Put
+import org.apache.hadoop.hbase.util.FSUtils
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.hadoop.hbase.util.Writables
+import org.apache.hadoop.hbase.HConstants
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.HRegionInfo
+import org.apache.hadoop.hbase.HTableDescriptor
+import org.apache.hadoop.hbase.HColumnDescriptor
+import org.apache.hadoop.hbase.HRegionInfo
+import org.apache.hadoop.hbase.io.hfile.HFile
+import org.apache.hadoop.fs.Path
+import org.apache.hadoop.fs.FileSystem
+import org.apache.hadoop.mapred.OutputLogFilter
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "loadtable"
+
+# Print usage for this script
+def usage
+  puts 'Usage: %s.rb TABLENAME HFILEOUTPUTFORMAT_OUTPUT_DIR' % NAME
+  exit!
+end
+
+# Raise an exception unless the passed 'dir' exists and is a directory
+def isDirExists(fs, dir)
+  raise IOError.new("Does not exit: " + dir.toString()) unless fs.exists(dir)
+  raise IOError.new("Not a directory: " + dir.toString()) unless fs.isDirectory(dir)
+end
+
+# Check arguments
+if ARGV.size != 2
+  usage
+end
+
+# Check that a valid table name was passed.
+tableName = HTableDescriptor.isLegalTableName(ARGV[0].to_java_bytes)
+outputdir = Path.new(ARGV[1])
+
+# Get configuration to use.
+c = HBaseConfiguration.new()
+# Get a logger and a metautils instance.
+LOG = LogFactory.getLog(NAME)
+
+# Set the hadoop filesystem configuration using the hbase.rootdir.
+# Otherwise we'd always use localhost even though the hbase.rootdir
+# might be pointing at an hdfs location.
+c.set("fs.default.name", c.get(HConstants::HBASE_DIR))
+fs = FileSystem.get(c)
+
+# If hfiles directory does not exist, exit.
+isDirExists(fs, outputdir)
+# Create table dir if it doesn't exist.
+rootdir = FSUtils.getRootDir(c)
+tableDir = Path.new(rootdir, Path.new(Bytes.toString(tableName)))
+fs.mkdirs(tableDir) unless fs.exists(tableDir)
+
+# Start. Per hfile, move it, and insert an entry in catalog table.
+families = fs.listStatus(outputdir, OutputLogFilter.new())
+raise IOError.new("Only one column family is supported") if families.length > 1
+# Read meta on all files. Put in map keyed by start key.
+map = TreeMap.new(Bytes::ByteArrayComparator.new())
+family = families[0]
+# Make sure this subdir exists under table
+hfiles = fs.listStatus(family.getPath())
+LOG.info("Found " + hfiles.length.to_s + " hfiles");
+count = 0
+for hfile in hfiles
+  reader = HFile::Reader.new(fs, hfile.getPath(), nil, false)
+  begin
+    fileinfo = reader.loadFileInfo() 
+    firstkey = reader.getFirstKey()
+    # First key is row/column/ts.  We just want the row part.
+    rowlen = Bytes.toShort(firstkey)
+    firstkeyrow = firstkey[2, rowlen] 
+    LOG.info(count.to_s + " read firstkey of " +
+      Bytes.toString(firstkeyrow) + " from " + hfile.getPath().toString())
+    map.put(firstkeyrow, [hfile, fileinfo])
+    count = count + 1
+  ensure
+    reader.close()
+  end
+end
+# Now I have sorted list of fileinfo+paths.  Start insert.
+# Get a client on catalog table.
+meta = HTable.new(c, HConstants::META_TABLE_NAME)
+# There is no way to tell from the hfile how it is compressed.
+# Using all defaults. Change the column or table attributes manually
+# after loading if something else is wanted.
+familyName = family.getPath().getName()
+hcd = HColumnDescriptor.new(familyName)
+htd = HTableDescriptor.new(tableName)
+htd.addFamily(hcd)
+previouslastkey = HConstants::EMPTY_START_ROW
+count = map.size()
+for i in map.descendingIterator()
+  tuple = map.get(i)
+  startkey = i
+  count = count - 1
+  # If last time through loop, set start row as EMPTY_START_ROW
+  startkey = HConstants::EMPTY_START_ROW unless count > 0
+  # Next time around, lastkey is this startkey
+  hri = HRegionInfo.new(htd, startkey, previouslastkey)  
+  previouslastkey = startkey 
+  LOG.info(hri.toString())
+  hfile = tuple[0].getPath()
+  rdir = Path.new(Path.new(tableDir, hri.getEncodedName().to_s), familyName)
+  fs.mkdirs(rdir)
+  tgt = Path.new(rdir, hfile.getName())
+  fs.rename(hfile, tgt)
+  LOG.info("Moved " + hfile.toString() + " to " + tgt.toString())
+  p = Put.new(hri.getRegionName())
+  p.add(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER, Writables.getBytes(hri))
+  meta.put(p)
+  LOG.info("Inserted " + hri.toString())
+end

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/build.xml?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/build.xml (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/build.xml Wed Nov 25 22:30:29 2009
@@ -18,7 +18,7 @@
 -->
 
 <project name="hbase" default="jar">
-  <property name="version" value="0.20.0-0.18.3"/>
+  <property name="version" value="0.20.2-0.18.3"/>
   <property name="Name" value="HBase"/>
   <property name="final.name" value="hbase-${version}"/>
   <property name="year" value="2009"/>
@@ -274,7 +274,7 @@
   <!-- Package                                                            -->
   <!-- ================================================================== -->
   <target name="package" depends="jar,javadoc" 
-      description="Build distribution; must define -Djava5.home and -Dforrest.home so can generate doc"> 
+      description="Build distribution; must define -Dforrest.home so can generate doc"> 
     <echo message="Be sure to run 'docs' target before this one else package will be missing site documentation" />
     <mkdir dir="${dist.dir}"/>
     <copy todir="${dist.dir}" includeEmptyDirs="false" flatten="true">
@@ -383,9 +383,7 @@
   <!-- ================================================================== -->
   <target name="docs" depends="forrest.check"
       description="Generate forrest-based documentation. To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line." if="forrest.home">
-    <exec dir="${docs.src}" executable="${forrest.home}/bin/forrest" failonerror="true" >
-      <env key="JAVA_HOME" value="${java5.home}"/>
-    </exec>
+    <exec dir="${docs.src}" executable="${forrest.home}/bin/forrest" failonerror="true" />
     <copy todir="${build.docs}">
       <fileset dir="${docs.src}/build/site/" />
     </copy>
@@ -397,14 +395,10 @@
     </copy>
   </target>
 
-  <target name="forrest.check" unless="forrest.home" depends="java5.check">
+  <target name="forrest.check" unless="forrest.home">
     <fail message="'forrest.home' is not defined. Please pass -Dforrest.home=&lt;base of Apache Forrest installation&gt; to Ant on the command-line." />
   </target>
 
-  <target name="java5.check" unless="java5.home">
-    <fail message="'java5.home' is not defined.  Forrest requires Java 5.  Please pass -Djava5.home=&lt;base of Java 5 distribution&gt; to Ant on the command-line." />
-  </target>
-
   <!-- Javadoc -->
   <target name="javadoc" description="Generate javadoc">
     <mkdir dir="${build.javadoc}"/>

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/hbase-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/hbase-default.xml?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/hbase-default.xml (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/hbase-default.xml Wed Nov 25 22:30:29 2009
@@ -385,15 +385,6 @@
       </description>
   </property>
   <property>
-    <name>hbase.hstore.blockCache.blockSize</name>
-    <value>16384</value>
-    <description>The size of each block in the block cache.
-    Enable blockcaching on a per column family basis; see the BLOCKCACHE setting
-    in HColumnDescriptor.  Blocks are kept in a java Soft Reference cache so are
-    let go when high pressure on memory.  Block caching is not enabled by default.
-    </description>
-  </property>
-  <property>
     <name>hbase.hash.type</name>
     <value>murmur</value>
     <description>The hashing algorithm for use in HashFunction. Two values are

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/log4j.properties
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/log4j.properties?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/log4j.properties (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/log4j.properties Wed Nov 25 22:30:29 2009
@@ -42,5 +42,5 @@
 
 log4j.logger.org.apache.zookeeper=INFO
 #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-#log4j.logger.org.apache.hadoop.hbase=DEBUG
+log4j.logger.org.apache.hadoop.hbase=DEBUG
 #log4j.logger.org.apache.hadoop.dfs=DEBUG

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/lib/zookeeper-3.2.1.jar
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/lib/zookeeper-3.2.1.jar?rev=884310&view=auto
==============================================================================
Binary file - no diff available.

Propchange: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/lib/zookeeper-3.2.1.jar
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/build-contrib.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/build-contrib.xml?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/build-contrib.xml (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/build-contrib.xml Wed Nov 25 22:30:29 2009
@@ -21,7 +21,7 @@
 
 <project name="hbasebuildcontrib">
 
-  <property name="version" value="0.20.0"/>
+  <property name="version" value="0.20.2-0.18.3"/>
   <property name="name" value="${ant.project.name}"/>
   <property name="root" value="${basedir}"/>
 

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/README.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/README.txt?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/README.txt (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/README.txt Wed Nov 25 22:30:29 2009
@@ -0,0 +1,70 @@
+HBase EC2
+
+This collection of scripts allows you to run HBase clusters on Amazon.com's Elastic Compute Cloud (EC2) service described at:
+
+  http://aws.amazon.com/ec2
+  
+To get help, type the following in a shell:
+  
+  bin/hbase-ec2
+
+You need both the EC2 API and AMI tools 
+
+  http://developer.amazonwebservices.com/connect/entry.jspa?externalID=351
+
+  http://developer.amazonwebservices.com/connect/entry.jspa?externalID=368&categoryID=88
+
+installed and on the path. 
+
+When setting up keypairs on EC2, be sure to name your keypair 'root'.
+
+Quick Start:
+
+1) Download and unzip the EC2 AMI and API tools zipfiles.
+
+   For Ubuntu, "apt-get install ec2-ami-tools ec2-api-tools".
+
+2) Put the tools on the path and set EC2_HOME in the environment to point to
+   the top level directory of the API tools.
+
+3) Configure src/contrib/ec2/bin/hbase-ec2-env.sh
+
+   Fill in AWS_ACCOUNT_ID with your EC2 account number.
+
+   Fill in AWS_ACCESS_KEY_ID with your EC2 access key.
+
+   Fill in AWS_SECRET_ACCESS_KEY with your EC2 secret access key.
+
+   Fill in EC2_PRIVATE_KEY with the location of your AWS private key file --
+   must begin with 'pk' and end with '.pem'.
+
+   Fill in EC2_CERT with the location of your AWS certificate -- must begin
+   with 'cert' and end with '.pem'.
+
+   Make sure the private part of your AWS SSH keypair exists in the same
+   directory as EC2_PRIVATE_KEY with the name id_rsa_root.
+
+4) ./bin/hbase-ec2 launch-cluster <name> <nr-zoos> <nr-slaves>, e.g.
+
+       ./bin/hbase-ec2 launch-cluster testcluster 3 3
+
+5) Once the above command has finished without error, run ./bin/hbase-ec2 login
+   <name>, e.g.
+
+       ./bin/hbase-ec2 login testcluster
+
+6) Check that the cluster is up and functional:
+
+       hbase shell
+       > status 'simple'
+
+   You should see something like:
+
+       3 live servers
+         domU-12-31-39-09-75-11.compute-1.internal:60020 1258653694915
+           requests=0, regions=1, usedHeap=29, maxHeap=987
+         domU-12-31-39-01-AC-31.compute-1.internal:60020 1258653709041
+           requests=0, regions=1, usedHeap=29, maxHeap=987
+         domU-12-31-39-01-B0-91.compute-1.internal:60020 1258653706411
+           requests=0, regions=0, usedHeap=27, maxHeap=987
+       0 dead servers

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/cmd-hbase-cluster
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/cmd-hbase-cluster?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/cmd-hbase-cluster (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/cmd-hbase-cluster Wed Nov 25 22:30:29 2009
@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run commands on master or specified node of a running HBase EC2 cluster.
+
+# if no args specified, show usage
+if [ $# = 0 ]; then
+  echo "Command required!"
+  exit 1
+fi
+
+# get arguments
+COMMAND="$1"
+shift
+# get group
+CLUSTER="$1"
+shift
+
+if [ -z $CLUSTER ]; then
+  echo "Cluster name or instance id required!"
+  exit 1
+fi
+
+# Import variables
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+. "$bin"/hbase-ec2-env.sh
+
+if [[ "$CLUSTER" = "i-*" ]]; then
+  HOST=`ec2-describe-instances $TOOL_OPTS $CLUSTER | grep running | awk '{print $4}'`
+  [ -z $HOST ] && echo "Instance still pending or no longer running: $CLUSTER" && exit 1
+else
+  [ ! -f $MASTER_IP_PATH ] && echo "Wrong group name, or cluster not launched! $CLUSTER" && exit 1
+  HOST=`cat $MASTER_IP_PATH`
+fi
+
+if [ "$COMMAND" = "login" ] ; then
+  echo "Logging in to host $HOST."
+  ssh $SSH_OPTS "root@$HOST"
+elif [ "$COMMAND" = "proxy" ] ; then
+  echo "Proxying to host $HOST via local port 6666"
+  echo "Gangia:     http://$HOST/ganglia"
+  echo "JobTracker: http://$HOST:50030/"
+  echo "NameNode:   http://$HOST:50070/"
+  ssh $SSH_OPTS -D 6666 -N "root@$HOST"
+elif [ "$COMMAND" = "push" ] ; then
+  echo "Pushing $1 to host $HOST."
+  scp $SSH_OPTS -r $1 "root@$HOST:"
+elif [ "$COMMAND" = "screen" ] ; then
+  echo "Logging in and attaching screen on host $HOST."
+  ssh $SSH_OPTS -t "root@$HOST" 'screen -D -R'
+else
+  echo "Executing command on host $HOST."
+  ssh $SSH_OPTS -t "root@$HOST" "$COMMAND"
+fi
\ No newline at end of file

Propchange: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/cmd-hbase-cluster
------------------------------------------------------------------------------
    svn:executable = *

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/create-hbase-image
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/create-hbase-image?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/create-hbase-image (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/create-hbase-image Wed Nov 25 22:30:29 2009
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Create a HBase AMI.
+# Inspired by Jonathan Siegel's EC2 script (http://blogsiegel.blogspot.com/2006/08/sandboxing-amazon-ec2.html)
+
+# allow override of INSTANCE_TYPE from the command line 
+[ ! -z $1 ] && INSTANCE_TYPE=$1
+
+# Import variables
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+. "$bin"/hbase-ec2-env.sh
+
+echo "INSTANCE_TYPE is $INSTANCE_TYPE."
+echo "ARCH is $ARCH."
+
+AMI_IMAGE=`ec2-describe-images $TOOL_OPTS -a | grep $S3_BUCKET | grep hbase | grep $HBASE_VERSION | grep $ARCH | grep available | awk '{print $2}'`
+
+[ ! -z $AMI_IMAGE ] && echo "AMI already registered, use: ec2-deregister $AMI_IMAGE" && exit 1
+
+echo "Starting a AMI with ID $BASE_AMI_IMAGE."
+OUTPUT=`ec2-run-instances $BASE_AMI_IMAGE $TOOL_OPTS -k root -t $INSTANCE_TYPE`
+BOOTING_INSTANCE=`echo $OUTPUT | awk '{print $6}'`
+
+echo "Instance is $BOOTING_INSTANCE."
+
+echo "Polling server status"
+while true; do
+  printf "."
+  HOSTNAME=`ec2-describe-instances $TOOL_OPTS $BOOTING_INSTANCE | grep running | awk '{print $4}'`
+  if [ ! -z $HOSTNAME ]; then
+    break;
+  fi
+  sleep 1
+done
+echo "The server is available at $HOSTNAME."
+while true; do
+  REPLY=`ssh $SSH_OPTS "root@$HOSTNAME" 'echo "hello"'`
+  if [ ! -z $REPLY ]; then
+   break;
+  fi
+  sleep 5
+done
+
+echo "Copying scripts."
+
+# Copy setup scripts
+scp $SSH_OPTS "$bin"/hbase-ec2-env.sh "root@$HOSTNAME:/mnt"
+scp $SSH_OPTS "$bin"/image/create-hbase-image-remote "root@$HOSTNAME:/mnt"
+scp $SSH_OPTS "$bin"/image/ec2-run-user-data "root@$HOSTNAME:/etc/init.d"
+
+# Copy private key and certificate (for bundling image)
+scp $SSH_OPTS $EC2_PRIVATE_KEY "root@$HOSTNAME:/mnt"
+scp $SSH_OPTS $EC2_CERT "root@$HOSTNAME:/mnt"
+
+# Connect to it
+ssh $SSH_OPTS "root@$HOSTNAME" "sh -c \"INSTANCE_TYPE=$INSTANCE_TYPE /mnt/create-hbase-image-remote\""
+
+# Register image
+ec2-register $TOOL_OPTS $S3_BUCKET/hbase-$HBASE_VERSION-$ARCH.manifest.xml
+
+echo "Terminate with: ec2-terminate-instances $BOOTING_INSTANCE"

Propchange: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/create-hbase-image
------------------------------------------------------------------------------
    svn:executable = *

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/delete-hbase-cluster
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/delete-hbase-cluster?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/delete-hbase-cluster (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/delete-hbase-cluster Wed Nov 25 22:30:29 2009
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Delete the security groups and local files associated with a cluster.
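+#
+# Usage sketch ("testcluster" is an illustrative name):
+#   delete-hbase-cluster testcluster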
+
+if [ -z $1 ]; then
+  echo "Cluster name required!"
+  exit 1
+fi
+
+CLUSTER=$1
+
+# Import variables
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+. "$bin"/hbase-ec2-env.sh
+
+# Check for running instances belonging to this cluster
+CLUSTERS=`ec2-describe-instances $TOOL_OPTS | \
+  awk '"RESERVATION" == $1 && $4 ~ /-master$/, "INSTANCE" == $1' | tr '\n' '\t' | \
+  grep "$CLUSTER" | grep running | cut -f4 | rev | cut -d'-' -f2- | rev`
+  
+if [ -n "$CLUSTERS" ]; then
+  echo "Cluster $CLUSTER has running instances. Please terminate them first."
+  exit 0
+fi
+
+"$bin"/revoke-hbase-cluster-secgroups $CLUSTER
+
+rm -f $MASTER_IP_PATH
+rm -f $MASTER_PRIVATE_IP_PATH
+rm -f $MASTER_ZONE_PATH
+rm -f $ZOOKEEPER_QUORUM_PATH

Propchange: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/delete-hbase-cluster
------------------------------------------------------------------------------
    svn:executable = *

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2 (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2 Wed Nov 25 22:30:29 2009
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+# if no args specified, show usage
+if [ $# = 0 ]; then
+  echo "Usage: hbase-ec2 COMMAND"
+  echo "where COMMAND is one of:"
+  echo "  list                                  list all running HBase EC2 clusters"
+  echo "  launch-cluster <name> <slaves> <zoos> launch a HBase cluster"
+  echo "  launch-zookeeper <name> <zoos>        launch the zookeeper quorum"
+  echo "  launch-master  <name>                 launch or find a cluster master"
+  echo "  launch-slaves  <name> <slaves>        launch the cluster slaves"
+  echo "  terminate-cluster  <name>             terminate all HBase EC2 instances"
+  echo "  delete-cluster <name>                 clean up after a terminated cluster"
+  echo "  login  <name|instance id>             login to the master node"
+  echo "  screen <name|instance id>             start or attach 'screen' on the master"
+  echo "  proxy  <name|instance id>             start a socks proxy on localhost:6666"
+  echo "  push   <name> <file>                  scp a file to the master node"
+  echo "  <shell cmd> <group|instance id>       execute a command on the master"
+  echo "  create-image                          create a HBase AMI"
+  exit 1
+fi
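+
+# A typical session might look like the following (a sketch only; the cluster
+# name and node counts are illustrative):
+#   hbase-ec2 launch-cluster testcluster 3 3   # 3 slaves, 3 zookeeper nodes
+#   hbase-ec2 login testcluster
+#   hbase-ec2 terminate-cluster testcluster
+#   hbase-ec2 delete-cluster testcluster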
+
+# get arguments
+COMMAND="$1"
+shift
+
+if [ "$COMMAND" = "create-image" ] ; then
+  . "$bin"/create-hbase-image $*
+elif [ "$COMMAND" = "launch-cluster" ] ; then
+  . "$bin"/launch-hbase-cluster $*
+elif [ "$COMMAND" = "launch-zookeeper" ] ; then
+  . "$bin"/launch-hbase-zookeeper $*
+elif [ "$COMMAND" = "launch-master" ] ; then
+  . "$bin"/launch-hbase-master $*
+elif [ "$COMMAND" = "launch-slaves" ] ; then
+  . "$bin"/launch-hbase-slaves $*
+elif [ "$COMMAND" = "delete-cluster" ] ; then
+  . "$bin"/delete-hbase-cluster $*
+elif [ "$COMMAND" = "terminate-cluster" ] ; then
+  . "$bin"/terminate-hbase-cluster $*
+elif [ "$COMMAND" = "list" ] ; then
+  . "$bin"/list-hbase-clusters
+else
+  . "$bin"/cmd-hbase-cluster "$COMMAND" $*
+fi

Propchange: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2
------------------------------------------------------------------------------
    svn:executable = *

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-env.sh
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-env.sh?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-env.sh (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-env.sh Wed Nov 25 22:30:29 2009
@@ -0,0 +1,114 @@
+# Set environment variables for running HBase on Amazon EC2 here. All are required.
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Your Amazon Account Number.
+AWS_ACCOUNT_ID=
+
+# Your Amazon AWS access key.
+AWS_ACCESS_KEY_ID=
+
+# Your Amazon AWS secret access key.
+AWS_SECRET_ACCESS_KEY=
+
+# Your AWS private key file -- must begin with 'pk' and end with '.pem'
+EC2_PRIVATE_KEY=
+
+# Your AWS certificate file -- must begin with 'cert' and end with '.pem'
+EC2_CERT=
+
+# Location of EC2 keys.
+# The default setting is probably OK if you set up EC2 following the Amazon Getting Started guide.
+EC2_KEYDIR=`dirname "$EC2_PRIVATE_KEY"`
+
+# Where your EC2 private key is stored (created when following the Amazon Getting Started guide).
+# You need to change this if you don't store this with your other EC2 keys.
+PRIVATE_KEY_PATH=`echo "$EC2_KEYDIR"/"id_rsa_root"`
+
+# SSH options used when connecting to EC2 instances.
+SSH_OPTS=`echo -q -i "$PRIVATE_KEY_PATH" -o StrictHostKeyChecking=no -o ServerAliveInterval=30`
+
+# Global tool options
+TOOL_OPTS=`echo -K "$EC2_PRIVATE_KEY" -C "$EC2_CERT"`
+
+# The version of HBase to use.
+HBASE_VERSION=0.20.2-0.18.3
+
+# The version of Hadoop to use.
+# HADOOP_VERSION=$HBASE_VERSION
+HADOOP_VERSION=0.18.3
+
+# The Amazon S3 bucket where the HBase AMI is stored.
+# The default value is for public images, so it can be left as-is if you are running a public image.
+# Change this value only if you are creating your own (private) AMI
+# so you can store it in a bucket you own.
+S3_BUCKET=hbase-images
+
+# Enable public access web interfaces
+# XXX -- Generally, you do not want to do this
+ENABLE_WEB_PORTS=false
+
+# The script to run on instance boot.
+USER_DATA_FILE=hbase-ec2-init-remote.sh
+
+# Use only c1.xlarge unless you know what you are doing
+INSTANCE_TYPE=${INSTANCE_TYPE:-c1.xlarge}
+
+# Use only c1.medium unless you know what you are doing
+ZOO_INSTANCE_TYPE=${ZOO_INSTANCE_TYPE:-c1.medium}
+
+# The EC2 master security group name. CLUSTER is set by the calling scripts.
+CLUSTER_MASTER=$CLUSTER-master
+
+# Cached values for a given cluster
+MASTER_PRIVATE_IP_PATH=~/.hbase-private-$CLUSTER_MASTER
+MASTER_IP_PATH=~/.hbase-$CLUSTER_MASTER
+MASTER_ZONE_PATH=~/.hbase-zone-$CLUSTER_MASTER
+
+# The Zookeeper EC2 group name. CLUSTER is set by calling scripts.
+CLUSTER_ZOOKEEPER=$CLUSTER-zookeeper
+ZOOKEEPER_QUORUM_PATH=~/.hbase-quorum-$CLUSTER_ZOOKEEPER
+
+#
+# The following variables are only used when creating an AMI.
+#
+
+# The version number of the installed JDK.
+JAVA_VERSION=1.6.0_16
+
+# SUPPORTED_ARCHITECTURES = ['i386', 'x86_64']
+# The download URL for the Sun JDK. Visit http://java.sun.com/javase/downloads/index.jsp and get the URL for the "Linux self-extracting file".
+if [ "$INSTANCE_TYPE" = "m1.small" -o "$INSTANCE_TYPE" = "c1.medium" ]; then
+  ARCH='i386'
+  BASE_AMI_IMAGE="ami-48aa4921"  # ec2-public-images/fedora-8-i386-base-v1.10.manifest.xml
+  #AMI_IMAGE="ami-c644a7af"
+  JAVA_BINARY_URL='http://iridiant.s3.amazonaws.com/jdk/jdk-6u16-linux-i586.bin'
+else
+  ARCH='x86_64'
+  BASE_AMI_IMAGE="ami-f61dfd9f"  # ec2-public-images/fedora-8-x86_64-base-v1.10.manifest.xml
+  #AMI_IMAGE="ami-f244a79b"
+  JAVA_BINARY_URL='http://iridiant.s3.amazonaws.com/jdk/jdk-6u16-linux-x64.bin'
+fi
+
+if [ "$ZOO_INSTANCE_TYPE" = "m1.small" -o "$ZOO_INSTANCE_TYPE" = "c1.medium" ]; then
+  ZOO_ARCH='i386'
+  #ZOO_AMI_IMAGE="ami-c644a7af"
+  ZOO_AMI_IMAGE="ami-c644a7af"
+else
+  ZOO_ARCH='x86_64'
+  #ZOO_AMI_IMAGE="ami-f244a79b"
+  ZOO_AMI_IMAGE="ami-f244a79b"
+fi

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-remote.sh
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-remote.sh?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-remote.sh (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-remote.sh Wed Nov 25 22:30:29 2009
@@ -0,0 +1,217 @@
+#!/usr/bin/env bash
+
+###############################################################################
+# Script that is run on each EC2 instance on boot. It is passed in as the EC2
+# user data, so it should not exceed 16K in size.
+###############################################################################
+
+MASTER_HOST=%MASTER_HOST%
+ZOOKEEPER_QUORUM=%ZOOKEEPER_QUORUM%
+SECURITY_GROUPS=`wget -q -O - http://169.254.169.254/latest/meta-data/security-groups`
+IS_MASTER=`echo $SECURITY_GROUPS | awk '{ a = match ($0, "-master$"); if (a) print "true"; else print "false"; }'`
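+# Heuristic: the instance is treated as a master if its security group list ends
+# in "-master" (the master group is named $CLUSTER-master; see hbase-ec2-env.sh).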
+if [ "$IS_MASTER" = "true" ]; then
+ MASTER_HOST=`wget -q -O - http://169.254.169.254/latest/meta-data/local-hostname`
+fi
+HADOOP_HOME=`ls -d /usr/local/hadoop-*`
+HBASE_HOME=`ls -d /usr/local/hbase-*`
+# HBASE_VERSION is not set in the boot environment; derive it from the install
+# directory so the HADOOP_CLASSPATH entry below points at the right jar.
+HBASE_VERSION=`basename $HBASE_HOME | sed -e 's/^hbase-//'`
+
+###############################################################################
+# Hadoop configuration
+###############################################################################
+
+cat > $HADOOP_HOME/conf/core-site.xml <<EOF
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+<property>
+  <name>hadoop.tmp.dir</name>
+  <value>/mnt/hadoop</value>
+</property>
+<property>
+  <name>fs.default.name</name>
+  <value>hdfs://$MASTER_HOST:8020</value>
+</property>
+</configuration>
+EOF
+
+cat > $HADOOP_HOME/conf/hdfs-site.xml <<EOF
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+<property>
+  <name>fs.default.name</name>
+  <value>hdfs://$MASTER_HOST:8020</value>
+</property>
+</configuration>
+EOF
+
+cat > $HADOOP_HOME/conf/mapred-site.xml <<EOF
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+<property>
+  <name>mapred.job.tracker</name>
+  <value>$MASTER_HOST:8021</value>
+</property>
+</configuration>
+EOF
+
+# Update classpath to include HBase jars and config
+cat >> $HADOOP_HOME/conf/hadoop-env.sh <<EOF
+HADOOP_CLASSPATH="$HBASE_HOME/hbase-${HBASE_VERSION}.jar:$HBASE_HOME/lib/AgileJSON-2009-03-30.jar:$HBASE_HOME/lib/json.jar:$HBASE_HOME/lib/zookeeper-3.2.1.jar:$HBASE_HOME/conf"
+EOF
+
+# Configure Hadoop for Ganglia
+# overwrite hadoop-metrics.properties
+cat > $HADOOP_HOME/conf/hadoop-metrics.properties <<EOF
+dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+dfs.period=10
+dfs.servers=$MASTER_HOST:8649
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.period=10
+jvm.servers=$MASTER_HOST:8649
+mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+mapred.period=10
+mapred.servers=$MASTER_HOST:8649
+EOF
+
+###############################################################################
+# HBase configuration
+###############################################################################
+
+cat > $HBASE_HOME/conf/hbase-site.xml <<EOF
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+<property>
+  <name>hbase.rootdir</name>
+  <value>hdfs://$MASTER_HOST:8020/hbase</value>
+</property>
+<property>
+  <name>hbase.cluster.distributed</name>
+  <value>true</value>
+</property>
+<property>
+  <name>hbase.zookeeper.quorum</name>
+  <value>$ZOOKEEPER_QUORUM</value>
+</property>
+<property>
+  <name>hbase.regionserver.handler.count</name>
+  <value>100</value>
+</property>
+<property>
+  <name>hbase.hregion.memstore.block.multiplier</name>
+  <value>3</value>
+</property>
+<property>
+  <name>hbase.hstore.blockingStoreFiles</name>
+  <value>15</value>
+</property>
+<property>
+  <name>dfs.replication</name>
+  <value>3</value>
+</property>
+<property>
+  <name>dfs.client.block.write.retries</name>
+  <value>100</value>
+</property>
+<property>
+  <name>zookeeper.session.timeout</name>
+  <value>60000</value>
+</property>
+<property>
+  <name>hbase.tmp.dir</name>
+  <value>/mnt/hbase</value>
+</property>
+</configuration>
+EOF
+
+# Override JVM options
+cat >> $HBASE_HOME/conf/hbase-env.sh <<EOF
+export HBASE_MASTER_OPTS="-XX:+UseConcMarkSweepGC -XX:+DoEscapeAnalysis -XX:+AggressiveOpts -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:/mnt/hbase/logs/hbase-master-gc.log"
+export HBASE_REGIONSERVER_OPTS="-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=88 -XX:+DoEscapeAnalysis -XX:+AggressiveOpts -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:/mnt/hbase/logs/hbase-regionserver-gc.log"
+EOF
+
+# Configure HBase for Ganglia
+# overwrite hadoop-metrics.properties
+cat > $HBASE_HOME/conf/hadoop-metrics.properties <<EOF
+dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+dfs.period=10
+dfs.servers=$MASTER_HOST:8649
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.period=10
+hbase.servers=$MASTER_HOST:8649
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.period=10
+jvm.servers=$MASTER_HOST:8649
+EOF
+
+###############################################################################
+# Start services
+###############################################################################
+
+# up open file descriptor limits
+echo "root soft nofile 32768" >> /etc/security/limits.conf
+echo "root hard nofile 32768" >> /etc/security/limits.conf
+
+# up epoll limits
+# ok if this fails, only valid for kernels 2.6.27+
+sysctl -w fs.epoll.max_user_instances=32768 > /dev/null 2>&1
+
+mkdir -p /mnt/hadoop/logs
+mkdir -p /mnt/hbase/logs
+
+[ ! -f /etc/hosts ] &&  echo "127.0.0.1 localhost" > /etc/hosts
+
+# not set on boot
+export USER="root"
+
+if [ "$IS_MASTER" = "true" ]; then
+  # MASTER
+  # Prep Ganglia
+  sed -i -e "s|\( *mcast_join *=.*\)|#\1|" \
+         -e "s|\( *bind *=.*\)|#\1|" \
+         -e "s|\( *mute *=.*\)|  mute = yes|" \
+         -e "s|\( *location *=.*\)|  location = \"master-node\"|" \
+         /etc/gmond.conf
+  mkdir -p /mnt/ganglia/rrds
+  chown -R ganglia:ganglia /mnt/ganglia/rrds
+  rm -rf /var/lib/ganglia; cd /var/lib; ln -s /mnt/ganglia ganglia; cd
+  service gmond start
+  service gmetad start
+  apachectl start
+
+  # only format on first boot
+  [ ! -e /mnt/hadoop/dfs ] && "$HADOOP_HOME"/bin/hadoop namenode -format
+
+  "$HADOOP_HOME"/bin/hadoop-daemon.sh start namenode
+
+  "$HADOOP_HOME"/bin/hadoop-daemon.sh start datanode
+
+  "$HADOOP_HOME"/bin/hadoop-daemon.sh start jobtracker
+
+  sleep 10
+
+  "$HBASE_HOME"/bin/hbase-daemon.sh start master
+
+else
+
+  # SLAVE
+
+  # Prep Ganglia
+  sed -i -e "s|\( *mcast_join *=.*\)|#\1|" \
+         -e "s|\( *bind *=.*\)|#\1|" \
+         -e "s|\(udp_send_channel {\)|\1\n  host=$MASTER_HOST|" \
+         /etc/gmond.conf
+  service gmond start
+
+  "$HADOOP_HOME"/bin/hadoop-daemon.sh start datanode
+
+  "$HBASE_HOME"/bin/hbase-daemon.sh start regionserver
+
+  "$HADOOP_HOME"/bin/hadoop-daemon.sh start tasktracker
+
+fi
+
+# Run this script on next boot
+rm -f /var/ec2/ec2-run-user-data.*

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-zookeeper-remote.sh
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-zookeeper-remote.sh?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-zookeeper-remote.sh (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-zookeeper-remote.sh Wed Nov 25 22:30:29 2009
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+# ZOOKEEPER_QUORUM is set in the environment by the caller
+HBASE_HOME=`ls -d /usr/local/hbase-*`
+
+###############################################################################
+# HBase configuration (Zookeeper)
+###############################################################################
+
+cat > $HBASE_HOME/conf/hbase-site.xml <<EOF
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+<property>
+  <name>hbase.zookeeper.quorum</name>
+  <value>$ZOOKEEPER_QUORUM</value>
+</property>
+<property>
+  <name>zookeeper.session.timeout</name>
+  <value>60000</value>
+</property>
+<property>
+  <name>hbase.zookeeper.property.dataDir</name>
+  <value>/mnt/hbase/zk</value>
+</property>
+<property>
+  <name>hbase.zookeeper.property.maxClientCnxns</name>
+  <value>100</value>
+</property>
+</configuration>
+EOF
+
+###############################################################################
+# Start services
+###############################################################################
+
+# up open file descriptor limits
+echo "root soft nofile 32768" >> /etc/security/limits.conf
+echo "root hard nofile 32768" >> /etc/security/limits.conf
+
+# up epoll limits
+# ok if this fails, only valid for kernels 2.6.27+
+sysctl -w fs.epoll.max_user_instances=32768 > /dev/null 2>&1
+
+mkdir -p /mnt/hbase/logs
+mkdir -p /mnt/hbase/zk
+
+[ ! -f /etc/hosts ] &&  echo "127.0.0.1 localhost" > /etc/hosts
+
+"$HBASE_HOME"/bin/hbase-daemon.sh start zookeeper

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/create-hbase-image-remote
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/create-hbase-image-remote?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/create-hbase-image-remote (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/create-hbase-image-remote Wed Nov 25 22:30:29 2009
@@ -0,0 +1,101 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Create an HBase AMI. Runs on the EC2 instance.
+
+# Import variables
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+. "$bin"/hbase-ec2-env.sh
+
+echo "Remote: INSTANCE_TYPE is $INSTANCE_TYPE."
+echo "Remote: ARCH is $ARCH."
+
+# Remove environment script since it contains sensitive information
+rm -f "$bin"/hbase-ec2-env.sh
+
+# Install Java
+echo "Downloading and installing java binary."
+cd /usr/local
+wget -nv -O java.bin $JAVA_BINARY_URL
+sh java.bin
+rm -f java.bin
+
+# Install tools
+echo "Installing rpms."
+yum -y update
+yum -y install rsync lynx screen ganglia-gmetad ganglia-gmond ganglia-web httpd php
+yum -y clean all
+
+# Install Hadoop
+echo "Installing Hadoop $HADOOP_VERSION."
+cd /usr/local
+wget -nv http://archive.apache.org/dist/hadoop/core/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz
+[ ! -f hadoop-$HADOOP_VERSION.tar.gz ] && wget -nv http://www.apache.org/dist/hadoop/core/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz
+tar xzf hadoop-$HADOOP_VERSION.tar.gz
+rm -f hadoop-$HADOOP_VERSION.tar.gz
+
+# Configure Hadoop
+sed -i \
+  -e "s|# export JAVA_HOME=.*|export JAVA_HOME=/usr/local/jdk${JAVA_VERSION}|" \
+  -e 's|# export HADOOP_LOG_DIR=.*|export HADOOP_LOG_DIR=/mnt/hadoop/logs|' \
+  -e 's|# export HADOOP_SLAVE_SLEEP=.*|export HADOOP_SLAVE_SLEEP=1|' \
+  -e 's|# export HADOOP_OPTS=.*|export HADOOP_OPTS=-server|' \
+  /usr/local/hadoop-$HADOOP_VERSION/conf/hadoop-env.sh
+
+# Install HBase
+echo "Installing HBase $HBASE_VERSION."
+cd /usr/local
+wget -nv http://iridiant.s3.amazonaws.com/hbase/hbase-$HBASE_VERSION.tar.gz
+tar xzf hbase-$HBASE_VERSION.tar.gz
+rm -f hbase-$HBASE_VERSION.tar.gz
+
+# Configure HBase
+sed -i \
+  -e "s|# export JAVA_HOME=.*|export JAVA_HOME=/usr/local/jdk${JAVA_VERSION}|" \
+  -e 's|# export HBASE_OPTS=.*|export HBASE_OPTS="$HBASE_OPTS -server -XX:+UseConcMarkSweepGC -XX:+DoEscapeAnalysis -XX:+AggressiveOpts"|' \
+  -e 's|# export HBASE_LOG_DIR=.*|export HBASE_LOG_DIR=/mnt/hbase/logs|' \
+  -e 's|# export HBASE_SLAVE_SLEEP=.*|export HBASE_SLAVE_SLEEP=1|' \
+  /usr/local/hbase-$HBASE_VERSION/conf/hbase-env.sh
+
+# Run user data as script on instance startup
+chmod +x /etc/init.d/ec2-run-user-data
+echo "/etc/init.d/ec2-run-user-data" >> /etc/rc.d/rc.local
+
+# Setup root user bash environment
+echo "export JAVA_HOME=/usr/local/jdk${JAVA_VERSION}" >> /root/.bash_profile
+echo "export HADOOP_HOME=/usr/local/hadoop-${HADOOP_VERSION}" >> /root/.bash_profile
+echo "export HBASE_HOME=/usr/local/hbase-${HBASE_VERSION}" >> /root/.bash_profile
+echo 'export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HBASE_HOME/bin:$PATH' >> /root/.bash_profile
+
+# Configure networking.
+# Delete SSH authorized_keys since it includes the key it was launched with. (Note that it is re-populated when an instance starts.)
+rm -f /root/.ssh/authorized_keys
+# Ensure logging in to new hosts is seamless.
+echo '    StrictHostKeyChecking no' >> /etc/ssh/ssh_config
+
+# Bundle and upload image
+cd ~root
+# Don't need to delete .bash_history since it isn't written until exit.
+df -h
+ec2-bundle-vol -d /mnt -k /mnt/pk*.pem -c /mnt/cert*.pem -u $AWS_ACCOUNT_ID -s 3072 -p hbase-$HBASE_VERSION-$ARCH -r $ARCH
+
+ec2-upload-bundle -b $S3_BUCKET -m /mnt/hbase-$HBASE_VERSION-$ARCH.manifest.xml -a $AWS_ACCESS_KEY_ID -s $AWS_SECRET_ACCESS_KEY
+
+# End
+echo Done

Propchange: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/create-hbase-image-remote
------------------------------------------------------------------------------
    svn:executable = *

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/ec2-run-user-data
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/ec2-run-user-data?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/ec2-run-user-data (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/image/ec2-run-user-data Wed Nov 25 22:30:29 2009
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# ec2-run-user-data - Run instance user-data if it looks like a script.
+#
+# Only retrieves and runs the user-data script once per instance.  If
+# you want the user-data script to run again (e.g., on the next boot)
+# then add this command in the user-data script:
+#   rm -f /var/ec2/ec2-run-user-data.*
+#
+# History:
+#   2008-05-16 Eric Hammond <ehammond@thinksome.com>
+#   - Initial version including code from Kim Scheibel, Jorge Oliveira
+#   2008-08-06 Tom White
+#   - Updated to use mktemp on fedora
+#
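+# Example user-data payload (illustrative; any script whose first line begins
+# with "#!" will be run once per instance):
+#   #!/bin/bash
+#   echo "bootstrapping" | logger -t user-data
+#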
+
+prog=$(basename $0)
+logger="logger -t $prog"
+curl="curl --retry 3 --silent --show-error --fail"
+instance_data_url=http://169.254.169.254/2008-02-01
+
+# Wait until networking is up on the EC2 instance.
+perl -MIO::Socket::INET -e '
+ until(new IO::Socket::INET("169.254.169.254:80")){print"Waiting for network...\n";sleep 1}
+' | $logger
+
+# Exit if we have already run on this instance (e.g., previous boot).
+ami_id=$($curl $instance_data_url/meta-data/ami-id)
+been_run_file=/var/ec2/$prog.$ami_id
+mkdir -p $(dirname $been_run_file)
+if [ -f $been_run_file ]; then
+  $logger < $been_run_file
+  exit
+fi
+
+# Retrieve the instance user-data and run it if it looks like a script
+user_data_file=`mktemp -t ec2-user-data.XXXXXXXXXX`
+chmod 700 $user_data_file
+$logger "Retrieving user-data"
+$curl -o $user_data_file $instance_data_url/user-data 2>&1 | $logger
+if [ ! -s $user_data_file ]; then
+  $logger "No user-data available"
+elif head -1 $user_data_file | egrep -v '^#!'; then
+  $logger "Skipping user-data as it does not begin with #!"
+else
+  $logger "Running user-data"
+  echo "user-data has already been run on this instance" > $been_run_file
+  $user_data_file 2>&1 | logger -t "user-data"
+  # Report the exit status of the user-data script itself, not of the logger pipe
+  $logger "user-data exit code: ${PIPESTATUS[0]}"
+fi
+rm -f $user_data_file


