Subject: svn commit: r954175 [1/2] - in /hbase/trunk: ./ bin/ src/main/java/org/apache/hadoop/hbase/avro/ src/main/java/org/apache/hadoop/hbase/avro/generated/ src/test/java/org/apache/hadoop/hbase/avro/
Date: Sun, 13 Jun 2010 06:53:51 -0000
From: rawson@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Message-Id: <20100613065351.C7427238897D@eris.apache.org>

Author: rawson
Date: Sun Jun 13 06:53:50 2010
New Revision: 954175

URL: http://svn.apache.org/viewvc?rev=954175&view=rev
Log:
HBASE-2400  new connector for Avro RPC access to HBase cluster

Added:
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AAlreadyExists.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnFamilyDescriptor.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ATableDescriptor.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ATableExists.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ATimeRange.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/HBase.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/IOError.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/TCell.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/hbase.avpr
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/hbase.genavro
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/package.html
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/avro/
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/avro/TestAvroServer.java

Modified:
    hbase/trunk/CHANGES.txt
    hbase/trunk/bin/hbase
    hbase/trunk/pom.xml

Modified: hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/trunk/CHANGES.txt?rev=954175&r1=954174&r2=954175&view=diff
==============================================================================
--- hbase/trunk/CHANGES.txt (original)
+++ hbase/trunk/CHANGES.txt Sun Jun 13 06:53:50 2010
@@ -728,6 +728,8 @@ Release 0.21.0 - Unreleased
    HBASE-2588  Add easier way to ship HBase dependencies to MR cluster within Job
    HBASE-1923  Bulk incremental load into an existing table
    HBASE-2579  Add atomic checkAndDelete support (Michael Dalton via Stack)
+   HBASE-2400  new connector for Avro RPC access to HBase cluster
+               (Jeff Hammerbacher via Ryan Rawson)

  OPTIMIZATIONS
    HBASE-410   [testing] Speed up the test suite

Modified: hbase/trunk/bin/hbase
URL: http://svn.apache.org/viewvc/hbase/trunk/bin/hbase?rev=954175&r1=954174&r2=954175&view=diff
==============================================================================
--- hbase/trunk/bin/hbase (original)
+++ hbase/trunk/bin/hbase Sun Jun 13 06:53:50 2010
@@ -70,6 +70,7 @@ if [ $# = 0 ]; then
   echo " master           run an HBase HMaster node"
   echo " regionserver     run an HBase HRegionServer node"
   echo " thrift           run an HBase Thrift server"
+  echo " avro             run an HBase Avro server"
   echo " zookeeper        run a Zookeeper server"
   echo " migrate          upgrade an hbase.rootdir"
   echo " or"
@@ -237,6 +238,11 @@ elif [ "$COMMAND" = "thrift" ] ; then
   if [ "$1" != "stop" ] ; then
     HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT_OPTS"
   fi
+elif [ "$COMMAND" = "avro" ] ; then
+  CLASS='org.apache.hadoop.hbase.avro.AvroServer'
+  if [ "$1" != "stop" ] ; then
+    HBASE_OPTS="$HBASE_OPTS $HBASE_AVRO_OPTS"
+  fi
 elif [ "$COMMAND" = "migrate" ] ; then
   CLASS='org.apache.hadoop.hbase.util.Migrate'
 elif [ "$COMMAND" = "zookeeper" ] ; then

Modified: hbase/trunk/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/pom.xml?rev=954175&r1=954174&r2=954175&view=diff
==============================================================================
--- hbase/trunk/pom.xml (original)
+++ hbase/trunk/pom.xml Sun Jun 13 06:53:50 2010
@@ -730,6 +730,33 @@
       <version>1.0.1</version>
     </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <version>1.4.3</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>avro</artifactId>
+      <version>1.3.2</version>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>1.4.3</version>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
+      <version>1.5.2</version>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+      <version>1.5.2</version>
+    </dependency>
     <dependency>
       <groupId>junit</groupId>
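With the launcher and pom changes above in place, the server is normally run with `bin/hbase avro start` (and stopped with `bin/hbase-daemon.sh stop avro`, per the usage text in AvroServer below). For readers who want to embed the server instead, here is a minimal sketch; it is not part of this commit, it assumes placement in the org.apache.hadoop.hbase.avro package (the HBaseImpl constructor is package-private), and it simply mirrors what AvroServer.doMain() does.

    // Hypothetical same-package embedding sketch; mirrors AvroServer.doMain().
    package org.apache.hadoop.hbase.avro;

    import org.apache.avro.ipc.HttpServer;
    import org.apache.avro.specific.SpecificResponder;
    import org.apache.hadoop.hbase.avro.generated.HBase;

    public class EmbeddedAvroServer {  // hypothetical class, not in this commit
      public static void main(String[] args) throws Exception {
        // Bind the generated HBase protocol to the glue implementation.
        SpecificResponder responder =
            new SpecificResponder(HBase.class, new AvroServer.HBaseImpl());
        // Avro's HttpServer begins serving on construction, as doMain() relies on.
        HttpServer server = new HttpServer(responder, 9090); // default port
        // Keep the JVM alive; AvroServer.doMain() does the same with a sleep.
        Thread.sleep(Long.MAX_VALUE);
      }
    }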
Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,580 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.avro;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericArray;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.ipc.HttpServer;
+import org.apache.avro.specific.SpecificResponder;
+import org.apache.avro.util.Utf8;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.hadoop.hbase.avro.generated.AClusterStatus;
+import org.apache.hadoop.hbase.avro.generated.AColumnValue;
+import org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm;
+import org.apache.hadoop.hbase.avro.generated.ADelete;
+import org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor;
+import org.apache.hadoop.hbase.avro.generated.AGet;
+import org.apache.hadoop.hbase.avro.generated.AIllegalArgument;
+import org.apache.hadoop.hbase.avro.generated.AIOError;
+import org.apache.hadoop.hbase.avro.generated.AMasterNotRunning;
+import org.apache.hadoop.hbase.avro.generated.APut;
+import org.apache.hadoop.hbase.avro.generated.AResult;
+import org.apache.hadoop.hbase.avro.generated.AScan;
+import org.apache.hadoop.hbase.avro.generated.ATableDescriptor;
+import org.apache.hadoop.hbase.avro.generated.ATableExists;
+import org.apache.hadoop.hbase.avro.generated.HBase;
+
+/**
+ * Start an Avro server
+ */
+public class AvroServer {
+
+  /**
+   * The HBaseImpl is a glue object that connects Avro RPC calls to the
+   * HBase client API primarily defined in the HBaseAdmin and HTable objects.
+   */
+  public static class HBaseImpl implements HBase {
+    //
+    // PROPERTIES
+    //
+    protected Configuration conf = null;
+    protected HBaseAdmin admin = null;
+    protected HTablePool htablePool = null;
+    protected final Log LOG = LogFactory.getLog(this.getClass().getName());
+
+    // nextScannerId and scannerMap are used to manage scanner state
+    protected int nextScannerId = 0;
+    protected HashMap<Integer, ResultScanner> scannerMap = null;
+
+    //
+    // UTILITY METHODS
+    //
+
+    /**
+     * Assigns a unique ID to the scanner and adds the mapping to an internal
+     * hash-map.
+     *
+     * @param scanner
+     * @return integer scanner id
+     */
+    protected synchronized int addScanner(ResultScanner scanner) {
+      int id = nextScannerId++;
+      scannerMap.put(id, scanner);
+      return id;
+    }
+
+    /**
+     * Returns the scanner associated with the specified ID.
+     *
+     * @param id
+     * @return a Scanner, or null if ID was invalid.
+     */
+    protected synchronized ResultScanner getScanner(int id) {
+      return scannerMap.get(id);
+    }
+
+    /**
+     * Removes the scanner associated with the specified ID from the internal
+     * id->scanner hash-map.
+     *
+     * @param id
+     * @return a Scanner, or null if ID was invalid.
+     */
+    protected synchronized ResultScanner removeScanner(int id) {
+      return scannerMap.remove(id);
+    }
+
+    //
+    // CTOR METHODS
+    //
+
+    // TODO(hammer): figure out appropriate setting of maxSize for htablePool
+    /**
+     * Constructs an HBaseImpl object.
+     *
+     * @throws MasterNotRunningException
+     */
+    HBaseImpl() throws MasterNotRunningException {
+      conf = HBaseConfiguration.create();
+      admin = new HBaseAdmin(conf);
+      htablePool = new HTablePool(conf, 10);
+      scannerMap = new HashMap<Integer, ResultScanner>();
+    }
+
+    //
+    // SERVICE METHODS
+    //
+
+    // TODO(hammer): Investigate use of the Command design pattern
+
+    //
+    // Cluster metadata
+    //
+
+    public Utf8 getHBaseVersion() throws AIOError {
+      try {
+        return new Utf8(admin.getClusterStatus().getHBaseVersion());
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    public AClusterStatus getClusterStatus() throws AIOError {
+      try {
+        return AvroUtil.csToACS(admin.getClusterStatus());
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    public GenericArray<ATableDescriptor> listTables() throws AIOError {
+      try {
+        HTableDescriptor[] tables = admin.listTables();
+        Schema atdSchema = Schema.createArray(ATableDescriptor.SCHEMA$);
+        GenericData.Array<ATableDescriptor> result = null;
+        result = new GenericData.Array<ATableDescriptor>(tables.length, atdSchema);
+        for (HTableDescriptor table : tables) {
+          result.add(AvroUtil.htdToATD(table));
+        }
+        return result;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    //
+    // Table metadata
+    //
+
+    // TODO(hammer): Handle the case where the table does not exist explicitly?
+    public ATableDescriptor describeTable(ByteBuffer table) throws AIOError {
+      try {
+        return AvroUtil.htdToATD(admin.getTableDescriptor(Bytes.toBytes(table)));
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    public boolean isTableEnabled(ByteBuffer table) throws AIOError {
+      try {
+        return admin.isTableEnabled(Bytes.toBytes(table));
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    public boolean tableExists(ByteBuffer table) throws AIOError {
+      try {
+        return admin.tableExists(Bytes.toBytes(table));
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    //
+    // Family metadata
+    //
+
+    // TODO(hammer): Handle the case where the family does not exist explicitly?
+    public AFamilyDescriptor describeFamily(ByteBuffer table, ByteBuffer family) throws AIOError {
+      try {
+        HTableDescriptor htd = admin.getTableDescriptor(Bytes.toBytes(table));
+        return AvroUtil.hcdToAFD(htd.getFamily(Bytes.toBytes(family)));
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    //
+    // Table admin
+    //
+
+    public Void createTable(ATableDescriptor table) throws AIOError,
+                                                           AIllegalArgument,
+                                                           ATableExists,
+                                                           AMasterNotRunning {
+      try {
+        admin.createTable(AvroUtil.atdToHTD(table));
+        return null;
+      } catch (IllegalArgumentException e) {
+        AIllegalArgument iae = new AIllegalArgument();
+        iae.message = new Utf8(e.getMessage());
+        throw iae;
+      } catch (TableExistsException e) {
+        ATableExists tee = new ATableExists();
+        tee.message = new Utf8(e.getMessage());
+        throw tee;
+      } catch (MasterNotRunningException e) {
+        AMasterNotRunning mnre = new AMasterNotRunning();
+        mnre.message = new Utf8(e.getMessage());
+        throw mnre;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    // Note that disable, flush and major compaction of .META. needed in client
+    // TODO(hammer): more selective cache dirtying than flush?
+    public Void deleteTable(ByteBuffer table) throws AIOError {
+      try {
+        admin.deleteTable(Bytes.toBytes(table));
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    // NB: Asynchronous operation
+    public Void modifyTable(ByteBuffer tableName, ATableDescriptor tableDescriptor) throws AIOError {
+      try {
+        admin.modifyTable(Bytes.toBytes(tableName),
+                          AvroUtil.atdToHTD(tableDescriptor));
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    public Void enableTable(ByteBuffer table) throws AIOError {
+      try {
+        admin.enableTable(Bytes.toBytes(table));
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    public Void disableTable(ByteBuffer table) throws AIOError {
+      try {
+        admin.disableTable(Bytes.toBytes(table));
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    // NB: Asynchronous operation
+    public Void flush(ByteBuffer table) throws AIOError {
+      try {
+        admin.flush(Bytes.toBytes(table));
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    // NB: Asynchronous operation
+    public Void split(ByteBuffer table) throws AIOError {
+      try {
+        admin.split(Bytes.toBytes(table));
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    //
+    // Family admin
+    //
+
+    public Void addFamily(ByteBuffer table, AFamilyDescriptor family) throws AIOError {
+      try {
+        admin.addColumn(Bytes.toBytes(table),
+                        AvroUtil.afdToHCD(family));
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    // NB: Asynchronous operation
+    public Void deleteFamily(ByteBuffer table, ByteBuffer family) throws AIOError {
+      try {
+        admin.deleteColumn(Bytes.toBytes(table), Bytes.toBytes(family));
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    // NB: Asynchronous operation
+    public Void modifyFamily(ByteBuffer table, ByteBuffer familyName, AFamilyDescriptor familyDescriptor) throws AIOError {
+      try {
+        admin.modifyColumn(Bytes.toBytes(table), Bytes.toBytes(familyName),
+                           AvroUtil.afdToHCD(familyDescriptor));
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    //
+    // Single-row DML
+    //
+
+    // TODO(hammer): Java with statement for htablepool concision?
+    // TODO(hammer): Can Get have timestamp and timerange simultaneously?
+    // TODO(hammer): Do I need to catch the RuntimeException of getTable?
+    // TODO(hammer): Handle gets with no results
+    // TODO(hammer): Uses exists(Get) to ensure columns exist
+    public AResult get(ByteBuffer table, AGet aget) throws AIOError {
+      HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
+      try {
+        return AvroUtil.resultToAResult(htable.get(AvroUtil.agetToGet(aget)));
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      } finally {
+        htablePool.putTable(htable);
+      }
+    }
+
+    public boolean exists(ByteBuffer table, AGet aget) throws AIOError {
+      HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
+      try {
+        return htable.exists(AvroUtil.agetToGet(aget));
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      } finally {
+        htablePool.putTable(htable);
+      }
+    }
+
+    public Void put(ByteBuffer table, APut aput) throws AIOError {
+      HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
+      try {
+        htable.put(AvroUtil.aputToPut(aput));
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      } finally {
+        htablePool.putTable(htable);
+      }
+    }
+
+    public Void delete(ByteBuffer table, ADelete adelete) throws AIOError {
+      HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
+      try {
+        htable.delete(AvroUtil.adeleteToDelete(adelete));
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      } finally {
+        htablePool.putTable(htable);
+      }
+    }
+
+    public long incrementColumnValue(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, long amount, boolean writeToWAL) throws AIOError {
+      HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
+      try {
+        return htable.incrementColumnValue(Bytes.toBytes(row), Bytes.toBytes(family), Bytes.toBytes(qualifier), amount, writeToWAL);
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      } finally {
+        htablePool.putTable(htable);
+      }
+    }
+
+    //
+    // Multi-row DML
+    //
+
+    public int scannerOpen(ByteBuffer table, AScan ascan) throws AIOError {
+      HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
+      try {
+        Scan scan = AvroUtil.ascanToScan(ascan);
+        return addScanner(htable.getScanner(scan));
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      } finally {
+        htablePool.putTable(htable);
+      }
+    }
+
+    public Void scannerClose(int scannerId) throws AIOError, AIllegalArgument {
+      try {
+        ResultScanner scanner = getScanner(scannerId);
+        if (scanner == null) {
+          AIllegalArgument aie = new AIllegalArgument();
+          aie.message = new Utf8("scanner ID is invalid: " + scannerId);
+          throw aie;
+        }
+        scanner.close();
+        removeScanner(scannerId);
+        return null;
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+
+    public GenericArray<AResult> scannerGetRows(int scannerId, int numberOfRows) throws AIOError, AIllegalArgument {
+      try {
+        ResultScanner scanner = getScanner(scannerId);
+        if (scanner == null) {
+          AIllegalArgument aie = new AIllegalArgument();
+          aie.message = new Utf8("scanner ID is invalid: " + scannerId);
+          throw aie;
+        }
+        return AvroUtil.resultsToAResults(scanner.next(numberOfRows));
+      } catch (IOException e) {
+        AIOError ioe = new AIOError();
+        ioe.message = new Utf8(e.getMessage());
+        throw ioe;
+      }
+    }
+  }
+
+  //
+  // MAIN PROGRAM
+  //
+
+  private static void printUsageAndExit() {
+    printUsageAndExit(null);
+  }
+
+  private static void printUsageAndExit(final String message) {
+    if (message != null) {
+      System.err.println(message);
+    }
+    System.out.println("Usage: java org.apache.hadoop.hbase.avro.AvroServer " +
+        "--help | [--port=PORT] start");
+    System.out.println("Arguments:");
+    System.out.println(" start Start Avro server");
+    System.out.println(" stop  Stop Avro server");
+    System.out.println("Options:");
+    System.out.println(" port  Port to listen on. Default: 9090");
+    System.out.println(" help  Print this message and exit");
+    System.exit(0);
+  }
+
+  // TODO(hammer): Figure out a better way to keep the server alive!
+  protected static void doMain(final String[] args) throws Exception {
+    if (args.length < 1) {
+      printUsageAndExit();
+    }
+    int port = 9090;
+    final String portArgKey = "--port=";
+    for (String cmd: args) {
+      if (cmd.startsWith(portArgKey)) {
+        port = Integer.parseInt(cmd.substring(portArgKey.length()));
+        continue;
+      } else if (cmd.equals("--help") || cmd.equals("-h")) {
+        printUsageAndExit();
+      } else if (cmd.equals("start")) {
+        continue;
+      } else if (cmd.equals("stop")) {
+        printUsageAndExit("To shutdown the Avro server run " +
+            "bin/hbase-daemon.sh stop avro or send a kill signal to " +
+            "the Avro server pid");
+      }
+
+      // Print out usage if we get to here.
+      printUsageAndExit();
+    }
+    Log LOG = LogFactory.getLog("AvroServer");
+    LOG.info("starting HBase Avro server on port " + Integer.toString(port));
+    SpecificResponder r = new SpecificResponder(HBase.class, new HBaseImpl());
+    HttpServer server = new HttpServer(r, port);
+    Thread.sleep(1000000);
+  }
+
+  // TODO(hammer): Look at Cassandra's daemonization and integration with JSVC
+  // TODO(hammer): Don't eat it after a single exception
+  // TODO(hammer): Figure out why we do doMain()
+  // TODO(hammer): Figure out if we want String[] or String [] syntax
+  public static void main(String[] args) throws Exception {
+    doMain(args);
+  }
+}
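For the client side, the generated HBase interface pairs with Avro's HTTP transport. A minimal sketch, assuming the Avro 1.3 IPC client classes (HttpTransceiver, SpecificRequestor), a server on localhost:9090, and an illustrative table name; this is not part of the commit:

    import java.net.URL;
    import java.nio.ByteBuffer;

    import org.apache.avro.ipc.HttpTransceiver;
    import org.apache.avro.specific.SpecificRequestor;
    import org.apache.hadoop.hbase.avro.generated.HBase;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AvroClientSketch {  // hypothetical class, not in this commit
      public static void main(String[] args) throws Exception {
        // Connect to the AvroServer started by `bin/hbase avro start`.
        HttpTransceiver transceiver =
            new HttpTransceiver(new URL("http://localhost:9090"));
        // Dynamic proxy implementing the generated HBase protocol interface.
        HBase hbase = SpecificRequestor.getClient(HBase.class, transceiver);
        System.out.println("version: " + hbase.getHBaseVersion());
        System.out.println("exists: "
            + hbase.tableExists(ByteBuffer.wrap(Bytes.toBytes("mytable"))));
      }
    }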
Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,413 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.avro;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.avro.generated.AClusterStatus;
+import org.apache.hadoop.hbase.avro.generated.AColumn;
+import org.apache.hadoop.hbase.avro.generated.AColumnValue;
+import org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm;
+import org.apache.hadoop.hbase.avro.generated.ADelete;
+import org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor;
+import org.apache.hadoop.hbase.avro.generated.AGet;
+import org.apache.hadoop.hbase.avro.generated.AIllegalArgument;
+import org.apache.hadoop.hbase.avro.generated.APut;
+import org.apache.hadoop.hbase.avro.generated.ARegionLoad;
+import org.apache.hadoop.hbase.avro.generated.AResult;
+import org.apache.hadoop.hbase.avro.generated.AResultEntry;
+import org.apache.hadoop.hbase.avro.generated.AScan;
+import org.apache.hadoop.hbase.avro.generated.AServerAddress;
+import org.apache.hadoop.hbase.avro.generated.AServerInfo;
+import org.apache.hadoop.hbase.avro.generated.AServerLoad;
+import org.apache.hadoop.hbase.avro.generated.ATableDescriptor;
+
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericArray;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.util.Utf8;
+
+public class AvroUtil {
+
+  //
+  // Cluster metadata
+  //
+
+  static public AServerAddress hsaToASA(HServerAddress hsa) throws IOException {
+    AServerAddress asa = new AServerAddress();
+    asa.hostname = new Utf8(hsa.getHostname());
+    asa.inetSocketAddress = new Utf8(hsa.getInetSocketAddress().toString());
+    asa.port = hsa.getPort();
+    return asa;
+  }
+
+  static public ARegionLoad hrlToARL(HServerLoad.RegionLoad rl) throws IOException {
+    ARegionLoad arl = new ARegionLoad();
+    arl.memStoreSizeMB = rl.getMemStoreSizeMB();
+    arl.name = ByteBuffer.wrap(rl.getName());
+    arl.storefileIndexSizeMB = rl.getStorefileIndexSizeMB();
+    arl.storefiles = rl.getStorefiles();
+    arl.storefileSizeMB = rl.getStorefileSizeMB();
+    arl.stores = rl.getStores();
+    return arl;
+  }
+
+  static public AServerLoad hslToASL(HServerLoad hsl) throws IOException {
+    AServerLoad asl = new AServerLoad();
+    asl.load = hsl.getLoad();
+    asl.maxHeapMB = hsl.getMaxHeapMB();
+    asl.memStoreSizeInMB = hsl.getMemStoreSizeInMB();
+    asl.numberOfRegions = hsl.getNumberOfRegions();
+    asl.numberOfRequests = hsl.getNumberOfRequests();
+
+    Collection<HServerLoad.RegionLoad> regionLoads = hsl.getRegionsLoad();
+    Schema s = Schema.createArray(ARegionLoad.SCHEMA$);
+    GenericData.Array<ARegionLoad> aregionLoads = null;
+    if (regionLoads != null) {
+      aregionLoads = new GenericData.Array<ARegionLoad>(regionLoads.size(), s);
+      for (HServerLoad.RegionLoad rl : regionLoads) {
+        aregionLoads.add(hrlToARL(rl));
+      }
+    } else {
+      aregionLoads = new GenericData.Array<ARegionLoad>(0, s);
+    }
+    asl.regionsLoad = aregionLoads;
+
+    asl.storefileIndexSizeInMB = hsl.getStorefileIndexSizeInMB();
+    asl.storefiles = hsl.getStorefiles();
+    asl.storefileSizeInMB = hsl.getStorefileSizeInMB();
+    asl.usedHeapMB = hsl.getUsedHeapMB();
+    return asl;
+  }
+
+  static public AServerInfo hsiToASI(HServerInfo hsi) throws IOException {
+    AServerInfo asi = new AServerInfo();
+    asi.infoPort = hsi.getInfoPort();
+    asi.load = hslToASL(hsi.getLoad());
+    asi.serverAddress = hsaToASA(hsi.getServerAddress());
+    asi.serverName = new Utf8(hsi.getServerName());
+    asi.startCode = hsi.getStartCode();
+    return asi;
+  }
+
+  static public AClusterStatus csToACS(ClusterStatus cs) throws IOException {
+    AClusterStatus acs = new AClusterStatus();
+    acs.averageLoad = cs.getAverageLoad();
+    Collection<String> deadServerNames = cs.getDeadServerNames();
+    Schema stringArraySchema = Schema.createArray(Schema.create(Schema.Type.STRING));
+    GenericData.Array<Utf8> adeadServerNames = null;
+    if (deadServerNames != null) {
+      adeadServerNames = new GenericData.Array<Utf8>(deadServerNames.size(), stringArraySchema);
+      for (String deadServerName : deadServerNames) {
+        adeadServerNames.add(new Utf8(deadServerName));
+      }
+    } else {
+      adeadServerNames = new GenericData.Array<Utf8>(0, stringArraySchema);
+    }
+    acs.deadServerNames = adeadServerNames;
+    acs.deadServers = cs.getDeadServers();
+    acs.hbaseVersion = new Utf8(cs.getHBaseVersion());
+    acs.regionsCount = cs.getRegionsCount();
+    acs.requestsCount = cs.getRequestsCount();
+    Collection<HServerInfo> hserverInfos = cs.getServerInfo();
+    Schema s = Schema.createArray(AServerInfo.SCHEMA$);
+    GenericData.Array<AServerInfo> aserverInfos = null;
+    if (hserverInfos != null) {
+      aserverInfos = new GenericData.Array<AServerInfo>(hserverInfos.size(), s);
+      for (HServerInfo hsi : hserverInfos) {
+        aserverInfos.add(hsiToASI(hsi));
+      }
+    } else {
+      aserverInfos = new GenericData.Array<AServerInfo>(0, s);
+    }
+    acs.serverInfos = aserverInfos;
+    acs.servers = cs.getServers();
+    return acs;
+  }
+
+  //
+  // Table metadata
+  //
+
+  static public ATableDescriptor htdToATD(HTableDescriptor table) throws IOException {
+    ATableDescriptor atd = new ATableDescriptor();
+    atd.name = ByteBuffer.wrap(table.getName());
+    Collection<HColumnDescriptor> families = table.getFamilies();
+    Schema afdSchema = Schema.createArray(AFamilyDescriptor.SCHEMA$);
+    GenericData.Array<AFamilyDescriptor> afamilies = null;
+    if (families.size() > 0) {
+      afamilies = new GenericData.Array<AFamilyDescriptor>(families.size(), afdSchema);
+      for (HColumnDescriptor hcd : families) {
+        AFamilyDescriptor afamily = hcdToAFD(hcd);
+        afamilies.add(afamily);
+      }
+    } else {
+      afamilies = new GenericData.Array<AFamilyDescriptor>(0, afdSchema);
+    }
+    atd.families = afamilies;
+    atd.maxFileSize = table.getMaxFileSize();
+    atd.memStoreFlushSize = table.getMemStoreFlushSize();
+    atd.rootRegion = table.isRootRegion();
+    atd.metaRegion = table.isMetaRegion();
+    atd.metaTable = table.isMetaTable();
+    atd.readOnly = table.isReadOnly();
+    atd.deferredLogFlush = table.isDeferredLogFlush();
+    return atd;
+  }
+
+  static public HTableDescriptor atdToHTD(ATableDescriptor atd) throws IOException, AIllegalArgument {
+    HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes(atd.name));
+    if (atd.families != null && atd.families.size() > 0) {
+      for (AFamilyDescriptor afd : atd.families) {
+        htd.addFamily(afdToHCD(afd));
+      }
+    }
+    if (atd.maxFileSize != null) {
+      htd.setMaxFileSize(atd.maxFileSize);
+    }
+    if (atd.memStoreFlushSize != null) {
+      htd.setMemStoreFlushSize(atd.memStoreFlushSize);
+    }
+    if (atd.readOnly != null) {
+      htd.setReadOnly(atd.readOnly);
+    }
+    if (atd.deferredLogFlush != null) {
+      htd.setDeferredLogFlush(atd.deferredLogFlush);
+    }
+    if (atd.rootRegion != null || atd.metaRegion != null || atd.metaTable != null) {
+      AIllegalArgument aie = new AIllegalArgument();
+      aie.message = new Utf8("Can't set root or meta flag on create table.");
+      throw aie;
+    }
+    return htd;
+  }
+
+  //
+  // Family metadata
+  //
+
+  static public AFamilyDescriptor hcdToAFD(HColumnDescriptor hcd) throws IOException {
+    AFamilyDescriptor afamily = new AFamilyDescriptor();
+    afamily.name = ByteBuffer.wrap(hcd.getName());
+    String compressionAlgorithm = hcd.getCompressionType().getName();
+    if (compressionAlgorithm.equals("LZO")) {
+      afamily.compression = ACompressionAlgorithm.LZO;
+    } else if (compressionAlgorithm.equals("GZ")) {
+      afamily.compression = ACompressionAlgorithm.GZ;
+    } else {
+      afamily.compression = ACompressionAlgorithm.NONE;
+    }
+    afamily.maxVersions = hcd.getMaxVersions();
+    afamily.blocksize = hcd.getBlocksize();
+    afamily.inMemory = hcd.isInMemory();
+    afamily.timeToLive = hcd.getTimeToLive();
+    afamily.blockCacheEnabled = hcd.isBlockCacheEnabled();
+    return afamily;
+  }
+
+  static public HColumnDescriptor afdToHCD(AFamilyDescriptor afd) throws IOException {
+    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(afd.name));
+
+    ACompressionAlgorithm compressionAlgorithm = afd.compression;
+    if (compressionAlgorithm == ACompressionAlgorithm.LZO) {
+      hcd.setCompressionType(Compression.Algorithm.LZO);
+    } else if (compressionAlgorithm == ACompressionAlgorithm.GZ) {
+      hcd.setCompressionType(Compression.Algorithm.GZ);
+    } else {
+      hcd.setCompressionType(Compression.Algorithm.NONE);
+    }
+
+    if (afd.maxVersions != null) {
+      hcd.setMaxVersions(afd.maxVersions);
+    }
+
+    if (afd.blocksize != null) {
+      hcd.setBlocksize(afd.blocksize);
+    }
+
+    if (afd.inMemory != null) {
+      hcd.setInMemory(afd.inMemory);
+    }
+
+    if (afd.timeToLive != null) {
+      hcd.setTimeToLive(afd.timeToLive);
+    }
+
+    if (afd.blockCacheEnabled != null) {
+      hcd.setBlockCacheEnabled(afd.blockCacheEnabled);
+    }
+    return hcd;
+  }
+
+  //
+  // Single-Row DML (Get)
+  //
+
+  // TODO(hammer): More concise idiom than if not null assign?
+  static public Get agetToGet(AGet aget) throws IOException {
+    Get get = new Get(Bytes.toBytes(aget.row));
+    if (aget.columns != null) {
+      for (AColumn acolumn : aget.columns) {
+        if (acolumn.qualifier != null) {
+          get.addColumn(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
+        } else {
+          get.addFamily(Bytes.toBytes(acolumn.family));
+        }
+      }
+    }
+    if (aget.timestamp != null) {
+      get.setTimeStamp(aget.timestamp);
+    }
+    if (aget.timerange != null) {
+      get.setTimeRange(aget.timerange.minStamp, aget.timerange.maxStamp);
+    }
+    if (aget.maxVersions != null) {
+      get.setMaxVersions(aget.maxVersions);
+    }
+    return get;
+  }
+
+  // TODO(hammer): Pick one: Timestamp or TimeStamp
+  static public AResult resultToAResult(Result result) {
+    AResult aresult = new AResult();
+    aresult.row = ByteBuffer.wrap(result.getRow());
+    Schema s = Schema.createArray(AResultEntry.SCHEMA$);
+    GenericData.Array<AResultEntry> entries = null;
+    List<KeyValue> resultKeyValues = result.list();
+    if (resultKeyValues != null && resultKeyValues.size() > 0) {
+      entries = new GenericData.Array<AResultEntry>(resultKeyValues.size(), s);
+      for (KeyValue resultKeyValue : resultKeyValues) {
+        AResultEntry entry = new AResultEntry();
+        entry.family = ByteBuffer.wrap(resultKeyValue.getFamily());
+        entry.qualifier = ByteBuffer.wrap(resultKeyValue.getQualifier());
+        entry.value = ByteBuffer.wrap(resultKeyValue.getValue());
+        entry.timestamp = resultKeyValue.getTimestamp();
+        entries.add(entry);
+      }
+    } else {
+      entries = new GenericData.Array<AResultEntry>(0, s);
+    }
+    aresult.entries = entries;
+    return aresult;
+  }
+
+  //
+  // Single-Row DML (Put)
+  //
+
+  static public Put aputToPut(APut aput) throws IOException {
+    Put put = new Put(Bytes.toBytes(aput.row));
+    for (AColumnValue acv : aput.columnValues) {
+      if (acv.timestamp != null) {
+        put.add(Bytes.toBytes(acv.family),
+                Bytes.toBytes(acv.qualifier),
+                acv.timestamp,
+                Bytes.toBytes(acv.value));
+      } else {
+        put.add(Bytes.toBytes(acv.family),
+                Bytes.toBytes(acv.qualifier),
+                Bytes.toBytes(acv.value));
+      }
+    }
+    return put;
+  }
+
+  //
+  // Single-Row DML (Delete)
+  //
+
+  static public Delete adeleteToDelete(ADelete adelete) throws IOException {
+    Delete delete = new Delete(Bytes.toBytes(adelete.row));
+    if (adelete.columns != null) {
+      for (AColumn acolumn : adelete.columns) {
+        if (acolumn.qualifier != null) {
+          delete.deleteColumns(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
+        } else {
+          delete.deleteFamily(Bytes.toBytes(acolumn.family));
+        }
+      }
+    }
+    return delete;
+  }
+
+  //
+  // Multi-row DML (Scan)
+  //
+
+  static public Scan ascanToScan(AScan ascan) throws IOException {
+    Scan scan = new Scan();
+    if (ascan.startRow != null) {
+      scan.setStartRow(Bytes.toBytes(ascan.startRow));
+    }
+    if (ascan.stopRow != null) {
+      scan.setStopRow(Bytes.toBytes(ascan.stopRow));
+    }
+    if (ascan.columns != null) {
+      for (AColumn acolumn : ascan.columns) {
+        if (acolumn.qualifier != null) {
+          scan.addColumn(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
+        } else {
+          scan.addFamily(Bytes.toBytes(acolumn.family));
+        }
+      }
+    }
+    if (ascan.timestamp != null) {
+      scan.setTimeStamp(ascan.timestamp);
+    }
+    if (ascan.timerange != null) {
+      scan.setTimeRange(ascan.timerange.minStamp, ascan.timerange.maxStamp);
+    }
+    if (ascan.maxVersions != null) {
+      scan.setMaxVersions(ascan.maxVersions);
+    }
+    return scan;
+  }
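These converters are plain field-by-field mappings between the Avro records and the HBase client types. A small sketch of agetToGet in use; the class name and the row/family/qualifier values are illustrative, not part of this commit:

    import java.io.IOException;
    import java.nio.ByteBuffer;

    import org.apache.avro.Schema;
    import org.apache.avro.generic.GenericData;
    import org.apache.hadoop.hbase.avro.AvroUtil;
    import org.apache.hadoop.hbase.avro.generated.AColumn;
    import org.apache.hadoop.hbase.avro.generated.AGet;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AvroUtilSketch {  // hypothetical class, not in this commit
      public static Get exampleGet() throws IOException {
        AGet aget = new AGet();
        aget.row = ByteBuffer.wrap(Bytes.toBytes("row1"));          // illustrative row key
        AColumn acolumn = new AColumn();
        acolumn.family = ByteBuffer.wrap(Bytes.toBytes("info"));    // illustrative family
        acolumn.qualifier = ByteBuffer.wrap(Bytes.toBytes("name")); // illustrative qualifier
        GenericData.Array<AColumn> columns =
            new GenericData.Array<AColumn>(1, Schema.createArray(AColumn.SCHEMA$));
        columns.add(acolumn);
        aget.columns = columns;
        // Yields a Get on "row1" with column info:name added.
        return AvroUtil.agetToGet(aget);
      }
    }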
+  // TODO(hammer): Better to return null or empty array?
+  static public GenericArray<AResult> resultsToAResults(Result[] results) {
+    Schema s = Schema.createArray(AResult.SCHEMA$);
+    GenericData.Array<AResult> aresults = null;
+    if (results != null && results.length > 0) {
+      aresults = new GenericData.Array<AResult>(results.length, s);
+      for (Result result : results) {
+        aresults.add(resultToAResult(result));
+      }
+    } else {
+      aresults = new GenericData.Array<AResult>(0, s);
+    }
+    return aresults;
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AAlreadyExists.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AAlreadyExists.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AAlreadyExists.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AAlreadyExists.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,21 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AAlreadyExists extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AAlreadyExists\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
+  public org.apache.avro.util.Utf8 message;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return message;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: message = (org.apache.avro.util.Utf8)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,42 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AClusterStatus extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AClusterStatus\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"averageLoad\",\"type\":\"double\"},{\"name\":\"deadServerNames\",\"type\":{\"type\":\"array\",\"items\":\"string\"}},{\"name\":\"deadServers\",\"type\":\"int\"},{\"name\":\"hbaseVersion\",\"type\":\"string\"},{\"name\":\"regionsCount\",\"type\":\"int\"},{\"name\":\"requestsCount\",\"type\":\"int\"},{\"name\":\"serverInfos\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AServerInfo\",\"fields\":[{\"name\":\"infoPort\",\"type\":\"int\"},{\"name\":\"load\",\"type\":{\"type\":\"record\",\"name\":\"AServerLoad\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]}},{\"name\":\"serverAddress\",\"type\":{\"type\":\"record\",\"name\":\"AServerAddress\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]}},{\"name\":\"serverName\",\"type\":\"string\"},{\"name\":\"startCode\",\"type\":\"long\"}]}}},{\"name\":\"servers\",\"type\":\"int\"}]}");
+  public double averageLoad;
+  public org.apache.avro.generic.GenericArray<org.apache.avro.util.Utf8> deadServerNames;
+  public int deadServers;
+  public org.apache.avro.util.Utf8 hbaseVersion;
+  public int regionsCount;
+  public int requestsCount;
+  public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AServerInfo> serverInfos;
+  public int servers;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return averageLoad;
+    case 1: return deadServerNames;
+    case 2: return deadServers;
+    case 3: return hbaseVersion;
+    case 4: return regionsCount;
+    case 5: return requestsCount;
+    case 6: return serverInfos;
+    case 7: return servers;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: averageLoad = (java.lang.Double)value$; break;
+    case 1: deadServerNames = (org.apache.avro.generic.GenericArray)value$; break;
+    case 2: deadServers = (java.lang.Integer)value$; break;
+    case 3: hbaseVersion = (org.apache.avro.util.Utf8)value$; break;
+    case 4: regionsCount = (java.lang.Integer)value$; break;
+    case 5: requestsCount = (java.lang.Integer)value$; break;
+    case 6: serverInfos = (org.apache.avro.generic.GenericArray)value$; break;
+    case 7: servers = (java.lang.Integer)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}
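Like every generated record in this package, AClusterStatus exposes positional get(int)/put(int, Object) accessors that Avro's runtime uses during (de)serialization; they read and write the same public fields as direct access. A tiny illustration (class name and value are made up, not part of this commit):

    import org.apache.hadoop.hbase.avro.generated.AClusterStatus;

    public class RecordAccessSketch {  // hypothetical class, not in this commit
      public static void main(String[] args) {
        AClusterStatus acs = new AClusterStatus();
        acs.put(0, 3.5);                          // index 0 is 'averageLoad'
        double viaIndex = (Double) acs.get(0);    // same value as acs.averageLoad
        System.out.println(viaIndex == acs.averageLoad); // true
      }
    }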
Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,24 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AColumn extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AColumn\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}");
+  public java.nio.ByteBuffer family;
+  public java.nio.ByteBuffer qualifier;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return family;
+    case 1: return qualifier;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: family = (java.nio.ByteBuffer)value$; break;
+    case 1: qualifier = (java.nio.ByteBuffer)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnFamilyDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnFamilyDescriptor.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnFamilyDescriptor.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnFamilyDescriptor.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,42 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AColumnFamilyDescriptor extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AColumnFamilyDescriptor\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]}},{\"name\":\"maxVersions\",\"type\":\"int\"},{\"name\":\"blocksize\",\"type\":\"int\"},{\"name\":\"inMemory\",\"type\":\"boolean\"},{\"name\":\"timeToLive\",\"type\":\"int\"},{\"name\":\"blockCacheEnabled\",\"type\":\"boolean\"},{\"name\":\"bloomfilterEnabled\",\"type\":\"boolean\"}]}");
+  public java.nio.ByteBuffer name;
+  public org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm compression;
+  public int maxVersions;
+  public int blocksize;
+  public boolean inMemory;
+  public int timeToLive;
+  public boolean blockCacheEnabled;
+  public boolean bloomfilterEnabled;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return name;
+    case 1: return compression;
+    case 2: return maxVersions;
+    case 3: return blocksize;
+    case 4: return inMemory;
+    case 5: return timeToLive;
+    case 6: return blockCacheEnabled;
+    case 7: return bloomfilterEnabled;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: name = (java.nio.ByteBuffer)value$; break;
+    case 1: compression = (org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm)value$; break;
+    case 2: maxVersions = (java.lang.Integer)value$; break;
+    case 3: blocksize = (java.lang.Integer)value$; break;
+    case 4: inMemory = (java.lang.Boolean)value$; break;
+    case 5: timeToLive = (java.lang.Integer)value$; break;
+    case 6: blockCacheEnabled = (java.lang.Boolean)value$; break;
+    case 7: bloomfilterEnabled = (java.lang.Boolean)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,30 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AColumnValue extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AColumnValue\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]}]}");
+  public java.nio.ByteBuffer family;
+  public java.nio.ByteBuffer qualifier;
+  public java.nio.ByteBuffer value;
+  public java.lang.Long timestamp;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return family;
+    case 1: return qualifier;
+    case 2: return value;
+    case 3: return timestamp;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: family = (java.nio.ByteBuffer)value$; break;
+    case 1: qualifier = (java.nio.ByteBuffer)value$; break;
+    case 2: value = (java.nio.ByteBuffer)value$; break;
+    case 3: timestamp = (java.lang.Long)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,6 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public enum ACompressionAlgorithm {
+  LZO, GZ, NONE
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,24 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class ADelete extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ADelete\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]}]}");
+  public java.nio.ByteBuffer row;
+  public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn> columns;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return row;
+    case 1: return columns;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: row = (java.nio.ByteBuffer)value$; break;
+    case 1: columns = (org.apache.avro.generic.GenericArray)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,39 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AFamilyDescriptor extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AFamilyDescriptor\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":[{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]},{\"name\":\"blocksize\",\"type\":[\"int\",\"null\"]},{\"name\":\"inMemory\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"timeToLive\",\"type\":[\"int\",\"null\"]},{\"name\":\"blockCacheEnabled\",\"type\":[\"boolean\",\"null\"]}]}");
+  public java.nio.ByteBuffer name;
+  public org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm compression;
+  public java.lang.Integer maxVersions;
+  public java.lang.Integer blocksize;
+  public java.lang.Boolean inMemory;
+  public java.lang.Integer timeToLive;
+  public java.lang.Boolean blockCacheEnabled;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return name;
+    case 1: return compression;
+    case 2: return maxVersions;
+    case 3: return blocksize;
+    case 4: return inMemory;
+    case 5: return timeToLive;
+    case 6: return blockCacheEnabled;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: name = (java.nio.ByteBuffer)value$; break;
+    case 1: compression = (org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm)value$; break;
+    case 2: maxVersions = (java.lang.Integer)value$; break;
+    case 3: blocksize = (java.lang.Integer)value$; break;
+    case 4: inMemory = (java.lang.Boolean)value$; break;
+    case 5: timeToLive = (java.lang.Integer)value$; break;
+    case 6: blockCacheEnabled = (java.lang.Boolean)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,33 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AGet extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AGet\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[{\"type\":\"record\",\"name\":\"ATimeRange\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]}");
+  public java.nio.ByteBuffer row;
+  public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn> columns;
+  public java.lang.Long timestamp;
+  public org.apache.hadoop.hbase.avro.generated.ATimeRange timerange;
+  public java.lang.Integer maxVersions;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return row;
+    case 1: return columns;
+    case 2: return timestamp;
+    case 3: return timerange;
+    case 4: return maxVersions;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: row = (java.nio.ByteBuffer)value$; break;
+    case 1: columns = (org.apache.avro.generic.GenericArray)value$; break;
+    case 2: timestamp = (java.lang.Long)value$; break;
= (org.apache.hadoop.hbase.avro.generated.ATimeRange)value$; break; + case 4: maxVersions = (java.lang.Integer)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } +} Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java?rev=954175&view=auto ============================================================================== --- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java (added) +++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java Sun Jun 13 06:53:50 2010 @@ -0,0 +1,21 @@ +package org.apache.hadoop.hbase.avro.generated; + +@SuppressWarnings("all") +public class AIOError extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AIOError\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}"); + public org.apache.avro.util.Utf8 message; + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return message; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: message = (org.apache.avro.util.Utf8)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } +} Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java?rev=954175&view=auto ============================================================================== --- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java (added) +++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java Sun Jun 13 06:53:50 2010 @@ -0,0 +1,21 @@ +package org.apache.hadoop.hbase.avro.generated; + +@SuppressWarnings("all") +public class AIllegalArgument extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AIllegalArgument\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}"); + public org.apache.avro.util.Utf8 message; + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return message; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: message = (org.apache.avro.util.Utf8)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } +} Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java?rev=954175&view=auto 
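
These generated records are plain structs with public fields, so a client populates them directly before handing them to the Avro gateway. As a minimal sketch of building the AGet read descriptor above, assuming Avro 1.3.2 and the generated classes on the classpath (the row key and column names are made up for illustration):

    import java.nio.ByteBuffer;
    import org.apache.avro.Schema;
    import org.apache.avro.generic.GenericData;
    import org.apache.hadoop.hbase.avro.generated.AColumn;
    import org.apache.hadoop.hbase.avro.generated.AGet;

    public class AGetExample {
      public static void main(String[] args) {
        // Describe a fetch of row "row1", column info:name, up to 3 versions.
        AGet get = new AGet();
        get.row = ByteBuffer.wrap("row1".getBytes());
        AColumn col = new AColumn();
        col.family = ByteBuffer.wrap("info".getBytes());
        col.qualifier = ByteBuffer.wrap("name".getBytes());
        GenericData.Array<AColumn> cols =
            new GenericData.Array<AColumn>(1, Schema.createArray(AColumn.SCHEMA$));
        cols.add(col);
        get.columns = cols;    // optional union field per SCHEMA$
        get.maxVersions = 3;   // optional ["int","null"] field; may be left null
      }
    }
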
Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,21 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AMasterNotRunning extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AMasterNotRunning\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
+  public org.apache.avro.util.Utf8 message;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return message;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: message = (org.apache.avro.util.Utf8)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,24 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class APut extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"APut\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columnValues\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumnValue\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]}]}}}]}");
+  public java.nio.ByteBuffer row;
+  public org.apache.avro.generic.GenericArray columnValues;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return row;
+    case 1: return columnValues;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: row = (java.nio.ByteBuffer)value$; break;
+    case 1: columnValues = (org.apache.avro.generic.GenericArray)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}
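
The write path mirrors this shape: an APut is just a row key plus an array of AColumnValue cells. A hedged sketch along the same lines (cell contents are hypothetical; the nullable per-cell timestamp is simply left unset):

    import java.nio.ByteBuffer;
    import org.apache.avro.Schema;
    import org.apache.avro.generic.GenericData;
    import org.apache.hadoop.hbase.avro.generated.AColumnValue;
    import org.apache.hadoop.hbase.avro.generated.APut;

    public class APutExample {
      public static void main(String[] args) {
        // One cell: family "info", qualifier "name", value "alice".
        AColumnValue cv = new AColumnValue();
        cv.family = ByteBuffer.wrap("info".getBytes());
        cv.qualifier = ByteBuffer.wrap("name".getBytes());
        cv.value = ByteBuffer.wrap("alice".getBytes());
        GenericData.Array<AColumnValue> cells = new GenericData.Array<AColumnValue>(
            1, Schema.createArray(AColumnValue.SCHEMA$));
        cells.add(cv);
        APut put = new APut();
        put.row = ByteBuffer.wrap("row1".getBytes());
        put.columnValues = cells;
      }
    }
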
Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,36 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class ARegionLoad extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ARegionLoad\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}");
+  public int memStoreSizeMB;
+  public java.nio.ByteBuffer name;
+  public int storefileIndexSizeMB;
+  public int storefiles;
+  public int storefileSizeMB;
+  public int stores;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return memStoreSizeMB;
+    case 1: return name;
+    case 2: return storefileIndexSizeMB;
+    case 3: return storefiles;
+    case 4: return storefileSizeMB;
+    case 5: return stores;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: memStoreSizeMB = (java.lang.Integer)value$; break;
+    case 1: name = (java.nio.ByteBuffer)value$; break;
+    case 2: storefileIndexSizeMB = (java.lang.Integer)value$; break;
+    case 3: storefiles = (java.lang.Integer)value$; break;
+    case 4: storefileSizeMB = (java.lang.Integer)value$; break;
+    case 5: stores = (java.lang.Integer)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,24 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AResult extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AResult\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"entries\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AResultEntry\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]}}}]}");
+  public java.nio.ByteBuffer row;
+  public org.apache.avro.generic.GenericArray entries;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return row;
+    case 1: return entries;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: row = (java.nio.ByteBuffer)value$; break;
+    case 1: entries = (org.apache.avro.generic.GenericArray)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,30 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AResultEntry extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AResultEntry\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]}");
+  public java.nio.ByteBuffer family;
+  public java.nio.ByteBuffer qualifier;
+  public java.nio.ByteBuffer value;
+  public long timestamp;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return family;
+    case 1: return qualifier;
+    case 2: return value;
+    case 3: return timestamp;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: family = (java.nio.ByteBuffer)value$; break;
+    case 1: qualifier = (java.nio.ByteBuffer)value$; break;
+    case 2: value = (java.nio.ByteBuffer)value$; break;
+    case 3: timestamp = (java.lang.Long)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}
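
On the read side, AResult flattens each returned cell into an AResultEntry, so consuming a result is a cast-and-iterate over the raw GenericArray. A small sketch (it assumes the ByteBuffers are array-backed and exactly sized, as buffers built with ByteBuffer.wrap are):

    import org.apache.hadoop.hbase.avro.generated.AResult;
    import org.apache.hadoop.hbase.avro.generated.AResultEntry;

    public class AResultDump {
      // Print family:qualifier @ timestamp for every cell in a result.
      static void dump(AResult result) {
        for (Object o : result.entries) {   // raw GenericArray iterates as Object
          AResultEntry e = (AResultEntry) o;
          System.out.println(new String(e.family.array()) + ":"
              + new String(e.qualifier.array()) + " @ " + e.timestamp);
        }
      }
    }
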
Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,36 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AScan extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AScan\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"startRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"stopRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[{\"type\":\"record\",\"name\":\"ATimeRange\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]}");
+  public java.nio.ByteBuffer startRow;
+  public java.nio.ByteBuffer stopRow;
+  public org.apache.avro.generic.GenericArray columns;
+  public java.lang.Long timestamp;
+  public org.apache.hadoop.hbase.avro.generated.ATimeRange timerange;
+  public java.lang.Integer maxVersions;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return startRow;
+    case 1: return stopRow;
+    case 2: return columns;
+    case 3: return timestamp;
+    case 4: return timerange;
+    case 5: return maxVersions;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: startRow = (java.nio.ByteBuffer)value$; break;
+    case 1: stopRow = (java.nio.ByteBuffer)value$; break;
+    case 2: columns = (org.apache.avro.generic.GenericArray)value$; break;
+    case 3: timestamp = (java.lang.Long)value$; break;
+    case 4: timerange = (org.apache.hadoop.hbase.avro.generated.ATimeRange)value$; break;
+    case 5: maxVersions = (java.lang.Integer)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}
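
AScan follows the same pattern, with every field an optional union so that an empty record falls back to server-side defaults. For instance, a bounded scan over a one-hour window might be described like this (bounds and window are illustrative):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.avro.generated.AScan;
    import org.apache.hadoop.hbase.avro.generated.ATimeRange;

    public class AScanExample {
      public static void main(String[] args) {
        // Rows in ["a", "b"), cells from the last hour only.
        AScan scan = new AScan();
        scan.startRow = ByteBuffer.wrap("a".getBytes());
        scan.stopRow = ByteBuffer.wrap("b".getBytes());
        ATimeRange tr = new ATimeRange();
        tr.maxStamp = System.currentTimeMillis();
        tr.minStamp = tr.maxStamp - 3600 * 1000L;
        scan.timerange = tr;
        // columns, timestamp and maxVersions are nullable and left unset here.
      }
    }
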
Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,27 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AServerAddress extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AServerAddress\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]}");
+  public org.apache.avro.util.Utf8 hostname;
+  public org.apache.avro.util.Utf8 inetSocketAddress;
+  public int port;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return hostname;
+    case 1: return inetSocketAddress;
+    case 2: return port;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: hostname = (org.apache.avro.util.Utf8)value$; break;
+    case 1: inetSocketAddress = (org.apache.avro.util.Utf8)value$; break;
+    case 2: port = (java.lang.Integer)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,33 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AServerInfo extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AServerInfo\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"infoPort\",\"type\":\"int\"},{\"name\":\"load\",\"type\":{\"type\":\"record\",\"name\":\"AServerLoad\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]}},{\"name\":\"serverAddress\",\"type\":{\"type\":\"record\",\"name\":\"AServerAddress\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]}},{\"name\":\"serverName\",\"type\":\"string\"},{\"name\":\"startCode\",\"type\":\"long\"}]}");
+  public int infoPort;
+  public org.apache.hadoop.hbase.avro.generated.AServerLoad load;
+  public org.apache.hadoop.hbase.avro.generated.AServerAddress serverAddress;
+  public org.apache.avro.util.Utf8 serverName;
+  public long startCode;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return infoPort;
+    case 1: return load;
+    case 2: return serverAddress;
+    case 3: return serverName;
+    case 4: return startCode;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: infoPort = (java.lang.Integer)value$; break;
+    case 1: load = (org.apache.hadoop.hbase.avro.generated.AServerLoad)value$; break;
+    case 2: serverAddress = (org.apache.hadoop.hbase.avro.generated.AServerAddress)value$; break;
+    case 3: serverName = (org.apache.avro.util.Utf8)value$; break;
+    case 4: startCode = (java.lang.Long)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java?rev=954175&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java Sun Jun 13 06:53:50 2010
@@ -0,0 +1,48 @@
+package org.apache.hadoop.hbase.avro.generated;
+
+@SuppressWarnings("all")
+public class AServerLoad extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AServerLoad\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]}");
+  public int load;
+  public int maxHeapMB;
+  public int memStoreSizeInMB;
+  public int numberOfRegions;
+  public int numberOfRequests;
+  public org.apache.avro.generic.GenericArray regionsLoad;
+  public int storefileIndexSizeInMB;
+  public int storefiles;
+  public int storefileSizeInMB;
+  public int usedHeapMB;
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return load;
+    case 1: return maxHeapMB;
+    case 2: return memStoreSizeInMB;
+    case 3: return numberOfRegions;
+    case 4: return numberOfRequests;
+    case 5: return regionsLoad;
+    case 6: return storefileIndexSizeInMB;
+    case 7: return storefiles;
+    case 8: return storefileSizeInMB;
+    case 9: return usedHeapMB;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: load = (java.lang.Integer)value$; break;
+    case 1: maxHeapMB = (java.lang.Integer)value$; break;
+    case 2: memStoreSizeInMB = (java.lang.Integer)value$; break;
+    case 3: numberOfRegions = (java.lang.Integer)value$; break;
+    case 4: numberOfRequests = (java.lang.Integer)value$; break;
+    case 5: regionsLoad = (org.apache.avro.generic.GenericArray)value$; break;
+    case 6: storefileIndexSizeInMB = (java.lang.Integer)value$; break;
+    case 7: storefiles = (java.lang.Integer)value$; break;
+    case 8: storefileSizeInMB = (java.lang.Integer)value$; break;
+    case 9: usedHeapMB = (java.lang.Integer)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+}
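
Every class in this commit implements the same get/put-by-index contract from SpecificRecord, which is what Avro's specific datum reader and writer drive during (de)serialization; it can also be exercised directly. A toy illustration using AServerLoad, whose field 1 is maxHeapMB in SCHEMA$ field order:

    import org.apache.hadoop.hbase.avro.generated.AServerLoad;

    public class PutByIndexExample {
      public static void main(String[] args) {
        AServerLoad load = new AServerLoad();
        load.put(1, Integer.valueOf(1024)); // put() takes the boxed value
        System.out.println(load.get(1));    // get() returns it as Object: 1024
      }
    }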