Return-Path:
Delivered-To: apmail-hadoop-hbase-commits-archive@minotaur.apache.org
Received: (qmail 31815 invoked from network); 7 May 2010 19:28:36 -0000
Received: from unknown (HELO mail.apache.org) (140.211.11.3) by 140.211.11.9 with SMTP; 7 May 2010 19:28:36 -0000
Received: (qmail 54469 invoked by uid 500); 7 May 2010 19:28:36 -0000
Delivered-To: apmail-hadoop-hbase-commits-archive@hadoop.apache.org
Received: (qmail 54447 invoked by uid 500); 7 May 2010 19:28:36 -0000
Mailing-List: contact hbase-commits-help@hadoop.apache.org; run by ezmlm
Precedence: bulk
List-Help:
List-Unsubscribe:
List-Post:
List-Id:
Reply-To: hbase-dev@hadoop.apache.org
Delivered-To: mailing list hbase-commits@hadoop.apache.org
Received: (qmail 54440 invoked by uid 99); 7 May 2010 19:28:36 -0000
Received: from nike.apache.org (HELO nike.apache.org) (192.87.106.230) by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 07 May 2010 19:28:36 +0000
X-ASF-Spam-Status: No, hits=-2000.0 required=10.0 tests=ALL_TRUSTED,T_FILL_THIS_FORM_SHORT
X-Spam-Check-By: apache.org
Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4) by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 07 May 2010 19:28:21 +0000
Received: by eris.apache.org (Postfix, from userid 65534) id E32BD2388B43; Fri, 7 May 2010 19:26:56 +0000 (UTC)
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: svn commit: r942186 [11/18] - in /hadoop/hbase/trunk: ./ contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/ core/src/main/java/org/apache/hadoop/hbase/ core/src/main/java/org/apache/hadoop/hbase/client/ core/src/main/java/org/apache/...
Date: Fri, 07 May 2010 19:26:51 -0000
To: hbase-commits@hadoop.apache.org
From: stack@apache.org
X-Mailer: svnmailer-1.0.8
Message-Id: <20100507192656.E32BD2388B43@eris.apache.org>
X-Virus-Checked: Checked by ClamAV on apache.org

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java Fri May  7 19:26:45 2010
@@ -106,7 +106,7 @@ public class IOError extends Exception i
   // isset id assignments

   public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{
-    put(_Fields.MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT,
+    put(_Fields.MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.STRING)));
   }});
@@ -271,7 +271,7 @@ public class IOError extends Exception i
     while (true) {
       field = iprot.readFieldBegin();
-      if (field.type == TType.STOP) {
+      if (field.type == TType.STOP) {
         break;
       }
       _Fields fieldId = _Fields.findByThriftId(field.id);
@@ -282,7 +282,7 @@ public class IOError extends Exception i
         case MESSAGE:
           if (field.type == TType.STRING) {
             this.message = iprot.readString();
-          } else {
+          } else {
             TProtocolUtil.skip(iprot, field.type);
           }
           break;
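For readers who don't live in Thrift-generated code: the hunks in this commit touch only trailing whitespace in the standard read() skeleton that all of these generated classes share. A minimal sketch of that pattern follows; the field id and the message field are illustrative, borrowed from IOError, not part of the commit:

  import org.apache.thrift.TException;
  import org.apache.thrift.protocol.TField;
  import org.apache.thrift.protocol.TProtocol;
  import org.apache.thrift.protocol.TProtocolUtil;
  import org.apache.thrift.protocol.TType;

  // Read fields until STOP; skip anything with an unexpected type or id.
  public void read(TProtocol iprot) throws TException {
    iprot.readStructBegin();
    while (true) {
      TField field = iprot.readFieldBegin();
      if (field.type == TType.STOP) {
        break;                                     // end of struct
      }
      switch (field.id) {
        case 1:                                    // MESSAGE in IOError
          if (field.type == TType.STRING) {
            this.message = iprot.readString();
          } else {
            TProtocolUtil.skip(iprot, field.type); // wrong wire type: skip
          }
          break;
        default:
          TProtocolUtil.skip(iprot, field.type);   // unknown field: skip
      }
      iprot.readFieldEnd();
    }
    iprot.readStructEnd();
  }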
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java Fri May  7 19:26:45 2010
@@ -105,7 +105,7 @@ public class IllegalArgument extends Exc
   // isset id assignments

   public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{
-    put(_Fields.MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT,
+    put(_Fields.MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.STRING)));
   }});
@@ -270,7 +270,7 @@ public class IllegalArgument extends Exc
     while (true) {
       field = iprot.readFieldBegin();
-      if (field.type == TType.STOP) {
+      if (field.type == TType.STOP) {
         break;
       }
       _Fields fieldId = _Fields.findByThriftId(field.id);
@@ -281,7 +281,7 @@ public class IllegalArgument extends Exc
         case MESSAGE:
           if (field.type == TType.STRING) {
             this.message = iprot.readString();
-          } else {
+          } else {
             TProtocolUtil.skip(iprot, field.type);
           }
           break;

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java Fri May  7 19:26:45 2010
@@ -112,11 +112,11 @@ public class Mutation implements TBase
   public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{
-    put(_Fields.IS_DELETE, new FieldMetaData("isDelete", TFieldRequirementType.DEFAULT,
+    put(_Fields.IS_DELETE, new FieldMetaData("isDelete", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.BOOL)));
-    put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT,
+    put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.STRING)));
-    put(_Fields.VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT,
+    put(_Fields.VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.STRING)));
   }});
@@ -411,7 +411,7 @@ public class Mutation implements TBase

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java Fri May  7 19:26:45 2010
   public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{
-    put(_Fields.VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT,
+    put(_Fields.VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.STRING)));
-    put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT,
+    put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.I64)));
   }});
@@ -343,7 +343,7 @@ public class TCell implements TBase

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java Fri May  7 19:26:45 2010
   public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{
-    put(_Fields.START_KEY, new FieldMetaData("startKey", TFieldRequirementType.DEFAULT,
+    put(_Fields.START_KEY, new FieldMetaData("startKey", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.STRING)));
-    put(_Fields.END_KEY, new FieldMetaData("endKey", TFieldRequirementType.DEFAULT,
+    put(_Fields.END_KEY, new FieldMetaData("endKey", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.STRING)));
-    put(_Fields.ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT,
+    put(_Fields.ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.I64)));
-    put(_Fields.NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT,
+    put(_Fields.NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.STRING)));
-    put(_Fields.VERSION, new FieldMetaData("version", TFieldRequirementType.DEFAULT,
+    put(_Fields.VERSION, new FieldMetaData("version", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.BYTE)));
   }});
@@ -546,7 +546,7 @@ public class TRegionInfo implements TBas
     while (true) {
       field = iprot.readFieldBegin();
-      if (field.type == TType.STOP) {
+      if (field.type == TType.STOP) {
         break;
       }
       _Fields fieldId = _Fields.findByThriftId(field.id);
@@ -557,14 +557,14 @@ public class TRegionInfo implements TBas
         case START_KEY:
           if (field.type == TType.STRING) {
             this.startKey = iprot.readBinary();
-          } else {
+          } else {
             TProtocolUtil.skip(iprot, field.type);
           }
           break;
         case END_KEY:
           if (field.type == TType.STRING) {
             this.endKey = iprot.readBinary();
-          } else {
+          } else {
             TProtocolUtil.skip(iprot, field.type);
           }
           break;
@@ -572,14 +572,14 @@ public class TRegionInfo implements TBas
           if (field.type == TType.I64) {
             this.id = iprot.readI64();
             setIdIsSet(true);
-          } else {
+          } else {
             TProtocolUtil.skip(iprot, field.type);
           }
           break;
         case NAME:
           if (field.type == TType.STRING) {
             this.name = iprot.readBinary();
-          } else {
+          } else {
             TProtocolUtil.skip(iprot, field.type);
           }
           break;
@@ -587,7 +587,7 @@ public class TRegionInfo implements TBas
           if (field.type == TType.BYTE) {
             this.version = iprot.readByte();
             setVersionIsSet(true);
-          } else {
+          } else {
             TProtocolUtil.skip(iprot, field.type);
           }
           break;

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java Fri May  7 19:26:45 2010
@@ -107,11 +107,11 @@ public class TRowResult implements TBase
   // isset id assignments

   public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{
-    put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT,
+    put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT,
         new FieldValueMetaData(TType.STRING)));
-    put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT,
-        new MapMetaData(TType.MAP,
-        new FieldValueMetaData(TType.STRING),
+    put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT,
+        new MapMetaData(TType.MAP,
+        new FieldValueMetaData(TType.STRING),
         new StructMetaData(TType.STRUCT, TCell.class))));
   }});
@@ -336,7 +336,7 @@ public class TRowResult implements TBase
     while (true) {
      field = iprot.readFieldBegin();
-      if (field.type == TType.STOP) {
+      if (field.type == TType.STOP) {
        break;
      }
      _Fields fieldId = _Fields.findByThriftId(field.id);
@@ -347,7 +347,7 @@ public class TRowResult implements TBase
        case ROW:
          if (field.type == TType.STRING) {
            this.row = iprot.readBinary();
-          } else {
+          } else {
            TProtocolUtil.skip(iprot, field.type);
          }
          break;
@@ -367,7 +367,7 @@ public class TRowResult implements TBase
              }
              iprot.readMapEnd();
            }
-          } else {
+          } else {
            TProtocolUtil.skip(iprot, field.type);
          }
          break;

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Base64.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Base64.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Base64.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Base64.java Fri May  7 19:26:45 2010
@@ -137,7 +137,7 @@ public class Base64 {
  /**
   * Encode using Base64-like encoding that is URL and Filename safe as
-  * described in Section 4 of RFC3548:
+  * described in Section 4 of RFC3548:
   * http://www.faqs.org/rfcs/rfc3548.html.
   * It is important to note that data encoded this way is not
@@ -155,7 +155,7 @@ public class Base64 {
  public final static int ORDERED = 32;

  /* ******** P R I V A T E   F I E L D S ******** */
-
+
  private static final Log LOG = LogFactory.getLog(Base64.class);

  /** Maximum line length (76) of Base64 output. */
@@ -286,8 +286,8 @@ public class Base64 {
   * exactly the same as the input value. It is described in the RFC change
   * request:
   * http://www.faqs.org/qa/rfcc-1940.html.
-  *
-  * It replaces "plus" and "slash" with "hyphen" and "underscore" and
+  *
+  * It replaces "plus" and "slash" with "hyphen" and "underscore" and
   * rearranges the alphabet so that the characters are in their natural sort
   * order.
   */
@@ -353,7 +353,7 @@ public class Base64 {
    } else if ((options & ORDERED) == ORDERED) {
      return _ORDERED_ALPHABET;
-
+
    } else {
      return _STANDARD_ALPHABET;
    }
@@ -370,10 +370,10 @@ public class Base64 {
  protected static byte[] getDecodabet(int options) {
    if ((options & URL_SAFE) == URL_SAFE) {
      return _URL_SAFE_DECODABET;
-
+
    } else if ((options & ORDERED) == ORDERED) {
      return _ORDERED_DECODABET;
-
+
    } else {
      return _STANDARD_DECODABET;
    }
@@ -384,9 +384,9 @@ public class Base64 {
  /**
   * Main program. Used for testing.
-  *
+  *
   * Encodes or decodes two files from the command line
-  *
+  *
   * @param args command arguments
   */
  public static void main(String[] args) {
@@ -411,7 +411,7 @@ public class Base64 {
  /**
   * Prints command line usage.
-  *
+  *
   * @param msg A message to include with usage info.
   */
  private static void usage(String msg) {
@@ -427,7 +427,7 @@ public class Base64 {
   * significant bytes in your array is given by numSigBytes. The
   * array threeBytes needs only be as big as numSigBytes.
   * Code can reuse a byte array by passing a four-byte array as b4.
-  *
+  *
   * @param b4 A reusable byte array to reduce array instantiation
   * @param threeBytes the array to convert
   * @param numSigBytes the number of significant bytes in your array
@@ -454,7 +454,7 @@ public class Base64 {
   *
   * This is the lowest level of the encoding methods with all possible
   * parameters.
-  *
+  *
   * @param source the array to convert
   * @param srcOffset the index where conversion begins
   * @param numSigBytes the number of significant bytes in your array
@@ -468,7 +468,7 @@ public class Base64 {
      int numSigBytes, byte[] destination, int destOffset, int options) {
    byte[] ALPHABET = getAlphabet(options);

-    //           1         2         3
+    //           1         2         3
    // 01234567890123456789012345678901 Bit position
    // --------000000001111111122222222 Array position from threeBytes
    // --------|    ||    ||    ||    | Six bit groups to index ALPHABET
@@ -516,7 +516,7 @@ public class Base64 {
   * serialized object. If the object cannot be serialized or there is another
   * error, the method will return null. The object is not
   * GZip-compressed before being encoded.
-  *
+  *
   * @param serializableObject The object to encode
   * @return The Base64-encoded object
   * @since 1.4
@@ -541,7 +541,7 @@ public class Base64 {
   *
   * Example:
   * encodeObject( myObj, Base64.GZIP | Base64.DONT_BREAK_LINES )
-  *
+  *
   * @param serializableObject The object to encode
   * @param options Specified options
   * @see Base64#GZIP
@@ -569,11 +569,11 @@ public class Base64 {
    } catch (UnsupportedEncodingException uue) {
      return new String(baos.toByteArray());
-
+
    } catch (IOException e) {
      LOG.error("error encoding object", e);
      return null;
-
+
    } finally {
      if (oos != null) {
        try {
@@ -599,7 +599,7 @@ public class Base64 {
  /**
   * Encodes a byte array into Base64 notation. Does not GZip-compress data.
-  *
+  *
   * @param source The data to convert
   * @return encoded byte array
   * @since 1.4
@@ -617,13 +617,13 @@ public class Base64 {
   *   DONT_BREAK_LINES: don't break lines at 76 characters. Note:
   *     Technically, this makes your encoding non-compliant.
-  *
+  *
   * Example: encodeBytes( myData, Base64.GZIP ) or
   * Example:
   * encodeBytes( myData, Base64.GZIP | Base64.DONT_BREAK_LINES )
-  *
+  *
   * @param source The data to convert
   * @param options Specified options
   * @see Base64#GZIP
@@ -639,7 +639,7 @@ public class Base64 {
  /**
   * Encodes a byte array into Base64 notation. Does not GZip-compress data.
-  *
+  *
   * @param source The data to convert
   * @param off Offset in array where conversion should begin
   * @param len Length of data to convert
@@ -659,13 +659,13 @@ public class Base64 {
   *   DONT_BREAK_LINES: don't break lines at 76 characters. Note:
   *     Technically, this makes your encoding non-compliant.
-  *
+  *
   * Example: encodeBytes( myData, Base64.GZIP ) or
   * Example:
   * encodeBytes( myData, Base64.GZIP | Base64.DONT_BREAK_LINES )
-  *
+  *
   * @param source The data to convert
   * @param off Offset in array where conversion should begin
   * @param len Length of data to convert
@@ -684,7 +684,7 @@ public class Base64 {
      GZIPOutputStream gzos = null;

      try {
-        gzos =
+        gzos =
          new GZIPOutputStream(new Base64OutputStream(baos, ENCODE | options));
        gzos.write(source, off, len);
@@ -698,7 +698,7 @@ public class Base64 {
      } catch (IOException e) {
        LOG.error("error encoding byte array", e);
        return null;
-
+
      } finally {
        if (gzos != null) {
          try {
@@ -715,7 +715,7 @@ public class Base64 {
      } // end finally

    } // end Compress
-
+
    // Don't compress. Better not to use streams at all then.
    boolean breakLines = ((options & DONT_BREAK_LINES) == 0);
@@ -770,7 +770,7 @@ public class Base64 {
   * This is the lowest level of the decoding methods with all possible
   * parameters.
-  *
+  *
   * @param source the array to convert
   * @param srcOffset the index where conversion begins
   * @param destination the array to hold the conversion
@@ -796,7 +796,7 @@ public class Base64 {
      destination[destOffset] = (byte) (outBuff >>> 16);
      return 1;
-
+
    } else if (source[srcOffset + 3] == EQUALS_SIGN) { // Example: DkL=
      // Two ways to do the same thing. Don't know which way I like best.
      // int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
@@ -829,9 +829,9 @@ public class Base64 {
        destination[destOffset + 2] = (byte) (outBuff);
        return 3;
-
+
      } catch (Exception e) {
-        LOG.error("error decoding bytes at " + source[srcOffset] + ": " +
+        LOG.error("error decoding bytes at " + source[srcOffset] + ": " +
          (DECODABET[source[srcOffset]]) + ", " + source[srcOffset + 1] +
          ": " + (DECODABET[source[srcOffset + 1]]) + ", " +
          source[srcOffset + 2] + ": " + (DECODABET[source[srcOffset + 2]]) +
@@ -846,7 +846,7 @@ public class Base64 {
   * Very low-level access to decoding ASCII characters in the form of a byte
   * array. Does not support automatically gunzipping or any other "fancy"
   * features.
-  *
+  *
   * @param source The Base64 encoded data
   * @param off The offset of where to begin decoding
   * @param len The length of characters to decode
@@ -899,7 +899,7 @@ public class Base64 {
  /**
   * Decodes data from Base64 notation, automatically detecting gzip-compressed
   * data and decompressing it.
-  *
+  *
   * @param s the string to decode
   * @return the decoded data
   * @since 1.4
@@ -911,7 +911,7 @@ public class Base64 {
  /**
   * Decodes data from Base64 notation, automatically detecting gzip-compressed
   * data and decompressing it.
-  *
+  *
   * @param s the string to decode
   * @param options options for decode
   * @see Base64#URL_SAFE
@@ -977,7 +977,7 @@ public class Base64 {
  /**
   * Attempts to decode Base64 data and deserialize a Java Object within.
   * Returns null if there was an error.
-  *
+  *
   * @param encodedObject The Base64 data to decode
   * @return The decoded and deserialized object
   * @since 1.5
@@ -1013,11 +1013,11 @@ public class Base64 {
  /**
   * Convenience method for encoding data to a file.
-  *
+  *
   * @param dataToEncode byte array of data to encode in base64 form
   * @param filename Filename for saving encoded data
   * @return true if successful, false otherwise
-  *
+  *
   * @since 2.1
   */
  public static boolean encodeToFile(byte[] dataToEncode, String filename) {
@@ -1031,7 +1031,7 @@ public class Base64 {
    } catch (IOException e) {
      LOG.error("error encoding file: " + filename, e);
      success = false;
-
+
    } finally {
      if (bos != null) {
        try {
@@ -1047,11 +1047,11 @@ public class Base64 {
  /**
   * Convenience method for decoding data to a file.
-  *
+  *
   * @param dataToDecode Base64-encoded data as a string
   * @param filename Filename for saving decoded data
   * @return true if successful, false otherwise
-  *
+  *
   * @since 2.1
   */
  public static boolean decodeToFile(String dataToDecode, String filename) {
@@ -1061,7 +1061,7 @@ public class Base64 {
      bos = new Base64OutputStream(new FileOutputStream(filename), DECODE);
      bos.write(dataToDecode.getBytes(PREFERRED_ENCODING));
      success = true;
-
+
    } catch (IOException e) {
      LOG.error("error decoding to file: " + filename, e);
      success = false;
@@ -1081,10 +1081,10 @@ public class Base64 {
  /**
   * Convenience method for reading a base64-encoded file and decoding it.
-  *
+  *
   * @param filename Filename for reading encoded data
   * @return decoded byte array or null if unsuccessful
-  *
+  *
   * @since 2.1
   */
  public static byte[] decodeFromFile(String filename) {
@@ -1096,33 +1096,33 @@ public class Base64 {
      // Check the size of file
      if (file.length() > Integer.MAX_VALUE) {
-        LOG.fatal("File is too big for this convenience method (" +
+        LOG.fatal("File is too big for this convenience method (" +
          file.length() + " bytes).");
        return null;
      } // end if: file too big for int index
-
+
      buffer = new byte[(int) file.length()];

      // Open a stream
-
+
      bis = new Base64InputStream(new BufferedInputStream(
          new FileInputStream(file)), DECODE);

      // Read until done
-
+
      int length = 0;
      for (int numBytes; (numBytes = bis.read(buffer, length, 4096)) >= 0; ) {
        length += numBytes;
      }
-
+
      // Save in a variable to return
-
+
      decodedData = new byte[length];
      System.arraycopy(buffer, 0, decodedData, 0, length);

    } catch (IOException e) {
      LOG.error("Error decoding from file " + filename, e);
-
+
    } finally {
      if (bis != null) {
        try {
@@ -1138,10 +1138,10 @@ public class Base64 {
  /**
   * Convenience method for reading a binary file and base64-encoding it.
-  *
+  *
   * @param filename Filename for reading binary data
   * @return base64-encoded string or null if unsuccessful
-  *
+  *
   * @since 2.1
   */
  public static String encodeFromFile(String filename) {
@@ -1149,9 +1149,9 @@ public class Base64 {
    Base64InputStream bis = null;
    try {
      File file = new File(filename);
-
+
      // Need max() for math on small files (v2.2.1)
-
+
      byte[] buffer = new byte[Math.max((int) (file.length() * 1.4), 40)];

      // Open a stream
@@ -1166,12 +1166,12 @@ public class Base64 {
      }

      // Save in a variable to return
-
+
      encodedData = new String(buffer, 0, length, PREFERRED_ENCODING);

    } catch (IOException e) {
      LOG.error("Error encoding from file " + filename, e);
-
+
    } finally {
      if (bis != null) {
        try {
@@ -1187,7 +1187,7 @@ public class Base64 {
  /**
   * Reads infile and encodes it to outfile.
-  *
+  *
   * @param infile Input file
   * @param outfile Output file
   * @since 2.2
@@ -1198,7 +1198,7 @@ public class Base64 {
    try {
      out = new BufferedOutputStream(new FileOutputStream(outfile));
      out.write(encoded.getBytes("US-ASCII")); // Strict, 7-bit output.
-
+
    } catch (IOException e) {
      LOG.error("error encoding from file " + infile + " to " + outfile, e);
@@ -1215,7 +1215,7 @@ public class Base64 {
  /**
   * Reads infile and decodes it to outfile.
-  *
+  *
   * @param infile Input file
   * @param outfile Output file
   * @since 2.2
@@ -1226,7 +1226,7 @@ public class Base64 {
    try {
      out = new BufferedOutputStream(new FileOutputStream(outfile));
      out.write(decoded);
-
+
    } catch (IOException e) {
      LOG.error("error decoding from file " + infile + " to " + outfile, e);
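The encodeBytes/decode pair patched above is the usual entry point to this class. A minimal round-trip sketch, assuming the patched class is on the classpath; the input bytes are illustrative:

  import org.apache.hadoop.hbase.util.Base64;

  public class Base64RoundTrip {
    public static void main(String[] args) {
      byte[] raw = "hello hbase".getBytes();

      // GZip-compress, then Base64-encode without 76-column line breaks
      // (DONT_BREAK_LINES makes the output technically non-compliant).
      String encoded = Base64.encodeBytes(raw, Base64.GZIP | Base64.DONT_BREAK_LINES);

      // decode() detects the gzip header and decompresses automatically.
      byte[] decoded = Base64.decode(encoded);

      System.out.println(new String(decoded)); // prints "hello hbase"
    }
  }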
@@ -1247,7 +1247,7 @@ public class Base64 {
  /**
   * A {@link Base64.Base64InputStream} will read data from another
   * InputStream, given in the constructor, and
   * encode/decode to/from Base64 notation on the fly.
-  *
+  *
   * @see Base64
   * @since 1.3
   */
@@ -1264,7 +1264,7 @@ public class Base64 {
    /**
     * Constructs a {@link Base64InputStream} in DECODE mode.
-     *
+     *
     * @param in the InputStream from which to read data.
     * @since 1.3
     */
@@ -1276,18 +1276,18 @@ public class Base64 {
    /**
     * Constructs a {@link Base64.Base64InputStream} in either ENCODE or DECODE mode.
     *
     * Valid options:
-     *
+     *
     *   ENCODE or DECODE: Encode or Decode as data is read.
     *   DONT_BREAK_LINES: don't break lines at 76 characters
     *     (only meaningful when encoding)
     *     <i>Note: Technically, this makes your encoding non-compliant.</i>
-     *
+     *
     * Example: new Base64.Base64InputStream( in, Base64.DECODE )
-     *
-     *
+     *
+     *
     * @param in the InputStream from which to read data.
     * @param options Specified options
     * @see Base64#ENCODE
@@ -1311,7 +1311,7 @@ public class Base64 {
    /**
     * Reads enough of the input stream to convert to/from Base64 and returns
     * the next byte.
-     *
+     *
     * @return next byte
     * @since 1.3
     */
@@ -1369,10 +1369,10 @@ public class Base64 {
          if (i == 4) {
            numSigBytes = decode4to3(b4, 0, buffer, 0, options);
            position = 0;
-
+
          } else if (i == 0) {
            return -1;
-
+
          } else {
            // Must have broken out from above.
            throw new IOException("Improperly padded Base64 input.");
@@ -1415,7 +1415,7 @@ public class Base64 {
    /**
     * Calls {@link #read()} repeatedly until the end of stream is reached or
     * len bytes are read. Returns number of bytes read into array
     * or -1 if end of stream is encountered.
-     *
+     *
     * @param dest array to hold values
     * @param off offset for array
     * @param len max number of bytes to read into array
@@ -1447,7 +1447,7 @@ public class Base64 {
  /**
   * A {@link Base64.Base64OutputStream} will write data to another
   * OutputStream, given in the constructor, and
   * encode/decode to/from Base64 notation on the fly.
-  *
+  *
   * @see Base64
   * @since 1.3
   */
@@ -1465,7 +1465,7 @@ public class Base64 {
    /**
     * Constructs a {@link Base64OutputStream} in ENCODE mode.
-     *
+     *
     * @param out the OutputStream to which data will be written.
     * @since 1.3
     */
@@ -1477,17 +1477,17 @@ public class Base64 {
    /**
     * Constructs a {@link Base64OutputStream} in either ENCODE or DECODE mode.
     *
     * Valid options:
-     *
+     *
     *   ENCODE or DECODE: Encode or Decode as data is read.
     *   DONT_BREAK_LINES: don't break lines at 76 characters (only
     *     meaningful when encoding) Note: Technically, this makes your
     *     encoding non-compliant.
-     *
+     *
     * Example: new Base64.Base64OutputStream( out, Base64.ENCODE )
-     *
+     *
     * @param out the OutputStream to which data will be written.
     * @param options Specified options.
     * @see Base64#ENCODE
@@ -1514,7 +1514,7 @@ public class Base64 {
     * notation. When encoding, bytes are buffered three at a time before the
     * output stream actually gets a write() call. When decoding, bytes are
     * buffered four at a time.
-     *
+     *
     * @param theByte the byte to write
     * @since 1.3
     */
@@ -1539,7 +1539,7 @@ public class Base64 {
          position = 0;
        } // end if: enough to output
-
+
      } else {
        // Meaningful Base64 character?
        if (decodabet[theByte & 0x7f] > WHITE_SPACE_ENC) {
@@ -1549,7 +1549,7 @@ public class Base64 {
            out.write(b4, 0, len);
            position = 0;
          } // end if: enough to output
-
+
        } else if (decodabet[theByte & 0x7f] != WHITE_SPACE_ENC) {
          throw new IOException("Invalid character in Base64 data.");
        } // end else: not white space either
@@ -1559,7 +1559,7 @@ public class Base64 {
    /**
     * Calls {@link #write(int)} repeatedly until len bytes are
     * written.
-     *
+     *
     * @param theBytes array from which to read bytes
     * @param off offset for array
     * @param len max number of bytes to read into array
@@ -1582,7 +1582,7 @@ public class Base64 {
    /**
     * Method added by PHIL. [Thanks, PHIL. -Rob] This pads the buffer without
     * closing the stream.
-     *
+     *
     * @throws IOException e
     */
    public void flushBase64() throws IOException {
@@ -1600,7 +1600,7 @@ public class Base64 {
    /**
     * Flushes and closes (I think, in the superclass) the stream.
-     *
+     *
     * @since 1.3
     */
    @Override
@@ -1631,7 +1631,7 @@ public class Base64 {
    /**
     * Resumes encoding of the stream. May be helpful if you need to embed a
     * piece of base640-encoded data in a stream.
-     *
+     *
     * @since 1.5.1
     */
    public void resumeEncoding() {
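The stream wrappers above encode and decode on the fly. A short sketch of file-to-file encoding with Base64.Base64OutputStream; file names are illustrative and error handling is kept minimal:

  import java.io.FileInputStream;
  import java.io.FileOutputStream;
  import java.io.InputStream;
  import java.io.OutputStream;
  import org.apache.hadoop.hbase.util.Base64;

  public class StreamEncode {
    public static void main(String[] args) throws Exception {
      InputStream in = new FileInputStream("data.bin");
      // Wraps the file stream; bytes are buffered three at a time and
      // emitted as Base64 as we copy.
      OutputStream out = new Base64.Base64OutputStream(
          new FileOutputStream("data.b64"), Base64.ENCODE);
      byte[] buf = new byte[4096];
      for (int n; (n = in.read(buf)) >= 0; ) {
        out.write(buf, 0, n);
      }
      out.close(); // pads and flushes the final quantum
      in.close();
    }
  }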
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java Fri May  7 19:26:45 2010
@@ -30,12 +30,12 @@ import java.util.Properties;
 /**
  * Class for determining the "size" of a class, an attempt to calculate the
  * actual bytes that an object of this class will occupy in memory
- *
+ *
  * The core of this class is taken from the Derby project
  */
 public class ClassSize {
   static final Log LOG = LogFactory.getLog(ClassSize.class);
-
+
   private static int nrOfRefsPerObj = 2;

   /** Array overhead */
@@ -43,61 +43,61 @@ public class ClassSize {
   /** Overhead for ArrayList(0) */
   public static int ARRAYLIST = 0;
-
+
   /** Overhead for ByteBuffer */
   public static int BYTE_BUFFER = 0;

   /** Overhead for an Integer */
   public static int INTEGER = 0;
-
+
   /** Overhead for entry in map */
   public static int MAP_ENTRY = 0;
-
+
   /** Object overhead is minimum 2 * reference size (8 bytes on 64-bit) */
   public static int OBJECT = 0;
-
+
   /** Reference size is 8 bytes on 64-bit, 4 bytes on 32-bit */
   public static int REFERENCE = 0;
-
+
   /** String overhead */
   public static int STRING = 0;

   /** Overhead for TreeMap */
   public static int TREEMAP = 0;
-
+
   /** Overhead for ConcurrentHashMap */
   public static int CONCURRENT_HASHMAP = 0;
-
+
   /** Overhead for ConcurrentHashMap.Entry */
   public static int CONCURRENT_HASHMAP_ENTRY = 0;
-
+
   /** Overhead for ConcurrentHashMap.Segment */
   public static int CONCURRENT_HASHMAP_SEGMENT = 0;
-
+
   /** Overhead for ConcurrentSkipListMap */
   public static int CONCURRENT_SKIPLISTMAP = 0;
-
+
   /** Overhead for ConcurrentSkipListMap Entry */
   public static int CONCURRENT_SKIPLISTMAP_ENTRY = 0;
-
+
   /** Overhead for ReentrantReadWriteLock */
   public static int REENTRANT_LOCK = 0;
-
+
   /** Overhead for AtomicLong */
   public static int ATOMIC_LONG = 0;
-
+
   /** Overhead for AtomicInteger */
   public static int ATOMIC_INTEGER = 0;
-
+
   /** Overhead for AtomicBoolean */
   public static int ATOMIC_BOOLEAN = 0;
-
+
   /** Overhead for CopyOnWriteArraySet */
   public static int COPYONWRITE_ARRAYSET = 0;
-
+
   /** Overhead for CopyOnWriteArrayList */
   public static int COPYONWRITE_ARRAYLIST = 0;
-
+
   private static final String THIRTY_TWO = "32";

   /**
@@ -108,7 +108,7 @@ public class ClassSize {
     // Figure out whether this is a 32 or 64 bit machine.
     Properties sysProps = System.getProperties();
     String arcModel = sysProps.getProperty("sun.arch.data.model");
-
+
     //Default value is set to 8, covering the case when arcModel is unknown
     REFERENCE = 8;
     if (arcModel.equals(THIRTY_TWO)) {
@@ -116,7 +116,7 @@ public class ClassSize {
     }

     OBJECT = 2 * REFERENCE;
-
+
     ARRAY = 3 * REFERENCE;

     ARRAYLIST = align(OBJECT + align(REFERENCE) + align(ARRAY) +
@@ -124,48 +124,48 @@ public class ClassSize {
     //noinspection PointlessArithmeticExpression
     BYTE_BUFFER = align(OBJECT + align(REFERENCE) + align(ARRAY) +
-      (5 * Bytes.SIZEOF_INT) +
-      (3 * Bytes.SIZEOF_BOOLEAN) + Bytes.SIZEOF_LONG);
-
+      (5 * Bytes.SIZEOF_INT) +
+      (3 * Bytes.SIZEOF_BOOLEAN) + Bytes.SIZEOF_LONG);
+
     INTEGER = align(OBJECT + Bytes.SIZEOF_INT);
-
+
     MAP_ENTRY = align(OBJECT + 5 * REFERENCE + Bytes.SIZEOF_BOOLEAN);
-
+
     TREEMAP = align(OBJECT + (2 * Bytes.SIZEOF_INT) + align(7 * REFERENCE));
-
+
     STRING = align(OBJECT + ARRAY + REFERENCE + 3 * Bytes.SIZEOF_INT);
-
-    CONCURRENT_HASHMAP = align((2 * Bytes.SIZEOF_INT) + ARRAY +
+
+    CONCURRENT_HASHMAP = align((2 * Bytes.SIZEOF_INT) + ARRAY +
       (6 * REFERENCE) + OBJECT);
-
+
     CONCURRENT_HASHMAP_ENTRY = align(REFERENCE + OBJECT + (3 * REFERENCE) +
       (2 * Bytes.SIZEOF_INT));
-
-    CONCURRENT_HASHMAP_SEGMENT = align(REFERENCE + OBJECT +
+
+    CONCURRENT_HASHMAP_SEGMENT = align(REFERENCE + OBJECT +
       (3 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_FLOAT + ARRAY);
-
+
     CONCURRENT_SKIPLISTMAP = align(Bytes.SIZEOF_INT + OBJECT + (8 * REFERENCE));
-
+
     CONCURRENT_SKIPLISTMAP_ENTRY = align(
-      align(OBJECT + (3 * REFERENCE)) + /* one node per entry */
+      align(OBJECT + (3 * REFERENCE)) + /* one node per entry */
       align((OBJECT + (3 * REFERENCE))/2)); /* one index per two entries */
-
+
     REENTRANT_LOCK = align(OBJECT + (3 * REFERENCE));
-
+
     ATOMIC_LONG = align(OBJECT + Bytes.SIZEOF_LONG);
-
+
     ATOMIC_INTEGER = align(OBJECT + Bytes.SIZEOF_INT);
-
+
     ATOMIC_BOOLEAN = align(OBJECT + Bytes.SIZEOF_BOOLEAN);
-
+
     COPYONWRITE_ARRAYSET = align(OBJECT + REFERENCE);
-
+
     COPYONWRITE_ARRAYLIST = align(OBJECT + (2 * REFERENCE) + ARRAY);
   }
-
+
   /**
-   * The estimate of the size of a class instance depends on whether the JVM
-   * uses 32 or 64 bit addresses, that is it depends on the size of an object
+   * The estimate of the size of a class instance depends on whether the JVM
+   * uses 32 or 64 bit addresses, that is it depends on the size of an object
    * reference. It is a linear function of the size of a reference, e.g.
    * 24 + 5*r where r is the size of a reference (usually 4 or 8 bytes).
   *
@@ -183,7 +183,7 @@ public class ClassSize {
     int primitives = 0;
     int arrays = 0;
     //The number of references that a new object takes
-    int references = nrOfRefsPerObj;
+    int references = nrOfRefsPerObj;

     for ( ; null != cl; cl = cl.getSuperclass()) {
       Field[] field = cl.getDeclaredFields();
@@ -230,7 +230,7 @@ public class ClassSize {
   }

   /**
-   * Estimate the static space taken up by a class instance given the
+   * Estimate the static space taken up by a class instance given the
    * coefficients returned by getSizeCoefficients.
    *
    * @param coeff the coefficients
@@ -247,19 +247,19 @@ public class ClassSize {
       if (LOG.isDebugEnabled()) {
         // Write out region name as string and its encoded name.
         LOG.debug("Primitives " + coeff[0] + ", arrays " + coeff[1] +
-          ", references(includes " + nrOfRefsPerObj +
-          " for object overhead) " + coeff[2] + ", refSize " + REFERENCE +
+          ", references(includes " + nrOfRefsPerObj +
+          " for object overhead) " + coeff[2] + ", refSize " + REFERENCE +
           ", size " + size);
       }
     }
     return size;
-  }
+  }

   /**
-   * Estimate the static space taken up by the fields of a class. This includes
-   * the space taken up by by references (the pointer) but not by the referenced
-   * object. So the estimated size of an array field does not depend on the size
-   * of the array. Similarly the size of an object (reference) field does not
+   * Estimate the static space taken up by the fields of a class. This includes
+   * the space taken up by by references (the pointer) but not by the referenced
+   * object. So the estimated size of an array field does not depend on the size
+   * of the array. Similarly the size of an object (reference) field does not
    * depend on the object.
    *
    * @param cl class
@@ -269,7 +269,7 @@ public class ClassSize {
   @SuppressWarnings("unchecked")
   public static long estimateBase(Class cl, boolean debug) {
     return estimateBaseFromCoefficients( getSizeCoefficients(cl, debug), debug);
-  }
+  }

   /**
    * Aligns a number to 8.
@@ -279,7 +279,7 @@ public class ClassSize {
   public static int align(int num) {
     return (int)(align((long)num));
   }
-
+
   /**
    * Aligns a number to 8.
    * @param num number to align to 8
@@ -290,6 +290,6 @@ public class ClassSize {
     //stored and sent together
     return ((num + 7) >> 3) << 3;
   }
-
+
 }
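Since align() rounds up to the next multiple of 8 and every overhead constant is derived from REFERENCE, the class is easy to sanity-check. A minimal sketch; the printed numbers depend on the JVM's sun.arch.data.model:

  import org.apache.hadoop.hbase.util.ClassSize;

  public class SizeDemo {
    public static void main(String[] args) {
      // ((13 + 7) >> 3) << 3 == 16: next multiple of 8.
      System.out.println(ClassSize.align(13));

      // 4 on a 32-bit JVM, 8 on 64-bit (and 8 when the model is unknown).
      System.out.println(ClassSize.REFERENCE);

      // Linear in REFERENCE, per the 24 + 5*r comment above.
      System.out.println(ClassSize.estimateBase(String.class, false));
    }
  }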
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java Fri May  7 19:26:45 2010
@@ -102,11 +102,11 @@ public class FSUtils {
   /**
    * Checks to see if the specified file system is available
-   *
+   *
    * @param fs filesystem
    * @throws IOException e
    */
-  public static void checkFileSystemAvailable(final FileSystem fs)
+  public static void checkFileSystemAvailable(final FileSystem fs)
   throws IOException {
     if (!(fs instanceof DistributedFileSystem)) {
       return;
@@ -129,10 +129,10 @@ public class FSUtils {
     io.initCause(exception);
     throw io;
   }
-
+
   /**
    * Verifies current version of file system
-   *
+   *
    * @param fs filesystem object
    * @param rootdir root hbase directory
    * @return null if no version file exists, version string otherwise.
@@ -153,20 +153,20 @@ public class FSUtils {
     }
     return version;
   }
-
+
   /**
    * Verifies current version of file system
-   *
+   *
    * @param fs file system
    * @param rootdir root directory of HBase installation
-   * @param message if true, issues a message on System.out
-   *
+   * @param message if true, issues a message on System.out
+   *
    * @throws IOException e
    */
-  public static void checkVersion(FileSystem fs, Path rootdir,
+  public static void checkVersion(FileSystem fs, Path rootdir,
     boolean message) throws IOException {
     String version = getVersion(fs, rootdir);
-
+
     if (version == null) {
       if (!rootRegionExists(fs, rootdir)) {
         // rootDir is empty (no version file and no root region)
@@ -176,7 +176,7 @@ public class FSUtils {
       }
     } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0)
       return;
-
+
     // version is deprecated require migration
     // Output on stdout so user sees it in terminal.
     String msg = "File system needs to be upgraded."
@@ -188,28 +188,28 @@ public class FSUtils {
     }
     throw new FileSystemVersionException(msg);
   }
-
+
   /**
    * Sets version of file system
-   *
+   *
    * @param fs filesystem object
    * @param rootdir hbase root
    * @throws IOException e
    */
-  public static void setVersion(FileSystem fs, Path rootdir)
+  public static void setVersion(FileSystem fs, Path rootdir)
   throws IOException {
     setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION);
   }

   /**
    * Sets version of file system
-   *
+   *
    * @param fs filesystem object
    * @param rootdir hbase root directory
    * @param version version to set
    * @throws IOException e
    */
-  public static void setVersion(FileSystem fs, Path rootdir, String version)
+  public static void setVersion(FileSystem fs, Path rootdir, String version)
   throws IOException {
     FSDataOutputStream s =
       fs.create(new Path(rootdir, HConstants.VERSION_FILE_NAME));
@@ -220,7 +220,7 @@ public class FSUtils {
   /**
    * Verifies root directory path is a valid URI with a scheme
-   *
+   *
    * @param root root directory path
    * @return Passed root argument.
    * @throws IOException if not a valid URI with a scheme
@@ -288,7 +288,7 @@ public class FSUtils {
    * This method is useful if you want to print out a Path without qualifying
    * Filesystem instance.
    * @param p Filesystem Path whose 'path' component we are to return.
-   * @return Path portion of the Filesystem
+   * @return Path portion of the Filesystem
    */
   public static String getPath(Path p) {
     return p.toUri().getPath();
@@ -306,7 +306,7 @@ public class FSUtils {
   /**
    * Checks if root region exists
-   *
+   *
    * @param fs file system
    * @param rootdir root directory of HBase installation
    * @return true if exists
@@ -366,42 +366,42 @@ public class FSUtils {
   }

   /**
-   * Returns the total overall fragmentation percentage. Includes .META. and
+   * Returns the total overall fragmentation percentage. Includes .META. and
    * -ROOT- as well.
-   *
+   *
    * @param master The master defining the HBase root and file system.
    * @return A map for each table and its percentage.
    * @throws IOException When scanning the directory fails.
    */
-  public static int getTotalTableFragmentation(final HMaster master)
+  public static int getTotalTableFragmentation(final HMaster master)
   throws IOException {
     Map map = getTableFragmentation(master);
     return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
   }
-
+
   /**
    * Runs through the HBase rootdir and checks how many stores for each table
-   * have more than one file in them. Checks -ROOT- and .META. too. The total
-   * percentage across all tables is stored under the special key "-TOTAL-".
-   *
+   * have more than one file in them. Checks -ROOT- and .META. too. The total
+   * percentage across all tables is stored under the special key "-TOTAL-".
+   *
    * @param master The master defining the HBase root and file system.
    * @return A map for each table and its percentage.
    * @throws IOException When scanning the directory fails.
    */
   public static Map getTableFragmentation(
-    final HMaster master)
+    final HMaster master)
   throws IOException {
     Path path = master.getRootDir();
     // since HMaster.getFileSystem() is package private
     FileSystem fs = path.getFileSystem(master.getConfiguration());
     return getTableFragmentation(fs, path);
   }
-
+
   /**
    * Runs through the HBase rootdir and checks how many stores for each table
-   * have more than one file in them. Checks -ROOT- and .META. too. The total
-   * percentage across all tables is stored under the special key "-TOTAL-".
-   *
+   * have more than one file in them. Checks -ROOT- and .META. too. The total
+   * percentage across all tables is stored under the special key "-TOTAL-".
+   *
    * @param fs The file system to use.
    * @param hbaseRootDir The root directory to scan.
    * @return A map for each table and its percentage.
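A minimal sketch of the version-check flow these helpers implement; the HDFS root path is illustrative:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.util.FSUtils;

  public class VersionCheck {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Path rootdir = new Path("hdfs://namenode:9000/hbase");
      FileSystem fs = rootdir.getFileSystem(conf);

      // No-op for local filesystems; probes DFS availability otherwise.
      FSUtils.checkFileSystemAvailable(fs);

      // null means no hbase.version file exists yet.
      String version = FSUtils.getVersion(fs, rootdir);

      // Throws FileSystemVersionException on mismatch; 'true' also
      // echoes the message on stdout.
      FSUtils.checkVersion(fs, rootdir, true);
      System.out.println("hbase.version = " + version);
    }
  }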
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Hash.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Hash.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Hash.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Hash.java Fri May  7 19:26:45 2010
@@ -32,7 +32,7 @@ public abstract class Hash {
   public static final int JENKINS_HASH = 0;
   /** Constant to denote {@link MurmurHash}. */
   public static final int MURMUR_HASH = 1;
-
+
   /**
    * This utility method converts String representation of hash function name
    * to a symbolic constant. Currently two function types are supported,
@@ -49,7 +49,7 @@ public abstract class Hash {
       return INVALID_HASH;
     }
   }
-
+
   /**
    * This utility method converts the name of the configured
    * hash type to a symbolic constant.
@@ -60,7 +60,7 @@ public abstract class Hash {
     String name = conf.get("hbase.hash.type", "murmur");
     return parseHashType(name);
   }
-
+
   /**
    * Get a singleton instance of hash function of a given type.
    * @param type predefined hash type
@@ -76,7 +76,7 @@ public abstract class Hash {
       return null;
     }
   }
-
+
   /**
    * Get a singleton instance of hash function of a type
    * defined in the configuration.
@@ -87,7 +87,7 @@ public abstract class Hash {
     int type = getHashType(conf);
     return getInstance(type);
   }
-
+
   /**
    * Calculate a hash using all bytes from the input argument, and
    * a seed of -1.
@@ -97,7 +97,7 @@ public abstract class Hash {
   public int hash(byte[] bytes) {
     return hash(bytes, bytes.length, -1);
   }
-
+
   /**
    * Calculate a hash using all bytes from the input argument,
    * and a provided seed value.
@@ -108,7 +108,7 @@ public abstract class Hash {
   public int hash(byte[] bytes, int initval) {
     return hash(bytes, bytes.length, initval);
   }
-
+
   /**
    * Calculate a hash using bytes from 0 to length, and
    * the provided seed value
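A minimal sketch of the factory methods above; "murmur" mirrors the hbase.hash.type default shown in the diff:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.util.Hash;

  public class HashDemo {
    public static void main(String[] args) {
      // Resolve a hash implementation by name...
      Hash hash = Hash.getInstance(Hash.parseHashType("murmur"));

      byte[] key = "row-0001".getBytes();
      int h1 = hash.hash(key);      // uses the default seed of -1
      int h2 = hash.hash(key, h1);  // chain a previous result as the seed
      System.out.println(h1 + " " + h2);

      // ...or from a Configuration (reads hbase.hash.type).
      Hash fromConf = Hash.getInstance(new Configuration());
    }
  }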
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java Fri May  7 19:26:45 2010
@@ -37,14 +37,14 @@ import java.util.Map;
  *   "/static/" -> points to common static files (src/webapps/static)
  *   "/" -> the jsp server code from (src/webapps/)
  */
-public class InfoServer extends HttpServer {
+public class InfoServer extends HttpServer {
   /**
    * Create a status server on the given port.
    * The jsp scripts are taken from src/webapps/name.
    * @param name The name of the server
    * @param bindAddress address to bind to
    * @param port The port to use on the server
-   * @param findPort whether the server should start at the given port and
+   * @param findPort whether the server should start at the given port and
    * increment by 1 until it finds a free port.
    * @throws IOException e
    */
@@ -71,7 +71,7 @@ public class InfoServer extends HttpServ
       this.defaultContexts.put(oldLogsContext, Boolean.FALSE);
     }
     // Now do my logs.
-    // set up the context for "/logs/" if "hadoop.log.dir" property is defined.
+    // set up the context for "/logs/" if "hadoop.log.dir" property is defined.
     String logDir = System.getProperty("hbase.log.dir");
     if (logDir != null) {
       Context logContext = new Context(parent, "/logs");
@@ -102,8 +102,8 @@ public class InfoServer extends HttpServ
   private static String getWebAppsPath(final String path)
   throws IOException {
     URL url = InfoServer.class.getClassLoader().getResource(path);
-    if (url == null)
-      throw new IOException("webapps not found in CLASSPATH: " + path);
+    if (url == null)
+      throw new IOException("webapps not found in CLASSPATH: " + path);
     return url.toString();
   }

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java Fri May  7 19:26:45 2010
@@ -1,6 +1,6 @@
 /**
  * Copyright 2010 The Apache Software Foundation
- *
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -39,7 +39,7 @@ public class JVMClusterUtil {
   */
  public static class RegionServerThread extends Thread {
    private final HRegionServer regionServer;
-
+
    public RegionServerThread(final HRegionServer r, final int index) {
      super(r, "RegionServer:" + index);
      this.regionServer = r;
@@ -49,7 +49,7 @@ public class JVMClusterUtil {
    public HRegionServer getRegionServer() {
      return this.regionServer;
    }
-
+
    /**
     * Block until the region server has come online, indicating it is ready
     * to be used.
@@ -77,7 +77,7 @@ public class JVMClusterUtil {
  public static JVMClusterUtil.RegionServerThread createRegionServerThread(final Configuration c,
    final Class hrsc, final int index)
  throws IOException {
-    HRegionServer server;
+    HRegionServer server;
    try {
      server = hrsc.getConstructor(Configuration.class).newInstance(c);
    } catch (Exception e) {
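A minimal sketch of starting one in-process region server with this helper, the way the mini-cluster code does. The wait method's name (waitForServerOnline) is an assumption here, since the hunk above shows only its javadoc:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.regionserver.HRegionServer;
  import org.apache.hadoop.hbase.util.JVMClusterUtil;

  public class LocalRs {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Reflectively invokes HRegionServer(Configuration) and names the
      // thread "RegionServer:0".
      JVMClusterUtil.RegionServerThread t =
          JVMClusterUtil.createRegionServerThread(conf, HRegionServer.class, 0);
      t.start();
      t.waitForServerOnline(); // assumed name; blocks until ready
    }
  }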

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java Fri May  7 19:26:45 2010
@@ -25,13 +25,13 @@ import java.io.IOException;

 /**
  * Produces 32-bit hash for hash table lookup.
- *
+ *
  * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
  *
  * You can use this free for any purpose.  It's in the public domain.
  * It has no warranty.
- *
+ *
  * @see lookup3.c
  * @see Hash Functions (and how this
  * function compares to others such as CRC, MD?, etc
@@ -41,9 +41,9 @@ import java.io.IOException;
 public class JenkinsHash extends Hash {
   private static long INT_MASK = 0x00000000ffffffffL;
   private static long BYTE_MASK = 0x00000000000000ffL;
-
+
   private static JenkinsHash _instance = new JenkinsHash();
-
+
   public static Hash getInstance() {
     return _instance;
   }
@@ -55,26 +55,26 @@ public class JenkinsHash extends Hash {
   /**
    * taken from hashlittle() -- hash a variable-length key into a 32-bit value
-   *
+   *
    * @param key the key (the unaligned variable-length array of bytes)
    * @param nbytes number of bytes to include in hash
    * @param initval can be any integer value
    * @return a 32-bit value. Every bit of the key affects every bit of the
    * return value. Two keys differing by one or two bits will have totally
    * different hash values.
-   *
+   *
    * The best hash table sizes are powers of 2. There is no need to do mod
    * a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask.
    * For example, if you need only 10 bits, do
    * h = (h & hashmask(10));
    * In which case, the hash table should have hashsize(10) elements.
-   *
+   *
    * If you are hashing n strings byte[][] k, do it like this:
    * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h);
-   *
+   *
    * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
    * code any way you wish, private, educational, or commercial. It's free.
-   *
+   *
   * Use for hash table lookup, or anything where one collision in 2^^32 is
   * acceptable. Do NOT use for cryptographic purposes.
   */
@@ -99,16 +99,16 @@ public class JenkinsHash extends Hash {
      c = (c + (((key[offset + 9] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
      c = (c + (((key[offset + 10] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
      c = (c + (((key[offset + 11] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
-
+
      /*
       * mix -- mix 3 32-bit values reversibly.
       * This is reversible, so any information in (a,b,c) before mix() is
       * still in (a,b,c) after mix().
-       *
+       *
       * If four pairs of (a,b,c) inputs are run through mix(), or through
       * mix() in reverse, there are at least 32 bits of the output that
       * are sometimes the same for one pair and different for another pair.
-       *
+       *
       * This was tested for:
       * - pairs that differed by one bit, by two bits, in any combination
       *   of top bits of (a,b,c), or in any combination of bottom bits of
@@ -119,22 +119,22 @@ public class JenkinsHash extends Hash {
       *   difference.
       * - the base values were pseudorandom, all zero but one bit set, or
       *   all zero plus a counter that starts at zero.
-       *
+       *
       * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
       * satisfy this are
       *     4  6  8 16 19  4
       *     9 15  3 18 27 15
       *    14  9  3  7 17  3
-       * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for
+       * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for
       * "differ" defined as + with a one-bit base and a two-bit delta. I
       * used http://burtleburtle.net/bob/hash/avalanche.html to choose
       * the operations, constants, and arrangements of the variables.
-       *
+       *
       * This does not achieve avalanche. There are input bits of (a,b,c)
       * that fail to affect some output bits of (a,b,c), especially of a.
       * The most thoroughly mixed value is c, but it doesn't really even
       * achieve avalanche in c.
-       *
+       *
       * This allows some parallelism. Read-after-writes are good at doubling
       * the number of bits affected, so the goal of mixing pulls in the
       * opposite direction as the goal of parallelism. I did what I could.
@@ -151,7 +151,7 @@ public class JenkinsHash extends Hash {
       *   b -= a; b ^= rot(a,19); a += c; \
       *   c -= b; c ^= rot(b, 4); b += a; \
       * }
-       *
+       *
       * mix(a,b,c);
       */
      a = (a - c) & INT_MASK;  a ^= rot(c, 4);  c = (c + b) & INT_MASK;
@@ -195,21 +195,21 @@ public class JenkinsHash extends Hash {
    }
    /*
     * final -- final mixing of 3 32-bit values (a,b,c) into c
-     *
+     *
     * Pairs of (a,b,c) values differing in only a few bits will usually
     * produce values of c that look totally different. This was tested for
     * - pairs that differed by one bit, by two bits, in any combination
     *   of top bits of (a,b,c), or in any combination of bottom bits of
     *   (a,b,c).
-     *
+     *
     * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
     *   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
     *   is commonly produced by subtraction) look like a single 1-bit
     *   difference.
-     *
+     *
     * - the base values were pseudorandom, all zero but one bit set, or
     *   all zero plus a counter that starts at zero.
-     *
+     *
     * These constants passed:
     *  14 11 25 16 4 14 24
     *  12 14 25 16 4 14 24
@@ -217,9 +217,9 @@ public class JenkinsHash extends Hash {
     *   4  8 15 26 3 22 24
     *  10  8 15 26 3 22 24
     *  11  8 15 26 3 22 24
-     *
+     *
     * #define final(a,b,c) \
-     * {
+     * {
     *   c ^= b; c -= rot(b,14); \
     *   a ^= c; a -= rot(c,11); \
     *   b ^= a; b -= rot(a,25); \
@@ -228,7 +228,7 @@ public class JenkinsHash extends Hash {
     *   b ^= a; b -= rot(a,14); \
     *   c ^= b; c -= rot(b,24); \
     * }
-     *
+     *
     */
    c ^= b; c = (c - rot(b,14)) & INT_MASK;
    a ^= c; a = (a - rot(c,11)) & INT_MASK;
@@ -240,7 +240,7 @@ public class JenkinsHash extends Hash {

    return (int)(c & INT_MASK);
  }
-
+
  /**
   * Compute the hash of the specified file
   * @param args name of file to compute hash of.
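The seed-chaining idiom quoted in the javadoc above ("h = hash(k[i], h)") looks like this in practice:

  import org.apache.hadoop.hbase.util.Hash;
  import org.apache.hadoop.hbase.util.JenkinsHash;

  public class ChainHash {
    public static void main(String[] args) {
      Hash jenkins = JenkinsHash.getInstance();
      byte[][] k = { "a".getBytes(), "b".getBytes(), "c".getBytes() };

      // Hash n byte arrays into one 32-bit value: feed each result
      // back in as the seed for the next array.
      int h = 0;
      for (int i = 0; i < k.length; i++) {
        h = jenkins.hash(k[i], h);
      }
      System.out.println(h);
    }
  }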

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Keying.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Keying.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Keying.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Keying.java Fri May  7 19:26:45 2010
@@ -37,7 +37,7 @@ public class Keying {

   /**
    * Makes a key out of passed URI for use as row name or column qualifier.
-   *
+   *
    * This method runs transforms on the passed URI so it sits better
    * as a key (or portion-of-a-key) in hbase. The host portion of
    * the URI authority is reversed so subdomains sort under their parent
@@ -49,10 +49,10 @@ public class Keying {
    * r:http://org.apache.lucene/index.html?query=something#middle
    * The transforms are reversible. No transform is done if passed URI is
    * not hierarchical.
-   *
+   *
    * If authority userinfo is present, will mess up the sort
    * (until we do more work).
-   *
+   *
   * @param u URL to transform.
   * @return An opaque URI of artificial 'r' scheme with host portion of URI
   * authority reversed (if present).
@@ -70,10 +70,10 @@ public class Keying {
    }
    return SCHEME + m.group(1) + reverseHostname(m.group(2)) + m.group(3);
  }
-
+
  /**
   * Reverse the {@link #createKey(String)} transform.
-   *
+   *
   * @param s URI made by {@link #createKey(String)}.
   * @return 'Restored' URI made by reversing the {@link #createKey(String)}
   * transform.
@@ -89,14 +89,14 @@ public class Keying {
    }
    return m.group(1) + reverseHostname(m.group(2)) + m.group(3);
  }
-
+
  private static Matcher getMatcher(final String u) {
    if (u == null || u.length() <= 0) {
      return null;
    }
    return URI_RE_PARSER.matcher(u);
  }
-
+
  private static String reverseHostname(final String hostname) {
    if (hostname == null) {
      return "";
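A round-trip sketch of the transform described above. The inverse helper is assumed to be keyToUri, which the hunk references only by its javadoc:

  import org.apache.hadoop.hbase.util.Keying;

  public class KeyDemo {
    public static void main(String[] args) {
      String url = "http://lucene.apache.org/index.html?query=something#middle";

      // Host reversed so subdomains sort under their parent domain:
      // r:http://org.apache.lucene/index.html?query=something#middle
      String key = Keying.createKey(url);
      System.out.println(key);

      // Assumed inverse; restores the original URL.
      System.out.println(Keying.keyToUri(key));
    }
  }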
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java Fri May  7 19:26:45 2010
@@ -85,7 +85,7 @@ public class Merge extends Configured im
      LOG.fatal("File system is not available", e);
      return -1;
    }
-
+
    // Verify HBase is down
    LOG.info("Verifying that HBase is not running...");
    try {
@@ -95,9 +95,9 @@ public class Merge extends Configured im
    } catch (MasterNotRunningException e) {
      // Expected. Ignore.
    }
-
+
    // Initialize MetaUtils and and get the root of the HBase installation
-
+
    this.utils = new MetaUtils(getConf());
    this.rootdir = FSUtils.getRootDir(getConf());
    try {
@@ -119,14 +119,14 @@ public class Merge extends Configured im
      );

      return -1;
-
+
    } finally {
      if (this.utils != null) {
        this.utils.shutdown();
      }
    }
  }
-
+
  /** @return HRegionInfo for merge result */
  HRegionInfo getMergedHRegionInfo() {
    return this.mergeInfo;
@@ -150,25 +150,25 @@ public class Merge extends Configured im
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    List cells2 = rootRegion.get(get, null).list();
    HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null: cells2.get(0).getValue());
-    HRegion merged = merge(info1, rootRegion, info2, rootRegion);
+    HRegion merged = merge(info1, rootRegion, info2, rootRegion);
    LOG.info("Adding " + merged.getRegionInfo() + " to " +
        rootRegion.getRegionInfo());
    HRegion.addRegionToMETA(rootRegion, merged);
    merged.close();
  }
-
+
  private static class MetaScannerListener
  implements MetaUtils.ScannerListener {
    private final byte [] region1;
    private final byte [] region2;
    private HRegionInfo meta1 = null;
    private HRegionInfo meta2 = null;
-
+
    MetaScannerListener(final byte [] region1, final byte [] region2) {
      this.region1 = region1;
      this.region2 = region2;
    }
-
+
    public boolean processRow(HRegionInfo info) {
      if (meta1 == null && HRegion.rowIsInRange(info, region1)) {
        meta1 = info;
@@ -179,16 +179,16 @@ public class Merge extends Configured im
      }
      return meta1 == null || (region2 != null && meta2 == null);
    }
-
+
    HRegionInfo getMeta1() {
      return meta1;
    }
-
+
    HRegionInfo getMeta2() {
      return meta2;
    }
  }
-
+
  /*
   * Merges two regions from a user table.
   */
@@ -257,7 +257,7 @@ public class Merge extends Configured im
    HRegion.addRegionToMETA(mergeMeta, merged);
    merged.close();
  }
-
+
  /*
   * Actually merge two regions and update their info in the meta region(s)
   * If the meta is split, meta1 may be different from meta2. (and we may have
@@ -292,21 +292,21 @@ public class Merge extends Configured im
        r1.close();
      }
    }
-
+
    // Remove the old regions from meta.
    // HRegion.merge has already deleted their files
-
+
    removeRegionFromMeta(meta1, info1);
    removeRegionFromMeta(meta2, info2);

    this.mergeInfo = merged.getRegionInfo();
    return merged;
  }
-
+
  /*
   * Removes a region's meta information from the passed meta
   * region.
-   *
+   *
   * @param meta META HRegion to be updated
   * @param regioninfo HRegionInfo of region to remove from meta
   *
@@ -317,8 +317,8 @@ public class Merge extends Configured im
    if (LOG.isDebugEnabled()) {
      LOG.debug("Removing region: " + regioninfo + " from " + meta);
    }
-
-    Delete delete  = new Delete(regioninfo.getRegionName(),
+
+    Delete delete  = new Delete(regioninfo.getRegionName(),
      System.currentTimeMillis(), null);
    meta.delete(delete, null, true);
  }
@@ -326,7 +326,7 @@ public class Merge extends Configured im
  /*
   * Adds a region's meta information from the passed meta
   * region.
-   *
+   *
   * @param metainfo META HRegionInfo to be updated
   * @param region HRegion to add to meta
   *
@@ -335,7 +335,7 @@ public class Merge extends Configured im
  private int parseArgs(String[] args) throws IOException {
    GenericOptionsParser parser =
      new GenericOptionsParser(getConf(), args);
-
+
    String[] remainingArgs = parser.getRemainingArgs();
    if (remainingArgs.length != 3) {
      usage();
@@ -343,7 +343,7 @@ public class Merge extends Configured im
    }
    tableName = Bytes.toBytes(remainingArgs[0]);
    isMetaTable = Bytes.compareTo(tableName, HConstants.META_TABLE_NAME) == 0;
-
+
    region1 = Bytes.toBytesBinary(remainingArgs[1]);
    region2 = Bytes.toBytesBinary(remainingArgs[2]);
    int status = 0;
@@ -355,7 +355,7 @@ public class Merge extends Configured im
    }
    return status;
  }
-
+
  private boolean notInTable(final byte [] tn, final byte [] rn) {
    if (WritableComparator.compareBytes(tn, 0, tn.length, rn, 0, tn.length) != 0) {
      LOG.error("Region " + Bytes.toString(rn) + " does not belong to table " +
@@ -364,12 +364,12 @@ public class Merge extends Configured im
    }
    return false;
  }
-
+
  private void usage() {
    System.err.println(
        "Usage: bin/hbase merge \n");
  }
-
+
  public static void main(String[] args) {
    int status;
    try {
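Merge extends Configured and, per its truncated class declaration above, appears to implement Tool, so it can also be driven programmatically with ToolRunner. A minimal sketch; the table and region names are placeholders, since parseArgs() expects three arguments (table, then two binary-encoded region names):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.util.Merge;
  import org.apache.hadoop.util.ToolRunner;

  public class RunMerge {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // HBase must be shut down first; the tool verifies that itself.
      int status = ToolRunner.run(conf, new Merge(),
          new String[] { "mytable", "region-name-1", "region-name-2" });
      System.exit(status);
    }
  }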
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java Fri May 7 19:26:45 2010
@@ -85,7 +85,7 @@ public class Merge extends Configured im
       LOG.fatal("File system is not available", e);
       return -1;
     }
-    
+
     // Verify HBase is down
     LOG.info("Verifying that HBase is not running...");
     try {
@@ -95,9 +95,9 @@ public class Merge extends Configured im
     } catch (MasterNotRunningException e) {
       // Expected. Ignore.
     }
-    
+
     // Initialize MetaUtils and get the root of the HBase installation
-    
+
     this.utils = new MetaUtils(getConf());
     this.rootdir = FSUtils.getRootDir(getConf());
     try {
@@ -119,14 +119,14 @@ public class Merge extends Configured im
       );
       return -1;
-      
+
     } finally {
       if (this.utils != null) {
        this.utils.shutdown();
       }
     }
   }
-  
+
  /** @return HRegionInfo for merge result */
  HRegionInfo getMergedHRegionInfo() {
    return this.mergeInfo;
@@ -150,25 +150,25 @@ public class Merge extends Configured im
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     List<KeyValue> cells2 = rootRegion.get(get, null).list();
     HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null: cells2.get(0).getValue());
-    HRegion merged = merge(info1, rootRegion, info2, rootRegion); 
+    HRegion merged = merge(info1, rootRegion, info2, rootRegion);
     LOG.info("Adding " + merged.getRegionInfo() + " to " +
       rootRegion.getRegionInfo());
     HRegion.addRegionToMETA(rootRegion, merged);
     merged.close();
   }
-  
+
   private static class MetaScannerListener
   implements MetaUtils.ScannerListener {
     private final byte [] region1;
     private final byte [] region2;
     private HRegionInfo meta1 = null;
     private HRegionInfo meta2 = null;
-    
+
     MetaScannerListener(final byte [] region1, final byte [] region2) {
       this.region1 = region1;
       this.region2 = region2;
     }
-    
+
     public boolean processRow(HRegionInfo info) {
       if (meta1 == null && HRegion.rowIsInRange(info, region1)) {
         meta1 = info;
@@ -179,16 +179,16 @@ public class Merge extends Configured im
       }
       return meta1 == null || (region2 != null && meta2 == null);
     }
-    
+
     HRegionInfo getMeta1() {
       return meta1;
     }
-    
+
     HRegionInfo getMeta2() {
       return meta2;
     }
   }
-  
+
   /*
    * Merges two regions from a user table.
   */
@@ -257,7 +257,7 @@ public class Merge extends Configured im
     HRegion.addRegionToMETA(mergeMeta, merged);
     merged.close();
   }
-  
+
   /*
    * Actually merge two regions and update their info in the meta region(s)
    * If the meta is split, meta1 may be different from meta2. (and we may have
@@ -292,21 +292,21 @@ public class Merge extends Configured im
         r1.close();
       }
     }
-    
+
     // Remove the old regions from meta.
     // HRegion.merge has already deleted their files
-    
+
     removeRegionFromMeta(meta1, info1);
     removeRegionFromMeta(meta2, info2);
     this.mergeInfo = merged.getRegionInfo();
     return merged;
   }
-  
+
   /*
    * Removes a region's meta information from the passed meta
    * region.
-   * 
+   *
   * @param meta META HRegion to be updated
   * @param regioninfo HRegionInfo of region to remove from meta
   * @
@@ -317,8 +317,8 @@
     if (LOG.isDebugEnabled()) {
       LOG.debug("Removing region: " + regioninfo + " from " + meta);
     }
-    
-    Delete delete = new Delete(regioninfo.getRegionName(), 
+
+    Delete delete = new Delete(regioninfo.getRegionName(),
       System.currentTimeMillis(), null);
     meta.delete(delete, null, true);
   }
@@ -326,7 +326,7 @@
   /*
    * Adds a region's meta information to the passed meta
    * region.
-   * 
+   *
   * @param metainfo META HRegionInfo to be updated
   * @param region HRegion to add to meta
   * @
@@ -335,7 +335,7 @@
   private int parseArgs(String[] args) throws IOException {
     GenericOptionsParser parser =
       new GenericOptionsParser(getConf(), args);
-    
+
     String[] remainingArgs = parser.getRemainingArgs();
     if (remainingArgs.length != 3) {
       usage();
@@ -343,7 +343,7 @@
     }
     tableName = Bytes.toBytes(remainingArgs[0]);
     isMetaTable = Bytes.compareTo(tableName, HConstants.META_TABLE_NAME) == 0;
-    
+
     region1 = Bytes.toBytesBinary(remainingArgs[1]);
     region2 = Bytes.toBytesBinary(remainingArgs[2]);
     int status = 0;
@@ -355,7 +355,7 @@
     }
     return status;
   }
-  
+
   private boolean notInTable(final byte [] tn, final byte [] rn) {
     if (WritableComparator.compareBytes(tn, 0, tn.length, rn, 0, tn.length) != 0) {
       LOG.error("Region " + Bytes.toString(rn) + " does not belong to table " +
@@ -364,12 +364,12 @@
     }
     return false;
   }
-  
+
   private void usage() {
     System.err.println(
       "Usage: bin/hbase merge <table-name> <region-1> <region-2>\n");
   }
-  
+
   public static void main(String[] args) {
     int status;
     try {
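Merge extends Configured and implements Tool, so it is normally driven through ToolRunner with exactly the three arguments parseArgs checks for, and HBase must be shut down first, as the verification step above enforces. A hedged sketch, assuming the usual no-arg Merge constructor (table and region names are hypothetical placeholders):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.Merge;
    import org.apache.hadoop.util.ToolRunner;

    public class MergeExample {
      public static void main(String[] args) throws Exception {
        // parseArgs above expects: <table-name> <region-1> <region-2>
        int status = ToolRunner.run(HBaseConfiguration.create(), new Merge(),
          new String[] {
            "myTable",
            "myTable,,1273258088456",      // hypothetical region names
            "myTable,keyM,1273258098765"});
        System.exit(status);
      }
    }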
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java Fri May 7 19:26:45 2010
@@ -64,14 +64,14 @@ public class MetaUtils {
   private HRegion rootRegion;
   private Map<byte [], HRegion> metaRegions = Collections.synchronizedSortedMap(
     new TreeMap<byte [], HRegion>(Bytes.BYTES_COMPARATOR));
-  
-  /** Default constructor 
+
+  /** Default constructor
    * @throws IOException e
    */
   public MetaUtils() throws IOException {
     this(HBaseConfiguration.create());
   }
-  
+
   /**
    * @param conf Configuration
    * @throws IOException e
   */
@@ -107,7 +107,7 @@ public class MetaUtils {
     }
     return this.log;
   }
-  
+
   /**
    * @return HRegion for root region
    * @throws IOException e
   */
@@ -118,10 +118,10 @@ public class MetaUtils {
     }
     return this.rootRegion;
   }
-  
+
   /**
    * Open or return cached opened meta region
-   * 
+   *
    * @param metaInfo HRegionInfo for meta region
    * @return meta HRegion
    * @throws IOException e
   */
@@ -135,7 +135,7 @@ public class MetaUtils {
     }
     return meta;
   }
-  
+
   /**
    * Closes catalog regions if open. Also closes and deletes the HLog. You
    * must call this method if you want to persist changes made during a
@@ -180,18 +180,18 @@ public class MetaUtils {
   public interface ScannerListener {
     /**
      * Callback so client of scanner can process row contents
-     * 
+     *
      * @param info HRegionInfo for row
      * @return false to terminate the scan
     * @throws IOException e
     */
     public boolean processRow(HRegionInfo info) throws IOException;
   }
-  
+
   /**
    * Scans the root region. For every meta region found, calls the listener with
    * the HRegionInfo of the meta region.
-   * 
+   *
    * @param listener method to be called for each meta region found
   * @throws IOException e
   */
@@ -249,7 +249,7 @@ public class MetaUtils {
    * <p>Use for reading meta only. Does not close region when done.
    * Use {@link #getMetaRegion(HRegionInfo)} instead if writing. Adds
    * meta region to list that will get a close on {@link #shutdown()}.
-   * 
+   *
    * @param metaRegionInfo HRegionInfo for meta region
    * @param listener method to be called for each meta region found
   * @throws IOException e
   */
@@ -278,7 +278,7 @@ public class MetaUtils {
     meta.compactStores();
     return meta;
   }
-  
+
   /**
    * Set a single region on/offline.
    * This is a tool to repair tables that have offlined tables in their midst.
@@ -310,18 +310,18 @@
     HRegionInfo info = Writables.getHRegionInfo(value);
     Put put = new Put(row);
     info.setOffline(onlineOffline);
-    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, 
+    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
       Writables.getBytes(info));
     t.put(put);
-    
+
     Delete delete = new Delete(row);
     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
-    
+
     t.delete(delete);
   }
-  
+
   /**
    * Offline version of the online TableOperation,
    * org.apache.hadoop.hbase.master.AddColumn.
@@ -337,7 +337,7 @@
     final HRegion m = getMetaRegion(hri);
     scanMetaRegion(m, new ScannerListener() {
       private boolean inTable = true;
-      
+
       @SuppressWarnings("synthetic-access")
       public boolean processRow(HRegionInfo info) throws IOException {
         LOG.debug("Testing " + Bytes.toString(tableName) + " against " +
@@ -355,7 +355,7 @@
     }});
     }
   }
-  
+
   /**
    * Offline version of the online TableOperation,
    * org.apache.hadoop.hbase.master.DeleteColumn.
@@ -370,7 +370,7 @@
     final HRegion m = getMetaRegion(hri);
     scanMetaRegion(m, new ScannerListener() {
       private boolean inTable = true;
-      
+
       @SuppressWarnings("synthetic-access")
       public boolean processRow(HRegionInfo info) throws IOException {
         if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
@@ -393,15 +393,15 @@
     }});
     }
   }
-  
+
   /**
    * Update COL_REGIONINFO in meta region r with HRegionInfo hri
-   * 
+   *
    * @param r region
    * @param hri region info
   * @throws IOException e
   */
-  public void updateMETARegionInfo(HRegion r, final HRegionInfo hri) 
+  public void updateMETARegionInfo(HRegion r, final HRegionInfo hri)
   throws IOException {
     if (LOG.isDebugEnabled()) {
       Get get = new Get(hri.getRegionName());
@@ -416,14 +416,14 @@
       return;
     }
     HRegionInfo h = Writables.getHRegionInfoOrNull(value);
-    
-    LOG.debug("Old " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + 
+
+    LOG.debug("Old " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
       Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " for " +
       hri.toString() + " in " + r.toString() + " is: " + h.toString());
     }
-    
+
     Put put = new Put(hri.getRegionName());
-    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, 
+    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
       Writables.getBytes(hri));
     r.put(put);
@@ -440,8 +440,8 @@
       return;
     }
     HRegionInfo h = Writables.getHRegionInfoOrNull(value);
-    LOG.debug("New " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + 
-      Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " for " + 
+    LOG.debug("New " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
+      Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " for " +
       hri.toString() + " in " + r.toString() + " is: " + h.toString());
   }
 }
@@ -464,7 +464,7 @@ public class MetaUtils {
     // Return all meta regions that contain the passed tablename.
     scanRootRegion(new ScannerListener() {
       private final Log SL_LOG = LogFactory.getLog(this.getClass());
-      
+
       public boolean processRow(HRegionInfo info) throws IOException {
         SL_LOG.debug("Testing " + info);
         if (Bytes.equals(info.getTableDesc().getName(),
@@ -476,7 +476,7 @@ public class MetaUtils {
     }});
     return result;
   }
-  
+
   /**
   * @param n Table name.
   * @return True if a catalog table, -ROOT- or .META.
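The ScannerListener interface above is the extension point for both scanRootRegion and scanMetaRegion; returning false from processRow stops the scan, exactly as the anonymous listeners in this file do. A hedged sketch of a standalone listener that just prints each region it sees (class name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.MetaUtils;

    public class PrintMetaRegions {
      public static void main(String[] args) throws IOException {
        MetaUtils utils = new MetaUtils();
        try {
          utils.scanRootRegion(new MetaUtils.ScannerListener() {
            public boolean processRow(HRegionInfo info) throws IOException {
              System.out.println("meta region: " + info);
              return true; // returning false would terminate the scan
            }
          });
        } finally {
          utils.shutdown(); // closes catalog regions and deletes the HLog
        }
      }
    }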
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java Fri May 7 19:26:45 2010
@@ -23,17 +23,17 @@ package org.apache.hadoop.hbase.util;
 /**
  * This is a very fast, non-cryptographic hash suitable for general hash-based
  * lookup. See http://murmurhash.googlepages.com/ for more details.
- * 
+ *
  * <p>The C version of MurmurHash 2.0 found at that site was ported
  * to Java by Andrzej Bialecki (ab at getopt org).</p>
 */
 public class MurmurHash extends Hash {
   private static MurmurHash _instance = new MurmurHash();
-  
+
   public static Hash getInstance() {
     return _instance;
   }
-  
+
   @Override
   public int hash(byte[] data, int length, int seed) {
     int m = 0x5bd1e995;
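A hedged usage sketch of the singleton; the seed is arbitrary, and equal data with an equal seed always hashes equally:

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.Hash;
    import org.apache.hadoop.hbase.util.MurmurHash;

    public class MurmurExample {
      public static void main(String[] args) {
        byte [] data = Bytes.toBytes("row-key-0001");
        Hash murmur = MurmurHash.getInstance();
        // Signature shown in the diff: hash(byte[] data, int length, int seed).
        System.out.println(murmur.hash(data, data.length, -1));
      }
    }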
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Pair.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Pair.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Pair.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Pair.java Fri May 7 19:26:45 2010
@@ -24,8 +24,8 @@ import java.io.Serializable;
 /**
  * A generic class for pairs.
- * @param <T1> 
- * @param <T2> 
+ * @param <T1>
+ * @param <T2>
 */
 public class Pair<T1, T2> implements Serializable
{

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java Fri May 7 19:26:45 2010
@@ -27,7 +27,7 @@ import java.util.concurrent.atomic.Atomi
 /**
  * Sleeper for current thread.
  * Sleeps for passed period. Also checks passed boolean and if interrupted,
- * will return if the flag is set (rather than go back to sleep until its 
+ * will return if the flag is set (rather than go back to sleep until its
  * sleep time is up).
 */
 public class Sleeper {
@@ -35,7 +35,7 @@ public class Sleeper {
   private final int period;
   private final AtomicBoolean stop;
   private static final long MINIMAL_DELTA_FOR_LOGGING = 10000;
-  
+
   private final Object sleepLock = new Object();
   private boolean triggerWake = false;
@@ -47,7 +47,7 @@ public class Sleeper {
     this.period = sleep;
     this.stop = stop;
   }
-  
+
   /**
    * Sleep for period.
   */
@@ -65,7 +65,7 @@ public class Sleeper {
       sleepLock.notify();
     }
   }
-  
+
   /**
    * Sleep for period adjusted by passed startTime
   * @param startTime Time some task started previous to now. Time to sleep
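The Sleeper pattern used by HBase daemon loops, as a hedged sketch; the period, flag handling, and loop body are illustrative:

    import java.util.concurrent.atomic.AtomicBoolean;
    import org.apache.hadoop.hbase.util.Sleeper;

    public class ChoreLoop {
      public static void main(String[] args) {
        final AtomicBoolean stop = new AtomicBoolean(false);
        final Sleeper sleeper = new Sleeper(1000, stop); // 1 second period
        while (!stop.get()) {
          // ... one unit of periodic work ...
          sleeper.sleep(); // wakes early if interrupted and stop is set
        }
      }
    }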
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueMap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueMap.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueMap.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueMap.java Fri May 7 19:26:45 2010
@@ -29,7 +29,7 @@ import java.util.Set;
 /**
  * A Map that uses Soft Reference values internally. Use as a simple cache.
- * 
+ *
  * @param <K> key class
  * @param <V> value class
 */
@@ -37,11 +37,11 @@ public class SoftValueMap implement
   private final Map<K, SoftValue<K,V>> internalMap =
     new HashMap<K, SoftValue<K,V>>();
   private final ReferenceQueue rq;
-  
+
   public SoftValueMap() {
     this(new ReferenceQueue());
   }
-  
+
   public SoftValueMap(final ReferenceQueue rq) {
     this.rq = rq;
   }
@@ -67,12 +67,12 @@ public class SoftValueMap implement
       new SoftValue(key, value, this.rq));
     return oldValue == null ? null : oldValue.get();
   }
-  
+
   @SuppressWarnings("unchecked")
   public void putAll(Map map) {
     throw new RuntimeException("Not implemented");
   }
-  
+
   @SuppressWarnings({"SuspiciousMethodCalls"})
   public V get(Object key) {
     checkReferences();
@@ -94,16 +94,16 @@ public class SoftValueMap implement
   }

   public boolean containsKey(Object key) {
-    checkReferences(); 
+    checkReferences();
     return this.internalMap.containsKey(key);
   }
-  
+
   public boolean containsValue(Object value) {
 /*    checkReferences();
     return internalMap.containsValue(value);*/
     throw new UnsupportedOperationException("Don't support containsValue!");
   }
-  
+
   public boolean isEmpty() {
     checkReferences();
     return this.internalMap.isEmpty();
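A hedged sketch of the soft-reference cache in use; key and value types are arbitrary. Because values sit behind SoftReferences, any get() can come back null once the GC has cleared an entry under memory pressure:

    import org.apache.hadoop.hbase.util.SoftValueMap;

    public class SoftCacheExample {
      public static void main(String[] args) {
        SoftValueMap<String, byte[]> cache = new SoftValueMap<String, byte[]>();
        cache.put("block-0001", new byte[64 * 1024]);
        byte [] block = cache.get("block-0001");
        if (block == null) {
          // Entry was collected under memory pressure; reload it.
        }
      }
    }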
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java Fri May 7 19:26:45 2010
@@ -33,19 +33,19 @@ import java.util.TreeSet;
  * A SortedMap implementation that uses Soft Reference values
  * internally to make it play well with the GC when in a low-memory
  * situation. Use as a cache where you also need SortedMap functionality.
- * 
+ *
  * @param <K> key class
  * @param <V> value class
 */
 public class SoftValueSortedMap<K,V> implements SortedMap<K,V> {
   private final SortedMap<K, SoftValue<K,V>> internalMap;
   private final ReferenceQueue rq = new ReferenceQueue();
-  
+
   /** Constructor */
   public SoftValueSortedMap() {
     this(new TreeMap<K, SoftValue<K,V>>());
   }
-  
+
   /**
    * Constructor
    * @param c comparator
   */
@@ -53,7 +53,7 @@ public class SoftValueSortedMap imp
   public SoftValueSortedMap(final Comparator c) {
     this(new TreeMap<K, SoftValue<K,V>>(c));
   }
-  
+
   /** For headMap and tailMap support
    * @param original object to wrap
   */
@@ -83,12 +83,12 @@ public class SoftValueSortedMap imp
       new SoftValue(key, value, this.rq));
     return oldValue == null ? null : oldValue.get();
   }
-  
+
   @SuppressWarnings("unchecked")
   public synchronized void putAll(Map map) {
     throw new RuntimeException("Not implemented");
   }
-  
+
   @SuppressWarnings({"SuspiciousMethodCalls"})
   public synchronized V get(Object key) {
     checkReferences();
@@ -110,10 +110,10 @@ public class SoftValueSortedMap imp
   }

   public synchronized boolean containsKey(Object key) {
-    checkReferences(); 
+    checkReferences();
     return this.internalMap.containsKey(key);
   }
-  
+
   public synchronized boolean containsValue(Object value) {
 /*    checkReferences();
     return internalMap.containsValue(value);*/
@@ -129,22 +129,22 @@ public class SoftValueSortedMap imp
     checkReferences();
     return internalMap.lastKey();
   }
-  
+
   public synchronized SoftValueSortedMap<K,V> headMap(K key) {
     checkReferences();
     return new SoftValueSortedMap<K,V>(this.internalMap.headMap(key));
   }
-  
+
   public synchronized SoftValueSortedMap<K,V> tailMap(K key) {
     checkReferences();
     return new SoftValueSortedMap<K,V>(this.internalMap.tailMap(key));
   }
-  
+
   public synchronized SoftValueSortedMap<K,V> subMap(K fromKey, K toKey) {
     checkReferences();
     return new SoftValueSortedMap<K,V>(this.internalMap.subMap(fromKey, toKey));
   }
-  
+
   public synchronized boolean isEmpty() {
     checkReferences();
     return this.internalMap.isEmpty();

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Strings.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Strings.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Strings.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Strings.java Fri May 7 19:26:45 2010
@@ -25,7 +25,7 @@ package org.apache.hadoop.hbase.util;
 public class Strings {
   public final static String DEFAULT_SEPARATOR = "=";
   public final static String DEFAULT_KEYVALUE_SEPARATOR = ", ";
-  
+
   /**
    * Append to a StringBuilder a key/value.
    * Uses default separators.

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Threads.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Threads.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Threads.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Threads.java Fri May 7 19:26:45 2010
@@ -31,7 +31,7 @@ import java.lang.Thread.UncaughtExceptio
 */
 public class Threads {
   protected static final Log LOG = LogFactory.getLog(Threads.class);
-  
+
   /**
    * Utility method that sets name, daemon status and starts passed thread.
    * @param t thread to frob
   * @
@@ -42,7 +42,7 @@ public class Threads {
     final String name) {
     return setDaemonThreadRunning(t, name, null);
   }
-  
+
   /**
    * Utility method that sets name, daemon status and starts passed thread.
   * @param t thread to frob
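A hedged sketch of the Threads helper shown above (thread name is illustrative): it names the thread, marks it a daemon so it cannot keep the JVM alive, starts it, and returns it.

    import org.apache.hadoop.hbase.util.Threads;

    public class DaemonExample {
      public static void main(String[] args) {
        Thread worker = new Thread(new Runnable() {
          public void run() {
            // ... background work ...
          }
        });
        Threads.setDaemonThreadRunning(worker, "example.worker");
      }
    }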
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java Fri May 7 19:26:45 2010
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.VersionAn
 public class VersionInfo {
   private static Package myPackage;
   private static VersionAnnotation version;
-  
+
   static {
     myPackage = VersionAnnotation.class.getPackage();
     version = myPackage.getAnnotation(VersionAnnotation.class);
@@ -42,7 +42,7 @@ public class VersionInfo {
   static Package getPackage() {
     return myPackage;
   }
-  
+
   /**
    * Get the hbase version.
    * @return the hbase version string, eg. "0.6.3-dev"
   */
@@ -50,7 +50,7 @@ public class VersionInfo {
   public static String getVersion() {
     return version != null ? version.version() : "Unknown";
   }
-  
+
   /**
    * Get the subversion revision number for the root directory
    * @return the revision number, eg. "451451"
   */
@@ -58,7 +58,7 @@ public class VersionInfo {
   public static String getRevision() {
     return version != null ? version.revision() : "Unknown";
   }
-  
+
   /**
    * The date that hbase was compiled.
    * @return the compilation date in unix date format
   */
@@ -66,7 +66,7 @@ public class VersionInfo {
   public static String getDate() {
     return version != null ? version.date() : "Unknown";
   }
-  
+
   /**
    * The user that compiled hbase.
    * @return the username of the user
   */
@@ -74,7 +74,7 @@ public class VersionInfo {
   public static String getUser() {
     return version != null ? version.user() : "Unknown";
   }
-  
+
   /**
    * Get the subversion URL for the root hbase directory.
    * @return the url
   */
@@ -82,7 +82,7 @@ public class VersionInfo {
   public static String getUrl() {
     return version != null ? version.url() : "Unknown";
   }
-  
+
   public static void main(String[] args) {
     System.out.println("HBase " + getVersion());
     System.out.println("Subversion " + getUrl() + " -r " + getRevision());
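Every getter above falls back to "Unknown" when the VersionAnnotation was not compiled in; a minimal usage sketch:

    import org.apache.hadoop.hbase.util.VersionInfo;

    public class ShowVersion {
      public static void main(String[] args) {
        System.out.println("HBase " + VersionInfo.getVersion());
        System.out.println("Subversion " + VersionInfo.getUrl() +
          " -r " + VersionInfo.getRevision());
        System.out.println("Compiled by " + VersionInfo.getUser() +
          " on " + VersionInfo.getDate());
      }
    }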