hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From li...@apache.org
Subject svn commit: r1509363 - in /hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase: ./ client/ executor/ util/
Date Thu, 01 Aug 2013 18:18:26 GMT
Author: liyin
Date: Thu Aug  1 18:18:26 2013
New Revision: 1509363

URL: http://svn.apache.org/r1509363
Log:
[master] Make findbugs happy

Author: fan

Summary: Fix some findbugs high priority warnings

Test Plan: take a look at new findbugs report

Reviewers: manukranthk, liyintang

Reviewed By: liyintang

CC: hbase-eng@

Differential Revision: https://phabricator.fb.com/D910084

Task ID: 2624700

Modified:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/EmptyWatcher.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/KeyValue.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ClientLocalScanner.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HBaseFsck.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/executor/HBaseEventHandler.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/EmptyWatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/EmptyWatcher.java?rev=1509363&r1=1509362&r2=1509363&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/EmptyWatcher.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/EmptyWatcher.java Thu Aug
 1 18:18:26 2013
@@ -26,7 +26,7 @@ import org.apache.zookeeper.WatchedEvent
  * Class used as an empty watche for the tests
  */
 public class EmptyWatcher implements Watcher{
-  public static EmptyWatcher instance = new EmptyWatcher();
+  public static final EmptyWatcher instance = new EmptyWatcher();
   private EmptyWatcher() {}
 
   public void process(WatchedEvent event) {}

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1509363&r1=1509362&r2=1509363&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java Thu Aug 
1 18:18:26 2013
@@ -543,7 +543,7 @@ public final class HConstants {
      * Parameter name for maximum number of bytes returned when calling a
      * scanner's next method.
      */
-  public static String HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY = "hbase.client.scanner.max.result.size";
+  public static final String HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY = "hbase.client.scanner.max.result.size";
 
   /**
    * Maximum number of bytes returned when calling a scanner's next method.
@@ -552,7 +552,7 @@ public final class HConstants {
    *
    * The default value is unlimited.
    */
-  public static long DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE = Long.MAX_VALUE;
+  public static final long DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE = Long.MAX_VALUE;
 
 
   /**
@@ -562,43 +562,43 @@ public final class HConstants {
    * if partialRow is true, otherwise, the row will be truncated in order to
    * fit the memory.
    */
-  public static int DEFAULT_HBASE_SCANNER_MAX_RESULT_SIZE = Integer.MAX_VALUE;
+  public static final int DEFAULT_HBASE_SCANNER_MAX_RESULT_SIZE = Integer.MAX_VALUE;
 
   /**
    * HRegion server lease period in milliseconds. Clients must report in within this period
    * else they are considered dead. Unit measured in ms (milliseconds).
    */
-  public static String HBASE_REGIONSERVER_LEASE_PERIOD_KEY   = "hbase.regionserver.lease.period";
+  public static final String HBASE_REGIONSERVER_LEASE_PERIOD_KEY   = "hbase.regionserver.lease.period";
 
 
   /**
    * Default value of {@link #HBASE_REGIONSERVER_LEASE_PERIOD_KEY}.
    */
-  public static long DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD = 60000;
+  public static final long DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD = 60000;
 
   /**
    * timeout for each RPC
    */
-  public static String HBASE_RPC_TIMEOUT_KEY = "hbase.rpc.timeout";
-  public static String HBASE_RS_REPORT_TIMEOUT_KEY = "hbase.regionserverReport.timeout";
+  public static final String HBASE_RPC_TIMEOUT_KEY = "hbase.rpc.timeout";
+  public static final String HBASE_RS_REPORT_TIMEOUT_KEY = "hbase.regionserverReport.timeout";
 
   /**
    * Default value of {@link #HBASE_RPC_TIMEOUT_KEY}
    */
-  public static int DEFAULT_HBASE_RPC_TIMEOUT = 60000;
-  public static int DEFAULT_RS_REPORT_TIMEOUT = 3000;
+  public static final int DEFAULT_HBASE_RPC_TIMEOUT = 60000;
+  public static final int DEFAULT_RS_REPORT_TIMEOUT = 3000;
 
   /**
    * pause between rpc or connect retries
    */
-  public static String HBASE_CLIENT_PAUSE = "hbase.client.pause";
-  public static int DEFAULT_HBASE_CLIENT_PAUSE = 1000;
+  public static final String HBASE_CLIENT_PAUSE = "hbase.client.pause";
+  public static final int DEFAULT_HBASE_CLIENT_PAUSE = 1000;
 
   /**
    * compression for each RPC and its default value
    */
-  public static String HBASE_RPC_COMPRESSION_KEY = "hbase.rpc.compression";
-  public static Compression.Algorithm DEFAULT_HBASE_RPC_COMPRESSION =
+  public static final String HBASE_RPC_COMPRESSION_KEY = "hbase.rpc.compression";
+  public static final Compression.Algorithm DEFAULT_HBASE_RPC_COMPRESSION =
     Compression.Algorithm.NONE;
 
   public static final String
@@ -696,7 +696,7 @@ public final class HConstants {
   /**
    * Absolute path of the external jar which will contain the custom compaction hook
    */
-  public static  String COMPACTION_HOOK_JAR = "compaction_hook_jar";
+  public static final String COMPACTION_HOOK_JAR = "compaction_hook_jar";
 
   public static final String GENERAL_BLOOM_FILTER = "general_bloom_filter";
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/KeyValue.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/KeyValue.java?rev=1509363&r1=1509362&r2=1509363&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/KeyValue.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/KeyValue.java Thu Aug  1
18:18:26 2013
@@ -65,7 +65,7 @@ import org.apache.hadoop.io.Writable;
  * <p>TODO: Group Key-only comparators and operations into a Key class, just
  * for neatness sake, if can figure what to call it.
  */
-public class KeyValue implements Writable, HeapSize {
+public class KeyValue implements Writable, HeapSize, Cloneable {
   static final Log LOG = LogFactory.getLog(KeyValue.class);
 
   /**
@@ -79,37 +79,37 @@ public class KeyValue implements Writabl
   /**
    * Comparator for plain key/values; i.e. non-catalog table key/values.
    */
-  public static KVComparator COMPARATOR = new KVComparator();
+  public static final KVComparator COMPARATOR = new KVComparator();
 
   /**
    * Comparator for plain key; i.e. non-catalog table key.  Works on Key portion
    * of KeyValue only.
    */
-  public static KeyComparator KEY_COMPARATOR = new KeyComparator();
+  public static final KeyComparator KEY_COMPARATOR = new KeyComparator();
 
   /**
    * A {@link KVComparator} for <code>.META.</code> catalog table
    * {@link KeyValue}s.
    */
-  public static KVComparator META_COMPARATOR = new MetaComparator();
+  public static final KVComparator META_COMPARATOR = new MetaComparator();
 
   /**
    * A {@link KVComparator} for <code>.META.</code> catalog table
    * {@link KeyValue} keys.
    */
-  public static KeyComparator META_KEY_COMPARATOR = new MetaKeyComparator();
+  public static final KeyComparator META_KEY_COMPARATOR = new MetaKeyComparator();
 
   /**
    * A {@link KVComparator} for <code>-ROOT-</code> catalog table
    * {@link KeyValue}s.
    */
-  public static KVComparator ROOT_COMPARATOR = new RootComparator();
+  public static final KVComparator ROOT_COMPARATOR = new RootComparator();
 
   /**
    * A {@link KVComparator} for <code>-ROOT-</code> catalog table
    * {@link KeyValue} keys.
    */
-  public static KeyComparator ROOT_KEY_COMPARATOR = new RootKeyComparator();
+  public static final KeyComparator ROOT_KEY_COMPARATOR = new RootKeyComparator();
 
   /**
    * Get the appropriate row comparator for the specified table.

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ClientLocalScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ClientLocalScanner.java?rev=1509363&r1=1509362&r2=1509363&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ClientLocalScanner.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ClientLocalScanner.java
Thu Aug  1 18:18:26 2013
@@ -70,7 +70,7 @@ public class ClientLocalScanner extends 
   /*
    * Threadpool for doing scanner prefetches
    */
-  public static ThreadPoolExecutor scanPrefetchThreadPool;
+  public static final ThreadPoolExecutor scanPrefetchThreadPool;
   private static int numHandlers = 20;
   private final boolean areHardlinksCreated;
   // Initializing the numHandlers statically since the thread pool can be

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HBaseFsck.java?rev=1509363&r1=1509362&r2=1509363&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HBaseFsck.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HBaseFsck.java Thu
Aug  1 18:18:26 2013
@@ -252,16 +252,16 @@ public class HBaseFsck {
     try { 
       for (HbckInfo hbi : regionInfo.values()) {
         tableDir = HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), 
-         		  hbi.metaEntry.getTableDesc().getName());
-		  
+            hbi.metaEntry.getTableDesc().getName());
+
         Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
         FileSystem fs = rootDir.getFileSystem(conf);
-		  
+
         Path regionPath = HRegion.getRegionDir(tableDir, 
                           hbi.metaEntry.getEncodedName());
         Path regionInfoPath = new Path(regionPath, HRegion.REGIONINFO_FILE);
         if (fs.exists(regionInfoPath) &&
-		        fs.getFileStatus(regionInfoPath).getLen() > 0) {
+          fs.getFileStatus(regionInfoPath).getLen() > 0) {
           FSDataInputStream in = fs.open(regionInfoPath);
           HRegionInfo f_hri = null;
           try { 
@@ -269,7 +269,7 @@ public class HBaseFsck {
             f_hri.readFields(in);
           } catch (IOException ex) { 
             errors.reportError("Could not read .regioninfo file at " 
-	                       + regionInfoPath);
+                               + regionInfoPath);
           } finally {
             in.close();
           }
@@ -284,10 +284,10 @@ public class HBaseFsck {
         } else {
           if (!fs.exists(regionInfoPath)) {
             errors.reportError(".regioninfo not found at " 
-                              + regionInfoPath.toString());
+                               + regionInfoPath.toString());
           } else if (fs.getFileStatus(regionInfoPath).getLen() <= 0) {
             errors.reportError(".regioninfo file is empty (path =  " 
-	                    + regionInfoPath + ")");
+                               + regionInfoPath + ")");
           }
         }
       } 
@@ -296,8 +296,8 @@ public class HBaseFsck {
                          + e.getMessage());
     }
   }
-	  	
-	  
+
+
   /**
    * Scan HDFS for all regions, recording their information into
    * regionInfo
@@ -432,14 +432,14 @@ public class HBaseFsck {
     
     String tableName = null;
     if (inMeta) {
-    	tableName = hbi.metaEntry.getTableDesc().getNameAsString();
+      tableName = hbi.metaEntry.getTableDesc().getNameAsString();
     } else {
-    	tableName = UnkownTable;
+      tableName = UnkownTable;
     }
     TInfo tableInfo = tablesInfo.get(tableName);
     if (tableInfo == null) {
-    	tableInfo = new TInfo(tableName);
-    	tablesInfo.put(tableName,  tableInfo);
+      tableInfo = new TInfo(tableName);
+      tablesInfo.put(tableName,  tableInfo);
     }
     tableInfo.addRegionDetails(RegionType.total, descriptiveName);
     // ========== First the healthy cases =============
@@ -447,21 +447,21 @@ public class HBaseFsck {
       return;
     }
     if (inMeta && inHdfs && isDeployed && deploymentMatchesMeta &&
shouldBeDeployed) {
-    	if (!hbi.metaEntry.isOffline()) {
-    		tableInfo.addRegionDetails(RegionType.online, descriptiveName);
-    	} else {
-    		tableInfo.addRegionDetails(RegionType.offline, descriptiveName);
-    	}
+      if (!hbi.metaEntry.isOffline()) {
+        tableInfo.addRegionDetails(RegionType.online, descriptiveName);
+      } else {
+        tableInfo.addRegionDetails(RegionType.offline, descriptiveName);
+      }
       return;
     } else if (inMeta && !shouldBeDeployed && !isDeployed) {
       // offline regions shouldn't cause complaints
-    	String message = "Region " + descriptiveName + " offline, ignoring.";
+      String message = "Region " + descriptiveName + " offline, ignoring.";
       LOG.debug(message);
       tableInfo.addRegionDetails(RegionType.offline, descriptiveName);
       tableInfo.addRegionError(message);
       return;
     } else if (recentlyModified) {
-    	String message = "Region " + descriptiveName + " was recently modified -- skipping";
+      String message = "Region " + descriptiveName + " was recently modified -- skipping";
       LOG.info(message);
       tableInfo.addRegionDetails(RegionType.skipped, descriptiveName);
       tableInfo.addRegionError(message);
@@ -472,46 +472,46 @@ public class HBaseFsck {
       // We shouldn't have record of this region at all then!
       assert false : "Entry for region with no data";
     } else if (!inMeta && !inHdfs && isDeployed) {
-    	String message = "Region " + descriptiveName + " not on HDFS or in META but " +
-    		"deployed on " + Joiner.on(", ").join(hbi.deployedOn);
+      String message = "Region " + descriptiveName + " not on HDFS or in META but " +
+        "deployed on " + Joiner.on(", ").join(hbi.deployedOn);
       errors.reportError(message);
       tableInfo.addRegionDetails(RegionType.missing, descriptiveName);
       tableInfo.addRegionError(message);
     } else if (!inMeta && inHdfs && !isDeployed) {
-    	String message = "Region " + descriptiveName + " on HDFS, but not listed in META " +
-    		"or deployed on any region server.";
+      String message = "Region " + descriptiveName + " on HDFS, but not listed in META "
+
+        "or deployed on any region server.";
       errors.reportError(message);
       tableInfo.addRegionDetails(RegionType.missing, descriptiveName);
       tableInfo.addRegionError(message);
     } else if (!inMeta && inHdfs && isDeployed) {
-    	String message = "Region " + descriptiveName + " not in META, but deployed on " +
-    		Joiner.on(", ").join(hbi.deployedOn);
+      String message = "Region " + descriptiveName + " not in META, but deployed on " +
+        Joiner.on(", ").join(hbi.deployedOn);
       errors.reportError(message);
       tableInfo.addRegionDetails(RegionType.missing, descriptiveName);
       tableInfo.addRegionError(message);
     // ========== Cases where the region is in META =============
     } else if (inMeta && !inHdfs && !isDeployed) {
-    	String message = "Region " + descriptiveName + " found in META, but not in HDFS " +
+      String message = "Region " + descriptiveName + " found in META, but not in HDFS " +
           "or deployed on any region server.";
       errors.reportError(message);
       tableInfo.addRegionDetails(RegionType.unknown, descriptiveName);
       tableInfo.addRegionError(message);
     } else if (inMeta && !inHdfs && isDeployed) {
-    	String message = "Region " + descriptiveName + " found in META, but not in HDFS, " +
+      String message = "Region " + descriptiveName + " found in META, but not in HDFS, "
+
         "and deployed on " + Joiner.on(", ").join(hbi.deployedOn);
       errors.reportError(message);
       tableInfo.addRegionDetails(RegionType.unknown, descriptiveName);
       tableInfo.addRegionError(message);
     } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed)
{
       if (couldNotScan.contains(hbi.metaEntry.regionServer)) {
-      	String message = "Could not verify region " + descriptiveName
+        String message = "Could not verify region " + descriptiveName
             + " because could not scan supposed owner "
             + hbi.metaEntry.regionServer;
         LOG.info(message);
         tableInfo.addRegionDetails(RegionType.timeout, descriptiveName);
         tableInfo.addRegionError(message);
       } else {
-      	String message = "Region " + descriptiveName + " not deployed on any region server.";
+        String message = "Region " + descriptiveName + " not deployed on any region server.";
         errors.reportWarning(message);
         // If we are trying to fix the errors
         tableInfo.addRegionDetails(RegionType.missing, descriptiveName);
@@ -526,11 +526,11 @@ public class HBaseFsck {
     } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed)
{
       String message = "Region " + descriptiveName + " should not be deployed according "
+
           "to META, but is deployed on " + Joiner.on(", ").join(hbi.deployedOn);
-    	errors.reportError(message);
-    	tableInfo.addRegionDetails(RegionType.unknown, descriptiveName);
+      errors.reportError(message);
+      tableInfo.addRegionDetails(RegionType.unknown, descriptiveName);
       tableInfo.addRegionError(message);
     } else if (inMeta && inHdfs && isMultiplyDeployed) {
-    	String message = "Region " + descriptiveName + 
+      String message = "Region " + descriptiveName +
           " is listed in META on region server " + hbi.metaEntry.regionServer + 
           " but is multiply assigned to region servers " +
           Joiner.on(", ").join(hbi.deployedOn);
@@ -548,9 +548,9 @@ public class HBaseFsck {
       String message = "Region " + descriptiveName + 
           " listed in META on region server " + hbi.metaEntry.regionServer + 
           " but found on region server " + hbi.deployedOn.get(0);
-    	errors.reportFixableError(message);
-    	tableInfo.addRegionDetails(RegionType.unknown, descriptiveName);
-    	tableInfo.addRegionError(message);
+      errors.reportFixableError(message);
+      tableInfo.addRegionDetails(RegionType.unknown, descriptiveName);
+      tableInfo.addRegionError(message);
       // If we are trying to fix the errors
       if (fix != FixState.NONE) {
         errors.print("Trying to fix assignment error...");
@@ -559,7 +559,7 @@ public class HBaseFsck {
         }
       }
     } else {
-    	String message = "Region " + descriptiveName + " is in an unforeseen state:" +
+      String message = "Region " + descriptiveName + " is in an unforeseen state:" +
           " inMeta=" + inMeta +
           " inHdfs=" + inHdfs +
           " isDeployed=" + isDeployed +
@@ -580,8 +580,8 @@ public class HBaseFsck {
   void checkIntegrity() {
     for (HbckInfo hbi : regionInfo.values()) {
       // Check only valid, working regions
-    	
-    	if (hbi.metaEntry == null) continue;
+
+      if (hbi.metaEntry == null) continue;
       if (hbi.metaEntry.regionServer == null) continue;
       if (hbi.foundRegionDir == null) continue;
       if (hbi.deployedOn.isEmpty()
@@ -590,7 +590,7 @@ public class HBaseFsck {
       
       // We should be safe here
       String tableName = hbi.metaEntry.getTableDesc().getNameAsString();
-    	TInfo modTInfo = tablesInfo.get(tableName);
+      TInfo modTInfo = tablesInfo.get(tableName);
       if (modTInfo == null) {
         modTInfo = new TInfo(tableName);
       }
@@ -602,7 +602,7 @@ public class HBaseFsck {
     }
     
     for (TInfo tInfo : tablesInfo.values()) {
-    	if (tInfo.getName().equals(UnkownTable)) continue;
+      if (tInfo.getName().equals(UnkownTable)) continue;
       if (!tInfo.check()) {
         errors.reportError("Found inconsistency in table " + tInfo.getName() + 
             ": " + tInfo.getLastError());
@@ -611,13 +611,13 @@ public class HBaseFsck {
   }
 
   public enum RegionType {
-  	total,
-  	online,
-  	offline,
-  	missing,
-  	skipped,
-  	timeout,
-  	unknown
+    total,
+    online,
+    offline,
+    missing,
+    skipped,
+    timeout,
+    unknown
   }
 
   /**
@@ -630,25 +630,25 @@ public class HBaseFsck {
     String lastError = null;
     
     private TreeMap<RegionType, ArrayList<String>> regionDetails = new TreeMap<RegionType,
ArrayList<String>>();
-  	private ArrayList<String> regionErrors = new ArrayList<String>();
-  	
+    private ArrayList<String> regionErrors = new ArrayList<String>();
+
     TInfo(String name) {
       this.tableName = name;
       edges = new TreeMap <byte[], byte[]> (Bytes.BYTES_COMPARATOR);
       deployedOn = new TreeSet <HServerAddress>();
       for (RegionType regionType : RegionType.values()) {
-      	regionDetails.put(regionType, new ArrayList<String>());
+        regionDetails.put(regionType, new ArrayList<String>());
       }
     }
-    
+
     public void addEdge(byte[] fromNode, byte[] toNode) {
       this.edges.put(fromNode, toNode);
     }
-    
+
     public void addServer(HServerAddress server) {
       this.deployedOn.add(server);
     }
-    
+
     public String getName() {
       return tableName;
     }
@@ -656,7 +656,7 @@ public class HBaseFsck {
     public int getNumRegions() {
       return edges.size();
     }
-    
+
     public String getLastError() { 
       return this.lastError;
     }
@@ -694,7 +694,7 @@ public class HBaseFsck {
         if (visited.contains(next)) {
           this.lastError = "Cycle found in region chain. " 
             + "Current = "+ posToStr(last)
-          	+ "; Cycle Start = " +  posToStr(next);
+            + "; Cycle Start = " +  posToStr(next);
           return false;
         }
         // Mark next node as visited
@@ -719,56 +719,56 @@ public class HBaseFsck {
       }
       // How did we get here?
     }
-		
-		public JSONObject toJSONObject() {
-			JSONObject ret = new JSONObject();
-			try {
-				ret.put("table", tableName);
-				for (RegionType type : RegionType.values()) {
-					ret.put(type.toString(), regionDetails.get(type).size());
-				}
-				JSONArray arr = new JSONArray();
-				for (int i=0; i<regionErrors.size(); i++) {
-					arr.put(i, regionErrors.get(i));
-				}
-				ret.put("Errors", arr);
-				ret.put("Details", this.getAllRegionDetails());
-				return ret;
-			} catch (JSONException ex) {
-				return null;
-			}
-		}
-
-		public void addRegionDetails(RegionType type, String name) {
-			regionDetails.get(type).add(name);
-		}
-		
-		public void addRegionError(String error) {
-			regionErrors.add(error);
-		}
-
-		public ArrayList<String> getRegionDetails(RegionType type) {
-			return regionDetails.get(type);
-		}
-
-		public JSONArray getRegionDetailsArray(RegionType type) {
-			JSONArray arr = new JSONArray();
-			ArrayList<String> regions = this.getRegionDetails(type);
-			for (String s : regions) {
-				arr.put(s);
-			}
-			return arr;
-		}
-
-		public JSONObject getAllRegionDetails() throws JSONException{
-			JSONObject ret = new JSONObject();
-			for (RegionType type : RegionType.values()) {
-				if (type.equals(RegionType.total)) continue;
-				if (type.equals(RegionType.online)) continue;
-				ret.put(type.toString(), getRegionDetailsArray(type));
-			}
-			return ret;
-		}
+
+    public JSONObject toJSONObject() {
+      JSONObject ret = new JSONObject();
+      try {
+        ret.put("table", tableName);
+        for (RegionType type : RegionType.values()) {
+          ret.put(type.toString(), regionDetails.get(type).size());
+        }
+        JSONArray arr = new JSONArray();
+        for (int i=0; i<regionErrors.size(); i++) {
+          arr.put(i, regionErrors.get(i));
+        }
+        ret.put("Errors", arr);
+        ret.put("Details", this.getAllRegionDetails());
+        return ret;
+      } catch (JSONException ex) {
+        return null;
+      }
+    }
+
+    public void addRegionDetails(RegionType type, String name) {
+      regionDetails.get(type).add(name);
+    }
+
+    public void addRegionError(String error) {
+      regionErrors.add(error);
+    }
+
+    public ArrayList<String> getRegionDetails(RegionType type) {
+      return regionDetails.get(type);
+    }
+
+    public JSONArray getRegionDetailsArray(RegionType type) {
+      JSONArray arr = new JSONArray();
+      ArrayList<String> regions = this.getRegionDetails(type);
+      for (String s : regions) {
+        arr.put(s);
+      }
+      return arr;
+    }
+
+    public JSONObject getAllRegionDetails() throws JSONException{
+      JSONObject ret = new JSONObject();
+      for (RegionType type : RegionType.values()) {
+        if (type.equals(RegionType.total)) continue;
+        if (type.equals(RegionType.online)) continue;
+        ret.put(type.toString(), getRegionDetailsArray(type));
+      }
+      return ret;
+    }
   }
 
   
@@ -997,7 +997,7 @@ public class HBaseFsck {
    * Prints summary of all tables found on the system.
    */
   private void printTableSummary() {
-  	if (HBaseFsck.json != null) return;
+    if (HBaseFsck.json != null) return;
     System.out.println("Summary:");
     for (TInfo tInfo : tablesInfo.values()) {
       if (tInfo.getLastError() == null) {
@@ -1032,18 +1032,18 @@ public class HBaseFsck {
 
     public synchronized void reportWarning(String message) {
       if (!summary) {
-      	if (HBaseFsck.json == null) {
-      		System.out.println("WARNING: " + message);
-      	}
+        if (HBaseFsck.json == null) {
+          System.out.println("WARNING: " + message);
+        }
       }
       warnCount++;
     }
 
     public synchronized void reportError(String message) {
       if (!summary) {
-      	if (HBaseFsck.json == null) {
-      		System.out.println("ERROR: " + message);
-      	}
+        if (HBaseFsck.json == null) {
+          System.out.println("ERROR: " + message);
+        }
       }
       errorCount++;
       showProgress = 0;
@@ -1051,56 +1051,56 @@ public class HBaseFsck {
 
     public synchronized void reportFixableError(String message) {
       if (!summary) {
-      	if (HBaseFsck.json == null) {
-      		System.out.println("ERROR (fixable): " + message);
-      	}
+        if (HBaseFsck.json == null) {
+          System.out.println("ERROR (fixable): " + message);
+        }
       }
       fixableCount++;
       showProgress = 0;
     }
 
     public synchronized int summarize() {
-    	if (HBaseFsck.json == null) {
-	      System.out.println(Integer.toString(errorCount + fixableCount) +
-	                         " inconsistencies detected.");
-	      System.out.println(Integer.toString(fixableCount) +
-	      " inconsistencies are fixable.");
-    	}
+      if (HBaseFsck.json == null) {
+        System.out.println(Integer.toString(errorCount + fixableCount) +
+                           " inconsistencies detected.");
+        System.out.println(Integer.toString(fixableCount) +
+        " inconsistencies are fixable.");
+      }
       if (warnCount > 0) {
-      	if (HBaseFsck.json == null) {
-      		System.out.println(Integer.toString(warnCount) + " warnings.");
-      	}
+        if (HBaseFsck.json == null) {
+          System.out.println(Integer.toString(warnCount) + " warnings.");
+        }
       }
       if (errorCount + fixableCount == 0) {
-      	if (HBaseFsck.json == null) {
-      		System.out.println("Status: OK ");
-      	}
+        if (HBaseFsck.json == null) {
+          System.out.println("Status: OK ");
+        }
         return 0;
       } else if (fixableCount == 0) {
-      	if (HBaseFsck.json == null) {
-      		System.out.println("Status: INCONSISTENT");
-      	}
+        if (HBaseFsck.json == null) {
+          System.out.println("Status: INCONSISTENT");
+        }
         return -1;
       } else {
-      	if (HBaseFsck.json == null) {
-      		System.out.println("Status: INCONSISTENT (fixable)");
-      	}
+        if (HBaseFsck.json == null) {
+          System.out.println("Status: INCONSISTENT (fixable)");
+        }
         return -2;
       }
     }
     
     public synchronized void print(String message) {
-    	if (HBaseFsck.json != null) return;
+      if (HBaseFsck.json != null) return;
       if (!summary) {
         System.out.println(message);
       }
     }
 
     public synchronized void detail(String message) {
-    	if (details) {
-      	if (HBaseFsck.json == null){
-      		System.out.println(message);
-      	}
+      if (details) {
+        if (HBaseFsck.json == null){
+          System.out.println(message);
+        }
       }
       showProgress = 0;
     }
@@ -1108,9 +1108,9 @@ public class HBaseFsck {
     public synchronized void progress() {
       if (showProgress++ == 10) {
         if (!summary) {
-        	if (HBaseFsck.json == null) {
-        		System.out.print(".");
-        	}
+          if (HBaseFsck.json == null) {
+            System.out.print(".");
+          }
         }
         showProgress = 0;
       }
@@ -1256,20 +1256,20 @@ public class HBaseFsck {
     }
   }
 
-	private void WriteShortTableSummaries() {
-		JSONObject ret = new JSONObject();
-		JSONArray arr = new JSONArray();
-		try {
-			int i = 0;
-			for (Entry<String, TInfo> entry : tablesInfo.entrySet()) {
-				arr.put(i++, entry.getValue().toJSONObject());
-			}
-			ret.put("Summaries", arr);
-		} catch (JSONException ex) {
-			LOG.error("Problem creating the Summaries JSON");
-		}
-		System.out.println(ret.toString());
-	}
+  private void WriteShortTableSummaries() {
+    JSONObject ret = new JSONObject();
+    JSONArray arr = new JSONArray();
+    try {
+      int i = 0;
+      for (Entry<String, TInfo> entry : tablesInfo.entrySet()) {
+        arr.put(i++, entry.getValue().toJSONObject());
+      }
+      ret.put("Summaries", arr);
+    } catch (JSONException ex) {
+      LOG.error("Problem creating the Summaries JSON");
+    }
+    System.out.println(ret.toString());
+  }
 
   /**
    * Display the full report from fsck.
@@ -1334,8 +1334,8 @@ public class HBaseFsck {
    * @param summaryFileName - the file name
    */
   private void setJsonFlag(String jsonFlag) {
-		HBaseFsck.json = jsonFlag;
-	}
+    HBaseFsck.json = jsonFlag;
+  }
 
   /**
    * Main program
@@ -1372,12 +1372,12 @@ public class HBaseFsck {
     }
     
     if (cmd.hasOption("json")) {
-    	Logger.getLogger("org.apache.zookeeper").setLevel(Level.OFF);
-    	Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.OFF);
-    	Logger.getLogger("org.apache.hadoop.fs.FileSystem").setLevel(Level.OFF);
-    	Logger.getLogger("org.apache.hadoop.fs").setLevel(Level.OFF);
-    	Logger.getLogger("org.apache.hadoop.dfs").setLevel(Level.OFF);
-    	Logger.getLogger("org.apache.hadoop.hdfs").setLevel(Level.OFF);
+      Logger.getLogger("org.apache.zookeeper").setLevel(Level.OFF);
+      Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.OFF);
+      Logger.getLogger("org.apache.hadoop.fs.FileSystem").setLevel(Level.OFF);
+      Logger.getLogger("org.apache.hadoop.fs").setLevel(Level.OFF);
+      Logger.getLogger("org.apache.hadoop.dfs").setLevel(Level.OFF);
+      Logger.getLogger("org.apache.hadoop.hdfs").setLevel(Level.OFF);
     }
 
     Configuration conf = HBaseConfiguration.create();
@@ -1427,14 +1427,14 @@ public class HBaseFsck {
     if (cmd.hasOption("y")) {
       fsck.setPromptResponse(true);
     }
-    if (cmd.equals("summary")) {
+    if (cmd.hasOption("summary")) {
         fsck.setSummary();
     }
     if (cmd.hasOption("checkRegionInfo")) {
       checkRegionInfo = true;
     }
     if (cmd.hasOption("json")) {
-    	fsck.setJsonFlag("json");
+      fsck.setJsonFlag("json");
     }
     int code = -1;
     try {
@@ -1457,7 +1457,7 @@ public class HBaseFsck {
       code = -1;
     }
     if (cmd.hasOption("json")) {
-    	fsck.WriteShortTableSummaries();
+      fsck.WriteShortTableSummaries();
     }
 
     Runtime.getRuntime().exit(code);

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java?rev=1509363&r1=1509362&r2=1509363&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
Thu Aug  1 18:18:26 2013
@@ -138,18 +138,17 @@ public class RowMutations extends Operat
   @Override
   public Map<String, Object> getFingerprint() {
     Map<String, Object> map = new HashMap<String, Object>();
-    List<String> mutationsList = new ArrayList<String>();
     // ideally, we would also include table information, but that information
     // is not stored in each Operation instance.
     map.put("row", Bytes.toStringBinary(this.row));
     int deleteCnt = 0, putCnt = 0;
     for (Mutation mod: this.mutations) {
-    	if (mod instanceof Put) {
-    		putCnt++;
-    	}
-    	else {
-    		deleteCnt++;
-    	}
+      if (mod instanceof Put) {
+        putCnt++;
+      }
+      else {
+        deleteCnt++;
+      }
     }
     map.put("num-put", putCnt);
     map.put("num-delete", deleteCnt);

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/executor/HBaseEventHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/executor/HBaseEventHandler.java?rev=1509363&r1=1509362&r2=1509363&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/executor/HBaseEventHandler.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/executor/HBaseEventHandler.java
Thu Aug  1 18:18:26 2013
@@ -67,7 +67,7 @@ public abstract class HBaseEventHandler 
   protected RegionTransitionEventData hbEventData;
 
   // listeners that are called before and after an event is processed
-  protected static List<HBaseEventHandlerListener> eventHandlerListeners = 
+  protected static final List<HBaseEventHandlerListener> eventHandlerListeners =
     Collections.synchronizedList(new ArrayList<HBaseEventHandlerListener>());  
 
   /**

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java?rev=1509363&r1=1509362&r2=1509363&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java Thu Aug 
1 18:18:26 2013
@@ -115,13 +115,13 @@ public class Bytes {
   /**
    * Pass this to TreeMaps where byte [] are keys.
    */
-  public static Comparator<byte []> BYTES_COMPARATOR =
+  public static final Comparator<byte []> BYTES_COMPARATOR =
     new ByteArrayComparator();
 
   /**
    * Use comparing byte arrays, byte-by-byte
    */
-  public static RawComparator<byte []> BYTES_RAWCOMPARATOR =
+  public static final RawComparator<byte []> BYTES_RAWCOMPARATOR =
     new ByteArrayComparator();
 
   public static final Comparator<ByteBuffer> BYTE_BUFFER_COMPARATOR =



Mime
View raw message