hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r1501908 [3/4] - in /hbase/branches/0.95: hbase-client/ hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/...
Date Wed, 10 Jul 2013 18:49:36 GMT
Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java Wed Jul 10 18:49:33 2013
@@ -1145,11 +1145,11 @@ public class AssignmentManager extends Z
   /**
    * A specific runnable that works only on a region.
    */
-  private interface RegionRunnable extends Runnable{
+  private static interface RegionRunnable extends Runnable{
     /**
      * @return - the name of the region it works on.
      */
-    String getRegionName();
+    public String getRegionName();
   }
 
   /**

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java Wed Jul 10 18:49:33 2013
@@ -220,14 +220,14 @@ public class ClusterStatusPublisher exte
   }
 
 
-  public interface Publisher extends Closeable {
+  public static interface Publisher extends Closeable {
 
-    void connect(Configuration conf) throws IOException;
+    public void connect(Configuration conf) throws IOException;
 
-    void publish(ClusterStatus cs);
+    public void publish(ClusterStatus cs);
 
     @Override
-    void close();
+    public void close();
   }
 
   public static class MulticastPublisher implements Publisher {

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java Wed Jul 10 18:49:33 2013
@@ -50,21 +50,21 @@ public interface LoadBalancer extends Co
    * Set the current cluster status.  This allows a LoadBalancer to map host name to a server
    * @param st
    */
-  void setClusterStatus(ClusterStatus st);
+  public void setClusterStatus(ClusterStatus st);
 
 
   /**
    * Set the master service.
    * @param masterServices
    */
-  void setMasterServices(MasterServices masterServices);
+  public void setMasterServices(MasterServices masterServices);
 
   /**
    * Perform the major balance operation
    * @param clusterState
    * @return List of plans
    */
-  List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState);
+  public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState);
 
   /**
    * Perform a Round Robin assignment of regions.
@@ -72,10 +72,7 @@ public interface LoadBalancer extends Co
    * @param servers
    * @return Map of servername to regioninfos
    */
-  Map<ServerName, List<HRegionInfo>> roundRobinAssignment(
-    List<HRegionInfo> regions,
-    List<ServerName> servers
-  );
+  public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions, List<ServerName> servers);
 
   /**
    * Assign regions to the previously hosting region server
@@ -83,10 +80,7 @@ public interface LoadBalancer extends Co
    * @param servers
    * @return List of plans
    */
-  Map<ServerName, List<HRegionInfo>> retainAssignment(
-    Map<HRegionInfo, ServerName> regions,
-    List<ServerName> servers
-  );
+  public Map<ServerName, List<HRegionInfo>> retainAssignment(Map<HRegionInfo, ServerName> regions, List<ServerName> servers);
 
   /**
    * Sync assign a region
@@ -94,10 +88,7 @@ public interface LoadBalancer extends Co
    * @param servers
     * @return Map regioninfos to servernames
    */
-  Map<HRegionInfo, ServerName> immediateAssignment(
-    List<HRegionInfo> regions,
-    List<ServerName> servers
-  );
+  public Map<HRegionInfo, ServerName> immediateAssignment(List<HRegionInfo> regions, List<ServerName> servers);
 
   /**
    * Get a random region server from the list
@@ -105,7 +96,6 @@ public interface LoadBalancer extends Co
    * @param servers
    * @return Servername
    */
-  ServerName randomAssignment(
-    HRegionInfo regionInfo, List<ServerName> servers
-  );
+  public ServerName randomAssignment(HRegionInfo regionInfo, 
+		  List<ServerName> servers);
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java Wed Jul 10 18:49:33 2013
@@ -40,32 +40,32 @@ public interface MasterServices extends 
   /**
    * @return Master's instance of the {@link AssignmentManager}
    */
-  AssignmentManager getAssignmentManager();
+  public AssignmentManager getAssignmentManager();
 
   /**
    * @return Master's filesystem {@link MasterFileSystem} utility class.
    */
-  MasterFileSystem getMasterFileSystem();
+  public MasterFileSystem getMasterFileSystem();
 
   /**
    * @return Master's {@link ServerManager} instance.
    */
-  ServerManager getServerManager();
+  public ServerManager getServerManager();
 
   /**
    * @return Master's instance of {@link ExecutorService}
    */
-  ExecutorService getExecutorService();
+  public ExecutorService getExecutorService();
 
   /**
    * @return Master's instance of {@link TableLockManager}
    */
-  TableLockManager getTableLockManager();
+  public TableLockManager getTableLockManager();
 
   /**
    * @return Master's instance of {@link MasterCoprocessorHost}
    */
-  MasterCoprocessorHost getCoprocessorHost();
+  public MasterCoprocessorHost getCoprocessorHost();
 
   /**
    * Check table is modifiable; i.e. exists and is offline.
@@ -75,7 +75,7 @@ public interface MasterServices extends 
    * @throws IOException
    */
   // We actually throw the exceptions mentioned in the
-  void checkTableModifiable(final byte[] tableName)
+  public void checkTableModifiable(final byte [] tableName)
       throws IOException, TableNotFoundException, TableNotDisabledException;
 
   /**
@@ -84,7 +84,7 @@ public interface MasterServices extends 
    * @param splitKeys Starting row keys for the initial table regions.  If null
    *     a single region is created.
    */
-  void createTable(HTableDescriptor desc, byte[][] splitKeys)
+  public void createTable(HTableDescriptor desc, byte [][] splitKeys)
       throws IOException;
 
   /**
@@ -92,7 +92,7 @@ public interface MasterServices extends 
    * @param tableName The table name
    * @throws IOException
    */
-  void deleteTable(final byte[] tableName) throws IOException;
+  public void deleteTable(final byte[] tableName) throws IOException;
 
   /**
    * Modify the descriptor of an existing table
@@ -100,7 +100,7 @@ public interface MasterServices extends 
    * @param descriptor The updated table descriptor
    * @throws IOException
    */
-  void modifyTable(final byte[] tableName, final HTableDescriptor descriptor)
+  public void modifyTable(final byte[] tableName, final HTableDescriptor descriptor)
       throws IOException;
 
   /**
@@ -108,14 +108,14 @@ public interface MasterServices extends 
    * @param tableName The table name
    * @throws IOException
    */
-  void enableTable(final byte[] tableName) throws IOException;
+  public void enableTable(final byte[] tableName) throws IOException;
 
   /**
    * Disable an existing table
    * @param tableName The table name
    * @throws IOException
    */
-  void disableTable(final byte[] tableName) throws IOException;
+  public void disableTable(final byte[] tableName) throws IOException;
 
   /**
    * Add a new column to an existing table
@@ -123,7 +123,7 @@ public interface MasterServices extends 
    * @param column The column definition
    * @throws IOException
    */
-  void addColumn(final byte[] tableName, final HColumnDescriptor column)
+  public void addColumn(final byte[] tableName, final HColumnDescriptor column)
       throws IOException;
 
   /**
@@ -132,7 +132,7 @@ public interface MasterServices extends 
    * @param descriptor The updated column definition
    * @throws IOException
    */
-  void modifyColumn(byte[] tableName, HColumnDescriptor descriptor)
+  public void modifyColumn(byte[] tableName, HColumnDescriptor descriptor)
       throws IOException;
 
   /**
@@ -141,18 +141,18 @@ public interface MasterServices extends 
    * @param columnName The column name
    * @throws IOException
    */
-  void deleteColumn(final byte[] tableName, final byte[] columnName)
+  public void deleteColumn(final byte[] tableName, final byte[] columnName)
       throws IOException;
 
   /**
    * @return Return table descriptors implementation.
    */
-  TableDescriptors getTableDescriptors();
+  public TableDescriptors getTableDescriptors();
 
   /**
    * @return true if master enables ServerShutdownHandler;
    */
-  boolean isServerShutdownHandlerEnabled();
+  public boolean isServerShutdownHandlerEnabled();
 
   /**
    * Registers a new protocol buffer {@link Service} subclass as a master coprocessor endpoint.
@@ -167,7 +167,7 @@ public interface MasterServices extends 
    * @return {@code true} if the registration was successful, {@code false}
    * otherwise
    */
-  boolean registerService(Service instance);
+  public boolean registerService(Service instance);
 
   /**
    * Merge two regions. The real implementation is on the regionserver, master
@@ -178,13 +178,12 @@ public interface MasterServices extends 
    *          two adjacent regions
    * @throws IOException
    */
-  void dispatchMergingRegions(
-    final HRegionInfo region_a, final HRegionInfo region_b, final boolean forcible
-  ) throws IOException;
+  public void dispatchMergingRegions(final HRegionInfo region_a,
+      final HRegionInfo region_b, final boolean forcible) throws IOException;
 
   /**
    * @return true if master is initialized
    */
-  boolean isInitialized();
+  public boolean isInitialized();
 
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java Wed Jul 10 18:49:33 2013
@@ -34,30 +34,30 @@ public interface SnapshotSentinel {
    * @return <tt>false</tt> if the snapshot is still in progress, <tt>true</tt> if the snapshot has
    *         finished
    */
-  boolean isFinished();
+  public boolean isFinished();
 
   /**
    * @return -1 if the snapshot is in progress, otherwise the completion timestamp.
    */
-  long getCompletionTimestamp();
+  public long getCompletionTimestamp();
 
   /**
    * Actively cancel a running snapshot.
    * @param why Reason for cancellation.
    */
-  void cancel(String why);
+  public void cancel(String why);
 
   /**
    * @return the description of the snapshot being run
    */
-  SnapshotDescription getSnapshot();
+  public SnapshotDescription getSnapshot();
 
   /**
    * Get the exception that caused the snapshot to fail, if the snapshot has failed.
    * @return {@link ForeignException} that caused the snapshot to fail, or <tt>null</tt> if the
    *  snapshot is still in progress or has succeeded
    */
-  ForeignException getExceptionIfFailed();
+  public ForeignException getExceptionIfFailed();
 
   /**
    * Rethrow the exception returned by {@link SnapshotSentinel#getExceptionIfFailed}.
@@ -65,5 +65,5 @@ public interface SnapshotSentinel {
    *
    * @throws ForeignException all exceptions from remote sources are procedure exceptions
    */
-  void rethrowExceptionIfFailed() throws ForeignException;
+  public void rethrowExceptionIfFailed() throws ForeignException;
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java Wed Jul 10 18:49:33 2013
@@ -1593,11 +1593,11 @@ public class SplitLogManager extends Zoo
    * a serialization point at the end of the task processing. Must be
    * restartable and idempotent.
    */
-  public interface TaskFinisher {
+  static public interface TaskFinisher {
     /**
      * status that can be returned finish()
      */
-    enum Status {
+    static public enum Status {
       /**
        * task completed successfully
        */
@@ -1616,7 +1616,7 @@ public class SplitLogManager extends Zoo
      * @param taskname
      * @return DONE if task completed successfully, ERR otherwise
      */
-    Status finish(ServerName workerName, String taskname);
+    public Status finish(ServerName workerName, String taskname);
   }
 
   enum ResubmitDirective {

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java Wed Jul 10 18:49:33 2013
@@ -82,20 +82,20 @@ public abstract class TableLockManager {
    * A distributed lock for a table.
    */
   @InterfaceAudience.Private
-  public interface TableLock {
+  public static interface TableLock {
     /**
      * Acquire the lock, with the configured lock timeout.
      * @throws LockTimeoutException If unable to acquire a lock within a specified
      * time period (if any)
      * @throws IOException If unrecoverable error occurs
      */
-    void acquire() throws IOException;
+    public void acquire() throws IOException;
 
     /**
      * Release the lock already held.
      * @throws IOException If there is an unrecoverable error releasing the lock
      */
-    void release() throws IOException;
+    public void release() throws IOException;
   }
 
   /**

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java Wed Jul 10 18:49:33 2013
@@ -33,5 +33,5 @@ public interface TotesHRegionInfo {
   /**
    * @return HRegionInfo instance.
    */
-  HRegionInfo getHRegionInfo();
+  public HRegionInfo getHRegionInfo();
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java Wed Jul 10 18:49:33 2013
@@ -29,16 +29,16 @@ import com.google.protobuf.Message;
  */
 @InterfaceAudience.Private
 public interface MonitoredRPCHandler extends MonitoredTask {
-  String getRPC();
-  String getRPC(boolean withParams);
-  long getRPCPacketLength();
-  String getClient();
-  long getRPCStartTime();
-  long getRPCQueueTime();
-  boolean isRPCRunning();
-  boolean isOperationRunning();
+  public abstract String getRPC();
+  public abstract String getRPC(boolean withParams);
+  public abstract long getRPCPacketLength();
+  public abstract String getClient();
+  public abstract long getRPCStartTime();
+  public abstract long getRPCQueueTime();
+  public abstract boolean isRPCRunning();
+  public abstract boolean isOperationRunning();
 
-  void setRPC(String methodName, Object[] params, long queueTime);
-  void setRPCPacket(Message param);
-  void setConnection(String clientAddress, int remotePort);
+  public abstract void setRPC(String methodName, Object [] params, long queueTime);
+  public abstract void setRPCPacket(Message param);
+  public abstract void setConnection(String clientAddress, int remotePort);
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java Wed Jul 10 18:49:33 2013
@@ -32,47 +32,47 @@ public interface MonitoredTask extends C
     ABORTED;
   }
 
-  long getStartTime();
-  String getDescription();
-  String getStatus();
-  long getStatusTime();
-  State getState();
-  long getStateTime();
-  long getCompletionTimestamp();
-
-  void markComplete(String msg);
-  void pause(String msg);
-  void resume(String msg);
-  void abort(String msg);
-  void expireNow();
+  public abstract long getStartTime();
+  public abstract String getDescription();
+  public abstract String getStatus();
+  public abstract long getStatusTime();
+  public abstract State getState();
+  public abstract long getStateTime();
+  public abstract long getCompletionTimestamp();
+
+  public abstract void markComplete(String msg);
+  public abstract void pause(String msg);
+  public abstract void resume(String msg);
+  public abstract void abort(String msg);
+  public abstract void expireNow();
 
-  void setStatus(String status);
-  void setDescription(String description);
+  public abstract void setStatus(String status);
+  public abstract void setDescription(String description);
 
   /**
    * Explicitly mark this status as able to be cleaned up,
    * even though it might not be complete.
    */
-  void cleanup();
+  public abstract void cleanup();
 
   /**
    * Public exposure of Object.clone() in order to allow clients to easily 
    * capture current state.
    * @return a copy of the object whose references will not change
    */
-  MonitoredTask clone();
+  public abstract MonitoredTask clone();
 
   /**
    * Creates a string map of internal details for extensible exposure of 
    * monitored tasks.
    * @return A Map containing information for this task.
    */
-  Map<String, Object> toMap() throws IOException;
+  public abstract Map<String, Object> toMap() throws IOException;
 
   /**
    * Creates a JSON object for parseable exposure of monitored tasks.
    * @return An encoded JSON object containing information for this task.
    */
-  String toJSON() throws IOException;
+  public abstract String toJSON() throws IOException;
 
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java Wed Jul 10 18:49:33 2013
@@ -39,7 +39,7 @@ public interface ProcedureCoordinatorRpc
    * @param listener
    * @return true if succeed, false if encountered initialization errors.
    */
-  boolean start(final ProcedureCoordinator listener);
+  public boolean start(final ProcedureCoordinator listener);
 
   /**
    * Notify the members that the coordinator has aborted the procedure and that it should release
@@ -50,7 +50,7 @@ public interface ProcedureCoordinatorRpc
    * @throws IOException if the rpcs can't reach the other members of the procedure (and can't
    *           recover).
    */
-  void sendAbortToMembers(Procedure procName, ForeignException cause) throws IOException;
+  public void sendAbortToMembers(Procedure procName, ForeignException cause) throws IOException;
 
   /**
    * Notify the members to acquire barrier for the procedure
@@ -61,7 +61,7 @@ public interface ProcedureCoordinatorRpc
    * @throws IllegalArgumentException if the procedure was already marked as failed
    * @throws IOException if we can't reach the remote notification mechanism
    */
-  void sendGlobalBarrierAcquire(Procedure procName, byte[] info, List<String> members)
+  public void sendGlobalBarrierAcquire(Procedure procName, byte[] info, List<String> members)
       throws IOException, IllegalArgumentException;
 
   /**
@@ -74,12 +74,12 @@ public interface ProcedureCoordinatorRpc
    * @param members members to tell we have reached in-barrier phase
    * @throws IOException if we can't reach the remote notification mechanism
    */
-  void sendGlobalBarrierReached(Procedure procName, List<String> members) throws IOException;
+  public void sendGlobalBarrierReached(Procedure procName, List<String> members) throws IOException;
 
   /**
    * Notify Members to reset the distributed state for procedure
    * @param procName name of the procedure to reset
    * @throws IOException if the remote notification mechanism cannot be reached
    */
-  void resetMembers(Procedure procName) throws IOException;
+  public void resetMembers(Procedure procName) throws IOException;
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java Wed Jul 10 18:49:33 2013
@@ -35,13 +35,13 @@ public interface ProcedureMemberRpcs ext
   /**
    * Initialize and start any threads or connections the member needs.
    */
-  void start(final String memberName, final ProcedureMember member);
+  public void start(final String memberName, final ProcedureMember member);
 
   /**
    * Each subprocedure is being executed on a member.  This is the identifier for the member.
    * @return the member name
    */
-  String getMemberName();
+  public String getMemberName();
 
   /**
    * Notify the coordinator that we aborted the specified {@link Subprocedure}
@@ -51,7 +51,7 @@ public interface ProcedureMemberRpcs ext
    * @throws IOException thrown when the rpcs can't reach the other members of the procedure (and
    *  thus can't recover).
    */
-  void sendMemberAborted(Subprocedure sub, ForeignException cause) throws IOException;
+  public void sendMemberAborted(Subprocedure sub, ForeignException cause) throws IOException;
 
   /**
    * Notify the coordinator that the specified {@link Subprocedure} has acquired the locally required
@@ -60,7 +60,7 @@ public interface ProcedureMemberRpcs ext
    * @param sub the specified {@link Subprocedure}
    * @throws IOException if we can't reach the coordinator
    */
-  void sendMemberAcquired(Subprocedure sub) throws IOException;
+  public void sendMemberAcquired(Subprocedure sub) throws IOException;
 
   /**
    * Notify the coordinator that the specified {@link Subprocedure} has completed the work that
@@ -69,5 +69,5 @@ public interface ProcedureMemberRpcs ext
    * @param sub the specified {@link Subprocedure}
    * @throws IOException if we can't reach the coordinator
    */
-  void sendMemberCompleted(Subprocedure sub) throws IOException;
-}
+  public void sendMemberCompleted(Subprocedure sub) throws IOException;
+}
\ No newline at end of file

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/SubprocedureFactory.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/SubprocedureFactory.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/SubprocedureFactory.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/SubprocedureFactory.java Wed Jul 10 18:49:33 2013
@@ -36,5 +36,5 @@ public interface SubprocedureFactory {
    *           request
    * @throws IllegalStateException if the current runner cannot accept any more new requests
    */
-  Subprocedure buildSubprocedure(String procName, byte[] procArgs);
+  public Subprocedure buildSubprocedure(String procName, byte[] procArgs);
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java Wed Jul 10 18:49:33 2013
@@ -88,14 +88,13 @@ public interface ColumnTracker {
    *
    * @return null, or a ColumnCount that we should seek to
    */
-  ColumnCount getColumnHint();
+  public ColumnCount getColumnHint();
 
   /**
    * Retrieve the MatchCode for the next row or column
    */
-  MatchCode getNextRowOrNextColumn(
-    byte[] bytes, int offset, int qualLength
-  );
+  public MatchCode getNextRowOrNextColumn(byte[] bytes, int offset,
+      int qualLength);
 
   /**
    * Give the tracker a chance to declare it's done based on only the timestamp
@@ -104,5 +103,5 @@ public interface ColumnTracker {
    * @param timestamp
    * @return <code>true</code> to early out based on timestamp.
    */
-  boolean isDone(long timestamp);
+  public boolean isDone(long timestamp);
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java Wed Jul 10 18:49:33 2013
@@ -34,7 +34,7 @@ public interface CompactionRequestor {
    *         compactions were started
    * @throws IOException
    */
-  List<CompactionRequest> requestCompaction(final HRegion r, final String why)
+  public List<CompactionRequest> requestCompaction(final HRegion r, final String why)
       throws IOException;
 
   /**
@@ -47,9 +47,8 @@ public interface CompactionRequestor {
    *         compactions were started
    * @throws IOException
    */
-  List<CompactionRequest> requestCompaction(
-    final HRegion r, final String why, List<Pair<CompactionRequest, Store>> requests
-  )
+  public List<CompactionRequest> requestCompaction(final HRegion r, final String why,
+      List<Pair<CompactionRequest, Store>> requests)
       throws IOException;
 
   /**
@@ -61,9 +60,8 @@ public interface CompactionRequestor {
    * @return The created {@link CompactionRequest} or <tt>null</tt> if no compaction was started.
    * @throws IOException
    */
-  CompactionRequest requestCompaction(
-    final HRegion r, final Store s, final String why, CompactionRequest request
-  ) throws IOException;
+  public CompactionRequest requestCompaction(final HRegion r, final Store s, final String why,
+      CompactionRequest request) throws IOException;
 
   /**
    * @param r Region to compact
@@ -76,9 +74,8 @@ public interface CompactionRequestor {
    *         compactions were started.
    * @throws IOException
    */
-  List<CompactionRequest> requestCompaction(
-    final HRegion r, final String why, int pri, List<Pair<CompactionRequest, Store>> requests
-  ) throws IOException;
+  public List<CompactionRequest> requestCompaction(final HRegion r, final String why, int pri,
+      List<Pair<CompactionRequest, Store>> requests) throws IOException;
 
   /**
    * @param r Region to compact
@@ -90,7 +87,6 @@ public interface CompactionRequestor {
    * @return The created {@link CompactionRequest} or <tt>null</tt> if no compaction was started
    * @throws IOException
    */
-  CompactionRequest requestCompaction(
-    final HRegion r, final Store s, final String why, int pri, CompactionRequest request
-  ) throws IOException;
+  public CompactionRequest requestCompaction(final HRegion r, final Store s, final String why,
+      int pri, CompactionRequest request) throws IOException;
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java Wed Jul 10 18:49:33 2013
@@ -43,9 +43,8 @@ public interface DeleteTracker {
    * @param timestamp timestamp
    * @param type delete type as byte
    */
-  void add(
-    byte[] buffer, int qualifierOffset, int qualifierLength, long timestamp, byte type
-  );
+  public void add(byte [] buffer, int qualifierOffset, int qualifierLength,
+      long timestamp, byte type);
 
   /**
    * Check if the specified KeyValue buffer has been deleted by a previously
@@ -56,14 +55,13 @@ public interface DeleteTracker {
    * @param timestamp timestamp
    * @return deleteResult The result tells whether the KeyValue is deleted and why
    */
-  DeleteResult isDeleted(
-    byte[] buffer, int qualifierOffset, int qualifierLength, long timestamp
-  );
+  public DeleteResult isDeleted(byte [] buffer, int qualifierOffset,
+      int qualifierLength, long timestamp);
 
   /**
    * @return true if there are no current delete, false otherwise
    */
-  boolean isEmpty();
+  public boolean isEmpty();
 
   /**
    * Called at the end of every StoreFile.
@@ -71,14 +69,14 @@ public interface DeleteTracker {
    * Many optimized implementations of Trackers will require an update at
    * when the end of each StoreFile is reached.
    */
-  void update();
+  public void update();
 
   /**
    * Called between rows.
    * <p>
    * This clears everything as if a new DeleteTracker was instantiated.
    */
-  void reset();
+  public void reset();
 
 
   /**
@@ -104,7 +102,7 @@ public interface DeleteTracker {
    * Based on the delete result, the ScanQueryMatcher will decide the next
    * operation
    */
-  enum DeleteResult {
+  public static enum DeleteResult {
     FAMILY_DELETED, // The KeyValue is deleted by a delete family.
     FAMILY_VERSION_DELETED, // The KeyValue is deleted by a delete family version.
     COLUMN_DELETED, // The KeyValue is deleted by a delete column.

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Wed Jul 10 18:49:33 2013
@@ -5618,7 +5618,7 @@ public class HRegion implements HeapSize
    * bulkLoadHFile() to perform any necessary
    * pre/post processing of a given bulkload call
    */
-  public interface BulkLoadListener {
+  public static interface BulkLoadListener {
 
     /**
      * Called before an HFile is actually loaded

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java Wed Jul 10 18:49:33 2013
@@ -47,7 +47,7 @@ public interface InternalScanner extends
    * @return true if more rows exist after this one, false if scanner is done
    * @throws IOException e
    */
-  boolean next(List<KeyValue> results) throws IOException;
+  public boolean next(List<KeyValue> results) throws IOException;
 
   /**
    * Grab the next row's worth of values with a limit on the number of values
@@ -57,11 +57,11 @@ public interface InternalScanner extends
    * @return true if more rows exist after this one, false if scanner is done
    * @throws IOException e
    */
-  boolean next(List<KeyValue> result, int limit) throws IOException;
+  public boolean next(List<KeyValue> result, int limit) throws IOException;
 
   /**
    * Closes the scanner and releases any resources it has allocated
    * @throws IOException
    */
-  void close() throws IOException;
+  public void close() throws IOException;
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java Wed Jul 10 18:49:33 2013
@@ -34,20 +34,20 @@ public interface KeyValueScanner {
    * Look at the next KeyValue in this scanner, but do not iterate scanner.
    * @return the next KeyValue
    */
-  KeyValue peek();
+  public KeyValue peek();
 
   /**
    * Return the next KeyValue in this scanner, iterating the scanner
    * @return the next KeyValue
    */
-  KeyValue next() throws IOException;
+  public KeyValue next() throws IOException;
 
   /**
    * Seek the scanner at or after the specified KeyValue.
    * @param key seek value
    * @return true if scanner has values left, false if end of scanner
    */
-  boolean seek(KeyValue key) throws IOException;
+  public boolean seek(KeyValue key) throws IOException;
 
   /**
    * Reseek the scanner at or after the specified KeyValue.
@@ -57,7 +57,7 @@ public interface KeyValueScanner {
    * @param key seek value (should be non-null)
    * @return true if scanner has values left, false if end of scanner
    */
-  boolean reseek(KeyValue key) throws IOException;
+  public boolean reseek(KeyValue key) throws IOException;
 
   /**
    * Get the sequence id associated with this KeyValueScanner. This is required
@@ -65,12 +65,12 @@ public interface KeyValueScanner {
    * The default implementation for this would be to return 0. A file having
    * lower sequence id will be considered to be the older one.
    */
-  long getSequenceID();
+  public long getSequenceID();
 
   /**
    * Close the KeyValue scanner.
    */
-  void close();
+  public void close();
 
   /**
    * Allows to filter out scanners (both StoreFile and memstore) that we don't
@@ -82,9 +82,8 @@ public interface KeyValueScanner {
    *          this query, based on TTL
    * @return true if the scanner should be included in the query
    */
-  boolean shouldUseScanner(
-    Scan scan, SortedSet<byte[]> columns, long oldestUnexpiredTS
-  );
+  public boolean shouldUseScanner(Scan scan, SortedSet<byte[]> columns,
+      long oldestUnexpiredTS);
 
   // "Lazy scanner" optimizations
 
@@ -98,7 +97,7 @@ public interface KeyValueScanner {
    * @param forward do a forward-only "reseek" instead of a random-access seek
    * @param useBloom whether to enable multi-column Bloom filter optimization
    */
-  boolean requestSeek(KeyValue kv, boolean forward, boolean useBloom)
+  public boolean requestSeek(KeyValue kv, boolean forward, boolean useBloom)
       throws IOException;
 
   /**
@@ -107,7 +106,7 @@ public interface KeyValueScanner {
    * store scanner bubbles up to the top of the key-value heap. This method is
    * then used to ensure the top store file scanner has done a seek operation.
    */
-  boolean realSeekDone();
+  public boolean realSeekDone();
 
   /**
    * Does the real seek operation in case it was skipped by
@@ -116,11 +115,11 @@ public interface KeyValueScanner {
    * of the scanners). The easiest way to achieve this is to call
    * {@link #realSeekDone()} first.
    */
-  void enforceSeek() throws IOException;
+  public void enforceSeek() throws IOException;
 
   /**
    * @return true if this is a file scanner. Otherwise a memory scanner is
    *         assumed.
    */
-  boolean isFileScanner();
+  public boolean isFileScanner();  
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java Wed Jul 10 18:49:33 2013
@@ -29,5 +29,5 @@ public interface LastSequenceId {
    * @param regionName Encoded region name
    * @return Last flushed sequence Id for regionName or -1 if it can't be determined
    */
-  long getLastSequenceId(byte[] regionName);
+  public long getLastSequenceId(byte[] regionName);
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseListener.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseListener.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseListener.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseListener.java Wed Jul 10 18:49:33 2013
@@ -32,5 +32,5 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.Private
 public interface LeaseListener {
   /** When a lease expires, this method is called. */
-  void leaseExpired();
+  public void leaseExpired();
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java Wed Jul 10 18:49:33 2013
@@ -35,7 +35,7 @@ interface OnlineRegions extends Server {
    * Add to online regions.
    * @param r
    */
-  void addToOnlineRegions(final HRegion r);
+  public void addToOnlineRegions(final HRegion r);
 
   /**
    * This method removes HRegion corresponding to hri from the Map of onlineRegions.
@@ -44,7 +44,7 @@ interface OnlineRegions extends Server {
    * @param destination Destination, if any, null otherwise.
    * @return True if we removed a region from online list.
    */
-  boolean removeFromOnlineRegions(final HRegion r, ServerName destination);
+  public boolean removeFromOnlineRegions(final HRegion r, ServerName destination);
 
   /**
    * Return {@link HRegion} instance.
@@ -54,7 +54,7 @@ interface OnlineRegions extends Server {
    * @return HRegion for the passed encoded <code>encodedRegionName</code> or
    * null if named region is not member of the online regions.
    */
-  HRegion getFromOnlineRegions(String encodedRegionName);
+  public HRegion getFromOnlineRegions(String encodedRegionName);
 
    /**
     * Get all online regions of a table in this RS.
@@ -62,5 +62,5 @@ interface OnlineRegions extends Server {
     * @return List of HRegion
     * @throws java.io.IOException
     */
-   List<HRegion> getOnlineRegions(byte[] tableName) throws IOException;
+   public List<HRegion> getOnlineRegions(byte[] tableName) throws IOException;
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java Wed Jul 10 18:49:33 2013
@@ -34,13 +34,13 @@ public interface RegionScanner extends I
   /**
    * @return The RegionInfo for this scanner.
    */
-  HRegionInfo getRegionInfo();
+  public HRegionInfo getRegionInfo();
 
   /**
    * @return True if a filter indicates that this scanner will return no further rows.
    * @throws IOException in case of I/O failure on a filter.
    */
-  boolean isFilterDone() throws IOException;
+  public boolean isFilterDone() throws IOException;
 
   /**
    * Do a reseek to the required row. Should not be used to seek to a key which
@@ -52,17 +52,17 @@ public interface RegionScanner extends I
    *           if row is null
    *
    */
-  boolean reseek(byte[] row) throws IOException;
+  public boolean reseek(byte[] row) throws IOException;
 
   /**
    * @return The preferred max buffersize. See {@link Scan#setMaxResultSize(long)}
    */
-  long getMaxResultSize();
+  public long getMaxResultSize();
 
   /**
    * @return The Scanner's MVCC readPt see {@link MultiVersionConsistencyControl}
    */
-  long getMvccReadPoint();
+  public long getMvccReadPoint();
 
   /**
    * Grab the next row's worth of values with the default limit on the number of values
@@ -74,7 +74,7 @@ public interface RegionScanner extends I
    * @return true if more rows exist after this one, false if scanner is done
    * @throws IOException e
    */
-  boolean nextRaw(List<KeyValue> result) throws IOException;
+  public boolean nextRaw(List<KeyValue> result) throws IOException;
 
   /**
    * Grab the next row's worth of values with a limit on the number of values
@@ -102,5 +102,5 @@ public interface RegionScanner extends I
    * @return true if more rows exist after this one, false if scanner is done
    * @throws IOException e
    */
-  boolean nextRaw(List<KeyValue> result, int limit) throws IOException;
+  public boolean nextRaw(List<KeyValue> result, int limit) throws IOException;
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java Wed Jul 10 18:49:33 2013
@@ -40,31 +40,31 @@ public interface RegionServerServices ex
   /**
    * @return True if this regionserver is stopping.
    */
-  boolean isStopping();
+  public boolean isStopping();
 
   /** @return the HLog for a particular region. Pass null for getting the
    * default (common) WAL */
-  HLog getWAL(HRegionInfo regionInfo) throws IOException;
+  public HLog getWAL(HRegionInfo regionInfo) throws IOException;
 
   /**
    * @return Implementation of {@link CompactionRequestor} or null.
    */
-  CompactionRequestor getCompactionRequester();
+  public CompactionRequestor getCompactionRequester();
 
   /**
    * @return Implementation of {@link FlushRequester} or null.
    */
-  FlushRequester getFlushRequester();
+  public FlushRequester getFlushRequester();
 
   /**
    * @return the RegionServerAccounting for this Region Server
    */
-  RegionServerAccounting getRegionServerAccounting();
+  public RegionServerAccounting getRegionServerAccounting();
 
   /**
    * @return RegionServer's instance of {@link TableLockManager}
    */
-  TableLockManager getTableLockManager();
+  public TableLockManager getTableLockManager();
 
   /**
    * Tasks to perform after region open to complete deploy of region on
@@ -75,42 +75,42 @@ public interface RegionServerServices ex
    * @throws KeeperException
    * @throws IOException
    */
-  void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
+  public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
   throws KeeperException, IOException;
 
   /**
    * Returns a reference to the region server's RPC server
    */
-  RpcServerInterface getRpcServer();
+  public RpcServerInterface getRpcServer();
 
   /**
    * Get the regions that are currently being opened or closed in the RS
    * @return map of regions in transition in this RS
    */
-  ConcurrentMap<byte[], Boolean> getRegionsInTransitionInRS();
+  public ConcurrentMap<byte[], Boolean> getRegionsInTransitionInRS();
 
   /**
    * @return Return the FileSystem object used by the regionserver
    */
-  FileSystem getFileSystem();
+  public FileSystem getFileSystem();
 
   /**
    * @return The RegionServer's "Leases" service
    */
-  Leases getLeases();
+  public Leases getLeases();
 
   /**
    * @return hbase executor service
    */
-  ExecutorService getExecutorService();
+  public ExecutorService getExecutorService();
 
   /**
    * @return The RegionServer's CatalogTracker
    */
-  CatalogTracker getCatalogTracker();
+  public CatalogTracker getCatalogTracker();
 
   /**
    * @return set of recovering regions on the hosting region server
    */
-  Map<String, HRegion> getRecoveringRegions();
+  public Map<String, HRegion> getRecoveringRegions();
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java Wed Jul 10 18:49:33 2013
@@ -38,18 +38,17 @@ public interface ReplicationService {
    * Initializes the replication service object.
    * @throws IOException
    */
-  void initialize(
-    Server rs, FileSystem fs, Path logdir, Path oldLogDir
-  ) throws IOException;
+  public void initialize(Server rs, FileSystem fs, Path logdir,
+                         Path oldLogDir) throws IOException;
 
   /**
    * Start replication services.
    * @throws IOException
    */
-  void startReplicationService() throws IOException;
+  public void startReplicationService() throws IOException;
 
   /**
    * Stops replication service.
    */
-  void stopReplicationService();
+  public void stopReplicationService();
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java Wed Jul 10 18:49:33 2013
@@ -38,5 +38,5 @@ public interface ReplicationSinkService 
    * @param cells Cells that the WALEntries refer to (if cells is non-null)
    * @throws IOException
    */
-  void replicateLogEntries(List<WALEntry> entries, CellScanner cells) throws IOException;
-}
+  public void replicateLogEntries(List<WALEntry> entries, CellScanner cells) throws IOException;
+}
\ No newline at end of file

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java Wed Jul 10 18:49:33 2013
@@ -32,5 +32,5 @@ public interface ReplicationSourceServic
    * Returns a WALObserver for the service. This is needed to 
    * observe log rolls and log archival events.
    */
-  WALActionsListener getWALActionsListener();
-}
+  public WALActionsListener getWALActionsListener();
+}
\ No newline at end of file

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java Wed Jul 10 18:49:33 2013
@@ -640,13 +640,13 @@ public class SplitLogWorker extends ZooK
    * is better to have workers prepare the task and then have the
    * {@link SplitLogManager} commit the work in SplitLogManager.TaskFinisher
    */
-  public interface TaskExecutor {
-    enum Status {
+  static public interface TaskExecutor {
+    static public enum Status {
       DONE(),
       ERR(),
       RESIGNED(),
       PREEMPTED()
     }
-    Status exec(String name, CancelableProgressable p);
+    public Status exec(String name, CancelableProgressable p);
   }
 }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Wed Jul 10 18:49:33 2013
@@ -50,13 +50,14 @@ public interface Store extends HeapSize,
 
   /* The default priority for user-specified compaction requests.
    * The user gets top priority unless we have blocking compactions. (Pri <= 0)
-   */ int PRIORITY_USER = 1;
-  int NO_PRIORITY = Integer.MIN_VALUE;
+   */
+  public static final int PRIORITY_USER = 1;
+  public static final int NO_PRIORITY = Integer.MIN_VALUE;
 
   // General Accessors
-  KeyValue.KVComparator getComparator();
+  public KeyValue.KVComparator getComparator();
 
-  Collection<StoreFile> getStorefiles();
+  public Collection<StoreFile> getStorefiles();
 
   /**
    * Close all the readers We don't need to worry about subsequent requests because the HRegion
@@ -64,7 +65,7 @@ public interface Store extends HeapSize,
    * @return the {@link StoreFile StoreFiles} that were previously being used.
    * @throws IOException on failure
    */
-  Collection<StoreFile> close() throws IOException;
+  public Collection<StoreFile> close() throws IOException;
 
   /**
    * Return a scanner for both the memstore and the HStore files. Assumes we are not in a
@@ -74,7 +75,7 @@ public interface Store extends HeapSize,
    * @return a scanner over the current key values
    * @throws IOException on failure
    */
-  KeyValueScanner getScanner(Scan scan, final NavigableSet<byte[]> targetCols)
+  public KeyValueScanner getScanner(Scan scan, final NavigableSet<byte[]> targetCols)
       throws IOException;
 
   /**
@@ -88,16 +89,11 @@ public interface Store extends HeapSize,
    * @param stopRow
    * @return all scanners for this store
    */
-  List<KeyValueScanner> getScanners(
-    boolean cacheBlocks,
-    boolean isGet,
-    boolean isCompaction,
-    ScanQueryMatcher matcher,
-    byte[] startRow,
-    byte[] stopRow
-  ) throws IOException;
+  public List<KeyValueScanner> getScanners(boolean cacheBlocks,
+      boolean isGet, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow,
+      byte[] stopRow) throws IOException;
 
-  ScanInfo getScanInfo();
+  public ScanInfo getScanInfo();
 
   /**
    * Adds or replaces the specified KeyValues.
@@ -112,14 +108,14 @@ public interface Store extends HeapSize,
    * @return memstore size delta
    * @throws IOException
    */
-  long upsert(Iterable<? extends Cell> cells, long readpoint) throws IOException;
+  public long upsert(Iterable<? extends Cell> cells, long readpoint) throws IOException;
 
   /**
    * Adds a value to the memstore
    * @param kv
    * @return memstore size delta
    */
-  long add(KeyValue kv);
+  public long add(KeyValue kv);
 
   /**
    * When was the last edit done in the memstore
@@ -131,7 +127,7 @@ public interface Store extends HeapSize,
    * key & memstoreTS value of the kv parameter.
    * @param kv
    */
-  void rollback(final KeyValue kv);
+  public void rollback(final KeyValue kv);
 
   /**
    * Find the key that matches <i>row</i> exactly, or the one that immediately precedes it. WARNING:
@@ -145,9 +141,9 @@ public interface Store extends HeapSize,
    * @return Found keyvalue or null if none found.
    * @throws IOException
    */
-  KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException;
+  public KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException;
 
-  FileSystem getFileSystem();
+  public FileSystem getFileSystem();
 
   /*
    * @param maxKeyCount
@@ -156,48 +152,44 @@ public interface Store extends HeapSize,
    * @param includeMVCCReadpoint whether we should out the MVCC readpoint
    * @return Writer for a new StoreFile in the tmp dir.
    */
-  StoreFile.Writer createWriterInTmp(
-    long maxKeyCount,
-    Compression.Algorithm compression,
-    boolean isCompaction,
-    boolean includeMVCCReadpoint
-  ) throws IOException;
+  public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
+      boolean isCompaction, boolean includeMVCCReadpoint) throws IOException;
 
   // Compaction oriented methods
 
-  boolean throttleCompaction(long compactionSize);
+  public boolean throttleCompaction(long compactionSize);
 
   /**
    * getter for CompactionProgress object
    * @return CompactionProgress object; can be null
    */
-  CompactionProgress getCompactionProgress();
+  public CompactionProgress getCompactionProgress();
 
-  CompactionContext requestCompaction() throws IOException;
+  public CompactionContext requestCompaction() throws IOException;
 
-  CompactionContext requestCompaction(int priority, CompactionRequest baseRequest)
+  public CompactionContext requestCompaction(int priority, CompactionRequest baseRequest)
       throws IOException;
 
-  void cancelRequestedCompaction(CompactionContext compaction);
+  public void cancelRequestedCompaction(CompactionContext compaction);
 
-  List<StoreFile> compact(CompactionContext compaction) throws IOException;
+  public List<StoreFile> compact(CompactionContext compaction) throws IOException;
 
   /**
    * @return true if we should run a major compaction.
    */
-  boolean isMajorCompaction() throws IOException;
+  public boolean isMajorCompaction() throws IOException;
 
-  void triggerMajorCompaction();
+  public void triggerMajorCompaction();
 
   /**
    * See if there's too much store files in this store
    * @return true if number of store files is greater than the number defined in minFilesToCompact
    */
-  boolean needsCompaction();
+  public boolean needsCompaction();
 
-  int getCompactPriority();
+  public int getCompactPriority();
 
-  StoreFlushContext createFlushContext(long cacheFlushId);
+  public StoreFlushContext createFlushContext(long cacheFlushId);
 
   /**
    * Call to complete a compaction. Its for the case where we find in the WAL a compaction
@@ -205,18 +197,18 @@ public interface Store extends HeapSize,
    * See HBASE-2331.
    * @param compaction
    */
-  void completeCompactionMarker(CompactionDescriptor compaction)
+  public void completeCompactionMarker(CompactionDescriptor compaction)
       throws IOException;
 
   // Split oriented methods
 
-  boolean canSplit();
+  public boolean canSplit();
 
   /**
    * Determines if Store should be split
    * @return byte[] if store should be split, null otherwise.
    */
-  byte[] getSplitPoint();
+  public byte[] getSplitPoint();
 
   // Bulk Load methods
 
@@ -224,7 +216,7 @@ public interface Store extends HeapSize,
    * This throws a WrongRegionException if the HFile does not fit in this region, or an
    * InvalidHFileException if the HFile is not valid.
    */
-  void assertBulkLoadHFileOk(Path srcPath) throws IOException;
+  public void assertBulkLoadHFileOk(Path srcPath) throws IOException;
 
   /**
    * This method should only be called from HRegion. It is assumed that the ranges of values in the
@@ -233,7 +225,7 @@ public interface Store extends HeapSize,
    * @param srcPathStr
    * @param sequenceId sequence Id associated with the HFile
    */
-  void bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException;
+  public void bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException;
 
   // General accessors into the state of the store
   // TODO abstract some of this out into a metrics class
@@ -241,50 +233,50 @@ public interface Store extends HeapSize,
   /**
    * @return <tt>true</tt> if the store has any underlying reference files to older HFiles
    */
-  boolean hasReferences();
+  public boolean hasReferences();
 
   /**
    * @return The size of this store's memstore, in bytes
    */
-  long getMemStoreSize();
+  public long getMemStoreSize();
 
-  HColumnDescriptor getFamily();
+  public HColumnDescriptor getFamily();
 
   /**
    * @return The maximum memstoreTS in all store files.
    */
-  long getMaxMemstoreTS();
+  public long getMaxMemstoreTS();
 
   /**
    * @return the data block encoder
    */
-  HFileDataBlockEncoder getDataBlockEncoder();
+  public HFileDataBlockEncoder getDataBlockEncoder();
 
   /** @return aggregate size of all HStores used in the last compaction */
-  long getLastCompactSize();
+  public long getLastCompactSize();
 
   /** @return aggregate size of HStore */
-  long getSize();
+  public long getSize();
 
   /**
    * @return Count of store files
    */
-  int getStorefilesCount();
+  public int getStorefilesCount();
 
   /**
    * @return The size of the store files, in bytes, uncompressed.
    */
-  long getStoreSizeUncompressed();
+  public long getStoreSizeUncompressed();
 
   /**
    * @return The size of the store files, in bytes.
    */
-  long getStorefilesSize();
+  public long getStorefilesSize();
 
   /**
    * @return The size of the store file indexes, in bytes.
    */
-  long getStorefilesIndexSize();
+  public long getStorefilesIndexSize();
 
   /**
    * Returns the total size of all index blocks in the data block indexes, including the root level,
@@ -292,14 +284,14 @@ public interface Store extends HeapSize,
    * single-level indexes.
    * @return the total size of block indexes in the store
    */
-  long getTotalStaticIndexSize();
+  public long getTotalStaticIndexSize();
 
   /**
    * Returns the total byte size of all Bloom filter bit arrays. For compound Bloom filters even the
    * Bloom blocks currently not loaded into the block cache are counted.
    * @return the total size of all Bloom filters in the store
    */
-  long getTotalStaticBloomSize();
+  public long getTotalStaticBloomSize();
 
   // Test-helper methods
 
@@ -307,40 +299,40 @@ public interface Store extends HeapSize,
    * Used for tests.
    * @return cache configuration for this Store.
    */
-  CacheConfig getCacheConfig();
+  public CacheConfig getCacheConfig();
 
   /**
    * @return the parent region info hosting this store
    */
-  HRegionInfo getRegionInfo();
+  public HRegionInfo getRegionInfo();
 
-  RegionCoprocessorHost getCoprocessorHost();
+  public RegionCoprocessorHost getCoprocessorHost();
 
-  boolean areWritesEnabled();
+  public boolean areWritesEnabled();
 
   /**
    * @return The smallest mvcc readPoint across all the scanners in this
    * region. Writes older than this readPoint, are included  in every
    * read operation.
    */
-  long getSmallestReadPoint();
+  public long getSmallestReadPoint();
 
-  String getColumnFamilyName();
+  public String getColumnFamilyName();
 
-  String getTableName();
+  public String getTableName();
 
   /*
    * @param o Observer who wants to know about changes in set of Readers
    */
-  void addChangedReaderObserver(ChangedReadersObserver o);
+  public void addChangedReaderObserver(ChangedReadersObserver o);
 
   /*
    * @param o Observer no longer interested in changes in set of Readers.
    */
-  void deleteChangedReaderObserver(ChangedReadersObserver o);
+  public void deleteChangedReaderObserver(ChangedReadersObserver o);
 
   /**
    * @return Whether this store has too many store files.
    */
-  boolean hasTooManyStoreFiles();
+  public boolean hasTooManyStoreFiles();
 }
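
[Editor's note, not part of this commit: since the Store hunks above spell out the compaction-oriented signatures, here is a small caller-side sketch of how needsCompaction, requestCompaction, compact and cancelRequestedCompaction compose. The helper class name is made up, and the package location of CompactionContext is assumed; only the method signatures shown in the hunks are relied on.]

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.regionserver.Store;
    import org.apache.hadoop.hbase.regionserver.StoreFile;
    import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;

    /** Hypothetical helper: compact a store only if it reports it needs it. */
    public final class CompactIfNeeded {
      private CompactIfNeeded() {}

      public static List<StoreFile> run(Store store) throws IOException {
        if (!store.needsCompaction()) {
          return null;                      // nothing to do
        }
        CompactionContext ctx = store.requestCompaction();
        if (ctx == null) {
          return null;                      // request was not granted
        }
        try {
          return store.compact(ctx);        // resulting store files
        } catch (IOException e) {
          store.cancelRequestedCompaction(ctx);
          throw e;
        }
      }
    }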

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java Wed Jul 10 18:49:33 2013
@@ -34,22 +34,22 @@ public interface StoreConfigInformation 
    * TODO: remove after HBASE-7252 is fixed.
    * @return Gets the Memstore flush size for the region that this store works with.
    */
-  long getMemstoreFlushSize();
+  public long getMemstoreFlushSize();
 
   /**
    * @return Gets the cf-specific time-to-live for store files.
    */
-  long getStoreFileTtl();
+  public long getStoreFileTtl();
 
   /**
    * @return Gets the cf-specific compaction check frequency multiplier.
    *         The need for compaction (outside of normal checks during flush, open, etc.) will
    *         be ascertained every multiplier * HConstants.THREAD_WAKE_FREQUENCY milliseconds.
    */
-  long getCompactionCheckMultiplier();
+  public long getCompactionCheckMultiplier();
 
   /**
    * The number of files required before flushes for this store will be blocked.
    */
-  long getBlockingFileCount();
+  public long getBlockingFileCount();
 }
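
[Editor's note, not part of this commit: the StoreConfigInformation hunk above appears to cover the whole interface, so a hypothetical fixed-value stub (for example, one a test might use to exercise a compaction policy) is enough to illustrate it. The class name and the constant values are invented.]

    import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;

    /** Hypothetical fixed-value stub of the methods shown in the hunk above. */
    public class FixedStoreConfig implements StoreConfigInformation {
      @Override
      public long getMemstoreFlushSize() {
        return 128L * 1024 * 1024;   // 128 MB flush size
      }

      @Override
      public long getStoreFileTtl() {
        return Long.MAX_VALUE;       // never expire store files by TTL
      }

      @Override
      public long getCompactionCheckMultiplier() {
        return 1000;                 // check every 1000 * THREAD_WAKE_FREQUENCY ms
      }

      @Override
      public long getBlockingFileCount() {
        return 10;                   // block flushes beyond 10 store files
      }
    }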

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java Wed Jul 10 18:49:33 2013
@@ -44,41 +44,40 @@ public interface StoreFileManager {
    * Loads the initial store files into empty StoreFileManager.
    * @param storeFiles The files to load.
    */
-  void loadFiles(List<StoreFile> storeFiles);
+  public abstract void loadFiles(List<StoreFile> storeFiles);
 
   /**
    * Adds new file, either for from MemStore flush or bulk insert, into the structure.
    * @param sf New store file.
    */
-  void insertNewFile(StoreFile sf);
+  public abstract void insertNewFile(StoreFile sf);
 
   /**
    * Adds compaction results into the structure.
    * @param compactedFiles The input files for the compaction.
    * @param results The resulting files for the compaction.
    */
-  void addCompactionResults(
-    Collection<StoreFile> compactedFiles, Collection<StoreFile> results
-  );
+  public abstract void addCompactionResults(
+      Collection<StoreFile> compactedFiles, Collection<StoreFile> results);
 
   /**
    * Clears all the files currently in use and returns them.
    * @return The files previously in use.
    */
-  ImmutableCollection<StoreFile> clearFiles();
+  public abstract ImmutableCollection<StoreFile> clearFiles();
 
   /**
    * Gets the snapshot of the store files currently in use. Can be used for things like metrics
    * and checks; should not assume anything about relations between store files in the list.
    * @return The list of StoreFiles.
    */
-  Collection<StoreFile> getStorefiles();
+  public abstract Collection<StoreFile> getStorefiles();
 
   /**
    * Returns the number of files currently in use.
    * @return The number of files.
    */
-  int getStorefileCount();
+  public abstract int getStorefileCount();
 
   /**
    * Gets the store files to scan for a Scan or Get request.
@@ -87,9 +86,8 @@ public interface StoreFileManager {
    * @param stopRow Stop row of the request.
    * @return The list of files that are to be read for this request.
    */
-  Collection<StoreFile> getFilesForScanOrGet(
-    boolean isGet, byte[] startRow, byte[] stopRow
-  );
+  public abstract Collection<StoreFile> getFilesForScanOrGet(boolean isGet,
+      byte[] startRow, byte[] stopRow);
 
   /**
    * Gets initial, full list of candidate store files to check for row-key-before.
@@ -97,9 +95,8 @@ public interface StoreFileManager {
    * @return The files that may have the key less than or equal to targetKey, in reverse
    *         order of new-ness, and preference for target key.
    */
-  Iterator<StoreFile> getCandidateFilesForRowKeyBefore(
-    KeyValue targetKey
-  );
+  public abstract Iterator<StoreFile> getCandidateFilesForRowKeyBefore(
+      KeyValue targetKey);
 
   /**
    * Updates the candidate list for finding row key before. Based on the list of candidates
@@ -112,9 +109,8 @@ public interface StoreFileManager {
    * @param candidate The current best candidate found.
    * @return The list to replace candidateFiles.
    */
-  Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
-    Iterator<StoreFile> candidateFiles, KeyValue targetKey, KeyValue candidate
-  );
+  public abstract Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
+      Iterator<StoreFile> candidateFiles, KeyValue targetKey, KeyValue candidate);
 
 
   /**
@@ -122,10 +118,10 @@ public interface StoreFileManager {
    * @return The mid-point, or null if no split is possible.
    * @throws IOException
    */
-  byte[] getSplitPoint() throws IOException;
+  public abstract byte[] getSplitPoint() throws IOException;
 
   /**
    * @return The store compaction priority.
    */
-  int getStoreCompactionPriority();
+  public abstract int getStoreCompactionPriority();
 }
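
[Editor's note, not part of this commit: the two row-key-before methods above form a small iteration protocol: fetch the initial candidate files, then let the manager trim the candidates as better keys are found. A caller-side sketch follows; the per-file lookup is a placeholder, since it is not part of this interface, and the surrounding class name is invented.]

    import java.util.Iterator;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.regionserver.StoreFile;
    import org.apache.hadoop.hbase.regionserver.StoreFileManager;

    public final class RowKeyBeforeSketch {
      private RowKeyBeforeSketch() {}

      /** Walk candidate files, letting the manager trim candidates as keys are found. */
      public static KeyValue rowKeyAtOrBefore(StoreFileManager manager, KeyValue targetKey) {
        KeyValue candidate = null;
        Iterator<StoreFile> files = manager.getCandidateFilesForRowKeyBefore(targetKey);
        while (files.hasNext()) {
          StoreFile sf = files.next();
          KeyValue found = lookupInFile(sf, targetKey);   // hypothetical per-file search
          if (found != null) {
            candidate = found;
            // Ask the manager for the list that replaces the remaining candidates.
            files = manager.updateCandidateFilesForRowKeyBefore(files, targetKey, candidate);
          }
        }
        return candidate;
      }

      /** Placeholder for an HFile-level search; not part of StoreFileManager. */
      private static KeyValue lookupInFile(StoreFile sf, KeyValue targetKey) {
        return null;  // a real implementation would consult the file's reader
      }
    }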

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Dictionary.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Dictionary.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Dictionary.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Dictionary.java Wed Jul 10 18:49:33 2013
@@ -28,7 +28,7 @@ import org.apache.hadoop.classification.
  */
 @InterfaceAudience.Private
 interface Dictionary {
-  byte NOT_IN_DICTIONARY = -1;
+  static final byte NOT_IN_DICTIONARY = -1;
 
   /**
    * Gets an entry from the dictionary.
@@ -36,7 +36,7 @@ interface Dictionary {
    * @param idx index of the entry
    * @return the entry, or null if non existent
    */
-  byte[] getEntry(short idx);
+  public byte[] getEntry(short idx);
 
   /**
    * Finds the index of an entry.
@@ -47,7 +47,7 @@ interface Dictionary {
    * @param length Length beyond <code>offset</code> that comprises entry; must be > 0.
    * @return the index of the entry, or {@link #NOT_IN_DICTIONARY} if not found
    */
-  short findEntry(byte[] data, int offset, int length);
+  public short findEntry(byte[] data, int offset, int length);
 
   /**
    * Adds an entry to the dictionary.
@@ -62,10 +62,10 @@ interface Dictionary {
    * @return the index of the entry
    */
 
-  short addEntry(byte[] data, int offset, int length);
+  public short addEntry(byte[] data, int offset, int length);
 
   /**
    * Flushes the dictionary, empties all values.
    */
-  void clear();
+  public void clear();
 }
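
[Editor's note, not part of this commit: the Dictionary hunks above show the full contract (NOT_IN_DICTIONARY, getEntry, findEntry, addEntry, clear), so a naive array-plus-map sketch illustrates how the methods relate. The real WAL dictionaries are LRU-bounded, which this sketch is not; the class name is invented, and it sits in the same package because Dictionary is package-private.]

    package org.apache.hadoop.hbase.regionserver.wal;  // Dictionary is package-private

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    /** Hypothetical unbounded dictionary sketch; not how the shipped implementations work. */
    class NaiveDictionary implements Dictionary {
      private final List<byte[]> entries = new ArrayList<byte[]>();
      private final Map<String, Short> index = new HashMap<String, Short>();

      @Override
      public byte[] getEntry(short idx) {
        return (idx >= 0 && idx < entries.size()) ? entries.get(idx) : null;
      }

      @Override
      public short findEntry(byte[] data, int offset, int length) {
        Short idx = index.get(key(data, offset, length));
        if (idx == null) {
          return NOT_IN_DICTIONARY;
        }
        return idx;
      }

      @Override
      public short addEntry(byte[] data, int offset, int length) {
        byte[] copy = Arrays.copyOfRange(data, offset, offset + length);
        short idx = (short) entries.size();
        entries.add(copy);
        index.put(key(copy, 0, copy.length), idx);
        return idx;
      }

      @Override
      public void clear() {
        entries.clear();
        index.clear();
      }

      private static String key(byte[] data, int offset, int length) {
        return Arrays.toString(Arrays.copyOfRange(data, offset, offset + length));
      }
    }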

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java Wed Jul 10 18:49:33 2013
@@ -41,26 +41,26 @@ import org.apache.hadoop.io.Writable;
 
 @InterfaceAudience.Private
 public interface HLog {
-  Log LOG = LogFactory.getLog(HLog.class);
+  public static final Log LOG = LogFactory.getLog(HLog.class);
 
   /** File Extension used while splitting an HLog into regions (HBASE-2312) */
-  String SPLITTING_EXT = "-splitting";
-  boolean SPLIT_SKIP_ERRORS_DEFAULT = false;
+  public static final String SPLITTING_EXT = "-splitting";
+  public static final boolean SPLIT_SKIP_ERRORS_DEFAULT = false;
   /** The META region's HLog filename extension */
-  String META_HLOG_FILE_EXTN = ".meta";
+  public static final String META_HLOG_FILE_EXTN = ".meta";
 
   /**
    * Configuration name of HLog Trailer's warning size. If a waltrailer's size is greater than the
    * configured size, a warning is logged. This is used with Protobuf reader/writer.
    */
-  String WAL_TRAILER_WARN_SIZE =
+  public static final String WAL_TRAILER_WARN_SIZE =
     "hbase.regionserver.waltrailer.warn.size";
-  int DEFAULT_WAL_TRAILER_WARN_SIZE = 1024*1024; // 1MB
+  public static final int DEFAULT_WAL_TRAILER_WARN_SIZE = 1024*1024; // 1MB
 
-  Pattern EDITFILES_NAME_PATTERN = Pattern.compile("-?[0-9]+");
-  String RECOVERED_LOG_TMPFILE_SUFFIX = ".temp";
+  static final Pattern EDITFILES_NAME_PATTERN = Pattern.compile("-?[0-9]+");
+  public static final String RECOVERED_LOG_TMPFILE_SUFFIX = ".temp";
 
-  interface Reader {
+  public interface Reader {
 
     /**
      * @param fs File system.
@@ -88,7 +88,7 @@ public interface HLog {
     WALTrailer getWALTrailer();
   }
 
-  interface Writer {
+  public interface Writer {
     void init(FileSystem fs, Path path, Configuration c) throws IOException;
 
     void close() throws IOException;
@@ -110,7 +110,7 @@ public interface HLog {
    * Utility class that lets us keep track of the edit with it's key Only used
    * when splitting logs
    */
-  class Entry implements Writable {
+  public static class Entry implements Writable {
     private WALEdit edit;
     private HLogKey key;
 
@@ -185,19 +185,19 @@ public interface HLog {
    *
    * @param listener
    */
-  void registerWALActionsListener(final WALActionsListener listener);
+  public void registerWALActionsListener(final WALActionsListener listener);
 
   /**
    * unregisters WALActionsListener
    *
    * @param listener
    */
-  boolean unregisterWALActionsListener(final WALActionsListener listener);
+  public boolean unregisterWALActionsListener(final WALActionsListener listener);
 
   /**
    * @return Current state of the monotonically increasing file id.
    */
-  long getFilenum();
+  public long getFilenum();
 
   /**
    * Called by HRegionServer when it opens a new region to ensure that log
@@ -208,12 +208,12 @@ public interface HLog {
    *          We'll set log edit/sequence number to this value if it is greater
    *          than the current value.
    */
-  void setSequenceNumber(final long newvalue);
+  public void setSequenceNumber(final long newvalue);
 
   /**
    * @return log sequence number
    */
-  long getSequenceNumber();
+  public long getSequenceNumber();
 
   /**
    * Roll the log writer. That is, start writing log messages to a new file.
@@ -228,7 +228,7 @@ public interface HLog {
    * @throws org.apache.hadoop.hbase.exceptions.FailedLogCloseException
    * @throws IOException
    */
-  byte[][] rollWriter() throws FailedLogCloseException, IOException;
+  public byte[][] rollWriter() throws FailedLogCloseException, IOException;
 
   /**
    * Roll the log writer. That is, start writing log messages to a new file.
@@ -246,7 +246,7 @@ public interface HLog {
    * @throws org.apache.hadoop.hbase.exceptions.FailedLogCloseException
    * @throws IOException
    */
-  byte[][] rollWriter(boolean force) throws FailedLogCloseException,
+  public byte[][] rollWriter(boolean force) throws FailedLogCloseException,
       IOException;
 
   /**
@@ -254,22 +254,21 @@ public interface HLog {
    *
    * @throws IOException
    */
-  void close() throws IOException;
+  public void close() throws IOException;
 
   /**
    * Shut down the log and delete the log directory
    *
    * @throws IOException
    */
-  void closeAndDelete() throws IOException;
+  public void closeAndDelete() throws IOException;
 
   /**
    * Same as {@link #appendNoSync(HRegionInfo, byte[], WALEdit, UUID, long, HTableDescriptor)},
    * except it causes a sync on the log
    */
-  void append(
-    HRegionInfo info, byte[] tableName, WALEdit edits, final long now, HTableDescriptor htd
-  ) throws IOException;
+  public void append(HRegionInfo info, byte[] tableName, WALEdit edits,
+      final long now, HTableDescriptor htd) throws IOException;
 
   /**
    * Append a set of edits to the log. Log edits are keyed by (encoded)
@@ -282,14 +281,8 @@ public interface HLog {
    * @param htd
    * @param isInMemstore Whether the record is in memstore. False for system records.
    */
-  void append(
-    HRegionInfo info,
-    byte[] tableName,
-    WALEdit edits,
-    final long now,
-    HTableDescriptor htd,
-    boolean isInMemstore
-  ) throws IOException;
+  public void append(HRegionInfo info, byte[] tableName, WALEdit edits,
+      final long now, HTableDescriptor htd, boolean isInMemstore) throws IOException;
 
   /**
    * Append a set of edits to the log. Log edits are keyed by (encoded)
@@ -306,27 +299,21 @@ public interface HLog {
    * @return txid of this transaction
    * @throws IOException
    */
-  long appendNoSync(
-    HRegionInfo info,
-    byte[] tableName,
-    WALEdit edits,
-    UUID clusterId,
-    final long now,
-    HTableDescriptor htd
-  ) throws IOException;
+  public long appendNoSync(HRegionInfo info, byte[] tableName, WALEdit edits,
+      UUID clusterId, final long now, HTableDescriptor htd) throws IOException;
 
-  void hsync() throws IOException;
+  public void hsync() throws IOException;
 
-  void hflush() throws IOException;
+  public void hflush() throws IOException;
 
-  void sync() throws IOException;
+  public void sync() throws IOException;
 
-  void sync(long txid) throws IOException;
+  public void sync(long txid) throws IOException;
 
   /**
    * Obtain a log sequence number.
    */
-  long obtainSeqNum();
+  public long obtainSeqNum();
 
   /**
    * WAL keeps track of the sequence numbers that were not yet flushed from memstores
@@ -343,13 +330,13 @@ public interface HLog {
    *         the resulting file as an upper-bound seqNum for that file), or NULL if flush
    *         should not be started.
    */
-  Long startCacheFlush(final byte[] encodedRegionName);
+  public Long startCacheFlush(final byte[] encodedRegionName);
 
   /**
    * Complete the cache flush.
    * @param encodedRegionName Encoded region name.
    */
-  void completeCacheFlush(final byte[] encodedRegionName);
+  public void completeCacheFlush(final byte[] encodedRegionName);
 
   /**
    * Abort a cache flush. Call if the flush fails. Note that the only recovery
@@ -357,24 +344,24 @@ public interface HLog {
    * snapshot content dropped by the failure gets restored to the memstore.v
    * @param encodedRegionName Encoded region name.
    */
-  void abortCacheFlush(byte[] encodedRegionName);
+  public void abortCacheFlush(byte[] encodedRegionName);
 
   /**
    * @return Coprocessor host.
    */
-  WALCoprocessorHost getCoprocessorHost();
+  public WALCoprocessorHost getCoprocessorHost();
 
   /**
    * Get LowReplication-Roller status
    *
    * @return lowReplicationRollEnabled
    */
-  boolean isLowReplicationRollEnabled();
+  public boolean isLowReplicationRollEnabled();
 
   /** Gets the earliest sequence number in the memstore for this particular region.
    * This can serve as best-effort "recent" WAL number for this region.
    * @param encodedRegionName The region to get the number for.
    * @return The number if present, HConstants.NO_SEQNUM if absent.
    */
-  long getEarliestMemstoreSeqNum(byte[] encodedRegionName);
+  public long getEarliestMemstoreSeqNum(byte[] encodedRegionName);
 }
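
[Editor's note, not part of this commit: purely as a reading aid for the append/sync split shown above, where appendNoSync returns a txid that a later sync(txid) can wait on, here is a hypothetical caller-side sketch. The helper name is invented, the descriptor and edit objects are assumed to come from elsewhere, and only the signatures visible in the hunks are relied on.]

    import java.io.IOException;
    import java.util.UUID;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

    public final class WalAppendSketch {
      private WalAppendSketch() {}

      /** Append without an immediate sync, then wait for just that transaction. */
      public static void appendAndSync(HLog log, HRegionInfo info, byte[] tableName,
          WALEdit edits, UUID clusterId, HTableDescriptor htd) throws IOException {
        long txid = log.appendNoSync(info, tableName, edits, clusterId,
            System.currentTimeMillis(), htd);
        // Other work could be batched in here before paying the sync cost.
        log.sync(txid);
      }
    }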

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java Wed Jul 10 18:49:33 2013
@@ -38,7 +38,7 @@ public interface WALActionsListener {
    * @param oldPath the path to the old hlog
    * @param newPath the path to the new hlog
    */
-  void preLogRoll(Path oldPath, Path newPath) throws IOException;
+  public void preLogRoll(Path oldPath, Path newPath) throws IOException;
 
   /**
    * The WAL has been rolled. The oldPath can be null if this is
@@ -46,31 +46,31 @@ public interface WALActionsListener {
    * @param oldPath the path to the old hlog
    * @param newPath the path to the new hlog
    */
-  void postLogRoll(Path oldPath, Path newPath) throws IOException;
+  public void postLogRoll(Path oldPath, Path newPath) throws IOException;
 
   /**
    * The WAL is going to be archived.
    * @param oldPath the path to the old hlog
    * @param newPath the path to the new hlog
    */
-  void preLogArchive(Path oldPath, Path newPath) throws IOException;
+  public void preLogArchive(Path oldPath, Path newPath) throws IOException;
 
   /**
    * The WAL has been archived.
    * @param oldPath the path to the old hlog
    * @param newPath the path to the new hlog
    */
-  void postLogArchive(Path oldPath, Path newPath) throws IOException;
+  public void postLogArchive(Path oldPath, Path newPath) throws IOException;
 
   /**
    * A request was made that the WAL be rolled.
    */
-  void logRollRequested();
+  public void logRollRequested();
 
   /**
    * The WAL is about to close.
    */
-  void logCloseRequested();
+  public void logCloseRequested();
 
   /**
   * Called before each write.
@@ -78,9 +78,8 @@ public interface WALActionsListener {
   * @param logKey
   * @param logEdit
   */
-  void visitLogEntryBeforeWrite(
-    HRegionInfo info, HLogKey logKey, WALEdit logEdit
-  );
+ public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
+   WALEdit logEdit);
 
   /**
    *
@@ -88,8 +87,7 @@ public interface WALActionsListener {
    * @param logKey
    * @param logEdit
    */
-  void visitLogEntryBeforeWrite(
-    HTableDescriptor htd, HLogKey logKey, WALEdit logEdit
-  );
+ public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
+   WALEdit logEdit);
 
 }
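
[Editor's note, not part of this commit: the WALActionsListener hunks above appear to cover the whole callback surface, so a hypothetical do-nothing listener that only counts roll requests may help show how it is meant to be implemented. It would be registered through HLog.registerWALActionsListener; the class name is invented and only the signatures shown above are assumed.]

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
    import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

    /** Hypothetical listener that only counts roll requests; everything else is a no-op. */
    public class RollCountingListener implements WALActionsListener {
      private final AtomicLong rollRequests = new AtomicLong();

      public long getRollRequestCount() {
        return rollRequests.get();
      }

      @Override
      public void preLogRoll(Path oldPath, Path newPath) throws IOException {}

      @Override
      public void postLogRoll(Path oldPath, Path newPath) throws IOException {}

      @Override
      public void preLogArchive(Path oldPath, Path newPath) throws IOException {}

      @Override
      public void postLogArchive(Path oldPath, Path newPath) throws IOException {}

      @Override
      public void logRollRequested() {
        rollRequests.incrementAndGet();
      }

      @Override
      public void logCloseRequested() {}

      @Override
      public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit) {}

      @Override
      public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {}
    }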

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java?rev=1501908&r1=1501907&r2=1501908&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java Wed Jul 10 18:49:33 2013
@@ -41,63 +41,61 @@ public interface ReplicationSourceInterf
    * @param peerClusterId the id of the peer cluster
    * @throws IOException
    */
-  void init(
-    final Configuration conf,
-    final FileSystem fs,
-    final ReplicationSourceManager manager,
-    final Stoppable stopper,
-    final String peerClusterId
-  ) throws IOException;
+  public void init(final Configuration conf,
+                   final FileSystem fs,
+                   final ReplicationSourceManager manager,
+                   final Stoppable stopper,
+                   final String peerClusterId) throws IOException;
 
   /**
    * Add a log to the list of logs to replicate
    * @param log path to the log to replicate
    */
-  void enqueueLog(Path log);
+  public void enqueueLog(Path log);
 
   /**
    * Get the current log that's replicated
    * @return the current log
    */
-  Path getCurrentPath();
+  public Path getCurrentPath();
 
   /**
    * Start the replication
    */
-  void startup();
+  public void startup();
 
   /**
    * End the replication
    * @param reason why it's terminating
    */
-  void terminate(String reason);
+  public void terminate(String reason);
 
   /**
    * End the replication
    * @param reason why it's terminating
    * @param cause the error that's causing it
    */
-  void terminate(String reason, Exception cause);
+  public void terminate(String reason, Exception cause);
 
   /**
    * Get the id that the source is replicating to
    *
    * @return peer cluster id
    */
-  String getPeerClusterZnode();
+  public String getPeerClusterZnode();
 
   /**
    * Get the id that the source is replicating to.
    *
    * @return peer cluster id
    */
-  String getPeerClusterId();
+  public String getPeerClusterId();
 
   /**
    * Get a string representation of the current statistics
    * for this source
    * @return printable stats
    */
-  String getStats();
+  public String getStats();
 
 }
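
[Editor's note, not part of this commit: to round off the ReplicationSourceInterface hunk, a hypothetical inert source covering the methods shown above. Only the signatures in the hunk are assumed; the class name is invented, and ReplicationSourceManager is taken to live in the same replication.regionserver package.]

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.Stoppable;
    import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface;
    import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager;

    /** Hypothetical source that records its configuration and replicates nothing. */
    public class InertReplicationSource implements ReplicationSourceInterface {
      private String peerClusterId;
      private Path currentPath;

      @Override
      public void init(final Configuration conf, final FileSystem fs,
          final ReplicationSourceManager manager, final Stoppable stopper,
          final String peerClusterId) throws IOException {
        this.peerClusterId = peerClusterId;
      }

      @Override
      public void enqueueLog(Path log) {
        this.currentPath = log;   // pretend the newest enqueued log is the current one
      }

      @Override
      public Path getCurrentPath() {
        return currentPath;
      }

      @Override
      public void startup() {}

      @Override
      public void terminate(String reason) {}

      @Override
      public void terminate(String reason, Exception cause) {}

      @Override
      public String getPeerClusterZnode() {
        return peerClusterId;
      }

      @Override
      public String getPeerClusterId() {
        return peerClusterId;
      }

      @Override
      public String getStats() {
        return "inert source for peer " + peerClusterId;
      }
    }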


