accumulo-commits mailing list archives

From: vi...@apache.org
Subject: svn commit: r1433166 [14/20] - in /accumulo/branches/ACCUMULO-259: ./ assemble/ assemble/platform/ assemble/scripts/ assemble/scripts/init.d/ bin/ conf/examples/1GB/native-standalone/ conf/examples/1GB/standalone/ conf/examples/2GB/native-standalone/ c...
Date: Mon, 14 Jan 2013 22:03:34 GMT
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java Mon Jan 14 22:03:24 2013
@@ -28,7 +28,6 @@ import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
 import java.security.SecureRandom;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -197,6 +196,9 @@ import org.apache.accumulo.server.zookee
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.start.Platform;
+import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
+import org.apache.accumulo.start.classloader.vfs.ContextManager;
+import org.apache.accumulo.start.classloader.vfs.ContextManager.ContextConfig;
 import org.apache.commons.collections.map.LRUMap;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -233,8 +235,6 @@ public class TabletServer extends Abstra
   
   private ServerConfiguration serverConfig;
   private LogSorter logSorter = null;
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
   public TabletServer(ServerConfiguration conf, FileSystem fs) {
     super();
@@ -345,6 +345,7 @@ public class TabletServer extends Abstra
       final long maxIdle = conf.getTimeInMillis(Property.TSERV_SESSION_MAXIDLE);
       
       TimerTask r = new TimerTask() {
+        @Override
         public void run() {
           sweep(maxIdle);
         }
@@ -445,6 +446,7 @@ public class TabletServer extends Abstra
       if (session != null) {
         final long removeTime = session.lastAccessTime;
         TimerTask r = new TimerTask() {
+          @Override
           public void run() {
             Session sessionToCleanup = null;
             synchronized (SessionManager.this) {
@@ -536,7 +538,7 @@ public class TabletServer extends Abstra
           }
           
           activeScans.add(new ActiveScan(ss.client, ss.user, ss.extent.getTableId().toString(), ct - ss.startTime, ct - ss.lastAccessTime, ScanType.SINGLE,
-              state, ss.extent.toThrift(), Translator.translate(ss.columnSet, Translator.CT), ss.ssiList, ss.ssio));
+              state, ss.extent.toThrift(), Translator.translate(ss.columnSet, Translator.CT), ss.ssiList, ss.ssio, ss.auths.getAuthorizationsBB()));
           
         } else if (session instanceof MultiScanSession) {
           MultiScanSession mss = (MultiScanSession) session;
@@ -562,7 +564,8 @@ public class TabletServer extends Abstra
           }
           
           activeScans.add(new ActiveScan(mss.client, mss.user, mss.threadPoolExtent.getTableId().toString(), ct - mss.startTime, ct - mss.lastAccessTime,
-              ScanType.BATCH, state, mss.threadPoolExtent.toThrift(), Translator.translate(mss.columnSet, Translator.CT), mss.ssiList, mss.ssio));
+              ScanType.BATCH, state, mss.threadPoolExtent.toThrift(), Translator.translate(mss.columnSet, Translator.CT), mss.ssiList, mss.ssio, mss.auths
+                  .getAuthorizationsBB()));
         }
       }
       
@@ -711,7 +714,7 @@ public class TabletServer extends Abstra
     public Tablet currentTablet;
     public MapCounter<Tablet> successfulCommits = new MapCounter<Tablet>();
     Map<KeyExtent,Long> failures = new HashMap<KeyExtent,Long>();
-    HashSet<KeyExtent> authFailures = new HashSet<KeyExtent>();
+    HashMap<KeyExtent, SecurityErrorCode> authFailures = new HashMap<KeyExtent, SecurityErrorCode>();
     public Violations violations;
     public AuthInfo credentials;
     public long totalUpdates = 0;
@@ -730,6 +733,7 @@ public class TabletServer extends Abstra
     public HashSet<Column> columnSet;
     public List<IterInfo> ssiList;
     public Map<String,Map<String,String>> ssio;
+    public Authorizations auths;
     public long entriesReturned = 0;
     public Stat nbTimes = new Stat();
     public long batchCount = 0;
@@ -840,7 +844,7 @@ public class TabletServer extends Abstra
   public AccumuloConfiguration getSystemConfiguration() {
     return serverConfig.getConfiguration();
   }
-
+  
   TransactionWatcher watcher = new TransactionWatcher();
   
   private class ThriftClientHandler extends ClientServiceHandler implements TabletClientService.Iface {
@@ -875,7 +879,7 @@ public class TabletServer extends Abstra
       if (!security.canPerformSystemActions(credentials))
         throw new ThriftSecurityException(credentials.user, SecurityErrorCode.PERMISSION_DENIED);
       
-      ArrayList<TKeyExtent> failures = new ArrayList<TKeyExtent>();
+      List<TKeyExtent> failures = new ArrayList<TKeyExtent>();
       
       for (Entry<TKeyExtent,Map<String,MapFileInfo>> entry : files.entrySet()) {
         TKeyExtent tke = entry.getKey();
@@ -893,9 +897,7 @@ public class TabletServer extends Abstra
             failures.add(tke);
           }
         }
-        
       }
-      
       return failures;
     }
     
@@ -914,17 +916,18 @@ public class TabletServer extends Abstra
       @Override
       public void run() {
         
-        ScanSession scanSession = (ScanSession) sessionManager.getSession(scanID);
+        final ScanSession scanSession = (ScanSession) sessionManager.getSession(scanID);
         String oldThreadName = Thread.currentThread().getName();
         
         try {
           runState.set(ScanRunState.RUNNING);
-          Thread.currentThread().setName(
-              "User: " + scanSession.user + " Start: " + scanSession.startTime + " Client: " + scanSession.client + " Tablet: " + scanSession.extent);
-
+          
           if (isCancelled() || scanSession == null)
             return;
           
+          Thread.currentThread().setName(
+              "User: " + scanSession.user + " Start: " + scanSession.startTime + " Client: " + scanSession.client + " Tablet: " + scanSession.extent);
+          
           Tablet tablet = onlineTablets.get(scanSession.extent);
           
           if (tablet == null) {
@@ -977,13 +980,15 @@ public class TabletServer extends Abstra
         String oldThreadName = Thread.currentThread().getName();
         
         try {
-          runState.set(ScanRunState.RUNNING);
-          Thread.currentThread().setName("Client: " + session.client + " User: " + session.user + " Start: " + session.startTime + " Table: ");
           if (isCancelled() || session == null)
             return;
           
           TableConfiguration acuTableConf = ServerConfiguration.getTableConfiguration(instance, session.threadPoolExtent.getTableId().toString());
           long maxResultsSize = acuTableConf.getMemoryInBytes(Property.TABLE_SCAN_MAXMEM);
+          
+          runState.set(ScanRunState.RUNNING);
+          Thread.currentThread().setName("Client: " + session.client + " User: " + session.user + " Start: " + session.startTime + " Table: ");
+          
           long bytesAdded = 0;
           long maxScanTime = 4000;
           
@@ -1125,14 +1130,15 @@ public class TabletServer extends Abstra
       scanSession.columnSet = new HashSet<Column>();
       scanSession.ssiList = ssiList;
       scanSession.ssio = ssio;
+      scanSession.auths = new Authorizations(authorizations);
       scanSession.interruptFlag = new AtomicBoolean();
       
       for (TColumn tcolumn : columns) {
         scanSession.columnSet.add(new Column(tcolumn));
       }
       
-      scanSession.scanner = tablet.createScanner(new Range(range), batchSize, scanSession.columnSet, new Authorizations(authorizations), ssiList, ssio,
-          isolated, scanSession.interruptFlag);
+      scanSession.scanner = tablet.createScanner(new Range(range), batchSize, scanSession.columnSet, scanSession.auths, ssiList, ssio, isolated,
+          scanSession.interruptFlag);
       
       long sid = sessionManager.createSession(scanSession, true);
       
@@ -1381,8 +1387,7 @@ public class TabletServer extends Abstra
       long t1 = System.currentTimeMillis();
       if (us.currentTablet != null && us.currentTablet.getExtent().equals(keyExtent))
         return;
-      
-      if (us.currentTablet == null && (us.failures.containsKey(keyExtent) || us.authFailures.contains(keyExtent))) {
+      if (us.currentTablet == null && (us.failures.containsKey(keyExtent) || us.authFailures.containsKey(keyExtent))) {
         // if there were previous failures, then do not accept additional writes
         return;
       }
@@ -1409,7 +1414,7 @@ public class TabletServer extends Abstra
           long t2 = System.currentTimeMillis();
           us.authTimes.addStat(t2 - t1);
           us.currentTablet = null;
-          us.authFailures.add(keyExtent);
+          us.authFailures.put(keyExtent, SecurityErrorCode.PERMISSION_DENIED);
           if (updateMetrics.isEnabled())
             updateMetrics.add(TabletServerUpdateMetrics.permissionErrors, 0);
           return;
@@ -1419,7 +1424,7 @@ public class TabletServer extends Abstra
         long t2 = System.currentTimeMillis();
         us.authTimes.addStat(t2 - t1);
         us.currentTablet = null;
-        us.authFailures.add(keyExtent);
+        us.authFailures.put(keyExtent, e.getCode());
         if (updateMetrics.isEnabled())
           updateMetrics.add(TabletServerUpdateMetrics.permissionErrors, 0);
         return;
@@ -1625,12 +1630,11 @@ public class TabletServer extends Abstra
         log.debug(String.format("Violations: %d, first %s occurs %d", violations.size(), first.violationDescription, first.numberOfViolatingMutations));
       }
       if (us.authFailures.size() > 0) {
-        KeyExtent first = us.authFailures.iterator().next();
+        KeyExtent first = us.authFailures.keySet().iterator().next();
         log.debug(String.format("Authentication Failures: %d, first %s", us.authFailures.size(), first.toString()));
       }
       
-      return new UpdateErrors(Translator.translate(us.failures, Translator.KET), Translator.translate(violations, Translator.CVST), Translator.translate(
-          us.authFailures, Translator.KET));
+      return new UpdateErrors(Translator.translate(us.failures, Translator.KET), Translator.translate(violations, Translator.CVST), Translator.translate(us.authFailures, Translator.KET));
     }
     
     @Override
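
The authFailures hunks above change the bookkeeping from a HashSet<KeyExtent> to a HashMap<KeyExtent,SecurityErrorCode>, so the UpdateErrors returned to the client carries why each extent was rejected rather than just which ones were. A minimal sketch of the pattern, using String and a stand-in enum in place of KeyExtent and the Thrift-generated SecurityErrorCode:

    import java.util.HashMap;
    import java.util.Map;

    public class AuthFailureSketch {
      enum SecurityErrorCode { PERMISSION_DENIED, BAD_AUTHORIZATIONS }

      public static void main(String[] args) {
        Map<String,SecurityErrorCode> authFailures = new HashMap<String,SecurityErrorCode>();
        // before this change a HashSet recorded that an extent failed,
        // but the cause was lost by the time UpdateErrors was built
        authFailures.put("2;m;a", SecurityErrorCode.PERMISSION_DENIED);
        if (authFailures.size() > 0) {
          String first = authFailures.keySet().iterator().next();
          System.out.println(String.format("Authentication Failures: %d, first %s (%s)",
              authFailures.size(), first, authFailures.get(first)));
        }
      }
    }
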
@@ -1759,6 +1763,7 @@ public class TabletServer extends Abstra
         } finally {
           if (fatal) {
             Halt.halt(1, new Runnable() {
+              @Override
               public void run() {
                 logGCInfo(getSystemConfiguration());
               }
@@ -1774,6 +1779,7 @@ public class TabletServer extends Abstra
       
       if (tabletServerLock != null && tabletServerLock.wasLockAcquired() && !tabletServerLock.isLocked()) {
         Halt.halt(1, new Runnable() {
+          @Override
           public void run() {
             log.info("Tablet server no longer holds lock during checkPermission() : " + request + ", exiting");
             logGCInfo(getSystemConfiguration());
@@ -1846,6 +1852,7 @@ public class TabletServer extends Abstra
       // Root tablet assignment must take place immediately
       if (extent.isRootTablet()) {
         new Daemon("Root Tablet Assignment") {
+          @Override
           public void run() {
             ah.run();
             if (onlineTablets.containsKey(extent)) {
@@ -1941,6 +1948,7 @@ public class TabletServer extends Abstra
       checkPermission(credentials, lock, true, "halt");
       
       Halt.halt(0, new Runnable() {
+        @Override
         public void run() {
           log.info("Master requested tablet server halt");
           logGCInfo(getSystemConfiguration());
@@ -1968,6 +1976,7 @@ public class TabletServer extends Abstra
       return statsKeeper.getTabletStats();
     }
     
+    @Override
     public List<ActiveScan> getActiveScans(TInfo tinfo, AuthInfo credentials) throws ThriftSecurityException, TException {
       try {
         checkPermission(credentials, null, true, "getScans");
@@ -2044,8 +2053,7 @@ public class TabletServer extends Abstra
       Path logDir = new Path(Constants.getWalDirectory(acuConf), myname);
       Set<String> loggers = new HashSet<String>();
       logger.getLoggers(loggers);
-      nextFile:
-      for (String filename : filenames) {
+      nextFile: for (String filename : filenames) {
         for (String logger : loggers) {
           if (logger.contains(filename))
             continue nextFile;
@@ -2077,7 +2085,7 @@ public class TabletServer extends Abstra
               log.warn("Failed to delete walog " + source);
             if (fs.delete(new Path(Constants.getRecoveryDir(acuConf), filename), true))
               log.info("Deleted any recovery log " + filename);
-
+            
           }
         } catch (IOException e) {
           log.warn("Error attempting to delete write-ahead log " + filename + ": " + e);
@@ -2116,6 +2124,7 @@ public class TabletServer extends Abstra
   
   private class MajorCompactor implements Runnable {
     
+    @Override
     public void run() {
       while (!majorCompactorDisabled) {
         try {
@@ -2274,6 +2283,7 @@ public class TabletServer extends Abstra
       this.saveState = saveState;
     }
     
+    @Override
     public void run() {
       
       Tablet t = null;
@@ -2366,6 +2376,7 @@ public class TabletServer extends Abstra
       this.retryAttempt = retryAttempt;
     }
     
+    @Override
     public void run() {
       log.info(clientAddress + ": got assignment from master: " + extent);
       
@@ -2485,7 +2496,7 @@ public class TabletServer extends Abstra
           
           Assignment assignment = new Assignment(extentToOpen, getTabletSession());
           TabletStateStore.setLocation(assignment);
-
+          
           synchronized (openingTablets) {
             synchronized (onlineTablets) {
               openingTablets.remove(extentToOpen);
@@ -2542,11 +2553,11 @@ public class TabletServer extends Abstra
   private FileSystem fs;
   private Instance instance;
   
-  private SortedMap<KeyExtent,Tablet> onlineTablets = Collections.synchronizedSortedMap(new TreeMap<KeyExtent,Tablet>());
-  private SortedSet<KeyExtent> unopenedTablets = Collections.synchronizedSortedSet(new TreeSet<KeyExtent>());
-  private SortedSet<KeyExtent> openingTablets = Collections.synchronizedSortedSet(new TreeSet<KeyExtent>());
+  private final SortedMap<KeyExtent,Tablet> onlineTablets = Collections.synchronizedSortedMap(new TreeMap<KeyExtent,Tablet>());
+  private final SortedSet<KeyExtent> unopenedTablets = Collections.synchronizedSortedSet(new TreeSet<KeyExtent>());
+  private final SortedSet<KeyExtent> openingTablets = Collections.synchronizedSortedSet(new TreeSet<KeyExtent>());
   @SuppressWarnings("unchecked")
-  private Map<KeyExtent,Long> recentlyUnloadedCache = (Map<KeyExtent, Long>)Collections.synchronizedMap(new LRUMap(1000));
+  private final Map<KeyExtent,Long> recentlyUnloadedCache = Collections.synchronizedMap(new LRUMap(1000));
   
   private Thread majorCompactorThread;
   
@@ -2572,7 +2583,7 @@ public class TabletServer extends Abstra
   private static ObjectName OBJECT_NAME = null;
   
   static AtomicLong seekCount = new AtomicLong(0);
-
+  
   public TabletStatsKeeper getStatsKeeper() {
     return statsKeeper;
   }
@@ -2593,10 +2604,10 @@ public class TabletServer extends Abstra
     entry.logSet = logSet;
     MetadataTable.addLogEntry(SecurityConstants.getSystemCredentials(), entry, getLock());
   }
-
+  
   private int startServer(AccumuloConfiguration conf, Property portHint, TProcessor processor, String threadName) throws UnknownHostException {
     ServerPort sp = TServerUtils.startServer(conf, portHint, processor, this.getClass().getSimpleName(), threadName, Property.TSERV_PORTSEARCH,
-        Property.TSERV_MINTHREADS, Property.TSERV_THREADCHECK);
+        Property.TSERV_MINTHREADS, Property.TSERV_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
     this.server = sp.server;
     return sp.port;
   }
@@ -2661,6 +2672,7 @@ public class TabletServer extends Abstra
         @Override
         public void lostLock(final LockLossReason reason) {
           Halt.halt(0, new Runnable() {
+            @Override
             public void run() {
               if (!serverStopRequested)
                 log.fatal("Lost tablet server lock (reason = " + reason + "), exiting.");
@@ -2670,7 +2682,7 @@ public class TabletServer extends Abstra
         }
       };
       
-      byte[] lockContent = new ServerServices(getClientAddressString(), Service.TSERV_CLIENT).toString().getBytes(utf8);
+      byte[] lockContent = new ServerServices(getClientAddressString(), Service.TSERV_CLIENT).toString().getBytes();
       for (int i = 0; i < 120 / 5; i++) {
         zoo.putPersistentData(zPath, new byte[0], NodeExistsPolicy.SKIP);
         
@@ -2693,7 +2705,7 @@ public class TabletServer extends Abstra
   // main loop listens for client requests
   public void run() {
     SecurityUtil.serverLogin();
-
+    
     int clientPort = 0;
     try {
       clientPort = startTabletClientService();
@@ -2708,7 +2720,7 @@ public class TabletServer extends Abstra
     announceExistence();
     
     ThreadPoolExecutor distWorkQThreadPool = new SimpleThreadPool(getSystemConfiguration().getCount(Property.TSERV_WORKQ_THREADS), "distributed work queue");
-
+    
     bulkFailedCopyQ = new DistributedWorkQueue(ZooUtil.getRoot(instance) + Constants.ZBULK_FAILED_COPYQ);
     try {
       bulkFailedCopyQ.startProcessing(new BulkFailedCopyProcessor(), distWorkQThreadPool);
@@ -2722,7 +2734,7 @@ public class TabletServer extends Abstra
       log.error("Error setting watches for recoveries");
       throw new RuntimeException(ex);
     }
-
+    
     try {
       OBJECT_NAME = new ObjectName("accumulo.server.metrics:service=TServerInfo,name=TabletServerMBean,instance=" + Thread.currentThread().getName());
       // Do this because interface not in same package.
@@ -2751,7 +2763,7 @@ public class TabletServer extends Abstra
           // connection
           masterHost = getMasterAddress();
           iface = masterConnection(masterHost);
-          TServiceClient client = (TServiceClient) iface;
+          TServiceClient client = iface;
           
           // if while loop does not execute at all and mm != null,
           // then
@@ -3019,6 +3031,72 @@ public class TabletServer extends Abstra
       }
     }
     
+    try {
+      AccumuloVFSClassLoader.getContextManager().setContextConfig(new ContextManager.ContextsConfig() {
+        @Override
+        public ContextConfig getContextConfig(String context) {
+          String key = Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + context;
+          
+          String uris = null;
+          boolean preDelegate = true;
+
+          Iterator<Entry<String,String>> iter = getSystemConfiguration().iterator();
+          while (iter.hasNext()) {
+            Entry<String,String> entry = iter.next();
+            if (entry.getKey().equals(key)) {
+              uris = entry.getValue();
+            }
+            
+            if (entry.getKey().equals(key + ".delegation") && entry.getValue().trim().equalsIgnoreCase("post")) {
+              preDelegate = false;
+            }
+          }
+          
+          if (uris != null)
+            return new ContextConfig(uris, preDelegate);
+
+          return null;
+        }
+      });
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    
+    // A task that cleans up unused classloader contexts
+    TimerTask contextCleaner = new TimerTask() {
+      @Override
+      public void run() {
+        ArrayList<KeyExtent> extents;
+        
+        synchronized (onlineTablets) {
+          extents = new ArrayList<KeyExtent>(onlineTablets.keySet());
+        }
+        
+        Set<Text> tables = new HashSet<Text>();
+        
+        for (KeyExtent keyExtent : extents) {
+          tables.add(keyExtent.getTableId());
+        }
+        
+        HashSet<String> contexts = new HashSet<String>();
+        
+        for (Text tableid : tables) {
+          String context = getTableConfiguration(new KeyExtent(tableid, null, null)).get(Property.TABLE_CLASSPATH);
+          if (!context.equals("")) {
+            contexts.add(context);
+          }
+        }
+        
+        try {
+          AccumuloVFSClassLoader.getContextManager().removeUnusedContexts(contexts);
+        } catch (IOException e) {
+          log.warn(e.getMessage(), e);
+        }
+      }
+    };
+    
+    SimpleTimer.getInstance().schedule(contextCleaner, 60000, 60000);
+    
     FileSystemMonitor.start(getSystemConfiguration(), Property.TSERV_MONITOR_FS);
     
     TimerTask gcDebugTask = new TimerTask() {
@@ -3058,8 +3136,8 @@ public class TabletServer extends Abstra
       TableInfo table = tables.get(tableId);
       if (table == null) {
         table = new TableInfo();
-        table.minor = new Compacting();
-        table.major = new Compacting();
+        table.minors = new Compacting();
+        table.majors = new Compacting();
         tables.put(tableId, table);
       }
       Tablet tablet = entry.getValue();
@@ -3075,13 +3153,13 @@ public class TabletServer extends Abstra
       long recsInMemory = tablet.getNumEntriesInMemory();
       table.recsInMemory += recsInMemory;
       if (tablet.minorCompactionRunning())
-        table.minor.running++;
+        table.minors.running++;
       if (tablet.minorCompactionQueued())
-        table.minor.queued++;
+        table.minors.queued++;
       if (tablet.majorCompactionRunning())
-        table.major.running++;
+        table.majors.running++;
       if (tablet.majorCompactionQueued())
-        table.major.queued++;
+        table.majors.queued++;
     }
     
     for (Entry<String,MapCounter<ScanRunState>> entry : scanCounts.entrySet()) {
@@ -3148,28 +3226,28 @@ public class TabletServer extends Abstra
       log.error("Uncaught exception in TabletServer.main, exiting", ex);
     }
   }
-
+  
   private static void ensureHdfsSyncIsEnabled(FileSystem fs) {
     if (fs instanceof DistributedFileSystem) {
       if (!fs.getConf().getBoolean("dfs.durable.sync", false) && !fs.getConf().getBoolean("dfs.support.append", false)) {
-        String msg = "Must set dfs.durable.sync OR dfs.support.append to true.  Which one needs to be set depends on your version of HDFS.  See ACCUMULO-623. \n"+
-        		"HADOOP RELEASE          VERSION           SYNC NAME             DEFAULT\n"+
-            "Apache Hadoop           0.20.205          dfs.support.append    false\n"+
-            "Apache Hadoop            0.23.x           dfs.support.append    true\n"+
-            "Apache Hadoop             1.0.x           dfs.support.append    false\n"+
-            "Apache Hadoop             1.1.x           dfs.durable.sync      true\n"+
-            "Apache Hadoop          2.0.0-2.0.2        dfs.support.append    true\n"+
-            "Cloudera CDH             3u0-3u3             ????               true\n"+
-            "Cloudera CDH               3u4            dfs.support.append    true\n"+
-            "Hortonworks HDP           `1.0            dfs.support.append    false\n"+
-            "Hortonworks HDP           `1.1            dfs.support.append    false";
+        String msg = "Must set dfs.durable.sync OR dfs.support.append to true.  Which one needs to be set depends on your version of HDFS.  See ACCUMULO-623. \n"
+            + "HADOOP RELEASE          VERSION           SYNC NAME             DEFAULT\n"
+            + "Apache Hadoop           0.20.205          dfs.support.append    false\n"
+            + "Apache Hadoop            0.23.x           dfs.support.append    true\n"
+            + "Apache Hadoop             1.0.x           dfs.support.append    false\n"
+            + "Apache Hadoop             1.1.x           dfs.durable.sync      true\n"
+            + "Apache Hadoop          2.0.0-2.0.2        dfs.support.append    true\n"
+            + "Cloudera CDH             3u0-3u3             ????               true\n"
+            + "Cloudera CDH               3u4            dfs.support.append    true\n"
+            + "Hortonworks HDP           `1.0            dfs.support.append    false\n"
+            + "Hortonworks HDP           `1.1            dfs.support.append    false";
         log.fatal(msg);
         System.exit(-1);
       }
     }
     
   }
-
+  
   /**
    * Copy local walogs into HDFS on an upgrade
    * 
@@ -3192,7 +3270,7 @@ public class TabletServer extends Abstra
         log.debug("Local walog dir " + localWalDirectory + " not found ");
         continue;
       }
-
+      
       for (FileStatus file : localfs.listStatus(new Path(localWalDirectory))) {
         String name = file.getPath().getName();
         try {
@@ -3223,7 +3301,7 @@ public class TabletServer extends Abstra
       }
     }
   }
-
+  
   public void minorCompactionFinished(CommitSession tablet, String newDatafile, int walogSeq) throws IOException {
     totalMinorCompactions++;
     logger.minorCompactionFinished(tablet, newDatafile, walogSeq);
@@ -3431,10 +3509,12 @@ public class TabletServer extends Abstra
     return 0;
   }
   
+  @Override
   protected ObjectName getObjectName() {
     return OBJECT_NAME;
   }
   
+  @Override
   protected String getMetricsPrefix() {
     return METRICS_PREFIX;
   }
@@ -3442,6 +3522,7 @@ public class TabletServer extends Abstra
   public TableConfiguration getTableConfiguration(KeyExtent extent) {
     return ServerConfiguration.getTableConfiguration(instance, extent.getTableId().toString());
   }
+  
   public DfsLogger.ServerResources getServerConfig() {
     return new DfsLogger.ServerResources() {
       
@@ -3461,5 +3542,5 @@ public class TabletServer extends Abstra
       }
     };
   }
-
+  
 }
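
The ContextsConfig registered near the end of run() above resolves per-table classloader contexts from system configuration properties: a context's classpath URIs live under Property.VFS_CONTEXT_CLASSPATH_PROPERTY plus the context name, and an optional ".delegation" suffix set to "post" switches to post-delegating class loading. A self-contained sketch of that lookup, assuming a literal prefix of "general.vfs.context.classpath." (the diff shows only the Property constant, so treat the string as illustrative):

    import java.util.HashMap;
    import java.util.Map;

    public class ContextLookupSketch {
      // assumed value of Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey()
      static final String PREFIX = "general.vfs.context.classpath.";

      // returns "uris|preDelegate" for a named context, or null if undefined
      static String lookup(Map<String,String> conf, String context) {
        String uris = conf.get(PREFIX + context);
        if (uris == null)
          return null; // no classpath configured for this context
        // a ".delegation" suffix of "post" flips the loader to post-delegation
        String delegation = conf.get(PREFIX + context + ".delegation");
        boolean preDelegate = delegation == null || !delegation.trim().equalsIgnoreCase("post");
        return uris + "|" + preDelegate;
      }

      public static void main(String[] args) {
        Map<String,String> conf = new HashMap<String,String>();
        conf.put(PREFIX + "app1", "hdfs://namenode:8020/iterators/app1/.*.jar");
        conf.put(PREFIX + "app1.delegation", "post");
        System.out.println(lookup(conf, "app1")); // prints the URIs with preDelegate=false
      }
    }
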

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java Mon Jan 14 22:03:24 2013
@@ -45,14 +45,14 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.file.blockfile.cache.LruBlockCache;
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.LoggingRunnable;
-import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
+import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.tabletserver.FileManager.ScanFileManager;
 import org.apache.accumulo.server.tabletserver.Tablet.MajorCompactionReason;
 import org.apache.accumulo.server.util.time.SimpleTimer;
-import org.apache.accumulo.start.classloader.AccumuloClassLoader;
+import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.log4j.Logger;
 
@@ -199,7 +199,7 @@ public class TabletServerResourceManager
     fileManager = new FileManager(conf, fs, maxOpenFiles, _dCache, _iCache);
     
     try {
-      Class<? extends MemoryManager> clazz = AccumuloClassLoader.loadClass(acuConf.get(Property.TSERV_MEM_MGMT), MemoryManager.class);
+      Class<? extends MemoryManager> clazz = AccumuloVFSClassLoader.loadClass(acuConf.get(Property.TSERV_MEM_MGMT), MemoryManager.class);
       memoryManager = clazz.newInstance();
       memoryManager.init(conf);
       log.debug("Loaded memory manager : " + memoryManager.getClass().getName());
@@ -250,7 +250,7 @@ public class TabletServerResourceManager
   }
   
   private class MemoryManagementFramework {
-    private Map<KeyExtent,TabletStateImpl> tabletReports;
+    private final Map<KeyExtent,TabletStateImpl> tabletReports;
     private LinkedBlockingQueue<TabletStateImpl> memUsageReports;
     private long lastMemCheckTime = System.currentTimeMillis();
     private long maxMem;
@@ -378,7 +378,7 @@ public class TabletServerResourceManager
     }
   }
   
-  private Object commitHold = new String("");
+  private final Object commitHold = new Object();
   private volatile boolean holdCommits = false;
   private long holdStartTime;
   

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletStatsKeeper.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletStatsKeeper.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletStatsKeeper.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletStatsKeeper.java Mon Jan 14 22:03:24 2013
@@ -56,7 +56,7 @@ public class TabletStatsKeeper {
         data.count += count;
         data.num++;
         data.elapsed += t;
-        data.queueTime += t;
+        data.queueTime += q;
         data.sumDev += t * t;
         data.queueSumDev += q * q;
         if (data.elapsed < 0 || data.sumDev < 0 || data.queueSumDev < 0 || data.queueTime < 0)
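
The one-character fix above corrects the queue statistics: queueTime had been accumulating the elapsed time t instead of the queue time q, inflating reported queue times. A stand-in sketch (the real ActionStats fields are Thrift-generated):

    public class StatsFixSketch {
      static long elapsedSum, queueSum;

      static void update(long t, long q) {
        elapsedSum += t;
        queueSum += q; // was: queueSum += t;
      }

      public static void main(String[] args) {
        update(120, 30);
        System.out.println("elapsed=" + elapsedSum + " queued=" + queueSum);
      }
    }
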

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/UniqueNameAllocator.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/UniqueNameAllocator.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/UniqueNameAllocator.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/UniqueNameAllocator.java Mon Jan 14 22:03:24 2013
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.server.tabletserver;
 
-import java.nio.charset.Charset;
 import java.util.Random;
 
 import org.apache.accumulo.core.Constants;
@@ -37,8 +36,6 @@ public class UniqueNameAllocator {
   private long maxAllocated = 0;
   private String nextNamePath;
   private Random rand;
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
   private UniqueNameAllocator() {
     nextNamePath = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZNEXT_FILE;
@@ -55,7 +52,7 @@ public class UniqueNameAllocator {
           public byte[] mutate(byte[] currentValue) throws Exception {
             long l = Long.parseLong(new String(currentValue), Character.MAX_RADIX);
             l += allocate;
-            return Long.toString(l, Character.MAX_RADIX).getBytes(utf8);
+            return Long.toString(l, Character.MAX_RADIX).getBytes();
           }
         });
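
The mutator above keeps the shared name counter in ZooKeeper as a long encoded in Character.MAX_RADIX (base 36). A self-contained sketch of just the parse/bump/encode round trip, leaving out the ZooReaderWriter mutate call it runs inside:

    public class NameCounterSketch {
      static byte[] bump(byte[] currentValue, long allocate) {
        long l = Long.parseLong(new String(currentValue), Character.MAX_RADIX);
        l += allocate; // reserve a block of names in one atomic update
        return Long.toString(l, Character.MAX_RADIX).getBytes();
      }

      public static void main(String[] args) {
        byte[] v = "zz".getBytes();       // "zz" base 36 == 1295
        v = bump(v, 100);                 // reserve 100 names
        System.out.println(new String(v)); // prints "12r" (1395 in base 36)
      }
    }
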
         

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java Mon Jan 14 22:03:24 2013
@@ -73,7 +73,7 @@ public class DfsLogger {
 
   private LinkedBlockingQueue<DfsLogger.LogWork> workQueue = new LinkedBlockingQueue<DfsLogger.LogWork>();
   
-  private String closeLock = new String("foo");
+  private final Object closeLock = new Object();
   
   private static final DfsLogger.LogWork CLOSED_MARKER = new DfsLogger.LogWork(null, null);
   

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/LogSorter.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/LogSorter.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/LogSorter.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/LogSorter.java Mon Jan 14 22:03:24 2013
@@ -56,7 +56,7 @@ public class LogSorter {
   FileSystem fs;
   AccumuloConfiguration conf;
   
-  private Map<String,LogProcessor> currentWork = Collections.synchronizedMap(new HashMap<String,LogProcessor>());
+  private final Map<String,LogProcessor> currentWork = Collections.synchronizedMap(new HashMap<String,LogProcessor>());
 
   class LogProcessor implements Processor {
     

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/SortedLogRecovery.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/SortedLogRecovery.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/SortedLogRecovery.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/SortedLogRecovery.java Mon Jan 14 22:03:24 2013
@@ -46,6 +46,12 @@ import org.apache.log4j.Logger;
 public class SortedLogRecovery {
   private static final Logger log = Logger.getLogger(SortedLogRecovery.class);
   
+  static class EmptyMapFileException extends Exception {
+    private static final long serialVersionUID = 1L;
+
+    public EmptyMapFileException() { super(); }
+  }
+  
   public SortedLogRecovery() {}
   
   private enum Status {
@@ -87,7 +93,12 @@ public class SortedLogRecovery {
       log.info("Looking at mutations from " + logfile + " for " + extent);
       MultiReader reader = new MultiReader(fs, conf, logfile);
       try {
-        tids[i] = findLastStartToFinish(reader, i, extent, tabletFiles, lastStartToFinish);
+        try {
+          tids[i] = findLastStartToFinish(reader, i, extent, tabletFiles, lastStartToFinish);
+        } catch (EmptyMapFileException ex) {
+          log.info("Ignoring empty map file " + logfile);
+          tids[i] = -1;
+        }
       } finally {
         try {
           reader.close();
@@ -117,13 +128,13 @@ public class SortedLogRecovery {
     }
   }
   
-  int findLastStartToFinish(MultiReader reader, int fileno, KeyExtent extent, Set<String> tabletFiles, LastStartToFinish lastStartToFinish) throws IOException {
+  int findLastStartToFinish(MultiReader reader, int fileno, KeyExtent extent, Set<String> tabletFiles, LastStartToFinish lastStartToFinish) throws IOException, EmptyMapFileException {
     // Scan for tableId for this extent (should always be in the log)
     LogFileKey key = new LogFileKey();
     LogFileValue value = new LogFileValue();
     int tid = -1;
     if (!reader.next(key, value))
-      throw new RuntimeException("Unable to read log entries");
+      throw new EmptyMapFileException();
     if (key.event != OPEN)
       throw new RuntimeException("First log entry value is not OPEN");
     

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/TabletServerLogger.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/TabletServerLogger.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/TabletServerLogger.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/log/TabletServerLogger.java Mon Jan 14 22:03:24 2013
@@ -70,7 +70,7 @@ public class TabletServerLogger {
   private AtomicInteger logSetId = new AtomicInteger();
   
   // Use a ReadWriteLock to allow multiple threads to use the log set, but obtain a write lock to change them
-  private ReentrantReadWriteLock logSetLock = new ReentrantReadWriteLock();
+  private final ReentrantReadWriteLock logSetLock = new ReentrantReadWriteLock();
   
   private final AtomicInteger seqGen = new AtomicInteger();
   
@@ -97,7 +97,7 @@ public class TabletServerLogger {
    *          a test/work pair
    * @throws IOException
    */
-  private static void testLockAndRun(ReadWriteLock rwlock, TestCallWithWriteLock code) throws IOException {
+  private static void testLockAndRun(final ReadWriteLock rwlock, TestCallWithWriteLock code) throws IOException {
     // Get a read lock
     rwlock.readLock().lock();
     try {

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/BulkImportDirectory.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/BulkImportDirectory.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/BulkImportDirectory.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/BulkImportDirectory.java Mon Jan 14 22:03:24 2013
@@ -17,7 +17,10 @@
 package org.apache.accumulo.server.test;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
+import org.apache.accumulo.server.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
@@ -26,20 +29,38 @@ import org.apache.accumulo.server.client
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
+import com.beust.jcommander.Parameter;
+
 public class BulkImportDirectory {
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names={"-s","--source"}, description="directory to import from")
+    String source = null;
+    @Parameter(names={"-f","--failures"}, description="directory to copy failures into: will be deleted before the bulk import")
+    String failures = null;
+    @Parameter(description="<username> <password> <tablename> <sourcedir> <failuredir>")
+    List<String> args = new ArrayList<String>();
+  }
+  
+  
   public static void main(String[] args) throws IOException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    if (args.length != 5)
-      throw new RuntimeException("Usage: bin/accumulo " + BulkImportDirectory.class.getName() + " <username> <password> <tablename> <sourcedir> <failuredir>");
-    
-    final String user = args[0];
-    final byte[] pass = args[1].getBytes();
-    final String tableName = args[2];
-    final String dir = args[3];
-    final String failureDir = args[4];
-    final Path failureDirPath = new Path(failureDir);
     final FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    fs.delete(failureDirPath, true);
-    fs.mkdirs(failureDirPath);
-    HdfsZooInstance.getInstance().getConnector(user, pass).tableOperations().importDirectory(tableName, dir, failureDir, false);
+    Opts opts = new Opts();
+    if (args.length == 5) {
+      System.err.println("Deprecated syntax for BulkImportDirectory, please use the new style (see --help)");
+      final String user = args[0];
+      final byte[] pass = args[1].getBytes();
+      final String tableName = args[2];
+      final String dir = args[3];
+      final String failureDir = args[4];
+      final Path failureDirPath = new Path(failureDir);
+      fs.delete(failureDirPath, true);
+      fs.mkdirs(failureDirPath);
+      HdfsZooInstance.getInstance().getConnector(user, pass).tableOperations().importDirectory(tableName, dir, failureDir, false);
+    } else {
+      opts.parseArgs(BulkImportDirectory.class.getName(), args);
+      fs.delete(new Path(opts.failures), true);
+      fs.mkdirs(new Path(opts.failures));
+      opts.getConnector().tableOperations().importDirectory(opts.tableName, opts.source, opts.failures, false);
+    }
   }
 }
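
BulkImportDirectory, like most of the utilities below, moves from positional argument parsing to a JCommander Opts class. A generic sketch of the pattern, independent of Accumulo's ClientOpts hierarchy:

    import java.util.ArrayList;
    import java.util.List;

    import com.beust.jcommander.JCommander;
    import com.beust.jcommander.Parameter;

    public class OptsSketch {
      static class Opts {
        @Parameter(names = {"-s", "--source"}, description = "directory to import from")
        String source = null;
        @Parameter(names = "-help", help = true)
        boolean help = false;
        @Parameter(description = "<remaining args>")
        List<String> args = new ArrayList<String>();
      }

      public static void main(String[] args) {
        Opts opts = new Opts();
        JCommander jc = new JCommander(opts);
        jc.setProgramName(OptsSketch.class.getName());
        jc.parse(args);
        if (opts.help) {
          jc.usage();
          return;
        }
        System.out.println("source=" + opts.source + " remaining=" + opts.args);
      }
    }
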

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateRFiles.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateRFiles.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateRFiles.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateRFiles.java Mon Jan 14 22:03:24 2013
@@ -20,26 +20,45 @@ import java.util.concurrent.ExecutorServ
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.accumulo.core.cli.Help;
+
+import com.beust.jcommander.Parameter;
+
 public class CreateRFiles {
   
+  static class Opts extends Help {
+    
+    @Parameter(names="--output", description="the destiation directory")
+    String outputDirectory;
+    
+    @Parameter(names="--numThreads", description="number of threads to use when generating files")
+    int numThreads = 4;
+    
+    @Parameter(names="--start", description="the start number for test data")
+    long start = 0;
+    
+    @Parameter(names="--end", description="the maximum number for test data")
+    long end = 10*1000*1000;
+    
+    @Parameter(names="--splits", description="the number of splits in the data")
+    long numsplits = 4;
+  }
+  
   public static void main(String[] args) {
-    String dir = args[0];
-    int numThreads = Integer.parseInt(args[1]);
-    long start = Long.parseLong(args[2]);
-    long end = Long.parseLong(args[3]);
-    long numsplits = Long.parseLong(args[4]);
+    Opts opts = new Opts();
+    opts.parseArgs(CreateRFiles.class.getName(), args);
     
-    long splitSize = Math.round((end - start) / (double) numsplits);
+    long splitSize = Math.round((opts.end - opts.start) / (double) opts.numsplits);
     
-    long currStart = start;
-    long currEnd = start + splitSize;
+    long currStart = opts.start;
+    long currEnd = opts.start + splitSize;
     
-    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
+    ExecutorService threadPool = Executors.newFixedThreadPool(opts.numThreads);
     
     int count = 0;
-    while (currEnd <= end && currStart < currEnd) {
+    while (currEnd <= opts.end && currStart < currEnd) {
       
-      final String tia = String.format("-rFile /%s/mf%05d -timestamp 1 -size 50 -random 56 %d %d 1", dir, count, currEnd - currStart, currStart);
+      final String tia = String.format("--rfile /%s/mf%05d --timestamp 1 --size 50 --random 56 --rows %d --start %d --user root", opts.outputDirectory, count, currEnd - currStart, currStart);
       
       Runnable r = new Runnable() {
         
@@ -58,7 +77,7 @@ public class CreateRFiles {
       
       count++;
       currStart = currEnd;
-      currEnd = Math.min(end, currStart + splitSize);
+      currEnd = Math.min(opts.end, currStart + splitSize);
     }
     
     threadPool.shutdown();

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateRandomRFile.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateRandomRFile.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateRandomRFile.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateRandomRFile.java Mon Jan 14 22:03:24 2013
@@ -49,6 +49,10 @@ public class CreateRandomRFile {
   }
   
   public static void main(String[] args) {
+    if (args.length != 2) {
+      System.err.println("Usage CreateRandomRFile <filename> <size>");
+      System.exit(-1);
+    }
     file = args[0];
     num = Integer.parseInt(args[1]);
     long rands[] = new long[num];

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateTestTable.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateTestTable.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateTestTable.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/CreateTestTable.java Mon Jan 14 22:03:24 2013
@@ -19,110 +19,72 @@ package org.apache.accumulo.server.test;
 import java.util.Map.Entry;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOnDefaultTable;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.Parser;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 public class CreateTestTable {
-  private static Option usernameOpt;
-  private static Option passwordOpt;
-  private static Option readonlyOpt;
-  
-  private static Options opts;
   
-  // root user is needed for tests
-  private static String user;
-  private static String password;
-  private static boolean readOnly = false;
-  private static int count = 10000;
+  static class Opts extends ClientOnDefaultTable {
+    @Parameter(names={"-readonly", "--readonly"}, description="read only")
+    boolean readOnly = false;
+    @Parameter(names={"-count", "--count"}, description="count", required = true)
+    int count = 10000;
+    Opts() { super("mrtest1"); }
+  }
   
-  private static void readBack(Connector conn, int last) throws Exception {
-    Scanner scanner = conn.createScanner("mrtest1", Constants.NO_AUTHS);
+  private static void readBack(Connector conn, Opts opts, ScannerOpts scanOpts) throws Exception {
+    Scanner scanner = conn.createScanner("mrtest1", opts.auths);
+    scanner.setBatchSize(scanOpts.scanBatchSize);
     int count = 0;
     for (Entry<Key,Value> elt : scanner) {
       String expected = String.format("%05d", count);
       assert (elt.getKey().getRow().toString().equals(expected));
       count++;
     }
-    assert (last == count);
-  }
-  
-  public static void setupOptions() {
-    usernameOpt = new Option("username", "username", true, "username");
-    passwordOpt = new Option("password", "password", true, "password");
-    readonlyOpt = new Option("readonly", "readonly", false, "read only");
-    
-    opts = new Options();
-    
-    opts.addOption(usernameOpt);
-    opts.addOption(passwordOpt);
-    opts.addOption(readonlyOpt);
+    assert (opts.count == count);
   }
   
   public static void main(String[] args) throws Exception {
-    setupOptions();
-    
-    Parser p = new BasicParser();
-    CommandLine cl = null;
-    
-    try {
-      cl = p.parse(opts, args);
-    } catch (ParseException e) {
-      throw new RuntimeException(e);
-    }
-    String[] rargs = cl.getArgs();
-    if (rargs.length != 1) {
-      HelpFormatter hf = new HelpFormatter();
-      hf.printHelp(" <count> ", opts);
-    }
-    count = Integer.parseInt(rargs[0]);
-    readOnly = cl.hasOption(readonlyOpt.getOpt());
-    user = cl.getOptionValue(usernameOpt.getOpt(), "root");
-    password = cl.getOptionValue(passwordOpt.getOpt(), "secret");
+    String program = CreateTestTable.class.getName();
+    Opts opts = new Opts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    opts.parseArgs(program, args, bwOpts, scanOpts);
     
     // create the test table within accumulo
-    String table = "mrtest1";
-    Connector connector;
-    
-    connector = HdfsZooInstance.getInstance().getConnector(user, password.getBytes());
+    Connector connector = opts.getConnector();
     
-    if (!readOnly) {
+    if (!opts.readOnly) {
       TreeSet<Text> keys = new TreeSet<Text>();
-      for (int i = 0; i < count / 100; i++) {
+      for (int i = 0; i < opts.count / 100; i++) {
         keys.add(new Text(String.format("%05d", i * 100)));
       }
       
       // presplit
-      connector.tableOperations().create(table);
-      connector.tableOperations().addSplits(table, keys);
-      BatchWriter b = connector.createBatchWriter(table, new BatchWriterConfig());
+      connector.tableOperations().create(opts.getTableName());
+      connector.tableOperations().addSplits(opts.getTableName(), keys);
+      BatchWriter b = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
       
       // populate
-      for (int i = 0; i < count; i++) {
+      for (int i = 0; i < opts.count; i++) {
         Mutation m = new Mutation(new Text(String.format("%05d", i)));
         m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes()));
         b.addMutation(m);
       }
-      
       b.close();
-      
     }
     
-    readBack(connector, count);
-    
+    readBack(connector, opts, scanOpts);
+    opts.stopTracing();
   }
 }

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/GCLotsOfCandidatesTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/GCLotsOfCandidatesTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/GCLotsOfCandidatesTest.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/GCLotsOfCandidatesTest.java Mon Jan 14 22:03:24 2013
@@ -17,14 +17,14 @@
 package org.apache.accumulo.server.test;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOpts;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.TablePermission;
@@ -32,17 +32,13 @@ import org.apache.hadoop.io.Text;
 
 public class GCLotsOfCandidatesTest {
   public static void main(String args[]) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
-    if (args.length != 4)
-      throw new IllegalArgumentException("Expected arguments: <instance name> <zookeeper server> <username> <password>");
+    ClientOpts opts = new ClientOpts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(GCLotsOfCandidatesTest.class.getName(), args, bwOpts);
     
-    Connector conn = new ZooKeeperInstance(args[0], args[1]).getConnector(args[2], args[3].getBytes());
-    generateCandidates(conn);
-  }
-  
-  private static void generateCandidates(Connector conn) throws AccumuloException, AccumuloSecurityException, TableNotFoundException,
-      MutationsRejectedException {
+    Connector conn = opts.getConnector();
     conn.securityOperations().grantTablePermission(conn.whoami(), Constants.METADATA_TABLE_NAME, TablePermission.WRITE);
-    BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, bwOpts.getBatchWriterConfig());
     
     for (int i = 0; i < 10000; ++i) {
       final Text emptyText = new Text("");

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/GetMasterStats.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/GetMasterStats.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/GetMasterStats.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/GetMasterStats.java Mon Jan 14 22:03:24 2013
@@ -103,10 +103,10 @@ public class GetMasterStats {
             out(4, "Records in Memory %d", info.recsInMemory);
             out(4, "Ingest %.2f", info.ingestRate);
             out(4, "Queries %.2f", info.queryRate);
-            out(4, "Major Compacting %d", info.major == null ? 0 : info.major.running);
-            out(4, "Queued for Major Compaction %d", info.major == null ? 0 : info.major.queued);
-            out(4, "Minor Compacting %d", info.minor == null ? 0 : info.minor.running);
-            out(4, "Queued for Minor Compaction %d", info.minor == null ? 0 : info.minor.queued);
+            out(4, "Major Compacting %d", info.majors == null ? 0 : info.majors.running);
+            out(4, "Queued for Major Compaction %d", info.majors == null ? 0 : info.majors.queued);
+            out(4, "Minor Compacting %d", info.minors == null ? 0 : info.minors.running);
+            out(4, "Queued for Minor Compaction %d", info.minors == null ? 0 : info.minors.queued);
           }
         }
         out(2, "Recoveries %d", server.logSorts.size());

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/ListTables.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/ListTables.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/ListTables.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/ListTables.java Mon Jan 14 22:03:24 2013
@@ -18,15 +18,17 @@ package org.apache.accumulo.server.test;
 
 import java.util.Map.Entry;
 
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 
 /**
  * This little program is used by the functional test to get a list of table ids.
  */
 public class ListTables {
-  public static void main(String[] args) {
-    for (Entry<String,String> table : Tables.getNameToIdMap(HdfsZooInstance.getInstance()).entrySet())
+  public static void main(String[] args) throws Exception {
+    ClientOpts opts = new ClientOpts();
+    opts.parseArgs(ListTables.class.getName(), args);
+    for (Entry<String,String> table : Tables.getNameToIdMap(opts.getInstance()).entrySet())
       System.out.println(table.getKey() + " => " + table.getValue());
   }
 }

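Editor's sketch: ListTables needs no Connector at all, so it stops at opts.getInstance(); the practical effect is that the tool is no longer pinned to HdfsZooInstance and can be pointed at any instance from the command line. A sketch under the same assumptions (illustrative class name):

    import org.apache.accumulo.server.cli.ClientOpts;

    public class ExampleInstanceOnly {
      public static void main(String[] args) throws Exception {
        ClientOpts opts = new ClientOpts();
        opts.parseArgs(ExampleInstanceOnly.class.getName(), args);
        // getInstance() builds the instance from the parsed flags,
        // where the old code hard-wired HdfsZooInstance.getInstance()
        System.out.println(opts.getInstance().getInstanceName());
      }
    }
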
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/NativeMapConcurrencyTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/NativeMapConcurrencyTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/NativeMapConcurrencyTest.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/NativeMapConcurrencyTest.java Mon Jan 14 22:03:24 2013
@@ -25,6 +25,9 @@ import org.apache.accumulo.core.data.Val
 import org.apache.accumulo.server.tabletserver.NativeMap;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+
 public class NativeMapConcurrencyTest {
   
   private static final byte ROW_PREFIX[] = new byte[] {'r'};
@@ -70,14 +73,30 @@ public class NativeMapConcurrencyTest {
     return num / (ms / 1000.0);
   }
   
+  static class Opts {
+    @Parameter(names="--rows", description="rows", required = true)
+    int rows = 0;
+    @Parameter(names="--cols", description="cols")
+    int cols = 1;
+    @Parameter(names="--threads", description="threads")
+    int threads = 1;
+    @Parameter(names="--writeThreads", description="write threads")
+    int writeThreads = 1;
+    @Parameter(names="-help", help=true)
+    boolean help = false;
+  }
+  
   public static void main(String[] args) {
-    int rows = Integer.parseInt(args[0]);
-    int cols = Integer.parseInt(args[1]);
-    int threads = Integer.parseInt(args[2]);
-    int writeThreads = Integer.parseInt(args[3]);
-    
-    NativeMap nm = create(rows, cols);
-    runTest(nm, rows, cols, threads, writeThreads);
+    Opts opts = new Opts();
+    JCommander jc = new JCommander(opts);
+    jc.setProgramName(NativeMapConcurrencyTest.class.getName());
+    jc.parse(args);
+    if (opts.help) {
+      jc.usage();
+      return;
+    }
+    NativeMap nm = create(opts.rows, opts.cols);
+    runTest(nm, opts.rows, opts.cols, opts.threads, opts.writeThreads);
     nm.delete();
   }
   

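Editor's sketch: NativeMapConcurrencyTest has no Accumulo connection, so it drives JCommander directly instead of going through ClientOpts; the Opts class plus the setProgramName/parse/usage calls above are the whole pattern. A self-contained sketch of that bare usage, with a hypothetical flag name:

    import com.beust.jcommander.JCommander;
    import com.beust.jcommander.Parameter;

    public class BareJCommanderExample {
      static class Opts {
        @Parameter(names = "--iterations", description = "number of iterations", required = true)
        int iterations;
        @Parameter(names = "-help", help = true)
        boolean help = false;
      }

      public static void main(String[] args) {
        Opts opts = new Opts();
        JCommander jc = new JCommander(opts);
        jc.setProgramName(BareJCommanderExample.class.getName());
        jc.parse(args);
        if (opts.help) {
          jc.usage();  // JCommander generates usage text from the annotations
          return;
        }
        System.out.println("iterations = " + opts.iterations);
      }
    }

An unknown flag or a missing required one makes jc.parse throw a ParameterException with a descriptive message, replacing the NumberFormatException or ArrayIndexOutOfBoundsException failures of hand-indexed args[].
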
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/QueryMetadataTable.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/QueryMetadataTable.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/QueryMetadataTable.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/QueryMetadataTable.java Mon Jan 14 22:03:24 2013
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.server.test;
 
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.Map.Entry;
@@ -26,6 +25,8 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
@@ -37,15 +38,10 @@ import org.apache.accumulo.core.data.Ran
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.thrift.AuthInfo;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.Parser;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 public class QueryMetadataTable {
   private static AuthInfo credentials;
   
@@ -86,39 +82,21 @@ public class QueryMetadataTable {
     }
   }
   
+  static class Opts extends ClientOpts {
+    @Parameter(names="--numQueries", description="number of queries to run")
+    int numQueries = 1;
+    @Parameter(names="--numThreads", description="number of threads used to run the queries")
+    int numThreads = 1;
+  }
+  
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Option usernameOpt = new Option("username", "username", true, "username");
-    Option passwordOpt = new Option("password", "password", true, "password");
-    
-    Options opts = new Options();
-    
-    opts.addOption(usernameOpt);
-    opts.addOption(passwordOpt);
-    
-    Parser p = new BasicParser();
-    CommandLine cl = null;
-    try {
-      cl = p.parse(opts, args);
-    } catch (ParseException e1) {
-      System.out.println("Parse Exception, exiting.");
-      return;
-    }
-    
-    if (cl.getArgs().length != 2) {
-      HelpFormatter hf = new HelpFormatter();
-      hf.printHelp("queryMetadataTable <numQueries> <numThreads> ", opts);
-      return;
-    }
-    String[] rargs = cl.getArgs();
-    
-    int numQueries = Integer.parseInt(rargs[0]);
-    int numThreads = Integer.parseInt(rargs[1]);
-    credentials = new AuthInfo(cl.getOptionValue("username", "root"), ByteBuffer.wrap(cl.getOptionValue("password", "secret").getBytes()), HdfsZooInstance
-        .getInstance().getInstanceID());
-    
-    Connector connector = HdfsZooInstance.getInstance().getConnector(credentials.user, credentials.password);
-    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
-    scanner.setBatchSize(20000);
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    opts.parseArgs(QueryMetadataTable.class.getName(), args, scanOpts);
+    
+    Connector connector = opts.getConnector();
+    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, opts.auths);
+    scanner.setBatchSize(scanOpts.scanBatchSize);
     Text mdrow = new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), null));
     
     HashSet<Text> rowSet = new HashSet<Text>();
@@ -138,7 +116,6 @@ public class QueryMetadataTable {
       if (!entry.getKey().getRow().toString().startsWith(Constants.METADATA_TABLE_ID))
         rowSet.add(entry.getKey().getRow());
       count++;
-      
     }
     
     System.out.printf(" %,d%n", count);
@@ -147,11 +124,11 @@ public class QueryMetadataTable {
     
     Random r = new Random();
     
-    ExecutorService tp = Executors.newFixedThreadPool(numThreads);
+    ExecutorService tp = Executors.newFixedThreadPool(opts.numThreads);
     
     long t1 = System.currentTimeMillis();
     
-    for (int i = 0; i < numQueries; i++) {
+    for (int i = 0; i < opts.numQueries; i++) {
       int index = r.nextInt(rows.size());
       MDTQuery mdtq = new MDTQuery(rows.get(index));
       tp.submit(mdtq);
@@ -168,6 +145,6 @@ public class QueryMetadataTable {
     
     long t2 = System.currentTimeMillis();
     double delta = (t2 - t1) / 1000.0;
-    System.out.println("time : " + delta + "  queries per sec : " + (numQueries / delta));
+    System.out.println("time : " + delta + "  queries per sec : " + (opts.numQueries / delta));
   }
 }

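Editor's sketch: QueryMetadataTable shows the scan-side counterpart — the hard-coded scanner.setBatchSize(20000) becomes a ScannerOpts field, and per-tool flags live in a small Opts subclass of ClientOpts. A minimal sketch, again with illustrative class and table names:

    import org.apache.accumulo.core.cli.ScannerOpts;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.server.cli.ClientOpts;

    import com.beust.jcommander.Parameter;

    public class ExampleScanMain {
      static class Opts extends ClientOpts {
        @Parameter(names = "--numQueries", description = "number of queries to run")
        int numQueries = 1;
      }

      public static void main(String[] args) throws Exception {
        Opts opts = new Opts();
        ScannerOpts scanOpts = new ScannerOpts();
        opts.parseArgs(ExampleScanMain.class.getName(), args, scanOpts);

        Connector conn = opts.getConnector();
        // opts.auths holds the scan authorizations parsed by ClientOpts
        Scanner scanner = conn.createScanner("exampleTable", opts.auths);
        scanner.setBatchSize(scanOpts.scanBatchSize);  // was a hard-coded constant
        // ... run opts.numQueries lookups against the scanner ...
      }
    }
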
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestBinaryRows.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestBinaryRows.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestBinaryRows.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestBinaryRows.java Mon Jan 14 22:03:24 2013
@@ -21,9 +21,10 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOnRequiredTable;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
@@ -31,18 +32,11 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.hadoop.io.Text;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+
+import com.beust.jcommander.Parameter;
 
 public class TestBinaryRows {
-  private static String username = "root";
-  private static byte[] passwd = "secret".getBytes();
-  private static String mode = null;
-  private static String table = null;
-  private static long start = 0;
-  private static long num = 0;
   private static final long byteOnes;
   
   static {
@@ -76,28 +70,31 @@ public class TestBinaryRows {
     return l;
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--mode", description="either 'ingest', 'delete', 'randomLookups', 'split', 'verify', 'verifyDeleted'", required=true)
+    String mode;
+    @Parameter(names="--start", description="the lowest numbered row")
+    long start = 0;
+    @Parameter(names="--count", description="number of rows to ingest", required=true)
+    long num = 0;
+  }
+  
   public static void main(String[] args) {
-    mode = args[0];
-    if (args.length < 4) {
-      System.err.println("ERROR : " + mode + " is not a valid operation or insufficient arguments.");
-      throw new RuntimeException("config error");
-    }
-    table = args[1];
-    start = Long.parseLong(args[2]);
-    num = Long.parseLong(args[3]);
+    Opts opts = new Opts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    opts.parseArgs(TestBinaryRows.class.getName(), args, scanOpts, bwOpts);
     
     try {
-      Connector connector = HdfsZooInstance.getInstance().getConnector(username, passwd);
-      
-      Logger.getLogger(Constants.CORE_PACKAGE_NAME).setLevel(Level.DEBUG);
+      Connector connector = opts.getConnector();
       
-      if (mode.equals("ingest") || mode.equals("delete")) {
-        BatchWriter bw = connector.createBatchWriter(table, new BatchWriterConfig());
-        boolean delete = mode.equals("delete");
-        
-        for (long i = 0; i < num; i++) {
-          byte[] row = encodeLong(i + start);
-          String value = "" + (i + start);
+      if (opts.mode.equals("ingest") || opts.mode.equals("delete")) {
+        BatchWriter bw = connector.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
+        boolean delete = opts.mode.equals("delete");
+        
+        for (long i = 0; i < opts.num; i++) {
+          byte[] row = encodeLong(i + opts.start);
+          String value = "" + (i + opts.start);
           
           Mutation m = new Mutation(new Text(row));
           if (delete) {
@@ -109,10 +106,11 @@ public class TestBinaryRows {
         }
         
         bw.close();
-      } else if (mode.equals("verifyDeleted")) {
-        Scanner s = connector.createScanner(table, Constants.NO_AUTHS);
-        Key startKey = new Key(encodeLong(start), "cf".getBytes(), "cq".getBytes(), new byte[0], Long.MAX_VALUE);
-        Key stopKey = new Key(encodeLong(start + num - 1), "cf".getBytes(), "cq".getBytes(), new byte[0], 0);
+      } else if (opts.mode.equals("verifyDeleted")) {
+        Scanner s = connector.createScanner(opts.tableName, opts.auths);
+        s.setBatchSize(scanOpts.scanBatchSize);
+        Key startKey = new Key(encodeLong(opts.start), "cf".getBytes(), "cq".getBytes(), new byte[0], Long.MAX_VALUE);
+        Key stopKey = new Key(encodeLong(opts.start + opts.num - 1), "cf".getBytes(), "cq".getBytes(), new byte[0], 0);
         s.setBatchSize(50000);
         s.setRange(new Range(startKey, stopKey));
         
@@ -122,16 +120,16 @@ public class TestBinaryRows {
           System.exit(1);
         }
         
-      } else if (mode.equals("verify")) {
+      } else if (opts.mode.equals("verify")) {
         long t1 = System.currentTimeMillis();
         
-        Scanner s = connector.createScanner(table, Constants.NO_AUTHS);
-        Key startKey = new Key(encodeLong(start), "cf".getBytes(), "cq".getBytes(), new byte[0], Long.MAX_VALUE);
-        Key stopKey = new Key(encodeLong(start + num - 1), "cf".getBytes(), "cq".getBytes(), new byte[0], 0);
-        s.setBatchSize(50000);
+        Scanner s = connector.createScanner(opts.tableName, opts.auths);
+        Key startKey = new Key(encodeLong(opts.start), "cf".getBytes(), "cq".getBytes(), new byte[0], Long.MAX_VALUE);
+        Key stopKey = new Key(encodeLong(opts.start + opts.num - 1), "cf".getBytes(), "cq".getBytes(), new byte[0], 0);
+        s.setBatchSize(scanOpts.scanBatchSize);
         s.setRange(new Range(startKey, stopKey));
         
-        long i = start;
+        long i = opts.start;
         
         for (Entry<Key,Value> e : s) {
           Key k = e.getKey();
@@ -144,8 +142,8 @@ public class TestBinaryRows {
           i++;
         }
         
-        if (i != start + num) {
-          System.err.println("ERROR : did not see expected number of rows, saw " + (i - start) + " expected " + num);
+        if (i != opts.start + opts.num) {
+          System.err.println("ERROR : did not see expected number of rows, saw " + (i - opts.start) + " expected " + opts.num);
           System.err.println("exiting... ARGHHHHHH");
           System.exit(1);
           
@@ -154,9 +152,9 @@ public class TestBinaryRows {
         long t2 = System.currentTimeMillis();
         
         System.out.printf("time : %9.2f secs%n", ((t2 - t1) / 1000.0));
-        System.out.printf("rate : %9.2f entries/sec%n", num / ((t2 - t1) / 1000.0));
+        System.out.printf("rate : %9.2f entries/sec%n", opts.num / ((t2 - t1) / 1000.0));
         
-      } else if (mode.equals("randomLookups")) {
+      } else if (opts.mode.equals("randomLookups")) {
         int numLookups = 1000;
         
         Random r = new Random();
@@ -164,9 +162,10 @@ public class TestBinaryRows {
         long t1 = System.currentTimeMillis();
         
         for (int i = 0; i < numLookups; i++) {
-          long row = (Math.abs(r.nextLong()) % num) + start;
+          long row = (Math.abs(r.nextLong()) % opts.num) + opts.start;
           
-          Scanner s = connector.createScanner(table, Constants.NO_AUTHS);
+          Scanner s = connector.createScanner(opts.tableName, opts.auths);
+          s.setBatchSize(scanOpts.scanBatchSize);
           Key startKey = new Key(encodeLong(row), "cf".getBytes(), "cq".getBytes(), new byte[0], Long.MAX_VALUE);
           Key stopKey = new Key(encodeLong(row), "cf".getBytes(), "cq".getBytes(), new byte[0], 0);
           s.setRange(new Range(startKey, stopKey));
@@ -199,10 +198,10 @@ public class TestBinaryRows {
         System.out.printf("lookups : %9d keys%n", numLookups);
         System.out.printf("rate    : %9.2f lookups/sec%n", numLookups / ((t2 - t1) / 1000.0));
         
-      } else if (mode.equals("split")) {
+      } else if (opts.mode.equals("split")) {
         TreeSet<Text> splits = new TreeSet<Text>();
-        int shift = (int) start;
-        int count = (int) num;
+        int shift = (int) opts.start;
+        int count = (int) opts.num;
         
         for (long i = 0; i < count; i++) {
           long splitPoint = i << shift;
@@ -211,11 +210,11 @@ public class TestBinaryRows {
           System.out.printf("added split point 0x%016x  %,12d%n", splitPoint, splitPoint);
         }
         
-        connector.tableOperations().create(table);
-        connector.tableOperations().addSplits(table, splits);
+        connector.tableOperations().create(opts.tableName);
+        connector.tableOperations().addSplits(opts.tableName, splits);
         
       } else {
-        System.err.println("ERROR : " + mode + " is not a valid operation.");
+        System.err.println("ERROR : " + opts.mode + " is not a valid operation.");
         System.exit(1);
       }
     } catch (Exception e) {

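Editor's note on the TestBinaryRows hunk: in the verifyDeleted branch the new s.setBatchSize(scanOpts.scanBatchSize) is added, but the pre-existing s.setBatchSize(50000) context line survives below it and executes afterwards, so the hard-coded value still takes effect there; the verify branch removes the constant outright.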

