accumulo-commits mailing list archives

From vi...@apache.org
Subject svn commit: r1433166 [13/20] - in /accumulo/branches/ACCUMULO-259: ./ assemble/ assemble/platform/ assemble/scripts/ assemble/scripts/init.d/ bin/ conf/examples/1GB/native-standalone/ conf/examples/1GB/standalone/ conf/examples/2GB/native-standalone/ c...
Date Mon, 14 Jan 2013 22:03:34 GMT
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java Mon Jan 14 22:03:24 2013
@@ -24,7 +24,6 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
 import java.io.Serializable;
-import java.nio.charset.Charset;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -135,9 +134,9 @@ class MoveExportedFiles extends MasterRe
   }
   
   @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
+  public Repo<Master> call(long tid, Master master) throws Exception {
     try {
-      FileSystem fs = environment.getFileSystem();
+      FileSystem fs = master.getFileSystem();
       
       Map<String,String> fileNameMappings = PopulateMetadataTable.readMappingFile(fs, tableInfo);
       
@@ -171,8 +170,6 @@ class PopulateMetadataTable extends Mast
   private static final long serialVersionUID = 1L;
   
   private ImportedTableInfo tableInfo;
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
   PopulateMetadataTable(ImportedTableInfo ti) {
     this.tableInfo = ti;
@@ -198,7 +195,7 @@ class PopulateMetadataTable extends Mast
   }
 
   @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
+  public Repo<Master> call(long tid, Master master) throws Exception {
     
     Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
     
@@ -206,10 +203,9 @@ class PopulateMetadataTable extends Mast
     ZipInputStream zis = null;
 
     try {
-      FileSystem fs = environment.getFileSystem();
+      FileSystem fs = master.getFileSystem();
       
-      mbw = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemCredentials())
-          .createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+      mbw = master.getConnector().createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
 
       zis = new ZipInputStream(fs.open(path));
       
@@ -249,14 +245,14 @@ class PopulateMetadataTable extends Mast
             
             if (m == null) {
               m = new Mutation(metadataRow);
-              Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes(utf8))));
+              Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
               currentRow = metadataRow;
             }
             
             if (!currentRow.equals(metadataRow)) {
               mbw.addMutation(m);
               m = new Mutation(metadataRow);
-              Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes(utf8))));
+              Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
             }
             
             m.put(key.getColumnFamily(), cq, val);

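The recurring change in ImportTable.java and the tableOps files that follow is that each FATE step now takes its FileSystem, Connector, and Instance from the Master object handed to call(), instead of reaching for the HdfsZooInstance and SecurityConstants singletons. A minimal sketch of that style, using toy interfaces rather than Accumulo's real Repo and Master types (all names below are illustrative):

    // Toy interfaces only; not Accumulo's API.
    interface Environment {
      String getInstanceName();
    }

    interface Step<E> {
      // Mirrors the shape of Repo<Master>.call(long tid, Master master): do one
      // unit of work against the supplied environment and return the next step,
      // or null when the operation is finished.
      Step<E> call(long tid, E env) throws Exception;
    }

    class LogInstanceName implements Step<Environment> {
      @Override
      public Step<Environment> call(long tid, Environment env) {
        System.out.println(String.format("%016x running against %s", tid, env.getInstanceName()));
        return null;
      }
    }

    public class EnvironmentStyleSketch {
      public static void main(String[] args) throws Exception {
        Environment env = new Environment() {
          @Override
          public String getInstanceName() {
            return "test-instance";
          }
        };
        new LogInstanceName().call(42L, env);
      }
    }

Because the step never consults a global, a caller (or a test) can hand in any environment it likes, which is the practical payoff of threading the master through call().
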
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java Mon Jan 14 22:03:24 2013
@@ -16,8 +16,6 @@
  */
 package org.apache.accumulo.server.master.tableOps;
 
-import java.nio.charset.Charset;
-
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Tables;
@@ -28,7 +26,6 @@ import org.apache.accumulo.core.zookeepe
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.Master;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.log4j.Logger;
@@ -39,8 +36,6 @@ public class RenameTable extends MasterR
   private String tableId;
   private String oldTableName;
   private String newTableName;
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
   @Override
   public long isReady(long tid, Master environment) throws Exception {
@@ -54,9 +49,9 @@ public class RenameTable extends MasterR
   }
   
   @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
+  public Repo<Master> call(long tid, Master master) throws Exception {
     
-    Instance instance = HdfsZooInstance.getInstance();
+    Instance instance = master.getInstance();
     
     IZooReaderWriter zoo = ZooReaderWriter.getRetryingInstance();
     Utils.tableNameLock.lock();
@@ -74,7 +69,7 @@ public class RenameTable extends MasterR
             throw new ThriftTableOperationException(null, oldTableName, TableOperation.RENAME, TableOperationExceptionType.NOTFOUND,
                 "Name changed while processing");
           }
-          return newTableName.getBytes(utf8);
+          return newTableName.getBytes();
         }
       });
       Tables.clearCache(instance);

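RenameTable.java, like several files in this commit, drops its private Charset utf8 = Charset.forName("UTF8") constant and calls the no-argument String.getBytes(), which encodes with the platform default charset instead of forcing UTF-8. As a standalone reminder of the difference (standard library only, not part of the commit):

    import java.nio.charset.Charset;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class CharsetNote {
      public static void main(String[] args) {
        String name = "renamed_table";
        byte[] platformDefault = name.getBytes();                    // whatever file.encoding says
        byte[] explicitUtf8 = name.getBytes(StandardCharsets.UTF_8); // always UTF-8
        System.out.println("default charset: " + Charset.defaultCharset());
        System.out.println("identical bytes: " + Arrays.equals(platformDefault, explicitUtf8));
      }
    }

On an ASCII-only value like a table name the two forms produce the same bytes; they diverge only for non-ASCII input on a non-UTF-8 default locale.
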
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java Mon Jan 14 22:03:24 2013
@@ -20,11 +20,9 @@ import org.apache.accumulo.core.Constant
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.fate.Repo;
@@ -32,7 +30,6 @@ import org.apache.accumulo.server.master
 import org.apache.accumulo.server.master.state.MergeInfo;
 import org.apache.accumulo.server.master.state.MergeInfo.Operation;
 import org.apache.accumulo.server.master.state.MergeState;
-import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.util.MetadataTable;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -59,11 +56,9 @@ class MakeDeleteEntries extends MasterRe
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
     log.info("creating delete entries for merged metadata tablets");
-    Instance instance = master.getInstance();
-    Connector conn = instance.getConnector(SecurityConstants.getSystemCredentials());
+    Connector conn = master.getConnector();
     BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
-    AccumuloConfiguration conf = instance.getConfiguration();
-    String tableDir = Constants.getMetadataTableDir(conf);
+    String tableDir = Constants.getMetadataTableDir(master.getConfiguration().getConfiguration());
     for (FileStatus fs : master.getFileSystem().listStatus(new Path(tableDir))) {
       // TODO: add the entries only if there are no !METADATA table references
       if (fs.isDir() && fs.getPath().getName().matches("^" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + ".*")) {
@@ -94,11 +89,11 @@ class TableRangeOpWait extends MasterRep
   }
   
   @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
+  public Repo<Master> call(long tid, Master master) throws Exception {
     Text tableIdText = new Text(tableId);
-    MergeInfo mergeInfo = env.getMergeInfo(tableIdText);
+    MergeInfo mergeInfo = master.getMergeInfo(tableIdText);
     log.warn("removing merge information " + mergeInfo);
-    env.clearMergeState(tableIdText);
+    master.clearMergeState(tableIdText);
     Utils.unreserveTable(tableId, tid, true);
     // We can't add entries to the metadata table if it is offline for this merge.
     // If the delete entries for the metadata table were in the root tablet, it would work just fine

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java Mon Jan 14 22:03:24 2013
@@ -17,7 +17,6 @@
 package org.apache.accumulo.server.master.tableOps;
 
 import java.math.BigInteger;
-import java.nio.charset.Charset;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
@@ -40,8 +39,6 @@ import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;
 
 public class Utils {
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
   static void checkTableDoesNotExist(Instance instance, String tableName, String tableId, TableOperation operation) throws ThriftTableOperationException {
     
@@ -57,12 +54,12 @@ public class Utils {
     try {
       IZooReaderWriter zoo = ZooReaderWriter.getRetryingInstance();
       final String ntp = ZooUtil.getRoot(instance) + Constants.ZTABLES;
-      byte[] nid = zoo.mutate(ntp, "0".getBytes(utf8), ZooUtil.PUBLIC, new Mutator() {
+      byte[] nid = zoo.mutate(ntp, "0".getBytes(), ZooUtil.PUBLIC, new Mutator() {
         @Override
         public byte[] mutate(byte[] currentValue) throws Exception {
           BigInteger nextId = new BigInteger(new String(currentValue), Character.MAX_RADIX);
           nextId = nextId.add(BigInteger.ONE);
-          return nextId.toString(Character.MAX_RADIX).getBytes(utf8);
+          return nextId.toString(Character.MAX_RADIX).getBytes();
         }
       });
       return new String(nid);
@@ -98,7 +95,7 @@ public class Utils {
   public static long reserveHdfsDirectory(String directory, long tid) throws KeeperException, InterruptedException {
     Instance instance = HdfsZooInstance.getInstance();
     
-    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/" + new String(Base64.encodeBase64(directory.getBytes(utf8)));
+    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/" + new String(Base64.encodeBase64(directory.getBytes()));
     
     IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
     
@@ -110,12 +107,12 @@ public class Utils {
   
   public static void unreserveHdfsDirectory(String directory, long tid) throws KeeperException, InterruptedException {
     Instance instance = HdfsZooInstance.getInstance();
-    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/" + new String(Base64.encodeBase64(directory.getBytes(utf8)));
+    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/" + new String(Base64.encodeBase64(directory.getBytes()));
     ZooReservation.release(ZooReaderWriter.getRetryingInstance(), resvPath, String.format("%016x", tid));
   }
   
   private static Lock getLock(String tableId, long tid, boolean writeLock) throws Exception {
-    byte[] lockData = String.format("%016x", tid).getBytes(utf8);
+    byte[] lockData = String.format("%016x", tid).getBytes();
     ZooQueueLock qlock = new ZooQueueLock(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZTABLE_LOCKS + "/" + tableId, false);
     Lock lock = DistributedReadWriteLock.recoverLock(qlock, lockData);
     if (lock == null) {

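The getNextTableId logic in Utils.java keeps its shape: the Mutator callback parses the current counter in base Character.MAX_RADIX, increments it, and returns the new bytes for ZooKeeper to write, with zoo.mutate retrying if another process updated the node first. A small in-memory sketch of that read-modify-write loop, with an AtomicReference standing in for the ZooKeeper node (this is not the real IZooReaderWriter API):

    import java.math.BigInteger;
    import java.util.concurrent.atomic.AtomicReference;

    public class MutateSketch {
      interface Mutator {
        byte[] mutate(byte[] currentValue) throws Exception;
      }

      // Retry the callback until the compare-and-set succeeds, the way
      // zoo.mutate retries when the node's version has moved underneath it.
      static byte[] mutate(AtomicReference<byte[]> node, Mutator m) throws Exception {
        while (true) {
          byte[] current = node.get();
          byte[] next = m.mutate(current);
          if (node.compareAndSet(current, next))
            return next;
        }
      }

      public static void main(String[] args) throws Exception {
        AtomicReference<byte[]> counter = new AtomicReference<byte[]>("0".getBytes());
        byte[] nid = mutate(counter, new Mutator() {
          @Override
          public byte[] mutate(byte[] currentValue) {
            BigInteger nextId = new BigInteger(new String(currentValue), Character.MAX_RADIX);
            return nextId.add(BigInteger.ONE).toString(Character.MAX_RADIX).getBytes();
          }
        });
        System.out.println("next table id: " + new String(nid)); // prints 1
      }
    }
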
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tserverOps/ShutdownTServer.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tserverOps/ShutdownTServer.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tserverOps/ShutdownTServer.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/master/tserverOps/ShutdownTServer.java Mon Jan 14 22:03:24 2013
@@ -16,8 +16,6 @@
  */
 package org.apache.accumulo.server.master.tserverOps;
 
-import java.nio.charset.Charset;
-
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.util.AddressUtil;
@@ -41,8 +39,6 @@ public class ShutdownTServer extends Mas
   private static final Logger log = Logger.getLogger(ShutdownTServer.class);
   private TServerInstance server;
   private boolean force;
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
   public ShutdownTServer(TServerInstance server, boolean force) {
     this.server = server;
@@ -55,29 +51,29 @@ public class ShutdownTServer extends Mas
   }
   
   @Override
-  public Repo<Master> call(long tid, Master m) throws Exception {
+  public Repo<Master> call(long tid, Master master) throws Exception {
     // suppress assignment of tablets to the server
     if (force) {
       String tserver = AddressUtil.toString(server.getLocation());
-      String path = ZooUtil.getRoot(m.getInstance()) + Constants.ZTSERVERS + "/" + tserver;
+      String path = ZooUtil.getRoot(master.getInstance()) + Constants.ZTSERVERS + "/" + tserver;
       ZooLock.deleteLock(path);
-      path = ZooUtil.getRoot(m.getInstance()) + Constants.ZDEADTSERVERS + "/" + tserver;
+      path = ZooUtil.getRoot(master.getInstance()) + Constants.ZDEADTSERVERS + "/" + tserver;
       IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-      zoo.putPersistentData(path, "forced down".getBytes(utf8), NodeExistsPolicy.OVERWRITE);
+      zoo.putPersistentData(path, "forced down".getBytes(), NodeExistsPolicy.OVERWRITE);
       return null;
     }
     
     // TODO move this to isReady() and drop while loop?
-    Listener listener = m.getEventCoordinator().getListener();
-    m.shutdownTServer(server);
-    while (m.onlineTabletServers().contains(server)) {
-      TServerConnection connection = m.getConnection(server);
+    Listener listener = master.getEventCoordinator().getListener();
+    master.shutdownTServer(server);
+    while (master.onlineTabletServers().contains(server)) {
+      TServerConnection connection = master.getConnection(server);
       if (connection != null) {
         try {
           TabletServerStatus status = connection.getTableMap();
           if (status.tableMap != null && status.tableMap.isEmpty()) {
             log.info("tablet server hosts no tablets " + server);
-            connection.halt(m.getMasterLock());
+            connection.halt(master.getMasterLock());
             log.info("tablet server asked to halt " + server);
             break;
           }

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java Mon Jan 14 22:03:24 2013
@@ -18,60 +18,35 @@ package org.apache.accumulo.server.metan
 
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Finds tablet creation events.
  */
 public class FindTablet {
-  public static void main(String[] args) throws Exception {
-    
-    Options options = new Options();
-    options.addOption("r", "row", true, "find tablets that contain this row");
-    
-    GnuParser parser = new GnuParser();
-    CommandLine cmd = null;
-    try {
-      cmd = parser.parse(options, args);
-      if (cmd.getArgs().length != 5) {
-        throw new ParseException("Command takes no arguments");
-      }
-    } catch (ParseException e) {
-      System.err.println("Failed to parse command line " + e.getMessage());
-      System.err.println();
-      HelpFormatter formatter = new HelpFormatter();
-      formatter.printHelp(FindTablet.class.getSimpleName() + " <instance> <zookeepers> <user> <pass> <table ID>", options);
-      System.exit(-1);
-    }
-    
-    String instance = cmd.getArgs()[0];
-    String zookeepers = cmd.getArgs()[1];
-    String user = cmd.getArgs()[2];
-    String pass = cmd.getArgs()[3];
-    String tableID = cmd.getArgs()[4];
+  
+  static public class Opts extends ClientOpts {
+    @Parameter(names={"-r", "--row"}, required=true, description="find tablets that contain this row")
+    String row = null;
     
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zookeepers);
-    Connector conn = zki.getConnector(user, pass);
+    @Parameter(names="--tableId", required=true, description="table id")
+    String tableId = null;
+  }
+  
+  public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(FindTablet.class.getName(), args);
     
-    if (cmd.hasOption('r')) {
-      findContainingTablets(conn, tableID, cmd.getOptionValue('r'));
-    } else {
-      System.err.println("ERROR :  No search criteria given");
-    }
+    findContainingTablets(opts);
   }
 
   /**
@@ -80,16 +55,16 @@ public class FindTablet {
    * @param tableID
    * @param option
    */
-  private static void findContainingTablets(Connector conn, String tableID, String row) throws Exception {
-    Range range = new KeyExtent(new Text(tableID), null, null).toMetadataRange();
+  private static void findContainingTablets(Opts opts) throws Exception {
+    Range range = new KeyExtent(new Text(opts.tableId), null, null).toMetadataRange();
 
-    Scanner scanner = conn.createScanner("createEvents", new Authorizations());
-    
+    Scanner scanner = opts.getConnector().createScanner("createEvents", opts.auths);
     scanner.setRange(range);
-    
+
+    Text row = new Text(opts.row);
     for (Entry<Key,Value> entry : scanner) {
       KeyExtent ke = new KeyExtent(entry.getKey().getRow(), new Value(TextUtil.getBytes(entry.getKey().getColumnFamily())));
-      if (ke.contains(new Text(row))) {
+      if (ke.contains(row)) {
         System.out.println(entry.getKey().getColumnQualifier() + " " + ke + " " + entry.getValue());
       }
     }

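FindTablet.java and the other metanalysis tools below replace hand-rolled commons-cli parsing with a JCommander-annotated Opts class supplied through ClientOpts. Stripped of the Accumulo-specific base class, the annotation-driven parsing amounts to roughly the following (assumes the com.beust.jcommander dependency; the option fields are illustrative):

    import com.beust.jcommander.JCommander;
    import com.beust.jcommander.Parameter;

    public class OptsSketch {
      static class Opts {
        @Parameter(names = {"-r", "--row"}, required = true, description = "find tablets that contain this row")
        String row;

        @Parameter(names = "--tableId", required = true, description = "table id")
        String tableId;
      }

      public static void main(String[] args) {
        Opts opts = new Opts();
        // Throws com.beust.jcommander.ParameterException if a required option is missing.
        new JCommander(opts).parse(args);
        System.out.println("row=" + opts.row + " tableId=" + opts.tableId);
      }
    }

Invoked as "-r somerow --tableId 2" it prints both values; omitting either option fails fast, which is the boilerplate the old GnuParser/HelpFormatter block provided by hand.
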
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java Mon Jan 14 22:03:24 2013
@@ -17,16 +17,16 @@
 package org.apache.accumulo.server.metanalysis;
 
 import java.io.IOException;
-import java.nio.charset.Charset;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.KeyExtent;
@@ -46,6 +46,8 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Logger;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * A map reduce job that takes write ahead logs containing mutations for the metadata table and indexes them into Accumulo tables for analysis.
  * 
@@ -53,14 +55,12 @@ import org.apache.log4j.Logger;
 
 public class IndexMeta extends Configured implements Tool {
   
-  private static final Charset utf8 = Charset.forName("UTF8");
-    
   public static class IndexMapper extends Mapper<LogFileKey,LogFileValue,Text,Mutation> {
     private static final Text CREATE_EVENTS_TABLE = new Text("createEvents");
     private static final Text TABLET_EVENTS_TABLE = new Text("tabletEvents");
     private Map<Integer,KeyExtent> tabletIds = new HashMap<Integer,KeyExtent>();
     private String uuid = null;
-
+    
     @Override
     protected void setup(Context context) throws java.io.IOException, java.lang.InterruptedException {
       tabletIds = new HashMap<Integer,KeyExtent>();
@@ -104,31 +104,27 @@ public class IndexMeta extends Configure
       
       if (prevRow != null) {
         Mutation createEvent = new Mutation(new Text(m.getRow()));
-        createEvent.put(prevRow, new Text(String.format("%020d", timestamp)), new Value(metaTablet.toString().getBytes(utf8)));
+        createEvent.put(prevRow, new Text(String.format("%020d", timestamp)), new Value(metaTablet.toString().getBytes()));
         context.write(CREATE_EVENTS_TABLE, createEvent);
       }
       
       Mutation tabletEvent = new Mutation(new Text(m.getRow()));
       tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("mut"), new Value(serMut));
-      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("mtab"), new Value(metaTablet.toString().getBytes(utf8)));
-      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("log"), new Value(logFile.getBytes(utf8)));
+      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("mtab"), new Value(metaTablet.toString().getBytes()));
+      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("log"), new Value(logFile.getBytes()));
       context.write(TABLET_EVENTS_TABLE, tabletEvent);
     }
   }
 
-  
+  static class Opts extends ClientOpts {
+    @Parameter(description="<logfile> { <logfile> ...}")
+    List<String> logFiles = new ArrayList<String>();
+  }
   
   @Override
   public int run(String[] args) throws Exception {
-    if (args.length < 5) {
-      System.err.println("Usage : " + IndexMeta.class + " <instance> <zookeepers> <user> <pass> <logfile> {<logfile>}");
-      return -1;
-    }
-    
-    String instance = args[0];
-    String zookeepers = args[1];
-    String user = args[2];
-    String pass = args[3];
+    Opts opts = new Opts();
+    opts.parseArgs(IndexMeta.class.getName(), args);
 
     String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
     
@@ -148,13 +144,12 @@ public class IndexMeta extends Configure
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(AccumuloOutputFormat.class);
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), instance, zookeepers);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), user, pass.getBytes(utf8), false, null);
+    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), opts.instance, opts.zookeepers);
+    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), opts.user, opts.getPassword(), false, null);
     
     job.setMapperClass(IndexMapper.class);
 
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zookeepers);
-    Connector conn = zki.getConnector(user, pass);
+    Connector conn = opts.getConnector();
     
     try {
       conn.tableOperations().create("createEvents");

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java Mon Jan 14 22:03:24 2013
@@ -23,9 +23,9 @@ import java.util.List;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
@@ -36,36 +36,32 @@ import org.apache.accumulo.core.security
 import org.apache.accumulo.server.logger.LogFileValue;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Looks up and prints mutations indexed by IndexMeta
  */
 public class PrintEvents {
   
+  static class Opts extends ClientOpts {
+    @Parameter(names={"-t", "--tableId"}, description="table id", required=true)
+    String tableId;
+    @Parameter(names={"-e", "--endRow"}, description="end row")
+    String endRow;
+    @Parameter(names={"-t", "--time"}, description="time, in milliseconds", required=true)
+    long time;
+  }
+  
   /**
    * @param args
    */
   public static void main(String[] args) throws Exception {
-    if (args.length != 7) {
-      System.err.println("Usage : " + IndexMeta.class + " <instance> <zookeepers> <user> <pass> <tableId> <endRow> <time>");
-      return;
-    }
+    Opts opts = new Opts();
+    opts.parseArgs(PrintEvents.class.getName(), args);
     
-    String instance = args[0];
-    String zookeepers = args[1];
-    String user = args[2];
-    String pass = args[3];
-    String tableId = args[4];
-    String endRow = args[5];
-    Long time = Long.parseLong(args[6]);
+    Connector conn = opts.getConnector();
     
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zookeepers);
-    Connector conn = zki.getConnector(user, pass);
-    
-    if (endRow.equals("null")) {
-      endRow = null;
-    }
-
-    printEvents(conn, tableId, endRow, time);
+    printEvents(conn, opts.tableId, opts.endRow, opts.time);
   }
   
   /**

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java Mon Jan 14 22:03:24 2013
@@ -51,7 +51,7 @@ public class MetricsConfiguration {
   
   private Configuration config = null;
   
-  private Object lock = new Object();
+  private final Object lock = new Object();
   
   private boolean needsReloading = false;
   
@@ -61,7 +61,7 @@ public class MetricsConfiguration {
   
   private static int CONFIG_FILE_CHECK_COUNTER = 100;
   
-  public static long CONFIG_FILE_RELOAD_DELAY = 60000;
+  public final static long CONFIG_FILE_RELOAD_DELAY = 60000;
   
   private MetricsConfigWatcher watcher = null;
   

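The MetricsConfiguration change only tightens modifiers, but final on the lock object is worth the churn: a non-final lock field can be reassigned while another thread still holds the old object, so two threads end up synchronizing on different monitors. A minimal illustration of the guarded-by-a-final-lock pattern (not the real class):

    public class FinalLockSketch {
      private final Object lock = new Object(); // final: every thread synchronizes on the same object
      private int reloadCount = 0;

      public void reload() {
        synchronized (lock) {
          reloadCount++;
        }
      }

      public int reloads() {
        synchronized (lock) {
          return reloadCount;
        }
      }

      public static void main(String[] args) {
        FinalLockSketch sketch = new FinalLockSketch();
        sketch.reload();
        sketch.reload();
        System.out.println(sketch.reloads()); // 2
      }
    }
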
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java Mon Jan 14 22:03:24 2013
@@ -121,18 +121,18 @@ public class Monitor {
   }
   
   private static final int MAX_TIME_PERIOD = 60 * 60 * 1000;
-  private static List<Pair<Long,Double>> loadOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
-  private static List<Pair<Long,Double>> ingestRateOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
-  private static List<Pair<Long,Double>> ingestByteRateOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
-  private static List<Pair<Long,Integer>> recoveriesOverTime = Collections.synchronizedList(new MaxList<Integer>(MAX_TIME_PERIOD));
-  private static List<Pair<Long,Integer>> minorCompactionsOverTime = Collections.synchronizedList(new MaxList<Integer>(MAX_TIME_PERIOD));
-  private static List<Pair<Long,Integer>> majorCompactionsOverTime = Collections.synchronizedList(new MaxList<Integer>(MAX_TIME_PERIOD));
-  private static List<Pair<Long,Double>> lookupsOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
-  private static List<Pair<Long,Integer>> queryRateOverTime = Collections.synchronizedList(new MaxList<Integer>(MAX_TIME_PERIOD));
-  private static List<Pair<Long,Integer>> scanRateOverTime = Collections.synchronizedList(new MaxList<Integer>(MAX_TIME_PERIOD));
-  private static List<Pair<Long,Double>> queryByteRateOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
-  private static List<Pair<Long,Double>> indexCacheHitRateOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
-  private static List<Pair<Long,Double>> dataCacheHitRateOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Double>> loadOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Double>> ingestRateOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Double>> ingestByteRateOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Integer>> recoveriesOverTime = Collections.synchronizedList(new MaxList<Integer>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Integer>> minorCompactionsOverTime = Collections.synchronizedList(new MaxList<Integer>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Integer>> majorCompactionsOverTime = Collections.synchronizedList(new MaxList<Integer>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Double>> lookupsOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Integer>> queryRateOverTime = Collections.synchronizedList(new MaxList<Integer>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Integer>> scanRateOverTime = Collections.synchronizedList(new MaxList<Integer>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Double>> queryByteRateOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Double>> indexCacheHitRateOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
+  private static final List<Pair<Long,Double>> dataCacheHitRateOverTime = Collections.synchronizedList(new MaxList<Double>(MAX_TIME_PERIOD));
   private static EventCounter lookupRateTracker = new EventCounter();
   private static EventCounter indexCacheHitTracker = new EventCounter();
   private static EventCounter indexCacheRequestTracker = new EventCounter();
@@ -167,19 +167,19 @@ public class Monitor {
   }
   
   public static void add(TableInfo total, TableInfo more) {
-    if (total.minor == null)
-      total.minor = new Compacting();
-    if (total.major == null)
-      total.major = new Compacting();
+    if (total.minors == null)
+      total.minors = new Compacting();
+    if (total.majors == null)
+      total.majors = new Compacting();
     if (total.scans == null)
       total.scans = new Compacting();
-    if (more.minor != null) {
-      total.minor.running += more.minor.running;
-      total.minor.queued += more.minor.queued;
-    }
-    if (more.major != null) {
-      total.major.running += more.major.running;
-      total.major.queued += more.major.queued;
+    if (more.minors != null) {
+      total.minors.running += more.minors.running;
+      total.minors.queued += more.minors.queued;
+    }
+    if (more.majors != null) {
+      total.majors.running += more.majors.running;
+      total.majors.queued += more.majors.queued;
     }
     if (more.scans != null) {
       total.scans.running += more.scans.running;
@@ -198,8 +198,8 @@ public class Monitor {
   
   public static TableInfo summarizeTableStats(TabletServerStatus status) {
     TableInfo summary = new TableInfo();
-    summary.major = new Compacting();
-    summary.minor = new Compacting();
+    summary.majors = new Compacting();
+    summary.minors = new Compacting();
     summary.scans = new Compacting();
     for (TableInfo rates : status.tableMap.values()) {
       add(summary, rates);
@@ -294,11 +294,11 @@ public class Monitor {
           client = MasterClient.getConnection(HdfsZooInstance.getInstance());
           if (client != null) {
             mmi = client.getMasterStats(Tracer.traceInfo(), SecurityConstants.getSystemCredentials());
+            retry = false;
           } else {
             mmi = null;
           }
           Monitor.gcStatus = fetchGcStatus();
-          retry = false;
         } catch (Exception e) {
           mmi = null;
           log.info("Error fetching stats: " + e);
@@ -319,7 +319,7 @@ public class Monitor {
         indexCacheRequestTracker.startingUpdates();
         dataCacheHitTracker.startingUpdates();
         dataCacheRequestTracker.startingUpdates();
-
+        
         for (TabletServerStatus server : mmi.tServerInfo) {
           TableInfo summary = Monitor.summarizeTableStats(server);
           totalIngestRate += summary.ingestRate;
@@ -330,8 +330,8 @@ public class Monitor {
           totalEntries += summary.recs;
           totalHoldTime += server.holdTime;
           totalLookups += server.lookups;
-          majorCompactions += summary.major.running;
-          minorCompactions += summary.minor.running;
+          majorCompactions += summary.majors.running;
+          minorCompactions += summary.minors.running;
           lookupRateTracker.updateTabletServer(server.name, server.lastContact, server.lookups);
           indexCacheHitTracker.updateTabletServer(server.name, server.lastContact, server.indexCacheHits);
           indexCacheRequestTracker.updateTabletServer(server.name, server.lastContact, server.indexCacheRequest);
@@ -466,7 +466,7 @@ public class Monitor {
     int port = config.getConfiguration().getPort(Property.MONITOR_PORT);
     try {
       log.debug("Creating monitor on port " + port);
-      server = EmbeddedWebServer.create(port);
+      server = new EmbeddedWebServer(hostname, port);
     } catch (Throwable ex) {
       log.error("Unable to start embedded web server", ex);
       throw new RuntimeException(ex);

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/ZooKeeperStatus.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/ZooKeeperStatus.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/ZooKeeperStatus.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/ZooKeeperStatus.java Mon Jan 14 22:03:24 2013
@@ -17,11 +17,9 @@
 package org.apache.accumulo.server.monitor;
 
 import java.net.InetSocketAddress;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
 
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.util.TTimeoutTransport;
@@ -36,10 +34,8 @@ public class ZooKeeperStatus implements 
   private static final Logger log = Logger.getLogger(ZooKeeperStatus.class);
   
   private volatile boolean stop = false;
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
-  public static class ZooKeeperState {
+  public static class ZooKeeperState implements Comparable<ZooKeeperState> {
     public final String keeper;
     public final String mode;
     public final int clients;
@@ -49,9 +45,28 @@ public class ZooKeeperStatus implements 
       this.mode = mode;
       this.clients = clients;
     }
+    
+    @Override
+    public int compareTo(ZooKeeperState other) {
+      if (this == other) {
+        return 0;
+      } else if (other == null) {
+        return 1;
+      } else {
+        if (this.keeper == other.keeper) {
+          return 0;
+        } else if (null == this.keeper) {
+          return -1;
+        } else if (null == other.keeper) {
+          return 1;
+        } else {
+          return this.keeper.compareTo(other.keeper);
+        }
+      }
+    }
   }
   
-  private static Collection<ZooKeeperState> status = Collections.emptyList();
+  private static SortedSet<ZooKeeperState> status = new TreeSet<ZooKeeperState>();
   
   public static Collection<ZooKeeperState> getZooKeeperStatus() {
     return status;
@@ -66,7 +81,7 @@ public class ZooKeeperStatus implements 
     
     while (!stop) {
       
-      List<ZooKeeperState> update = new ArrayList<ZooKeeperState>();
+      TreeSet<ZooKeeperState> update = new TreeSet<ZooKeeperState>();
       
       String zookeepers[] = ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_ZK_HOST).split(",");
       for (String keeper : zookeepers) {
@@ -83,7 +98,7 @@ public class ZooKeeperStatus implements 
             addr = new InetSocketAddress(parts[0], 2181);
           
           transport = TTimeoutTransport.create(addr, 10 * 1000l);
-          transport.write("stat\n".getBytes(utf8), 0, 5);
+          transport.write("stat\n".getBytes(), 0, 5);
           StringBuilder response = new StringBuilder();
           try {
             transport.flush();

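ZooKeeperState now implements Comparable so the monitor can keep its ZooKeeper status in a TreeSet, giving the servlet a stable, host-sorted list instead of whatever order the keepers were polled in. The ordering behaviour in isolation, with a simplified element type rather than the real ZooKeeperState:

    import java.util.SortedSet;
    import java.util.TreeSet;

    public class SortedStatusSketch {
      static class KeeperState implements Comparable<KeeperState> {
        final String keeper;

        KeeperState(String keeper) {
          this.keeper = keeper;
        }

        @Override
        public int compareTo(KeeperState other) {
          if (this == other)
            return 0;
          if (this.keeper == null)
            return other.keeper == null ? 0 : -1;
          if (other.keeper == null)
            return 1;
          return this.keeper.compareTo(other.keeper);
        }

        @Override
        public String toString() {
          return keeper;
        }
      }

      public static void main(String[] args) {
        SortedSet<KeeperState> status = new TreeSet<KeeperState>();
        status.add(new KeeperState("zk2.example.com:2181"));
        status.add(new KeeperState("zk1.example.com:2181"));
        status.add(new KeeperState("zk3.example.com:2181"));
        System.out.println(status); // always [zk1..., zk2..., zk3...], regardless of insertion order
      }
    }
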
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/DefaultServlet.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/DefaultServlet.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/DefaultServlet.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/DefaultServlet.java Mon Jan 14 22:03:24 2013
@@ -21,8 +21,6 @@ import java.io.FileInputStream;
 import java.io.FilePermission;
 import java.io.IOException;
 import java.io.InputStream;
-import java.net.InetSocketAddress;
-import java.nio.charset.Charset;
 import java.security.AccessControlContext;
 import java.security.AccessController;
 import java.security.PermissionCollection;
@@ -54,19 +52,10 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.mapred.ClusterStatus;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobTracker;
 
 public class DefaultServlet extends BasicServlet {
   
   private static final long serialVersionUID = 1L;
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
   @Override
   protected String getTitle(HttpServletRequest req) {
@@ -87,16 +76,16 @@ public class DefaultServlet extends Basi
       InputStream data = BasicServlet.class.getClassLoader().getResourceAsStream(path);
       ServletOutputStream out = resp.getOutputStream();
       try {
-    	  if (data != null) {
-    		  byte[] buffer = new byte[1024];
-    		  int n;
-    		  while ((n = data.read(buffer)) > 0)
-    			  out.write(buffer, 0, n);
-    	  } else {
-    		  out.write(("could not get resource " + path + "").getBytes(utf8));
-    	  }
+        if (data != null) {
+          byte[] buffer = new byte[1024];
+          int n;
+          while ((n = data.read(buffer)) > 0)
+            out.write(buffer, 0, n);
+        } else {
+          out.write(("could not get resource " + path + "").getBytes());
+        }
       } finally {
-    	  data.close();
+        data.close();
       }
     } catch (Throwable t) {
       log.error(t, t);
@@ -120,7 +109,7 @@ public class DefaultServlet extends Basi
       
       @Override
       public IOException run() {
-    	InputStream data = null;
+        InputStream data = null;
         try {
           File file = new File(aHome + path);
           data = new FileInputStream(file.getAbsolutePath());
@@ -139,7 +128,7 @@ public class DefaultServlet extends Basi
             } catch (IOException ex) {
               log.error(ex, ex);
             }
-          } 
+          }
         }
       }
     }, acc);
@@ -247,14 +236,6 @@ public class DefaultServlet extends Basi
     sb.append("</td>\n");
     
     sb.append("<td class='noborder'>\n");
-    doHdfsTable(sb);
-    sb.append("</td>\n");
-    
-    sb.append("<td class='noborder'>\n");
-    doJobTrackerTable(sb);
-    sb.append("</td>\n");
-    
-    sb.append("<td class='noborder'>\n");
     doZooKeeperTable(sb);
     sb.append("</td>\n");
     
@@ -334,66 +315,6 @@ public class DefaultServlet extends Basi
     sb.append("</table>\n");
   }
   
-  private void doHdfsTable(StringBuilder sb) throws IOException {
-    // HDFS
-    Configuration conf = CachedConfiguration.getInstance();
-    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);
-    String httpAddress = conf.get("dfs.http.address");
-    String port = httpAddress.split(":")[1];
-    String href = "http://" + fs.getUri().getHost() + ":" + port;
-    String liveUrl = href + "/dfsnodelist.jsp?whatNodes=LIVE";
-    String deadUrl = href + "/dfsnodelist.jsp?whatNodes=DEAD";
-    sb.append("<table>\n");
-    sb.append("<tr><th colspan='2'><a href='" + href + "'>NameNode</a></th></tr>\n");
-    try {
-      boolean highlight = false;
-      tableRow(sb, (highlight = !highlight), "Unreplicated&nbsp;Capacity", bytes(fs.getRawCapacity()));
-      tableRow(sb, (highlight = !highlight), "%&nbsp;Used", NumberType.commas(fs.getRawUsed() * 100. / fs.getRawCapacity(), 0, 90, 0, 100) + "%");
-      tableRow(sb, (highlight = !highlight), "Corrupt&nbsp;Blocks", NumberType.commas(fs.getCorruptBlocksCount(), 0, 0));
-      DatanodeInfo[] liveNodes = fs.getClient().datanodeReport(FSConstants.DatanodeReportType.LIVE);
-      DatanodeInfo[] deadNodes = fs.getClient().datanodeReport(FSConstants.DatanodeReportType.DEAD);
-      tableRow(sb, (highlight = !highlight), "<a href='" + liveUrl + "'>Live&nbsp;Data&nbsp;Nodes</a>", NumberType.commas(liveNodes.length));
-      tableRow(sb, (highlight = !highlight), "<a href='" + deadUrl + "'>Dead&nbsp;Data&nbsp;Nodes</a>", NumberType.commas(deadNodes.length));
-      long count = 0;
-      for (DatanodeInfo stat : liveNodes)
-        count += stat.getXceiverCount();
-      tableRow(sb, (highlight = !highlight), "Xceivers", NumberType.commas(count));
-    } catch (RemoteException ex) {
-      sb.append("<tr><td colspan='2'>Permission&nbsp;Denied</td></tr>\n");
-    } catch (Exception ex) {
-      sb.append("<tr><td colspan='2'><span class='error'>Down</span></td></tr>\n");
-    }
-    sb.append("</table>\n");
-  }
-  
-  private void doJobTrackerTable(StringBuilder sb) {
-    // Job Tracker
-    Configuration conf = CachedConfiguration.getInstance();
-    sb.append("<table>\n");
-    try {
-      InetSocketAddress address = JobTracker.getAddress(conf);
-      
-      // No alternative api in hadoop 20
-      JobClient jc = new JobClient(new org.apache.hadoop.mapred.JobConf(conf));
-      String httpAddress = conf.get("mapred.job.tracker.http.address");
-      String port = httpAddress.split(":")[1];
-      String href = "http://" + address.getHostName() + ":" + port;
-      String activeUrl = href + "/machines.jsp?type=active";
-      String blacklistUrl = href + "/machines.jsp?type=blacklisted";
-      sb.append("<tr><th colspan='2'><a href='" + href + "'>JobTracker</a></th></tr>\n");
-      boolean highlight = false;
-      tableRow(sb, (highlight = !highlight), "Running&nbsp;Jobs", jc.jobsToComplete().length);
-      ClusterStatus status = jc.getClusterStatus();
-      tableRow(sb, (highlight = !highlight), "Map&nbsp;Tasks", status.getMapTasks() + "/" + status.getMaxMapTasks());
-      tableRow(sb, (highlight = !highlight), "Reduce&nbsp;Tasks", status.getReduceTasks() + "/" + status.getMaxReduceTasks());
-      tableRow(sb, (highlight = !highlight), "<a href='" + activeUrl + "'>Trackers</a>", status.getTaskTrackers());
-      tableRow(sb, (highlight = !highlight), "<a href='" + blacklistUrl + "'>Blacklisted</a>", status.getBlacklistedTrackers());
-    } catch (Exception ex) {
-      sb.append("<tr><td colspan='2'><span class='error'>Job Tracker is Down</span></td></tr>\n");
-    }
-    sb.append("</table>\n");
-  }
-  
   private void doZooKeeperTable(StringBuilder sb) throws IOException {
     // Zookeepers
     sb.append("<table>\n");

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/JSONServlet.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/JSONServlet.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/JSONServlet.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/JSONServlet.java Mon Jan 14 22:03:24 2013
@@ -16,6 +16,10 @@
  */
 package org.apache.accumulo.server.monitor.servlets;
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
 
 import javax.servlet.http.HttpServletRequest;
@@ -27,9 +31,13 @@ import org.apache.accumulo.core.master.t
 import org.apache.accumulo.server.monitor.Monitor;
 import org.apache.accumulo.server.monitor.util.celltypes.TServerLinkType;
 
+import com.google.gson.Gson;
+
 public class JSONServlet extends BasicServlet {
   private static final long serialVersionUID = 1L;
   
+  private Gson gson = new Gson();
+  
   @Override
   protected String getTitle(HttpServletRequest req) {
     return "JSON Report";
@@ -38,15 +46,22 @@ public class JSONServlet extends BasicSe
   @Override
   protected void pageStart(HttpServletRequest req, HttpServletResponse resp, StringBuilder sb) {
     resp.setContentType("application/json");
-    sb.append("{ 'servers': [\n");
   }
   
-  private static void addServerLine(StringBuilder sb, String ip, String hostname, double osload, double ingest, double query, double ingestMB, double queryMB,
+  private static Map<String,Object> addServer(String ip, String hostname, double osload, double ingest, double query, double ingestMB, double queryMB,
       int scans, double scansessions, long holdtime) {
-    sb.append("  {'ip': '").append(ip).append("',\n  'hostname': '").append(hostname).append("',\n  'osload': ").append(osload).append(",\n  'ingest': ")
-        .append(ingest).append(",\n  'query': ").append(query).append(",\n  'ingestMB': ").append(ingestMB).append(",\n  'queryMB': ").append(queryMB)
-        .append(",\n  'scans': ").append(scans).append(",\n  'scansessions': ").append(scansessions).append(",\n  'holdtime': ").append(holdtime)
-        .append("},\n");
+	Map<String,Object> map = new HashMap<String,Object>();
+	map.put("ip", ip);
+	map.put("hostname", hostname);
+	map.put("osload", osload);
+	map.put("ingest", ingest);
+	map.put("query", query);
+	map.put("ingestMB", ingestMB);
+	map.put("queryMB", queryMB);
+	map.put("scans", scans);
+	map.put("scans", scansessions);
+	map.put("holdtime", holdtime);
+	return map;
   }
   
   @Override
@@ -55,26 +70,34 @@ public class JSONServlet extends BasicSe
       return;
     }
     
+    Map<String,Object> results = new HashMap<String,Object>();
+    List<Map<String,Object>> servers = new ArrayList<Map<String,Object>>();
+    
     for (TabletServerStatus status : Monitor.getMmi().tServerInfo) {
       TableInfo summary = Monitor.summarizeTableStats(status);
-      addServerLine(sb, status.name, TServerLinkType.displayName(status.name), status.osLoad, summary.ingestRate, summary.queryRate,
+      servers.add(addServer(status.name, TServerLinkType.displayName(status.name), status.osLoad, summary.ingestRate, summary.queryRate,
           summary.ingestByteRate / 1000000.0, summary.queryByteRate / 1000000.0, summary.scans.running + summary.scans.queued, Monitor.getLookupRate(),
-          status.holdTime);
+          status.holdTime));
     }
     
     for (Entry<String,Byte> entry : Monitor.getMmi().badTServers.entrySet()) {
-      sb.append("  {'ip': '").append(entry.getKey()).append("',\n  'bad':true},\n");
+      Map<String,Object> badServer = new HashMap<String,Object>();
+      badServer.put("ip", entry.getKey());
+      badServer.put("bad", true);
+      servers.add(badServer);
     }
     
     for (DeadServer dead : Monitor.getMmi().deadTabletServers) {
-      sb.append("  {'ip': '").append(dead.server).append("',\n  'dead': true},\n");
+        Map<String,Object> deadServer = new HashMap<String,Object>();
+        deadServer.put("ip", dead.server);
+        deadServer.put("dead", true);
+        servers.add(deadServer);
     }
-    if (Monitor.getMmi().tServerInfo.size() > 0 || Monitor.getMmi().badTServers.size() > 0 || Monitor.getMmi().deadTabletServers.size() > 0)
-      sb.setLength(sb.length() - 2);
+    
+    results.put("servers", servers);
+    sb.append(gson.toJson(results));
   }
   
   @Override
-  protected void pageEnd(HttpServletRequest req, HttpServletResponse resp, StringBuilder sb) {
-    sb.append("\n  ]\n}\n");
-  }
+  protected void pageEnd(HttpServletRequest req, HttpServletResponse resp, StringBuilder sb) {}
 }

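JSONServlet stops concatenating JSON by hand and has Gson serialize a map of results, which removes the trailing-comma bookkeeping and quoting errors that come with manual string building. Reduced to a standalone example (requires the com.google.gson dependency; the field values are made up):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import com.google.gson.Gson;

    public class GsonSketch {
      public static void main(String[] args) {
        Map<String,Object> server = new HashMap<String,Object>();
        server.put("ip", "10.0.0.1:9997");
        server.put("hostname", "tserver1");
        server.put("osload", 0.42);
        server.put("holdtime", 0L);

        List<Map<String,Object>> servers = new ArrayList<Map<String,Object>>();
        servers.add(server);

        Map<String,Object> results = new HashMap<String,Object>();
        results.put("servers", servers);

        // Gson handles quoting, nesting, and commas; no StringBuilder bookkeeping.
        System.out.println(new Gson().toJson(results));
      }
    }
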
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TServersServlet.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TServersServlet.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TServersServlet.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TServersServlet.java Mon Jan 14 22:03:24 2013
@@ -158,8 +158,8 @@ public class TServersServlet extends Bas
         continue;
       }
       total.numEntries += info.numEntries;
-      TabletStatsKeeper.update(total.minor, info.minor);
-      TabletStatsKeeper.update(total.major, info.major);
+      TabletStatsKeeper.update(total.minors, info.minors);
+      TabletStatsKeeper.update(total.majors, info.majors);
       
       KeyExtent extent = new KeyExtent(info.extent);
       String tableId = extent.getTableId().toString();
@@ -176,36 +176,36 @@ public class TServersServlet extends Bas
       row.add(info.numEntries);
       row.add(info.ingestRate);
       row.add(info.queryRate);
-      row.add(info.minor.num != 0 ? info.minor.elapsed / info.minor.num : null);
-      row.add(stddev(info.minor.elapsed, info.minor.num, info.minor.sumDev));
-      row.add(info.minor.elapsed != 0 ? info.minor.count / info.minor.elapsed : null);
-      row.add(info.major.num != 0 ? info.major.elapsed / info.major.num : null);
-      row.add(stddev(info.major.elapsed, info.major.num, info.major.sumDev));
-      row.add(info.major.elapsed != 0 ? info.major.count / info.major.elapsed : null);
+      row.add(info.minors.num != 0 ? info.minors.elapsed / info.minors.num : null);
+      row.add(stddev(info.minors.elapsed, info.minors.num, info.minors.sumDev));
+      row.add(info.minors.elapsed != 0 ? info.minors.count / info.minors.elapsed : null);
+      row.add(info.majors.num != 0 ? info.majors.elapsed / info.majors.num : null);
+      row.add(stddev(info.majors.elapsed, info.majors.num, info.majors.sumDev));
+      row.add(info.majors.elapsed != 0 ? info.majors.count / info.majors.elapsed : null);
       perTabletResults.addRow(row);
     }
     
     // Calculate current averages oldServer adding in historical data
-    if (total.minor.num != 0)
-      currentMinorAvg = (long) (total.minor.elapsed / total.minor.num);
-    if (total.minor.elapsed != 0 && total.minor.num != 0)
-      currentMinorStdDev = stddev(total.minor.elapsed, total.minor.num, total.minor.sumDev);
-    if (total.major.num != 0)
-      currentMajorAvg = total.major.elapsed / total.major.num;
-    if (total.major.elapsed != 0 && total.major.num != 0 && total.major.elapsed > total.major.num)
-      currentMajorStdDev = stddev(total.major.elapsed, total.major.num, total.major.sumDev);
+    if (total.minors.num != 0)
+      currentMinorAvg = (long) (total.minors.elapsed / total.minors.num);
+    if (total.minors.elapsed != 0 && total.minors.num != 0)
+      currentMinorStdDev = stddev(total.minors.elapsed, total.minors.num, total.minors.sumDev);
+    if (total.majors.num != 0)
+      currentMajorAvg = total.majors.elapsed / total.majors.num;
+    if (total.majors.elapsed != 0 && total.majors.num != 0 && total.majors.elapsed > total.majors.num)
+      currentMajorStdDev = stddev(total.majors.elapsed, total.majors.num, total.majors.sumDev);
     
     // After these += operations, these variables are now total for current
     // tablets and historical tablets
-    TabletStatsKeeper.update(total.minor, historical.minor);
-    TabletStatsKeeper.update(total.major, historical.major);
-    totalElapsedForAll += total.major.elapsed + historical.split.elapsed + total.minor.elapsed;
-    
-    minorStdDev = stddev(total.minor.elapsed, total.minor.num, total.minor.sumDev);
-    minorQueueStdDev = stddev(total.minor.queueTime, total.minor.num, total.minor.queueSumDev);
-    majorStdDev = stddev(total.major.elapsed, total.major.num, total.major.sumDev);
-    majorQueueStdDev = stddev(total.major.queueTime, total.major.num, total.major.queueSumDev);
-    splitStdDev = stddev(historical.split.num, historical.split.elapsed, historical.split.sumDev);
+    TabletStatsKeeper.update(total.minors, historical.minors);
+    TabletStatsKeeper.update(total.majors, historical.majors);
+    totalElapsedForAll += total.majors.elapsed + historical.splits.elapsed + total.minors.elapsed;
+    
+    minorStdDev = stddev(total.minors.elapsed, total.minors.num, total.minors.sumDev);
+    minorQueueStdDev = stddev(total.minors.queueTime, total.minors.num, total.minors.queueSumDev);
+    majorStdDev = stddev(total.majors.elapsed, total.majors.num, total.majors.sumDev);
+    majorQueueStdDev = stddev(total.majors.queueTime, total.majors.num, total.majors.queueSumDev);
+    splitStdDev = stddev(historical.splits.num, historical.splits.elapsed, historical.splits.sumDev);
     
     doDetailTable(req, sb, address, tsStats.size(), total, historical);
     doAllTimeTable(req, sb, total, historical, majorQueueStdDev, minorQueueStdDev, totalElapsedForAll, splitStdDev, majorStdDev, minorStdDev);
@@ -237,14 +237,14 @@ public class TServersServlet extends Bas
     opHistoryDetails.addSortableColumn("Std.&nbsp;Dev.<br />Time", new SecondType(), null);
     opHistoryDetails.addSortableColumn("Percentage&nbsp;Time&nbsp;Spent", new ProgressChartType(totalElapsedForAll), null);
     
-    opHistoryDetails.addRow("Split", historical.split.num, historical.split.fail, null, null,
-        historical.split.num != 0 ? (historical.split.elapsed / historical.split.num) : null, splitStdDev, historical.split.elapsed);
-    opHistoryDetails.addRow("Major&nbsp;Compaction", total.major.num, total.major.fail,
-        total.major.num != 0 ? (total.major.queueTime / total.major.num) : null, majorQueueStdDev,
-        total.major.num != 0 ? (total.major.elapsed / total.major.num) : null, majorStdDev, total.major.elapsed);
-    opHistoryDetails.addRow("Minor&nbsp;Compaction", total.minor.num, total.minor.fail,
-        total.minor.num != 0 ? (total.minor.queueTime / total.minor.num) : null, minorQueueStdDev,
-        total.minor.num != 0 ? (total.minor.elapsed / total.minor.num) : null, minorStdDev, total.minor.elapsed);
+    opHistoryDetails.addRow("Split", historical.splits.num, historical.splits.fail, null, null,
+        historical.splits.num != 0 ? (historical.splits.elapsed / historical.splits.num) : null, splitStdDev, historical.splits.elapsed);
+    opHistoryDetails.addRow("Major&nbsp;Compaction", total.majors.num, total.majors.fail,
+        total.majors.num != 0 ? (total.majors.queueTime / total.majors.num) : null, majorQueueStdDev,
+        total.majors.num != 0 ? (total.majors.elapsed / total.majors.num) : null, majorStdDev, total.majors.elapsed);
+    opHistoryDetails.addRow("Minor&nbsp;Compaction", total.minors.num, total.minors.fail,
+        total.minors.num != 0 ? (total.minors.queueTime / total.minors.num) : null, minorQueueStdDev,
+        total.minors.num != 0 ? (total.minors.elapsed / total.minors.num) : null, minorStdDev, total.minors.elapsed);
     opHistoryDetails.generate(req, sb);
   }
   
@@ -256,7 +256,7 @@ public class TServersServlet extends Bas
     detailTable.addSortableColumn("Minor&nbsp;Compacting", new NumberType<Integer>(), null);
     detailTable.addSortableColumn("Major&nbsp;Compacting", new NumberType<Integer>(), null);
     detailTable.addSortableColumn("Splitting", new NumberType<Integer>(), null);
-    detailTable.addRow(numTablets, total.numEntries, total.minor.status, total.major.status, historical.split.status);
+    detailTable.addRow(numTablets, total.numEntries, total.minors.status, total.majors.status, historical.splits.status);
     detailTable.generate(req, sb);
   }
   

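Throughout TServersServlet the per-operation statistics fields are renamed (minor, major, split become minors, majors, splits); the servlet still derives averages and standard deviations from three running quantities per operation: an elapsed-time sum, a count, and a sumDev term. The stddev helper itself is not shown in this diff, so the following is only a hedged sketch of the usual way such running sums yield a mean and standard deviation, with sumDev assumed to hold a running sum of squares.

// Hedged sketch: turning running sums into a mean and a standard deviation.
// Parameter names mirror the diff (elapsed, num, sumDev); treating sumDev as a
// sum of squared values is an assumption about what the servlet accumulates.
public class RunningStatsSketch {
  static Double average(double elapsed, long num) {
    return num != 0 ? elapsed / num : null;   // same null-guard style as the servlet
  }

  static Double stddev(double elapsed, long num, double sumDev) {
    if (num == 0)
      return null;
    double mean = elapsed / num;
    double variance = sumDev / num - mean * mean;   // population variance from running sums
    return Math.sqrt(Math.max(variance, 0d));
  }

  public static void main(String[] args) {
    // e.g. three compactions taking 2s, 3s and 4s: elapsed = 9, sumDev = 4 + 9 + 16 = 29
    System.out.println(average(9, 3) + " avg, " + stddev(9, 3, 29) + " stddev");
  }
}
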
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/XMLServlet.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/XMLServlet.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/XMLServlet.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/XMLServlet.java Mon Jan 14 22:03:24 2013
@@ -72,9 +72,9 @@ public class XMLServlet extends BasicSer
       
       TableInfo summary = Monitor.summarizeTableStats(status);
       sb.append("<compactions>\n");
-      sb.append("<major>").append("<running>").append(summary.major.running).append("</running>").append("<queued>").append(summary.major.queued)
+      sb.append("<major>").append("<running>").append(summary.majors.running).append("</running>").append("<queued>").append(summary.majors.queued)
           .append("</queued>").append("</major>\n");
-      sb.append("<minor>").append("<running>").append(summary.minor.running).append("</running>").append("<queued>").append(summary.minor.queued)
+      sb.append("<minor>").append("<running>").append(summary.minors.running).append("</running>").append("<queued>").append(summary.minors.queued)
           .append("</queued>").append("</minor>\n");
       sb.append("</compactions>\n");
       
@@ -151,7 +151,7 @@ public class XMLServlet extends BasicSer
       sb.append("<queryByteRate>").append(tableInfo.queryRate).append("</queryByteRate>\n");
       int running = 0;
       int queued = 0;
-      Compacting compacting = entry.getValue().major;
+      Compacting compacting = entry.getValue().majors;
       if (compacting != null) {
         running = compacting.running;
         queued = compacting.queued;

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java Mon Jan 14 22:03:24 2013
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.server.monitor.servlets.trace;
 
-import java.nio.charset.Charset;
 import java.util.Date;
 
 import javax.servlet.http.HttpServletRequest;
@@ -28,7 +27,6 @@ import org.apache.accumulo.core.client.S
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.trace.TraceDump;
 import org.apache.accumulo.core.trace.TraceFormatter;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.monitor.Monitor;
@@ -37,8 +35,6 @@ import org.apache.accumulo.server.monito
 abstract class Basic extends BasicServlet {
   
   private static final long serialVersionUID = 1L;
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
   public static String getStringParameter(HttpServletRequest req, String name, String defaultValue) {
     String result = req.getParameter(name);
@@ -68,11 +64,11 @@ abstract class Basic extends BasicServle
   protected Scanner getScanner(StringBuilder sb) throws AccumuloException {
     AccumuloConfiguration conf = Monitor.getSystemConfiguration();
     String user = conf.get(Property.TRACE_USER);
-    byte[] passwd = conf.get(Property.TRACE_PASSWORD).getBytes(utf8);
+    byte[] passwd = conf.get(Property.TRACE_PASSWORD).getBytes();
     String table = conf.get(Property.TRACE_TABLE);
     try {
       Connector conn = HdfsZooInstance.getInstance().getConnector(user, passwd);
-      if (!conn.tableOperations().exists(TraceDump.TRACE_TABLE)) {
+      if (!conn.tableOperations().exists(table)) {
         return new NullScanner();
       }
       Scanner scanner = conn.createScanner(table, conn.securityOperations().getUserAuthorizations(user));

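The last hunk in Basic.java is a small behavior fix: the existence check now uses the table name read from Property.TRACE_TABLE instead of the hardcoded TraceDump.TRACE_TABLE constant, so a renamed trace table no longer silently falls back to the NullScanner. A sketch of that pattern against the client API follows; the helper method and its arguments are hypothetical, only the exists and createScanner calls track the diff.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.security.Authorizations;

// Sketch only: resolve the table name from configuration once, then use that
// same name for both the existence check and the scanner, as the fix does.
class TraceScannerSketch {
  static Scanner openTraceScanner(Connector conn, String configuredTable, Authorizations auths)
      throws TableNotFoundException {
    if (!conn.tableOperations().exists(configuredTable))
      return null;   // the servlet returns a NullScanner here rather than null
    return conn.createScanner(configuredTable, auths);
  }
}
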
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/NullScanner.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/NullScanner.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/NullScanner.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/NullScanner.java Mon Jan 14 22:03:24 2013
@@ -31,7 +31,7 @@ public class NullScanner implements Scan
   
   @Override
   public void addScanIterator(IteratorSetting cfg) {}
-    
+  
   @Override
   public void updateScanIteratorOption(String iteratorName, String key, String value) {}
   
@@ -47,9 +47,11 @@ public class NullScanner implements Scan
   @Override
   public void clearScanIterators() {}
   
+  @Deprecated
   @Override
   public void setTimeOut(int timeOut) {}
   
+  @Deprecated
   @Override
   public int getTimeOut() {
     return 0;

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/util/celltypes/CompactionsType.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/util/celltypes/CompactionsType.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/util/celltypes/CompactionsType.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/util/celltypes/CompactionsType.java Mon Jan 14 22:03:24 2013
@@ -32,9 +32,9 @@ public class CompactionsType extends Cel
     if (obj == null)
       return "-";
     TableInfo summary = (TableInfo) obj;
-    Compacting c = summary.major;
+    Compacting c = summary.majors;
     if (fieldName.equals("minor"))
-      c = summary.minor;
+      c = summary.minors;
     else if (fieldName.equals("scans"))
       c = summary.scans;
     if (c == null)
@@ -48,11 +48,11 @@ public class CompactionsType extends Cel
       return -1;
     if (o2 == null)
       return 1;
-    Compacting c1 = o1.major;
-    Compacting c2 = o2.major;
+    Compacting c1 = o1.majors;
+    Compacting c2 = o2.majors;
     if (fieldName.equals("minor")) {
-      c1 = o1.minor;
-      c2 = o2.minor;
+      c1 = o1.minors;
+      c2 = o2.minors;
     } else if (fieldName.equals("scans")) {
       c1 = o1.scans;
       c2 = o2.scans;

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/util/celltypes/TableStateType.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/util/celltypes/TableStateType.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/util/celltypes/TableStateType.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/monitor/util/celltypes/TableStateType.java Mon Jan 14 22:03:24 2013
@@ -31,12 +31,12 @@ public class TableStateType extends Cell
     String style = null;
     switch (state) {
       case ONLINE:
+      case OFFLINE:
         break;
       case NEW:
       case DELETING:
         style = "warning";
         break;
-      case OFFLINE:
       case UNKNOWN:
       default:
         style = "error";

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java Mon Jan 14 22:03:24 2013
@@ -55,7 +55,7 @@ public class ProblemReports implements I
   
   private static final Logger log = Logger.getLogger(ProblemReports.class);
   
-  private LRUMap problemReports = new LRUMap(1000);
+  private final LRUMap problemReports = new LRUMap(1000);
   
   /*
    * use a thread pool so that reporting a problem never blocks

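The comment above the changed field explains why ProblemReports hands work to a thread pool: reporting a problem must never block the caller. A hedged sketch of that pattern follows; the pool size, queue bound, and discard policy here are assumptions for illustration, not what ProblemReports actually configures.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Hedged sketch of the "never block the caller" pattern: hand the report to a
// small executor and, if its queue fills up, drop work rather than stall the
// reporting thread.
class NonBlockingReporterSketch {
  private final ThreadPoolExecutor reportExecutor = new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS,
      new LinkedBlockingQueue<Runnable>(500), new ThreadPoolExecutor.DiscardOldestPolicy());

  void report(final String problem) {
    reportExecutor.execute(new Runnable() {
      public void run() {
        // persist the report (e.g. to the metadata table) off the caller's thread
        System.out.println("recording problem: " + problem);
      }
    });
  }
}
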
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java Mon Jan 14 22:03:24 2013
@@ -22,7 +22,6 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.security.SecurityPermission;
@@ -40,8 +39,6 @@ import org.apache.commons.codec.binary.B
 public class SecurityConstants {
   private static SecurityPermission SYSTEM_CREDENTIALS_PERMISSION = new SecurityPermission("systemCredentialsPermission");
   
-  private static final Charset utf8 = Charset.forName("UTF8");
-  
   public static final String SYSTEM_USERNAME = "!SYSTEM";
   private static final byte[] SYSTEM_PASSWORD = makeSystemPassword();
   private static final AuthInfo systemCredentials = new AuthInfo(SYSTEM_USERNAME, ByteBuffer.wrap(SYSTEM_PASSWORD), HdfsZooInstance.getInstance()
@@ -57,8 +54,8 @@ public class SecurityConstants {
   }
   
   private static byte[] makeSystemPassword() {
-    byte[] version = Constants.VERSION.getBytes(utf8);
-    byte[] inst = HdfsZooInstance.getInstance().getInstanceID().getBytes(utf8);
+    byte[] version = Constants.VERSION.getBytes();
+    byte[] inst = HdfsZooInstance.getInstance().getInstanceID().getBytes();
     try {
       confChecksum = getSystemConfigChecksum();
     } catch (NoSuchAlgorithmException e) {
@@ -100,10 +97,10 @@ public class SecurityConstants {
     try {
       byte[] buff = new byte[in.readInt()];
       in.readFully(buff);
-      versionFails = !Arrays.equals(buff, Constants.VERSION.getBytes(utf8));
+      versionFails = !Arrays.equals(buff, Constants.VERSION.getBytes());
       buff = new byte[in.readInt()];
       in.readFully(buff);
-      instanceFails = !Arrays.equals(buff, HdfsZooInstance.getInstance().getInstanceID().getBytes(utf8));
+      instanceFails = !Arrays.equals(buff, HdfsZooInstance.getInstance().getInstanceID().getBytes());
       buff = new byte[in.readInt()];
       in.readFully(buff);
       confFails = !Arrays.equals(buff, getSystemConfigChecksum());
@@ -134,14 +131,14 @@ public class SecurityConstants {
       
       // seed the config with the version and instance id, so at least
       // it's not empty
-      md.update(Constants.VERSION.getBytes(utf8));
-      md.update(HdfsZooInstance.getInstance().getInstanceID().getBytes(utf8));
+      md.update(Constants.VERSION.getBytes());
+      md.update(HdfsZooInstance.getInstance().getInstanceID().getBytes());
       
       for (Entry<String,String> entry : ServerConfiguration.getSiteConfiguration()) {
         // only include instance properties
         if (entry.getKey().startsWith(Property.INSTANCE_PREFIX.toString())) {
-          md.update(entry.getKey().getBytes(utf8));
-          md.update(entry.getValue().getBytes(utf8));
+          md.update(entry.getKey().getBytes());
+          md.update(entry.getValue().getBytes());
         }
       }
       

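This file's change drops the explicit UTF8 Charset field and uses the default-charset getBytes() throughout; the checksum logic itself is unchanged: seed a digest with the version and instance id, then fold in only the instance-scoped site properties. A sketch of that idea follows; the digest algorithm, the Map used as a stand-in for the site configuration, and the sorting are assumptions, not details confirmed by the diff.

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Map;
import java.util.TreeMap;

// Sketch of the checksum idea: seed a digest with the version and instance id,
// then fold in only the instance-scoped configuration entries.
class ConfigChecksumSketch {
  static byte[] checksum(String version, String instanceId, Map<String,String> siteConfig)
      throws NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("MD5");   // assumed algorithm
    md.update(version.getBytes());
    md.update(instanceId.getBytes());
    // TreeMap gives a stable iteration order for the digest; the original code
    // iterates the site configuration directly.
    for (Map.Entry<String,String> e : new TreeMap<String,String>(siteConfig).entrySet()) {
      if (e.getKey().startsWith("instance.")) {            // mirrors Property.INSTANCE_PREFIX
        md.update(e.getKey().getBytes());
        md.update(e.getValue().getBytes());
      }
    }
    return md.digest();
  }
}
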
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/InMemoryMap.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/InMemoryMap.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/InMemoryMap.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/InMemoryMap.java Mon Jan 14 22:03:24 2013
@@ -387,7 +387,7 @@ public class InMemoryMap {
     return map.size();
   }
   
-  private Set<MemoryIterator> activeIters = Collections.synchronizedSet(new HashSet<MemoryIterator>());
+  private final Set<MemoryIterator> activeIters = Collections.synchronizedSet(new HashSet<MemoryIterator>());
   
   class MemoryDataSource implements DataSource {
     

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/NativeMap.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/NativeMap.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/NativeMap.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/NativeMap.java Mon Jan 14 22:03:24 2013
@@ -59,9 +59,9 @@ public class NativeMap implements Iterab
   
   private long nmPointer;
   
-  private ReadWriteLock rwLock;
-  private Lock rlock;
-  private Lock wlock;
+  private final ReadWriteLock rwLock;
+  private final Lock rlock;
+  private final Lock wlock;
   
   int modCount = 0;
   

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java Mon Jan 14 22:03:24 2013
@@ -24,7 +24,6 @@ package org.apache.accumulo.server.table
  */
 
 import java.io.IOException;
-import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -123,6 +122,7 @@ import org.apache.accumulo.server.util.M
 import org.apache.accumulo.server.util.MetadataTable.LogEntry;
 import org.apache.accumulo.server.util.TabletOperations;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 import org.apache.commons.codec.DecoderException;
 import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.conf.Configuration;
@@ -146,8 +146,6 @@ import org.apache.zookeeper.KeeperExcept
 
 public class Tablet {
   
-  private Charset utf8 = Charset.forName("UTF8");
-    
   enum MajorCompactionReason {
     // do not change the order, the order of this enum determines the order
     // in which queued major compactions are executed
@@ -373,9 +371,9 @@ public class Tablet {
   
   private TabletMemory tabletMemory;
   
-  private TabletTime tabletTime;
+  private final TabletTime tabletTime;
   private long persistedTime;
-  private Object timeLock = new Object();
+  private final Object timeLock = new Object();
   
   private Path location; // absolute path of this tablets dir
   private TServerInstance lastLocation;
@@ -449,7 +447,7 @@ public class Tablet {
   
   private final int logId;
   // ensure we only have one reader/writer of our bulk file notes at a time
-  public Object bulkFileImportLock = new Object();
+  public final Object bulkFileImportLock = new Object();
   
   public int getLogId() {
     return logId;
@@ -1472,6 +1470,13 @@ public class Tablet {
           + " entries created)");
     }
     
+    String contextName = acuTableConf.get(Property.TABLE_CLASSPATH);
+    if (contextName != null && !contextName.equals("")) {
+      // initialize context classloader, instead of possibly waiting for it to initialize for a scan
+      // TODO this could hang causing other tablets to fail to load
+      AccumuloVFSClassLoader.getContextManager().getClassLoader(contextName);
+    }
+
     // do this last after tablet is completely setup because it
     // could cause major compaction to start
     datafileManager = new DatafileManager(datafiles);
@@ -2177,7 +2182,7 @@ public class Tablet {
     private DataFileValue stats;
     private String mergeFile;
     private long flushId;
-
+    
     MinorCompactionTask(String mergeFile, CommitSession commitSession, long flushId) {
       queued = System.currentTimeMillis();
       minorCompactionWaitingToStart = true;
@@ -2408,7 +2413,7 @@ public class Tablet {
       List<IteratorSetting> allIters = new ArrayList<IteratorSetting>();
       for (int i = 1; i < tokens.length; i++) {
         Hex hex = new Hex();
-        List<IteratorSetting> iters = IteratorUtil.decodeIteratorSettings(hex.decode(tokens[i].split("=")[1].getBytes(utf8)));
+        List<IteratorSetting> iters = IteratorUtil.decodeIteratorSettings(hex.decode(tokens[i].split("=")[1].getBytes()));
         allIters.addAll(iters);
       }
       
@@ -3705,7 +3710,7 @@ public class Tablet {
   private boolean removingLogs = false;
   
   // this lock is basically used to synchronize writing of log info to !METADATA
-  private ReentrantLock logLock = new ReentrantLock();
+  private final ReentrantLock logLock = new ReentrantLock();
   
   public synchronized int getLogCount() {
     return currentLogs.size();

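Besides marking more fields final, the Tablet change adds an eager initialization of the per-table context classloader while the tablet loads (when Property.TABLE_CLASSPATH is set), instead of paying that cost on the first scan; the TODO in the hunk notes that a hung classloader fetch could delay other tablet loads. A sketch of that guard follows; only the AccumuloVFSClassLoader call is taken from the diff, the wrapper method is hypothetical.

import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;

// Sketch of the eager-initialization guard added in this commit; the wrapper
// method and its caller are made up, the classloader lookup comes from the diff.
class ContextClassLoaderWarmup {
  static void warmUp(String contextName) throws Exception {
    if (contextName != null && !contextName.equals("")) {
      // resolve (and cache) the table's context classloader now rather than on
      // the first scan that needs it
      AccumuloVFSClassLoader.getContextManager().getClassLoader(contextName);
    }
  }
}
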

