accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From els...@apache.org
Subject [13/16] git commit: Merge branch '1.5.1-SNAPSHOT' into 1.6.0-SNAPSHOT
Date Tue, 11 Feb 2014 18:41:54 GMT
Merge branch '1.5.1-SNAPSHOT' into 1.6.0-SNAPSHOT

Conflicts:
	core/src/main/java/org/apache/accumulo/core/Constants.java
	core/src/main/java/org/apache/accumulo/core/conf/Property.java
	core/src/main/java/org/apache/accumulo/core/util/AddressUtil.java
	server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
	server/base/src/main/java/org/apache/accumulo/server/monitor/LogService.java
	server/base/src/main/java/org/apache/accumulo/server/util/Info.java
	server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/099701cc
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/099701cc
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/099701cc

Branch: refs/heads/1.6.0-SNAPSHOT
Commit: 099701cc30376a8915ae7030f4bc314361391c60
Parents: 2d42417 0351d0d
Author: Josh Elser <elserj@apache.org>
Authored: Tue Feb 11 11:08:28 2014 -0500
Committer: Josh Elser <elserj@apache.org>
Committed: Tue Feb 11 13:08:16 2014 -0500

----------------------------------------------------------------------
 .../1GB/native-standalone/accumulo-env.sh       |   1 -
 conf/examples/1GB/standalone/accumulo-env.sh    |   1 -
 .../2GB/native-standalone/accumulo-env.sh       |   1 -
 conf/examples/2GB/standalone/accumulo-env.sh    |   1 -
 .../3GB/native-standalone/accumulo-env.sh       |   1 -
 conf/examples/3GB/standalone/accumulo-env.sh    |   1 -
 .../512MB/native-standalone/accumulo-env.sh     |   1 -
 conf/examples/512MB/standalone/accumulo-env.sh  |   1 -
 .../org/apache/accumulo/core/Constants.java     |   6 +-
 .../org/apache/accumulo/core/conf/Property.java |   2 +
 .../apache/accumulo/core/util/MonitorUtil.java  |   4 +-
 .../org/apache/accumulo/server/Accumulo.java    |  77 +----
 .../apache/accumulo/server/init/Initialize.java |   2 +
 .../accumulo/server/monitor/LogService.java     |  39 ++-
 .../server/watcher/MonitorLog4jWatcher.java     | 154 +++++++++
 .../org/apache/accumulo/monitor/Monitor.java    | 312 +++++++++++++------
 16 files changed, 425 insertions(+), 179 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/conf/examples/1GB/native-standalone/accumulo-env.sh
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/conf/examples/1GB/standalone/accumulo-env.sh
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/conf/examples/2GB/native-standalone/accumulo-env.sh
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/conf/examples/2GB/standalone/accumulo-env.sh
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/conf/examples/3GB/native-standalone/accumulo-env.sh
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/conf/examples/3GB/standalone/accumulo-env.sh
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/conf/examples/512MB/native-standalone/accumulo-env.sh
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/conf/examples/512MB/standalone/accumulo-env.sh
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/core/src/main/java/org/apache/accumulo/core/Constants.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/Constants.java
index 9065fd5,095319e..e0e88eb
--- a/core/src/main/java/org/apache/accumulo/core/Constants.java
+++ b/core/src/main/java/org/apache/accumulo/core/Constants.java
@@@ -45,65 -60,120 +45,67 @@@ public class Constants 
    public static final String ZMASTERS = "/masters";
    public static final String ZMASTER_LOCK = ZMASTERS + "/lock";
    public static final String ZMASTER_GOAL_STATE = ZMASTERS + "/goal_state";
 +
    public static final String ZGC = "/gc";
    public static final String ZGC_LOCK = ZGC + "/lock";
 -  
 +
    public static final String ZMONITOR = "/monitor";
-   public static final String ZMONITOR_LOG4J_PORT = ZMONITOR + "/log4j_port";
- 
+   public static final String ZMONITOR_LOCK = ZMONITOR + "/lock";
+   public static final String ZMONITOR_HTTP_ADDR = ZMONITOR + "/http_addr";
+   public static final String ZMONITOR_LOG4J_ADDR = ZMONITOR + "/log4j_addr";
+   
    public static final String ZCONFIG = "/config";
 -  
 +
    public static final String ZTSERVERS = "/tservers";
 -  
 +
    public static final String ZDEAD = "/dead";
 -  public static final String ZDEADTSERVERS = "/dead/tservers";
 -  
 +  public static final String ZDEADTSERVERS = ZDEAD + "/tservers";
 +
    public static final String ZTRACERS = "/tracers";
 -  
 +
    public static final String ZPROBLEMS = "/problems";
 -  public static final String ZUSERS = "/users";
 -  
 +
    public static final String BULK_ARBITRATOR_TYPE = "bulkTx";
 -  
 +
    public static final String ZFATE = "/fate";
 -  
 +
    public static final String ZNEXT_FILE = "/next_file";
 -  
 +
    public static final String ZBULK_FAILED_COPYQ = "/bulk_failed_copyq";
 -  
 +
    public static final String ZHDFS_RESERVATIONS = "/hdfs_reservations";
    public static final String ZRECOVERY = "/recovery";
 -  
 -  public static final String METADATA_TABLE_ID = "!0";
 -  public static final String METADATA_TABLE_NAME = "!METADATA";
 +
 +  /**
 +   * Initial tablet directory name for the default tablet in all tables
 +   */
    public static final String DEFAULT_TABLET_LOCATION = "/default_tablet";
 -  public static final String TABLE_TABLET_LOCATION = "/table_info";
 +
    public static final String ZTABLE_LOCKS = "/table_locks";
 -  
 -  // reserved keyspace is any row that begins with a tilde '~' character
 -  public static final Key METADATA_RESERVED_KEYSPACE_START_KEY = new Key(new Text(new byte[] {'~'}));
 -  public static final Key METADATA_RESERVED_KEYSPACE_STOP_KEY = new Key(new Text(new byte[] {'~' + 1}));
 -  public static final Range METADATA_RESERVED_KEYSPACE = new Range(METADATA_RESERVED_KEYSPACE_START_KEY, true, METADATA_RESERVED_KEYSPACE_STOP_KEY, false);
 -  public static final String METADATA_DELETE_FLAG_PREFIX = "~del";
 -  public static final String METADATA_DELETE_FLAG_FOR_METADATA_PREFIX = "!!" + METADATA_DELETE_FLAG_PREFIX;
 -  public static final Range METADATA_DELETES_KEYSPACE = new Range(new Key(new Text(METADATA_DELETE_FLAG_PREFIX)), true, new Key(new Text("~dem")), false);
 -  public static final Range METADATA_DELETES_FOR_METADATA_KEYSPACE = new Range(new Key(new Text(METADATA_DELETE_FLAG_FOR_METADATA_PREFIX)), true, new Key(new Text("!!~dem")), false);
 -  public static final String METADATA_BLIP_FLAG_PREFIX = "~blip"; // BLIP = bulk load in progress
 -  public static final Range METADATA_BLIP_KEYSPACE = new Range(new Key(new Text(METADATA_BLIP_FLAG_PREFIX)), true, new Key(new Text("~bliq")), false);
 -  
 -  public static final Text METADATA_SERVER_COLUMN_FAMILY = new Text("srv");
 -  public static final Text METADATA_TABLET_COLUMN_FAMILY = new Text("~tab"); // this needs to sort after all other column families for that tablet
 -  public static final Text METADATA_CURRENT_LOCATION_COLUMN_FAMILY = new Text("loc");
 -  public static final Text METADATA_FUTURE_LOCATION_COLUMN_FAMILY = new Text("future");
 -  public static final Text METADATA_LAST_LOCATION_COLUMN_FAMILY = new Text("last");
 -  public static final Text METADATA_BULKFILE_COLUMN_FAMILY = new Text("loaded"); // temporary marker that indicates a tablet loaded a bulk file
 -  public static final Text METADATA_CLONED_COLUMN_FAMILY = new Text("!cloned"); // temporary marker that indicates a tablet was successfully cloned
 -  
 -  // README : very important that prevRow sort last to avoid race conditions between
 -  // garbage collector and split
 -  public static final ColumnFQ METADATA_PREV_ROW_COLUMN = new ColumnFQ(METADATA_TABLET_COLUMN_FAMILY, new Text("~pr")); // this needs to sort after everything
 -                                                                                                                        // else for that tablet
 -  public static final ColumnFQ METADATA_OLD_PREV_ROW_COLUMN = new ColumnFQ(METADATA_TABLET_COLUMN_FAMILY, new Text("oldprevrow"));
 -  public static final ColumnFQ METADATA_DIRECTORY_COLUMN = new ColumnFQ(METADATA_SERVER_COLUMN_FAMILY, new Text("dir"));
 -  public static final ColumnFQ METADATA_TIME_COLUMN = new ColumnFQ(METADATA_SERVER_COLUMN_FAMILY, new Text("time"));
 -  public static final ColumnFQ METADATA_FLUSH_COLUMN = new ColumnFQ(METADATA_SERVER_COLUMN_FAMILY, new Text("flush"));
 -  public static final ColumnFQ METADATA_COMPACT_COLUMN = new ColumnFQ(METADATA_SERVER_COLUMN_FAMILY, new Text("compact"));
 -  public static final ColumnFQ METADATA_SPLIT_RATIO_COLUMN = new ColumnFQ(METADATA_TABLET_COLUMN_FAMILY, new Text("splitRatio"));
 -  public static final ColumnFQ METADATA_LOCK_COLUMN = new ColumnFQ(METADATA_SERVER_COLUMN_FAMILY, new Text("lock"));
 -  
 -  public static final Text METADATA_DATAFILE_COLUMN_FAMILY = new Text("file");
 -  public static final Text METADATA_SCANFILE_COLUMN_FAMILY = new Text("scan");
 -  public static final Text METADATA_LOG_COLUMN_FAMILY = new Text("log");
 -  public static final Text METADATA_CHOPPED_COLUMN_FAMILY = new Text("chopped");
 -  public static final ColumnFQ METADATA_CHOPPED_COLUMN = new ColumnFQ(METADATA_CHOPPED_COLUMN_FAMILY, new Text("chopped"));
 -  
 -  public static final Range NON_ROOT_METADATA_KEYSPACE = new Range(
 -      new Key(KeyExtent.getMetadataEntry(new Text(METADATA_TABLE_ID), null)).followingKey(PartialKey.ROW), true, METADATA_RESERVED_KEYSPACE_START_KEY, false);
 -  public static final Range METADATA_KEYSPACE = new Range(new Key(new Text(METADATA_TABLE_ID)), true, METADATA_RESERVED_KEYSPACE_START_KEY, false);
 -  
 -  public static final KeyExtent ROOT_TABLET_EXTENT = new KeyExtent(new Text(METADATA_TABLE_ID), KeyExtent.getMetadataEntry(new Text(METADATA_TABLE_ID), null),
 -      null);
 -  public static final Range METADATA_ROOT_TABLET_KEYSPACE = new Range(ROOT_TABLET_EXTENT.getMetadataEntry(), false, KeyExtent.getMetadataEntry(new Text(
 -      METADATA_TABLE_ID), null), true);
 -  
 -  public static final String VALUE_ENCODING = "UTF-8";
 -  
 +
    public static final String BULK_PREFIX = "b-";
 -  public static final String OLD_BULK_PREFIX = "bulk_";
 -  
 -  // note: all times are in milliseconds
 -  
 -  public static final int SCAN_BATCH_SIZE = 1000; // this affects the table client caching of metadata
 -  
 -  public static final long MIN_MASTER_LOOP_TIME = 1000;
 -  public static final int MASTER_TABLETSERVER_CONNECTION_TIMEOUT = 3000;
 -  public static final long CLIENT_SLEEP_BEFORE_RECONNECT = 1000;
 -  
 +
 +  // this affects the table client caching of metadata
 +  public static final int SCAN_BATCH_SIZE = 1000;
 +
 +  // Scanners will default to fetching 3 batches of Key/Value pairs before asynchronously
 +  // fetching the next batch.
 +  public static final long SCANNER_DEFAULT_READAHEAD_THRESHOLD = 3l;
 +
    // Security configuration
    public static final String PW_HASH_ALGORITHM = "SHA-256";
 -  
 -  // Representation of an empty set of authorizations
 -  // (used throughout the code, because scans of metadata table and many tests do not set record-level visibility)
 -  public static final Authorizations NO_AUTHS = new Authorizations();
 -  
 -  public static final int DEFAULT_MINOR_COMPACTION_MAX_SLEEP_TIME = 60 * 3; // in seconds
 -  
 +
 +  /**
 +   * @deprecated since 1.6.0; Use {@link Authorizations#EMPTY} instead
 +   */
 +  @Deprecated
 +  public static final Authorizations NO_AUTHS = Authorizations.EMPTY;
 +
    public static final int MAX_DATA_TO_PRINT = 64;
 -  public static final int CLIENT_RETRIES = 5;
 -  public static final int TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER = 2;
    public static final String CORE_PACKAGE_NAME = "org.apache.accumulo.core";
 -  public static final String OLD_PACKAGE_NAME = "cloudbase";
 -  public static final String VALID_TABLE_NAME_REGEX = "^\\w+$";
    public static final String MAPFILE_EXTENSION = "map";
    public static final String GENERATED_TABLET_DIRECTORY_PREFIX = "t-";
 -  
 +
    public static final String EXPORT_METADATA_FILE = "metadata.bin";
    public static final String EXPORT_TABLE_CONFIG_FILE = "table_config.txt";
    public static final String EXPORT_FILE = "exportMetadata.zip";

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/core/src/main/java/org/apache/accumulo/core/conf/Property.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/conf/Property.java
index bc4c7e1,1c2dfdb..63c720e
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@@ -281,18 -228,12 +281,20 @@@ public enum Property 
    MONITOR_BANNER_COLOR("monitor.banner.color", "#c4c4c4", PropertyType.STRING, "The color of the banner text displayed on the monitor page."),
    MONITOR_BANNER_BACKGROUND("monitor.banner.background", "#304065", PropertyType.STRING,
        "The background color of the banner text displayed on the monitor page."),
 -  MONITOR_SSL_KEYSTORE("monitor.ssl.keyStore", "", PropertyType.PATH, "The keystore for enabling monitor SSL.", true, false),
 -  MONITOR_SSL_KEYSTOREPASS("monitor.ssl.keyStorePassword", "", PropertyType.STRING, "The keystore password for enabling monitor SSL.", true, false),
 -  MONITOR_SSL_TRUSTSTORE("monitor.ssl.trustStore", "", PropertyType.PATH, "The truststore for enabling monitor SSL.", true, false),
 -  MONITOR_SSL_TRUSTSTOREPASS("monitor.ssl.trustStorePassword", "", PropertyType.STRING, "The truststore password for enabling monitor SSL.", true, false),
 +
 +  @Experimental
 +  MONITOR_SSL_KEYSTORE("monitor.ssl.keyStore", "", PropertyType.PATH, "The keystore for enabling monitor SSL."),
 +  @Experimental
 +  @Sensitive
 +  MONITOR_SSL_KEYSTOREPASS("monitor.ssl.keyStorePassword", "", PropertyType.STRING, "The keystore password for enabling monitor SSL."),
 +  @Experimental
 +  MONITOR_SSL_TRUSTSTORE("monitor.ssl.trustStore", "", PropertyType.PATH, "The truststore for enabling monitor SSL."),
 +  @Experimental
 +  @Sensitive
 +  MONITOR_SSL_TRUSTSTOREPASS("monitor.ssl.trustStorePassword", "", PropertyType.STRING, "The truststore password for enabling monitor SSL."),
 +
+   MONITOR_LOCK_CHECK_INTERVAL("monitor.lock.check.interval", "5s", PropertyType.TIMEDURATION, "The amount of time to sleep between checking for the Montior ZooKeeper lock"),
+   
    TRACE_PREFIX("trace.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of distributed tracing."),
    TRACE_PORT("trace.port.client", "12234", PropertyType.PORT, "The listening port for the trace server"),
    TRACE_TABLE("trace.table", "trace", PropertyType.STRING, "The name of the table to store distributed traces"),

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/core/src/main/java/org/apache/accumulo/core/util/MonitorUtil.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/util/MonitorUtil.java
index 158a170,0000000..1ebbf13
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/util/MonitorUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/MonitorUtil.java
@@@ -1,31 -1,0 +1,31 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.util;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.ZooReader;
 +import org.apache.zookeeper.KeeperException;
 +
 +public class MonitorUtil {
 +  public static String getLocation(Instance instance) throws KeeperException, InterruptedException {
 +    ZooReader zr = new ZooReader(instance.getZooKeepers(), 5000);
-     byte[] loc = zr.getData(ZooUtil.getRoot(instance) + Constants.ZMONITOR, null);
-     return loc==null ? null : new String(loc);
++    byte[] loc = zr.getData(ZooUtil.getRoot(instance) + Constants.ZMONITOR_HTTP_ADDR, null);
++    return loc==null ? null : new String(loc, Constants.UTF8);
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
index 728a0b5,0000000..2fa9051
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
@@@ -1,288 -1,0 +1,225 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.server;
 +
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.net.InetAddress;
 +import java.net.UnknownHostException;
 +import java.util.Map.Entry;
 +import java.util.TreeMap;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.trace.DistributedTrace;
 +import org.apache.accumulo.core.util.AddressUtil;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.accumulo.core.util.Version;
- import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.conf.ServerConfiguration;
 +import org.apache.accumulo.server.fs.VolumeManager;
 +import org.apache.accumulo.server.util.time.SimpleTimer;
++import org.apache.accumulo.server.watcher.MonitorLog4jWatcher;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.apache.hadoop.fs.FileStatus;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
- import org.apache.log4j.LogManager;
 +import org.apache.log4j.Logger;
- import org.apache.log4j.helpers.FileWatchdog;
 +import org.apache.log4j.helpers.LogLog;
 +import org.apache.log4j.xml.DOMConfigurator;
 +import org.apache.zookeeper.KeeperException;
- import org.apache.zookeeper.WatchedEvent;
- import org.apache.zookeeper.Watcher;
 +
 +public class Accumulo {
 +  
 +  private static final Logger log = Logger.getLogger(Accumulo.class);
 +  
 +  public static synchronized void updateAccumuloVersion(VolumeManager fs) {
 +    try {
 +      if (getAccumuloPersistentVersion(fs) == ServerConstants.PREV_DATA_VERSION) {
 +        fs.create(new Path(ServerConstants.getDataVersionLocation() + "/" + ServerConstants.DATA_VERSION));
 +        fs.delete(new Path(ServerConstants.getDataVersionLocation() + "/" + ServerConstants.PREV_DATA_VERSION));
 +      }
 +    } catch (IOException e) {
 +      throw new RuntimeException("Unable to set accumulo version: an error occurred.", e);
 +    }
 +  }
 +  
 +  public static synchronized int getAccumuloPersistentVersion(FileSystem fs, Path path) {
 +    int dataVersion;
 +    try {
 +      FileStatus[] files = fs.listStatus(path);
 +      if (files == null || files.length == 0) {
 +        dataVersion = -1; // assume it is 0.5 or earlier
 +      } else {
 +        dataVersion = Integer.parseInt(files[0].getPath().getName());
 +      }
 +      return dataVersion;
 +    } catch (IOException e) {
 +      throw new RuntimeException("Unable to read accumulo version: an error occurred.", e);
 +    }
 +  }
 +  
 +  public static synchronized int getAccumuloPersistentVersion(VolumeManager fs) {
 +    Path path = ServerConstants.getDataVersionLocation();
 +    return getAccumuloPersistentVersion(fs.getFileSystemByPath(path), path);
 +  }
 +
 +  public static void enableTracing(String address, String application) {
 +    try {
 +      DistributedTrace.enable(HdfsZooInstance.getInstance(), ZooReaderWriter.getInstance(), application, address);
 +    } catch (Exception ex) {
 +      log.error("creating remote sink for trace spans", ex);
 +    }
 +  }
 +  
-   private static class LogMonitor extends FileWatchdog implements Watcher {
-     String path;
-     
-     protected LogMonitor(String instance, String filename, int delay) {
-       super(filename);
-       setDelay(delay);
-       this.path = ZooUtil.getRoot(instance) + Constants.ZMONITOR_LOG4J_PORT;
-     }
-     
-     private void setMonitorPort() {
-       try {
-         String port = new String(ZooReaderWriter.getInstance().getData(path, null), Constants.UTF8);
-         System.setProperty("org.apache.accumulo.core.host.log.port", port);
-         log.info("Changing monitor log4j port to "+port);
-         doOnChange();
-       } catch (Exception e) {
-         log.error("Error reading zookeeper data for monitor log4j port", e);
-       }
-     }
-     
-     @Override
-     public void run() {
-       try {
-         if (ZooReaderWriter.getInstance().getZooKeeper().exists(path, this) != null)
-           setMonitorPort();
-         log.info("Set watch for monitor log4j port");
-       } catch (Exception e) {
-         log.error("Unable to set watch for monitor log4j port " + path);
-       }
-       super.run();
-     }
-     
-     @Override
-     protected void doOnChange() {
-       LogManager.resetConfiguration();
-       new DOMConfigurator().doConfigure(filename, LogManager.getLoggerRepository());
-     }
-     
-     @Override
-     public void process(WatchedEvent event) {
-       setMonitorPort();
-       if (event.getPath() != null) {
-         try {
-           ZooReaderWriter.getInstance().exists(event.getPath(), this);
-         } catch (Exception ex) {
-           log.error("Unable to reset watch for monitor log4j port", ex);
-         }
-       }
-     }
-   }
-   
 +  public static void init(VolumeManager fs, ServerConfiguration config, String application) throws UnknownHostException {
 +    
 +    System.setProperty("org.apache.accumulo.core.application", application);
 +    
 +    if (System.getenv("ACCUMULO_LOG_DIR") != null)
 +      System.setProperty("org.apache.accumulo.core.dir.log", System.getenv("ACCUMULO_LOG_DIR"));
 +    else
 +      System.setProperty("org.apache.accumulo.core.dir.log", System.getenv("ACCUMULO_HOME") + "/logs/");
 +    
 +    String localhost = InetAddress.getLocalHost().getHostName();
 +    System.setProperty("org.apache.accumulo.core.ip.localhost.hostname", localhost);
 +    
-     if (System.getenv("ACCUMULO_LOG_HOST") != null)
-       System.setProperty("org.apache.accumulo.core.host.log", System.getenv("ACCUMULO_LOG_HOST"));
-     else
-       System.setProperty("org.apache.accumulo.core.host.log", localhost);
-     
 +    int logPort = config.getConfiguration().getPort(Property.MONITOR_LOG4J_PORT);
 +    System.setProperty("org.apache.accumulo.core.host.log.port", Integer.toString(logPort));
 +    
 +    // Use a specific log config, if it exists
 +    String logConfig = String.format("%s/%s_logger.xml", System.getenv("ACCUMULO_CONF_DIR"), application);
 +    if (!new File(logConfig).exists()) {
 +      // otherwise, use the generic config
 +      logConfig = String.format("%s/generic_logger.xml", System.getenv("ACCUMULO_CONF_DIR"));
 +    }
 +    // Turn off messages about not being able to reach the remote logger... we protect against that.
 +    LogLog.setQuietMode(true);
-     
-     // Configure logging
-     if (logPort==0)
-       new LogMonitor(config.getInstance().getInstanceID(), logConfig, 5000).start();
-     else
-       DOMConfigurator.configureAndWatch(logConfig, 5000);
-     
++
 +    // Read the auditing config
 +    String auditConfig = String.format("%s/auditLog.xml", System.getenv("ACCUMULO_CONF_DIR"));
-     
++
 +    DOMConfigurator.configureAndWatch(auditConfig, 5000);
-     
++
++    // Configure logging using information advertised in zookeeper by the monitor
++    new MonitorLog4jWatcher(config.getInstance().getInstanceID(), logConfig, 5000).start();
++
 +    log.info(application + " starting");
 +    log.info("Instance " + config.getInstance().getInstanceID());
 +    int dataVersion = Accumulo.getAccumuloPersistentVersion(fs);
 +    log.info("Data Version " + dataVersion);
 +    Accumulo.waitForZookeeperAndHdfs(fs);
 +    
 +    Version codeVersion = new Version(Constants.VERSION);
 +    if (dataVersion != ServerConstants.DATA_VERSION && dataVersion != ServerConstants.PREV_DATA_VERSION) {
 +      throw new RuntimeException("This version of accumulo (" + codeVersion + ") is not compatible with files stored using data version " + dataVersion);
 +    }
 +    
 +    TreeMap<String,String> sortedProps = new TreeMap<String,String>();
 +    for (Entry<String,String> entry : config.getConfiguration())
 +      sortedProps.put(entry.getKey(), entry.getValue());
 +    
 +    for (Entry<String,String> entry : sortedProps.entrySet()) {
 +      String key = entry.getKey();
 +      log.info(key + " = " + (Property.isSensitive(key) ? "<hidden>" : entry.getValue()));
 +    }
 +    
 +    monitorSwappiness();
 +  }
 +  
 +  /**
 +   * 
 +   */
 +  public static void monitorSwappiness() {
 +    SimpleTimer.getInstance().schedule(new Runnable() {
 +      @Override
 +      public void run() {
 +        try {
 +          String procFile = "/proc/sys/vm/swappiness";
 +          File swappiness = new File(procFile);
 +          if (swappiness.exists() && swappiness.canRead()) {
 +            InputStream is = new FileInputStream(procFile);
 +            try {
 +              byte[] buffer = new byte[10];
 +              int bytes = is.read(buffer);
 +              String setting = new String(buffer, 0, bytes, Constants.UTF8);
 +              setting = setting.trim();
 +              if (bytes > 0 && Integer.parseInt(setting) > 10) {
 +                log.warn("System swappiness setting is greater than ten (" + setting + ") which can cause time-sensitive operations to be delayed. "
 +                    + " Accumulo is time sensitive because it needs to maintain distributed lock agreement.");
 +              }
 +            } finally {
 +              is.close();
 +            }
 +          }
 +        } catch (Throwable t) {
 +          log.error(t, t);
 +        }
 +      }
 +    }, 1000, 10 * 60 * 1000);
 +  }
 +  
 +  public static void waitForZookeeperAndHdfs(VolumeManager fs) {
 +    log.info("Attempting to talk to zookeeper");
 +    while (true) {
 +      try {
 +        ZooReaderWriter.getInstance().getChildren(Constants.ZROOT);
 +        break;
 +      } catch (InterruptedException e) {
 +        // ignored
 +      } catch (KeeperException ex) {
 +        log.info("Waiting for accumulo to be initialized");
 +        UtilWaitThread.sleep(1000);
 +      }
 +    }
 +    log.info("Zookeeper connected and initialized, attemping to talk to HDFS");
 +    long sleep = 1000;
 +    int unknownHostTries = 3;
 +    while (true) {
 +      try {
 +        if (fs.isReady())
 +          break;
 +        log.warn("Waiting for the NameNode to leave safemode");
 +      } catch (IOException ex) {
 +        log.warn("Unable to connect to HDFS", ex);
 +      } catch (IllegalArgumentException exception) {
 +        /* Unwrap the UnknownHostException so we can deal with it directly */
 +        if (exception.getCause() instanceof UnknownHostException) {
 +          if (unknownHostTries > 0) {
 +            log.warn("Unable to connect to HDFS, will retry. cause: " + exception.getCause());
 +            /* We need to make sure our sleep period is long enough to avoid getting a cached failure of the host lookup. */
 +            sleep = Math.max(sleep, (AddressUtil.getAddressCacheNegativeTtl((UnknownHostException)(exception.getCause()))+1)*1000);
 +          } else {
 +            log.error("Unable to connect to HDFS and have exceeded max number of retries.", exception);
 +            throw exception;
 +          }
 +          unknownHostTries--;
 +        } else {
 +          throw exception;
 +        }
 +      }
 +      log.info("Backing off due to failure; current sleep period is " + sleep / 1000. + " seconds");
 +      UtilWaitThread.sleep(sleep);
 +      /* Back off to give transient failures more time to clear. */
 +      sleep = Math.min(60 * 1000, sleep * 2);
 +    }
 +    log.info("Connected to HDFS");
 +  }
 +  
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 8b631ff,0000000..25defe8
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@@ -1,629 -1,0 +1,631 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.server.init;
 +
 +import java.io.FileNotFoundException;
 +import java.io.IOException;
 +import java.util.Arrays;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Locale;
 +import java.util.Map.Entry;
 +import java.util.UUID;
 +
 +import jline.console.ConsoleReader;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.cli.Help;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.impl.Namespaces;
 +import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.conf.SiteConfiguration;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.file.FileOperations;
 +import org.apache.accumulo.core.file.FileSKVWriter;
 +import org.apache.accumulo.core.iterators.user.VersioningIterator;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.master.thrift.MasterGoalState;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.RootTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 +import org.apache.accumulo.core.security.SecurityUtil;
 +import org.apache.accumulo.core.util.CachedConfiguration;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 +import org.apache.accumulo.server.Accumulo;
 +import org.apache.accumulo.server.ServerConstants;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.conf.ServerConfiguration;
 +import org.apache.accumulo.server.constraints.MetadataConstraints;
 +import org.apache.accumulo.server.fs.VolumeManager;
 +import org.apache.accumulo.server.fs.VolumeManagerImpl;
 +import org.apache.accumulo.server.iterators.MetadataBulkLoadFilter;
 +import org.apache.accumulo.server.security.AuditedSecurityOperation;
 +import org.apache.accumulo.server.security.SystemCredentials;
 +import org.apache.accumulo.server.tables.TableManager;
 +import org.apache.accumulo.server.tablets.TabletTime;
 +import org.apache.accumulo.server.util.TablePropUtil;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FileStatus;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.Text;
 +import org.apache.log4j.Logger;
 +import org.apache.zookeeper.KeeperException;
 +import org.apache.zookeeper.ZooDefs.Ids;
 +
 +import com.beust.jcommander.Parameter;
 +
 +/**
 + * This class is used to setup the directory structure and the root tablet to get an instance started
 + * 
 + */
 +public class Initialize {
 +  private static final Logger log = Logger.getLogger(Initialize.class);
 +  private static final String DEFAULT_ROOT_USER = "root";
 +  public static final String TABLE_TABLETS_TABLET_DIR = "/table_info";
 +
 +  // Shared interactive console for init-time prompts; created lazily.
 +  private static ConsoleReader reader = null;
 +  private static IZooReaderWriter zoo = ZooReaderWriter.getInstance();
 +
 +  /** Lazily creates (once) and returns the interactive console used for prompts. */
 +  private static ConsoleReader getConsoleReader() throws IOException {
 +    if (reader == null)
 +      reader = new ConsoleReader();
 +    return reader;
 +  }
 +
 +  /**
 +   * Sets this class's ZooKeeper reader/writer.
 +   * 
 +   * @param izoo
 +   *          reader/writer
 +   */
 +  static void setZooReaderWriter(IZooReaderWriter izoo) {
 +    zoo = izoo;
 +  }
 +
 +  /**
 +   * Gets this class's ZooKeeper reader/writer.
 +   * 
 +   * @return reader/writer
 +   */
 +  static IZooReaderWriter getZooReaderWriter() {
 +    return zoo;
 +  }
 +
 +  // Initial per-table properties applied to both the root and metadata tables.
 +  private static HashMap<String,String> initialMetadataConf = new HashMap<String,String>();
 +  static {
 +    initialMetadataConf.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "32K");
 +    initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), "5");
 +    initialMetadataConf.put(Property.TABLE_WALOG_ENABLED.getKey(), "true");
 +    initialMetadataConf.put(Property.TABLE_MAJC_RATIO.getKey(), "1");
 +    initialMetadataConf.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "64M");
 +    initialMetadataConf.put(Property.TABLE_CONSTRAINT_PREFIX.getKey() + "1", MetadataConstraints.class.getName());
 +    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers", "10," + VersioningIterator.class.getName());
 +    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers.opt.maxVersions", "1");
 +    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers", "10," + VersioningIterator.class.getName());
 +    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers.opt.maxVersions", "1");
 +    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers", "10," + VersioningIterator.class.getName());
 +    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers.opt.maxVersions", "1");
 +    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.bulkLoadFilter", "20," + MetadataBulkLoadFilter.class.getName());
 +    initialMetadataConf.put(Property.TABLE_FAILURES_IGNORE.getKey(), "false");
 +    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "tablet",
 +        String.format("%s,%s", TabletsSection.TabletColumnFamily.NAME, TabletsSection.CurrentLocationColumnFamily.NAME));
 +    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "server", String.format("%s,%s,%s,%s", DataFileColumnFamily.NAME,
 +        LogColumnFamily.NAME, TabletsSection.ServerColumnFamily.NAME, TabletsSection.FutureLocationColumnFamily.NAME));
 +    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "tablet,server");
 +    initialMetadataConf.put(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "");
 +    initialMetadataConf.put(Property.TABLE_INDEXCACHE_ENABLED.getKey(), "true");
 +    initialMetadataConf.put(Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "true");
 +  }
 +
 +  /**
 +   * Pre-flight checks before initialization: logs the configured filesystem and
 +   * ZooKeeper targets, verifies ZooKeeper is reachable, warns if the instance secret
 +   * is still the default, and refuses to proceed when the configured volumes already
 +   * appear initialized. Returns false when init must not continue.
 +   */
 +  static boolean checkInit(Configuration conf, VolumeManager fs, SiteConfiguration sconf) throws IOException {
 +    String fsUri;
 +    if (!sconf.get(Property.INSTANCE_DFS_URI).equals(""))
 +      fsUri = sconf.get(Property.INSTANCE_DFS_URI);
 +    else
 +      fsUri = FileSystem.getDefaultUri(conf).toString();
 +    log.info("Hadoop Filesystem is " + fsUri);
 +    log.info("Accumulo data dirs are " + Arrays.asList(ServerConstants.getConfiguredBaseDirs()));
 +    log.info("Zookeeper server is " + sconf.get(Property.INSTANCE_ZK_HOST));
 +    log.info("Checking if Zookeeper is available. If this hangs, then you need to make sure zookeeper is running");
 +    if (!zookeeperAvailable()) {
 +      log.fatal("Zookeeper needs to be up and running in order to init. Exiting ...");
 +      return false;
 +    }
 +    if (sconf.get(Property.INSTANCE_SECRET).equals(Property.INSTANCE_SECRET.getDefaultValue())) {
 +      ConsoleReader c = getConsoleReader();
 +      c.beep();
 +      c.println();
 +      c.println();
 +      c.println("Warning!!! Your instance secret is still set to the default, this is not secure. We highly recommend you change it.");
 +      c.println();
 +      c.println();
 +      c.println("You can change the instance secret in accumulo by using:");
 +      c.println("   bin/accumulo " + org.apache.accumulo.server.util.ChangeSecret.class.getName() + " oldPassword newPassword.");
 +      c.println("You will also need to edit your secret in your configuration file by adding the property instance.secret to your conf/accumulo-site.xml. Without this accumulo will not operate correctly");
 +    }
 +    try {
 +      if (isInitialized(fs)) {
 +        String instanceDfsDir = sconf.get(Property.INSTANCE_DFS_DIR);
 +        log.fatal("It appears the directories " + Arrays.asList(ServerConstants.getConfiguredBaseDirs()) + " were previously initialized.");
 +        String instanceVolumes = sconf.get(Property.INSTANCE_VOLUMES);
 +        String instanceDfsUri = sconf.get(Property.INSTANCE_DFS_URI);
 +
 +        // Tailor the remediation advice to whichever volume property the user actually set.
 +        if (!instanceVolumes.isEmpty()) {
 +          log.fatal("Change the property " + Property.INSTANCE_VOLUMES + " to use different filesystems,");
 +        } else if (!instanceDfsDir.isEmpty()) {
 +          log.fatal("Change the property " + Property.INSTANCE_DFS_URI + " to use a different filesystem,");
 +        } else {
 +          log.fatal("You are using the default URI for the filesystem. Set the property " + Property.INSTANCE_VOLUMES + " to use a different filesystem,");
 +        }
 +        log.fatal("or change the property " + Property.INSTANCE_DFS_DIR + " to use a different directory.");
 +        log.fatal("The current value of " + Property.INSTANCE_DFS_URI + " is |" + instanceDfsUri + "|");
 +        log.fatal("The current value of " + Property.INSTANCE_DFS_DIR + " is |" + instanceDfsDir + "|");
 +        log.fatal("The current value of " + Property.INSTANCE_VOLUMES + " is |" + instanceVolumes + "|");
 +        return false;
 +      }
 +    } catch (IOException e) {
 +      throw new IOException("Failed to check if filesystem already initialized", e);
 +    }
 +
 +    return true;
 +  }
 +
 +  /** Runs the full interactive init flow; returns false (after logging) on any failure. */
 +  public static boolean doInit(Opts opts, Configuration conf, VolumeManager fs) throws IOException {
 +    if (!checkInit(conf, fs, ServerConfiguration.getSiteConfiguration())) {
 +      return false;
 +    }
 +
 +    // prompt user for instance name and root password early, in case they
 +    // abort, we don't leave an inconsistent HDFS/ZooKeeper structure
 +    String instanceNamePath;
 +    try {
 +      instanceNamePath = getInstanceNamePath(opts);
 +    } catch (Exception e) {
 +      log.fatal("Failed to talk to zookeeper", e);
 +      return false;
 +    }
 +    opts.rootpass = getRootPassword(opts);
 +    return initialize(opts, instanceNamePath, fs);
 +  }
 +
 +  /**
 +   * Creates the ZooKeeper state, filesystem layout, and security info for a new
 +   * instance, in that order; returns false (after logging diagnostics) on failure.
 +   */
 +  public static boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs) {
 +
 +    UUID uuid = UUID.randomUUID();
 +    // the actual disk locations of the root table and tablets
 +    String[] configuredTableDirs = ServerConstants.prefix(ServerConstants.getConfiguredBaseDirs(), ServerConstants.TABLE_DIR);
 +    final Path rootTablet = new Path(fs.choose(configuredTableDirs) + "/" + RootTable.ID + RootTable.ROOT_TABLET_LOCATION);
 +    try {
 +      initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTablet);
 +    } catch (Exception e) {
 +      log.fatal("Failed to initialize zookeeper", e);
 +      return false;
 +    }
 +
 +    try {
 +      initFileSystem(opts, fs, uuid, rootTablet);
 +    } catch (Exception e) {
 +      log.fatal("Failed to initialize filesystem", e);
 +      for (FileSystem filesystem : fs.getFileSystems().values()) {
 +        log.fatal("For FileSystem:" + filesystem.getUri());
 +        
 +        // Try to warn the user about what the actual problem is
 +        Configuration fsConf = filesystem.getConf();
 +        
 +        final String defaultFsUri = "file:///";
 +        String fsDefaultName = fsConf.get("fs.default.name", defaultFsUri), fsDefaultFS = fsConf.get("fs.defaultFS", defaultFsUri);
 +        
 +        // Try to determine when we couldn't find an appropriate core-site.xml on the classpath
 +        if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
 +          log.fatal("Default filesystem value ('fs.defaultFS' or 'fs.default.name') was found in the Hadoop configuration");
 +          log.fatal("Please ensure that the Hadoop core-site.xml is on the classpath using 'general.classpaths' in accumulo-site.xml");
 +        }
 +      }
 +      
 +      return false;
 +    }
 +
 +    try {
 +      initSecurity(opts, uuid.toString());
 +    } catch (Exception e) {
 +      log.fatal("Failed to initialize security", e);
 +      return false;
 +    }
 +    return true;
 +  }
 +
 +  /** Returns true when the ZooKeeper root node is reachable; false on any ZK failure. */
 +  private static boolean zookeeperAvailable() {
 +    try {
 +      return zoo.exists("/");
 +    } catch (KeeperException e) {
 +      return false;
 +    } catch (InterruptedException e) {
 +      // NOTE(review): interrupt status is not restored here — TODO confirm intent.
 +      return false;
 +    }
 +  }
 +
 +  /** Converts each string into a Hadoop Path, preserving order. */
 +  private static Path[] paths(String[] paths) {
 +    Path[] result = new Path[paths.length];
 +    for (int i = 0; i < paths.length; i++) {
 +      result[i] = new Path(paths[i]);
 +    }
 +    return result;
 +  }
 +
 +  /** Creates the version directory and instance-id marker file on each base volume. */
 +  private static void initDirs(VolumeManager fs, UUID uuid, String[] baseDirs, boolean print) throws IOException {
 +    for (String baseDir : baseDirs) {
 +      fs.mkdirs(new Path(new Path(baseDir, ServerConstants.VERSION_DIR), "" + ServerConstants.DATA_VERSION));
 +
 +      // create an instance id
 +      Path iidLocation = new Path(baseDir, ServerConstants.INSTANCE_ID_DIR);
 +      fs.mkdirs(iidLocation);
 +      fs.createNewFile(new Path(iidLocation, uuid.toString()));
 +      if (print)
 +        log.info("Initialized volume " + baseDir);
 +    }
 +  }
 +
 +  // TODO Remove deprecation warning suppression when Hadoop1 support is dropped
 +  /**
 +   * Lays out the on-disk structure for a new instance: base/version/instance-id dirs,
 +   * metadata table directories, and a root tablet seeded with one RFile describing the
 +   * metadata table's two initial tablets (table_info and the default tablet).
 +   */
 +  @SuppressWarnings("deprecation")
 +  private static void initFileSystem(Opts opts, VolumeManager fs, UUID uuid, Path rootTablet) throws IOException {
 +    FileStatus fstat;
 +
 +    initDirs(fs, uuid, ServerConstants.getConfiguredBaseDirs(), false);
 +
 +    // the actual disk locations of the metadata table and tablets
 +    final Path[] metadataTableDirs = paths(ServerConstants.getMetadataTableDirs());
 +
 +    String tableMetadataTabletDir = fs.choose(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), TABLE_TABLETS_TABLET_DIR));
 +    String defaultMetadataTabletDir = fs.choose(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), Constants.DEFAULT_TABLET_LOCATION));
 +
 +    // initialize initial metadata config in zookeeper
 +    initMetadataConfig();
 +
 +    // create metadata table
 +    for (Path mtd : metadataTableDirs) {
 +      try {
 +        fstat = fs.getFileStatus(mtd);
 +        if (!fstat.isDir()) {
 +          log.fatal("location " + mtd.toString() + " exists but is not a directory");
 +          return;
 +        }
 +      } catch (FileNotFoundException fnfe) {
 +        if (!fs.mkdirs(mtd)) {
 +          log.fatal("unable to create directory " + mtd.toString());
 +          return;
 +        }
 +      }
 +    }
 +
 +    // create root table and tablet
 +    try {
 +      fstat = fs.getFileStatus(rootTablet);
 +      if (!fstat.isDir()) {
 +        log.fatal("location " + rootTablet.toString() + " exists but is not a directory");
 +        return;
 +      }
 +    } catch (FileNotFoundException fnfe) {
 +      if (!fs.mkdirs(rootTablet)) {
 +        log.fatal("unable to create directory " + rootTablet.toString());
 +        return;
 +      }
 +    }
 +
 +    // populate the root tablet with info about the default tablet
 +    // the root tablet contains the key extent and locations of all the
 +    // metadata tablets
 +    String initRootTabFile = rootTablet + "/00000_00000." + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
 +    FileSystem ns = fs.getFileSystemByPath(new Path(initRootTabFile));
 +    FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, ns, ns.getConf(), AccumuloConfiguration.getDefaultConfiguration());
 +    mfw.startDefaultLocalityGroup();
 +
 +    Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
 +
 +    // table tablet's directory
 +    Key tableDirKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
 +        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
 +    mfw.append(tableDirKey, new Value(tableMetadataTabletDir.getBytes(Constants.UTF8)));
 +
 +    // table tablet time
 +    Key tableTimeKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
 +        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
 +    mfw.append(tableTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(Constants.UTF8)));
 +
 +    // table tablet's prevrow
 +    Key tablePrevRowKey = new Key(tableExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
 +        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
 +    mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(null));
 +
 +    // ----------] default tablet info
 +    Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), null));
 +
 +    // default's directory
 +    Key defaultDirKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
 +        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
 +    mfw.append(defaultDirKey, new Value(defaultMetadataTabletDir.getBytes(Constants.UTF8)));
 +
 +    // default's time
 +    Key defaultTimeKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
 +        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
 +    mfw.append(defaultTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(Constants.UTF8)));
 +
 +    // default's prevrow
 +    Key defaultPrevRowKey = new Key(defaultExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
 +        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
 +    mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
 +
 +    mfw.close();
 +
 +    // create table and default tablets directories
 +    for (String s : Arrays.asList(tableMetadataTabletDir, defaultMetadataTabletDir)) {
 +      Path dir = new Path(s);
 +      try {
 +        fstat = fs.getFileStatus(dir);
 +        if (!fstat.isDir()) {
 +          log.fatal("location " + dir.toString() + " exists but is not a directory");
 +          return;
 +        }
 +      } catch (FileNotFoundException fnfe) {
 +        // NOTE(review): this re-stats the same dir just found missing above, and after the
 +        // inner catch creates it, mkdirs is invoked on the same dir again below. The second
 +        // mkdirs appears redundant (mkdirs on an existing dir succeeds) — confirm intent.
 +        try {
 +          fstat = fs.getFileStatus(dir);
 +          if (!fstat.isDir()) {
 +            log.fatal("location " + dir.toString() + " exists but is not a directory");
 +            return;
 +          }
 +        } catch (FileNotFoundException fnfe2) {
 +          // create table info dir
 +          if (!fs.mkdirs(dir)) {
 +            log.fatal("unable to create directory " + dir.toString());
 +            return;
 +          }
 +        }
 +
 +        // create default dir
 +        if (!fs.mkdirs(dir)) {
 +          log.fatal("unable to create directory " + dir.toString());
 +          return;
 +        }
 +      }
 +    }
 +  }
 +
 +  /**
 +   * Creates the persistent ZooKeeper node tree for the new instance: the global root
 +   * and instance-name nodes, then every per-instance node (tables, namespaces,
 +   * servers, locks, root-tablet pointers, etc.), failing if any already exists.
 +   */
 +  private static void initZooKeeper(Opts opts, String uuid, String instanceNamePath, Path rootTablet) throws KeeperException, InterruptedException {
 +    // setup basic data in zookeeper
 +    ZooUtil.putPersistentData(zoo.getZooKeeper(), Constants.ZROOT, new byte[0], -1, NodeExistsPolicy.SKIP, Ids.OPEN_ACL_UNSAFE);
 +    ZooUtil.putPersistentData(zoo.getZooKeeper(), Constants.ZROOT + Constants.ZINSTANCES, new byte[0], -1, NodeExistsPolicy.SKIP, Ids.OPEN_ACL_UNSAFE);
 +
 +    // setup instance name
 +    if (opts.clearInstanceName)
 +      zoo.recursiveDelete(instanceNamePath, NodeMissingPolicy.SKIP);
 +    zoo.putPersistentData(instanceNamePath, uuid.getBytes(Constants.UTF8), NodeExistsPolicy.FAIL);
 +
 +    final byte[] EMPTY_BYTE_ARRAY = new byte[0], ZERO_CHAR_ARRAY = new byte[] {'0'};
 +    
 +    // setup the instance
 +    String zkInstanceRoot = Constants.ZROOT + "/" + uuid;
 +    zoo.putPersistentData(zkInstanceRoot, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLES, Constants.ZTABLES_INITIAL_ID, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZNAMESPACES, new byte[0], NodeExistsPolicy.FAIL);
 +    TableManager.prepareNewNamespaceState(uuid, Namespaces.DEFAULT_NAMESPACE_ID, Namespaces.DEFAULT_NAMESPACE, NodeExistsPolicy.FAIL);
 +    TableManager.prepareNewNamespaceState(uuid, Namespaces.ACCUMULO_NAMESPACE_ID, Namespaces.ACCUMULO_NAMESPACE, NodeExistsPolicy.FAIL);
 +    TableManager.prepareNewTableState(uuid, RootTable.ID, Namespaces.ACCUMULO_NAMESPACE_ID, RootTable.NAME, TableState.ONLINE, NodeExistsPolicy.FAIL);
 +    TableManager.prepareNewTableState(uuid, MetadataTable.ID, Namespaces.ACCUMULO_NAMESPACE_ID, MetadataTable.NAME, TableState.ONLINE, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZTSERVERS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZPROBLEMS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_WALOGS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    // NOTE(review): getBytes() here uses the platform default charset, unlike every
 +    // other call in this method which uses Constants.UTF8 — confirm this is intentional.
 +    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_PATH, rootTablet.toString().getBytes(), NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZTRACERS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTERS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_LOCK, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_GOAL_STATE, MasterGoalState.NORMAL.toString().getBytes(Constants.UTF8), NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZGC, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZGC_LOCK, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZCONFIG, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLE_LOCKS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZHDFS_RESERVATIONS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZNEXT_FILE, ZERO_CHAR_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZRECOVERY, ZERO_CHAR_ARRAY, NodeExistsPolicy.FAIL);
++    zoo.putPersistentData(zkInstanceRoot + Constants.ZMONITOR, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
++    zoo.putPersistentData(zkInstanceRoot + Constants.ZMONITOR_LOCK, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +  }
 +
 +  /**
 +   * Resolves the instance name (from opts or an interactive prompt) to its ZooKeeper
 +   * path, looping until the name is unused or the user agrees to clear the existing
 +   * entry. Exits the JVM if input is exhausted.
 +   */
 +  private static String getInstanceNamePath(Opts opts) throws IOException, KeeperException, InterruptedException {
 +    // setup the instance name
 +    String instanceName, instanceNamePath = null;
 +    boolean exists = true;
 +    do {
 +      if (opts.cliInstanceName == null) {
 +        instanceName = getConsoleReader().readLine("Instance name : ");
 +      } else {
 +        instanceName = opts.cliInstanceName;
 +      }
 +      if (instanceName == null)
 +        System.exit(0);
 +      instanceName = instanceName.trim();
 +      if (instanceName.length() == 0)
 +        continue;
 +      instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + instanceName;
 +      if (opts.clearInstanceName) {
 +        exists = false;
 +        break;
 +      } else if (exists = zoo.exists(instanceNamePath)) {
 +        String decision = getConsoleReader().readLine("Instance name \"" + instanceName + "\" exists. Delete existing entry from zookeeper? [Y/N] : ");
 +        if (decision == null)
 +          System.exit(0);
 +        if (decision.length() == 1 && decision.toLowerCase(Locale.ENGLISH).charAt(0) == 'y') {
 +          opts.clearInstanceName = true;
 +          exists = false;
 +        }
 +      }
 +    } while (exists);
 +    return instanceNamePath;
 +  }
 +
 +  /**
 +   * Returns the initial root password, either from the command line or by prompting
 +   * twice (masked) until both entries match. Exits the JVM if input is exhausted.
 +   */
 +  private static byte[] getRootPassword(Opts opts) throws IOException {
 +    if (opts.cliPassword != null) {
 +      return opts.cliPassword.getBytes(Constants.UTF8);
 +    }
 +    String rootpass;
 +    String confirmpass;
 +    do {
 +      rootpass = getConsoleReader()
 +          .readLine("Enter initial password for " + DEFAULT_ROOT_USER + " (this may not be applicable for your security setup): ", '*');
 +      if (rootpass == null)
 +        System.exit(0);
 +      confirmpass = getConsoleReader().readLine("Confirm initial password for " + DEFAULT_ROOT_USER + ": ", '*');
 +      if (confirmpass == null)
 +        System.exit(0);
 +      if (!rootpass.equals(confirmpass))
 +        log.error("Passwords do not match");
 +    } while (!rootpass.equals(confirmpass));
 +    return rootpass.getBytes(Constants.UTF8);
 +  }
 +
 +  /** Bootstraps the security subsystem with the root user and password for this instance. */
 +  private static void initSecurity(Opts opts, String iid) throws AccumuloSecurityException, ThriftSecurityException {
 +    AuditedSecurityOperation.getInstance(iid, true).initializeSecurity(SystemCredentials.get().toThrift(HdfsZooInstance.getInstance()), DEFAULT_ROOT_USER,
 +        opts.rootpass);
 +  }
 +
 +  /**
 +   * Applies the initial metadata table configuration (replication, iterators, locality
 +   * groups, ...) as per-table properties in ZooKeeper.
 +   *
 +   * NOTE(review): the {@code tableId} parameter is never used — the loop always writes
 +   * properties for both RootTable.ID and MetadataTable.ID, so the no-arg overload below
 +   * performs the identical work twice. Confirm whether the loop should use tableId.
 +   */
 +  public static void initMetadataConfig(String tableId) throws IOException {
 +    try {
 +      Configuration conf = CachedConfiguration.getInstance();
 +      int max = conf.getInt("dfs.replication.max", 512);
 +      // Hadoop 0.23 switched the min value configuration name
 +      int min = Math.max(conf.getInt("dfs.replication.min", 1), conf.getInt("dfs.namenode.replication.min", 1));
 +      if (max < 5)
 +        setMetadataReplication(max, "max");
 +      if (min > 5)
 +        setMetadataReplication(min, "min");
 +      for (Entry<String,String> entry : initialMetadataConf.entrySet()) {
 +        if (!TablePropUtil.setTableProperty(RootTable.ID, entry.getKey(), entry.getValue()))
 +          throw new IOException("Cannot create per-table property " + entry.getKey());
 +        if (!TablePropUtil.setTableProperty(MetadataTable.ID, entry.getKey(), entry.getValue()))
 +          throw new IOException("Cannot create per-table property " + entry.getKey());
 +      }
 +    } catch (Exception e) {
 +      log.fatal("error talking to zookeeper", e);
 +      throw new IOException(e);
 +    }
 +  }
 +
 +  /** Convenience overload covering both the root and metadata tables. */
 +  protected static void initMetadataConfig() throws IOException {
 +    initMetadataConfig(RootTable.ID);
 +    initMetadataConfig(MetadataTable.ID);
 +  }
 +
 +  /**
 +   * Prompts the user to reconcile the metadata table's default replication of 5 with
 +   * the cluster's configured HDFS replication bounds; stores the chosen value.
 +   */
 +  private static void setMetadataReplication(int replication, String reason) throws IOException {
 +    String rep = getConsoleReader().readLine(
 +        "Your HDFS replication " + reason + " is not compatible with our default " + MetadataTable.NAME + " replication of 5. What do you want to set your "
 +            + MetadataTable.NAME + " replication to? (" + replication + ") ");
 +    if (rep == null || rep.length() == 0)
 +      rep = Integer.toString(replication);
 +    else
 +      // Lets make sure it's a number
 +      Integer.parseInt(rep);
 +    initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), rep);
 +  }
 +
 +  /** Returns true if any configured base dir already has an instance-id or version dir. */
 +  public static boolean isInitialized(VolumeManager fs) throws IOException {
 +    for (String baseDir : ServerConstants.getConfiguredBaseDirs()) {
 +      if (fs.exists(new Path(baseDir, ServerConstants.INSTANCE_ID_DIR)) || fs.exists(new Path(baseDir, ServerConstants.VERSION_DIR)))
 +        return true;
 +    }
 +
 +    return false;
 +  }
 +
 +  /**
 +   * Initializes any configured-but-uninitialized volumes using the instance id and
 +   * data version read from an already-initialized volume; fails if the persisted data
 +   * version does not match this build's DATA_VERSION.
 +   */
 +  private static void addVolumes(VolumeManager fs) throws IOException {
 +    HashSet<String> initializedDirs = new HashSet<String>();
 +    initializedDirs.addAll(Arrays.asList(ServerConstants.checkBaseDirs(ServerConstants.getConfiguredBaseDirs(), true)));
 +
 +    HashSet<String> uinitializedDirs = new HashSet<String>();
 +    uinitializedDirs.addAll(Arrays.asList(ServerConstants.getConfiguredBaseDirs()));
 +    uinitializedDirs.removeAll(initializedDirs);
 +
 +    Path aBasePath = new Path(initializedDirs.iterator().next());
 +    Path iidPath = new Path(aBasePath, ServerConstants.INSTANCE_ID_DIR);
 +    Path versionPath = new Path(aBasePath, ServerConstants.VERSION_DIR);
 +
 +    UUID uuid = UUID.fromString(ZooUtil.getInstanceIDFromHdfs(iidPath));
 +
 +    if (ServerConstants.DATA_VERSION != Accumulo.getAccumuloPersistentVersion(versionPath.getFileSystem(CachedConfiguration.getInstance()), versionPath)) {
 +      throw new IOException("Accumulo " + Constants.VERSION + " cannot initialize data version " + Accumulo.getAccumuloPersistentVersion(fs));
 +    }
 +
 +    initDirs(fs, uuid, uinitializedDirs.toArray(new String[uinitializedDirs.size()]), true);
 +  }
 +
 +  // Command-line options for init; parsed by JCommander via the Help base class.
 +  static class Opts extends Help {
 +    @Parameter(names = "--add-volumes", description = "Initialize any uninitialized volumes listed in instance.volumes")
 +    boolean addVolumes = false;
 +    @Parameter(names = "--reset-security", description = "just update the security information")
 +    boolean resetSecurity = false;
 +    @Parameter(names = "--clear-instance-name", description = "delete any existing instance name without prompting")
 +    boolean clearInstanceName = false;
 +    @Parameter(names = "--instance-name", description = "the instance name, if not provided, will prompt")
 +    String cliInstanceName;
 +    @Parameter(names = "--password", description = "set the password on the command line")
 +    String cliPassword;
 +
 +    // Filled in after prompting/option parsing; null until then.
 +    byte[] rootpass = null;
 +  }
 +
 +  /** Entry point: dispatches to reset-security, add-volumes, or the full init flow. */
 +  public static void main(String[] args) {
 +    Opts opts = new Opts();
 +    opts.parseArgs(Initialize.class.getName(), args);
 +
 +    try {
 +      SecurityUtil.serverLogin();
 +      Configuration conf = CachedConfiguration.getInstance();
 +
 +      @SuppressWarnings("deprecation")
 +      VolumeManager fs = VolumeManagerImpl.get(SiteConfiguration.getSiteConfiguration());
 +
 +      if (opts.resetSecurity) {
 +        if (isInitialized(fs)) {
 +          opts.rootpass = getRootPassword(opts);
 +          initSecurity(opts, HdfsZooInstance.getInstance().getInstanceID());
 +        } else {
 +          log.fatal("Attempted to reset security on accumulo before it was initialized");
 +        }
 +      }
 +
 +      if (opts.addVolumes) {
 +        addVolumes(fs);
 +      }
 +
 +      if (!opts.resetSecurity && !opts.addVolumes)
 +        if (!doInit(opts, conf, fs))
 +          System.exit(-1);
 +    } catch (Exception e) {
 +      log.fatal(e, e);
 +      throw new RuntimeException(e);
 +    }
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/server/base/src/main/java/org/apache/accumulo/server/monitor/LogService.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/monitor/LogService.java
index 4df585d,0000000..0a5341a
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/monitor/LogService.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/monitor/LogService.java
@@@ -1,158 -1,0 +1,187 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.server.monitor;
 +
 +import java.io.IOException;
 +import java.net.ServerSocket;
 +import java.net.Socket;
 +import java.util.ArrayList;
 +import java.util.LinkedHashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.util.Daemon;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
- import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.apache.log4j.LogManager;
 +import org.apache.log4j.Logger;
 +import org.apache.log4j.net.SocketNode;
 +import org.apache.log4j.spi.LoggingEvent;
++import org.apache.zookeeper.KeeperException;
 +
 +/**
 + * Hijack log4j and capture log events for display.
 + * 
 + */
 +public class LogService extends org.apache.log4j.AppenderSkeleton {
 +
 +  private static final Logger log = Logger.getLogger(LogService.class);
 +
 +  /**
 +   * Read logging events forward to us over the net.
 +   * 
 +   */
 +  static class SocketServer implements Runnable {
 +    private ServerSocket server = null;
 +
 +    public SocketServer(int port) {
 +      try {
 +        server = new ServerSocket(port);
 +      } catch (IOException io) {
 +        throw new RuntimeException(io);
 +      }
 +    }
 +
 +    public int getLocalPort() {
 +      return server.getLocalPort();
 +    }
 +
 +    @Override
 +    public void run() {
 +      try {
 +        while (true) {
 +          log.debug("Waiting for log message senders");
 +          Socket socket = server.accept();
 +          log.debug("Got a new connection");
 +          Thread t = new Daemon(new SocketNode(socket, LogManager.getLoggerRepository()));
 +          t.start();
 +        }
 +      } catch (IOException io) {
 +        log.error(io, io);
 +      }
 +    }
 +  }
 +
-   public static void startLogListener(AccumuloConfiguration conf, String instanceId) {
++  /**
++   * Place the host:port advertisement for the Monitor's Log4j listener in ZooKeeper
++   * 
++   * @param conf
++   *          configuration for the instance
++   * @param instanceId
++   *          instanceId for the instance
++   * @param hostAddress
++   *          Address that monitor process is bound to
++   */
++  public static void startLogListener(AccumuloConfiguration conf, String instanceId, String hostAddress) {
 +    try {
 +      SocketServer server = new SocketServer(conf.getPort(Property.MONITOR_LOG4J_PORT));
-       ZooReaderWriter.getInstance().putPersistentData(ZooUtil.getRoot(instanceId) + Constants.ZMONITOR_LOG4J_PORT,
-           Integer.toString(server.getLocalPort()).getBytes(Constants.UTF8), NodeExistsPolicy.OVERWRITE);
++
++      // getLocalPort will return the actual ephemeral port used when '0' was provided.
++      String logForwardingAddr = hostAddress + ":" + server.getLocalPort();
++
++      log.debug("Setting monitor log4j log-forwarding address to: " + logForwardingAddr);
++
++      final String path = ZooUtil.getRoot(instanceId) + Constants.ZMONITOR_LOG4J_ADDR;
++      final ZooReaderWriter zoo = ZooReaderWriter.getInstance();
++
++      // Delete before we try to re-create in case the previous session hasn't yet expired
++      try {
++        zoo.delete(path, -1);
++      } catch (KeeperException e) {
++        // We don't care if the node is already gone
++        if (!KeeperException.Code.NONODE.equals(e.code())) {
++          throw e;
++        }
++      }
++
++      zoo.putEphemeralData(path, logForwardingAddr.getBytes(Constants.UTF8));
++
 +      new Daemon(server).start();
 +    } catch (Throwable t) {
-       log.info("Unable to listen to cluster-wide ports", t);
++      log.info("Unable to start/advertise Log4j listener for log-forwarding to monitor", t);
 +    }
 +  }
 +
 +  static private LogService instance = null;
 +
 +  synchronized public static LogService getInstance() {
 +    if (instance == null)
 +      return new LogService();
 +    return instance;
 +  }
 +
 +  private static final int MAX_LOGS = 50;
 +
 +  private LinkedHashMap<String,DedupedLogEvent> events = new LinkedHashMap<String,DedupedLogEvent>(MAX_LOGS + 1, (float) .75, true) {
 +
 +    private static final long serialVersionUID = 1L;
 +
 +    @Override
 +    @SuppressWarnings("rawtypes")
 +    protected boolean removeEldestEntry(Map.Entry eldest) {
 +      return size() > MAX_LOGS;
 +    }
 +  };
 +
 +  public LogService() {
 +    synchronized (LogService.class) {
 +      instance = this;
 +    }
 +  }
 +
 +  @Override
 +  synchronized protected void append(LoggingEvent ev) {
 +    Object application = ev.getMDC("application");
 +    if (application == null || application.toString().isEmpty())
 +      return;
 +
 +    DedupedLogEvent dev = new DedupedLogEvent(ev);
 +
 +    // if event is present, increase the count
 +    if (events.containsKey(dev.toString())) {
 +      DedupedLogEvent oldDev = events.remove(dev.toString());
 +      dev.setCount(oldDev.getCount() + 1);
 +    }
 +    events.put(dev.toString(), dev);
 +  }
 +
 +  @Override
 +  public void close() {
 +    events = null;
 +  }
 +
 +  @Override
 +  public synchronized void doAppend(LoggingEvent event) {
 +    super.doAppend(event);
 +  }
 +
 +  @Override
 +  public boolean requiresLayout() {
 +    return false;
 +  }
 +
 +  synchronized public List<DedupedLogEvent> getEvents() {
 +    return new ArrayList<DedupedLogEvent>(events.values());
 +  }
 +
 +  synchronized public void clear() {
 +    events.clear();
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/099701cc/server/base/src/main/java/org/apache/accumulo/server/watcher/MonitorLog4jWatcher.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/watcher/MonitorLog4jWatcher.java
index 0000000,0000000..c0cef08
new file mode 100644
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/watcher/MonitorLog4jWatcher.java
@@@ -1,0 -1,0 +1,154 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.accumulo.server.watcher;
++
++import org.apache.accumulo.core.Constants;
++import org.apache.accumulo.core.zookeeper.ZooUtil;
++import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
++import org.apache.log4j.Appender;
++import org.apache.log4j.LogManager;
++import org.apache.log4j.Logger;
++import org.apache.log4j.helpers.FileWatchdog;
++import org.apache.log4j.xml.DOMConfigurator;
++import org.apache.zookeeper.KeeperException.NoNodeException;
++import org.apache.zookeeper.WatchedEvent;
++import org.apache.zookeeper.Watcher;
++
++import com.google.common.net.HostAndPort;
++
++/**
++ * Watcher that reads the monitor's log4j host:port advertisement from ZooKeeper and publishes it via system properties
++ */
++public class MonitorLog4jWatcher extends FileWatchdog implements Watcher {
++  private static final Logger log = Logger.getLogger(MonitorLog4jWatcher.class);
++
++  private static final String HOST_PROPERTY_NAME = "org.apache.accumulo.core.host.log";
++  private static final String PORT_PROPERTY_NAME = "org.apache.accumulo.core.host.log.port";
++
++  private final Object lock;
++  private boolean loggingDisabled = false;
++  protected String path;
++
++  /**
++   * @param instance
++   *          instance ID used to build the ZooKeeper path for the monitor's log4j advertisement
++   * @param filename
++   *          log4j XML configuration file to watch for changes
++   * @param delay
++   *          polling interval, in milliseconds, between checks of the file
++   */
++  public MonitorLog4jWatcher(String instance, String filename, int delay) {
++    super(filename);
++    setDelay(delay);
++    this.path = ZooUtil.getRoot(instance) + Constants.ZMONITOR_LOG4J_ADDR;
++    this.lock = new Object();
++  }
++
++  @Override
++  public void run() {
++    try {
++      // Initially set the logger if the Monitor's log4j advertisement node exists
++      if (ZooReaderWriter.getInstance().getZooKeeper().exists(path, this) != null)
++        updateMonitorLog4jLocation();
++      log.info("Set watch for Monitor Log4j watcher");
++    } catch (Exception e) {
++      log.error("Unable to set watch for Monitor Log4j watcher on " + path);
++    }
++
++    super.run();
++  }
++
++  @Override
++  protected void doOnChange() {
++    // this method gets called in the parent class' constructor
++    // I'm not sure of a better way to get around this. final helps though.
++    if (null == lock) {
++      resetLogger();
++      return;
++    }
++    
++    synchronized (lock) {
++      // We might be triggered by a file reload or by a ZooKeeper update.
++      // Either way, log-forwarding will be restarted.
++      loggingDisabled = false;
++      log.info("Enabled log-forwarding");
++      resetLogger();
++    }
++  }
++  
++  private void resetLogger() {
++    // Force a reset on the logger's configuration
++    LogManager.resetConfiguration();
++    new DOMConfigurator().doConfigure(filename, LogManager.getLoggerRepository());
++  }
++
++  @Override
++  public void process(WatchedEvent event) {
++    // We got an update, process the data in the node
++    updateMonitorLog4jLocation();
++
++    if (event.getPath() != null) {
++      try {
++        ZooReaderWriter.getInstance().exists(event.getPath(), this);
++      } catch (Exception ex) {
++        log.error("Unable to reset watch for Monitor Log4j watcher", ex);
++      }
++    }
++  }
++
++  /**
++   * Read the host and port information for the Monitor's log4j socket and update the system properties so that, on logger refresh, it sees the new information.
++   */
++  protected void updateMonitorLog4jLocation() {
++    try {
++      String hostPortString = new String(ZooReaderWriter.getInstance().getData(path, null), Constants.UTF8);
++      HostAndPort hostAndPort = HostAndPort.fromString(hostPortString);
++
++      System.setProperty(HOST_PROPERTY_NAME, hostAndPort.getHostText());
++      System.setProperty(PORT_PROPERTY_NAME, Integer.toString(hostAndPort.getPort()));
++
++      log.info("Changing monitor log4j address to " + hostAndPort.toString());
++
++      doOnChange();
++    } catch (NoNodeException e) {
++      // Not sure on the synchronization guarantees for Loggers and Appenders
++      // on configuration reload
++      synchronized (lock) {
++        // No need to disable log-forwarding again if it is already disabled.
++        if (loggingDisabled) {
++          return;
++        }
++
++        Logger logger = LogManager.getLogger("org.apache.accumulo");
++        if (null != logger) {
++          // TODO ACCUMULO-2343 Create a specific appender for log-forwarding to the monitor
++          // that can replace the AsyncAppender+SocketAppender.
++          Appender appender = logger.getAppender("ASYNC");
++          if (null != appender) {
++            log.info("Closing log-forwarding appender");
++            appender.close();
++            log.info("Removing log-forwarding appender");
++            logger.removeAppender(appender);
++            loggingDisabled = true;
++          }
++        }
++      }
++    } catch (IllegalArgumentException e) {
++      log.error("Could not parse host and port information", e);
++    } catch (Exception e) {
++      log.error("Error reading zookeeper data for Monitor Log4j watcher", e);
++    }
++  }
++}


Mime
View raw message