accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject [16/50] [abbrv] Merge branch '1.5' into 1.6
Date Sat, 01 Nov 2014 04:57:10 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
index 1e7658a,0000000..6c40094
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
@@@ -1,329 -1,0 +1,331 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.server;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.net.InetAddress;
 +import java.net.UnknownHostException;
 +import java.util.Map.Entry;
 +import java.util.TreeMap;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.trace.DistributedTrace;
 +import org.apache.accumulo.core.util.AddressUtil;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.accumulo.core.util.Version;
 +import org.apache.accumulo.core.volume.Volume;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.ReadOnlyStore;
 +import org.apache.accumulo.fate.ReadOnlyTStore;
 +import org.apache.accumulo.fate.ZooStore;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.conf.ServerConfiguration;
 +import org.apache.accumulo.server.fs.VolumeManager;
 +import org.apache.accumulo.server.util.time.SimpleTimer;
 +import org.apache.accumulo.server.watcher.Log4jConfiguration;
 +import org.apache.accumulo.server.watcher.MonitorLog4jWatcher;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.apache.hadoop.fs.FileStatus;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.log4j.Logger;
 +import org.apache.log4j.helpers.LogLog;
 +import org.apache.log4j.xml.DOMConfigurator;
 +import org.apache.zookeeper.KeeperException;
 +
 +public class Accumulo {
 +
 +  private static final Logger log = Logger.getLogger(Accumulo.class);
 +
 +  public static synchronized void updateAccumuloVersion(VolumeManager fs, int oldVersion) {
 +    for (Volume volume : fs.getVolumes()) {
 +      try {
 +        if (getAccumuloPersistentVersion(fs) == oldVersion) {
 +          log.debug("Attempting to upgrade " + volume);
 +          Path dataVersionLocation = ServerConstants.getDataVersionLocation(volume);
 +          fs.create(new Path(dataVersionLocation, Integer.toString(ServerConstants.DATA_VERSION))).close();
 +          // TODO document failure mode & recovery if FS permissions cause above to work and below to fail ACCUMULO-2596
 +          Path prevDataVersionLoc = new Path(dataVersionLocation, Integer.toString(oldVersion));
 +          if (!fs.delete(prevDataVersionLoc)) {
 +            throw new RuntimeException("Could not delete previous data version location (" + prevDataVersionLoc + ") for " + volume);
 +          }
 +        }
 +      } catch (IOException e) {
 +        throw new RuntimeException("Unable to set accumulo version: an error occurred.", e);
 +      }
 +    }
 +  }
 +
 +  public static synchronized int getAccumuloPersistentVersion(FileSystem fs, Path path) {
 +    int dataVersion;
 +    try {
 +      FileStatus[] files = fs.listStatus(path);
 +      if (files == null || files.length == 0) {
 +        dataVersion = -1; // assume it is 0.5 or earlier
 +      } else {
 +        dataVersion = Integer.parseInt(files[0].getPath().getName());
 +      }
 +      return dataVersion;
 +    } catch (IOException e) {
 +      throw new RuntimeException("Unable to read accumulo version: an error occurred.", e);
 +    }
 +  }
 +
 +  public static synchronized int getAccumuloPersistentVersion(VolumeManager fs) {
 +    // It doesn't matter which Volume is used as they should all have the data version stored
 +    Volume v = fs.getVolumes().iterator().next();
 +    Path path = ServerConstants.getDataVersionLocation(v);
 +    return getAccumuloPersistentVersion(v.getFileSystem(), path);
 +  }
 +
 +  public static synchronized Path getAccumuloInstanceIdPath(VolumeManager fs) {
 +    // It doesn't matter which Volume is used as they should all have the instance ID stored
 +    Volume v = fs.getVolumes().iterator().next();
 +    return ServerConstants.getInstanceIdLocation(v);
 +  }
 +
 +  public static void enableTracing(String address, String application) {
 +    try {
 +      DistributedTrace.enable(HdfsZooInstance.getInstance(), ZooReaderWriter.getInstance(), application, address);
 +    } catch (Exception ex) {
 +      log.error("creating remote sink for trace spans", ex);
 +    }
 +  }
 +
 +  /**
 +   * Finds the best log4j configuration file. A generic file is used only if an
 +   * application-specific file is not available. An XML file is preferred over
 +   * a properties file, if possible.
 +   *
 +   * @param confDir directory where configuration files should reside
 +   * @param application application name for configuration file name
 +   * @return configuration file name
 +   */
 +  static String locateLogConfig(String confDir, String application) {
 +    String explicitConfigFile = System.getProperty("log4j.configuration");
 +    if (explicitConfigFile != null) {
 +      return explicitConfigFile;
 +    }
 +    String[] configFiles = {
 +      String.format("%s/%s_logger.xml", confDir, application),
 +      String.format("%s/%s_logger.properties", confDir, application),
 +      String.format("%s/generic_logger.xml", confDir),
 +      String.format("%s/generic_logger.properties", confDir)
 +    };
 +    String defaultConfigFile = configFiles[2];  // generic_logger.xml
 +    for (String f : configFiles) {
 +      if (new File(f).exists()) {
 +        return f;
 +      }
 +    }
 +    return defaultConfigFile;
 +  }
 +
 +  public static void setupLogging(String application) throws UnknownHostException {
 +    System.setProperty("org.apache.accumulo.core.application", application);
 +
 +    if (System.getenv("ACCUMULO_LOG_DIR") != null)
 +      System.setProperty("org.apache.accumulo.core.dir.log", System.getenv("ACCUMULO_LOG_DIR"));
 +    else
 +      System.setProperty("org.apache.accumulo.core.dir.log", System.getenv("ACCUMULO_HOME") + "/logs/");
 +
 +    String localhost = InetAddress.getLocalHost().getHostName();
 +    System.setProperty("org.apache.accumulo.core.ip.localhost.hostname", localhost);
 +
 +    // Use a specific log config, if it exists
 +    String logConfigFile = locateLogConfig(System.getenv("ACCUMULO_CONF_DIR"), application);
 +    // Turn off messages about not being able to reach the remote logger... we protect against that.
 +    LogLog.setQuietMode(true);
 +
 +    // Read the auditing config
 +    String auditConfig = String.format("%s/auditLog.xml", System.getenv("ACCUMULO_CONF_DIR"));
 +
 +    // Set up local file-based logging right away
 +    Log4jConfiguration logConf = new Log4jConfiguration(logConfigFile);
 +    logConf.resetLogger();
 +
 +    // Watch the auditLog.xml for the future updates
 +    DOMConfigurator.configureAndWatch(auditConfig, 5000);
 +  }
 +
 +  public static void init(VolumeManager fs, ServerConfiguration serverConfig, String application) throws IOException {
 +    final AccumuloConfiguration conf = serverConfig.getConfiguration();
 +    final Instance instance = serverConfig.getInstance();
 +
 +    // Use a specific log config, if it exists
 +    final String logConfigFile = locateLogConfig(System.getenv("ACCUMULO_CONF_DIR"), application);
 +
 +    // Set up polling log4j updates and log-forwarding using information advertised in zookeeper by the monitor
 +    MonitorLog4jWatcher logConfigWatcher = new MonitorLog4jWatcher(instance.getInstanceID(), logConfigFile);
 +    logConfigWatcher.setDelay(5000L);
 +    logConfigWatcher.start();
 +
 +    // Makes sure the log-forwarding to the monitor is configured
 +    int logPort = conf.getPort(Property.MONITOR_LOG4J_PORT);
 +    System.setProperty("org.apache.accumulo.core.host.log.port", Integer.toString(logPort));
 +
 +    log.info(application + " starting");
 +    log.info("Instance " + serverConfig.getInstance().getInstanceID());
 +    int dataVersion = Accumulo.getAccumuloPersistentVersion(fs);
 +    log.info("Data Version " + dataVersion);
 +    Accumulo.waitForZookeeperAndHdfs(fs);
 +
 +    Version codeVersion = new Version(Constants.VERSION);
 +    if (!(canUpgradeFromDataVersion(dataVersion))) {
 +      throw new RuntimeException("This version of accumulo (" + codeVersion + ") is not compatible with files stored using data version " + dataVersion);
 +    }
 +
 +    TreeMap<String,String> sortedProps = new TreeMap<String,String>();
 +    for (Entry<String,String> entry : conf)
 +      sortedProps.put(entry.getKey(), entry.getValue());
 +
 +    for (Entry<String,String> entry : sortedProps.entrySet()) {
 +      String key = entry.getKey();
 +      log.info(key + " = " + (Property.isSensitive(key) ? "<hidden>" : entry.getValue()));
 +    }
 +
 +    monitorSwappiness();
 +  }
 +
 +  /**
 +   * Sanity check that the current persistent version is allowed to upgrade to the version of Accumulo running.
 +   * @param dataVersion the version that is persisted in the backing Volumes
 +   */
 +  public static boolean canUpgradeFromDataVersion(final int dataVersion) {
 +    return dataVersion == ServerConstants.DATA_VERSION || dataVersion == ServerConstants.PREV_DATA_VERSION || dataVersion == ServerConstants.TWO_DATA_VERSIONS_AGO;
 +  }
 +
 +  /**
 +   * Does the data version number stored in the backing Volumes indicate we need to upgrade something?
 +   */
 +  public static boolean persistentVersionNeedsUpgrade(final int accumuloPersistentVersion) {
 +    return accumuloPersistentVersion == ServerConstants.TWO_DATA_VERSIONS_AGO || accumuloPersistentVersion == ServerConstants.PREV_DATA_VERSION;
 +  }
 +
 +  /**
 +   * Schedules a periodic check of /proc/sys/vm/swappiness, logging a warning when the value exceeds ten.
 +   */
 +  public static void monitorSwappiness() {
 +    SimpleTimer.getInstance().schedule(new Runnable() {
 +      @Override
 +      public void run() {
 +        try {
 +          String procFile = "/proc/sys/vm/swappiness";
 +          File swappiness = new File(procFile);
 +          if (swappiness.exists() && swappiness.canRead()) {
 +            InputStream is = new FileInputStream(procFile);
 +            try {
 +              byte[] buffer = new byte[10];
 +              int bytes = is.read(buffer);
-               String setting = new String(buffer, 0, bytes, Constants.UTF8);
++              String setting = new String(buffer, 0, bytes, UTF_8);
 +              setting = setting.trim();
 +              if (bytes > 0 && Integer.parseInt(setting) > 10) {
 +                log.warn("System swappiness setting is greater than ten (" + setting + ") which can cause time-sensitive operations to be delayed. "
 +                    + " Accumulo is time sensitive because it needs to maintain distributed lock agreement.");
 +              }
 +            } finally {
 +              is.close();
 +            }
 +          }
 +        } catch (Throwable t) {
 +          log.error(t, t);
 +        }
 +      }
 +    }, 1000, 10 * 60 * 1000);
 +  }
 +
 +  public static void waitForZookeeperAndHdfs(VolumeManager fs) {
 +    log.info("Attempting to talk to zookeeper");
 +    while (true) {
 +      try {
 +        ZooReaderWriter.getInstance().getChildren(Constants.ZROOT);
 +        break;
 +      } catch (InterruptedException e) {
 +        // ignored
 +      } catch (KeeperException ex) {
 +        log.info("Waiting for accumulo to be initialized");
 +        UtilWaitThread.sleep(1000);
 +      }
 +    }
 +    log.info("Zookeeper connected and initialized, attemping to talk to HDFS");
 +    long sleep = 1000;
 +    int unknownHostTries = 3;
 +    while (true) {
 +      try {
 +        if (fs.isReady())
 +          break;
 +        log.warn("Waiting for the NameNode to leave safemode");
 +      } catch (IOException ex) {
 +        log.warn("Unable to connect to HDFS", ex);
 +      } catch (IllegalArgumentException exception) {
 +        /* Unwrap the UnknownHostException so we can deal with it directly */
 +        if (exception.getCause() instanceof UnknownHostException) {
 +          if (unknownHostTries > 0) {
 +            log.warn("Unable to connect to HDFS, will retry. cause: " + exception.getCause());
 +            /* We need to make sure our sleep period is long enough to avoid getting a cached failure of the host lookup. */
 +            sleep = Math.max(sleep, (AddressUtil.getAddressCacheNegativeTtl((UnknownHostException)(exception.getCause()))+1)*1000);
 +          } else {
 +            log.error("Unable to connect to HDFS and have exceeded max number of retries.", exception);
 +            throw exception;
 +          }
 +          unknownHostTries--;
 +        } else {
 +          throw exception;
 +        }
 +      }
 +      log.info("Backing off due to failure; current sleep period is " + sleep / 1000. + " seconds");
 +      UtilWaitThread.sleep(sleep);
 +      /* Back off to give transient failures more time to clear. */
 +      sleep = Math.min(60 * 1000, sleep * 2);
 +    }
 +    log.info("Connected to HDFS");
 +  }
 +
 +  /**
 +   * Exit loudly if there are outstanding Fate operations.
 +   * Since Fate serializes class names, we need to make sure there are no queued
 +   * transactions from a previous version before continuing an upgrade. The status of the operations is
 +   * irrelevant; those in SUCCESSFUL status cause the same problem as those just queued.
 +   *
 +   * Note that the Master should not allow write access to Fate until after all upgrade steps are complete.
 +   *
 +   * Should be called as a guard before performing any upgrade steps, after determining that an upgrade is needed.
 +   *
 +   * see ACCUMULO-2519
 +   */
 +  public static void abortIfFateTransactions() {
 +    try {
 +      final ReadOnlyTStore<Accumulo> fate = new ReadOnlyStore<Accumulo>(new ZooStore<Accumulo>(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZFATE,
 +          ZooReaderWriter.getInstance()));
 +      if (!(fate.list().isEmpty())) {
 +        throw new AccumuloException("Aborting upgrade because there are outstanding FATE transactions from a previous Accumulo version. Please see the README document for instructions on what to do under your previous version.");
 +      }
 +    } catch (Exception exception) {
 +      log.fatal("Problem verifying Fate readiness", exception);
 +      System.exit(1);
 +    }
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
index 3508164,0000000..05f3c6c
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
@@@ -1,198 -1,0 +1,200 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.server.client;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.io.IOException;
 +import java.nio.ByteBuffer;
 +import java.util.Collections;
 +import java.util.List;
 +import java.util.UUID;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.ZooKeeperInstance;
 +import org.apache.accumulo.core.client.impl.ConnectorImpl;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.metadata.RootTable;
 +import org.apache.accumulo.core.security.Credentials;
 +import org.apache.accumulo.core.util.ByteBufferUtil;
 +import org.apache.accumulo.core.util.OpTimer;
 +import org.apache.accumulo.core.util.StringUtil;
 +import org.apache.accumulo.core.util.TextUtil;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.ZooCache;
 +import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
 +import org.apache.accumulo.server.Accumulo;
 +import org.apache.accumulo.server.conf.ServerConfiguration;
 +import org.apache.accumulo.server.fs.VolumeManager;
 +import org.apache.accumulo.server.fs.VolumeManagerImpl;
 +import org.apache.accumulo.server.zookeeper.ZooLock;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.Text;
 +import org.apache.log4j.Level;
 +import org.apache.log4j.Logger;
 +
 +/**
 + * An implementation of Instance that looks in HDFS and ZooKeeper to find the master and root tablet location.
 + * 
 + */
 +public class HdfsZooInstance implements Instance {
 +
 +  private HdfsZooInstance() {
 +    AccumuloConfiguration acuConf = ServerConfiguration.getSiteConfiguration();
 +    zooCache = new ZooCacheFactory().getZooCache(acuConf.get(Property.INSTANCE_ZK_HOST), (int) acuConf.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT));
 +  }
 +
 +  private static HdfsZooInstance cachedHdfsZooInstance = null;
 +
 +  public static synchronized Instance getInstance() {
 +    if (cachedHdfsZooInstance == null)
 +      cachedHdfsZooInstance = new HdfsZooInstance();
 +    return cachedHdfsZooInstance;
 +  }
 +
 +  private static ZooCache zooCache;
 +  private static String instanceId = null;
 +  private static final Logger log = Logger.getLogger(HdfsZooInstance.class);
 +
 +  @Override
 +  public String getRootTabletLocation() {
 +    String zRootLocPath = ZooUtil.getRoot(this) + RootTable.ZROOT_TABLET_LOCATION;
 +
 +    OpTimer opTimer = new OpTimer(log, Level.TRACE).start("Looking up root tablet location in zoocache.");
 +
 +    byte[] loc = zooCache.get(zRootLocPath);
 +
-     opTimer.stop("Found root tablet at " + (loc == null ? null : new String(loc, Constants.UTF8)) + " in %DURATION%");
++    opTimer.stop("Found root tablet at " + (loc == null ? null : new String(loc, UTF_8)) + " in %DURATION%");
 +
 +    if (loc == null) {
 +      return null;
 +    }
 +
-     return new String(loc, Constants.UTF8).split("\\|")[0];
++    return new String(loc, UTF_8).split("\\|")[0];
 +  }
 +
 +  @Override
 +  public List<String> getMasterLocations() {
 +
 +    String masterLocPath = ZooUtil.getRoot(this) + Constants.ZMASTER_LOCK;
 +
 +    OpTimer opTimer = new OpTimer(log, Level.TRACE).start("Looking up master location in zoocache.");
 +
 +    byte[] loc = ZooLock.getLockData(zooCache, masterLocPath, null);
 +
-     opTimer.stop("Found master at " + (loc == null ? null : new String(loc, Constants.UTF8)) + " in %DURATION%");
++    opTimer.stop("Found master at " + (loc == null ? null : new String(loc, UTF_8)) + " in %DURATION%");
 +
 +    if (loc == null) {
 +      return Collections.emptyList();
 +    }
 +
-     return Collections.singletonList(new String(loc, Constants.UTF8));
++    return Collections.singletonList(new String(loc, UTF_8));
 +  }
 +
 +  @Override
 +  public String getInstanceID() {
 +    if (instanceId == null)
 +      _getInstanceID();
 +    return instanceId;
 +  }
 +
 +  private static synchronized void _getInstanceID() {
 +    if (instanceId == null) {
 +      AccumuloConfiguration acuConf = ServerConfiguration.getSiteConfiguration();
 +      // InstanceID should be the same across all volumes, so just choose one
 +      VolumeManager fs;
 +      try {
 +        fs = VolumeManagerImpl.get();
 +      } catch (IOException e) {
 +        throw new RuntimeException(e);
 +      }
 +      Path instanceIdPath = Accumulo.getAccumuloInstanceIdPath(fs);
 +      log.trace("Looking for instanceId from " + instanceIdPath);
 +      String instanceIdFromFile = ZooUtil.getInstanceIDFromHdfs(instanceIdPath, acuConf);
 +      instanceId = instanceIdFromFile;
 +    }
 +  }
 +
 +  @Override
 +  public String getInstanceName() {
 +    return ZooKeeperInstance.lookupInstanceName(zooCache, UUID.fromString(getInstanceID()));
 +  }
 +
 +  @Override
 +  public String getZooKeepers() {
 +    return ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_ZK_HOST);
 +  }
 +
 +  @Override
 +  public int getZooKeepersSessionTimeOut() {
 +    return (int) ServerConfiguration.getSiteConfiguration().getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT);
 +  }
 +
 +  @Override
 +  public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
 +    return new ConnectorImpl(this, new Credentials(principal, token));
 +  }
 +
 +  @Deprecated
 +  @Override
 +  public Connector getConnector(String user, byte[] pass) throws AccumuloException, AccumuloSecurityException {
 +    return getConnector(user, new PasswordToken(pass));
 +  }
 +
 +  @Deprecated
 +  @Override
 +  public Connector getConnector(String user, ByteBuffer pass) throws AccumuloException, AccumuloSecurityException {
 +    return getConnector(user, ByteBufferUtil.toBytes(pass));
 +  }
 +
 +  @Deprecated
 +  @Override
 +  public Connector getConnector(String user, CharSequence pass) throws AccumuloException, AccumuloSecurityException {
 +    return getConnector(user, TextUtil.getBytes(new Text(pass.toString())));
 +  }
 +
 +  private AccumuloConfiguration conf = null;
 +
 +  @Deprecated
 +  @Override
 +  public AccumuloConfiguration getConfiguration() {
 +    if (conf == null)
 +      conf = new ServerConfiguration(this).getConfiguration();
 +    return conf;
 +  }
 +
 +  @Override
 +  @Deprecated
 +  public void setConfiguration(AccumuloConfiguration conf) {
 +    this.conf = conf;
 +  }
 +
 +  public static void main(String[] args) {
 +    Instance instance = HdfsZooInstance.getInstance();
 +    System.out.println("Instance Name: " + instance.getInstanceName());
 +    System.out.println("Instance ID: " + instance.getInstanceID());
 +    System.out.println("ZooKeepers: " + instance.getZooKeepers());
 +    System.out.println("Masters: " + StringUtil.join(instance.getMasterLocations(), ", "));
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/server/base/src/main/java/org/apache/accumulo/server/conf/ZooCachePropertyAccessor.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/conf/ZooCachePropertyAccessor.java
index df1be50,0000000..18feb4f
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooCachePropertyAccessor.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooCachePropertyAccessor.java
@@@ -1,160 -1,0 +1,161 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.server.conf;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.util.List;
 +import java.util.Map;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration.PropertyFilter;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.fate.zookeeper.ZooCache;
 +import org.apache.log4j.Logger;
 +
 +/**
 + * A helper object for accessing properties in a {@link ZooCache}.
 + */
 +public class ZooCachePropertyAccessor {
 +  private static final Logger log = Logger.getLogger(ZooCachePropertyAccessor.class);
 +
 +  static class PropCacheKey {
 +    final String instanceId;
 +    final String scope;
 +
 +    PropCacheKey(String iid, String s) {
 +      instanceId = iid;
 +      scope = s;
 +    }
 +
 +    @Override
 +    public boolean equals(Object other) {
 +      if (this == other) {
 +        return true;
 +      }
 +      if (!(other instanceof PropCacheKey)) {
 +        return false;
 +      }
 +      PropCacheKey o = (PropCacheKey) other;
 +      return (instanceId.equals(o.instanceId) && scope.equals(o.scope));
 +    }
 +
 +    @Override
 +    public int hashCode() {
 +      int c = 17;
 +      c = (37 * c) + instanceId.hashCode();
 +      c = (37 * c) + scope.hashCode();
 +      return c;
 +    }
 +  }
 +
 +  private final ZooCache propCache;
 +
 +  /**
 +   * Creates a new accessor.
 +   *
 +   * @param propCache
 +   *          property cache
 +   */
 +  ZooCachePropertyAccessor(ZooCache propCache) {
 +    this.propCache = propCache;
 +  }
 +
 +  /**
 +   * Gets the property cache accessed by this object.
 +   *
 +   * @return property cache
 +   */
 +  ZooCache getZooCache() {
 +    return propCache;
 +  }
 +
 +  /**
 +   * Gets a property. If the property is not in ZooKeeper or is present but an invalid format for the property type, the parent configuration is consulted (if
 +   * provided).
 +   *
 +   * @param property
 +   *          property to get
 +   * @param path
 +   *          ZooKeeper path where properties lie
 +   * @param parent
 +   *          parent configuration (optional)
 +   * @return property value, or null if not found
 +   */
 +  String get(Property property, String path, AccumuloConfiguration parent) {
 +    String key = property.getKey();
 +    String value = get(path + "/" + key);
 +
 +    if (value == null || !property.getType().isValidFormat(value)) {
 +      if (value != null) {
 +        log.error("Using default value for " + key + " due to improperly formatted " + property.getType() + ": " + value);
 +      }
 +      if (parent != null) {
 +        value = parent.get(property);
 +      }
 +    }
 +    return value;
 +  }
 +
 +  private String get(String path) {
 +    byte[] v = propCache.get(path);
 +    if (v != null) {
-       return new String(v, Constants.UTF8);
++      return new String(v, UTF_8);
 +    } else {
 +      return null;
 +    }
 +  }
 +
 +  /**
 +   * Gets all properties into the given map. Properties are filtered using the given filter. Properties from a parent configuration are also added to the map
 +   * and filtered, either using a separate filter or, if not specified, the other filter.
 +   *
 +   * @param props
 +   *          map to populate with properties
 +   * @param path
 +   *          ZooKeeper path where properties lie
 +   * @param filter
 +   *          property filter (required)
 +   * @param parent
 +   *          parent configuration (required)
 +   * @param parentFilter
 +   *          separate filter for parent properties (optional)
 +   */
 +  void getProperties(Map<String,String> props, String path, PropertyFilter filter, AccumuloConfiguration parent, PropertyFilter parentFilter) {
 +    parent.getProperties(props, parentFilter != null ? parentFilter : filter);
 +
 +    List<String> children = propCache.getChildren(path);
 +    if (children != null) {
 +      for (String child : children) {
 +        if (child != null && filter.accept(child)) {
 +          String value = get(path + "/" + child);
 +          if (value != null) {
 +            props.put(child, value);
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  /**
 +   * Clears the internal {@link ZooCache}.
 +   */
 +  void invalidateCache() {
 +    propCache.clear();
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
index a49ec43,0000000..6d25f1d
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
@@@ -1,118 -1,0 +1,120 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.server.conf;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.ZooCache;
 +import org.apache.log4j.Logger;
 +
 +public class ZooConfiguration extends AccumuloConfiguration {
 +  private static final Logger log = Logger.getLogger(ZooConfiguration.class);
 +
 +  private final String instanceId;
 +  private final ZooCache propCache;
 +  private final AccumuloConfiguration parent;
 +  private final Map<String,String> fixedProps = Collections.synchronizedMap(new HashMap<String,String>());
 +
 +  protected ZooConfiguration(String instanceId, ZooCache propCache, AccumuloConfiguration parent) {
 +    this.instanceId = instanceId;
 +    this.propCache = propCache;
 +    this.parent = parent;
 +  }
 +
 +  @Override
 +  public void invalidateCache() {
 +    if (propCache != null)
 +      propCache.clear();
 +  }
 +
 +  /**
 +   * Gets the parent configuration of this configuration.
 +   *
 +   * @return parent configuration
 +   */
 +  public AccumuloConfiguration getParentConfiguration() {
 +    return parent;
 +  }
 +
 +  private String _get(Property property) {
 +    String key = property.getKey();
 +    String value = null;
 +
 +    if (Property.isValidZooPropertyKey(key)) {
 +      value = get(key);
 +    }
 +
 +    if (value == null || !property.getType().isValidFormat(value)) {
 +      if (value != null)
 +        log.error("Using parent value for " + key + " due to improperly formatted " + property.getType() + ": " + value);
 +      value = parent.get(property);
 +    }
 +    return value;
 +  }
 +
 +  @Override
 +  public String get(Property property) {
 +    if (Property.isFixedZooPropertyKey(property)) {
 +      if (fixedProps.containsKey(property.getKey())) {
 +        return fixedProps.get(property.getKey());
 +      } else {
 +        synchronized (fixedProps) {
 +          String val = _get(property);
 +          fixedProps.put(property.getKey(), val);
 +          return val;
 +        }
 +
 +      }
 +    } else {
 +      return _get(property);
 +    }
 +  }
 +
 +  private String get(String key) {
 +    String zPath = ZooUtil.getRoot(instanceId) + Constants.ZCONFIG + "/" + key;
 +    byte[] v = propCache.get(zPath);
 +    String value = null;
 +    if (v != null)
-       value = new String(v, Constants.UTF8);
++      value = new String(v, UTF_8);
 +    return value;
 +  }
 +
 +  @Override
 +  public void getProperties(Map<String,String> props, PropertyFilter filter) {
 +    parent.getProperties(props, filter);
 +
 +    List<String> children = propCache.getChildren(ZooUtil.getRoot(instanceId) + Constants.ZCONFIG);
 +    if (children != null) {
 +      for (String child : children) {
 +        if (child != null && filter.accept(child)) {
 +          String value = get(child);
 +          if (value != null)
 +            props.put(child, value);
 +        }
 +      }
 +    }
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
index 807e821,0000000..587e38a
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
@@@ -1,315 -1,0 +1,317 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.server.constraints;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.HashSet;
 +import java.util.List;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.constraints.Constraint;
 +import org.apache.accumulo.core.data.ColumnUpdate;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.DataFileValue;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ClonedColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
 +import org.apache.accumulo.core.util.ColumnFQ;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
 +import org.apache.accumulo.server.zookeeper.ZooCache;
 +import org.apache.accumulo.server.zookeeper.ZooLock;
 +import org.apache.hadoop.io.Text;
 +import org.apache.log4j.Logger;
 +
/**
 * A {@link Constraint} applied to mutations against the Accumulo metadata table. It rejects rows
 * that do not follow the metadata row format, column updates that are not recognized metadata
 * columns, bulk-load file references whose transaction is no longer alive, and lock-column writes
 * whose ZooKeeper lock is not actually held. Violation codes are documented in
 * {@link #getViolationDescription(short)}.
 */
public class MetadataConstraints implements Constraint {
  
  // Lazily created on the first lock-column check; cleared in finalize().
  private ZooCache zooCache = null;
  private String zooRoot = null;
  
  private static final Logger log = Logger.getLogger(MetadataConstraints.class);
  
  // Lookup table of bytes permitted in the table-id portion of a metadata row:
  // lowercase letters, digits, and '!'.
  private static boolean[] validTableNameChars = new boolean[256];
  
  // NOTE(review): this is an *instance* initializer populating a *static* array, so it only runs
  // once a MetadataConstraints instance has been constructed. A static initializer would be safer;
  // confirm no caller depends on the current timing before changing it.
  {
    for (int i = 0; i < 256; i++) {
      validTableNameChars[i] = ((i >= 'a' && i <= 'z') || (i >= '0' && i <= '9')) || i == '!';
    }
  }
  
  // Exact family:qualifier pairs that are always valid metadata columns.
  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<ColumnFQ>(Arrays.asList(new ColumnFQ[] {
      TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN,
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN, TabletsSection.ServerColumnFamily.TIME_COLUMN,
      TabletsSection.ServerColumnFamily.LOCK_COLUMN, TabletsSection.ServerColumnFamily.FLUSH_COLUMN, TabletsSection.ServerColumnFamily.COMPACT_COLUMN}));
  
  // Column families in which any qualifier is valid.
  private static final HashSet<Text> validColumnFams = new HashSet<Text>(Arrays.asList(new Text[] {TabletsSection.BulkFileColumnFamily.NAME,
      LogColumnFamily.NAME, ScanFileColumnFamily.NAME, DataFileColumnFamily.NAME,
      TabletsSection.CurrentLocationColumnFamily.NAME, TabletsSection.LastLocationColumnFamily.NAME, TabletsSection.FutureLocationColumnFamily.NAME,
      ChoppedColumnFamily.NAME, ClonedColumnFamily.NAME}));
  
  // A column is valid if its whole family is recognized, or the exact family:qualifier pair is.
  private static boolean isValidColumn(ColumnUpdate cu) {
    
    if (validColumnFams.contains(new Text(cu.getColumnFamily())))
      return true;
    
    if (validColumnQuals.contains(new ColumnFQ(cu)))
      return true;
    
    return false;
  }
  
  // Appends a violation code, lazily creating the list (null means "no violations so far").
  static private ArrayList<Short> addViolation(ArrayList<Short> lst, int violation) {
    if (lst == null)
      lst = new ArrayList<Short>();
    lst.add((short) violation);
    return lst;
  }
  
  // Like addViolation, but never records the same code twice.
  static private ArrayList<Short> addIfNotPresent(ArrayList<Short> lst, int intViolation) {
    if (lst == null)
      return addViolation(lst, intViolation);
    short violation = (short) intViolation;
    if (!lst.contains(violation))
      return addViolation(lst, intViolation);
    return lst;
  }
  
  /**
   * Checks one mutation against the metadata-table rules.
   *
   * @return list of violation codes (see {@link #getViolationDescription(short)}), or null when the
   *         mutation is acceptable
   */
  @Override
  public List<Short> check(Environment env, Mutation mutation) {
    
    ArrayList<Short> violations = null;
    
    Collection<ColumnUpdate> colUpdates = mutation.getUpdates();
    
    // check the row: it should contain at least one ';' or end with '<'
    boolean containsSemiC = false;
    
    byte[] row = mutation.getRow();
    
    // always allow rows that fall within reserved areas
    if (row.length > 0 && row[0] == '~')
      return null;
    if (row.length > 2 && row[0] == '!' && row[1] == '!' && row[2] == '~')
      return null;
    
    // Validate the table-id prefix (bytes before the first ';' or '<') character by character.
    for (byte b : row) {
      if (b == ';') {
        containsSemiC = true;
      }
      
      if (b == ';' || b == '<')
        break;
      
      if (!validTableNameChars[0xff & b]) {
        violations = addIfNotPresent(violations, 4); // invalid metadata row format
      }
    }
    
    if (!containsSemiC) {
      // with no ';' separator, the row must be a default-tablet row ending in '<'
      if (row.length == 0 || row[row.length - 1] != '<') {
        violations = addIfNotPresent(violations, 4);
      }
    } else {
      if (row.length == 0) {
        violations = addIfNotPresent(violations, 4);
      }
    }
    
    // Rows starting with '!' must be of the form "!0<" or "!0;..." (system table rows).
    if (row.length > 0 && row[0] == '!') {
      if (row.length < 3 || row[1] != '0' || (row[2] != '<' && row[2] != ';')) {
        violations = addIfNotPresent(violations, 4);
      }
    }
    
    // ensure row is not less than Constants.METADATA_TABLE_ID
    if (new Text(row).compareTo(new Text(MetadataTable.ID)) < 0) {
      violations = addViolation(violations, 5); // row below the metadata table id range
    }
    
    boolean checkedBulk = false;
    
    for (ColumnUpdate columnUpdate : colUpdates) {
      Text columnFamily = new Text(columnUpdate.getColumnFamily());
      
      if (columnUpdate.isDeleted()) {
        if (!isValidColumn(columnUpdate)) {
          violations = addViolation(violations, 2); // invalid column name
        }
        continue;
      }
      
      // Empty values are only allowed in the scan-file family.
      if (columnUpdate.getValue().length == 0 && !columnFamily.equals(ScanFileColumnFamily.NAME)) {
        violations = addViolation(violations, 6);
      }
      
      if (columnFamily.equals(DataFileColumnFamily.NAME)) {
        // Data file entries must parse and carry non-negative size/entry counts.
        try {
          DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
          
          if (dfv.getSize() < 0 || dfv.getNumEntries() < 0) {
            violations = addViolation(violations, 1);
          }
        } catch (NumberFormatException nfe) {
          violations = addViolation(violations, 1);
        } catch (ArrayIndexOutOfBoundsException aiooe) {
          violations = addViolation(violations, 1);
        }
      } else if (columnFamily.equals(ScanFileColumnFamily.NAME)) {
        
      } else if (columnFamily.equals(TabletsSection.BulkFileColumnFamily.NAME)) {
        // Bulk-file references are validated once per mutation (checkedBulk flag).
        if (!columnUpdate.isDeleted() && !checkedBulk) {
          // splits, which also write the time reference, are allowed to write this reference even when
          // the transaction is not running because the other half of the tablet is holding a reference
          // to the file.
          boolean isSplitMutation = false;
          // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information,
          // but it writes everything. We allow it to re-write the bulk information if it is setting the location.
          // See ACCUMULO-1230.
          boolean isLocationMutation = false;
          
          HashSet<Text> dataFiles = new HashSet<Text>();
          HashSet<Text> loadedFiles = new HashSet<Text>();

          String tidString = new String(columnUpdate.getValue(), UTF_8);
          int otherTidCount = 0;
          
          // Scan the whole mutation to classify it and collect file references.
          for (ColumnUpdate update : mutation.getUpdates()) {
            if (new ColumnFQ(update).equals(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN)) {
              isSplitMutation = true;
            } else if (new Text(update.getColumnFamily()).equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
              isLocationMutation = true;
            } else if (new Text(update.getColumnFamily()).equals(DataFileColumnFamily.NAME)) {
              dataFiles.add(new Text(update.getColumnQualifier()));
            } else if (new Text(update.getColumnFamily()).equals(TabletsSection.BulkFileColumnFamily.NAME)) {
              loadedFiles.add(new Text(update.getColumnQualifier()));
              
              if (!new String(update.getValue(), UTF_8).equals(tidString)) {
                otherTidCount++;
              }
            }
          }
          
          if (!isSplitMutation && !isLocationMutation) {
            long tid = Long.parseLong(tidString);
            
            // violation 8: mixed transaction ids, mismatched file sets, or a dead bulk transaction
            try {
              if (otherTidCount > 0 || !dataFiles.equals(loadedFiles) || !getArbitrator().transactionAlive(Constants.BULK_ARBITRATOR_TYPE, tid)) {
                violations = addViolation(violations, 8);
              }
            } catch (Exception ex) {
              violations = addViolation(violations, 8);
            }
          }
          
          checkedBulk = true;
        }
      } else {
        if (!isValidColumn(columnUpdate)) {
          violations = addViolation(violations, 2);
        } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
            && (violations == null || !violations.contains((short) 4))) {
          // Only check prev-end-row ordering when the row itself parsed cleanly (no violation 4).
          KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);
          
          Text per = KeyExtent.decodePrevEndRow(new Value(columnUpdate.getValue()));
          
          boolean prevEndRowLessThanEndRow = per == null || ke.getEndRow() == null || per.compareTo(ke.getEndRow()) < 0;
          
          if (!prevEndRowLessThanEndRow) {
            violations = addViolation(violations, 3);
          }
        } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.ServerColumnFamily.LOCK_COLUMN)) {
          // Lock-column writes must name a lock currently held in ZooKeeper.
          if (zooCache == null) {
            zooCache = new ZooCache();
          }
          
          if (zooRoot == null) {
            zooRoot = ZooUtil.getRoot(HdfsZooInstance.getInstance());
          }
          
          boolean lockHeld = false;
          String lockId = new String(columnUpdate.getValue(), UTF_8);
          
          try {
            lockHeld = ZooLock.isLockHeld(zooCache, new ZooUtil.LockID(zooRoot, lockId));
          } catch (Exception e) {
            log.debug("Failed to verify lock was held " + lockId + " " + e.getMessage());
          }
          
          if (!lockHeld) {
            violations = addViolation(violations, 7);
          }
        }
        
      }
    }
    
    // Debug-log the offending mutation in full when anything was rejected.
    if (violations != null) {
      log.debug("violating metadata mutation : " + new String(mutation.getRow(), UTF_8));
      for (ColumnUpdate update : mutation.getUpdates()) {
        log.debug(" update: " + new String(update.getColumnFamily(), UTF_8) + ":" + new String(update.getColumnQualifier(), UTF_8) + " value "
            + (update.isDeleted() ? "[delete]" : new String(update.getValue(), UTF_8)));
      }
    }
    
    return violations;
  }
  
  // Overridable seam (used in place of a hard dependency) for the bulk-transaction arbitrator.
  protected Arbitrator getArbitrator() {
    return new ZooArbitrator();
  }
  
  @Override
  public String getViolationDescription(short violationCode) {
    switch (violationCode) {
      case 1:
        return "data file size must be a non-negative integer";
      case 2:
        return "Invalid column name given.";
      case 3:
        return "Prev end row is greater than or equal to end row.";
      case 4:
        return "Invalid metadata row format";
      case 5:
        return "Row can not be less than " + MetadataTable.ID;
      case 6:
        return "Empty values are not allowed for any " + MetadataTable.NAME + " column";
      case 7:
        return "Lock not held in zookeeper by writer";
      case 8:
        return "Bulk load transaction no longer running";
    }
    return null;
  }
  
  // NOTE(review): cleanup via finalize() is fragile (no guaranteed timing); confirm whether an
  // explicit close is feasible before relying on this further.
  @Override
  protected void finalize() {
    if (zooCache != null)
      zooCache.clear();
  }
  
}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 24b5605,0000000..7c4b8d3
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@@ -1,586 -1,0 +1,587 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.server.init;
 +
++import static com.google.common.base.Charsets.UTF_8;
 +import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
 +import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.TIME_COLUMN;
 +import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN;
 +
 +import java.io.FileNotFoundException;
 +import java.io.IOException;
 +import java.util.Arrays;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Locale;
 +import java.util.Map.Entry;
 +import java.util.UUID;
 +
 +import jline.console.ConsoleReader;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.cli.Help;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.impl.Namespaces;
 +import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.conf.SiteConfiguration;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.file.FileOperations;
 +import org.apache.accumulo.core.file.FileSKVWriter;
 +import org.apache.accumulo.core.iterators.user.VersioningIterator;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.master.thrift.MasterGoalState;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.RootTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 +import org.apache.accumulo.core.security.SecurityUtil;
 +import org.apache.accumulo.core.util.CachedConfiguration;
 +import org.apache.accumulo.core.util.ColumnFQ;
 +import org.apache.accumulo.core.volume.VolumeConfiguration;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 +import org.apache.accumulo.server.Accumulo;
 +import org.apache.accumulo.server.ServerConstants;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.conf.ServerConfiguration;
 +import org.apache.accumulo.server.constraints.MetadataConstraints;
 +import org.apache.accumulo.server.fs.VolumeManager;
 +import org.apache.accumulo.server.fs.VolumeManagerImpl;
 +import org.apache.accumulo.server.iterators.MetadataBulkLoadFilter;
 +import org.apache.accumulo.server.security.AuditedSecurityOperation;
 +import org.apache.accumulo.server.security.SystemCredentials;
 +import org.apache.accumulo.server.tables.TableManager;
 +import org.apache.accumulo.server.tablets.TabletTime;
 +import org.apache.accumulo.server.util.TablePropUtil;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FileStatus;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.Text;
 +import org.apache.log4j.Logger;
 +import org.apache.zookeeper.KeeperException;
 +import org.apache.zookeeper.ZooDefs.Ids;
 +
 +import com.beust.jcommander.Parameter;
 +
 +/**
 + * This class is used to setup the directory structure and the root tablet to get an instance started
 + *
 + */
 +public class Initialize {
  private static final Logger log = Logger.getLogger(Initialize.class);
  // Name of the initial administrative user created during init.
  private static final String DEFAULT_ROOT_USER = "root";
  public static final String TABLE_TABLETS_TABLET_DIR = "/table_info";

  // Shared console for interactive prompts; lazily created by getConsoleReader().
  private static ConsoleReader reader = null;
  // ZooKeeper handle; replaceable via setZooReaderWriter() (package-private, presumably for tests).
  private static IZooReaderWriter zoo = ZooReaderWriter.getInstance();
 +
  /**
   * Lazily creates and returns the shared console reader used for interactive prompts.
   */
  private static ConsoleReader getConsoleReader() throws IOException {
    if (reader == null)
      reader = new ConsoleReader();
    return reader;
  }
 +
  /**
   * Sets this class's ZooKeeper reader/writer. Package-private, presumably so tests can substitute
   * a stub — confirm against test usages.
   *
   * @param izoo
   *          reader/writer
   */
  static void setZooReaderWriter(IZooReaderWriter izoo) {
    zoo = izoo;
  }
 +
  /**
   * Gets this class's ZooKeeper reader/writer.
   *
   * @return reader/writer
   */
  static IZooReaderWriter getZooReaderWriter() {
    return zoo;
  }
 +
  // Initial per-table configuration applied to the metadata table at init time: replication,
  // compaction/versioning iterators, the metadata constraint, the bulk-load filter, and the
  // "tablet"/"server" locality groups.
  private static HashMap<String,String> initialMetadataConf = new HashMap<String,String>();
  static {
    initialMetadataConf.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "32K");
    initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), "5");
    initialMetadataConf.put(Property.TABLE_WALOG_ENABLED.getKey(), "true");
    initialMetadataConf.put(Property.TABLE_MAJC_RATIO.getKey(), "1");
    initialMetadataConf.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "64M");
    initialMetadataConf.put(Property.TABLE_CONSTRAINT_PREFIX.getKey() + "1", MetadataConstraints.class.getName());
    // Versioning iterator at priority 10 on all three scopes, keeping a single version.
    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers", "10," + VersioningIterator.class.getName());
    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers.opt.maxVersions", "1");
    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers", "10," + VersioningIterator.class.getName());
    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers.opt.maxVersions", "1");
    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers", "10," + VersioningIterator.class.getName());
    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers.opt.maxVersions", "1");
    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.bulkLoadFilter", "20," + MetadataBulkLoadFilter.class.getName());
    initialMetadataConf.put(Property.TABLE_FAILURES_IGNORE.getKey(), "false");
    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "tablet",
        String.format("%s,%s", TabletsSection.TabletColumnFamily.NAME, TabletsSection.CurrentLocationColumnFamily.NAME));
    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "server", String.format("%s,%s,%s,%s", TabletsSection.DataFileColumnFamily.NAME,
        TabletsSection.LogColumnFamily.NAME, TabletsSection.ServerColumnFamily.NAME, TabletsSection.FutureLocationColumnFamily.NAME));
    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "tablet,server");
    initialMetadataConf.put(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "");
    initialMetadataConf.put(Property.TABLE_INDEXCACHE_ENABLED.getKey(), "true");
    initialMetadataConf.put(Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "true");
  }
 +
  /**
   * Performs pre-initialization sanity checks: logs the configured filesystem, data dirs, and
   * ZooKeeper servers; verifies ZooKeeper is reachable; warns (via the console) when the instance
   * secret is still the default; and refuses to proceed when the volumes already look initialized.
   *
   * @return true when it is safe to proceed with initialization, false otherwise
   * @throws IOException
   *           if the already-initialized check itself fails
   */
  static boolean checkInit(Configuration conf, VolumeManager fs, SiteConfiguration sconf) throws IOException {
    @SuppressWarnings("deprecation")
    String fsUri = sconf.get(Property.INSTANCE_DFS_URI);
    // Fall back to Hadoop's default filesystem when the deprecated DFS URI property is unset.
    if (fsUri.equals(""))
      fsUri = FileSystem.getDefaultUri(conf).toString();
    log.info("Hadoop Filesystem is " + fsUri);
    log.info("Accumulo data dirs are " + Arrays.asList(VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration())));
    log.info("Zookeeper server is " + sconf.get(Property.INSTANCE_ZK_HOST));
    log.info("Checking if Zookeeper is available. If this hangs, then you need to make sure zookeeper is running");
    if (!zookeeperAvailable()) {
      log.fatal("Zookeeper needs to be up and running in order to init. Exiting ...");
      return false;
    }
    // Non-fatal warning: the instance secret being the shipped default is insecure but allowed.
    if (sconf.get(Property.INSTANCE_SECRET).equals(Property.INSTANCE_SECRET.getDefaultValue())) {
      ConsoleReader c = getConsoleReader();
      c.beep();
      c.println();
      c.println();
      c.println("Warning!!! Your instance secret is still set to the default, this is not secure. We highly recommend you change it.");
      c.println();
      c.println();
      c.println("You can change the instance secret in accumulo by using:");
      c.println("   bin/accumulo " + org.apache.accumulo.server.util.ChangeSecret.class.getName() + " oldPassword newPassword.");
      c.println("You will also need to edit your secret in your configuration file by adding the property instance.secret to your conf/accumulo-site.xml. Without this accumulo will not operate correctly");
    }
    try {
      if (isInitialized(fs)) {
        printInitializeFailureMessages(sconf);
        return false;
      }
    } catch (IOException e) {
      // Re-wrap to add context; the original exception is preserved as the cause.
      throw new IOException("Failed to check if filesystem already initialized", e);
    }

    return true;
  }
 +
 +  static void printInitializeFailureMessages(SiteConfiguration sconf) {
 +    @SuppressWarnings("deprecation")
 +    Property INSTANCE_DFS_DIR = Property.INSTANCE_DFS_DIR;
 +    @SuppressWarnings("deprecation")
 +    Property INSTANCE_DFS_URI = Property.INSTANCE_DFS_URI;
 +    String instanceDfsDir = sconf.get(INSTANCE_DFS_DIR);
 +    log.fatal("It appears the directories " + Arrays.asList(VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration()))
 +        + " were previously initialized.");
 +    String instanceVolumes = sconf.get(Property.INSTANCE_VOLUMES);
 +    String instanceDfsUri = sconf.get(INSTANCE_DFS_URI);
 +
 +    if (!instanceVolumes.isEmpty()) {
 +      log.fatal("Change the property " + Property.INSTANCE_VOLUMES + " to use different filesystems,");
 +    } else if (!instanceDfsDir.isEmpty()) {
 +      log.fatal("Change the property " + INSTANCE_DFS_URI + " to use a different filesystem,");
 +    } else {
 +      log.fatal("You are using the default URI for the filesystem. Set the property " + Property.INSTANCE_VOLUMES + " to use a different filesystem,");
 +    }
 +    log.fatal("or change the property " + INSTANCE_DFS_DIR + " to use a different directory.");
 +    log.fatal("The current value of " + INSTANCE_DFS_URI + " is |" + instanceDfsUri + "|");
 +    log.fatal("The current value of " + INSTANCE_DFS_DIR + " is |" + instanceDfsDir + "|");
 +    log.fatal("The current value of " + Property.INSTANCE_VOLUMES + " is |" + instanceVolumes + "|");
 +  }
 +
 +  public static boolean doInit(Opts opts, Configuration conf, VolumeManager fs) throws IOException {
 +    if (!checkInit(conf, fs, ServerConfiguration.getSiteConfiguration())) {
 +      return false;
 +    }
 +
 +    // prompt user for instance name and root password early, in case they
 +    // abort, we don't leave an inconsistent HDFS/ZooKeeper structure
 +    String instanceNamePath;
 +    try {
 +      instanceNamePath = getInstanceNamePath(opts);
 +    } catch (Exception e) {
 +      log.fatal("Failed to talk to zookeeper", e);
 +      return false;
 +    }
 +    opts.rootpass = getRootPassword(opts);
 +    return initialize(opts, instanceNamePath, fs);
 +  }
 +
  /**
   * Initializes ZooKeeper state, the filesystem layout, and security for a brand-new instance.
   * Steps run in order (ZooKeeper, filesystem, security); any failure logs fatally and aborts.
   *
   * @return true on success, false if any step failed
   */
  public static boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs) {

    UUID uuid = UUID.randomUUID();
    // the actual disk locations of the root table and tablets
    String[] configuredVolumes = VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration());
    final String rootTabletDir = new Path(fs.choose(configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + RootTable.ID
        + RootTable.ROOT_TABLET_LOCATION).toString();

    try {
      initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTabletDir);
    } catch (Exception e) {
      log.fatal("Failed to initialize zookeeper", e);
      return false;
    }

    try {
      initFileSystem(opts, fs, uuid, rootTabletDir);
    } catch (Exception e) {
      log.fatal("Failed to initialize filesystem", e);

      // With no explicit volumes configured, a failure here often means Hadoop's core-site.xml was
      // not on the classpath — diagnose by checking for the built-in "file:///" defaults.
      if (ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_VOLUMES).trim().equals("")) {
        Configuration fsConf = CachedConfiguration.getInstance();

        final String defaultFsUri = "file:///";
        String fsDefaultName = fsConf.get("fs.default.name", defaultFsUri), fsDefaultFS = fsConf.get("fs.defaultFS", defaultFsUri);

        // Try to determine when we couldn't find an appropriate core-site.xml on the classpath
        if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
          log.fatal("Default filesystem value ('fs.defaultFS' or 'fs.default.name') of '" + defaultFsUri + "' was found in the Hadoop configuration");
          log.fatal("Please ensure that the Hadoop core-site.xml is on the classpath using 'general.classpaths' in accumulo-site.xml");
        }
      }

      return false;
    }

    try {
      initSecurity(opts, uuid.toString());
    } catch (Exception e) {
      log.fatal("Failed to initialize security", e);
      return false;
    }
    return true;
  }
 +
 +  private static boolean zookeeperAvailable() {
 +    try {
 +      return zoo.exists("/");
 +    } catch (KeeperException e) {
 +      return false;
 +    } catch (InterruptedException e) {
 +      return false;
 +    }
 +  }
 +
 +  private static void initDirs(VolumeManager fs, UUID uuid, String[] baseDirs, boolean print) throws IOException {
 +    for (String baseDir : baseDirs) {
 +      fs.mkdirs(new Path(new Path(baseDir, ServerConstants.VERSION_DIR), "" + ServerConstants.DATA_VERSION));
 +
 +      // create an instance id
 +      Path iidLocation = new Path(baseDir, ServerConstants.INSTANCE_ID_DIR);
 +      fs.mkdirs(iidLocation);
 +      fs.createNewFile(new Path(iidLocation, uuid.toString()));
 +      if (print)
 +        log.info("Initialized volume " + baseDir);
 +    }
 +  }
 +
  /**
   * Lays down the initial on-disk structure: volume markers, the initial metadata table
   * configuration, the root/metadata tablet directories, and the root tablet's seed file.
   */
  private static void initFileSystem(Opts opts, VolumeManager fs, UUID uuid, String rootTabletDir) throws IOException {
    initDirs(fs, uuid, VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration()), false);

    // initialize initial metadata config in zookeeper
    initMetadataConfig();

    // Each directory is picked with an independent fs.choose() call, so the two metadata
    // tablet directories may land on different volumes.
    String tableMetadataTabletDir = fs.choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID
        + TABLE_TABLETS_TABLET_DIR;
    String defaultMetadataTabletDir = fs.choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID
        + Constants.DEFAULT_TABLET_LOCATION;

    // create table and default tablets directories
    createDirectories(fs, rootTabletDir, tableMetadataTabletDir, defaultMetadataTabletDir);

    // populate the root tablet with info about the metadata tablets
    String fileName = rootTabletDir + Path.SEPARATOR + "00000_00000." + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
    createMetadataFile(fs, fileName, MetadataTable.ID, tableMetadataTabletDir, defaultMetadataTabletDir);
  }
 +
 +  /**
 +   * Create an rfile in the default tablet's directory for a new table. This method is used to create the initial root tablet contents, with information about
 +   * the metadata table's tablets
 +   *
 +   * @param volmanager
 +   *          The VolumeManager
 +   * @param fileName
 +   *          The location to create the file
 +   * @param tableId
 +   *          TableID that is being "created"
 +   * @param tableTabletDir
 +   *          The table_info directory for the new table
 +   * @param defaultTabletDir
 +   *          The default_tablet directory for the new table
 +   */
 +  private static void createMetadataFile(VolumeManager volmanager, String fileName, String tableId, String tableTabletDir, String defaultTabletDir)
 +      throws IOException {
 +    FileSystem fs = volmanager.getVolumeByPath(new Path(fileName)).getFileSystem();
 +    FileSKVWriter tabletWriter = FileOperations.getInstance().openWriter(fileName, fs, fs.getConf(), AccumuloConfiguration.getDefaultConfiguration());
 +    tabletWriter.startDefaultLocalityGroup();
 +
 +    Text splitPoint = TabletsSection.getRange().getEndKey().getRow();
 +    createEntriesForTablet(tabletWriter, tableId, tableTabletDir, null, splitPoint);
 +    createEntriesForTablet(tabletWriter, tableId, defaultTabletDir, splitPoint, null);
 +
 +    tabletWriter.close();
 +  }
 +
 +  private static void createEntriesForTablet(FileSKVWriter writer, String tableId, String tabletDir, Text tabletPrevEndRow, Text tabletEndRow)
 +      throws IOException {
 +    Text extent = new Text(KeyExtent.getMetadataEntry(new Text(tableId), tabletEndRow));
-     addEntry(writer, extent, DIRECTORY_COLUMN, new Value(tabletDir.getBytes(Constants.UTF8)));
-     addEntry(writer, extent, TIME_COLUMN, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(Constants.UTF8)));
++    addEntry(writer, extent, DIRECTORY_COLUMN, new Value(tabletDir.getBytes(UTF_8)));
++    addEntry(writer, extent, TIME_COLUMN, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(UTF_8)));
 +    addEntry(writer, extent, PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(tabletPrevEndRow));
 +  }
 +
 +  private static void addEntry(FileSKVWriter writer, Text row, ColumnFQ col, Value value) throws IOException {
 +    writer.append(new Key(row, col.getColumnFamily(), col.getColumnQualifier(), 0), value);
 +  }
 +
 +  private static void createDirectories(VolumeManager fs, String... dirs) throws IOException {
 +    for (String s : dirs) {
 +      Path dir = new Path(s);
 +      try {
 +        FileStatus fstat = fs.getFileStatus(dir);
 +        // TODO Remove deprecation warning suppression when Hadoop1 support is dropped
 +        @SuppressWarnings("deprecation")
 +        boolean isDirectory = fstat.isDir();
 +        if (!isDirectory) {
 +          log.fatal("location " + dir + " exists but is not a directory");
 +          return;
 +        }
 +      } catch (FileNotFoundException fnfe) {
 +        // attempt to create directory, since it doesn't exist
 +        if (!fs.mkdirs(dir)) {
 +          log.fatal("unable to create directory " + dir);
 +          return;
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void initZooKeeper(Opts opts, String uuid, String instanceNamePath, String rootTabletDir) throws KeeperException, InterruptedException {
 +    // setup basic data in zookeeper
 +    zoo.putPersistentData(Constants.ZROOT, new byte[0], -1, NodeExistsPolicy.SKIP, Ids.OPEN_ACL_UNSAFE);
 +    zoo.putPersistentData(Constants.ZROOT + Constants.ZINSTANCES, new byte[0], -1, NodeExistsPolicy.SKIP, Ids.OPEN_ACL_UNSAFE);
 +
 +    // setup instance name
 +    if (opts.clearInstanceName)
 +      zoo.recursiveDelete(instanceNamePath, NodeMissingPolicy.SKIP);
-     zoo.putPersistentData(instanceNamePath, uuid.getBytes(Constants.UTF8), NodeExistsPolicy.FAIL);
++    zoo.putPersistentData(instanceNamePath, uuid.getBytes(UTF_8), NodeExistsPolicy.FAIL);
 +
 +    final byte[] EMPTY_BYTE_ARRAY = new byte[0], ZERO_CHAR_ARRAY = new byte[] {'0'};
 +
 +    // setup the instance
 +    String zkInstanceRoot = Constants.ZROOT + "/" + uuid;
 +    zoo.putPersistentData(zkInstanceRoot, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLES, Constants.ZTABLES_INITIAL_ID, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZNAMESPACES, new byte[0], NodeExistsPolicy.FAIL);
 +    TableManager.prepareNewNamespaceState(uuid, Namespaces.DEFAULT_NAMESPACE_ID, Namespaces.DEFAULT_NAMESPACE, NodeExistsPolicy.FAIL);
 +    TableManager.prepareNewNamespaceState(uuid, Namespaces.ACCUMULO_NAMESPACE_ID, Namespaces.ACCUMULO_NAMESPACE, NodeExistsPolicy.FAIL);
 +    TableManager.prepareNewTableState(uuid, RootTable.ID, Namespaces.ACCUMULO_NAMESPACE_ID, RootTable.NAME, TableState.ONLINE, NodeExistsPolicy.FAIL);
 +    TableManager.prepareNewTableState(uuid, MetadataTable.ID, Namespaces.ACCUMULO_NAMESPACE_ID, MetadataTable.NAME, TableState.ONLINE, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZTSERVERS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZPROBLEMS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_WALOGS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
-     zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_PATH, rootTabletDir.getBytes(Constants.UTF8), NodeExistsPolicy.FAIL);
++    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_PATH, rootTabletDir.getBytes(UTF_8), NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZTRACERS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTERS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_LOCK, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
-     zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_GOAL_STATE, MasterGoalState.NORMAL.toString().getBytes(Constants.UTF8), NodeExistsPolicy.FAIL);
++    zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_GOAL_STATE, MasterGoalState.NORMAL.toString().getBytes(UTF_8), NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZGC, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZGC_LOCK, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZCONFIG, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLE_LOCKS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZHDFS_RESERVATIONS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZNEXT_FILE, ZERO_CHAR_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZRECOVERY, ZERO_CHAR_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZMONITOR, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +    zoo.putPersistentData(zkInstanceRoot + Constants.ZMONITOR_LOCK, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
 +  }
 +
 +  private static String getInstanceNamePath(Opts opts) throws IOException, KeeperException, InterruptedException {
 +    // setup the instance name
 +    String instanceName, instanceNamePath = null;
 +    boolean exists = true;
 +    do {
 +      if (opts.cliInstanceName == null) {
 +        instanceName = getConsoleReader().readLine("Instance name : ");
 +      } else {
 +        instanceName = opts.cliInstanceName;
 +      }
 +      if (instanceName == null)
 +        System.exit(0);
 +      instanceName = instanceName.trim();
 +      if (instanceName.length() == 0)
 +        continue;
 +      instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + instanceName;
 +      if (opts.clearInstanceName) {
 +        exists = false;
 +        break;
 +      } else if (exists = zoo.exists(instanceNamePath)) {
 +        String decision = getConsoleReader().readLine("Instance name \"" + instanceName + "\" exists. Delete existing entry from zookeeper? [Y/N] : ");
 +        if (decision == null)
 +          System.exit(0);
 +        if (decision.length() == 1 && decision.toLowerCase(Locale.ENGLISH).charAt(0) == 'y') {
 +          opts.clearInstanceName = true;
 +          exists = false;
 +        }
 +      }
 +    } while (exists);
 +    return instanceNamePath;
 +  }
 +
 +  private static byte[] getRootPassword(Opts opts) throws IOException {
 +    if (opts.cliPassword != null) {
-       return opts.cliPassword.getBytes(Constants.UTF8);
++      return opts.cliPassword.getBytes(UTF_8);
 +    }
 +    String rootpass;
 +    String confirmpass;
 +    do {
 +      rootpass = getConsoleReader()
 +          .readLine("Enter initial password for " + DEFAULT_ROOT_USER + " (this may not be applicable for your security setup): ", '*');
 +      if (rootpass == null)
 +        System.exit(0);
 +      confirmpass = getConsoleReader().readLine("Confirm initial password for " + DEFAULT_ROOT_USER + ": ", '*');
 +      if (confirmpass == null)
 +        System.exit(0);
 +      if (!rootpass.equals(confirmpass))
 +        log.error("Passwords do not match");
 +    } while (!rootpass.equals(confirmpass));
-     return rootpass.getBytes(Constants.UTF8);
++    return rootpass.getBytes(UTF_8);
 +  }
 +
 +  private static void initSecurity(Opts opts, String iid) throws AccumuloSecurityException, ThriftSecurityException {
 +    AuditedSecurityOperation.getInstance(iid, true).initializeSecurity(SystemCredentials.get().toThrift(HdfsZooInstance.getInstance()), DEFAULT_ROOT_USER,
 +        opts.rootpass);
 +  }
 +
 +  public static void initMetadataConfig() throws IOException {
 +    try {
 +      Configuration conf = CachedConfiguration.getInstance();
 +      int max = conf.getInt("dfs.replication.max", 512);
 +      // Hadoop 0.23 switched the min value configuration name
 +      int min = Math.max(conf.getInt("dfs.replication.min", 1), conf.getInt("dfs.namenode.replication.min", 1));
 +      if (max < 5)
 +        setMetadataReplication(max, "max");
 +      if (min > 5)
 +        setMetadataReplication(min, "min");
 +      for (Entry<String,String> entry : initialMetadataConf.entrySet()) {
 +        if (!TablePropUtil.setTableProperty(RootTable.ID, entry.getKey(), entry.getValue()))
 +          throw new IOException("Cannot create per-table property " + entry.getKey());
 +        if (!TablePropUtil.setTableProperty(MetadataTable.ID, entry.getKey(), entry.getValue()))
 +          throw new IOException("Cannot create per-table property " + entry.getKey());
 +      }
 +    } catch (Exception e) {
 +      log.fatal("error talking to zookeeper", e);
 +      throw new IOException(e);
 +    }
 +  }
 +
 +  private static void setMetadataReplication(int replication, String reason) throws IOException {
 +    String rep = getConsoleReader().readLine(
 +        "Your HDFS replication " + reason + " is not compatible with our default " + MetadataTable.NAME + " replication of 5. What do you want to set your "
 +            + MetadataTable.NAME + " replication to? (" + replication + ") ");
 +    if (rep == null || rep.length() == 0)
 +      rep = Integer.toString(replication);
 +    else
 +      // Lets make sure it's a number
 +      Integer.parseInt(rep);
 +    initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), rep);
 +  }
 +
 +  public static boolean isInitialized(VolumeManager fs) throws IOException {
 +    for (String baseDir : VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration())) {
 +      if (fs.exists(new Path(baseDir, ServerConstants.INSTANCE_ID_DIR)) || fs.exists(new Path(baseDir, ServerConstants.VERSION_DIR)))
 +        return true;
 +    }
 +
 +    return false;
 +  }
 +
 +  private static void addVolumes(VolumeManager fs) throws IOException {
 +    HashSet<String> initializedDirs = new HashSet<String>();
 +    initializedDirs.addAll(Arrays.asList(ServerConstants.checkBaseUris(VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration()), true)));
 +
 +    HashSet<String> uinitializedDirs = new HashSet<String>();
 +    uinitializedDirs.addAll(Arrays.asList(VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration())));
 +    uinitializedDirs.removeAll(initializedDirs);
 +
 +    Path aBasePath = new Path(initializedDirs.iterator().next());
 +    Path iidPath = new Path(aBasePath, ServerConstants.INSTANCE_ID_DIR);
 +    Path versionPath = new Path(aBasePath, ServerConstants.VERSION_DIR);
 +
 +    UUID uuid = UUID.fromString(ZooUtil.getInstanceIDFromHdfs(iidPath, ServerConfiguration.getSiteConfiguration()));
 +
 +    if (ServerConstants.DATA_VERSION != Accumulo.getAccumuloPersistentVersion(versionPath.getFileSystem(CachedConfiguration.getInstance()), versionPath)) {
 +      throw new IOException("Accumulo " + Constants.VERSION + " cannot initialize data version " + Accumulo.getAccumuloPersistentVersion(fs));
 +    }
 +
 +    initDirs(fs, uuid, uinitializedDirs.toArray(new String[uinitializedDirs.size()]), true);
 +  }
 +
  // Command-line options for initialization; populated by opts.parseArgs() in main()
  // (presumably via the @Parameter annotations and the Help base class -- confirm wiring there).
  static class Opts extends Help {
    @Parameter(names = "--add-volumes", description = "Initialize any uninitialized volumes listed in instance.volumes")
    boolean addVolumes = false;
    @Parameter(names = "--reset-security", description = "just update the security information")
    boolean resetSecurity = false;
    @Parameter(names = "--clear-instance-name", description = "delete any existing instance name without prompting")
    boolean clearInstanceName = false;
    @Parameter(names = "--instance-name", description = "the instance name, if not provided, will prompt")
    String cliInstanceName;
    @Parameter(names = "--password", description = "set the password on the command line")
    String cliPassword;

    // Not a command-line flag: filled in by getRootPassword() during doInit()/main().
    byte[] rootpass = null;
  }
 +
 +  public static void main(String[] args) {
 +    Opts opts = new Opts();
 +    opts.parseArgs(Initialize.class.getName(), args);
 +
 +    try {
 +      AccumuloConfiguration acuConf = ServerConfiguration.getSiteConfiguration();
 +      SecurityUtil.serverLogin(acuConf);
 +      Configuration conf = CachedConfiguration.getInstance();
 +
 +      VolumeManager fs = VolumeManagerImpl.get(acuConf);
 +
 +      if (opts.resetSecurity) {
 +        if (isInitialized(fs)) {
 +          opts.rootpass = getRootPassword(opts);
 +          initSecurity(opts, HdfsZooInstance.getInstance().getInstanceID());
 +        } else {
 +          log.fatal("Attempted to reset security on accumulo before it was initialized");
 +        }
 +      }
 +
 +      if (opts.addVolumes) {
 +        addVolumes(fs);
 +      }
 +
 +      if (!opts.resetSecurity && !opts.addVolumes)
 +        if (!doInit(opts, conf, fs))
 +          System.exit(-1);
 +    } catch (Exception e) {
 +      log.fatal(e, e);
 +      throw new RuntimeException(e);
 +    }
 +  }
 +}


Mime
View raw message