accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [3/4] git commit: Merge branch '1.6'
Date Sat, 01 Nov 2014 00:30:28 GMT
Merge branch '1.6'

Conflicts:
	server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
	server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/05e572b9
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/05e572b9
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/05e572b9

Branch: refs/heads/master
Commit: 05e572b92ddbf9e29655a6f71ddc8456d338ac57
Parents: eca6022 5e0f6a4
Author: Christopher Tubbs <ctubbsii@apache.org>
Authored: Fri Oct 31 20:27:40 2014 -0400
Committer: Christopher Tubbs <ctubbsii@apache.org>
Committed: Fri Oct 31 20:27:40 2014 -0400

----------------------------------------------------------------------
 .../apache/accumulo/server/ServerConstants.java |   9 +-
 .../apache/accumulo/server/init/Initialize.java | 243 +++++++------------
 .../accumulo/server/util/RandomizeVolumes.java  |  16 +-
 .../java/org/apache/accumulo/master/Master.java |  38 +--
 4 files changed, 115 insertions(+), 191 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/05e572b9/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
index f15991f,51fa47e..6c331e0
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
@@@ -25,8 -24,6 +25,7 @@@ import java.util.HashSet
  import java.util.List;
  
  import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.conf.SiteConfiguration;
- import org.apache.accumulo.core.metadata.MetadataTable;
  import org.apache.accumulo.core.util.CachedConfiguration;
  import org.apache.accumulo.core.util.Pair;
  import org.apache.accumulo.core.volume.Volume;
@@@ -48,32 -46,11 +47,32 @@@ public class ServerConstants 
    public static final Integer WIRE_VERSION = 3;
  
    /**
 -   * current version (6) reflects the addition of a separate root table (ACCUMULO-1481) in version 1.6.0
 +   * version (7) reflects the change in the representation of trace information in TraceRepo
     */
 -  public static final int DATA_VERSION = 6;
 -  public static final int PREV_DATA_VERSION = 5;
 -  public static final int TWO_DATA_VERSIONS_AGO = 4;
 +  public static final int DATA_VERSION = 7;
 +  /**
 +   * version (6) reflects the addition of a separate root table (ACCUMULO-1481) in version 1.6.0
 +   */
 +  public static final int MOVE_TO_ROOT_TABLE = 6;
 +  /**
 +   * version (5) moves delete file markers for the metadata table into the root tablet
 +   */
 +  public static final int MOVE_DELETE_MARKERS = 5;
 +  /**
 +   * version (4) moves logging to HDFS in 1.5.0
 +   */
 +  public static final int LOGGING_TO_HDFS = 4;
-   public static final BitSet CAN_UPGRADE = new BitSet(); 
++  public static final BitSet CAN_UPGRADE = new BitSet();
 +  static {
-     for (int i : new int[]{DATA_VERSION, MOVE_TO_ROOT_TABLE, MOVE_DELETE_MARKERS, LOGGING_TO_HDFS}) {
++    for (int i : new int[] {DATA_VERSION, MOVE_TO_ROOT_TABLE, MOVE_DELETE_MARKERS, LOGGING_TO_HDFS}) {
 +      CAN_UPGRADE.set(i);
 +    }
 +  }
 +  public static final BitSet NEEDS_UPGRADE = new BitSet();
 +  static {
 +    NEEDS_UPGRADE.xor(CAN_UPGRADE);
 +    NEEDS_UPGRADE.clear(DATA_VERSION);
 +  }
  
    private static String[] baseUris = null;
  

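A note on the ServerConstants hunk above: it replaces the single PREV_DATA_VERSION / TWO_DATA_VERSIONS_AGO pair with named version constants and two BitSets. Below is a minimal, self-contained sketch of that gating logic; the constants and static blocks are copied from the hunk, while the main method and its printout are illustrative additions.

import java.util.BitSet;

public class UpgradeGateSketch {
  static final int DATA_VERSION = 7;
  static final int MOVE_TO_ROOT_TABLE = 6;
  static final int MOVE_DELETE_MARKERS = 5;
  static final int LOGGING_TO_HDFS = 4;

  // Versions this build can start from.
  static final BitSet CAN_UPGRADE = new BitSet();
  static {
    for (int i : new int[] {DATA_VERSION, MOVE_TO_ROOT_TABLE, MOVE_DELETE_MARKERS, LOGGING_TO_HDFS}) {
      CAN_UPGRADE.set(i);
    }
  }

  // xor against an empty set copies CAN_UPGRADE's bits; clearing DATA_VERSION
  // then leaves exactly the versions that still require an upgrade pass.
  static final BitSet NEEDS_UPGRADE = new BitSet();
  static {
    NEEDS_UPGRADE.xor(CAN_UPGRADE);
    NEEDS_UPGRADE.clear(DATA_VERSION);
  }

  public static void main(String[] args) {
    for (int v = 3; v <= 7; v++) {
      System.out.printf("v%d: canUpgrade=%b needsUpgrade=%b%n", v, CAN_UPGRADE.get(v), NEEDS_UPGRADE.get(v));
    }
    // prints v3 false/false, v4-v6 true/true, v7 true/false
  }
}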
http://git-wip-us.apache.org/repos/asf/accumulo/blob/05e572b9/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index cc2b469,24b5605..1fd9aa3
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@@ -16,11 -16,13 +16,15 @@@
   */
  package org.apache.accumulo.server.init;
  
+ import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
+ import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.TIME_COLUMN;
+ import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN;
+ 
  import java.io.FileNotFoundException;
  import java.io.IOException;
 +import java.nio.charset.StandardCharsets;
  import java.util.Arrays;
 +import java.util.Collections;
  import java.util.HashMap;
  import java.util.HashSet;
  import java.util.Locale;
@@@ -51,13 -49,10 +55,12 @@@ import org.apache.accumulo.core.master.
  import org.apache.accumulo.core.master.thrift.MasterGoalState;
  import org.apache.accumulo.core.metadata.MetadataTable;
  import org.apache.accumulo.core.metadata.RootTable;
- import org.apache.accumulo.core.metadata.schema.MetadataSchema;
++import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
- import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
- import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 +import org.apache.accumulo.core.replication.ReplicationConstants;
  import org.apache.accumulo.core.security.SecurityUtil;
  import org.apache.accumulo.core.util.CachedConfiguration;
+ import org.apache.accumulo.core.util.ColumnFQ;
  import org.apache.accumulo.core.volume.VolumeConfiguration;
  import org.apache.accumulo.core.zookeeper.ZooUtil;
  import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
@@@ -189,13 -183,16 +192,16 @@@ public class Initialize 
      return true;
    }
  
-   @SuppressWarnings("deprecation")
    static void printInitializeFailureMessages(SiteConfiguration sconf) {
-     String instanceDfsDir = sconf.get(Property.INSTANCE_DFS_DIR);
+     @SuppressWarnings("deprecation")
+     Property INSTANCE_DFS_DIR = Property.INSTANCE_DFS_DIR;
+     @SuppressWarnings("deprecation")
+     Property INSTANCE_DFS_URI = Property.INSTANCE_DFS_URI;
+     String instanceDfsDir = sconf.get(INSTANCE_DFS_DIR);
 -    log.fatal("It appears the directories " + Arrays.asList(VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration()))
 +    log.fatal("It appears the directories " + Arrays.asList(VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance()))
          + " were previously initialized.");
      String instanceVolumes = sconf.get(Property.INSTANCE_VOLUMES);
-     String instanceDfsUri = sconf.get(Property.INSTANCE_DFS_URI);
+     String instanceDfsUri = sconf.get(INSTANCE_DFS_URI);
  
      if (!instanceVolumes.isEmpty()) {
        log.fatal("Change the property " + Property.INSTANCE_VOLUMES + " to use different
filesystems,");
@@@ -232,12 -229,12 +238,12 @@@
  
      UUID uuid = UUID.randomUUID();
      // the actual disk locations of the root table and tablets
 -    String[] configuredVolumes = VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration());
 +    String[] configuredVolumes = VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance());
-     final Path rootTablet = new Path(fs.choose(configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + RootTable.ID
-         + RootTable.ROOT_TABLET_LOCATION);
+     final String rootTabletDir = new Path(fs.choose(configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + RootTable.ID
+         + RootTable.ROOT_TABLET_LOCATION).toString();
  
      try {
-       initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTablet);
+       initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTabletDir);
      } catch (Exception e) {
        log.fatal("Failed to initialize zookeeper", e);
        return false;
@@@ -304,72 -293,75 +302,75 @@@
      }
    }
  
-   // TODO Remove deprecation warning suppression when Hadoop1 support is dropped
-   @SuppressWarnings("deprecation")
-   private static void initFileSystem(Opts opts, VolumeManager fs, UUID uuid, Path rootTablet) throws IOException {
-     FileStatus fstat;
- 
+   private static void initFileSystem(Opts opts, VolumeManager fs, UUID uuid, String rootTabletDir) throws IOException {
 -    initDirs(fs, uuid, VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration()), false);
 +    initDirs(fs, uuid, VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance()), false);
  
-     // the actual disk locations of the metadata table and tablets
-     final Path[] metadataTableDirs = paths(ServerConstants.getMetadataTableDirs());
+     // initialize initial metadata config in zookeeper
+     initMetadataConfig();
  
     String tableMetadataTabletDir = fs.choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID
          + TABLE_TABLETS_TABLET_DIR;
     String defaultMetadataTabletDir = fs.choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID
          + Constants.DEFAULT_TABLET_LOCATION;
  
-     // initialize initial metadata config in zookeeper
-     initMetadataConfig();
+     // create table and default tablets directories
+     createDirectories(fs, rootTabletDir, tableMetadataTabletDir, defaultMetadataTabletDir);
  
-     // create metadata table
-     for (Path mtd : metadataTableDirs) {
-       try {
-         fstat = fs.getFileStatus(mtd);
-         if (!fstat.isDir()) {
-           log.fatal("location " + mtd.toString() + " exists but is not a directory");
-           return;
-         }
-       } catch (FileNotFoundException fnfe) {
-         if (!fs.mkdirs(mtd)) {
-           log.fatal("unable to create directory " + mtd.toString());
-           return;
-         }
-       }
-     }
+     // populate the root tablet with info about the metadata tablets
+     String fileName = rootTabletDir + Path.SEPARATOR + "00000_00000." + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
+     createMetadataFile(fs, fileName, MetadataTable.ID, tableMetadataTabletDir, defaultMetadataTabletDir);
+   }
  
-     // create root table and tablet
-     try {
-       fstat = fs.getFileStatus(rootTablet);
-       if (!fstat.isDir()) {
-         log.fatal("location " + rootTablet.toString() + " exists but is not a directory");
-         return;
-       }
-     } catch (FileNotFoundException fnfe) {
-       if (!fs.mkdirs(rootTablet)) {
-         log.fatal("unable to create directory " + rootTablet.toString());
-         return;
-       }
-     }
+   /**
+    * Create an rfile in the default tablet's directory for a new table. This method is used to create the initial root tablet contents, with information about
+    * the metadata table's tablets
+    *
+    * @param volmanager
+    *          The VolumeManager
+    * @param fileName
+    *          The location to create the file
+    * @param tableId
+    *          TableID that is being "created"
+    * @param tableTabletDir
+    *          The table_info directory for the new table
+    * @param defaultTabletDir
+    *          The default_tablet directory for the new table
+    */
+   private static void createMetadataFile(VolumeManager volmanager, String fileName, String tableId, String tableTabletDir, String defaultTabletDir)
+       throws IOException {
+     FileSystem fs = volmanager.getVolumeByPath(new Path(fileName)).getFileSystem();
+     FileSKVWriter tabletWriter = FileOperations.getInstance().openWriter(fileName, fs, fs.getConf(), AccumuloConfiguration.getDefaultConfiguration());
+     tabletWriter.startDefaultLocalityGroup();
  
-     // populate the root tablet with info about the default tablet
-     // the root tablet contains the key extent and locations of all the
-     // metadata tablets
 -    initializeTableData(fs, MetadataTable.ID, rootTablet.toString(), tableMetadataTabletDir, defaultMetadataTabletDir);
+     Text splitPoint = TabletsSection.getRange().getEndKey().getRow();
+     createEntriesForTablet(tabletWriter, tableId, tableTabletDir, null, splitPoint);
+     createEntriesForTablet(tabletWriter, tableId, defaultTabletDir, splitPoint, null);
  
-     createDirectories(fs, tableMetadataTabletDir, defaultMetadataTabletDir);
+     tabletWriter.close();
    }
  
-   @SuppressWarnings("deprecation")
-   private static void createDirectories(VolumeManager fs, String... directories) throws IOException {
-     // create table and default tablets directories
-     FileStatus fstat;
-     for (String s : directories) {
+   private static void createEntriesForTablet(FileSKVWriter writer, String tableId, String tabletDir, Text tabletPrevEndRow, Text tabletEndRow)
+       throws IOException {
+     Text extent = new Text(KeyExtent.getMetadataEntry(new Text(tableId), tabletEndRow));
 -    addEntry(writer, extent, DIRECTORY_COLUMN, new Value(tabletDir.getBytes(Constants.UTF8)));
 -    addEntry(writer, extent, TIME_COLUMN, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(Constants.UTF8)));
++    addEntry(writer, extent, DIRECTORY_COLUMN, new Value(tabletDir.getBytes(StandardCharsets.UTF_8)));
++    addEntry(writer, extent, TIME_COLUMN, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(StandardCharsets.UTF_8)));
+     addEntry(writer, extent, PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(tabletPrevEndRow));
+   }
+ 
+   private static void addEntry(FileSKVWriter writer, Text row, ColumnFQ col, Value value) throws IOException {
+     writer.append(new Key(row, col.getColumnFamily(), col.getColumnQualifier(), 0), value);
+   }
+ 
+   private static void createDirectories(VolumeManager fs, String... dirs) throws IOException {
+     for (String s : dirs) {
        Path dir = new Path(s);
        try {
-         fstat = fs.getFileStatus(dir);
-         if (!fstat.isDir()) {
-           log.fatal("location " + dir.toString() + " exists but is not a directory");
+         FileStatus fstat = fs.getFileStatus(dir);
+         // TODO Remove deprecation warning suppression when Hadoop1 support is dropped
+         @SuppressWarnings("deprecation")
+         boolean isDirectory = fstat.isDir();
+         if (!isDirectory) {
+           log.fatal("location " + dir + " exists but is not a directory");
            return;
          }
        } catch (FileNotFoundException fnfe) {
@@@ -475,11 -399,11 +408,12 @@@
      zoo.putPersistentData(zkInstanceRoot + Constants.ZPROBLEMS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
      zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
     zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_WALOGS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
-     zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_PATH, rootTablet.toString().getBytes(), NodeExistsPolicy.FAIL);
 -    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_PATH, rootTabletDir.getBytes(Constants.UTF8), NodeExistsPolicy.FAIL);
++    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_PATH, rootTabletDir.getBytes(StandardCharsets.UTF_8), NodeExistsPolicy.FAIL);
      zoo.putPersistentData(zkInstanceRoot + Constants.ZTRACERS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
      zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTERS, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
      zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_LOCK, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
-     zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_GOAL_STATE, MasterGoalState.NORMAL.toString().getBytes(StandardCharsets.UTF_8), NodeExistsPolicy.FAIL);
 -    zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_GOAL_STATE, MasterGoalState.NORMAL.toString().getBytes(Constants.UTF8), NodeExistsPolicy.FAIL);
++    zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_GOAL_STATE, MasterGoalState.NORMAL.toString().getBytes(StandardCharsets.UTF_8),
++        NodeExistsPolicy.FAIL);
      zoo.putPersistentData(zkInstanceRoot + Constants.ZGC, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
      zoo.putPersistentData(zkInstanceRoot + Constants.ZGC_LOCK, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
      zoo.putPersistentData(zkInstanceRoot + Constants.ZCONFIG, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
@@@ -570,28 -492,6 +504,22 @@@
        log.fatal("error talking to zookeeper", e);
        throw new IOException(e);
      }
-   }
- 
-   protected static void initMetadataConfig() throws IOException {
-     initMetadataConfig(RootTable.ID);
-     initMetadataConfig(MetadataTable.ID);
- 
 +    // ACCUMULO-3077 Set the combiner on accumulo.metadata during init to reduce the likelihood of a race
 +    // condition where a tserver compacts away Status updates because it didn't see the Combiner configured
 +    IteratorSetting setting = new IteratorSetting(9, ReplicationTableUtil.COMBINER_NAME, StatusCombiner.class);
-     Combiner.setColumns(setting, Collections.singletonList(new Column(MetadataSchema.ReplicationSection.COLF)));
++    Combiner.setColumns(setting, Collections.singletonList(new Column(ReplicationSection.COLF)));
 +    try {
 +      for (IteratorScope scope : IteratorScope.values()) {
 +        String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name().toLowerCase(), setting.getName());
 +        for (Entry<String,String> prop : setting.getOptions().entrySet()) {
 +          TablePropUtil.setTableProperty(MetadataTable.ID, root + ".opt." + prop.getKey(), prop.getValue());
 +        }
 +        TablePropUtil.setTableProperty(MetadataTable.ID, root, setting.getPriority() + "," + setting.getIteratorClass());
 +      }
 +    } catch (Exception e) {
 +      log.fatal("Error talking to ZooKeeper", e);
 +      throw new IOException(e);
 +    }
    }
  
   private static void setMetadataReplication(int replication, String reason) throws IOException {
@@@ -617,11 -517,10 +545,10 @@@
  
    private static void addVolumes(VolumeManager fs) throws IOException {
      HashSet<String> initializedDirs = new HashSet<String>();
-     initializedDirs
-         .addAll(Arrays.asList(ServerConstants.checkBaseUris(VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance()), true)));
 -    initializedDirs.addAll(Arrays.asList(ServerConstants.checkBaseUris(VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration()), true)));
++    initializedDirs.addAll(Arrays.asList(ServerConstants.checkBaseUris(VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance()), true)));
  
      HashSet<String> uinitializedDirs = new HashSet<String>();
 -    uinitializedDirs.addAll(Arrays.asList(VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration())));
 +    uinitializedDirs.addAll(Arrays.asList(VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance())));
      uinitializedDirs.removeAll(initializedDirs);
  
      Path aBasePath = new Path(initializedDirs.iterator().next());

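For context on the ACCUMULO-3077 block at the end of the Initialize hunk: the loop writes one table property per iterator scope plus one per combiner option. The sketch below shows only the property-key layout that loop produces; the prefix, scope names, combiner name, class, and column value are assumptions standing in for Property.TABLE_ITERATOR_PREFIX, IteratorScope, ReplicationTableUtil.COMBINER_NAME, StatusCombiner, and ReplicationSection.COLF.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;

public class IteratorKeySketch {
  public static void main(String[] args) {
    String prefix = "table.iterator.";  // assumed value of Property.TABLE_ITERATOR_PREFIX
    String name = "replcombiner";       // stand-in for ReplicationTableUtil.COMBINER_NAME
    String iterator = "9,org.apache.accumulo.server.replication.StatusCombiner"; // priority,class

    Map<String,String> opts = new LinkedHashMap<String,String>();
    opts.put("columns", "~repl");       // stand-in for the ReplicationSection column family

    Map<String,String> props = new TreeMap<String,String>();
    for (String scope : new String[] {"majc", "minc", "scan"}) { // IteratorScope values, lowercased
      String root = String.format("%s%s.%s", prefix, scope, name);
      for (Map.Entry<String,String> opt : opts.entrySet()) {
        props.put(root + ".opt." + opt.getKey(), opt.getValue());
      }
      props.put(root, iterator);
    }
    for (Map.Entry<String,String> e : props.entrySet()) {
      System.out.println(e.getKey() + " = " + e.getValue());
    }
  }
}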
http://git-wip-us.apache.org/repos/asf/accumulo/blob/05e572b9/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
index 4c5326e,dd540f2..9f298af
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
@@@ -16,13 -16,12 +16,12 @@@
   */
  package org.apache.accumulo.server.util;
  
++import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
++
  import java.io.IOException;
 +import java.nio.charset.StandardCharsets;
  import java.util.Map.Entry;
  
--import org.apache.accumulo.core.Constants;
--
--import org.apache.accumulo.server.security.SystemCredentials;
  import org.apache.accumulo.core.cli.ClientOnRequiredTable;
  import org.apache.accumulo.core.client.AccumuloException;
  import org.apache.accumulo.core.client.AccumuloSecurityException;
@@@ -40,10 -39,10 +39,10 @@@ import org.apache.accumulo.core.securit
  import org.apache.accumulo.server.ServerConstants;
  import org.apache.accumulo.server.fs.VolumeManager;
  import org.apache.accumulo.server.fs.VolumeManagerImpl;
++import org.apache.accumulo.server.security.SystemCredentials;
  import org.apache.accumulo.server.tables.TableManager;
  import org.apache.hadoop.fs.Path;
  import org.apache.log4j.Logger;
--import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
  
  public class RandomizeVolumes {
    private static final Logger log = Logger.getLogger(RandomizeVolumes.class);
@@@ -106,9 -105,9 +105,10 @@@
        }
        Key key = entry.getKey();
        Mutation m = new Mutation(key.getRow());
--      
--      String newLocation = vm.choose(ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR + directory;
 -      m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value(newLocation.getBytes(Constants.UTF8)));
++
++      String newLocation = vm.choose(ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR
++          + directory;
 +      m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value(newLocation.getBytes(StandardCharsets.UTF_8)));
        if (log.isTraceEnabled()) {
          log.trace("Replacing " + oldLocation + " with " + newLocation);
        }

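The RandomizeVolumes change above rebuilds a tablet's directory path from a randomly chosen base URI. A tiny sketch of that composition follows; the volume URIs, table id, and tablet directory name are invented, and choose() merely stands in for VolumeManager.choose.

import java.util.Random;

public class ChooseVolumeSketch {
  private static final Random random = new Random();

  // Pick one base URI at random, as the volume chooser does for new locations.
  static String choose(String[] baseUris) {
    return baseUris[random.nextInt(baseUris.length)];
  }

  public static void main(String[] args) {
    String tableDir = "tables";          // assumed value behind ServerConstants.TABLE_DIR
    String[] volumes = {"hdfs://nn1/accumulo", "hdfs://nn2/accumulo"};
    String tableId = "2";                // hypothetical table id
    String directory = "t-0000001";      // hypothetical tablet directory
    String newLocation = choose(volumes) + "/" + tableDir + "/" + tableId + "/" + directory;
    System.out.println("new location: " + newLocation);
  }
}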
http://git-wip-us.apache.org/repos/asf/accumulo/blob/05e572b9/server/master/src/main/java/org/apache/accumulo/master/Master.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/Master.java
index 0617d87,9537f4e..e5f3955
--- a/server/master/src/main/java/org/apache/accumulo/master/Master.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/Master.java
@@@ -300,53 -289,20 +301,53 @@@ public class Master implements LiveTSer
          IZooReaderWriter zoo = ZooReaderWriter.getInstance();
          final String zooRoot = ZooUtil.getRoot(instance);
  
 -        if (accumuloPersistentVersion == ServerConstants.TWO_DATA_VERSIONS_AGO) {
 -          log.debug("Handling updates for version " + ServerConstants.TWO_DATA_VERSIONS_AGO);
 +        log.debug("Handling updates for version " + accumuloPersistentVersion);
  
 -          log.debug("Cleaning out remnants of logger role.");
 -          zoo.recursiveDelete(zooRoot + "/loggers", NodeMissingPolicy.SKIP);
 -          zoo.recursiveDelete(zooRoot + "/dead/loggers", NodeMissingPolicy.SKIP);
 +        log.debug("Cleaning out remnants of logger role.");
 +        zoo.recursiveDelete(zooRoot + "/loggers", NodeMissingPolicy.SKIP);
 +        zoo.recursiveDelete(zooRoot + "/dead/loggers", NodeMissingPolicy.SKIP);
  
 -          final byte[] zero = new byte[] {'0'};
 -          log.debug("Initializing recovery area.");
 -          zoo.putPersistentData(zooRoot + Constants.ZRECOVERY, zero, NodeExistsPolicy.SKIP);
 +        final byte[] zero = new byte[] {'0'};
 +        log.debug("Initializing recovery area.");
 +        zoo.putPersistentData(zooRoot + Constants.ZRECOVERY, zero, NodeExistsPolicy.SKIP);
  
 -          for (String id : zoo.getChildren(zooRoot + Constants.ZTABLES)) {
 -            log.debug("Prepping table " + id + " for compaction cancellations.");
 -            zoo.putPersistentData(zooRoot + Constants.ZTABLES + "/" + id + Constants.ZTABLE_COMPACT_CANCEL_ID, zero, NodeExistsPolicy.SKIP);
 +        for (String id : zoo.getChildren(zooRoot + Constants.ZTABLES)) {
 +          log.debug("Prepping table " + id + " for compaction cancellations.");
 +          zoo.putPersistentData(zooRoot + Constants.ZTABLES + "/" + id + Constants.ZTABLE_COMPACT_CANCEL_ID, zero, NodeExistsPolicy.SKIP);
 +        }
 +
 +        @SuppressWarnings("deprecation")
 +        String zpath = zooRoot + Constants.ZCONFIG + "/" + Property.TSERV_WAL_SYNC_METHOD.getKey();
 +        // is the entire instance set to use flushing vs sync?
 +        boolean flushDefault = false;
 +        try {
 +          byte data[] = zoo.getData(zpath, null);
 +          if (new String(data, StandardCharsets.UTF_8).endsWith("flush")) {
 +            flushDefault = true;
 +          }
 +        } catch (KeeperException.NoNodeException ex) {
 +          // skip
-         } 
++        }
 +        for (String id : zoo.getChildren(zooRoot + Constants.ZTABLES)) {
 +          log.debug("Converting table " + id + " WALog setting to Durability");
 +          try {
 +            @SuppressWarnings("deprecation")
 +            String path = zooRoot + Constants.ZTABLES + "/" + id + Constants.ZTABLE_CONF + "/" + Property.TABLE_WALOG_ENABLED.getKey();
 +            byte[] data = zoo.getData(path, null);
 +            boolean useWAL = Boolean.parseBoolean(new String(data, StandardCharsets.UTF_8));
 +            zoo.recursiveDelete(path, NodeMissingPolicy.FAIL);
 +            path = zooRoot + Constants.ZTABLES + "/" + id + Constants.ZTABLE_CONF + "/" + Property.TABLE_DURABILITY.getKey();
 +            if (useWAL) {
 +              if (flushDefault) {
 +                zoo.putPersistentData(path, "flush".getBytes(), NodeExistsPolicy.SKIP);
 +              } else {
 +                zoo.putPersistentData(path, "sync".getBytes(), NodeExistsPolicy.SKIP);
 +              }
 +            } else {
 +              zoo.putPersistentData(path, "none".getBytes(), NodeExistsPolicy.SKIP);
 +            }
 +          } catch (KeeperException.NoNodeException ex) {
 +            // skip it
            }
          }
  
@@@ -383,8 -340,8 +385,8 @@@
  
          // rename metadata table
          log.debug("Upgrade renaming table " + MetadataTable.OLD_NAME + " (ID: " + MetadataTable.ID
+ ") to " + MetadataTable.NAME);
-         zoo.putPersistentData(tables + "/" + MetadataTable.ID + Constants.ZTABLE_NAME, Tables.qualify(MetadataTable.NAME).getSecond().getBytes(StandardCharsets.UTF_8),
-             NodeExistsPolicy.OVERWRITE);
 -        zoo.putPersistentData(tables + "/" + MetadataTable.ID + Constants.ZTABLE_NAME, Tables.qualify(MetadataTable.NAME).getSecond().getBytes(Constants.UTF8),
 -            NodeExistsPolicy.OVERWRITE);
++        zoo.putPersistentData(tables + "/" + MetadataTable.ID + Constants.ZTABLE_NAME,
++            Tables.qualify(MetadataTable.NAME).getSecond().getBytes(StandardCharsets.UTF_8), NodeExistsPolicy.OVERWRITE);
  
          moveRootTabletToRootTable(zoo);
  
@@@ -418,26 -375,24 +420,29 @@@
        // sanity check that we passed the Fate verification prior to ZooKeeper upgrade, and that Fate still hasn't been started.
          // Change both to use Guava's Verify once we use Guava 17.
          if (!haveUpgradedZooKeeper) {
-           throw new IllegalStateException("We should only attempt to upgrade Accumulo's
metadata table if we've already upgraded ZooKeeper. Please save all logs and file a bug.");
+           throw new IllegalStateException(
+               "We should only attempt to upgrade Accumulo's metadata table if we've already
upgraded ZooKeeper. Please save all logs and file a bug.");
          }
          if (null != fate) {
-           throw new IllegalStateException("Access to Fate should not have been initialized
prior to the Master finishing upgrades. Please save all logs and file a bug.");
+           throw new IllegalStateException(
+               "Access to Fate should not have been initialized prior to the Master finishing
upgrades. Please save all logs and file a bug.");
          }
          Runnable upgradeTask = new Runnable() {
 +          int version = accumuloPersistentVersion;
++
            @Override
            public void run() {
              try {
                log.info("Starting to upgrade metadata table.");
 -              if (accumuloPersistentVersion == ServerConstants.TWO_DATA_VERSIONS_AGO) {
 +              if (version == ServerConstants.MOVE_DELETE_MARKERS - 1) {
                  log.info("Updating Delete Markers in metadata table for version 1.4");
                  MetadataTableUtil.moveMetaDeleteMarkersFrom14(instance, SystemCredentials.get());
 -              } else {
 +                version++;
 +              }
-               if (version == ServerConstants.MOVE_TO_ROOT_TABLE - 1){
++              if (version == ServerConstants.MOVE_TO_ROOT_TABLE - 1) {
                  log.info("Updating Delete Markers in metadata table.");
                  MetadataTableUtil.moveMetaDeleteMarkers(instance, SystemCredentials.get());
 +                version++;
                }
                log.info("Updating persistent data version.");
                Accumulo.updateAccumuloVersion(fs, accumuloPersistentVersion);
@@@ -1072,11 -1017,9 +1074,11 @@@
        throw new IOException(e);
      }
  
 +    ZooKeeperInitialization.ensureZooKeeperInitialized(zReaderWriter, zroot);
 +
      Processor<Iface> processor = new Processor<Iface>(RpcWrapper.service(new
MasterClientServiceHandler(this)));
-     ServerAddress sa = TServerUtils.startServer(getConfiguration(), hostname, Property.MASTER_CLIENTPORT, processor, "Master",
-         "Master Client Service Handler", null, Property.MASTER_MINTHREADS, Property.MASTER_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
 -    ServerAddress sa = TServerUtils.startServer(getSystemConfiguration(), hostname, Property.MASTER_CLIENTPORT, processor, "Master",
 -        "Master Client Service Handler", null, Property.MASTER_MINTHREADS, Property.MASTER_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
++    ServerAddress sa = TServerUtils.startServer(getConfiguration(), hostname, Property.MASTER_CLIENTPORT, processor, "Master", "Master Client Service Handler",
++        null, Property.MASTER_MINTHREADS, Property.MASTER_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
      clientService = sa.server;
      String address = sa.address.toString();
      log.info("Setting master lock data to " + address);

