accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject [31/53] [abbrv] ACCUMULO-658 consistent package names to avoid overlapped sealed jars
Date Fri, 06 Sep 2013 18:22:59 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java b/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
deleted file mode 100644
index 83a8a41..0000000
--- a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.metanalysis;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.tserver.logger.LogEvents;
-import org.apache.accumulo.tserver.logger.LogFileKey;
-import org.apache.accumulo.tserver.logger.LogFileValue;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-/**
- * A map reduce job that takes a set of walogs and filters out all non metadata table events.
- */
-public class FilterMeta extends Configured implements Tool {
-  
-  public static class FilterMapper extends Mapper<LogFileKey,LogFileValue,LogFileKey,LogFileValue> {
-    private Set<Integer> tabletIds;
-    
-    @Override
-    protected void setup(Context context) throws java.io.IOException, java.lang.InterruptedException {
-      tabletIds = new HashSet<Integer>();
-    }
-    
-    @Override
-    public void map(LogFileKey key, LogFileValue value, Context context) throws IOException, InterruptedException {
-      if (key.event == LogEvents.OPEN) {
-        context.write(key, value);
-      } else if (key.event == LogEvents.DEFINE_TABLET && key.tablet.getTableId().toString().equals(MetadataTable.ID)) {
-        tabletIds.add(key.tid);
-        context.write(key, value);
-      } else if ((key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) && tabletIds.contains(key.tid)) {
-        context.write(key, value);
-      }
-    }
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    
-    String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
-    
-    Job job = new Job(getConf(), jobName);
-    job.setJarByClass(this.getClass());
-    
-    Path paths[] = new Path[args.length - 1];
-    for (int i = 0; i < paths.length; i++) {
-      paths[i] = new Path(args[i]);
-    }
-
-    job.setInputFormatClass(LogFileInputFormat.class);
-    LogFileInputFormat.setInputPaths(job, paths);
-    
-    job.setOutputFormatClass(LogFileOutputFormat.class);
-    LogFileOutputFormat.setOutputPath(job, new Path(args[args.length - 1]));
-
-    job.setMapperClass(FilterMapper.class);
-    
-    job.setNumReduceTasks(0);
-
-    job.waitForCompletion(true);
-    return job.isSuccessful() ? 0 : 1;
-  }
-  
-  public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(CachedConfiguration.getInstance(), new FilterMeta(), args);
-    System.exit(res);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java b/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java
deleted file mode 100644
index f0a8268..0000000
--- a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.metanalysis;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.TextUtil;
-import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Finds tablet creation events.
- */
-public class FindTablet {
-  
-  static public class Opts extends ClientOpts {
-    @Parameter(names = {"-r", "--row"}, required = true, description = "find tablets that contain this row")
-    String row = null;
-    
-    @Parameter(names = "--tableId", required = true, description = "table id")
-    String tableId = null;
-  }
-  
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(FindTablet.class.getName(), args);
-    
-    findContainingTablets(opts);
-  }
-  
-  private static void findContainingTablets(Opts opts) throws Exception {
-    Range range = new KeyExtent(new Text(opts.tableId), null, null).toMetadataRange();
-    
-    Scanner scanner = opts.getConnector().createScanner("createEvents", opts.auths);
-    scanner.setRange(range);
-    
-    Text row = new Text(opts.row);
-    for (Entry<Key,Value> entry : scanner) {
-      KeyExtent ke = new KeyExtent(entry.getKey().getRow(), new Value(TextUtil.getBytes(entry.getKey().getColumnFamily())));
-      if (ke.contains(row)) {
-        System.out.println(entry.getKey().getColumnQualifier() + " " + ke + " " + entry.getValue());
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java b/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
deleted file mode 100644
index e42731a..0000000
--- a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.metanalysis;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.tserver.logger.LogEvents;
-import org.apache.accumulo.tserver.logger.LogFileKey;
-import org.apache.accumulo.tserver.logger.LogFileValue;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.log4j.Logger;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * A map reduce job that takes write ahead logs containing mutations for the metadata table and indexes them into Accumulo tables for analysis.
- * 
- */
-
-public class IndexMeta extends Configured implements Tool {
-  
-  public static class IndexMapper extends Mapper<LogFileKey,LogFileValue,Text,Mutation> {
-    private static final Text CREATE_EVENTS_TABLE = new Text("createEvents");
-    private static final Text TABLET_EVENTS_TABLE = new Text("tabletEvents");
-    private Map<Integer,KeyExtent> tabletIds = new HashMap<Integer,KeyExtent>();
-    private String uuid = null;
-    
-    @Override
-    protected void setup(Context context) throws java.io.IOException, java.lang.InterruptedException {
-      tabletIds = new HashMap<Integer,KeyExtent>();
-      uuid = null;
-    }
-    
-    @Override
-    public void map(LogFileKey key, LogFileValue value, Context context) throws IOException, InterruptedException {
-      if (key.event == LogEvents.OPEN) {
-        uuid = key.tserverSession;
-      } else if (key.event == LogEvents.DEFINE_TABLET) {
-        if (key.tablet.getTableId().toString().equals(MetadataTable.ID)) {
-          tabletIds.put(key.tid, new KeyExtent(key.tablet));
-        }
-      } else if ((key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) && tabletIds.containsKey(key.tid)) {
-        for (Mutation m : value.mutations) {
-          index(context, m, uuid, tabletIds.get(key.tid));
-        }
-      }
-    }
-    
-    void index(Context context, Mutation m, String logFile, KeyExtent metaTablet) throws IOException, InterruptedException {
-      List<ColumnUpdate> columnsUpdates = m.getUpdates();
-      
-      Text prevRow = null;
-      long timestamp = 0;
-      
-      if (m.getRow().length > 0 && m.getRow()[0] == '~') {
-        return;
-      }
-      
-      for (ColumnUpdate cu : columnsUpdates) {
-        if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && !cu.isDeleted()) {
-          prevRow = new Text(cu.getValue());
-        }
-        
-        timestamp = cu.getTimestamp();
-      }
-      
-      byte[] serMut = WritableUtils.toByteArray(m);
-      
-      if (prevRow != null) {
-        Mutation createEvent = new Mutation(new Text(m.getRow()));
-        createEvent.put(prevRow, new Text(String.format("%020d", timestamp)), new Value(metaTablet.toString().getBytes()));
-        context.write(CREATE_EVENTS_TABLE, createEvent);
-      }
-      
-      Mutation tabletEvent = new Mutation(new Text(m.getRow()));
-      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("mut"), new Value(serMut));
-      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("mtab"), new Value(metaTablet.toString().getBytes()));
-      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("log"), new Value(logFile.getBytes()));
-      context.write(TABLET_EVENTS_TABLE, tabletEvent);
-    }
-  }
-  
-  static class Opts extends ClientOpts {
-    @Parameter(description = "<logfile> { <logfile> ...}")
-    List<String> logFiles = new ArrayList<String>();
-  }
-  
-  @Override
-  public int run(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(IndexMeta.class.getName(), args);
-    
-    String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
-    
-    Job job = new Job(getConf(), jobName);
-    job.setJarByClass(this.getClass());
-    
-    List<String> logFiles = Arrays.asList(args).subList(4, args.length);
-    Path paths[] = new Path[logFiles.size()];
-    int count = 0;
-    for (String logFile : logFiles) {
-      paths[count++] = new Path(logFile);
-    }
-    
-    job.setInputFormatClass(LogFileInputFormat.class);
-    LogFileInputFormat.setInputPaths(job, paths);
-    
-    job.setNumReduceTasks(0);
-    
-    job.setOutputFormatClass(AccumuloOutputFormat.class);
-    AccumuloOutputFormat.setZooKeeperInstance(job, opts.instance, opts.zookeepers);
-    AccumuloOutputFormat.setConnectorInfo(job, opts.principal, opts.getToken());
-    AccumuloOutputFormat.setCreateTables(job, false);
-    
-    job.setMapperClass(IndexMapper.class);
-    
-    Connector conn = opts.getConnector();
-    
-    try {
-      conn.tableOperations().create("createEvents");
-    } catch (TableExistsException tee) {
-      Logger.getLogger(IndexMeta.class).warn("Table createEvents exists");
-    }
-    
-    try {
-      conn.tableOperations().create("tabletEvents");
-    } catch (TableExistsException tee) {
-      Logger.getLogger(IndexMeta.class).warn("Table tabletEvents exists");
-    }
-    
-    job.waitForCompletion(true);
-    return job.isSuccessful() ? 0 : 1;
-  }
-  
-  public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(CachedConfiguration.getInstance(), new IndexMeta(), args);
-    System.exit(res);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/LogFileInputFormat.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/LogFileInputFormat.java b/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/LogFileInputFormat.java
deleted file mode 100644
index 0b206ba..0000000
--- a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/LogFileInputFormat.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.server.metanalysis;
-
-import java.io.EOFException;
-import java.io.IOException;
-
-import org.apache.accumulo.tserver.logger.LogFileKey;
-import org.apache.accumulo.tserver.logger.LogFileValue;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.input.FileSplit;
-
-/**
- * Input format for Accumulo write ahead logs
- */
-public class LogFileInputFormat extends FileInputFormat<LogFileKey,LogFileValue> {
-  
-  private static class LogFileRecordReader extends RecordReader<LogFileKey,LogFileValue> {
-    
-    private FSDataInputStream fsdis;
-    private LogFileKey key;
-    private LogFileValue value;
-    private long length;
-    
-    @Override
-    public void close() throws IOException {
-      fsdis.close();
-    }
-    
-    @Override
-    public LogFileKey getCurrentKey() throws IOException, InterruptedException {
-      return key;
-    }
-    
-    @Override
-    public LogFileValue getCurrentValue() throws IOException, InterruptedException {
-      return value;
-    }
-    
-    @Override
-    public float getProgress() throws IOException, InterruptedException {
-      float progress = (length - fsdis.getPos()) / (float) length;
-      if (progress < 0)
-        return 0;
-      return progress;
-    }
-    
-    @Override
-    public void initialize(InputSplit is, TaskAttemptContext context) throws IOException, InterruptedException {
-      FileSplit fileSplit = (FileSplit) is;
-      
-      Configuration conf = new Configuration();
-      FileSystem fs = FileSystem.get(conf);
-      
-      key = new LogFileKey();
-      value = new LogFileValue();
-      
-      fsdis = fs.open(fileSplit.getPath());
-      FileStatus status = fs.getFileStatus(fileSplit.getPath());
-      length = status.getLen();
-    }
-
-    @Override
-    public boolean nextKeyValue() throws IOException, InterruptedException {
-      if (key == null)
-        return false;
-      
-      try {
-        key.readFields(fsdis);
-        value.readFields(fsdis);
-        return true;
-      } catch (EOFException ex) {
-        key = null;
-        value = null;
-        return false;
-      }
-    }
-    
-  }
-
-  
-  @Override
-  public RecordReader<LogFileKey,LogFileValue> createRecordReader(InputSplit arg0, TaskAttemptContext arg1) throws IOException, InterruptedException {
-    return new LogFileRecordReader();
-  }
-  
-  @Override
-  protected boolean isSplitable(JobContext context, Path filename) {
-    return false;
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/LogFileOutputFormat.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/LogFileOutputFormat.java b/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/LogFileOutputFormat.java
deleted file mode 100644
index f8dcc9e..0000000
--- a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/LogFileOutputFormat.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.metanalysis;
-
-import java.io.IOException;
-
-import org.apache.accumulo.tserver.logger.LogFileKey;
-import org.apache.accumulo.tserver.logger.LogFileValue;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.RecordWriter;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-
-/**
- * Output format for Accumulo write ahead logs.
- */
-public class LogFileOutputFormat extends FileOutputFormat<LogFileKey,LogFileValue> {
-  
-  private static class LogFileRecordWriter extends RecordWriter<LogFileKey,LogFileValue> {
-    
-    private FSDataOutputStream out;
-    
-    public LogFileRecordWriter(Path outputPath) throws IOException {
-      Configuration conf = new Configuration();
-      FileSystem fs = FileSystem.get(conf);
-      
-      out = fs.create(outputPath);
-    }
-    
-    @Override
-    public void close(TaskAttemptContext arg0) throws IOException, InterruptedException {
-      out.close();
-    }
-    
-    @Override
-    public void write(LogFileKey key, LogFileValue val) throws IOException, InterruptedException {
-      key.write(out);
-      val.write(out);
-    }
-    
-  }
-  
-  @Override
-  public RecordWriter<LogFileKey,LogFileValue> getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
-    Path outputPath = getDefaultWorkFile(context, "");
-    return new LogFileRecordWriter(outputPath);
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java b/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
deleted file mode 100644
index 499b6bd..0000000
--- a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.metanalysis;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.tserver.logger.LogFileValue;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Looks up and prints mutations indexed by IndexMeta
- */
-public class PrintEvents {
-  
-  static class Opts extends ClientOpts {
-    @Parameter(names = {"-t", "--tableId"}, description = "table id", required = true)
-    String tableId;
-    @Parameter(names = {"-e", "--endRow"}, description = "end row")
-    String endRow;
-    @Parameter(names = {"-t", "--time"}, description = "time, in milliseconds", required = true)
-    long time;
-  }
-  
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(PrintEvents.class.getName(), args);
-    
-    Connector conn = opts.getConnector();
-    
-    printEvents(conn, opts.tableId, opts.endRow, opts.time);
-  }
-  
-  private static void printEvents(Connector conn, String tableId, String endRow, Long time) throws Exception {
-    Scanner scanner = conn.createScanner("tabletEvents", new Authorizations());
-    String metaRow = tableId + (endRow == null ? "<" : ";" + endRow);
-    scanner.setRange(new Range(new Key(metaRow, String.format("%020d", time)), true, new Key(metaRow).followingKey(PartialKey.ROW), false));
-    int count = 0;
-    
-    String lastLog = null;
-    
-    loop1: for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnQualifier().toString().equals("log")) {
-        if (lastLog == null || !lastLog.equals(entry.getValue().toString()))
-          System.out.println("Log : " + entry.getValue());
-        lastLog = entry.getValue().toString();
-      } else if (entry.getKey().getColumnQualifier().toString().equals("mut")) {
-        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(entry.getValue().get()));
-        Mutation m = new Mutation();
-        m.readFields(dis);
-        
-        LogFileValue lfv = new LogFileValue();
-        lfv.mutations = Collections.singletonList(m);
-        
-        System.out.println(LogFileValue.format(lfv, 1));
-        
-        List<ColumnUpdate> columnsUpdates = m.getUpdates();
-        for (ColumnUpdate cu : columnsUpdates) {
-          if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && count > 0) {
-            System.out.println("Saw change to prevrow, stopping printing events.");
-            break loop1;
-          }
-        }
-        count++;
-      }
-    }
-    
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/package-info.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/package-info.java b/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/package-info.java
deleted file mode 100644
index 4d404ed..0000000
--- a/server/utils/src/main/java/org/apache/accumulo/server/metanalysis/package-info.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Provides programs to analyze metadata mutations written to write ahead logs.  
- * 
- * <p>
- * These programs can be used when write ahead logs are archived.   The best way to find
- * which write ahead logs contain metadata mutations is to grep the tablet server logs.  
- * Grep for events where walogs were added to metadata tablets, then take the unique set 
- * of walogs.
- *
- * <p>
- * To use these programs, use IndexMeta to index the metadata mutations in walogs into 
- * Accumulo tables.  Then use FindTablet and PrintEvents to analyze those indexes.  
- * FilterMeta allows filtering walogs down to just metadata events.  This is useful for the
- * case where the walogs need to be exported from the cluster for analysis.
- *
- * @since 1.5
- */
-package org.apache.accumulo.server.metanalysis;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java b/server/utils/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
deleted file mode 100644
index d7837b8..0000000
--- a/server/utils/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.util;
-
-import java.util.Iterator;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Credentials;
-import org.apache.accumulo.master.LiveTServerSet;
-import org.apache.accumulo.master.LiveTServerSet.Listener;
-import org.apache.accumulo.master.state.DistributedStoreException;
-import org.apache.accumulo.master.state.MetaDataTableScanner;
-import org.apache.accumulo.master.state.TServerInstance;
-import org.apache.accumulo.master.state.TabletLocationState;
-import org.apache.accumulo.master.state.TabletState;
-import org.apache.accumulo.master.state.ZooTabletStateStore;
-import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.server.security.SystemCredentials;
-import org.apache.accumulo.server.tables.TableManager;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-
-public class FindOfflineTablets {
-  private static final Logger log = Logger.getLogger(FindOfflineTablets.class);
-
-  /**
-   * @param args
-   */
-  public static void main(String[] args) throws Exception {
-    ClientOpts opts = new ClientOpts();
-    opts.parseArgs(FindOfflineTablets.class.getName(), args);
-    Instance instance = opts.getInstance();
-    SystemCredentials creds = SystemCredentials.get();
-
-    findOffline(instance, creds, null);
-  }
-
-  static int findOffline(Instance instance, Credentials creds, String tableName) throws AccumuloException, TableNotFoundException {
-
-    final AtomicBoolean scanning = new AtomicBoolean(false);
-
-    LiveTServerSet tservers = new LiveTServerSet(instance, DefaultConfiguration.getDefaultConfiguration(), new Listener() {
-      @Override
-      public void update(LiveTServerSet current, Set<TServerInstance> deleted, Set<TServerInstance> added) {
-        if (!deleted.isEmpty() && scanning.get())
-          log.warn("Tablet servers deleted while scanning: " + deleted);
-        if (!added.isEmpty() && scanning.get())
-          log.warn("Tablet servers added while scanning: " + added);
-      }
-    });
-    tservers.startListeningForTabletServerChanges();
-    scanning.set(true);
-
-    Iterator<TabletLocationState> zooScanner;
-    try {
-      zooScanner = new ZooTabletStateStore().iterator();
-    } catch (DistributedStoreException e) {
-      throw new AccumuloException(e);
-    }
-
-    int offline = 0;
-
-    System.out.println("Scanning zookeeper");
-    if ((offline = checkTablets(zooScanner, tservers)) > 0)
-      return offline;
-
-    if (RootTable.NAME.equals(tableName))
-      return 0;
-
-    System.out.println("Scanning " + RootTable.NAME);
-    Iterator<TabletLocationState> rootScanner = new MetaDataTableScanner(instance, creds, MetadataSchema.TabletsSection.getRange(), RootTable.NAME);
-    if ((offline = checkTablets(rootScanner, tservers)) > 0)
-      return offline;
-
-    if (MetadataTable.NAME.equals(tableName))
-      return 0;
-
-    System.out.println("Scanning " + MetadataTable.NAME);
-
-    Range range = MetadataSchema.TabletsSection.getRange();
-    if (tableName != null) {
-      String tableId = Tables.getTableId(instance, tableName);
-      range = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
-    }
-
-    Iterator<TabletLocationState> metaScanner = new MetaDataTableScanner(instance, creds, range, MetadataTable.NAME);
-    return checkTablets(metaScanner, tservers);
-  }
-
-  private static int checkTablets(Iterator<TabletLocationState> scanner, LiveTServerSet tservers) {
-    int offline = 0;
-
-    while (scanner.hasNext()) {
-      TabletLocationState locationState = scanner.next();
-      TabletState state = locationState.getState(tservers.getCurrentServers());
-      if (state != null && state != TabletState.HOSTED
-          && TableManager.getInstance().getTableState(locationState.extent.getTableId().toString()) != TableState.OFFLINE) {
-        System.out.println(locationState + " is " + state + "  #walogs:" + locationState.walogs.size());
-        offline++;
-      }
-    }
-
-    return offline;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/FilterMeta.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/FilterMeta.java b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/FilterMeta.java
new file mode 100644
index 0000000..973de1a
--- /dev/null
+++ b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/FilterMeta.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.utils.metanalysis;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.tserver.logger.LogEvents;
+import org.apache.accumulo.tserver.logger.LogFileKey;
+import org.apache.accumulo.tserver.logger.LogFileValue;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * A map reduce job that takes a set of walogs and filters out all non metadata table events.
+ */
+public class FilterMeta extends Configured implements Tool {
+  
+  public static class FilterMapper extends Mapper<LogFileKey,LogFileValue,LogFileKey,LogFileValue> {
+    private Set<Integer> tabletIds;
+    
+    @Override
+    protected void setup(Context context) throws java.io.IOException, java.lang.InterruptedException {
+      tabletIds = new HashSet<Integer>();
+    }
+    
+    @Override
+    public void map(LogFileKey key, LogFileValue value, Context context) throws IOException, InterruptedException {
+      if (key.event == LogEvents.OPEN) {
+        context.write(key, value);
+      } else if (key.event == LogEvents.DEFINE_TABLET && key.tablet.getTableId().toString().equals(MetadataTable.ID)) {
+        tabletIds.add(key.tid);
+        context.write(key, value);
+      } else if ((key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) && tabletIds.contains(key.tid)) {
+        context.write(key, value);
+      }
+    }
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+    
+    String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
+    
+    Job job = new Job(getConf(), jobName);
+    job.setJarByClass(this.getClass());
+    
+    Path paths[] = new Path[args.length - 1];
+    for (int i = 0; i < paths.length; i++) {
+      paths[i] = new Path(args[i]);
+    }
+
+    job.setInputFormatClass(LogFileInputFormat.class);
+    LogFileInputFormat.setInputPaths(job, paths);
+    
+    job.setOutputFormatClass(LogFileOutputFormat.class);
+    LogFileOutputFormat.setOutputPath(job, new Path(args[args.length - 1]));
+
+    job.setMapperClass(FilterMapper.class);
+    
+    job.setNumReduceTasks(0);
+
+    job.waitForCompletion(true);
+    return job.isSuccessful() ? 0 : 1;
+  }
+  
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(CachedConfiguration.getInstance(), new FilterMeta(), args);
+    System.exit(res);
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/FindTablet.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/FindTablet.java b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/FindTablet.java
new file mode 100644
index 0000000..773c41a
--- /dev/null
+++ b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/FindTablet.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.utils.metanalysis;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.TextUtil;
+import org.apache.accumulo.server.cli.ClientOpts;
+import org.apache.hadoop.io.Text;
+
+import com.beust.jcommander.Parameter;
+
+/**
+ * Finds tablet creation events.
+ */
+public class FindTablet {
+  
+  static public class Opts extends ClientOpts {
+    @Parameter(names = {"-r", "--row"}, required = true, description = "find tablets that contain this row")
+    String row = null;
+    
+    @Parameter(names = "--tableId", required = true, description = "table id")
+    String tableId = null;
+  }
+  
+  public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(FindTablet.class.getName(), args);
+    
+    findContainingTablets(opts);
+  }
+  
+  private static void findContainingTablets(Opts opts) throws Exception {
+    Range range = new KeyExtent(new Text(opts.tableId), null, null).toMetadataRange();
+    
+    Scanner scanner = opts.getConnector().createScanner("createEvents", opts.auths);
+    scanner.setRange(range);
+    
+    Text row = new Text(opts.row);
+    for (Entry<Key,Value> entry : scanner) {
+      KeyExtent ke = new KeyExtent(entry.getKey().getRow(), new Value(TextUtil.getBytes(entry.getKey().getColumnFamily())));
+      if (ke.contains(row)) {
+        System.out.println(entry.getKey().getColumnQualifier() + " " + ke + " " + entry.getValue());
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/IndexMeta.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/IndexMeta.java b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/IndexMeta.java
new file mode 100644
index 0000000..6a84142
--- /dev/null
+++ b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/IndexMeta.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.utils.metanalysis;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
+import org.apache.accumulo.core.data.ColumnUpdate;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.server.cli.ClientOpts;
+import org.apache.accumulo.tserver.logger.LogEvents;
+import org.apache.accumulo.tserver.logger.LogFileKey;
+import org.apache.accumulo.tserver.logger.LogFileValue;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Logger;
+
+import com.beust.jcommander.Parameter;
+
+/**
+ * A map reduce job that takes write ahead logs containing mutations for the metadata table and indexes them into Accumulo tables for analysis.
+ * 
+ */
+
+public class IndexMeta extends Configured implements Tool {
+  
+  public static class IndexMapper extends Mapper<LogFileKey,LogFileValue,Text,Mutation> {
+    private static final Text CREATE_EVENTS_TABLE = new Text("createEvents");
+    private static final Text TABLET_EVENTS_TABLE = new Text("tabletEvents");
+    private Map<Integer,KeyExtent> tabletIds = new HashMap<Integer,KeyExtent>();
+    private String uuid = null;
+    
+    @Override
+    protected void setup(Context context) throws java.io.IOException, java.lang.InterruptedException {
+      tabletIds = new HashMap<Integer,KeyExtent>();
+      uuid = null;
+    }
+    
+    @Override
+    public void map(LogFileKey key, LogFileValue value, Context context) throws IOException, InterruptedException {
+      if (key.event == LogEvents.OPEN) {
+        uuid = key.tserverSession;
+      } else if (key.event == LogEvents.DEFINE_TABLET) {
+        if (key.tablet.getTableId().toString().equals(MetadataTable.ID)) {
+          tabletIds.put(key.tid, new KeyExtent(key.tablet));
+        }
+      } else if ((key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) && tabletIds.containsKey(key.tid)) {
+        for (Mutation m : value.mutations) {
+          index(context, m, uuid, tabletIds.get(key.tid));
+        }
+      }
+    }
+    
+    void index(Context context, Mutation m, String logFile, KeyExtent metaTablet) throws IOException, InterruptedException {
+      List<ColumnUpdate> columnsUpdates = m.getUpdates();
+      
+      Text prevRow = null;
+      long timestamp = 0;
+      
+      if (m.getRow().length > 0 && m.getRow()[0] == '~') {
+        return;
+      }
+      
+      for (ColumnUpdate cu : columnsUpdates) {
+        if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && !cu.isDeleted()) {
+          prevRow = new Text(cu.getValue());
+        }
+        
+        timestamp = cu.getTimestamp();
+      }
+      
+      byte[] serMut = WritableUtils.toByteArray(m);
+      
+      if (prevRow != null) {
+        Mutation createEvent = new Mutation(new Text(m.getRow()));
+        createEvent.put(prevRow, new Text(String.format("%020d", timestamp)), new Value(metaTablet.toString().getBytes()));
+        context.write(CREATE_EVENTS_TABLE, createEvent);
+      }
+      
+      Mutation tabletEvent = new Mutation(new Text(m.getRow()));
+      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("mut"), new Value(serMut));
+      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("mtab"), new Value(metaTablet.toString().getBytes()));
+      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("log"), new Value(logFile.getBytes()));
+      context.write(TABLET_EVENTS_TABLE, tabletEvent);
+    }
+  }
+  
+  static class Opts extends ClientOpts {
+    @Parameter(description = "<logfile> { <logfile> ...}")
+    List<String> logFiles = new ArrayList<String>();
+  }
+  
+  @Override
+  public int run(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(IndexMeta.class.getName(), args);
+    
+    String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
+    
+    Job job = new Job(getConf(), jobName);
+    job.setJarByClass(this.getClass());
+    
+    List<String> logFiles = Arrays.asList(args).subList(4, args.length);
+    Path paths[] = new Path[logFiles.size()];
+    int count = 0;
+    for (String logFile : logFiles) {
+      paths[count++] = new Path(logFile);
+    }
+    
+    job.setInputFormatClass(LogFileInputFormat.class);
+    LogFileInputFormat.setInputPaths(job, paths);
+    
+    job.setNumReduceTasks(0);
+    
+    job.setOutputFormatClass(AccumuloOutputFormat.class);
+    AccumuloOutputFormat.setZooKeeperInstance(job, opts.instance, opts.zookeepers);
+    AccumuloOutputFormat.setConnectorInfo(job, opts.principal, opts.getToken());
+    AccumuloOutputFormat.setCreateTables(job, false);
+    
+    job.setMapperClass(IndexMapper.class);
+    
+    Connector conn = opts.getConnector();
+    
+    try {
+      conn.tableOperations().create("createEvents");
+    } catch (TableExistsException tee) {
+      Logger.getLogger(IndexMeta.class).warn("Table createEvents exists");
+    }
+    
+    try {
+      conn.tableOperations().create("tabletEvents");
+    } catch (TableExistsException tee) {
+      Logger.getLogger(IndexMeta.class).warn("Table tabletEvents exists");
+    }
+    
+    job.waitForCompletion(true);
+    return job.isSuccessful() ? 0 : 1;
+  }
+  
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(CachedConfiguration.getInstance(), new IndexMeta(), args);
+    System.exit(res);
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/LogFileInputFormat.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/LogFileInputFormat.java b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/LogFileInputFormat.java
new file mode 100644
index 0000000..603bf71
--- /dev/null
+++ b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/LogFileInputFormat.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.utils.metanalysis;
+
+import java.io.EOFException;
+import java.io.IOException;
+
+import org.apache.accumulo.tserver.logger.LogFileKey;
+import org.apache.accumulo.tserver.logger.LogFileValue;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+
+/**
+ * Input format for Accumulo write ahead logs
+ */
+public class LogFileInputFormat extends FileInputFormat<LogFileKey,LogFileValue> {
+  
+  private static class LogFileRecordReader extends RecordReader<LogFileKey,LogFileValue> {
+    
+    private FSDataInputStream fsdis;
+    private LogFileKey key;
+    private LogFileValue value;
+    private long length;
+    
+    @Override
+    public void close() throws IOException {
+      fsdis.close();
+    }
+    
+    @Override
+    public LogFileKey getCurrentKey() throws IOException, InterruptedException {
+      return key;
+    }
+    
+    @Override
+    public LogFileValue getCurrentValue() throws IOException, InterruptedException {
+      return value;
+    }
+    
+    @Override
+    public float getProgress() throws IOException, InterruptedException {
+      float progress = (length - fsdis.getPos()) / (float) length;
+      if (progress < 0)
+        return 0;
+      return progress;
+    }
+    
+    @Override
+    public void initialize(InputSplit is, TaskAttemptContext context) throws IOException, InterruptedException {
+      FileSplit fileSplit = (FileSplit) is;
+      
+      Configuration conf = new Configuration();
+      FileSystem fs = FileSystem.get(conf);
+      
+      key = new LogFileKey();
+      value = new LogFileValue();
+      
+      fsdis = fs.open(fileSplit.getPath());
+      FileStatus status = fs.getFileStatus(fileSplit.getPath());
+      length = status.getLen();
+    }
+
+    @Override
+    public boolean nextKeyValue() throws IOException, InterruptedException {
+      if (key == null)
+        return false;
+      
+      try {
+        key.readFields(fsdis);
+        value.readFields(fsdis);
+        return true;
+      } catch (EOFException ex) {
+        key = null;
+        value = null;
+        return false;
+      }
+    }
+    
+  }
+
+  
+  @Override
+  public RecordReader<LogFileKey,LogFileValue> createRecordReader(InputSplit arg0, TaskAttemptContext arg1) throws IOException, InterruptedException {
+    return new LogFileRecordReader();
+  }
+  
+  @Override
+  protected boolean isSplitable(JobContext context, Path filename) {
+    return false;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/LogFileOutputFormat.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/LogFileOutputFormat.java b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/LogFileOutputFormat.java
new file mode 100644
index 0000000..cccb89e
--- /dev/null
+++ b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/LogFileOutputFormat.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.utils.metanalysis;
+
+import java.io.IOException;
+
+import org.apache.accumulo.tserver.logger.LogFileKey;
+import org.apache.accumulo.tserver.logger.LogFileValue;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+
+/**
+ * Output format for Accumulo write ahead logs.
+ */
+public class LogFileOutputFormat extends FileOutputFormat<LogFileKey,LogFileValue> {
+  
+  private static class LogFileRecordWriter extends RecordWriter<LogFileKey,LogFileValue> {
+    
+    private FSDataOutputStream out;
+    
+    public LogFileRecordWriter(Path outputPath) throws IOException {
+      Configuration conf = new Configuration();
+      FileSystem fs = FileSystem.get(conf);
+      
+      out = fs.create(outputPath);
+    }
+    
+    @Override
+    public void close(TaskAttemptContext arg0) throws IOException, InterruptedException {
+      out.close();
+    }
+    
+    @Override
+    public void write(LogFileKey key, LogFileValue val) throws IOException, InterruptedException {
+      key.write(out);
+      val.write(out);
+    }
+    
+  }
+  
+  @Override
+  public RecordWriter<LogFileKey,LogFileValue> getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
+    Path outputPath = getDefaultWorkFile(context, "");
+    return new LogFileRecordWriter(outputPath);
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/PrintEvents.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/PrintEvents.java b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/PrintEvents.java
new file mode 100644
index 0000000..fc6c18a
--- /dev/null
+++ b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/PrintEvents.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.utils.metanalysis;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.ColumnUpdate;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.server.cli.ClientOpts;
+import org.apache.accumulo.tserver.logger.LogFileValue;
+import org.apache.hadoop.io.Text;
+
+import com.beust.jcommander.Parameter;
+
+/**
+ * Looks up and prints mutations indexed by IndexMeta
+ */
+public class PrintEvents {
+  
+  static class Opts extends ClientOpts {
+    @Parameter(names = {"-t", "--tableId"}, description = "table id", required = true)
+    String tableId;
+    @Parameter(names = {"-e", "--endRow"}, description = "end row")
+    String endRow;
+    @Parameter(names = {"-t", "--time"}, description = "time, in milliseconds", required = true)
+    long time;
+  }
+  
+  public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(PrintEvents.class.getName(), args);
+    
+    Connector conn = opts.getConnector();
+    
+    printEvents(conn, opts.tableId, opts.endRow, opts.time);
+  }
+  
+  private static void printEvents(Connector conn, String tableId, String endRow, Long time) throws Exception {
+    Scanner scanner = conn.createScanner("tabletEvents", new Authorizations());
+    String metaRow = tableId + (endRow == null ? "<" : ";" + endRow);
+    scanner.setRange(new Range(new Key(metaRow, String.format("%020d", time)), true, new Key(metaRow).followingKey(PartialKey.ROW), false));
+    int count = 0;
+    
+    String lastLog = null;
+    
+    loop1: for (Entry<Key,Value> entry : scanner) {
+      if (entry.getKey().getColumnQualifier().toString().equals("log")) {
+        if (lastLog == null || !lastLog.equals(entry.getValue().toString()))
+          System.out.println("Log : " + entry.getValue());
+        lastLog = entry.getValue().toString();
+      } else if (entry.getKey().getColumnQualifier().toString().equals("mut")) {
+        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(entry.getValue().get()));
+        Mutation m = new Mutation();
+        m.readFields(dis);
+        
+        LogFileValue lfv = new LogFileValue();
+        lfv.mutations = Collections.singletonList(m);
+        
+        System.out.println(LogFileValue.format(lfv, 1));
+        
+        List<ColumnUpdate> columnsUpdates = m.getUpdates();
+        for (ColumnUpdate cu : columnsUpdates) {
+          if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && count > 0) {
+            System.out.println("Saw change to prevrow, stopping printing events.");
+            break loop1;
+          }
+        }
+        count++;
+      }
+    }
+    
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/package-info.java
----------------------------------------------------------------------
diff --git a/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/package-info.java b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/package-info.java
new file mode 100644
index 0000000..3c15884
--- /dev/null
+++ b/server/utils/src/main/java/org/apache/accumulo/utils/metanalysis/package-info.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Provides programs to analyze metadata mutations written to write ahead logs.  
+ * 
+ * <p>
+ * These programs can be used when write ahead logs are archived.   The best way to find
+ * which write ahead logs contain metadata mutations is to grep the tablet server logs.  
+ * Grep for events where walogs were added to metadata tablets, then take the unique set 
+ * of walogs.
+ *
+ * <p>
+ * To use these programs, use IndexMeta to index the metadata mutations in walogs into 
+ * Accumulo tables.  Then use FindTable and PrintEvents to analyze those indexes.  
+ * FilterMeta allows filtering walogs down to just metadata events.  This is useful for the
+ * case where the walogs need to be exported from the cluster for analysis.
+ *
+ * @since 1.5
+ */
+package org.apache.accumulo.utils.metanalysis;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java b/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
index b04bbaf..5716311 100644
--- a/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
+++ b/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
@@ -22,7 +22,7 @@ import java.util.TreeMap;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.server.tabletserver.InMemoryMap;
+import org.apache.accumulo.tserver.InMemoryMap;
 import org.apache.hadoop.io.Text;
 
 abstract class MemoryUsageTest {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java b/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java
index 79dc48d..fa7f86f 100644
--- a/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java
@@ -23,7 +23,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.FastFormat;
-import org.apache.accumulo.server.tabletserver.NativeMap;
+import org.apache.accumulo.tserver.NativeMap;
 import org.apache.hadoop.io.Text;
 
 import com.beust.jcommander.JCommander;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java b/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java
index a63b777..d9a8eba 100644
--- a/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java
@@ -29,7 +29,7 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.FastFormat;
 import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.server.tabletserver.NativeMap;
+import org.apache.accumulo.tserver.NativeMap;
 import org.apache.hadoop.io.Text;
 
 public class NativeMapPerformanceTest {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/test/src/main/java/org/apache/accumulo/test/NativeMapStressTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/NativeMapStressTest.java b/test/src/main/java/org/apache/accumulo/test/NativeMapStressTest.java
index 2d0bd45..4a3d6bd 100644
--- a/test/src/main/java/org/apache/accumulo/test/NativeMapStressTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/NativeMapStressTest.java
@@ -28,7 +28,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.OpTimer;
-import org.apache.accumulo.server.tabletserver.NativeMap;
+import org.apache.accumulo.tserver.NativeMap;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/test/src/main/java/org/apache/accumulo/test/functional/NativeMapTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/NativeMapTest.java b/test/src/main/java/org/apache/accumulo/test/functional/NativeMapTest.java
index 0135557..3f904e7 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/NativeMapTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/NativeMapTest.java
@@ -31,7 +31,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.server.tabletserver.NativeMap;
+import org.apache.accumulo.tserver.NativeMap;
 import org.apache.hadoop.io.Text;
 
 public class NativeMapTest {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
index f39ecf8..10cbb0e 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
@@ -47,20 +47,20 @@ import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
 import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
-import org.apache.accumulo.master.state.Assignment;
-import org.apache.accumulo.master.state.TServerInstance;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.master.state.Assignment;
+import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.security.SystemCredentials;
-import org.apache.accumulo.server.tabletserver.TabletServer;
-import org.apache.accumulo.server.tabletserver.TabletTime;
+import org.apache.accumulo.server.tablets.TabletTime;
 import org.apache.accumulo.server.util.FileUtil;
 import org.apache.accumulo.server.util.MasterMetadataUtil;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher;
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.tserver.TabletServer;
 import org.apache.hadoop.io.Text;
 
 public class SplitRecoveryTest extends FunctionalTest {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java b/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
index 38a2e6d..3545170 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
@@ -41,7 +41,7 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Da
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.AddressUtil;
 import org.apache.accumulo.core.util.Stat;
-import org.apache.accumulo.master.state.TServerInstance;
+import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.hadoop.io.Text;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java b/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
index 0ff0948..cd1b1a3 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
@@ -59,13 +59,13 @@ import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Iface;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Processor;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
 import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.master.state.Assignment;
-import org.apache.accumulo.master.state.MetaDataStateStore;
-import org.apache.accumulo.master.state.MetaDataTableScanner;
-import org.apache.accumulo.master.state.TServerInstance;
-import org.apache.accumulo.master.state.TabletLocationState;
 import org.apache.accumulo.server.client.ClientServiceHandler;
 import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.master.state.Assignment;
+import org.apache.accumulo.server.master.state.MetaDataStateStore;
+import org.apache.accumulo.server.master.state.MetaDataTableScanner;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.master.state.TabletLocationState;
 import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.TServerUtils;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/StopTabletServer.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/StopTabletServer.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/StopTabletServer.java
index e319bb7..337d318 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/StopTabletServer.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/StopTabletServer.java
@@ -28,7 +28,7 @@ import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.util.AddressUtil;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
-import org.apache.accumulo.master.state.TServerInstance;
+import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.test.randomwalk.State;
 import org.apache.accumulo.test.randomwalk.Test;
 import org.apache.zookeeper.KeeperException;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
index 58d9ee3..dfd416f 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
@@ -35,10 +35,10 @@ import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.server.tabletserver.TabletServer;
 import org.apache.accumulo.start.Main;
 import org.apache.accumulo.test.TestIngest;
 import org.apache.accumulo.test.VerifyIngest;
+import org.apache.accumulo.tserver.TabletServer;
 import org.junit.Test;
 
 public class HalfDeadTServerIT extends ConfigurableMacIT {
@@ -134,8 +134,8 @@ public class HalfDeadTServerIT extends ConfigurableMacIT {
       c.tableOperations().create("test_ingest");
       assertEquals(1, c.instanceOperations().getTabletServers().size());
       int rows = 100 * 1000;
-      ingest = cluster.exec(TestIngest.class, "-u", "root", "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", ROOT_PASSWORD,
-          "--rows", rows + "");
+      ingest = cluster.exec(TestIngest.class, "-u", "root", "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", ROOT_PASSWORD, "--rows", rows
+          + "");
       UtilWaitThread.sleep(500);
       
       // block I/O with some side-channel trickiness


Mime
View raw message