accumulo-commits mailing list archives

From: ctubb...@apache.org
Subject: [20/54] [partial] ACCUMULO-658, ACCUMULO-656 Split server into separate modules
Date: Fri, 01 Nov 2013 00:55:59 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java b/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
deleted file mode 100644
index 606941d..0000000
--- a/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
+++ /dev/null
@@ -1,776 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.client;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.ServerClient;
-import org.apache.accumulo.core.client.impl.TabletLocator;
-import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocation;
-import org.apache.accumulo.core.client.impl.Translator;
-import org.apache.accumulo.core.client.impl.thrift.ClientService;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.ByteSequence;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.thrift.TKeyExtent;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVIterator;
-import org.apache.accumulo.core.file.FileUtil;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Credentials;
-import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.LoggingRunnable;
-import org.apache.accumulo.core.util.NamingThreadFactory;
-import org.apache.accumulo.core.util.StopWatch;
-import org.apache.accumulo.core.util.ThriftUtil;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.trace.instrument.TraceRunnable;
-import org.apache.accumulo.trace.instrument.Tracer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TServiceClient;
-
-public class BulkImporter {
-  
-  private static final Logger log = Logger.getLogger(BulkImporter.class);
-  
-  public static List<String> bulkLoad(AccumuloConfiguration conf, Instance instance, Credentials creds, long tid, String tableId, List<String> files,
-      String errorDir, boolean setTime) throws IOException, AccumuloException, AccumuloSecurityException, ThriftTableOperationException {
-    AssignmentStats stats = new BulkImporter(conf, instance, creds, tid, tableId, setTime).importFiles(files, new Path(errorDir));
-    List<String> result = new ArrayList<String>();
-    for (Path p : stats.completeFailures.keySet()) {
-      result.add(p.toString());
-    }
-    return result;
-  }
-  
-  private StopWatch<Timers> timer;
-  
-  private static enum Timers {
-    EXAMINE_MAP_FILES, QUERY_METADATA, IMPORT_MAP_FILES, SLEEP, TOTAL
-  }
-  
-  private Instance instance;
-  private Credentials credentials;
-  private String tableId;
-  private long tid;
-  private AccumuloConfiguration acuConf;
-  private boolean setTime;
-  
-  public BulkImporter(AccumuloConfiguration conf, Instance instance, Credentials credentials, long tid, String tableId, boolean setTime) {
-    this.instance = instance;
-    this.credentials = credentials;
-    this.tid = tid;
-    this.tableId = tableId;
-    this.acuConf = conf;
-    this.setTime = setTime;
-  }
-  
-  public AssignmentStats importFiles(List<String> files, Path failureDir) throws IOException, AccumuloException, AccumuloSecurityException,
-      ThriftTableOperationException {
-    
-    int numThreads = acuConf.getCount(Property.TSERV_BULK_PROCESS_THREADS);
-    int numAssignThreads = acuConf.getCount(Property.TSERV_BULK_ASSIGNMENT_THREADS);
-    
-    timer = new StopWatch<Timers>(Timers.class);
-    timer.start(Timers.TOTAL);
-    
-    Configuration conf = CachedConfiguration.getInstance();
-    final VolumeManager fs = VolumeManagerImpl.get(acuConf);
-
-    Set<Path> paths = new HashSet<Path>();
-    for (String file : files) {
-      paths.add(new Path(file));
-    }
-    AssignmentStats assignmentStats = new AssignmentStats(paths.size());
-    
-    final Map<Path,List<KeyExtent>> completeFailures = Collections.synchronizedSortedMap(new TreeMap<Path,List<KeyExtent>>());
-    
-    ClientService.Client client = null;
-    final TabletLocator locator = TabletLocator.getLocator(instance, new Text(tableId));
-    
-    try {
-      final Map<Path,List<TabletLocation>> assignments = Collections.synchronizedSortedMap(new TreeMap<Path,List<TabletLocation>>());
-      
-      timer.start(Timers.EXAMINE_MAP_FILES);
-      ExecutorService threadPool = Executors.newFixedThreadPool(numThreads, new NamingThreadFactory("findOverlapping"));
-      
-      for (Path path : paths) {
-        final Path mapFile = path;
-        Runnable getAssignments = new Runnable() {
-          @Override
-          public void run() {
-            List<TabletLocation> tabletsToAssignMapFileTo = Collections.emptyList();
-            try {
-              tabletsToAssignMapFileTo = findOverlappingTablets(instance.getConfiguration(), fs, locator, mapFile, credentials);
-            } catch (Exception ex) {
-              log.warn("Unable to find tablets that overlap file " + mapFile.toString());
-            }
-            log.debug("Map file " + mapFile + " found to overlap " + tabletsToAssignMapFileTo.size() + " tablets");
-            if (tabletsToAssignMapFileTo.size() == 0) {
-              List<KeyExtent> empty = Collections.emptyList();
-              completeFailures.put(mapFile, empty);
-            } else
-              assignments.put(mapFile, tabletsToAssignMapFileTo);
-          }
-        };
-        threadPool.submit(new TraceRunnable(new LoggingRunnable(log, getAssignments)));
-      }
-      threadPool.shutdown();
-      while (!threadPool.isTerminated()) {
-        try {
-          threadPool.awaitTermination(60, TimeUnit.SECONDS);
-        } catch (InterruptedException e) {
-          throw new RuntimeException(e);
-        }
-      }
-      timer.stop(Timers.EXAMINE_MAP_FILES);
-      
-      assignmentStats.attemptingAssignments(assignments);
-      Map<Path,List<KeyExtent>> assignmentFailures = assignMapFiles(acuConf, instance, conf, credentials, fs, tableId, assignments, paths, numAssignThreads,
-          numThreads);
-      assignmentStats.assignmentsFailed(assignmentFailures);
-      
-      Map<Path,Integer> failureCount = new TreeMap<Path,Integer>();
-      
-      for (Entry<Path,List<KeyExtent>> entry : assignmentFailures.entrySet())
-        failureCount.put(entry.getKey(), 1);
-      
-      long sleepTime = 2 * 1000;
-      while (assignmentFailures.size() > 0) {
-        sleepTime = Math.min(sleepTime * 2, 60 * 1000);
-        locator.invalidateCache();
-        // the assumption about assignment failures is that they are caused by a
-        // split happening or by a missing location
-        //
-        // for splits we need to find children key extents that cover the
-        // same key range and are contiguous (no holes, no overlap)
-        
-        timer.start(Timers.SLEEP);
-        UtilWaitThread.sleep(sleepTime);
-        timer.stop(Timers.SLEEP);
-        
-        log.debug("Trying to assign " + assignmentFailures.size() + " map files that previously failed on some key extents");
-        assignments.clear();
-        
-        // for failed key extents, try to find children key extents to
-        // assign to
-        for (Entry<Path,List<KeyExtent>> entry : assignmentFailures.entrySet()) {
-          Iterator<KeyExtent> keListIter = entry.getValue().iterator();
-          
-          List<TabletLocation> tabletsToAssignMapFileTo = new ArrayList<TabletLocation>();
-          
-          while (keListIter.hasNext()) {
-            KeyExtent ke = keListIter.next();
-            
-            try {
-              timer.start(Timers.QUERY_METADATA);
-              tabletsToAssignMapFileTo.addAll(findOverlappingTablets(instance.getConfiguration(), fs, locator, entry.getKey(), ke, credentials));
-              timer.stop(Timers.QUERY_METADATA);
-              keListIter.remove();
-            } catch (Exception ex) {
-              log.warn("Exception finding overlapping tablets, will retry tablet " + ke);
-            }
-          }
-          
-          if (tabletsToAssignMapFileTo.size() > 0)
-            assignments.put(entry.getKey(), tabletsToAssignMapFileTo);
-        }
-        
-        assignmentStats.attemptingAssignments(assignments);
-        Map<Path,List<KeyExtent>> assignmentFailures2 = assignMapFiles(acuConf, instance, conf, credentials, fs, tableId, assignments, paths, numAssignThreads,
-            numThreads);
-        assignmentStats.assignmentsFailed(assignmentFailures2);
-        
-        // merge assignmentFailures2 into assignmentFailures
-        for (Entry<Path,List<KeyExtent>> entry : assignmentFailures2.entrySet()) {
-          assignmentFailures.get(entry.getKey()).addAll(entry.getValue());
-          
-          Integer fc = failureCount.get(entry.getKey());
-          if (fc == null)
-            fc = 0;
-          
-          failureCount.put(entry.getKey(), fc + 1);
-        }
-        
-        // remove map files that have no more key extents to assign
-        Iterator<Entry<Path,List<KeyExtent>>> afIter = assignmentFailures.entrySet().iterator();
-        while (afIter.hasNext()) {
-          Entry<Path,List<KeyExtent>> entry = afIter.next();
-          if (entry.getValue().size() == 0)
-            afIter.remove();
-        }
-        
-        Set<Entry<Path,Integer>> failureIter = failureCount.entrySet();
-        for (Entry<Path,Integer> entry : failureIter) {
-          int retries = acuConf.getCount(Property.TSERV_BULK_RETRY);
-          if (entry.getValue() > retries && assignmentFailures.get(entry.getKey()) != null) {
-            log.error("Map file " + entry.getKey() + " failed more than " + retries + " times, giving up.");
-            completeFailures.put(entry.getKey(), assignmentFailures.get(entry.getKey()));
-            assignmentFailures.remove(entry.getKey());
-          }
-        }
-      }
-      assignmentStats.assignmentsAbandoned(completeFailures);
-      Set<Path> failedFailures = processFailures(completeFailures);
-      assignmentStats.unrecoveredMapFiles(failedFailures);
-      
-      timer.stop(Timers.TOTAL);
-      printReport();
-      return assignmentStats;
-    } finally {
-      if (client != null)
-        ServerClient.close(client);
-      locator.invalidateCache();
-    }
-  }
-  
-  private void printReport() {
-    long totalTime = 0;
-    for (Timers t : Timers.values()) {
-      if (t == Timers.TOTAL)
-        continue;
-      
-      totalTime += timer.get(t);
-    }
-    
-    log.debug("BULK IMPORT TIMING STATISTICS");
-    log.debug(String.format("Examine map files    : %,10.2f secs %6.2f%s", timer.getSecs(Timers.EXAMINE_MAP_FILES), 100.0 * timer.get(Timers.EXAMINE_MAP_FILES)
-        / timer.get(Timers.TOTAL), "%"));
-    log.debug(String.format("Query %-14s : %,10.2f secs %6.2f%s", MetadataTable.NAME, timer.getSecs(Timers.QUERY_METADATA),
-        100.0 * timer.get(Timers.QUERY_METADATA) / timer.get(Timers.TOTAL), "%"));
-    log.debug(String.format("Import Map Files     : %,10.2f secs %6.2f%s", timer.getSecs(Timers.IMPORT_MAP_FILES), 100.0 * timer.get(Timers.IMPORT_MAP_FILES)
-        / timer.get(Timers.TOTAL), "%"));
-    log.debug(String.format("Sleep                : %,10.2f secs %6.2f%s", timer.getSecs(Timers.SLEEP),
-        100.0 * timer.get(Timers.SLEEP) / timer.get(Timers.TOTAL), "%"));
-    log.debug(String.format("Misc                 : %,10.2f secs %6.2f%s", (timer.get(Timers.TOTAL) - totalTime) / 1000.0, 100.0
-        * (timer.get(Timers.TOTAL) - totalTime) / timer.get(Timers.TOTAL), "%"));
-    log.debug(String.format("Total                : %,10.2f secs", timer.getSecs(Timers.TOTAL)));
-  }
-  
-  private Set<Path> processFailures(Map<Path,List<KeyExtent>> completeFailures) {
-    // if a map file was not assigned to any tablets, we should just move it;
-    // not currently being done?
-    
-    Set<Entry<Path,List<KeyExtent>>> es = completeFailures.entrySet();
-    
-    if (completeFailures.size() == 0)
-      return Collections.emptySet();
-    
-    log.debug("The following map files failed ");
-    
-    for (Entry<Path,List<KeyExtent>> entry : es) {
-      List<KeyExtent> extents = entry.getValue();
-      
-      for (KeyExtent keyExtent : extents)
-        log.debug("\t" + entry.getKey() + " -> " + keyExtent);
-    }
-    
-    return Collections.emptySet();
-  }
-  
-  private class AssignmentInfo {
-    public AssignmentInfo(KeyExtent keyExtent, Long estSize) {
-      this.ke = keyExtent;
-      this.estSize = estSize;
-    }
-    
-    KeyExtent ke;
-    long estSize;
-  }
-  
-  private static List<KeyExtent> extentsOf(List<TabletLocation> locations) {
-    List<KeyExtent> result = new ArrayList<KeyExtent>(locations.size());
-    for (TabletLocation tl : locations)
-      result.add(tl.tablet_extent);
-    return result;
-  }
-  
-  private Map<Path,List<AssignmentInfo>> estimateSizes(final AccumuloConfiguration acuConf, final Configuration conf, final VolumeManager vm,
-      Map<Path,List<TabletLocation>> assignments, Collection<Path> paths, int numThreads) {
-    
-    long t1 = System.currentTimeMillis();
-    final Map<Path,Long> mapFileSizes = new TreeMap<Path,Long>();
-    
-    try {
-      for (Path path : paths) {
-        FileSystem fs = vm.getFileSystemByPath(path);
-        mapFileSizes.put(path, fs.getContentSummary(path).getLength());
-      }
-    } catch (IOException e) {
-      log.error("Failed to get map files in for " + paths + ": " + e.getMessage(), e);
-      throw new RuntimeException(e);
-    }
-    
-    final Map<Path,List<AssignmentInfo>> ais = Collections.synchronizedMap(new TreeMap<Path,List<AssignmentInfo>>());
-    
-    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads, new NamingThreadFactory("estimateSizes"));
-    
-    for (final Entry<Path,List<TabletLocation>> entry : assignments.entrySet()) {
-      if (entry.getValue().size() == 1) {
-        TabletLocation tabletLocation = entry.getValue().get(0);
-        
-        // if the tablet completely contains the map file, there is no need
-        // to estimate its size
-        ais.put(entry.getKey(), Collections.singletonList(new AssignmentInfo(tabletLocation.tablet_extent, mapFileSizes.get(entry.getKey()))));
-        continue;
-      }
-      
-      Runnable estimationTask = new Runnable() {
-        @Override
-        public void run() {
-          Map<KeyExtent,Long> estimatedSizes = null;
-          
-          try {
-            FileSystem fs = vm.getFileSystemByPath(entry.getKey());
-            estimatedSizes = FileUtil.estimateSizes(acuConf, entry.getKey(), mapFileSizes.get(entry.getKey()), extentsOf(entry.getValue()), conf, fs);
-          } catch (IOException e) {
-            log.warn("Failed to estimate map file sizes " + e.getMessage());
-          }
-          
-          if (estimatedSizes == null) {
-            // estimation failed, do a simple estimation
-            estimatedSizes = new TreeMap<KeyExtent,Long>();
-            long estSize = (long) (mapFileSizes.get(entry.getKey()) / (double) entry.getValue().size());
-            for (TabletLocation tl : entry.getValue())
-              estimatedSizes.put(tl.tablet_extent, estSize);
-          }
-          
-          List<AssignmentInfo> assignmentInfoList = new ArrayList<AssignmentInfo>(estimatedSizes.size());
-          
-          for (Entry<KeyExtent,Long> entry2 : estimatedSizes.entrySet())
-            assignmentInfoList.add(new AssignmentInfo(entry2.getKey(), entry2.getValue()));
-          
-          ais.put(entry.getKey(), assignmentInfoList);
-        }
-      };
-      
-      threadPool.submit(new TraceRunnable(new LoggingRunnable(log, estimationTask)));
-    }
-    
-    threadPool.shutdown();
-    
-    while (!threadPool.isTerminated()) {
-      try {
-        threadPool.awaitTermination(60, TimeUnit.SECONDS);
-      } catch (InterruptedException e) {
-        e.printStackTrace();
-        throw new RuntimeException(e);
-      }
-    }
-    
-    long t2 = System.currentTimeMillis();
-    
-    log.debug(String.format("Estimated map files sizes in %6.2f secs", (t2 - t1) / 1000.0));
-    
-    return ais;
-  }
-  
-  private static Map<KeyExtent,String> locationsOf(Map<Path,List<TabletLocation>> assignments) {
-    Map<KeyExtent,String> result = new HashMap<KeyExtent,String>();
-    for (List<TabletLocation> entry : assignments.values()) {
-      for (TabletLocation tl : entry) {
-        result.put(tl.tablet_extent, tl.tablet_location);
-      }
-    }
-    return result;
-  }
-  
-  private Map<Path,List<KeyExtent>> assignMapFiles(AccumuloConfiguration acuConf, Instance instance, Configuration conf, Credentials credentials,
-      VolumeManager fs, String tableId, Map<Path,List<TabletLocation>> assignments, Collection<Path> paths, int numThreads, int numMapThreads) {
-    timer.start(Timers.EXAMINE_MAP_FILES);
-    Map<Path,List<AssignmentInfo>> assignInfo = estimateSizes(acuConf, conf, fs, assignments, paths, numMapThreads);
-    timer.stop(Timers.EXAMINE_MAP_FILES);
-    
-    Map<Path,List<KeyExtent>> ret;
-    
-    timer.start(Timers.IMPORT_MAP_FILES);
-    ret = assignMapFiles(credentials, tableId, assignInfo, locationsOf(assignments), numThreads);
-    timer.stop(Timers.IMPORT_MAP_FILES);
-    
-    return ret;
-  }
-  
-  private class AssignmentTask implements Runnable {
-    final Map<Path,List<KeyExtent>> assignmentFailures;
-    String location;
-    Credentials credentials;
-    private Map<KeyExtent,List<PathSize>> assignmentsPerTablet;
-    
-    public AssignmentTask(Credentials credentials, Map<Path,List<KeyExtent>> assignmentFailures, String tableName, String location,
-        Map<KeyExtent,List<PathSize>> assignmentsPerTablet) {
-      this.assignmentFailures = assignmentFailures;
-      this.location = location;
-      this.assignmentsPerTablet = assignmentsPerTablet;
-      this.credentials = credentials;
-    }
-    
-    private void handleFailures(Collection<KeyExtent> failures, String message) {
-      for (KeyExtent ke : failures) {
-        List<PathSize> mapFiles = assignmentsPerTablet.get(ke);
-        synchronized (assignmentFailures) {
-          for (PathSize pathSize : mapFiles) {
-            List<KeyExtent> existingFailures = assignmentFailures.get(pathSize.path);
-            if (existingFailures == null) {
-              existingFailures = new ArrayList<KeyExtent>();
-              assignmentFailures.put(pathSize.path, existingFailures);
-            }
-            
-            existingFailures.add(ke);
-          }
-        }
-        
-        log.info("Could not assign " + mapFiles.size() + " map files to tablet " + ke + " because : " + message + ".  Will retry ...");
-      }
-    }
-    
-    @Override
-    public void run() {
-      HashSet<Path> uniqMapFiles = new HashSet<Path>();
-      for (List<PathSize> mapFiles : assignmentsPerTablet.values())
-        for (PathSize ps : mapFiles)
-          uniqMapFiles.add(ps.path);
-      
-      log.debug("Assigning " + uniqMapFiles.size() + " map files to " + assignmentsPerTablet.size() + " tablets at " + location);
-      
-      try {
-        List<KeyExtent> failures = assignMapFiles(credentials, location, assignmentsPerTablet);
-        handleFailures(failures, "Not Serving Tablet");
-      } catch (AccumuloException e) {
-        handleFailures(assignmentsPerTablet.keySet(), e.getMessage());
-      } catch (AccumuloSecurityException e) {
-        handleFailures(assignmentsPerTablet.keySet(), e.getMessage());
-      }
-    }
-    
-  }
-  
-  private class PathSize {
-    public PathSize(Path mapFile, long estSize) {
-      this.path = mapFile;
-      this.estSize = estSize;
-    }
-    
-    Path path;
-    long estSize;
-    
-    @Override
-    public String toString() {
-      return path + " " + estSize;
-    }
-  }
-  
-  private Map<Path,List<KeyExtent>> assignMapFiles(Credentials credentials, String tableName, Map<Path,List<AssignmentInfo>> assignments,
-      Map<KeyExtent,String> locations, int numThreads) {
-    
-    // group assignments by tablet
-    Map<KeyExtent,List<PathSize>> assignmentsPerTablet = new TreeMap<KeyExtent,List<PathSize>>();
-    for (Entry<Path,List<AssignmentInfo>> entry : assignments.entrySet()) {
-      Path mapFile = entry.getKey();
-      List<AssignmentInfo> tabletsToAssignMapFileTo = entry.getValue();
-      
-      for (AssignmentInfo ai : tabletsToAssignMapFileTo) {
-        List<PathSize> mapFiles = assignmentsPerTablet.get(ai.ke);
-        if (mapFiles == null) {
-          mapFiles = new ArrayList<PathSize>();
-          assignmentsPerTablet.put(ai.ke, mapFiles);
-        }
-        
-        mapFiles.add(new PathSize(mapFile, ai.estSize));
-      }
-    }
-    
-    // group assignments by tabletserver
-    
-    Map<Path,List<KeyExtent>> assignmentFailures = Collections.synchronizedMap(new TreeMap<Path,List<KeyExtent>>());
-    
-    TreeMap<String,Map<KeyExtent,List<PathSize>>> assignmentsPerTabletServer = new TreeMap<String,Map<KeyExtent,List<PathSize>>>();
-    
-    for (Entry<KeyExtent,List<PathSize>> entry : assignmentsPerTablet.entrySet()) {
-      KeyExtent ke = entry.getKey();
-      String location = locations.get(ke);
-      
-      if (location == null) {
-        for (PathSize pathSize : entry.getValue()) {
-          synchronized (assignmentFailures) {
-            List<KeyExtent> failures = assignmentFailures.get(pathSize.path);
-            if (failures == null) {
-              failures = new ArrayList<KeyExtent>();
-              assignmentFailures.put(pathSize.path, failures);
-            }
-            
-            failures.add(ke);
-          }
-        }
-        
-        log.warn("Could not assign " + entry.getValue().size() + " map files to tablet " + ke + " because it had no location, will retry ...");
-        
-        continue;
-      }
-      
-      Map<KeyExtent,List<PathSize>> apt = assignmentsPerTabletServer.get(location);
-      if (apt == null) {
-        apt = new TreeMap<KeyExtent,List<PathSize>>();
-        assignmentsPerTabletServer.put(location, apt);
-      }
-      
-      apt.put(entry.getKey(), entry.getValue());
-    }
-    
-    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads, new NamingThreadFactory("submit"));
-    
-    for (Entry<String,Map<KeyExtent,List<PathSize>>> entry : assignmentsPerTabletServer.entrySet()) {
-      String location = entry.getKey();
-      threadPool.submit(new AssignmentTask(credentials, assignmentFailures, tableName, location, entry.getValue()));
-    }
-    
-    threadPool.shutdown();
-    
-    while (!threadPool.isTerminated()) {
-      try {
-        threadPool.awaitTermination(60, TimeUnit.SECONDS);
-      } catch (InterruptedException e) {
-        e.printStackTrace();
-        throw new RuntimeException(e);
-      }
-    }
-    
-    return assignmentFailures;
-  }
-  
-  private List<KeyExtent> assignMapFiles(Credentials credentials, String location, Map<KeyExtent,List<PathSize>> assignmentsPerTablet)
-      throws AccumuloException, AccumuloSecurityException {
-    try {
-      long timeInMillis = instance.getConfiguration().getTimeInMillis(Property.TSERV_BULK_TIMEOUT);
-      TabletClientService.Iface client = ThriftUtil.getTServerClient(location, timeInMillis);
-      try {
-        HashMap<KeyExtent,Map<String,org.apache.accumulo.core.data.thrift.MapFileInfo>> files = new HashMap<KeyExtent,Map<String,org.apache.accumulo.core.data.thrift.MapFileInfo>>();
-        for (Entry<KeyExtent,List<PathSize>> entry : assignmentsPerTablet.entrySet()) {
-          HashMap<String,org.apache.accumulo.core.data.thrift.MapFileInfo> tabletFiles = new HashMap<String,org.apache.accumulo.core.data.thrift.MapFileInfo>();
-          files.put(entry.getKey(), tabletFiles);
-          
-          for (PathSize pathSize : entry.getValue()) {
-            org.apache.accumulo.core.data.thrift.MapFileInfo mfi = new org.apache.accumulo.core.data.thrift.MapFileInfo(pathSize.estSize);
-            tabletFiles.put(pathSize.path.toString(), mfi);
-          }
-        }
-        
-        log.debug("Asking " + location + " to bulk load " + files);
-        List<TKeyExtent> failures = client.bulkImport(Tracer.traceInfo(), credentials.toThrift(instance), tid, Translator.translate(files, Translator.KET),
-            setTime);
-        
-        return Translator.translate(failures, Translator.TKET);
-      } finally {
-        ThriftUtil.returnClient((TServiceClient) client);
-      }
-    } catch (ThriftSecurityException e) {
-      throw new AccumuloSecurityException(e.user, e.code, e);
-    } catch (Throwable t) {
-      t.printStackTrace();
-      throw new AccumuloException(t);
-    }
-  }
-  
-  public static List<TabletLocation> findOverlappingTablets(AccumuloConfiguration acuConf, VolumeManager fs, TabletLocator locator, Path file,
-      Credentials credentials) throws Exception {
-    return findOverlappingTablets(acuConf, fs, locator, file, null, null, credentials);
-  }
-  
-  public static List<TabletLocation> findOverlappingTablets(AccumuloConfiguration acuConf, VolumeManager fs, TabletLocator locator, Path file,
-      KeyExtent failed,
-      Credentials credentials) throws Exception {
-    locator.invalidateCache(failed);
-    Text start = failed.getPrevEndRow();
-    if (start != null)
-      start = Range.followingPrefix(start);
-    return findOverlappingTablets(acuConf, fs, locator, file, start, failed.getEndRow(), credentials);
-  }
-  
-  final static byte[] byte0 = {0};
-  
-  public static List<TabletLocation> findOverlappingTablets(AccumuloConfiguration acuConf, VolumeManager vm, TabletLocator locator, Path file, Text startRow,
-      Text endRow, Credentials credentials) throws Exception {
-    List<TabletLocation> result = new ArrayList<TabletLocation>();
-    Collection<ByteSequence> columnFamilies = Collections.emptyList();
-    String filename = file.toString();
-    // log.debug(filename + " finding overlapping tablets " + startRow + " -> " + endRow);
-    FileSystem fs = vm.getFileSystemByPath(file);
-    FileSKVIterator reader = FileOperations.getInstance().openReader(filename, true, fs, fs.getConf(), acuConf);
-    try {
-      Text row = startRow;
-      if (row == null)
-        row = new Text();
-      while (true) {
-        // log.debug(filename + " Seeking to row " + row);
-        reader.seek(new Range(row, null), columnFamilies, false);
-        if (!reader.hasTop()) {
-          // log.debug(filename + " not found");
-          break;
-        }
-        row = reader.getTopKey().getRow();
-        TabletLocation tabletLocation = locator.locateTablet(credentials, row, false, true);
-        // log.debug(filename + " found row " + row + " at location " + tabletLocation);
-        result.add(tabletLocation);
-        row = tabletLocation.tablet_extent.getEndRow();
-        if (row != null && (endRow == null || row.compareTo(endRow) < 0)) {
-          row = new Text(row);
-          row.append(byte0, 0, byte0.length);
-        } else
-          break;
-      }
-    } finally {
-      reader.close();
-    }
-    // log.debug(filename + " to be sent to " + result);
-    return result;
-  }
-  
-  public static class AssignmentStats {
-    private Map<KeyExtent,Integer> counts;
-    private int numUniqueMapFiles;
-    private Map<Path,List<KeyExtent>> completeFailures = null;
-    private Set<Path> failedFailures = null;
-    
-    AssignmentStats(int fileCount) {
-      counts = new HashMap<KeyExtent,Integer>();
-      numUniqueMapFiles = fileCount;
-    }
-    
-    void attemptingAssignments(Map<Path,List<TabletLocation>> assignments) {
-      for (Entry<Path,List<TabletLocation>> entry : assignments.entrySet()) {
-        for (TabletLocation tl : entry.getValue()) {
-          
-          Integer count = getCount(tl.tablet_extent);
-          
-          counts.put(tl.tablet_extent, count + 1);
-        }
-      }
-    }
-    
-    void assignmentsFailed(Map<Path,List<KeyExtent>> assignmentFailures) {
-      for (Entry<Path,List<KeyExtent>> entry : assignmentFailures.entrySet()) {
-        for (KeyExtent ke : entry.getValue()) {
-          
-          Integer count = getCount(ke);
-          
-          counts.put(ke, count - 1);
-        }
-      }
-    }
-    
-    void assignmentsAbandoned(Map<Path,List<KeyExtent>> completeFailures) {
-      this.completeFailures = completeFailures;
-    }
-    
-    void tabletSplit(KeyExtent parent, Collection<KeyExtent> children) {
-      Integer count = getCount(parent);
-      
-      counts.remove(parent);
-      
-      for (KeyExtent keyExtent : children)
-        counts.put(keyExtent, count);
-    }
-    
-    private Integer getCount(KeyExtent parent) {
-      Integer count = counts.get(parent);
-      
-      if (count == null) {
-        count = 0;
-      }
-      return count;
-    }
-    
-    void unrecoveredMapFiles(Set<Path> failedFailures) {
-      this.failedFailures = failedFailures;
-    }
-    
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder();
-      int totalAssignments = 0;
-      int tabletsImportedTo = 0;
-      
-      int min = Integer.MAX_VALUE, max = Integer.MIN_VALUE;
-      
-      for (Entry<KeyExtent,Integer> entry : counts.entrySet()) {
-        totalAssignments += entry.getValue();
-        if (entry.getValue() > 0)
-          tabletsImportedTo++;
-        
-        if (entry.getValue() < min)
-          min = entry.getValue();
-        
-        if (entry.getValue() > max)
-          max = entry.getValue();
-      }
-      
-      double stddev = 0;
-      
-      for (Entry<KeyExtent,Integer> entry : counts.entrySet())
-        stddev += Math.pow(entry.getValue() - totalAssignments / (double) counts.size(), 2);
-      
-      stddev = stddev / counts.size();
-      stddev = Math.sqrt(stddev);
-      
-      Set<KeyExtent> failedTablets = new HashSet<KeyExtent>();
-      for (List<KeyExtent> ft : completeFailures.values())
-        failedTablets.addAll(ft);
-      
-      sb.append("BULK IMPORT ASSIGNMENT STATISTICS\n");
-      sb.append(String.format("# of map files            : %,10d%n", numUniqueMapFiles));
-      sb.append(String.format("# map files with failures : %,10d %6.2f%s%n", completeFailures.size(), completeFailures.size() * 100.0 / numUniqueMapFiles, "%"));
-      sb.append(String.format("# failed failed map files : %,10d %s%n", failedFailures.size(), failedFailures.size() > 0 ? " <-- THIS IS BAD" : ""));
-      sb.append(String.format("# of tablets              : %,10d%n", counts.size()));
-      sb.append(String.format("# tablets imported to     : %,10d %6.2f%s%n", tabletsImportedTo, tabletsImportedTo * 100.0 / counts.size(), "%"));
-      sb.append(String.format("# tablets with failures   : %,10d %6.2f%s%n", failedTablets.size(), failedTablets.size() * 100.0 / counts.size(), "%"));
-      sb.append(String.format("min map files per tablet  : %,10d%n", min));
-      sb.append(String.format("max map files per tablet  : %,10d%n", max));
-      sb.append(String.format("avg map files per tablet  : %,10.2f (std dev = %.2f)%n", totalAssignments / (double) counts.size(), stddev));
-      return sb.toString();
-    }
-  }
-  
-}
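
For reference, a hedged usage sketch (not part of this commit) of how a caller might drive the removed bulkLoad entry point, based only on its signature above. The AccumuloConfiguration, Instance, Credentials, and bulk transaction id are assumed to be supplied by the caller; the class name is illustrative.

import java.util.List;

import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.security.Credentials;
import org.apache.accumulo.server.client.BulkImporter;

public class BulkLoadSketch {
  public static void importFiles(AccumuloConfiguration conf, Instance instance, Credentials creds,
      long tid, String tableId, List<String> rfiles, String errorDir) throws Exception {
    // bulkLoad returns the paths that could not be assigned to any tablet after all retries
    List<String> completeFailures =
        BulkImporter.bulkLoad(conf, instance, creds, tid, tableId, rfiles, errorDir, false);
    for (String path : completeFailures)
      System.err.println("bulk import gave up on: " + path);
  }
}

Internally, importFiles retries failed assignments with an exponential backoff (starting at 2 seconds, capped at 60) until each map file either assigns cleanly or exceeds TSERV_BULK_RETRY attempts.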

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java b/server/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
deleted file mode 100644
index 3f1aaa2..0000000
--- a/server/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.client;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.Callable;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.ClientService;
-import org.apache.accumulo.core.client.impl.thrift.ConfigurationType;
-import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
-import org.apache.accumulo.core.client.impl.thrift.TDiskUsage;
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.Credentials;
-import org.apache.accumulo.core.security.SystemPermission;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.server.conf.ServerConfiguration;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.util.TableDiskUsage;
-import org.apache.accumulo.server.zookeeper.TransactionWatcher;
-import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
-import org.apache.accumulo.trace.thrift.TInfo;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-
-public class ClientServiceHandler implements ClientService.Iface {
-  private static final Logger log = Logger.getLogger(ClientServiceHandler.class);
-  private static SecurityOperation security = AuditedSecurityOperation.getInstance();
-  protected final TransactionWatcher transactionWatcher;
-  private final Instance instance;
-  private final VolumeManager fs;
-  
-  public ClientServiceHandler(Instance instance, TransactionWatcher transactionWatcher, VolumeManager fs) {
-    this.instance = instance;
-    this.transactionWatcher = transactionWatcher;
-    this.fs = fs;
-  }
-  
-  protected String checkTableId(String tableName, TableOperation operation) throws ThriftTableOperationException {
-    String tableId = Tables.getNameToIdMap(instance).get(tableName);
-    if (tableId == null) {
-      // maybe the table exists, but the cache was not updated yet... so try to clear the cache and check again
-      Tables.clearCache(instance);
-      tableId = Tables.getNameToIdMap(instance).get(tableName);
-      if (tableId == null)
-        throw new ThriftTableOperationException(null, tableName, operation, TableOperationExceptionType.NOTFOUND, null);
-    }
-    return tableId;
-  }
-  
-  @Override
-  public String getInstanceId() {
-    return instance.getInstanceID();
-  }
-  
-  @Override
-  public String getRootTabletLocation() {
-    return instance.getRootTabletLocation();
-  }
-  
-  @Override
-  public String getZooKeepers() {
-    return instance.getZooKeepers();
-  }
-  
-  @Override
-  public void ping(TCredentials credentials) {
-    // anybody can call this; no authentication check
-    log.info("Master reports: I just got pinged!");
-  }
-  
-  @Override
-  public boolean authenticate(TInfo tinfo, TCredentials credentials) throws ThriftSecurityException {
-    try {
-      return security.authenticateUser(credentials, credentials);
-    } catch (ThriftSecurityException e) {
-      log.error(e);
-      throw e;
-    }
-  }
-  
-  @Override
-  public boolean authenticateUser(TInfo tinfo, TCredentials credentials, TCredentials toAuth) throws ThriftSecurityException {
-    try {
-      return security.authenticateUser(credentials, toAuth);
-    } catch (ThriftSecurityException e) {
-      log.error(e);
-      throw e;
-    }
-  }
-  
-  @Override
-  public void changeAuthorizations(TInfo tinfo, TCredentials credentials, String user, List<ByteBuffer> authorizations) throws ThriftSecurityException {
-    security.changeAuthorizations(credentials, user, new Authorizations(authorizations));
-  }
-  
-  @Override
-  public void changeLocalUserPassword(TInfo tinfo, TCredentials credentials, String principal, ByteBuffer password) throws ThriftSecurityException {
-    PasswordToken token = new PasswordToken(password);
-    Credentials toChange = new Credentials(principal, token);
-    security.changePassword(credentials, toChange);
-  }
-  
-  @Override
-  public void createLocalUser(TInfo tinfo, TCredentials credentials, String principal, ByteBuffer password) throws ThriftSecurityException {
-    PasswordToken token = new PasswordToken(password);
-    Credentials newUser = new Credentials(principal, token);
-    security.createUser(credentials, newUser, new Authorizations());
-  }
-  
-  @Override
-  public void dropLocalUser(TInfo tinfo, TCredentials credentials, String user) throws ThriftSecurityException {
-    security.dropUser(credentials, user);
-  }
-  
-  @Override
-  public List<ByteBuffer> getUserAuthorizations(TInfo tinfo, TCredentials credentials, String user) throws ThriftSecurityException {
-    return security.getUserAuthorizations(credentials, user).getAuthorizationsBB();
-  }
-  
-  @Override
-  public void grantSystemPermission(TInfo tinfo, TCredentials credentials, String user, byte permission) throws ThriftSecurityException {
-    security.grantSystemPermission(credentials, user, SystemPermission.getPermissionById(permission));
-  }
-  
-  @Override
-  public void grantTablePermission(TInfo tinfo, TCredentials credentials, String user, String tableName, byte permission) throws ThriftSecurityException,
-      ThriftTableOperationException {
-    String tableId = checkTableId(tableName, TableOperation.PERMISSION);
-    security.grantTablePermission(credentials, user, tableId, TablePermission.getPermissionById(permission));
-  }
-  
-  @Override
-  public void revokeSystemPermission(TInfo tinfo, TCredentials credentials, String user, byte permission) throws ThriftSecurityException {
-    security.revokeSystemPermission(credentials, user, SystemPermission.getPermissionById(permission));
-  }
-  
-  @Override
-  public void revokeTablePermission(TInfo tinfo, TCredentials credentials, String user, String tableName, byte permission) throws ThriftSecurityException,
-      ThriftTableOperationException {
-    String tableId = checkTableId(tableName, TableOperation.PERMISSION);
-    security.revokeTablePermission(credentials, user, tableId, TablePermission.getPermissionById(permission));
-  }
-  
-  @Override
-  public boolean hasSystemPermission(TInfo tinfo, TCredentials credentials, String user, byte sysPerm) throws ThriftSecurityException {
-    return security.hasSystemPermission(credentials, user, SystemPermission.getPermissionById(sysPerm));
-  }
-  
-  @Override
-  public boolean hasTablePermission(TInfo tinfo, TCredentials credentials, String user, String tableName, byte tblPerm) throws ThriftSecurityException,
-      ThriftTableOperationException {
-    String tableId = checkTableId(tableName, TableOperation.PERMISSION);
-    return security.hasTablePermission(credentials, user, tableId, TablePermission.getPermissionById(tblPerm));
-  }
-  
-  @Override
-  public Set<String> listLocalUsers(TInfo tinfo, TCredentials credentials) throws ThriftSecurityException {
-    return security.listUsers(credentials);
-  }
-  
-  private static Map<String,String> conf(TCredentials credentials, AccumuloConfiguration conf) throws TException {
-    security.authenticateUser(credentials, credentials);
-    conf.invalidateCache();
-    
-    Map<String,String> result = new HashMap<String,String>();
-    for (Entry<String,String> entry : conf) {
-      String key = entry.getKey();
-      if (!Property.isSensitive(key))
-        result.put(key, entry.getValue());
-    }
-    return result;
-  }
-  
-  @Override
-  public Map<String,String> getConfiguration(TInfo tinfo, TCredentials credentials, ConfigurationType type) throws TException {
-    switch (type) {
-      case CURRENT:
-        return conf(credentials, new ServerConfiguration(instance).getConfiguration());
-      case SITE:
-        return conf(credentials, ServerConfiguration.getSiteConfiguration());
-      case DEFAULT:
-        return conf(credentials, AccumuloConfiguration.getDefaultConfiguration());
-    }
-    throw new RuntimeException("Unexpected configuration type " + type);
-  }
-  
-  @Override
-  public Map<String,String> getTableConfiguration(TInfo tinfo, TCredentials credentials, String tableName) throws TException, ThriftTableOperationException {
-    String tableId = checkTableId(tableName, null);
-    return conf(credentials, new ServerConfiguration(instance).getTableConfiguration(tableId));
-  }
-  
-  @Override
-  public List<String> bulkImportFiles(TInfo tinfo, final TCredentials credentials, final long tid, final String tableId, final List<String> files,
-      final String errorDir, final boolean setTime) throws ThriftSecurityException, ThriftTableOperationException, TException {
-    try {
-      if (!security.canPerformSystemActions(credentials))
-        throw new AccumuloSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-      return transactionWatcher.run(Constants.BULK_ARBITRATOR_TYPE, tid, new Callable<List<String>>() {
-        @Override
-        public List<String> call() throws Exception {
-          return BulkImporter.bulkLoad(new ServerConfiguration(instance).getConfiguration(), instance, new Credentials(credentials.getPrincipal(),
-              AuthenticationTokenSerializer.deserialize(credentials.getTokenClassName(), credentials.getToken())), tid, tableId, files, errorDir, setTime);
-        }
-      });
-    } catch (AccumuloSecurityException e) {
-      throw e.asThriftException();
-    } catch (Exception ex) {
-      throw new TException(ex);
-    }
-  }
-  
-  @Override
-  public boolean isActive(TInfo tinfo, long tid) throws TException {
-    return transactionWatcher.isActive(tid);
-  }
-  
-  @SuppressWarnings({"rawtypes", "unchecked"})
-  @Override
-  public boolean checkClass(TInfo tinfo, TCredentials credentials, String className, String interfaceMatch) throws TException {
-    security.authenticateUser(credentials, credentials);
-    
-    ClassLoader loader = getClass().getClassLoader();
-    Class shouldMatch;
-    try {
-      shouldMatch = loader.loadClass(interfaceMatch);
-      Class test = AccumuloVFSClassLoader.loadClass(className, shouldMatch);
-      test.newInstance();
-      return true;
-    } catch (ClassCastException e) {
-      log.warn("Error checking object types", e);
-      return false;
-    } catch (ClassNotFoundException e) {
-      log.warn("Error checking object types", e);
-      return false;
-    } catch (InstantiationException e) {
-      log.warn("Error checking object types", e);
-      return false;
-    } catch (IllegalAccessException e) {
-      log.warn("Error checking object types", e);
-      return false;
-    }
-  }
-  
-  @Override
-  public boolean checkTableClass(TInfo tinfo, TCredentials credentials, String tableName, String className, String interfaceMatch) throws TException,
-      ThriftTableOperationException, ThriftSecurityException {
-    
-    security.authenticateUser(credentials, credentials);
-    
-    String tableId = checkTableId(tableName, null);
-    
-    ClassLoader loader = getClass().getClassLoader();
-    Class<?> shouldMatch;
-    try {
-      shouldMatch = loader.loadClass(interfaceMatch);
-      
-      new ServerConfiguration(instance).getTableConfiguration(tableId);
-      
-      String context = new ServerConfiguration(instance).getTableConfiguration(tableId).get(Property.TABLE_CLASSPATH);
-      
-      ClassLoader currentLoader;
-      
-      if (context != null && !context.equals("")) {
-        currentLoader = AccumuloVFSClassLoader.getContextManager().getClassLoader(context);
-      } else {
-        currentLoader = AccumuloVFSClassLoader.getClassLoader();
-      }
-      
-      Class<?> test = currentLoader.loadClass(className).asSubclass(shouldMatch);
-      test.newInstance();
-      return true;
-    } catch (Exception e) {
-      log.warn("Error checking object types", e);
-      return false;
-    }
-  }
-  
-  @Override
-  public List<TDiskUsage> getDiskUsage(Set<String> tables, TCredentials credentials) throws ThriftTableOperationException, ThriftSecurityException, TException {
-    try {
-      AuthenticationToken token = AuthenticationTokenSerializer.deserialize(credentials.getTokenClassName(), credentials.getToken());
-      Connector conn = instance.getConnector(credentials.getPrincipal(), token);
-      
-      HashSet<String> tableIds = new HashSet<String>();
-      
-      for (String table : tables) {
-        // ensure that the table exists
-        String tableId = checkTableId(table, null);
-        tableIds.add(tableId);
-        if (!security.canScan(credentials, tableId))
-          throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-      }
-      
-      // use the same set of tableIds that were validated above to avoid race conditions
-      Map<TreeSet<String>,Long> diskUsage = TableDiskUsage.getDiskUsage(new ServerConfiguration(instance).getConfiguration(), tableIds, fs, conn);
-      List<TDiskUsage> retUsages = new ArrayList<TDiskUsage>();
-      for (Map.Entry<TreeSet<String>,Long> usageItem : diskUsage.entrySet()) {
-        retUsages.add(new TDiskUsage(new ArrayList<String>(usageItem.getKey()), usageItem.getValue()));
-      }
-      return retUsages;
-      
-    } catch (AccumuloSecurityException e) {
-      throw e.asThriftException();
-    } catch (AccumuloException e) {
-      throw new TException(e);
-    } catch (IOException e) {
-      throw new TException(e);
-    }
-  }
-}
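
A hedged sketch of the stale-cache retry idiom used by checkTableId above, pulled out as a standalone helper; the class and method names are illustrative, not part of the commit.

import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.impl.Tables;

public class TableIdLookup {
  /** Resolve a table name to its id, refreshing the cached mapping once before giving up. */
  public static String resolve(Instance instance, String tableName) {
    String tableId = Tables.getNameToIdMap(instance).get(tableName);
    if (tableId == null) {
      // the table may have been created after the cached mapping was built
      Tables.clearCache(instance);
      tableId = Tables.getNameToIdMap(instance).get(tableName);
    }
    return tableId; // still null means the table really does not exist
  }
}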

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java b/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
deleted file mode 100644
index 9e6bbe7..0000000
--- a/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.client;
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.impl.ConnectorImpl;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.security.Credentials;
-import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.core.util.OpTimer;
-import org.apache.accumulo.core.util.StringUtil;
-import org.apache.accumulo.core.util.TextUtil;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.conf.ServerConfiguration;
-import org.apache.accumulo.server.zookeeper.ZooLock;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-
-/**
- * An implementation of Instance that looks in HDFS and ZooKeeper to find the master and root tablet location.
- * 
- */
-public class HdfsZooInstance implements Instance {
-  
-  public static class AccumuloNotInitializedException extends RuntimeException {
-    private static final long serialVersionUID = 1L;
-    
-    public AccumuloNotInitializedException(String string) {
-      super(string);
-    }
-  }
-  
-  private HdfsZooInstance() {
-    AccumuloConfiguration acuConf = ServerConfiguration.getSiteConfiguration();
-    zooCache = new ZooCache(acuConf.get(Property.INSTANCE_ZK_HOST), (int) acuConf.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT));
-  }
-  
-  private static HdfsZooInstance cachedHdfsZooInstance = null;
-  
-  public static synchronized Instance getInstance() {
-    if (cachedHdfsZooInstance == null)
-      cachedHdfsZooInstance = new HdfsZooInstance();
-    return cachedHdfsZooInstance;
-  }
-  
-  private static ZooCache zooCache;
-  private static String instanceId = null;
-  private static final Logger log = Logger.getLogger(HdfsZooInstance.class);
-  
-  @Override
-  public String getRootTabletLocation() {
-    String zRootLocPath = ZooUtil.getRoot(this) + RootTable.ZROOT_TABLET_LOCATION;
-    
-    OpTimer opTimer = new OpTimer(log, Level.TRACE).start("Looking up root tablet location in zoocache.");
-    
-    byte[] loc = zooCache.get(zRootLocPath);
-    
-    opTimer.stop("Found root tablet at " + (loc == null ? null : new String(loc)) + " in %DURATION%");
-    
-    if (loc == null) {
-      return null;
-    }
-    
-    return new String(loc).split("\\|")[0];
-  }
-  
-  @Override
-  public List<String> getMasterLocations() {
-    
-    String masterLocPath = ZooUtil.getRoot(this) + Constants.ZMASTER_LOCK;
-    
-    OpTimer opTimer = new OpTimer(log, Level.TRACE).start("Looking up master location in zoocache.");
-    
-    byte[] loc = ZooLock.getLockData(zooCache, masterLocPath, null);
-    
-    opTimer.stop("Found master at " + (loc == null ? null : new String(loc)) + " in %DURATION%");
-    
-    if (loc == null) {
-      return Collections.emptyList();
-    }
-    
-    return Collections.singletonList(new String(loc));
-  }
-  
-  @Override
-  public String getInstanceID() {
-    if (instanceId == null)
-      _getInstanceID();
-    return instanceId;
-  }
-  
-  private static synchronized void _getInstanceID() {
-    if (instanceId == null) {
-      String instanceIdFromFile = ZooUtil.getInstanceIDFromHdfs(ServerConstants.getInstanceIdLocation());
-      instanceId = instanceIdFromFile;
-    }
-  }
-  
-  @Override
-  public String getInstanceName() {
-    return ZooKeeperInstance.lookupInstanceName(zooCache, UUID.fromString(getInstanceID()));
-  }
-  
-  @Override
-  public String getZooKeepers() {
-    return ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_ZK_HOST);
-  }
-  
-  @Override
-  public int getZooKeepersSessionTimeOut() {
-    return (int) ServerConfiguration.getSiteConfiguration().getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT);
-  }
-  
-  @Override
-  public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
-    return new ConnectorImpl(this, new Credentials(principal, token));
-  }
-  
-  @Deprecated
-  @Override
-  public Connector getConnector(String user, byte[] pass) throws AccumuloException, AccumuloSecurityException {
-    return getConnector(user, new PasswordToken(pass));
-  }
-  
-  @Deprecated
-  @Override
-  public Connector getConnector(String user, ByteBuffer pass) throws AccumuloException, AccumuloSecurityException {
-    return getConnector(user, ByteBufferUtil.toBytes(pass));
-  }
-  
-  @Deprecated
-  @Override
-  public Connector getConnector(String user, CharSequence pass) throws AccumuloException, AccumuloSecurityException {
-    return getConnector(user, TextUtil.getBytes(new Text(pass.toString())));
-  }
-  
-  private AccumuloConfiguration conf = null;
-  
-  @Override
-  public AccumuloConfiguration getConfiguration() {
-    if (conf == null)
-      conf = new ServerConfiguration(this).getConfiguration();
-    return conf;
-  }
-  
-  @Override
-  public void setConfiguration(AccumuloConfiguration conf) {
-    this.conf = conf;
-  }
-  
-  public static void main(String[] args) {
-    Instance instance = HdfsZooInstance.getInstance();
-    System.out.println("Instance Name: " + instance.getInstanceName());
-    System.out.println("Instance ID: " + instance.getInstanceID());
-    System.out.println("ZooKeepers: " + instance.getZooKeepers());
-    System.out.println("Masters: " + StringUtil.join(instance.getMasterLocations(), ", "));
-  }
-
-  @Override
-  public void close() throws AccumuloException {
-    try {
-      zooCache.close();
-    } catch (InterruptedException e) {
-      throw new AccumuloException("Issues closing ZooKeeper, try again");
-    }
-  }
-  
-}
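For context, a minimal usage sketch of the singleton deleted above (illustration only, not from the commit): server code obtained the shared Instance through HdfsZooInstance.getInstance() and built a Connector from a principal plus an AuthenticationToken, which is the overload the deprecated byte[]/ByteBuffer/CharSequence variants delegate to. The principal and password below are placeholders.

// Sketch only: obtaining a Connector through the server-side singleton.
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.server.client.HdfsZooInstance;

public class HdfsZooInstanceConnectorSketch {
  public static void main(String[] args) throws Exception {
    // One lazily created instance per server process, backed by ZooKeeper and
    // the instance id stored in HDFS.
    Instance instance = HdfsZooInstance.getInstance();

    // Placeholder credentials; real callers pass their own principal and token.
    Connector connector = instance.getConnector("root", new PasswordToken("secret"));
    System.out.println("connected to " + instance.getInstanceName() + " as " + connector.whoami());
  }
}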

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java b/server/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java
deleted file mode 100644
index 442294f..0000000
--- a/server/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.conf;
-
-import org.apache.accumulo.server.client.HdfsZooInstance;
-
-public class ConfigSanityCheck {
-  
-  /**
-   * @param args
-   */
-  public static void main(String[] args) {
-    new ServerConfiguration(HdfsZooInstance.getInstance()).getConfiguration();
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/conf/ServerConfiguration.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/conf/ServerConfiguration.java b/server/src/main/java/org/apache/accumulo/server/conf/ServerConfiguration.java
deleted file mode 100644
index b2acd1a..0000000
--- a/server/src/main/java/org/apache/accumulo/server/conf/ServerConfiguration.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.conf;
-
-import java.security.SecurityPermission;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.ConfigSanityCheck;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.conf.SiteConfiguration;
-import org.apache.accumulo.core.data.KeyExtent;
-
-public class ServerConfiguration {
-  
-  private static final Map<String,TableConfiguration> tableInstances = new HashMap<String,TableConfiguration>(1);
-  private static SecurityPermission CONFIGURATION_PERMISSION = new SecurityPermission("configurationPermission");
-  
-  public static synchronized SiteConfiguration getSiteConfiguration() {
-    checkPermissions();
-    return SiteConfiguration.getInstance(getDefaultConfiguration());
-  }
-  
-  private static void checkPermissions() {
-    SecurityManager sm = System.getSecurityManager();
-    if (sm != null) {
-      sm.checkPermission(CONFIGURATION_PERMISSION);
-    }
-  }
-  
-  private static synchronized ZooConfiguration getZooConfiguration(Instance instance) {
-    checkPermissions();
-    return ZooConfiguration.getInstance(instance, getSiteConfiguration());
-  }
-  
-  public static synchronized DefaultConfiguration getDefaultConfiguration() {
-    checkPermissions();
-    return DefaultConfiguration.getInstance();
-  }
-  
-  public static synchronized AccumuloConfiguration getSystemConfiguration(Instance instance) {
-    return getZooConfiguration(instance);
-  }
-
-  public static TableConfiguration getTableConfiguration(Instance instance, String tableId) {
-    checkPermissions();
-    synchronized (tableInstances) {
-      TableConfiguration conf = tableInstances.get(tableId);
-      if (conf == null) {
-        conf = new TableConfiguration(instance.getInstanceID(), tableId, getSystemConfiguration(instance));
-        ConfigSanityCheck.validate(conf);
-        tableInstances.put(tableId, conf);
-      }
-      return conf;
-    }
-  }
-  
-  static void removeTableIdInstance(String tableId) {
-    synchronized (tableInstances) {
-      tableInstances.remove(tableId);
-    }
-  }
-  
-  static void expireAllTableObservers() {
-    synchronized (tableInstances) {
-      for (Entry<String,TableConfiguration> entry : tableInstances.entrySet()) {
-        entry.getValue().expireAllObservers();
-      }
-    }
-  }
-  
-  private final Instance instance;
-  
-  public ServerConfiguration(Instance instance) {
-    this.instance = instance;
-  }
-  
-  public TableConfiguration getTableConfiguration(String tableId) {
-    return getTableConfiguration(instance, tableId);
-  }
-  
-  public TableConfiguration getTableConfiguration(KeyExtent extent) {
-    return getTableConfiguration(extent.getTableId().toString());
-  }
-
-  public synchronized AccumuloConfiguration getConfiguration() {
-    return getZooConfiguration(instance);
-  }
-  
-  public Instance getInstance() {
-    return instance;
-  }
-
-}
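As a rough illustration of the static helpers deleted above, which layer the default configuration, the site file, ZooKeeper-backed system properties, and per-table overrides: the sketch below resolves one system property and one table property. The table id "1" and the chosen Property keys are placeholder examples, not values from this commit.

// Sketch only: resolving configuration through ServerConfiguration.
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.server.client.HdfsZooInstance;
import org.apache.accumulo.server.conf.ServerConfiguration;
import org.apache.accumulo.server.conf.TableConfiguration;

public class ServerConfigurationSketch {
  public static void main(String[] args) {
    Instance instance = HdfsZooInstance.getInstance();

    // System-wide properties: ZooKeeper values falling back to site file, then defaults.
    AccumuloConfiguration system = ServerConfiguration.getSystemConfiguration(instance);
    System.out.println(Property.TSERV_CLIENTPORT.getKey() + " = " + system.get(Property.TSERV_CLIENTPORT));

    // Per-table properties: cached per table id, falling back to the system configuration.
    TableConfiguration table = ServerConfiguration.getTableConfiguration(instance, "1");
    System.out.println(Property.TABLE_SPLIT_THRESHOLD.getKey() + " = " + table.get(Property.TABLE_SPLIT_THRESHOLD));
  }
}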

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/conf/TableConfWatcher.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/conf/TableConfWatcher.java b/server/src/main/java/org/apache/accumulo/server/conf/TableConfWatcher.java
deleted file mode 100644
index c407309..0000000
--- a/server/src/main/java/org/apache/accumulo/server/conf/TableConfWatcher.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.conf;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-
-class TableConfWatcher implements Watcher {
-  static {
-    Logger.getLogger("org.apache.zookeeper").setLevel(Level.WARN);
-    Logger.getLogger("org.apache.hadoop.io.compress").setLevel(Level.WARN);
-  }
-  
-  private static final Logger log = Logger.getLogger(TableConfWatcher.class);
-  private Instance instance = null;
-  
-  TableConfWatcher(Instance instance) {
-    this.instance = instance;
-  }
-  
-  @Override
-  public void process(WatchedEvent event) {
-    String path = event.getPath();
-    if (log.isTraceEnabled())
-      log.trace("WatchEvent : " + path + " " + event.getState() + " " + event.getType());
-    
-    String tablesPrefix = ZooUtil.getRoot(instance) + Constants.ZTABLES + "/";
-    
-    String tableId = null;
-    String key = null;
-    
-    if (path != null) {
-      if (path.startsWith(tablesPrefix)) {
-        tableId = path.substring(tablesPrefix.length());
-        if (tableId.contains("/")) {
-          tableId = tableId.substring(0, tableId.indexOf('/'));
-          if (path.startsWith(tablesPrefix + tableId + Constants.ZTABLE_CONF + "/"))
-            key = path.substring((tablesPrefix + tableId + Constants.ZTABLE_CONF + "/").length());
-        }
-      }
-      
-      if (tableId == null) {
-        log.warn("Zookeeper told me about a path I was not watching " + path + " state=" + event.getState() + " type=" + event.getType());
-        return;
-      }
-    }
-    
-    switch (event.getType()) {
-      case NodeDataChanged:
-        if (log.isTraceEnabled())
-          log.trace("EventNodeDataChanged " + event.getPath());
-        if (key != null)
-          ServerConfiguration.getTableConfiguration(instance, tableId).propertyChanged(key);
-        break;
-      case NodeChildrenChanged:
-        ServerConfiguration.getTableConfiguration(instance, tableId).propertiesChanged(key);
-        break;
-      case NodeDeleted:
-        if (key == null) {
-          // only remove the AccumuloConfiguration object when a
-          // table node is deleted, not when a table's property is
-          // deleted.
-          ServerConfiguration.removeTableIdInstance(tableId);
-        }
-        break;
-      case None:
-        switch (event.getState()) {
-          case Expired:
-            ServerConfiguration.expireAllTableObservers();
-            break;
-          case SyncConnected:
-            break;
-          case Disconnected:
-            break;
-          default:
-            log.warn("EventNone event not handled path = " + event.getPath() + " state=" + event.getState());
-        }
-        break;
-      case NodeCreated:
-        switch (event.getState()) {
-          case SyncConnected:
-            break;
-          default:
-            log.warn("Event NodeCreated event not handled path = " + event.getPath() + " state=" + event.getState());
-        }
-        break;
-      default:
-        log.warn("Event not handled path = " + event.getPath() + " state=" + event.getState() + " type = " + event.getType());
-    }
-  }
-}
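A small sketch of the ZooKeeper layout the watcher above parses: the table id is the path segment after Constants.ZTABLES, and the property key is the segment after Constants.ZTABLE_CONF; anything else is logged and ignored. The table id and key below are placeholders chosen for illustration.

// Sketch only: building the per-table configuration path the watcher matches against.
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.zookeeper.ZooUtil;
import org.apache.accumulo.server.client.HdfsZooInstance;

public class TableConfPathSketch {
  public static void main(String[] args) {
    Instance instance = HdfsZooInstance.getInstance();
    String tableId = "1";                  // placeholder table id
    String key = "table.split.threshold";  // placeholder property key

    // e.g. /accumulo/<instance-id>/tables/1/conf/table.split.threshold
    String path = ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_CONF + "/" + key;
    System.out.println("watched path: " + path);
  }
}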

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java b/server/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
deleted file mode 100644
index 4c58153..0000000
--- a/server/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.conf;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.ConfigurationObserver;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.log4j.Logger;
-
-public class TableConfiguration extends AccumuloConfiguration {
-  private static final Logger log = Logger.getLogger(TableConfiguration.class);
-  
-  private static ZooCache tablePropCache = null;
-  private final String instanceId;
-  private final AccumuloConfiguration parent;
-  
-  private String table = null;
-  private Set<ConfigurationObserver> observers;
-  
-  public TableConfiguration(String instanceId, String table, AccumuloConfiguration parent) {
-    this.instanceId = instanceId;
-    this.table = table;
-    this.parent = parent;
-    
-    this.observers = Collections.synchronizedSet(new HashSet<ConfigurationObserver>());
-  }
-  
-  private static ZooCache getTablePropCache() {
-    Instance inst = HdfsZooInstance.getInstance();
-    if (tablePropCache == null)
-      synchronized (TableConfiguration.class) {
-        if (tablePropCache == null)
-          tablePropCache = new ZooCache(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut(), new TableConfWatcher(inst));
-      }
-    return tablePropCache;
-  }
-  
-  public void addObserver(ConfigurationObserver co) {
-    if (table == null) {
-      String err = "Attempt to add observer for non-table configuration";
-      log.error(err);
-      throw new RuntimeException(err);
-    }
-    iterator();
-    observers.add(co);
-  }
-  
-  public void removeObserver(ConfigurationObserver configObserver) {
-    if (table == null) {
-      String err = "Attempt to remove observer for non-table configuration";
-      log.error(err);
-      throw new RuntimeException(err);
-    }
-    observers.remove(configObserver);
-  }
-  
-  public void expireAllObservers() {
-    Collection<ConfigurationObserver> copy = Collections.unmodifiableCollection(observers);
-    for (ConfigurationObserver co : copy)
-      co.sessionExpired();
-  }
-  
-  public void propertyChanged(String key) {
-    Collection<ConfigurationObserver> copy = Collections.unmodifiableCollection(observers);
-    for (ConfigurationObserver co : copy)
-      co.propertyChanged(key);
-  }
-  
-  public void propertiesChanged(String key) {
-    Collection<ConfigurationObserver> copy = Collections.unmodifiableCollection(observers);
-    for (ConfigurationObserver co : copy)
-      co.propertiesChanged();
-  }
-  
-  @Override
-  public String get(Property property) {
-    String key = property.getKey();
-    String value = get(key);
-    
-    if (value == null || !property.getType().isValidFormat(value)) {
-      if (value != null)
-        log.error("Using default value for " + key + " due to improperly formatted " + property.getType() + ": " + value);
-      value = parent.get(property);
-    }
-    return value;
-  }
-  
-  private String get(String key) {
-    String zPath = ZooUtil.getRoot(instanceId) + Constants.ZTABLES + "/" + table + Constants.ZTABLE_CONF + "/" + key;
-    byte[] v = getTablePropCache().get(zPath);
-    String value = null;
-    if (v != null)
-      value = new String(v);
-    return value;
-  }
-  
-  @Override
-  public Iterator<Entry<String,String>> iterator() {
-    TreeMap<String,String> entries = new TreeMap<String,String>();
-    
-    for (Entry<String,String> parentEntry : parent)
-      entries.put(parentEntry.getKey(), parentEntry.getValue());
-    
-    List<String> children = getTablePropCache().getChildren(ZooUtil.getRoot(instanceId) + Constants.ZTABLES + "/" + table + Constants.ZTABLE_CONF);
-    if (children != null) {
-      for (String child : children) {
-        String value = get(child);
-        if (child != null && value != null)
-          entries.put(child, value);
-      }
-    }
-    
-    return entries.entrySet().iterator();
-  }
-  
-  public String getTableId() {
-    return table;
-  }
-}
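For illustration, a minimal observer registration against the per-table configuration deleted above, assuming ConfigurationObserver declares exactly the three callbacks dispatched here (propertyChanged, propertiesChanged, sessionExpired). The table id is a placeholder.

// Sketch only: reacting to per-table property changes via a ConfigurationObserver.
import org.apache.accumulo.core.conf.ConfigurationObserver;
import org.apache.accumulo.server.client.HdfsZooInstance;
import org.apache.accumulo.server.conf.ServerConfiguration;
import org.apache.accumulo.server.conf.TableConfiguration;

public class TableConfObserverSketch {
  public static void main(String[] args) {
    // Placeholder table id; real callers pass the id of the table they serve.
    TableConfiguration conf = ServerConfiguration.getTableConfiguration(HdfsZooInstance.getInstance(), "1");

    conf.addObserver(new ConfigurationObserver() {
      @Override
      public void propertyChanged(String key) {
        System.out.println("table property changed: " + key);
      }

      @Override
      public void propertiesChanged() {
        System.out.println("table properties changed in bulk");
      }

      @Override
      public void sessionExpired() {
        System.out.println("ZooKeeper session expired; re-read all properties");
      }
    });
  }
}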

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java b/server/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
deleted file mode 100644
index 18381c7..0000000
--- a/server/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.conf;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance.AccumuloNotInitializedException;
-import org.apache.log4j.Logger;
-
-public class ZooConfiguration extends AccumuloConfiguration {
-  private static final Logger log = Logger.getLogger(ZooConfiguration.class);
-  
-  private final AccumuloConfiguration parent;
-  private static ZooConfiguration instance = null;
-  private static String instanceId = null;
-  private static ZooCache propCache = null;
-  private final Map<String,String> fixedProps = Collections.synchronizedMap(new HashMap<String,String>());
-  
-  private ZooConfiguration(AccumuloConfiguration parent) {
-    this.parent = parent;
-  }
-  
-  synchronized public static ZooConfiguration getInstance(Instance inst, AccumuloConfiguration parent) {
-    if (instance == null) {
-      propCache = new ZooCache(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut());
-      instance = new ZooConfiguration(parent);
-      instanceId = inst.getInstanceID();
-    }
-    return instance;
-  }
-  
-  synchronized public static ZooConfiguration getInstance(AccumuloConfiguration parent) {
-    if (instance == null) {
-      propCache = new ZooCache(parent.get(Property.INSTANCE_ZK_HOST), (int) parent.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT));
-      instance = new ZooConfiguration(parent);
-      String deprecatedInstanceIdFromHdfs = ZooUtil.getInstanceIDFromHdfs(ServerConstants.getInstanceIdLocation());
-      instanceId = deprecatedInstanceIdFromHdfs;
-    }
-    return instance;
-  }
-  
-  @Override
-  public void invalidateCache() {
-    if (propCache != null)
-      propCache.clear();
-  }
-  
-  private String _get(Property property) {
-    String key = property.getKey();
-    String value = null;
-    
-    if (Property.isValidZooPropertyKey(key)) {
-      try {
-        value = get(key);
-      } catch (AccumuloNotInitializedException e) {
-        log.warn("failed to lookup property in zookeeper: " + key, e);
-      }
-    }
-    
-    if (value == null || !property.getType().isValidFormat(value)) {
-      if (value != null)
-        log.error("Using parent value for " + key + " due to improperly formatted " + property.getType() + ": " + value);
-      value = parent.get(property);
-    }
-    return value;
-  }
-  
-  @Override
-  public String get(Property property) {
-    if (Property.isFixedZooPropertyKey(property)) {
-      if (fixedProps.containsKey(property.getKey())) {
-        return fixedProps.get(property.getKey());
-      } else {
-        synchronized (fixedProps) {
-          String val = _get(property);
-          fixedProps.put(property.getKey(), val);
-          return val;
-        }
-        
-      }
-    } else {
-      return _get(property);
-    }
-  }
-  
-  private String get(String key) {
-    String zPath = ZooUtil.getRoot(instanceId) + Constants.ZCONFIG + "/" + key;
-    byte[] v = propCache.get(zPath);
-    String value = null;
-    if (v != null)
-      value = new String(v);
-    return value;
-  }
-  
-  @Override
-  public Iterator<Entry<String,String>> iterator() {
-    TreeMap<String,String> entries = new TreeMap<String,String>();
-    
-    for (Entry<String,String> parentEntry : parent)
-      entries.put(parentEntry.getKey(), parentEntry.getValue());
-    
-    List<String> children = propCache.getChildren(ZooUtil.getRoot(instanceId) + Constants.ZCONFIG);
-    if (children != null) {
-      for (String child : children) {
-        String value = get(child);
-        if (child != null && value != null)
-          entries.put(child, value);
-      }
-    }
-    
-    /*
-     * //this code breaks the shell's ability to show updates just made; the code is probably not needed, as fixed props are only obtained through get
-     * 
-     * for(Property prop : Property.getFixedProperties()) get(prop);
-     * 
-     * for(Entry<String, String> fprop : fixedProps.entrySet()) entries.put(fprop.getKey(), fprop.getValue());
-     */
-    
-    return entries.entrySet().iterator();
-  }
-}
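A small sketch of reading a system property through the ZooKeeper-backed configuration deleted above (obtained via ServerConfiguration). TSERV_MAJC_DELAY is assumed here to be a non-fixed property, so invalidateCache() makes the next lookup go back to ZooKeeper; fixed properties keep the first value read for the life of the process.

// Sketch only: non-fixed property lookups after a cache invalidation.
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.server.client.HdfsZooInstance;
import org.apache.accumulo.server.conf.ServerConfiguration;

public class ZooConfigurationSketch {
  public static void main(String[] args) {
    AccumuloConfiguration conf = ServerConfiguration.getSystemConfiguration(HdfsZooInstance.getInstance());

    // First lookup populates the ZooCache entry for this key.
    System.out.println("majc delay = " + conf.get(Property.TSERV_MAJC_DELAY));

    // Drop cached znode data and read again, picking up any live change.
    conf.invalidateCache();
    System.out.println("majc delay = " + conf.get(Property.TSERV_MAJC_DELAY));
  }
}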

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/constraints/ConstraintChecker.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/constraints/ConstraintChecker.java b/server/src/main/java/org/apache/accumulo/server/constraints/ConstraintChecker.java
deleted file mode 100644
index 6cbdc19..0000000
--- a/server/src/main/java/org/apache/accumulo/server/constraints/ConstraintChecker.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.constraints;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.constraints.Constraint;
-import org.apache.accumulo.core.constraints.Constraint.Environment;
-import org.apache.accumulo.core.constraints.Violations;
-import org.apache.accumulo.core.data.ComparableBytes;
-import org.apache.accumulo.core.data.ConstraintViolationSummary;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.server.conf.TableConfiguration;
-import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
-import org.apache.log4j.Logger;
-
-public class ConstraintChecker {
-  
-  private ArrayList<Constraint> constrains;
-  private static final Logger log = Logger.getLogger(ConstraintChecker.class);
-  
-  private ClassLoader loader;
-  private TableConfiguration conf;
-
-  private AtomicLong lastCheck = new AtomicLong(0);
-
-  public ConstraintChecker(TableConfiguration conf) {
-    constrains = new ArrayList<Constraint>();
-    
-    this.conf = conf;
-
-    try {
-      String context = conf.get(Property.TABLE_CLASSPATH);
-
-      if (context != null && !context.equals("")) {
-        loader = AccumuloVFSClassLoader.getContextManager().getClassLoader(context);
-      } else {
-        loader = AccumuloVFSClassLoader.getClassLoader();
-      }
-      
-      for (Entry<String,String> entry : conf) {
-        if (entry.getKey().startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey())) {
-          String className = entry.getValue();
-          Class<? extends Constraint> clazz = loader.loadClass(className).asSubclass(Constraint.class);
-          log.debug("Loaded constraint " + clazz.getName() + " for " + conf.getTableId());
-          constrains.add(clazz.newInstance());
-        }
-      }
-      
-      lastCheck.set(System.currentTimeMillis());
-
-    } catch (Throwable e) {
-      constrains.clear();
-      loader = null;
-      constrains.add(new UnsatisfiableConstraint((short) -1, "Failed to load constraints, not accepting mutations."));
-      log.error("Failed to load constraints " + conf.getTableId() + " " + e.toString(), e);
-    }
-  }
-  
-  public boolean classLoaderChanged() {
-    
-    if (constrains.size() == 0)
-      return false;
-
-    try {
-      String context = conf.get(Property.TABLE_CLASSPATH);
-      
-      ClassLoader currentLoader;
-      
-      if (context != null && !context.equals("")) {
-        currentLoader = AccumuloVFSClassLoader.getContextManager().getClassLoader(context);
-      } else {
-        currentLoader = AccumuloVFSClassLoader.getClassLoader();
-      }
-      
-      return currentLoader != loader;
-    } catch (Exception e) {
-      log.debug("Failed to check " + e.getMessage());
-      return true;
-    }
-  }
-
-  public Violations check(Environment env, Mutation m) {
-    
-    Violations violations = null;
-    
-    if (!env.getExtent().contains(new ComparableBytes(m.getRow()))) {
-      violations = new Violations();
-      
-      ConstraintViolationSummary cvs = new ConstraintViolationSummary(SystemConstraint.class.getName(), (short) -1, "Mutation outside of tablet extent", 1);
-      violations.add(cvs);
-      
-      // do not bother with further checks since this mutation does not go with this tablet
-      return violations;
-    }
-    
-    for (Constraint constraint : constrains) {
-      List<Short> violationCodes = null;
-      Throwable throwable = null;
-      
-      try {
-        violationCodes = constraint.check(env, m);
-      } catch (Throwable t) {
-        throwable = t;
-        log.warn("CONSTRAINT FAILED : " + throwable.getMessage(), t);
-      }
-      
-      if (violationCodes != null) {
-        for (Short vcode : violationCodes) {
-          ConstraintViolationSummary cvs = new ConstraintViolationSummary(constraint.getClass().getName(), vcode, constraint.getViolationDescription(vcode), 1);
-          if (violations == null)
-            violations = new Violations();
-          violations.add(cvs);
-        }
-      } else if (throwable != null) {
-        // constraint failed in some way, do not allow mutation to pass
-        
-        short vcode;
-        String msg;
-        
-        if (throwable instanceof NullPointerException) {
-          vcode = -1;
-          msg = "threw NullPointerException";
-        } else if (throwable instanceof ArrayIndexOutOfBoundsException) {
-          vcode = -2;
-          msg = "threw ArrayIndexOutOfBoundsException";
-        } else if (throwable instanceof NumberFormatException) {
-          vcode = -3;
-          msg = "threw NumberFormatException";
-        } else if (throwable instanceof IOException) {
-          vcode = -4;
-          msg = "threw IOException (or subclass of)";
-        } else {
-          vcode = -100;
-          msg = "threw some Exception";
-        }
-        
-        ConstraintViolationSummary cvs = new ConstraintViolationSummary(constraint.getClass().getName(), vcode, "CONSTRAINT FAILED : " + msg, 1);
-        if (violations == null)
-          violations = new Violations();
-        violations.add(cvs);
-      }
-    }
-    
-    return violations;
-  }
-}
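Finally, a sketch of the kind of Constraint the checker above instantiates from table.constraint.* (Property.TABLE_CONSTRAINT_PREFIX) entries. The class name and violation code below are illustrative only; a table would opt in by setting, for example, table.constraint.1 to the fully qualified class name so the checker can load it from the table's classloader context.

// Sketch only: a custom Constraint rejecting empty values.
import java.util.Collections;
import java.util.List;

import org.apache.accumulo.core.constraints.Constraint;
import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Mutation;

public class NoEmptyValueConstraint implements Constraint {

  private static final short EMPTY_VALUE = 1;

  @Override
  public String getViolationDescription(short violationCode) {
    if (violationCode == EMPTY_VALUE)
      return "Mutation contained an empty value";
    return null;
  }

  @Override
  public List<Short> check(Environment env, Mutation mutation) {
    for (ColumnUpdate update : mutation.getUpdates()) {
      if (update.getValue().length == 0)
        return Collections.singletonList(EMPTY_VALUE);
    }
    // Returning null (or an empty list) means no violations.
    return null;
  }
}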

