accumulo-commits mailing list archives

From e..@apache.org
Subject svn commit: r1310457 [3/3] - in /accumulo/trunk: core/src/main/java/org/apache/accumulo/core/client/impl/ core/src/main/java/org/apache/accumulo/core/conf/ core/src/main/java/org/apache/accumulo/core/data/ server/src/main/java/org/apache/accumulo/serve...
Date Fri, 06 Apr 2012 16:35:31 GMT
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/functional/PermissionsTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/functional/PermissionsTest.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/functional/PermissionsTest.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/functional/PermissionsTest.java Fri Apr  6 16:35:29 2012
@@ -31,6 +31,7 @@ import org.apache.accumulo.core.client.A
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableExistsException;
@@ -140,7 +141,7 @@ public class PermissionsTest {
             throw new IllegalStateException("Should NOT be able to set a table property");
           } catch (AccumuloSecurityException e) {
             if (e.getErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-                || ServerConfiguration.getTableConfiguration(tableId).get(Property.TABLE_BLOOM_ERRORRATE).equals("003.14159%"))
+                || ServerConfiguration.getTableConfiguration(root_conn.getInstance(), tableId).get(Property.TABLE_BLOOM_ERRORRATE).equals("003.14159%"))
               throw e;
           }
          root_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
@@ -149,7 +150,8 @@ public class PermissionsTest {
             throw new IllegalStateException("Should NOT be able to remove a table property");
           } catch (AccumuloSecurityException e) {
             if (e.getErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-                || !ServerConfiguration.getTableConfiguration(tableId).get(Property.TABLE_BLOOM_ERRORRATE).equals("003.14159%"))
+                || !ServerConfiguration.getTableConfiguration(root_conn.getInstance(), tableId).get(Property.TABLE_BLOOM_ERRORRATE)
+                    .equals("003.14159%"))
               throw e;
           }
           String table2 = tableName + "2";
@@ -227,11 +229,12 @@ public class PermissionsTest {
           String table2 = tableName + "2";
           root_conn.tableOperations().create(tableName);
           tableId = Tables.getNameToIdMap(root_conn.getInstance()).get(tableName);
+          Instance instance = root_conn.getInstance();
          test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
-          if (!ServerConfiguration.getTableConfiguration(tableId).get(Property.TABLE_BLOOM_ERRORRATE).equals("003.14159%"))
+          if (!ServerConfiguration.getTableConfiguration(instance, tableId).get(Property.TABLE_BLOOM_ERRORRATE).equals("003.14159%"))
             throw new IllegalStateException("Should be able to set a table property");
           test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
-          if (ServerConfiguration.getTableConfiguration(tableId).get(Property.TABLE_BLOOM_ERRORRATE).equals("003.14159%"))
+          if (ServerConfiguration.getTableConfiguration(instance, tableId).get(Property.TABLE_BLOOM_ERRORRATE).equals("003.14159%"))
             throw new IllegalStateException("Should be able to remove a table property");
           test_user_conn.tableOperations().rename(tableName, table2);
           if (root_conn.tableOperations().list().contains(tableName) || !root_conn.tableOperations().list().contains(table2))

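The hunk above moves table-configuration lookups to the two-argument form that takes an explicit Instance instead of relying on static server state. A minimal sketch of the new call shape, using only methods visible in this diff (the connector "conn" and table name "myTable" are hypothetical):

    // hypothetical connector and table name, for illustration only
    Instance instance = conn.getInstance();
    String tableId = Tables.getNameToIdMap(instance).get("myTable");
    // new form: pass the Instance explicitly
    String rate = ServerConfiguration.getTableConfiguration(instance, tableId).get(Property.TABLE_BLOOM_ERRORRATE);
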
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/performance/scan/CollectTabletStats.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/performance/scan/CollectTabletStats.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/performance/scan/CollectTabletStats.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/performance/scan/CollectTabletStats.java Fri Apr  6 16:35:29 2012
@@ -65,10 +65,12 @@ import org.apache.accumulo.core.iterator
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.thrift.AuthInfo;
 import org.apache.accumulo.core.util.AddressUtil;
+import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.Stat;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
@@ -108,7 +110,8 @@ public class CollectTabletStats {
           + " [-i <iterations>] [-t <num threads>] [-l|-f] [-c <column fams>]
<instance> <zookeepers> <user> <pass> <table> <auths>
<batch size>");
       return;
     }
-    
+    final FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+
     String instance = processedArgs[0];
     String zookeepers = processedArgs[1];
     String user = processedArgs[2];
@@ -116,8 +119,8 @@ public class CollectTabletStats {
     final String tableName = processedArgs[4];
     final String auths[] = processedArgs[5].split(",");
     final int batchSize = Integer.parseInt(processedArgs[6]);
-    
     ZooKeeperInstance zki = new ZooKeeperInstance(instance, zookeepers);
+    final ServerConfiguration sconf = new ServerConfiguration(zki);
     
     String tableId = Tables.getNameToIdMap(zki).get(tableName);
     
@@ -163,7 +166,7 @@ public class CollectTabletStats {
         final List<String> files = tabletFiles.get(ke);
         Test test = new Test(ke) {
           public int runTest() throws Exception {
-            return readFiles(files, ke, columns);
+            return readFiles(fs, sconf.getConfiguration(), files, ke, columns);
           }
           
         };
@@ -182,7 +185,7 @@ public class CollectTabletStats {
         final List<String> files = tabletFiles.get(ke);
         Test test = new Test(ke) {
           public int runTest() throws Exception {
-            return readFilesUsingIterStack(files, auths, ke, columns, false);
+            return readFilesUsingIterStack(fs, sconf, files, auths, ke, columns, false);
           }
         };
         
@@ -199,7 +202,7 @@ public class CollectTabletStats {
         final List<String> files = tabletFiles.get(ke);
         Test test = new Test(ke) {
           public int runTest() throws Exception {
-            return readFilesUsingIterStack(files, auths, ke, columns, true);
+            return readFilesUsingIterStack(fs, sconf, files, auths, ke, columns, true);
           }
         };
         
@@ -426,7 +429,7 @@ public class CollectTabletStats {
   
  private static SortedKeyValueIterator<Key,Value> createScanIterator(KeyExtent ke, Collection<SortedKeyValueIterator<Key,Value>> mapfiles,
      Authorizations authorizations, byte[] defaultLabels, HashSet<Column> columnSet, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
-      boolean useTableIterators) throws IOException {
+      boolean useTableIterators, TableConfiguration conf) throws IOException {
     
     SortedMapIterator smi = new SortedMapIterator(new TreeMap<Key,Value>());
     
@@ -441,23 +444,18 @@ public class CollectTabletStats {
     VisibilityFilter visFilter = new VisibilityFilter(colFilter, authorizations, defaultLabels);
     
     if (useTableIterators)
-      return IteratorUtil.loadIterators(IteratorScope.scan, visFilter, ke, ServerConfiguration.getTableConfiguration(ke.getTableId().toString()), ssiList,
-          ssio, null);
+      return IteratorUtil.loadIterators(IteratorScope.scan, visFilter, ke, conf, ssiList, ssio, null);
     return visFilter;
   }
   
-  private static int readFiles(List<String> files, KeyExtent ke, String[] columns) throws Exception {
-    
-    Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.get(conf);
-    AccumuloConfiguration acuconf = ServerConfiguration.getSystemConfiguration();
+  private static int readFiles(FileSystem fs, AccumuloConfiguration aconf, List<String> files, KeyExtent ke, String[] columns) throws Exception {
     
     int count = 0;
     
     HashSet<ByteSequence> columnSet = createColumnBSS(columns);
     
     for (String file : files) {
-      FileSKVIterator reader = FileOperations.getInstance().openReader(file, false, fs, conf, acuconf);
+      FileSKVIterator reader = FileOperations.getInstance().openReader(file, false, fs, fs.getConf(), aconf);
       Range range = new Range(ke.getPrevEndRow(), false, ke.getEndRow(), true);
       reader.seek(range, columnSet, columnSet.size() == 0 ? false : true);
       while (reader.hasTop() && !range.afterEndKey(reader.getTopKey())) {
@@ -478,22 +476,22 @@ public class CollectTabletStats {
     return columnSet;
   }
   
-  private static int readFilesUsingIterStack(List<String> files, String auths[], KeyExtent ke, String[] columns, boolean useTableIterators) throws Exception {
-    Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.get(conf);
-    AccumuloConfiguration acuconf = ServerConfiguration.getSystemConfiguration();
+  private static int readFilesUsingIterStack(FileSystem fs, ServerConfiguration aconf, List<String> files, String auths[], KeyExtent ke, String[] columns,
+      boolean useTableIterators)
+      throws Exception {
     
     SortedKeyValueIterator<Key,Value> reader;
     
     List<SortedKeyValueIterator<Key,Value>> readers = new ArrayList<SortedKeyValueIterator<Key,Value>>(files.size());
     
     for (String file : files) {
-      readers.add(FileOperations.getInstance().openReader(file, false, fs, conf, acuconf));
+      readers.add(FileOperations.getInstance().openReader(file, false, fs, fs.getConf(), aconf.getConfiguration()));
     }
     
     List<IterInfo> emptyIterinfo = Collections.emptyList();
     Map<String,Map<String,String>> emptySsio = Collections.emptyMap();
-    reader = createScanIterator(ke, readers, new Authorizations(auths), new byte[] {}, new HashSet<Column>(), emptyIterinfo, emptySsio, useTableIterators);
+    TableConfiguration tconf = aconf.getTableConfiguration(ke.getTableId().toString());
+    reader = createScanIterator(ke, readers, new Authorizations(auths), new byte[] {}, new HashSet<Column>(), emptyIterinfo, emptySsio, useTableIterators, tconf);
     
     HashSet<ByteSequence> columnSet = createColumnBSS(columns);
     

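CollectTabletStats now creates one FileSystem and one ServerConfiguration up front and passes them into the read helpers, rather than building a Configuration and looking up static system configuration inside each helper. A rough sketch of the new wiring, assuming hypothetical instance name and ZooKeeper host values:

    // "myInstance" and "zkhost:2181" are hypothetical values
    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
    ZooKeeperInstance zki = new ZooKeeperInstance("myInstance", "zkhost:2181");
    ServerConfiguration sconf = new ServerConfiguration(zki);
    AccumuloConfiguration systemConf = sconf.getConfiguration();           // system-level settings
    TableConfiguration tableConf = sconf.getTableConfiguration(tableId);   // per-table settings; tableId from Tables.getNameToIdMap(zki)
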
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/performance/thrift/NullTserver.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/performance/thrift/NullTserver.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/performance/thrift/NullTserver.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/performance/thrift/NullTserver.java Fri Apr  6 16:35:29 2012
@@ -26,6 +26,7 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.accumulo.cloudtrace.thrift.TInfo;
+import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.data.KeyExtent;
@@ -50,6 +51,7 @@ import org.apache.accumulo.core.tabletse
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.server.client.ClientServiceHandler;
+import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.state.Assignment;
 import org.apache.accumulo.server.master.state.MetaDataStateStore;
 import org.apache.accumulo.server.master.state.MetaDataTableScanner;
@@ -74,8 +76,8 @@ public class NullTserver {
     
     private long updateSession = 1;
     
-    ThriftClientHandler(TransactionWatcher watcher) {
-      super(watcher);
+    ThriftClientHandler(Instance instance, TransactionWatcher watcher) {
+      super(instance, watcher);
     }
     
     @Override
@@ -196,7 +198,7 @@ public class NullTserver {
     int port = Integer.parseInt(args[3]);
     
     TransactionWatcher watcher = new TransactionWatcher();
-    ThriftClientHandler tch = new ThriftClientHandler(watcher);
+    ThriftClientHandler tch = new ThriftClientHandler(HdfsZooInstance.getInstance(), watcher);
     TabletClientService.Processor processor = new TabletClientService.Processor(tch);
     TServerUtils.startTServer(port, processor, "NullTServer", "null tserver", 2, 1000);
     

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java Fri Apr  6 16:35:29 2012
@@ -66,8 +66,7 @@ import org.apache.zookeeper.Watcher.Even
 public class TraceServer implements Watcher {
   
   final private static Logger log = Logger.getLogger(TraceServer.class);
-  final private AccumuloConfiguration conf;
-  final private Instance instance;
+  final private ServerConfiguration serverConfiguration;
   final private TServer server;
   private BatchWriter writer = null;
   private Connector connector;
@@ -148,13 +147,13 @@ public class TraceServer implements Watc
     
   }
   
-  public TraceServer(Instance instance, String hostname) throws Exception {
-    this.instance = instance;
-    conf = ServerConfiguration.getSystemConfiguration();
+  public TraceServer(ServerConfiguration serverConfiguration, String hostname) throws Exception {
+    this.serverConfiguration = serverConfiguration;
+    AccumuloConfiguration conf = serverConfiguration.getConfiguration();
     table = conf.get(Property.TRACE_TABLE);
     while (true) {
       try {
-        connector = instance.getConnector(conf.get(Property.TRACE_USER), conf.get(Property.TRACE_PASSWORD).getBytes());
+        connector = serverConfiguration.getInstance().getConnector(conf.get(Property.TRACE_USER), conf.get(Property.TRACE_PASSWORD).getBytes());
         if (!connector.tableOperations().exists(table)) {
           connector.tableOperations().create(table);
         }
@@ -217,18 +216,19 @@ public class TraceServer implements Watc
   
 
   private void registerInZooKeeper(String name) throws Exception {
-    String root = ZooUtil.getRoot(instance) + Constants.ZTRACERS;
+    String root = ZooUtil.getRoot(serverConfiguration.getInstance()) + Constants.ZTRACERS;
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
     String path = zoo.putEphemeralSequential(root + "/trace-", name.getBytes());
     zoo.exists(path, this);
   }
   
   public static void main(String[] args) throws Exception {
-    FileSystem fs = FileUtil.getFileSystem(CachedConfiguration.getInstance(), ServerConfiguration.getSiteConfiguration());
-    Accumulo.init(fs, "tracer");
-    String hostname = Accumulo.getLocalAddress(args);
     Instance instance = HdfsZooInstance.getInstance();
-    TraceServer server = new TraceServer(instance, hostname);
+    ServerConfiguration conf = new ServerConfiguration(instance);
+    FileSystem fs = FileUtil.getFileSystem(CachedConfiguration.getInstance(), conf.getConfiguration());
+    Accumulo.init(fs, conf, "tracer");
+    String hostname = Accumulo.getLocalAddress(args);
+    TraceServer server = new TraceServer(conf, hostname);
     Accumulo.enableTracing(hostname, "tserver");
     server.run();
     log.info("tracer stopping");

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java Fri Apr  6 16:35:29 2012
@@ -21,11 +21,12 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.ScannerImpl;
 import org.apache.accumulo.core.client.impl.Writer;
@@ -35,8 +36,11 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.thrift.AuthInfo;
 import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException;
+import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.ColumnFQ;
 import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -102,15 +106,15 @@ public class CheckForMetadataProblems {
       sawProblems = true;
   }
   
-  public static void checkMetadataTableEntries(boolean offline, boolean patch) throws Exception {
+  public static void checkMetadataTableEntries(ServerConfiguration conf, FileSystem fs, boolean offline, boolean patch) throws Exception {
     Map<String,TreeSet<KeyExtent>> tables = new HashMap<String,TreeSet<KeyExtent>>();
     
     Scanner scanner;
     
     if (offline) {
-      scanner = new OfflineMetadataScanner();
+      scanner = new OfflineMetadataScanner(conf.getConfiguration(), fs);
     } else {
-      scanner = new ScannerImpl(HdfsZooInstance.getInstance(), new AuthInfo(user, ByteBuffer.wrap(pass), HdfsZooInstance.getInstance().getInstanceID()),
+      scanner = new ScannerImpl(conf.getInstance(), new AuthInfo(user, ByteBuffer.wrap(pass), conf.getInstance().getInstanceID()),
           Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
     }
     
@@ -215,12 +219,16 @@ public class CheckForMetadataProblems {
   public static void main(String[] args) throws Exception {
     args = processOptions(args);
     
+    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+    Instance instance = HdfsZooInstance.getInstance();
+    ServerConfiguration conf = new ServerConfiguration(instance);
+
     if (args.length == 2) {
       user = args[0];
       pass = args[1].getBytes();
-      checkMetadataTableEntries(offline, fix);
+      checkMetadataTableEntries(conf, fs, offline, fix);
     } else if (args.length == 0 && offline) {
-      checkMetadataTableEntries(offline, fix);
+      checkMetadataTableEntries(conf, fs, offline, fix);
     } else {
       System.out.println("Usage: " + CheckForMetadataProblems.class.getName() + " (--offline)|([--debug]
[--fix] <username> <password>)");
       System.exit(-1);

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/DumpTabletsOnServer.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/DumpTabletsOnServer.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/DumpTabletsOnServer.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/DumpTabletsOnServer.java Fri Apr  6 16:35:29 2012
@@ -46,12 +46,13 @@ public class DumpTabletsOnServer {
       System.exit(-1);
     }
     Instance instance = HdfsZooInstance.getInstance();
+    ServerConfiguration conf = new ServerConfiguration(instance);
     String tableId = Tables.getTableId(instance, args[1]);
     if (tableId == null) {
       System.err.println("Cannot find table " + args[1] + " in zookeeper");
       System.exit(-1);
     }
-    List<TabletStats> onlineTabletsForTable = ThriftUtil.getTServerClient(args[0], ServerConfiguration.getSystemConfiguration()).getTabletStats(null,
+    List<TabletStats> onlineTabletsForTable = ThriftUtil.getTServerClient(args[0], conf.getConfiguration()).getTabletStats(null,
         SecurityConstants.getSystemCredentials(), tableId);
     for (TabletStats stats : onlineTabletsForTable) {
       print("%s", stats.extent);

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/FileSystemMonitor.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/FileSystemMonitor.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/FileSystemMonitor.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/FileSystemMonitor.java Fri Apr  6 16:35:29 2012
@@ -30,8 +30,8 @@ import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
 
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 
@@ -134,8 +134,8 @@ public class FileSystemMonitor {
     }
   }
   
-  public static void start(Property prop) {
-    if (ServerConfiguration.getSystemConfiguration().getBoolean(prop)) {
+  public static void start(AccumuloConfiguration conf, Property prop) {
+    if (conf.getBoolean(prop)) {
       if (new File(PROC_MOUNTS).exists()) {
         try {
           new FileSystemMonitor(PROC_MOUNTS, 60000);

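FileSystemMonitor.start now receives its AccumuloConfiguration from the caller instead of reading ServerConfiguration.getSystemConfiguration() itself. A sketch of a call site, assuming the caller already holds a ServerConfiguration; the Property constant below is only a placeholder, since the real call sites are not shown in this diff:

    // placeholder property name; substitute whatever boolean property the caller actually checks
    AccumuloConfiguration conf = serverConfiguration.getConfiguration();
    FileSystemMonitor.start(conf, Property.TSERV_MONITOR_FS);
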
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/ListInstances.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/ListInstances.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/ListInstances.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/ListInstances.java Fri Apr  6 16:35:29 2012
@@ -56,7 +56,7 @@ public class ListInstances {
     if (args.length == 1) {
       zooKeepers = args[0];
     } else {
-      zooKeepers = ServerConfiguration.getSystemConfiguration().get(Property.INSTANCE_ZK_HOST);
+      zooKeepers = ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_ZK_HOST);
     }
     
     System.out.println("INFO : Using ZooKeepers " + zooKeepers);

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java Fri Apr  6 16:35:29 2012
@@ -51,6 +51,7 @@ import org.apache.accumulo.core.client.i
 import org.apache.accumulo.core.client.impl.ScannerImpl;
 import org.apache.accumulo.core.client.impl.ThriftScanner;
 import org.apache.accumulo.core.client.impl.Writer;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.Column;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
@@ -231,7 +232,7 @@ public class MetadataTable extends org.a
   }
   
  public static void updateTabletFlushID(KeyExtent extent, long flushID, AuthInfo credentials, ZooLock zooLock) {
-    if (!extent.equals(Constants.ROOT_TABLET_EXTENT)) {
+    if (!extent.isRootTablet()) {
       Mutation m = new Mutation(extent.getMetadataEntry());
       ColumnFQ.put(m, Constants.METADATA_FLUSH_COLUMN, new Value((flushID + "").getBytes()));
       update(credentials, zooLock, m);
@@ -239,7 +240,7 @@ public class MetadataTable extends org.a
   }
   
  public static void updateTabletCompactID(KeyExtent extent, long compactID, AuthInfo credentials, ZooLock zooLock) {
-    if (!extent.equals(Constants.ROOT_TABLET_EXTENT)) {
+    if (!extent.isRootTablet()) {
       Mutation m = new Mutation(extent.getMetadataEntry());
       ColumnFQ.put(m, Constants.METADATA_COMPACT_COLUMN, new Value((compactID + "").getBytes()));
       update(credentials, zooLock, m);
@@ -322,12 +323,13 @@ public class MetadataTable extends org.a
     return results;
   }
   
-  public static boolean getBatchFromRootTablet(AuthInfo credentials, Text startRow, SortedMap<Key,Value> results, SortedSet<Column> columns,
+  public static boolean getBatchFromRootTablet(AccumuloConfiguration conf, AuthInfo credentials, Text startRow, SortedMap<Key,Value> results,
+      SortedSet<Column> columns,
       boolean skipStartRow, int size) throws AccumuloSecurityException {
     while (true) {
       try {
        return ThriftScanner.getBatchFromServer(credentials, startRow, Constants.ROOT_TABLET_EXTENT, HdfsZooInstance.getInstance().getRootTabletLocation(),
-            results, columns, skipStartRow, size, Constants.NO_AUTHS, true, ServerConfiguration.getSystemConfiguration());
+            results, columns, skipStartRow, size, Constants.NO_AUTHS, true, conf);
       } catch (NotServingTabletException e) {
         UtilWaitThread.sleep(100);
       } catch (AccumuloException e) {
@@ -341,24 +343,24 @@ public class MetadataTable extends org.a
    * convenience method for reading a metadata tablet's data file entries from the root tablet
    * 
    */
-  public static SortedMap<Key,Value> getRootMetadataDataFileEntries(KeyExtent extent, AuthInfo credentials) {
+  public static SortedMap<Key,Value> getRootMetadataDataFileEntries(AccumuloConfiguration conf, KeyExtent extent, AuthInfo credentials) {
     SortedSet<Column> columns = new TreeSet<Column>();
    columns.add(new Column(TextUtil.getBytes(Constants.METADATA_DATAFILE_COLUMN_FAMILY), null, null));
-    return getRootMetadataDataEntries(extent, columns, credentials);
+    return getRootMetadataDataEntries(conf, extent, columns, credentials);
   }
   
-  public static SortedMap<Key,Value> getRootMetadataDataEntries(KeyExtent extent, SortedSet<Column> columns, AuthInfo credentials) {
+  public static SortedMap<Key,Value> getRootMetadataDataEntries(AccumuloConfiguration conf, KeyExtent extent, SortedSet<Column> columns, AuthInfo credentials) {
     
     try {
       SortedMap<Key,Value> entries = new TreeMap<Key,Value>();
       
       Text metadataEntry = extent.getMetadataEntry();
       Text startRow;
-      boolean more = getBatchFromRootTablet(credentials, metadataEntry, entries, columns, false, Constants.SCAN_BATCH_SIZE);
+      boolean more = getBatchFromRootTablet(conf, credentials, metadataEntry, entries, columns, false, Constants.SCAN_BATCH_SIZE);
       
       while (more) {
         startRow = entries.lastKey().getRow(); // set end row
-        more = getBatchFromRootTablet(credentials, startRow, entries, columns, false, Constants.SCAN_BATCH_SIZE);
+        more = getBatchFromRootTablet(conf, credentials, startRow, entries, columns, false, Constants.SCAN_BATCH_SIZE);
       }
       
       Iterator<Key> iter = entries.keySet().iterator();
@@ -813,7 +815,7 @@ public class MetadataTable extends org.a
       return;
     // entries should be a complete log set, so we should only need to write the first entry
     LogEntry entry = entries.get(0);
-    if (entry.extent.equals(Constants.ROOT_TABLET_EXTENT)) {
+    if (entry.extent.isRootTablet()) {
       String root = getZookeeperLogLocation();
       while (true) {
         try {
@@ -856,7 +858,7 @@ public class MetadataTable extends org.a
     ArrayList<LogEntry> result = new ArrayList<LogEntry>();
     TreeMap<String,DataFileValue> sizes = new TreeMap<String,DataFileValue>();
     
-    if (extent.equals(Constants.ROOT_TABLET_EXTENT)) {
+    if (extent.isRootTablet()) {
       getRootLogEntries(result);
      FileSystem fs = TraceFileSystem.wrap(FileUtil.getFileSystem(CachedConfiguration.getInstance(), ServerConfiguration.getSiteConfiguration()));
       FileStatus[] files = fs.listStatus(new Path(ServerConstants.getRootTabletDir()));
@@ -992,7 +994,7 @@ public class MetadataTable extends org.a
   
  public static void removeUnusedWALEntries(KeyExtent extent, List<LogEntry> logEntries, ZooLock zooLock) {
     for (LogEntry entry : logEntries) {
-      if (entry.extent.equals(Constants.ROOT_TABLET_EXTENT)) {
+      if (entry.extent.isRootTablet()) {
         String root = getZookeeperLogLocation();
         while (true) {
           try {

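Several of the hunks above replace extent.equals(Constants.ROOT_TABLET_EXTENT) with the KeyExtent.isRootTablet() convenience method. Illustrative use only (the entry variable is hypothetical):

    if (entry.extent.isRootTablet()) {
      // the root tablet's log entries live in ZooKeeper, so they are handled separately from the metadata table
    }
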
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java Fri Apr  6 16:35:29 2012
@@ -31,6 +31,7 @@ import java.util.Set;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.ScannerOptions;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.Column;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
@@ -47,9 +48,9 @@ import org.apache.accumulo.core.util.Cac
 import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.util.MetadataTable.LogEntry;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -58,11 +59,13 @@ public class OfflineMetadataScanner exte
   
   private Set<String> allFiles = new HashSet<String>();
   private Range range = new Range();
+  private final FileSystem fs;
+  private final AccumuloConfiguration conf;
   
-  private List<SortedKeyValueIterator<Key,Value>> openMapFiles(Collection<String> files, FileSystem fs, Configuration conf) throws IOException {
+  private List<SortedKeyValueIterator<Key,Value>> openMapFiles(Collection<String> files, FileSystem fs, AccumuloConfiguration conf) throws IOException {
     List<SortedKeyValueIterator<Key,Value>> readers = new ArrayList<SortedKeyValueIterator<Key,Value>>();
     for (String file : files) {
-      FileSKVIterator reader = FileOperations.getInstance().openReader(file, true, fs, conf, ServerConfiguration.getSystemConfiguration());
+      FileSKVIterator reader = FileOperations.getInstance().openReader(file, true, fs, fs.getConf(), conf);
       readers.add(reader);
     }
     return readers;
@@ -112,9 +115,10 @@ public class OfflineMetadataScanner exte
     
   }
   
-  public OfflineMetadataScanner() throws IOException {
+  public OfflineMetadataScanner(AccumuloConfiguration conf, FileSystem fs) throws IOException {
     super();
-    
+    this.fs = fs;
+    this.conf = conf;
     List<LogEntry> rwal;
     try {
       rwal = MetadataTable.getLogEntries(null, Constants.ROOT_TABLET_EXTENT);
@@ -126,9 +130,6 @@ public class OfflineMetadataScanner exte
       throw new RuntimeException("Root tablet has write ahead logs, can not scan offline");
     }
     
-    Configuration conf = CachedConfiguration.getInstance();
-    FileSystem fs = FileSystem.get(conf);
-    
     FileStatus[] rootFiles = fs.listStatus(new Path(ServerConstants.getRootTabletDir()));
     
     for (FileStatus rootFile : rootFiles) {
@@ -188,8 +189,6 @@ public class OfflineMetadataScanner exte
     final SortedKeyValueIterator<Key,Value> ssi;
     final List<SortedKeyValueIterator<Key,Value>> readers;
     try {
-      Configuration conf = CachedConfiguration.getInstance();
-      FileSystem fs = FileSystem.get(conf);
       readers = openMapFiles(allFiles, fs, conf);
       ssi = createSystemIter(range, readers, new HashSet<Column>(getFetchedColumns()));
     } catch (IOException e) {
@@ -252,7 +251,9 @@ public class OfflineMetadataScanner exte
   }
   
   public static void main(String[] args) throws IOException {
-    OfflineMetadataScanner scanner = new OfflineMetadataScanner();
+    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+    ServerConfiguration conf = new ServerConfiguration(HdfsZooInstance.getInstance());
+    OfflineMetadataScanner scanner = new OfflineMetadataScanner(conf.getConfiguration(), fs);
     scanner.setRange(Constants.METADATA_KEYSPACE);
     for (Entry<Key,Value> entry : scanner)
       System.out.println(entry.getKey() + " " + entry.getValue());

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TServerUtils.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TServerUtils.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TServerUtils.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TServerUtils.java Fri Apr  6 16:35:29 2012
@@ -33,13 +33,13 @@ import java.util.concurrent.ThreadPoolEx
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.LoggingRunnable;
 import org.apache.accumulo.core.util.TBufferedSocket;
 import org.apache.accumulo.core.util.ThriftUtil;
 import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.thrift.metrics.ThriftMetrics;
 import org.apache.accumulo.server.util.time.SimpleTimer;
 import org.apache.log4j.Logger;
@@ -88,18 +88,19 @@ public class TServerUtils {
    * @throws UnknownHostException
    *           when we don't know our own address
    */
-  public static ServerPort startServer(Property portHintProperty, TProcessor processor, String serverName, String threadName, Property portSearchProperty,
+  public static ServerPort startServer(AccumuloConfiguration conf, Property portHintProperty, TProcessor processor, String serverName, String threadName,
+      Property portSearchProperty,
      Property minThreadProperty, Property timeBetweenThreadChecksProperty) throws UnknownHostException {
-    int portHint = ServerConfiguration.getSystemConfiguration().getPort(portHintProperty);
+    int portHint = conf.getPort(portHintProperty);
     int minThreads = 2;
     if (minThreadProperty != null)
-      minThreads = ServerConfiguration.getSystemConfiguration().getCount(minThreadProperty);
+      minThreads = conf.getCount(minThreadProperty);
     long timeBetweenThreadChecks = 1000;
     if (timeBetweenThreadChecksProperty != null)
-      timeBetweenThreadChecks = ServerConfiguration.getSystemConfiguration().getTimeInMillis(timeBetweenThreadChecksProperty);
+      timeBetweenThreadChecks = conf.getTimeInMillis(timeBetweenThreadChecksProperty);
     boolean portSearch = false;
     if (portSearchProperty != null)
-      portSearch = ServerConfiguration.getSystemConfiguration().getBoolean(portSearchProperty);
+      portSearch = conf.getBoolean(portSearchProperty);
     Random random = new Random();
     for (int j = 0; j < 100; j++) {
       

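TServerUtils.startServer now takes the AccumuloConfiguration as an explicit first argument; as the surrounding code shows, the optional Property arguments may still be null, in which case the defaults apply (2 minimum threads, 1000 ms between thread checks, no port search). A hedged sketch of a call, where the port property and processor are hypothetical stand-ins for a caller's real values:

    // "portProperty" and "processor" are hypothetical
    ServerPort sp = TServerUtils.startServer(conf, portProperty, processor, "MyServer", "my handler", null, null, null);
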
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java Fri Apr  6 16:35:29 2012
@@ -34,9 +34,10 @@ public class TableDiskUsage {
     FileSystem fs = FileSystem.get(new Configuration());
     
     Instance instance = HdfsZooInstance.getInstance();
+    ServerConfiguration conf = new ServerConfiguration(instance);
     Connector conn = instance.getConnector("root", "secret");
     
-    org.apache.accumulo.core.util.TableDiskUsage.printDiskUsage(ServerConfiguration.getSystemConfiguration(), Arrays.asList(args), fs, conn);
+    org.apache.accumulo.core.util.TableDiskUsage.printDiskUsage(conf.getConfiguration(), Arrays.asList(args), fs, conn);
   }
   
 }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java Fri Apr  6 16:35:29 2012
@@ -40,6 +40,7 @@ import org.apache.accumulo.core.client.C
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.thrift.InitialMultiScan;
@@ -127,13 +128,14 @@ public class VerifyTabletAssignments {
     }
     
     Connector conn = instance.getConnector(user, passw.getBytes());
-    
+    ServerConfiguration conf = new ServerConfiguration(instance);
     for (String table : conn.tableOperations().list())
-      checkTable(user, passw, table, null, cl.hasOption(verboseOption.getOpt()));
+      checkTable(conf.getConfiguration(), user, passw, table, null, cl.hasOption(verboseOption.getOpt()));
     
   }
   
-  private static void checkTable(final String user, final String pass, String table, HashSet<KeyExtent> check, boolean verbose) throws AccumuloException,
+  private static void checkTable(final AccumuloConfiguration conf, final String user, final String pass, String table, HashSet<KeyExtent> check, boolean verbose)
+      throws AccumuloException,
       AccumuloSecurityException, TableNotFoundException, InterruptedException {
     
     if (check == null)
@@ -176,7 +178,7 @@ public class VerifyTabletAssignments {
         @Override
         public void run() {
           try {
-            checkTabletServer(user, ByteBuffer.wrap(pass.getBytes()), entry, failures);
+            checkTabletServer(conf, user, ByteBuffer.wrap(pass.getBytes()), entry, failures);
           } catch (Exception e) {
             System.err.println("Failure on ts " + entry.getKey() + " " + e.getMessage());
             e.printStackTrace();
@@ -194,7 +196,7 @@ public class VerifyTabletAssignments {
     while (!tp.awaitTermination(1, TimeUnit.HOURS)) {}
     
     if (failures.size() > 0)
-      checkTable(user, pass, table, failures, verbose);
+      checkTable(conf, user, pass, table, failures, verbose);
   }
   
  private static void checkFailures(String server, HashSet<KeyExtent> failures, MultiScanResult scanResult) {
@@ -205,9 +207,10 @@ public class VerifyTabletAssignments {
     }
   }
   
-  private static void checkTabletServer(final String user, final ByteBuffer pass, Entry<String,List<KeyExtent>> entry, HashSet<KeyExtent> failures)
+  private static void checkTabletServer(AccumuloConfiguration conf, final String user, final ByteBuffer pass, Entry<String,List<KeyExtent>> entry,
+      HashSet<KeyExtent> failures)
       throws ThriftSecurityException, TException, NoSuchScanIDException {
-    TabletClientService.Iface client = ThriftUtil.getTServerClient(entry.getKey(), ServerConfiguration.getSystemConfiguration());
+    TabletClientService.Iface client = ThriftUtil.getTServerClient(entry.getKey(), conf);
     
     AuthInfo st = new AuthInfo(user, pass, HdfsZooInstance.getInstance().getInstanceID());
     Map<TKeyExtent,List<TRange>> batch = new TreeMap<TKeyExtent,List<TRange>>();

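Both DumpTabletsOnServer and VerifyTabletAssignments above now hand ThriftUtil.getTServerClient an explicitly constructed configuration instead of the old static ServerConfiguration.getSystemConfiguration(). A short sketch, with a hypothetical tablet server address:

    // the address string is hypothetical
    AccumuloConfiguration conf = new ServerConfiguration(HdfsZooInstance.getInstance()).getConfiguration();
    TabletClientService.Iface client = ThriftUtil.getTServerClient("tserver-host:9997", conf);
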
Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java?rev=1310457&r1=1310456&r2=1310457&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java (original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java Fri Apr  6 16:35:29 2012
@@ -30,13 +30,12 @@ import org.apache.accumulo.core.client.C
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.mock.MockInstance;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.thrift.AuthInfo;
 import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
@@ -96,12 +95,12 @@ public class TestConfirmDeletes {
  private void test1(String[] metadata, String[] deletes, int expectedInitial, int expected) throws Exception {
     Instance instance = new MockInstance();
     FileSystem fs = FileSystem.getLocal(CachedConfiguration.getInstance());
-    AccumuloConfiguration aconf = DefaultConfiguration.getInstance();
+    ServerConfiguration conf = new ServerConfiguration(instance);
     
     load(instance, metadata, deletes);
 
     SimpleGarbageCollector gc = new SimpleGarbageCollector();
-    gc.init(fs, instance, auth, aconf);
+    gc.init(fs, conf, auth);
     SortedSet<String> candidates = gc.getCandidates();
     Assert.assertEquals(expectedInitial, candidates.size());
     gc.confirmDeletes(candidates);


