Date: Wed, 23 Jan 2019 18:42:23 +0000
From: mmiller@apache.org
To: commits@accumulo.apache.org
Subject: [accumulo] branch master updated: Replace use of CachedConfiguration (#912)

This is an automated email from the ASF dual-hosted git repository.
mmiller pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git

The following commit(s) were added to refs/heads/master by this push:
     new b161b66  Replace use of CachedConfiguration (#912)
b161b66 is described below

commit b161b6600a89bca090dd2ce4138c132dfa35d672
Author:     Mike Miller
AuthorDate: Wed Jan 23 13:42:18 2019 -0500

    Replace use of CachedConfiguration (#912)

    * Make some ServerConstants methods take ServerContext
    * Removed method that uses CachedConfiguration from VolumeConfiguration
    * Removed CachedConfiguration from util classes and Initialize
    * Add hadoop conf to LogCloser
    * Pull hadoop conf from server context in ITs
---
 .../accumulo/core/clientImpl/OfflineIterator.java  |  3 +-
 .../org/apache/accumulo/core/summary/Gatherer.java |  3 +-
 .../accumulo/core/volume/VolumeConfiguration.java  |  9 ---
 .../apache/accumulo/fate/zookeeper/ZooUtil.java    |  8 ---
 hadoop-mapreduce/pom.xml                           |  5 ++
 .../its/mapred/AccumuloFileOutputFormatIT.java     |  3 +-
 .../accumulo/hadoop/its/mapred/TokenFileIT.java    |  3 +-
 .../its/mapreduce/AccumuloFileOutputFormatIT.java  |  3 +-
 .../accumulo/hadoop/its/mapreduce/TokenFileIT.java |  3 +-
 .../miniclusterImpl/MiniAccumuloClusterImpl.java   |  3 +-
 .../apache/accumulo/server/ServerConstants.java    | 26 ++++---
 .../org/apache/accumulo/server/fs/VolumeUtil.java  | 16 ++---
 .../apache/accumulo/server/init/Initialize.java    | 82 +++++++++++-----------
 .../server/master/recovery/HadoopLogCloser.java    |  7 +-
 .../accumulo/server/master/recovery/LogCloser.java |  4 +-
 .../org/apache/accumulo/server/util/FileUtil.java  | 10 ++-
 .../accumulo/server/util/MetadataTableUtil.java    | 11 ++-
 .../accumulo/server/util/RandomizeVolumes.java     |  6 +-
 .../apache/accumulo/server/util/ZooKeeperMain.java |  2 +-
 .../org/apache/accumulo/server/util/ZooZap.java    |  6 +-
 .../accumulo/server/init/InitializeTest.java       | 14 ++--
 .../accumulo/gc/GarbageCollectWriteAheadLogs.java  |  2 +-
 .../apache/accumulo/gc/SimpleGarbageCollector.java |  4 +-
 .../java/org/apache/accumulo/master/Master.java    |  7 +-
 .../apache/accumulo/master/TabletGroupWatcher.java |  2 +-
 .../accumulo/master/recovery/RecoveryManager.java  |  8 +--
 .../master/tableOps/bulkVer1/BulkImport.java       |  2 +-
 .../master/tableOps/bulkVer2/PrepBulkImport.java   |  2 +-
 .../accumulo/master/tableOps/create/ChooseDir.java |  2 +-
 .../accumulo/master/tableOps/delete/CleanUp.java   |  2 +-
 .../tableOps/tableImport/CreateImportDir.java      |  2 +-
 .../tableImport/PopulateMetadataTable.java         |  2 +-
 .../accumulo/tserver/BulkFailedCopyProcessor.java  |  3 +-
 .../tserver/compaction/MajorCompactionRequest.java |  3 +-
 .../org/apache/accumulo/tserver/log/DfsLogger.java |  5 +-
 .../accumulo/tserver/tablet/DatafileManager.java   |  3 +-
 .../org/apache/accumulo/tserver/tablet/Tablet.java |  2 +-
 .../java/org/apache/accumulo/test/VolumeIT.java    |  3 +-
 .../accumulo/test/functional/ScannerContextIT.java |  3 +-
 .../accumulo/test/functional/SplitRecoveryIT.java  |  4 +-
 .../test/functional/WriteAheadLogEncryptedIT.java  |  3 +-
 .../test/mapred/AccumuloFileOutputFormatIT.java    |  3 +-
 .../apache/accumulo/test/mapred/TokenFileIT.java   |  3 +-
 .../test/mapreduce/AccumuloFileOutputFormatIT.java |  3 +-
 .../accumulo/test/mapreduce/TokenFileIT.java       |  3 +-
 45 files changed, 139 insertions(+), 164 deletions(-)
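Every hunk below applies the same pattern: call sites stop reading the process-wide cached Hadoop Configuration and instead receive it explicitly, either as a method parameter or from the server/client context via getHadoopConf(). A minimal sketch of that pattern, assuming only the stock Hadoop FileSystem API; the class and the openFile helper here are hypothetical and not part of this commit:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ExplicitConfExample {

  // Before this commit, helpers like this obtained the Configuration from a
  // hidden global (CachedConfiguration.getInstance()). Passing it in makes the
  // dependency visible and lets callers and tests supply their own instance.
  static FSDataInputStream openFile(Path path, Configuration hadoopConf) throws IOException {
    FileSystem fs = path.getFileSystem(hadoopConf); // resolve the fs from the explicit conf
    return fs.open(path);
  }

  public static void main(String[] args) throws IOException {
    // In server code the conf would come from context.getHadoopConf();
    // here a fresh Configuration stands in for it.
    Configuration hadoopConf = new Configuration();
    try (FSDataInputStream in = openFile(new Path("file:///etc/hosts"), hadoopConf)) {
      System.out.println("first byte: " + in.read());
    }
  }
}

Making the Configuration a parameter is also what lets the tests touched below (for example InitializeTest) hand a plain Configuration to checkInit and isInitialized instead of relying on global state.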
diff --git a/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java b/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java
index d5ed87f..1f83f8e 100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java
@@ -56,7 +56,6 @@ import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.accumulo.core.volume.VolumeConfiguration;
 import org.apache.commons.lang.NotImplementedException;
@@ -303,7 +302,7 @@ class OfflineIterator implements Iterator<Entry<Key,Value>> {
     AccumuloConfiguration acuTableConf = new ConfigurationCopy(
         context.tableOperations().getProperties(tableName));
 
-    Configuration conf = CachedConfiguration.getInstance();
+    Configuration conf = context.getHadoopConf();
 
     for (SortedKeyValueIterator<Key,Value> reader : readers) {
       ((FileSKVIterator) reader).close();
diff --git a/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java b/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java
index 5bf92cb..7fe5d0e 100644
--- a/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java
+++ b/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java
@@ -65,7 +65,6 @@ import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client;
 import org.apache.accumulo.core.trace.Tracer;
 import org.apache.accumulo.core.trace.thrift.TInfo;
 import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.CancelFlagFuture;
 import org.apache.accumulo.core.util.CompletableFutureUtil;
 import org.apache.accumulo.core.util.HostAndPort;
@@ -659,7 +658,7 @@ public class Gatherer {
       List<Range> ranges, BlockCache summaryCache, BlockCache indexCache,
       Cache<String,Long> fileLenCache) {
     Path path = new Path(file);
-    Configuration conf = CachedConfiguration.getInstance();
+    Configuration conf = ctx.getHadoopConf();
     return SummaryReader.load(volMgr.get(path), conf, factory, path, summarySelector, summaryCache,
         indexCache, fileLenCache, cryptoService).getSummaries(ranges);
   }
diff --git a/core/src/main/java/org/apache/accumulo/core/volume/VolumeConfiguration.java b/core/src/main/java/org/apache/accumulo/core/volume/VolumeConfiguration.java
index 05badeb..d556cc2 100644
--- a/core/src/main/java/org/apache/accumulo/core/volume/VolumeConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/volume/VolumeConfiguration.java
@@ -24,7 +24,6 @@ import java.net.URISyntaxException;
 
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -86,14 +85,6 @@ public class VolumeConfiguration {
     return baseDir;
   }
 
-  /**
-   * Compute the URIs to be used by Accumulo
-   *
-   */
-  public static String[] getVolumeUris(AccumuloConfiguration conf) {
-    return getVolumeUris(conf, CachedConfiguration.getInstance());
-  }
-
   public static String[] getVolumeUris(AccumuloConfiguration conf, Configuration hadoopConfig) {
     String ns = conf.get(Property.INSTANCE_VOLUMES);
 
diff --git a/core/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java b/core/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java
index 308e53c..5b9f52e 100644
--- a/core/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java
+++ b/core/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java
@@ -31,7 +31,6 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.volume.VolumeConfiguration;
 import org.apache.accumulo.fate.util.Retry;
 import org.apache.accumulo.fate.util.Retry.RetryFactory;
@@ -578,13 +577,6 @@ public class ZooUtil {
     return Constants.ZROOT + "/" + instanceId;
   }
 
-  /**
-   * Utility to support certain client side utilities to minimize command-line options.
-   */
-  public static String getInstanceIDFromHdfs(Path instanceDirectory, AccumuloConfiguration conf) {
-    return getInstanceIDFromHdfs(instanceDirectory, conf, CachedConfiguration.getInstance());
-  }
-
   public static String getInstanceIDFromHdfs(Path instanceDirectory, AccumuloConfiguration conf,
       Configuration hadoopConf) {
     try {
diff --git a/hadoop-mapreduce/pom.xml b/hadoop-mapreduce/pom.xml
index c5f430e..5cc9eec 100644
--- a/hadoop-mapreduce/pom.xml
+++ b/hadoop-mapreduce/pom.xml
@@ -63,6 +63,11 @@
     <dependency>
       <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-server-base</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-test</artifactId>
       <scope>test</scope>
     </dependency>
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java
index 14369b3..c4c65ef 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java
@@ -39,7 +39,6 @@ import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.hadoop.mapred.AccumuloFileOutputFormat;
 import org.apache.accumulo.hadoop.mapred.AccumuloInputFormat;
 import org.apache.accumulo.hadoopImpl.mapreduce.lib.ConfiguratorBase;
@@ -191,7 +190,7 @@ public class AccumuloFileOutputFormatIT extends AccumuloClusterHarness {
       assertEquals(1, files.length);
       assertTrue(files[0].exists());
 
-      Configuration conf = CachedConfiguration.getInstance();
+      Configuration conf = cluster.getServerContext().getHadoopConf();
       DefaultConfiguration acuconf = DefaultConfiguration.getInstance();
       FileSKVIterator sample = FileOperations.getInstance().newReaderBuilder()
           .forFile(files[0].toString(), FileSystem.getLocal(conf), conf,
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/TokenFileIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/TokenFileIT.java
index ed19e57..5f2f66c 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/TokenFileIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/TokenFileIT.java
@@ -38,7 +38,6 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.hadoop.mapred.AccumuloInputFormat;
 import org.apache.accumulo.hadoop.mapred.AccumuloOutputFormat;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
@@ -132,7 +131,7 @@ public class TokenFileIT extends AccumuloClusterHarness {
 
   @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "path provided by test")
   public static void main(String[] args) throws Exception {
-    Configuration conf = CachedConfiguration.getInstance();
+    Configuration conf = cluster.getServerContext().getHadoopConf();
     conf.set("hadoop.tmp.dir", new File(args[0]).getParent());
     conf.set("mapreduce.framework.name", "local");
     conf.set("mapreduce.cluster.local.dir",
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java
index a7dcacb..b17aeb5 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java
@@ -37,7 +37,6 @@ import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.file.rfile.RFileOperations;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.hadoop.mapreduce.AccumuloFileOutputFormat;
 import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
@@ -204,7 +203,7 @@ public class AccumuloFileOutputFormatIT extends AccumuloClusterHarness {
       assertEquals(1, files.length);
       assertTrue(files[0].exists());
 
-      Configuration conf = CachedConfiguration.getInstance();
+      Configuration conf = cluster.getServerContext().getHadoopConf();
       DefaultConfiguration acuconf = DefaultConfiguration.getInstance();
       FileSKVIterator sample = RFileOperations.getInstance().newReaderBuilder()
           .forFile(files[0].toString(), FileSystem.getLocal(conf), conf,
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/TokenFileIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/TokenFileIT.java
index 3abb3b9..35006dc 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/TokenFileIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/TokenFileIT.java
@@ -38,7 +38,6 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
@@ -126,7 +125,7 @@ public class TokenFileIT extends AccumuloClusterHarness {
   @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "path provided by test")
   public static void main(String[] args) throws Exception {
-    Configuration conf = CachedConfiguration.getInstance();
+    Configuration conf = cluster.getServerContext().getHadoopConf();
     conf.set("hadoop.tmp.dir", new File(args[0]).getParent());
     conf.set("mapreduce.framework.name", "local");
     conf.set("mapreduce.cluster.local.dir",
diff --git a/minicluster/src/main/java/org/apache/accumulo/miniclusterImpl/MiniAccumuloClusterImpl.java b/minicluster/src/main/java/org/apache/accumulo/miniclusterImpl/MiniAccumuloClusterImpl.java
index ac4e703..db402f2 100644
--- a/minicluster/src/main/java/org/apache/accumulo/miniclusterImpl/MiniAccumuloClusterImpl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/miniclusterImpl/MiniAccumuloClusterImpl.java
@@ -73,7 +73,6 @@ import org.apache.accumulo.core.master.thrift.MasterClientService;
 import org.apache.accumulo.core.master.thrift.MasterGoalState;
 import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
 import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil;
@@ -409,7 +408,7 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
         siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
         config.setSiteConfig(siteConfig);
       } else if (config.useExistingInstance()) {
-        dfsUri = CachedConfiguration.getInstance().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
+        dfsUri = getServerContext().getHadoopConf().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
       } else {
         dfsUri = "file:///";
       }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java b/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
index f748ddf..fe70494 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
@@ -26,7 +26,6 @@ import java.util.List;
 
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.volume.Volume;
 import org.apache.accumulo.core.volume.VolumeConfiguration;
@@ -88,11 +87,16 @@ public class ServerConstants {
 
   private static List<Pair<Path,Path>> replacementsList = null;
 
+  public static String[] getBaseUris(ServerContext context) {
+    return getBaseUris(context.getConfiguration(), context.getHadoopConf());
+  }
+
   // these are functions to delay loading the Accumulo configuration unless we must
-  public static synchronized String[] getBaseUris(AccumuloConfiguration conf) {
+  public static synchronized String[] getBaseUris(AccumuloConfiguration conf,
+      Configuration hadoopConf) {
     if (baseUris == null) {
-      baseUris = checkBaseUris(conf, CachedConfiguration.getInstance(),
-          VolumeConfiguration.getVolumeUris(conf), false);
+      baseUris = checkBaseUris(conf, hadoopConf,
+          VolumeConfiguration.getVolumeUris(conf, hadoopConf), false);
     }
 
     return baseUris;
@@ -150,12 +154,12 @@ public class ServerConstants {
   public static final String RECOVERY_DIR = "recovery";
   public static final String WAL_DIR = "wal";
 
-  public static String[] getTablesDirs(AccumuloConfiguration conf) {
-    return VolumeConfiguration.prefix(getBaseUris(conf), TABLE_DIR);
+  public static String[] getTablesDirs(ServerContext context) {
+    return VolumeConfiguration.prefix(getBaseUris(context), TABLE_DIR);
   }
 
-  public static String[] getRecoveryDirs(AccumuloConfiguration conf) {
-    return VolumeConfiguration.prefix(getBaseUris(conf), RECOVERY_DIR);
+  public static String[] getRecoveryDirs(ServerContext context) {
+    return VolumeConfiguration.prefix(getBaseUris(context), RECOVERY_DIR);
   }
 
   public static Path getInstanceIdLocation(Volume v) {
@@ -168,8 +172,8 @@ public class ServerConstants {
     return v.prefixChild(VERSION_DIR);
   }
 
-  public static synchronized List<Pair<Path,Path>> getVolumeReplacements(
-      AccumuloConfiguration conf) {
+  public static synchronized List<Pair<Path,Path>> getVolumeReplacements(AccumuloConfiguration conf,
+      Configuration hadoopConf) {
     if (replacementsList == null) {
       String replacements = conf.get(Property.INSTANCE_VOLUMES_REPLACEMENTS);
@@ -215,7 +219,7 @@ public class ServerConstants {
       }
 
       HashSet<Path> baseDirs = new HashSet<>();
-      for (String baseDir : getBaseUris(conf)) {
+      for (String baseDir : getBaseUris(conf, hadoopConf)) {
         // normalize using path
         baseDirs.add(new Path(baseDir));
       }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
index c0e1439..f6c2dd0 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
@@ -30,7 +30,6 @@ import org.apache.accumulo.core.dataImpl.KeyExtent;
 import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.protobuf.ProtobufUtil;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.fate.zookeeper.ZooLock;
 import org.apache.accumulo.server.ServerConstants;
@@ -63,7 +62,7 @@ public class VolumeUtil {
     if (!dir.toString().contains(":"))
       return true;
 
-    for (String tableDir : ServerConstants.getTablesDirs(context.getConfiguration())) {
+    for (String tableDir : ServerConstants.getTablesDirs(context)) {
       // use Path to normalize tableDir
       if (dir.toString().startsWith(new Path(tableDir).toString()))
         return true;
@@ -166,7 +165,7 @@ public class VolumeUtil {
   public static String switchRootTableVolume(ServerContext context, String location)
       throws IOException {
     String newLocation = switchVolume(location, FileType.TABLE,
-        ServerConstants.getVolumeReplacements(context.getConfiguration()));
+        ServerConstants.getVolumeReplacements(context.getConfiguration(), context.getHadoopConf()));
     if (newLocation != null) {
       MetadataTableUtil.setRootTabletDir(context, newLocation);
       log.info("Volume replaced: {} -> {}", location, newLocation);
@@ -184,7 +183,7 @@ public class VolumeUtil {
       VolumeManager vm, KeyExtent extent, TabletFiles tabletFiles, boolean replicate)
       throws IOException {
     List<Pair<Path,Path>> replacements = ServerConstants
-        .getVolumeReplacements(context.getConfiguration());
+        .getVolumeReplacements(context.getConfiguration(), context.getHadoopConf());
     log.trace("Using volume replacements: {}", replacements);
 
     List<LogEntry> logsToRemove = new ArrayList<>();
@@ -275,10 +274,9 @@ public class VolumeUtil {
 
     VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(extent.getTableId(),
         context);
-    Path newDir = new Path(
-        vm.choose(chooserEnv, ServerConstants.getBaseUris(context.getConfiguration()))
-            + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR
-            + dir.getParent().getName() + Path.SEPARATOR + dir.getName());
+    Path newDir = new Path(vm.choose(chooserEnv, ServerConstants.getBaseUris(context))
+        + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + dir.getParent().getName()
+        + Path.SEPARATOR + dir.getName());
 
     log.info("Updating directory for {} from {} to {}", extent, dir, newDir);
     if (extent.isRootTablet()) {
@@ -302,7 +300,7 @@ public class VolumeUtil {
 
       // do a lot of logging since this is the root tablet
       log.info("copying {} to {}", dir, newDir);
-      if (!FileUtil.copy(fs1, dir, fs2, newDir, false, CachedConfiguration.getInstance())) {
+      if (!FileUtil.copy(fs1, dir, fs2, newDir, false, context.getHadoopConf())) {
new IOException("Failed to copy " + dir + " to " + newDir); } diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java index 2cef0b7..e7cb605 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java +++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java @@ -74,7 +74,6 @@ import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection; import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection; import org.apache.accumulo.core.replication.ReplicationTable; import org.apache.accumulo.core.spi.crypto.CryptoService; -import org.apache.accumulo.core.util.CachedConfiguration; import org.apache.accumulo.core.util.ColumnFQ; import org.apache.accumulo.core.util.LocalityGroupUtil; import org.apache.accumulo.core.util.Pair; @@ -242,14 +241,15 @@ public class Initialize implements KeywordExecutable { ReplicationUtil.STATUS_FORMATTER_CLASS_NAME); } - static boolean checkInit(Configuration conf, VolumeManager fs, SiteConfiguration sconf) - throws IOException { + static boolean checkInit(Configuration conf, VolumeManager fs, SiteConfiguration sconf, + Configuration hadoopConf) throws IOException { @SuppressWarnings("deprecation") String fsUri = sconf.get(Property.INSTANCE_DFS_URI); if (fsUri.equals("")) fsUri = FileSystem.getDefaultUri(conf).toString(); log.info("Hadoop Filesystem is {}", fsUri); - log.info("Accumulo data dirs are {}", Arrays.asList(VolumeConfiguration.getVolumeUris(sconf))); + log.info("Accumulo data dirs are {}", + Arrays.asList(VolumeConfiguration.getVolumeUris(sconf, hadoopConf))); log.info("Zookeeper server is {}", sconf.get(Property.INSTANCE_ZK_HOST)); log.info("Checking if Zookeeper is available. If this hangs, then you need" + " to make sure zookeeper is running"); @@ -274,8 +274,8 @@ public class Initialize implements KeywordExecutable { + " accumulo.properties. 
           + " accumulo.properties. Without this accumulo will not operate" + " correctly");
     }
     try {
-      if (isInitialized(fs, sconf)) {
-        printInitializeFailureMessages(sconf);
+      if (isInitialized(fs, sconf, hadoopConf)) {
+        printInitializeFailureMessages(sconf, hadoopConf);
         return false;
       }
     } catch (IOException e) {
@@ -285,7 +285,7 @@ public class Initialize implements KeywordExecutable {
     return true;
   }
 
-  static void printInitializeFailureMessages(SiteConfiguration sconf) {
+  static void printInitializeFailureMessages(SiteConfiguration sconf, Configuration hadoopConf) {
     @SuppressWarnings("deprecation")
     Property INSTANCE_DFS_DIR = Property.INSTANCE_DFS_DIR;
     @SuppressWarnings("deprecation")
@@ -293,7 +293,8 @@ public class Initialize implements KeywordExecutable {
     String instanceDfsDir = sconf.get(INSTANCE_DFS_DIR);
     // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compatibility
     log.error("FATAL It appears the directories {}",
-        Arrays.asList(VolumeConfiguration.getVolumeUris(sconf)) + " were previously initialized.");
+        Arrays.asList(VolumeConfiguration.getVolumeUris(sconf, hadoopConf))
+            + " were previously initialized.");
 
     String instanceVolumes = sconf.get(Property.INSTANCE_VOLUMES);
     String instanceDfsUri = sconf.get(INSTANCE_DFS_URI);
@@ -316,7 +317,7 @@ public class Initialize implements KeywordExecutable {
 
   public boolean doInit(SiteConfiguration siteConfig, Opts opts, Configuration conf,
       VolumeManager fs) throws IOException {
-    if (!checkInit(conf, fs, siteConfig)) {
+    if (!checkInit(conf, fs, siteConfig, conf)) {
       return false;
     }
 
@@ -345,15 +346,15 @@ public class Initialize implements KeywordExecutable {
       opts.rootpass = getRootPassword(siteConfig, opts, rootUser);
     }
 
-    return initialize(siteConfig, opts, instanceNamePath, fs, rootUser);
+    return initialize(siteConfig, conf, opts, instanceNamePath, fs, rootUser);
   }
 
-  private boolean initialize(SiteConfiguration siteConfig, Opts opts, String instanceNamePath,
-      VolumeManager fs, String rootUser) {
+  private boolean initialize(SiteConfiguration siteConfig, Configuration hadoopConf, Opts opts,
+      String instanceNamePath, VolumeManager fs, String rootUser) {
 
     UUID uuid = UUID.randomUUID();
     // the actual disk locations of the root table and tablets
-    String[] configuredVolumes = VolumeConfiguration.getVolumeUris(siteConfig);
+    String[] configuredVolumes = VolumeConfiguration.getVolumeUris(siteConfig, hadoopConf);
 
     VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.INIT, null);
     final String rootTabletDir = new Path(
         fs.choose(chooserEnv, configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR
@@ -367,16 +368,15 @@ public class Initialize implements KeywordExecutable {
     }
 
     try {
-      initFileSystem(siteConfig, fs, uuid, rootTabletDir);
+      initFileSystem(siteConfig, hadoopConf, fs, uuid, rootTabletDir);
     } catch (Exception e) {
       log.error("FATAL Failed to initialize filesystem", e);
 
       if (siteConfig.get(Property.INSTANCE_VOLUMES).trim().equals("")) {
-        Configuration fsConf = CachedConfiguration.getInstance();
 
         final String defaultFsUri = "file:///";
-        String fsDefaultName = fsConf.get("fs.default.name", defaultFsUri),
-            fsDefaultFS = fsConf.get("fs.defaultFS", defaultFsUri);
+        String fsDefaultName = hadoopConf.get("fs.default.name", defaultFsUri),
+            fsDefaultFS = hadoopConf.get("fs.defaultFS", defaultFsUri);
 
         // Try to determine when we couldn't find an appropriate core-site.xml on the classpath
         if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
@@ -482,22 +482,23 @@ public class Initialize implements KeywordExecutable {
     }
   }
 
-  private void initFileSystem(SiteConfiguration siteConfig, VolumeManager fs, UUID uuid,
-      String rootTabletDir) throws IOException {
-    initDirs(fs, uuid, VolumeConfiguration.getVolumeUris(siteConfig), false);
+  private void initFileSystem(SiteConfiguration siteConfig, Configuration hadoopConf,
+      VolumeManager fs, UUID uuid, String rootTabletDir) throws IOException {
+    initDirs(fs, uuid, VolumeConfiguration.getVolumeUris(siteConfig, hadoopConf), false);
 
     // initialize initial system tables config in zookeeper
-    initSystemTablesConfig(zoo, Constants.ZROOT + "/" + uuid);
+    initSystemTablesConfig(zoo, Constants.ZROOT + "/" + uuid, hadoopConf);
 
     VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.INIT, null);
-    String tableMetadataTabletDir = fs.choose(chooserEnv, ServerConstants.getBaseUris(siteConfig))
-        + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID + TABLE_TABLETS_TABLET_DIR;
+    String tableMetadataTabletDir = fs.choose(chooserEnv,
+        ServerConstants.getBaseUris(siteConfig, hadoopConf)) + Constants.HDFS_TABLES_DIR
+        + Path.SEPARATOR + MetadataTable.ID + TABLE_TABLETS_TABLET_DIR;
     String replicationTableDefaultTabletDir = fs.choose(chooserEnv,
-        ServerConstants.getBaseUris(siteConfig)) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
-        + ReplicationTable.ID + Constants.DEFAULT_TABLET_LOCATION;
-    String defaultMetadataTabletDir = fs.choose(chooserEnv, ServerConstants.getBaseUris(siteConfig))
-        + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID
-        + Constants.DEFAULT_TABLET_LOCATION;
+        ServerConstants.getBaseUris(siteConfig, hadoopConf)) + Constants.HDFS_TABLES_DIR
+        + Path.SEPARATOR + ReplicationTable.ID + Constants.DEFAULT_TABLET_LOCATION;
+    String defaultMetadataTabletDir = fs.choose(chooserEnv,
+        ServerConstants.getBaseUris(siteConfig, hadoopConf)) + Constants.HDFS_TABLES_DIR
+        + Path.SEPARATOR + MetadataTable.ID + Constants.DEFAULT_TABLET_LOCATION;
 
     // create table and default tablets directories
     createDirectories(fs, rootTabletDir, tableMetadataTabletDir, defaultMetadataTabletDir,
@@ -780,14 +781,13 @@ public class Initialize implements KeywordExecutable {
         rootUser, opts.rootpass);
   }
 
-  public static void initSystemTablesConfig(ZooReaderWriter zoo, String zooKeeperRoot)
-      throws IOException {
+  public static void initSystemTablesConfig(ZooReaderWriter zoo, String zooKeeperRoot,
+      Configuration hadoopConf) throws IOException {
     try {
-      Configuration conf = CachedConfiguration.getInstance();
-      int max = conf.getInt("dfs.replication.max", 512);
+      int max = hadoopConf.getInt("dfs.replication.max", 512);
       // Hadoop 0.23 switched the min value configuration name
-      int min = Math.max(conf.getInt("dfs.replication.min", 1),
-          conf.getInt("dfs.namenode.replication.min", 1));
+      int min = Math.max(hadoopConf.getInt("dfs.replication.min", 1),
+          hadoopConf.getInt("dfs.namenode.replication.min", 1));
       if (max < 5)
         setMetadataReplication(max, "max");
       if (min > 5)
@@ -832,9 +832,9 @@ public class Initialize implements KeywordExecutable {
     initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), rep);
   }
 
-  public static boolean isInitialized(VolumeManager fs, SiteConfiguration siteConfig)
-      throws IOException {
-    for (String baseDir : VolumeConfiguration.getVolumeUris(siteConfig)) {
+  public static boolean isInitialized(VolumeManager fs, SiteConfiguration siteConfig,
+      Configuration hadoopConf) throws IOException {
+    for (String baseDir : VolumeConfiguration.getVolumeUris(siteConfig, hadoopConf)) {
       if (fs.exists(new Path(baseDir, ServerConstants.INSTANCE_ID_DIR))
           || fs.exists(new Path(baseDir, ServerConstants.VERSION_DIR)))
         return true;
@@ -861,7 +861,8 @@ public class Initialize implements KeywordExecutable {
     Path versionPath = new Path(aBasePath, ServerConstants.VERSION_DIR);
 
     UUID uuid = UUID.fromString(ZooUtil.getInstanceIDFromHdfs(iidPath, siteConfig, hadoopConf));
-    for (Pair<Path,Path> replacementVolume : ServerConstants.getVolumeReplacements(siteConfig)) {
+    for (Pair<Path,Path> replacementVolume : ServerConstants.getVolumeReplacements(siteConfig,
+        hadoopConf)) {
       if (aBasePath.equals(replacementVolume.getFirst()))
         log.error(
             "{} is set to be replaced in {} and should not appear in {}."
                + " It is highly recommended that this property be removed as data"
                + " could still be written to this volume.",
            aBasePath, Property.INSTANCE_VOLUMES_REPLACEMENTS, Property.INSTANCE_VOLUMES);
     }
 
-    if (ServerUtil.getAccumuloPersistentVersion(
-        versionPath.getFileSystem(CachedConfiguration.getInstance()),
+    if (ServerUtil.getAccumuloPersistentVersion(versionPath.getFileSystem(hadoopConf),
         versionPath) != ServerConstants.DATA_VERSION) {
       throw new IOException("Accumulo " + Constants.VERSION + " cannot initialize data version "
           + ServerUtil.getAccumuloPersistentVersion(fs));
@@ -939,7 +939,7 @@ public class Initialize implements KeywordExecutable {
       if (opts.resetSecurity) {
         log.info("Resetting security on accumulo.");
         try (ServerContext context = new ServerContext(siteConfig)) {
-          if (isInitialized(fs, siteConfig)) {
+          if (isInitialized(fs, siteConfig, hadoopConfig)) {
             if (!opts.forceResetSecurity) {
               ConsoleReader c = getConsoleReader();
               String userEnteredName = c.readLine("WARNING: This will remove all"
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
index c208e08..88f04cc 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
@@ -21,9 +21,9 @@ import java.io.IOException;
 
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.server.fs.ViewFSUtils;
 import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
@@ -37,14 +37,15 @@ public class HadoopLogCloser implements LogCloser {
   private static final Logger log = LoggerFactory.getLogger(HadoopLogCloser.class);
 
   @Override
-  public long close(AccumuloConfiguration conf, VolumeManager fs, Path source) throws IOException {
+  public long close(AccumuloConfiguration conf, Configuration hadoopConf, VolumeManager fs,
+      Path source) throws IOException {
     FileSystem ns = fs.getVolumeByPath(source).getFileSystem();
 
     // if path points to a viewfs path, then resolve to underlying filesystem
     if (ViewFSUtils.isViewFS(ns)) {
       Path newSource = ns.resolvePath(source);
       if (!newSource.equals(source) && newSource.toUri().getScheme() != null) {
-        ns = newSource.getFileSystem(CachedConfiguration.getInstance());
+        ns = newSource.getFileSystem(hadoopConf);
         source = newSource;
       }
     }
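The LogCloser interface that HadoopLogCloser implements is updated next. As a sketch of what the new hadoopConf parameter buys an implementation, here is a toy closer; it is not part of this commit and only illustrates re-resolving a re-mapped path with the caller-supplied Configuration rather than a cached global one:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical stand-in for a LogCloser implementation.
public class ExampleCloser {

  // Returns 0 when the log is ready to be recovered; a positive value would
  // ask the caller to retry after that many milliseconds.
  public long close(Configuration hadoopConf, FileSystem ns, Path source) throws IOException {
    Path resolved = ns.resolvePath(source);
    if (!resolved.equals(source) && resolved.toUri().getScheme() != null) {
      // Re-resolve with the caller-supplied conf instead of a global cache.
      ns = resolved.getFileSystem(hadoopConf);
      source = resolved;
    }
    if (ns instanceof LocalFileSystem) {
      return 0; // local files need no lease recovery
    }
    // An HDFS implementation would attempt lease recovery on the source here.
    return 0;
  }
}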
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/LogCloser.java b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/LogCloser.java
index 1365742..41ee79e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/LogCloser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/LogCloser.java
@@ -20,8 +20,10 @@ import java.io.IOException;
 
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 
 public interface LogCloser {
-  long close(AccumuloConfiguration conf, VolumeManager fs, Path path) throws IOException;
+  long close(AccumuloConfiguration conf, Configuration hadoopConf, VolumeManager fs, Path path)
+      throws IOException;
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
index 7406755..dc3c388 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
@@ -43,7 +43,6 @@ import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.file.rfile.RFileOperations;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.system.MultiIterator;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.accumulo.core.volume.Volume;
 import org.apache.accumulo.server.ServerContext;
@@ -209,8 +208,6 @@ public class FileUtil {
   public static double estimatePercentageLTE(ServerContext context, String tabletDir,
       Text prevEndRow, Text endRow, Collection<String> mapFiles, Text splitRow)
       throws IOException {
-    Configuration conf = CachedConfiguration.getInstance();
-
     Path tmpDir = null;
 
     int maxToOpen = context.getConfiguration()
@@ -225,7 +222,8 @@ public class FileUtil {
           mapFiles.size(), endRow, prevEndRow, tmpDir);
       long t1 = System.currentTimeMillis();
-      mapFiles = reduceFiles(context, conf, prevEndRow, endRow, mapFiles, maxToOpen, tmpDir, 0);
+      mapFiles = reduceFiles(context, context.getHadoopConf(), prevEndRow, endRow, mapFiles,
+          maxToOpen, tmpDir, 0);
       long t2 = System.currentTimeMillis();
 
       log.debug("Finished reducing indexes for {} {} in {}", endRow, prevEndRow,
@@ -287,7 +285,6 @@ public class FileUtil {
   public static SortedMap<Double,Key> findMidPoint(ServerContext context, String tabletDirectory,
       Text prevEndRow, Text endRow, Collection<String> mapFiles, double minSplit, boolean useIndex)
       throws IOException {
-    Configuration conf = CachedConfiguration.getInstance();
 
     Collection<String> origMapFiles = mapFiles;
 
@@ -308,7 +305,8 @@ public class FileUtil {
           mapFiles.size(), endRow, prevEndRow, tmpDir);
       long t1 = System.currentTimeMillis();
-      mapFiles = reduceFiles(context, conf, prevEndRow, endRow, mapFiles, maxToOpen, tmpDir, 0);
+      mapFiles = reduceFiles(context, context.getHadoopConf(), prevEndRow, endRow, mapFiles,
+          maxToOpen, tmpDir, 0);
       long t2 = System.currentTimeMillis();
 
       log.debug("Finished reducing indexes for {} {} in {}", endRow, prevEndRow,
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index b549485..b5baa50 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -915,9 +915,8 @@ public class MetadataTableUtil {
         Mutation m = new Mutation(k.getRow());
         m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
         VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId, context);
-        String dir = volumeManager.choose(chooserEnv,
-            ServerConstants.getBaseUris(context.getConfiguration())) + Constants.HDFS_TABLES_DIR
-            + Path.SEPARATOR + tableId + Path.SEPARATOR + new String(
+        String dir = volumeManager.choose(chooserEnv, ServerConstants.getBaseUris(context))
+            + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR + new String(
                 FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES));
 
         TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(dir.getBytes(UTF_8)));
@@ -1026,9 +1025,9 @@ public class MetadataTableUtil {
 
     VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ReplicationTable.ID,
         context);
-    String dir = context.getVolumeManager().choose(chooserEnv,
-        ServerConstants.getBaseUris(context.getConfiguration())) + Constants.HDFS_TABLES_DIR
-        + Path.SEPARATOR + ReplicationTable.ID + Constants.DEFAULT_TABLET_LOCATION;
+    String dir = context.getVolumeManager().choose(chooserEnv, ServerConstants.getBaseUris(context))
+        + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + ReplicationTable.ID
+        + Constants.DEFAULT_TABLET_LOCATION;
 
     Mutation m = new Mutation(new Text(TabletsSection.getRow(ReplicationTable.ID, null)));
     m.put(DIRECTORY_COLUMN.getColumnFamily(), DIRECTORY_COLUMN.getColumnQualifier(), 0,
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
index b186304..94fbde1 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
@@ -106,9 +106,9 @@ public class RandomizeVolumes {
       Mutation m = new Mutation(key.getRow());
 
       VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId, context);
-      final String newLocation = vm.choose(chooserEnv,
-          ServerConstants.getBaseUris(context.getConfiguration())) + Path.SEPARATOR
-          + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR + directory;
+      final String newLocation = vm.choose(chooserEnv, ServerConstants.getBaseUris(context))
+          + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR
+          + directory;
       m.put(key.getColumnFamily(), key.getColumnQualifier(),
           new Value(newLocation.getBytes(UTF_8)));
       if (log.isTraceEnabled()) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java b/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
index 692b006..7760836 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
@@ -66,7 +66,7 @@ public class ZooKeeperMain implements KeywordExecutable {
     opts.parseArgs(ZooKeeperMain.class.getName(), args);
     try (ServerContext context = new ServerContext(new SiteConfiguration())) {
       FileSystem fs = context.getVolumeManager().getDefaultVolume().getFileSystem();
-      String baseDir = ServerConstants.getBaseUris(context.getConfiguration())[0];
+      String baseDir = ServerConstants.getBaseUris(context)[0];
       System.out.println("Using " + fs.makeQualified(new Path(baseDir + "/instance_id"))
           + " to lookup accumulo instance");
       if (opts.servers == null) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ZooZap.java b/server/base/src/main/java/org/apache/accumulo/server/util/ZooZap.java
index b4bc194..05ae191 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ZooZap.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ZooZap.java
@@ -29,6 +29,7 @@ import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.security.SecurityUtil;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -66,14 +67,15 @@ public class ZooZap {
     }
 
     SiteConfiguration siteConf = new SiteConfiguration();
+    Configuration hadoopConf = new Configuration();
     // Login as the server on secure HDFS
     if (siteConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
       SecurityUtil.serverLogin(siteConf);
     }
 
-    String volDir = VolumeConfiguration.getVolumeUris(siteConf)[0];
+    String volDir = VolumeConfiguration.getVolumeUris(siteConf, hadoopConf)[0];
     Path instanceDir = new Path(volDir, "instance_id");
-    String iid = ZooUtil.getInstanceIDFromHdfs(instanceDir, siteConf);
+    String iid = ZooUtil.getInstanceIDFromHdfs(instanceDir, siteConf, hadoopConf);
     ZooReaderWriter zoo = new ZooReaderWriter(siteConf);
 
     if (opts.zapMaster) {
diff --git a/server/base/src/test/java/org/apache/accumulo/server/init/InitializeTest.java b/server/base/src/test/java/org/apache/accumulo/server/init/InitializeTest.java
index 674a942..f57a394 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/init/InitializeTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/init/InitializeTest.java
@@ -72,7 +72,7 @@ public class InitializeTest {
     expect(sconf.get(Property.INSTANCE_DFS_URI)).andReturn("hdfs://foo");
     expect(fs.exists(anyObject(Path.class))).andReturn(true);
     replay(fs, sconf);
-    assertTrue(Initialize.isInitialized(fs, sconf));
+    assertTrue(Initialize.isInitialized(fs, sconf, conf));
   }
 
   @SuppressWarnings("deprecation")
@@ -82,7 +82,7 @@ public class InitializeTest {
     expect(fs.exists(anyObject(Path.class))).andReturn(false);
     expect(fs.exists(anyObject(Path.class))).andReturn(true);
     replay(fs, sconf);
-    assertTrue(Initialize.isInitialized(fs, sconf));
+    assertTrue(Initialize.isInitialized(fs, sconf, conf));
   }
 
   @SuppressWarnings("deprecation")
@@ -94,7 +94,7 @@ public class InitializeTest {
     expect(zoo.exists("/")).andReturn(false);
     replay(zoo);
 
-    assertFalse(Initialize.checkInit(conf, fs, sconf));
+    assertFalse(Initialize.checkInit(conf, fs, sconf, conf));
   }
 
   @SuppressWarnings("deprecation")
@@ -109,7 +109,7 @@ public class InitializeTest {
     expect(fs.exists(anyObject(Path.class))).andReturn(true);
     replay(fs);
 
-    assertFalse(Initialize.checkInit(conf, fs, sconf));
+    assertFalse(Initialize.checkInit(conf, fs, sconf, conf));
   }
 
   // Cannot test, need to mock static FileSystem.getDefaultUri()
@@ -128,7 +128,7 @@ public class InitializeTest {
     expect(fs.exists(anyObject(Path.class))).andReturn(true);
     replay(fs);
 
-    assertFalse(Initialize.checkInit(conf, fs, sconf));
+    assertFalse(Initialize.checkInit(conf, fs, sconf, conf));
   }
 
   @SuppressWarnings("deprecation")
@@ -144,7 +144,7 @@ public class InitializeTest {
     expect(fs.exists(anyObject(Path.class))).andThrow(new IOException());
     replay(fs);
 
-    Initialize.checkInit(conf, fs, sconf);
+    Initialize.checkInit(conf, fs, sconf, conf);
   }
 
   @SuppressWarnings("deprecation")
@@ -160,6 +160,6 @@ public class InitializeTest {
     expect(fs.exists(anyObject(Path.class))).andReturn(false);
     replay(fs);
 
-    assertTrue(Initialize.checkInit(conf, fs, sconf));
+    assertTrue(Initialize.checkInit(conf, fs, sconf, conf));
   }
 }
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
index 06044d0..9107d15 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
@@ -398,7 +398,7 @@ public class GarbageCollectWriteAheadLogs {
   protected Map<UUID,Path> getSortedWALogs() throws IOException {
     Map<UUID,Path> result = new HashMap<>();
 
-    for (String dir : ServerConstants.getRecoveryDirs(context.getConfiguration())) {
+    for (String dir : ServerConstants.getRecoveryDirs(context)) {
       Path recoveryDir = new Path(dir);
       if (fs.exists(recoveryDir)) {
         for (FileStatus status : fs.listStatus(recoveryDir)) {
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
index 07f8eb0..6bff46b 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
@@ -347,7 +347,7 @@ public class SimpleGarbageCollector implements Iface {
           new NamingThreadFactory("deleting"));
 
       final List<Pair<Path,Path>> replacements = ServerConstants
-          .getVolumeReplacements(getConfiguration());
+          .getVolumeReplacements(getConfiguration(), getContext().getHadoopConf());
 
       for (final String delete : confirmedDeletes.values()) {
 
@@ -448,7 +448,7 @@ public class SimpleGarbageCollector implements Iface {
   public void deleteTableDirIfEmpty(Table.ID tableID) throws IOException {
     // if dir exist and is empty, then empty list is returned...
     // hadoop 2.0 will throw an exception if the file does not exist
-    for (String dir : ServerConstants.getTablesDirs(context.getConfiguration())) {
+    for (String dir : ServerConstants.getTablesDirs(context)) {
       FileStatus[] tabletDirs = null;
       try {
         tabletDirs = fs.listStatus(new Path(dir + "/" + tableID));
diff --git a/server/master/src/main/java/org/apache/accumulo/master/Master.java b/server/master/src/main/java/org/apache/accumulo/master/Master.java
index 7e80307..f130e0c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/Master.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/Master.java
@@ -287,7 +287,7 @@ public class Master
       Path oldPath = fs.getFullPath(FileType.TABLE, "/" + MetadataTable.ID + "/root_tablet");
       if (fs.exists(oldPath)) {
         VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(RootTable.ID, context);
-        String newPath = fs.choose(chooserEnv, ServerConstants.getBaseUris(getConfiguration()))
+        String newPath = fs.choose(chooserEnv, ServerConstants.getBaseUris(context))
             + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + RootTable.ID;
         fs.mkdirs(new Path(newPath));
         if (!fs.rename(oldPath, new Path(newPath))) {
@@ -299,7 +299,7 @@ public class Master
 
       Path location = null;
 
-      for (String basePath : ServerConstants.getTablesDirs(getConfiguration())) {
+      for (String basePath : ServerConstants.getTablesDirs(context)) {
         Path path = new Path(basePath + "/" + RootTable.ID + RootTable.ROOT_TABLET_LOCATION);
         if (fs.exists(path)) {
           if (location != null) {
@@ -424,7 +424,8 @@ public class Master
       log.debug("Upgrade creating table {} (ID: {})", RootTable.NAME, RootTable.ID);
       TableManager.prepareNewTableState(zoo, getInstanceID(), RootTable.ID, Namespace.ID.ACCUMULO,
           RootTable.NAME, TableState.ONLINE, NodeExistsPolicy.SKIP);
-      Initialize.initSystemTablesConfig(context.getZooReaderWriter(), context.getZooKeeperRoot());
+      Initialize.initSystemTablesConfig(context.getZooReaderWriter(), context.getZooKeeperRoot(),
+          context.getHadoopConf());
       // ensure root user can flush root table
       security.grantTablePermission(context.rpcCreds(), security.getRootUsername(), RootTable.ID,
           TablePermission.ALTER_TABLE, Namespace.ID.ACCUMULO);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
index fb72c42..4c4fe8a 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
@@ -662,7 +662,7 @@ abstract class TabletGroupWatcher extends Daemon {
       VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(extent.getTableId(),
           master.getContext());
       String tdir = master.getFileSystem().choose(chooserEnv,
-          ServerConstants.getBaseUris(master.getConfiguration())) + Constants.HDFS_TABLES_DIR
+          ServerConstants.getBaseUris(master.getContext())) + Constants.HDFS_TABLES_DIR
           + Path.SEPARATOR + extent.getTableId() + Constants.DEFAULT_TABLET_LOCATION;
       MetadataTableUtil.addTablet(
           new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir,
diff --git a/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java b/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java
index 7c740f7..8f4e019 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java
@@ -92,8 +92,8 @@ public class RecoveryManager {
     public void run() {
       boolean rescheduled = false;
       try {
-        long time = closer.close(master.getConfiguration(), master.getFileSystem(),
-            new Path(source));
+        long time = closer.close(master.getConfiguration(), master.getContext().getHadoopConf(),
+            master.getFileSystem(), new Path(source));
 
         if (time > 0) {
           executor.schedule(this, time, TimeUnit.MILLISECONDS);
@@ -137,8 +137,8 @@ public class RecoveryManager {
 
     for (Collection<String> logs : walogs) {
       for (String walog : logs) {
-        String switchedWalog = VolumeUtil.switchVolume(walog, FileType.WAL,
-            ServerConstants.getVolumeReplacements(master.getConfiguration()));
+        String switchedWalog = VolumeUtil.switchVolume(walog, FileType.WAL, ServerConstants
+            .getVolumeReplacements(master.getConfiguration(), master.getContext().getHadoopConf()));
         if (switchedWalog != null) {
           // replaces the volume used for sorting, but do not change entry in metadata table. When
           // the tablet loads it will change the metadata table entry. If
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
index 1a3b4bd..e98885b 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
@@ -153,7 +153,7 @@ public class BulkImport extends MasterRepo {
   private static Path createNewBulkDir(ServerContext context, VolumeManager fs, String sourceDir,
       Table.ID tableId) throws IOException {
     Path tempPath = fs.matchingFileSystem(new Path(sourceDir),
-        ServerConstants.getTablesDirs(context.getConfiguration()));
+        ServerConstants.getTablesDirs(context));
     if (tempPath == null)
       throw new IOException(sourceDir + " is not in a volume configured for Accumulo");
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java
index 9ec597c..d554ae9 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java
@@ -220,7 +220,7 @@ public class PrepBulkImport extends MasterRepo {
   private Path createNewBulkDir(ServerContext context, VolumeManager fs, Table.ID tableId)
       throws IOException {
     Path tempPath = fs.matchingFileSystem(new Path(bulkInfo.sourceDir),
-        ServerConstants.getTablesDirs(context.getConfiguration()));
+        ServerConstants.getTablesDirs(context));
     if (tempPath == null)
       throw new IOException(bulkInfo.sourceDir + " is not in a volume configured for Accumulo");
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
index 9725f71..bf3e4e0 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
@@ -58,7 +58,7 @@ class ChooseDir extends MasterRepo {
         master.getContext());
     String baseDir = master.getFileSystem().choose(chooserEnv,
-        ServerConstants.getBaseUris(master.getConfiguration())) + Constants.HDFS_TABLES_DIR
+        ServerConstants.getBaseUris(master.getContext())) + Constants.HDFS_TABLES_DIR
         + Path.SEPARATOR + tableInfo.getTableId();
     tableInfo.defaultTabletDir = baseDir + Constants.DEFAULT_TABLET_LOCATION;
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/delete/CleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/delete/CleanUp.java
index c1606a6..2b7e9a9 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/delete/CleanUp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/delete/CleanUp.java
@@ -174,7 +174,7 @@ class CleanUp extends MasterRepo {
     // delete the map files
     try {
       VolumeManager fs = master.getFileSystem();
-      for (String dir : ServerConstants.getTablesDirs(master.getConfiguration())) {
+      for (String dir : ServerConstants.getTablesDirs(master.getContext())) {
        fs.deleteRecursively(new Path(dir, tableId.canonicalID()));
       }
     } catch (IOException e) {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/CreateImportDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/CreateImportDir.java
index 2aac167..53d2ac4 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/CreateImportDir.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/CreateImportDir.java
@@ -45,7 +45,7 @@ class CreateImportDir extends MasterRepo {
     UniqueNameAllocator namer = master.getContext().getUniqueNameAllocator();
 
     Path exportDir = new Path(tableInfo.exportDir);
-    String[] tableDirs = ServerConstants.getTablesDirs(master.getConfiguration());
+    String[] tableDirs = ServerConstants.getTablesDirs(master.getContext());
 
     log.info("Looking for matching filesystem for " + exportDir + " from options "
         + Arrays.toString(tableDirs));
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java
index d1a3fa3..ca2c968 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java
@@ -106,7 +106,7 @@ class PopulateMetadataTable extends MasterRepo {
       // hdfs://localhost:8020/path/to/accumulo/tables/...
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java
index d1a3fa3..ca2c968 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java
@@ -106,7 +106,7 @@ class PopulateMetadataTable extends MasterRepo {
       // hdfs://localhost:8020/path/to/accumulo/tables/...
       final String bulkDir = tableInfo.importDir;
-      final String[] tableDirs = ServerConstants.getTablesDirs(master.getConfiguration());
+      final String[] tableDirs = ServerConstants.getTablesDirs(master.getContext());
       ZipEntry zipEntry;
       while ((zipEntry = zis.getNextEntry()) != null) {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/BulkFailedCopyProcessor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/BulkFailedCopyProcessor.java
index 2d6f0ac..5019993 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/BulkFailedCopyProcessor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/BulkFailedCopyProcessor.java
@@ -20,7 +20,6 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import java.io.IOException;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.zookeeper.DistributedWorkQueue.Processor;
@@ -62,7 +61,7 @@ public class BulkFailedCopyProcessor implements Processor {
       FileSystem origFs = vm.getVolumeByPath(orig).getFileSystem();
       FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
-      FileUtil.copy(origFs, orig, destFs, tmp, false, true, CachedConfiguration.getInstance());
+      FileUtil.copy(origFs, orig, destFs, tmp, false, true, context.getHadoopConf());
       destFs.rename(tmp, dest);
       log.debug("copied {} to {}", orig, dest);
     } catch (IOException ex) {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java
index 16fe49a..df6e37a 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java
@@ -43,7 +43,6 @@ import org.apache.accumulo.core.summary.Gatherer;
 import org.apache.accumulo.core.summary.SummarizerFactory;
 import org.apache.accumulo.core.summary.SummaryCollection;
 import org.apache.accumulo.core.summary.SummaryReader;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
@@ -157,7 +156,7 @@ public class MajorCompactionRequest implements Cloneable {
     SummarizerFactory factory = new SummarizerFactory(tableConfig);
     for (FileRef file : files) {
       FileSystem fs = volumeManager.getVolumeByPath(file.path()).getFileSystem();
-      Configuration conf = CachedConfiguration.getInstance();
+      Configuration conf = context.getHadoopConf();
       SummaryCollection fsc = SummaryReader
           .load(fs, conf, factory, file.path(), summarySelector, summaryCache, indexCache,
               fileLenCache, context.getCryptoService())
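Both tserver hunks swap the process-wide CachedConfiguration singleton for a Configuration owned by the ServerContext. A hedged sketch of the accessor shape that makes that work; the field name is illustrative and the committed ServerContext may construct its Configuration differently:

    import org.apache.hadoop.conf.Configuration;

    // One Configuration per server context, created once; callers share it
    // instead of reaching for a mutable global. new Configuration() picks up
    // core-site.xml and hdfs-site.xml from the classpath as usual.
    private final Configuration hadoopConf = new Configuration();

    public Configuration getHadoopConf() {
      return hadoopConf;
    }

The likely benefit is testability: a test can hand its context a Configuration pointing at a mini DFS rather than mutating shared static state.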
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
index 9381a88..5df5114 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
@@ -408,9 +408,8 @@ public class DfsLogger implements Comparable<DfsLogger> {
       VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.LOGGER,
           context);
-      logPath = fs.choose(chooserEnv, ServerConstants.getBaseUris(context.getConfiguration()))
-          + Path.SEPARATOR + ServerConstants.WAL_DIR + Path.SEPARATOR + logger + Path.SEPARATOR
-          + filename;
+      logPath = fs.choose(chooserEnv, ServerConstants.getBaseUris(context)) + Path.SEPARATOR
+          + ServerConstants.WAL_DIR + Path.SEPARATOR + logger + Path.SEPARATOR + filename;
       metaReference = toString();
       LoggerOperation op = null;
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
index 1826caa..c28cb4e 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
@@ -224,8 +224,7 @@ class DatafileManager {
         boolean inTheRightDirectory = false;
         Path parent = tpath.path().getParent().getParent();
-        for (String tablesDir : ServerConstants
-            .getTablesDirs(tablet.getContext().getConfiguration())) {
+        for (String tablesDir : ServerConstants.getTablesDirs(tablet.getContext())) {
           if (parent.equals(new Path(tablesDir, tablet.getExtent().getTableId().canonicalID()))) {
             inTheRightDirectory = true;
             break;
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
index da33e13..1720343 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
@@ -2862,7 +2862,7 @@ public class Tablet {
     UniqueNameAllocator namer = context.getUniqueNameAllocator();
     VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId, context);
-    String volume = fs.choose(chooserEnv, ServerConstants.getBaseUris(context.getConfiguration()))
+    String volume = fs.choose(chooserEnv, ServerConstants.getBaseUris(context))
         + Constants.HDFS_TABLES_DIR + Path.SEPARATOR;
     while (true) {
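To make the DfsLogger and Tablet hunks concrete: the volume chooser picks one of the configured base URIs, and the rest of the path is plain string composition. An illustrative sketch, assuming WAL_DIR resolves to "wal" and with an invented namenode, logger id, and filename:

    // With a chosen base URI of hdfs://nn:8020/accumulo, a logger id of
    // "tserver+9997", and an invented filename, this composes roughly:
    //   hdfs://nn:8020/accumulo/wal/tserver+9997/<filename>
    String logPath = fs.choose(chooserEnv, ServerConstants.getBaseUris(context))
        + Path.SEPARATOR + ServerConstants.WAL_DIR + Path.SEPARATOR + logger
        + Path.SEPARATOR + filename;

The only behavioral change in these hunks is where getBaseUris gets its configuration; the chosen path layout is unchanged.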
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
index 5b2bb94..5986738 100644
--- a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
@@ -59,7 +59,6 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.accumulo.fate.zookeeper.ZooUtil;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
@@ -345,7 +344,7 @@ public class VolumeIT extends ConfigurableMacBase {
   // check that all volumes are initialized
   private void checkVolumesInitialized(List<Path> volumes, String uuid) throws Exception {
     for (Path volumePath : volumes) {
-      FileSystem fs = volumePath.getFileSystem(CachedConfiguration.getInstance());
+      FileSystem fs = volumePath.getFileSystem(cluster.getServerContext().getHadoopConf());
       Path vp = new Path(volumePath, ServerConstants.INSTANCE_ID_DIR);
       FileStatus[] iids = fs.listStatus(vp);
       assertEquals(1, iids.length);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScannerContextIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ScannerContextIT.java
index 888cc0e..658f571 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ScannerContextIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ScannerContextIT.java
@@ -38,7 +38,6 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.fate.util.UtilWaitThread;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl;
@@ -69,7 +68,7 @@ public class ScannerContextIT extends AccumuloClusterHarness {
   public void checkCluster() throws Exception {
     Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
     MiniAccumuloClusterImpl.class.cast(getCluster());
-    fs = FileSystem.get(CachedConfiguration.getInstance());
+    fs = FileSystem.get(cluster.getServerContext().getHadoopConf());
   }
   private Path copyTestIteratorsJarToTmp() throws IOException {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
index 0cb7a21..d4be95b 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
@@ -145,8 +145,8 @@ public class SplitRecoveryIT extends ConfigurableMacBase {
     for (int i = 0; i < extents.length; i++) {
       KeyExtent extent = extents[i];
-      String tdir = ServerConstants.getTablesDirs(context.getConfiguration())[0] + "/"
-          + extent.getTableId() + "/dir_" + i;
+      String tdir = ServerConstants.getTablesDirs(context)[0] + "/" + extent.getTableId() + "/dir_"
+          + i;
       MetadataTableUtil.addTablet(extent, tdir, context, TabletTime.LOGICAL_TIME_ID, zl);
       SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<>();
       mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"),
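Every test hunk from here down follows one idiom: derive the Hadoop Configuration from the mini cluster's server context instead of the removed singleton. A minimal sketch of the two FileSystem lookups the tests need, assuming a harness-provided `cluster` field as in the ITs above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    // The cluster's server context owns the Hadoop Configuration now.
    Configuration hadoopConf = cluster.getServerContext().getHadoopConf();
    FileSystem dfs = FileSystem.get(hadoopConf);        // the cluster filesystem
    FileSystem local = FileSystem.getLocal(hadoopConf); // local fs, same conf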
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/WriteAheadLogEncryptedIT.java b/test/src/main/java/org/apache/accumulo/test/functional/WriteAheadLogEncryptedIT.java
index 462d25e..ab6bdaf 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/WriteAheadLogEncryptedIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/WriteAheadLogEncryptedIT.java
@@ -21,7 +21,6 @@ import static org.apache.accumulo.test.functional.WriteAheadLogIT.testWAL;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
 import org.apache.hadoop.conf.Configuration;
@@ -49,7 +48,7 @@ public class WriteAheadLogEncryptedIT extends AccumuloClusterHarness {
     // setup key file
     try {
       Path keyFile = new Path(keyPath);
-      FileSystem fs = FileSystem.getLocal(CachedConfiguration.getInstance());
+      FileSystem fs = FileSystem.getLocal(cluster.getServerContext().getHadoopConf());
       fs.delete(keyFile, true);
       if (fs.createNewFile(keyFile))
         log.info("Created keyfile at {}", keyPath);
diff --git a/test/src/main/java/org/apache/accumulo/test/mapred/AccumuloFileOutputFormatIT.java b/test/src/main/java/org/apache/accumulo/test/mapred/AccumuloFileOutputFormatIT.java
index decec32..7a3d285 100644
--- a/test/src/main/java/org/apache/accumulo/test/mapred/AccumuloFileOutputFormatIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/mapred/AccumuloFileOutputFormatIT.java
@@ -38,7 +38,6 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.file.rfile.RFileOperations;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -197,7 +196,7 @@ public class AccumuloFileOutputFormatIT extends AccumuloClusterHarness {
       assertEquals(1, files.length);
       assertTrue(files[0].exists());
-      Configuration conf = CachedConfiguration.getInstance();
+      Configuration conf = cluster.getServerContext().getHadoopConf();
       DefaultConfiguration acuconf = DefaultConfiguration.getInstance();
       FileSKVIterator sample = RFileOperations.getInstance().newReaderBuilder()
           .forFile(files[0].toString(), FileSystem.getLocal(conf), conf,
diff --git a/test/src/main/java/org/apache/accumulo/test/mapred/TokenFileIT.java b/test/src/main/java/org/apache/accumulo/test/mapred/TokenFileIT.java
index 9744977..7a010d4 100644
--- a/test/src/main/java/org/apache/accumulo/test/mapred/TokenFileIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/mapred/TokenFileIT.java
@@ -37,7 +37,6 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -175,7 +174,7 @@ public class TokenFileIT extends AccumuloClusterHarness {
       out.println(outString);
     }
-    Configuration conf = CachedConfiguration.getInstance();
+    Configuration conf = cluster.getServerContext().getHadoopConf();
     conf.set("hadoop.tmp.dir", new File(tf.getAbsolutePath()).getParent());
     conf.set("mapreduce.framework.name", "local");
     conf.set("mapreduce.cluster.local.dir",
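The token-file tests additionally reuse that same Configuration to run MapReduce in local mode. A sketch of the setup visible in the TokenFileIT hunks; the property keys are the ones in the diff, while the temp directory value is illustrative:

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = cluster.getServerContext().getHadoopConf();
    conf.set("hadoop.tmp.dir", "/tmp/token-file-it");  // illustrative path
    conf.set("mapreduce.framework.name", "local");     // run MR in-process

Because the conf is shared rather than a fresh global, these conf.set calls now affect only the context's Configuration instance used by the test.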
diff --git a/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloFileOutputFormatIT.java b/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloFileOutputFormatIT.java
index c21f327..8c9ca57 100644
--- a/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloFileOutputFormatIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloFileOutputFormatIT.java
@@ -37,7 +37,6 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.file.rfile.RFileOperations;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -213,7 +212,7 @@ public class AccumuloFileOutputFormatIT extends AccumuloClusterHarness {
       assertEquals(1, files.length);
       assertTrue(files[0].exists());
-      Configuration conf = CachedConfiguration.getInstance();
+      Configuration conf = cluster.getServerContext().getHadoopConf();
       DefaultConfiguration acuconf = DefaultConfiguration.getInstance();
       FileSKVIterator sample = RFileOperations.getInstance().newReaderBuilder()
           .forFile(files[0].toString(), FileSystem.getLocal(conf), conf,
diff --git a/test/src/main/java/org/apache/accumulo/test/mapreduce/TokenFileIT.java b/test/src/main/java/org/apache/accumulo/test/mapreduce/TokenFileIT.java
index c6d92e9..a3dd49c 100644
--- a/test/src/main/java/org/apache/accumulo/test/mapreduce/TokenFileIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/mapreduce/TokenFileIT.java
@@ -37,7 +37,6 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -169,7 +168,7 @@ public class TokenFileIT extends AccumuloClusterHarness {
       out.println(outString);
     }
-    Configuration conf = CachedConfiguration.getInstance();
+    Configuration conf = cluster.getServerContext().getHadoopConf();
    conf.set("hadoop.tmp.dir", new File(tf.getAbsolutePath()).getParent());
    conf.set("mapreduce.framework.name", "local");
    conf.set("mapreduce.cluster.local.dir",