From: klund@apache.org
To: commits@geode.apache.org
Reply-To: dev@geode.apache.org
Date: Thu, 11 May 2017 21:07:19 -0000
Subject: [40/52] [abbrv] geode git commit: GEODE-2632: change dependencies on GemFireCacheImpl to InternalCache
In-Reply-To: <47deb8f24f0e4bf585981b4a266cf414@git.apache.org>
References: <47deb8f24f0e4bf585981b4a266cf414@git.apache.org>

http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java index 5ac48bc..b86caa4 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java @@ -12,13 +12,10 @@ * or implied. See the License for the specific language governing permissions and limitations under * the License. */ -/* - * RangeIndex.java - * - * Created on February 4, 2005, 11:10 AM - */ package org.apache.geode.cache.query.internal.index; +import static java.lang.Integer.*; + import it.unimi.dsi.fastutil.objects.Object2ObjectOpenHashMap; import java.util.ArrayList; @@ -58,21 +55,23 @@ import org.apache.geode.internal.cache.RegionEntry; import org.apache.geode.internal.cache.persistence.query.CloseableIterator; import org.apache.geode.internal.i18n.LocalizedStrings; -/** - */ public class RangeIndex extends AbstractIndex { protected volatile int valueToEntriesMapSize = 0; + /** * Map for valueOf(indexedExpression)=>RegionEntries. SortedMap)>. Package access for unit tests.
*/ final ConcurrentNavigableMap valueToEntriesMap = new ConcurrentSkipListMap(TypeUtils.getExtendedNumericComparator()); + // Map for RegionEntries=>value of indexedExpression (reverse map) final private RegionEntryToValuesMap entryToValuesMap; + // Map for RegionEntries=>values when indexedExpression evaluates to null protected RegionEntryToValuesMap nullMappedEntries; + // Map for RegionEntries=>values when indexedExpression evaluates to UNDEFINED protected RegionEntryToValuesMap undefinedMappedEntries; @@ -84,7 +83,7 @@ public class RangeIndex extends AbstractIndex { public static TestHook testHook; - // @todo need more specific list of exceptions + // TODO: need more specific list of exceptions /** * Create an Range Index that can be used when executing queries. * @@ -117,8 +116,8 @@ public class RangeIndex extends AbstractIndex { } @Override - void instantiateEvaluator(IndexCreationHelper ich) { - this.evaluator = new IMQEvaluator(ich); + void instantiateEvaluator(IndexCreationHelper indexCreationHelper) { + this.evaluator = new IMQEvaluator(indexCreationHelper); } @Override @@ -130,8 +129,6 @@ public class RangeIndex extends AbstractIndex { this.internalIndexStats.incUpdateTime(endTime - startTime); } - //// AbstractIndex implementation - void addMapping(RegionEntry entry) throws IMQException { // Save oldKeys somewhere first this.evaluator.evaluate(entry, true); @@ -254,6 +251,7 @@ public class RangeIndex extends AbstractIndex { continue; } else { this.internalIndexStats.incNumKeys(1); + // TODO: non-atomic operation on volatile int ++valueToEntriesMapSize; } } @@ -302,6 +300,7 @@ public class RangeIndex extends AbstractIndex { continue; } else { this.internalIndexStats.incNumKeys(1); + // TODO: non-atomic operation on volatile int ++valueToEntriesMapSize; } } @@ -343,7 +342,7 @@ public class RangeIndex extends AbstractIndex { private void removeOldMapping(RegionEntry entry, Object oldkeys) throws IMQException { if (oldkeys instanceof Collection) { - Iterator valuesIter = ((Collection) oldkeys).iterator(); + Iterator valuesIter = ((Iterable) oldkeys).iterator(); while (valuesIter.hasNext()) { Object key = valuesIter.next(); RegionEntryToValuesMap rvMap = (RegionEntryToValuesMap) this.valueToEntriesMap.get(key); @@ -474,6 +473,7 @@ public class RangeIndex extends AbstractIndex { rvMap = new RegionEntryToValuesMap(true /* use target list */); this.valueToEntriesMap.put(newKey, rvMap); this.internalIndexStats.incNumKeys(1); + // TODO: non-atomic operation on volatile int ++valueToEntriesMapSize; } rvMap.add(entry, value); @@ -509,7 +509,7 @@ public class RangeIndex extends AbstractIndex { undefinedMappedEntries.remove(entry); } } else if (values instanceof Collection) { - Iterator valuesIter = ((Collection) values).iterator(); + Iterator valuesIter = ((Iterable) values).iterator(); while (valuesIter.hasNext()) { Object key = valuesIter.next(); RegionEntryToValuesMap rvMap = (RegionEntryToValuesMap) this.valueToEntriesMap.get(key); @@ -709,7 +709,7 @@ public class RangeIndex extends AbstractIndex { size = this.valueToEntriesMap.containsKey(key) ? 1 : 0; } } else { - size = Integer.MAX_VALUE; + size = MAX_VALUE; } break; @@ -753,7 +753,7 @@ public class RangeIndex extends AbstractIndex { size = this.valueToEntriesMap.containsKey(key) ? 
1 : 0; } } else { - size = Integer.MAX_VALUE; + size = MAX_VALUE; } break; } @@ -770,7 +770,7 @@ public class RangeIndex extends AbstractIndex { boolean multiColOrderBy = false; boolean asc = true; List orderByAttrs = null; - if (orderByClause != null && orderByClause.booleanValue()) { + if (orderByClause != null && orderByClause) { orderByAttrs = (List) context.cacheGet(CompiledValue.ORDERBY_ATTRIB); CompiledSortCriterion csc = (CompiledSortCriterion) orderByAttrs.get(0); asc = !csc.getCriterion(); @@ -840,7 +840,7 @@ public class RangeIndex extends AbstractIndex { } default: { throw new IllegalArgumentException( - LocalizedStrings.RangeIndex_OPERATOR_0.toLocalizedString(Integer.valueOf(operator))); + LocalizedStrings.RangeIndex_OPERATOR_0.toLocalizedString(valueOf(operator))); } } // end switch } catch (ClassCastException ex) { @@ -992,12 +992,12 @@ public class RangeIndex extends AbstractIndex { if (entriesMap == null || result == null) return; QueryObserver observer = QueryObserverHolder.getInstance(); - if (verifyLimit(result, limit, context)) { + if (verifyLimit(result, limit)) { observer.limitAppliedAtIndexLevel(this, limit, result); return; } if (entriesMap instanceof SortedMap) { - if (((SortedMap) entriesMap).isEmpty()) { // bug#40514 + if (((Map) entriesMap).isEmpty()) { // bug#40514 return; } @@ -1010,7 +1010,7 @@ public class RangeIndex extends AbstractIndex { if (keysToRemove == null || !keysToRemove.remove(key)) { RegionEntryToValuesMap rvMap = (RegionEntryToValuesMap) entry.getValue(); rvMap.addValuesToCollection(result, limit, context); - if (verifyLimit(result, limit, context)) { + if (verifyLimit(result, limit)) { observer.limitAppliedAtIndexLevel(this, limit, result); return; } @@ -1048,12 +1048,12 @@ public class RangeIndex extends AbstractIndex { if (entriesMap == null || result == null) return; QueryObserver observer = QueryObserverHolder.getInstance(); - if (verifyLimit(result, limit, context)) { + if (verifyLimit(result, limit)) { observer.limitAppliedAtIndexLevel(this, limit, result); return; } assert entriesMap instanceof SortedMap; - Iterator entriesIter = ((SortedMap) entriesMap).entrySet().iterator(); + Iterator entriesIter = ((Map) entriesMap).entrySet().iterator(); Map.Entry entry = null; boolean foundKeyToRemove = false; while (entriesIter.hasNext()) { @@ -1062,7 +1062,7 @@ public class RangeIndex extends AbstractIndex { if (foundKeyToRemove || !keyToRemove.equals(entry.getKey())) { RegionEntryToValuesMap rvMap = (RegionEntryToValuesMap) entry.getValue(); rvMap.addValuesToCollection(result, limit, context); - if (verifyLimit(result, limit, context)) { + if (verifyLimit(result, limit)) { observer.limitAppliedAtIndexLevel(this, limit, result); return; } @@ -1078,9 +1078,8 @@ public class RangeIndex extends AbstractIndex { throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { boolean limitApplied = false; - if (entriesMap == null || result == null - || (limitApplied = verifyLimit(result, limit, context))) { - if (limitApplied) { + if (entriesMap == null || result == null) { + if (verifyLimit(result, limit)) { QueryObserver observer = QueryObserverHolder.getInstance(); if (observer != null) { observer.limitAppliedAtIndexLevel(this, limit, result); @@ -1090,7 +1089,7 @@ public class RangeIndex extends AbstractIndex { } QueryObserver observer = QueryObserverHolder.getInstance(); if (entriesMap instanceof SortedMap) { - Iterator entriesIter = ((SortedMap) entriesMap).entrySet().iterator(); + Iterator 
entriesIter = ((Map) entriesMap).entrySet().iterator(); Map.Entry entry = null; boolean foundKeyToRemove = false; @@ -1106,7 +1105,7 @@ public class RangeIndex extends AbstractIndex { RegionEntryToValuesMap rvMap = (RegionEntryToValuesMap) entry.getValue(); rvMap.addValuesToCollection(result, iterOps, runtimeItr, context, projAttrib, intermediateResults, isIntersection, limit); - if (verifyLimit(result, limit, context)) { + if (verifyLimit(result, limit)) { observer.limitAppliedAtIndexLevel(this, limit, result); break; } @@ -1181,8 +1180,8 @@ public class RangeIndex extends AbstractIndex { int limit = -1; Boolean applyLimit = (Boolean) context.cacheGet(CompiledValue.CAN_APPLY_LIMIT_AT_INDEX); - if (applyLimit != null && applyLimit.booleanValue()) { - limit = ((Integer) context.cacheGet(CompiledValue.RESULT_LIMIT)).intValue(); + if (applyLimit != null && applyLimit) { + limit = (Integer) context.cacheGet(CompiledValue.RESULT_LIMIT); } Boolean orderByClause = (Boolean) context.cacheGet(CompiledValue.CAN_APPLY_ORDER_BY_AT_INDEX); @@ -1190,7 +1189,7 @@ public class RangeIndex extends AbstractIndex { List orderByAttrs = null; boolean asc = true; boolean applyOrderBy = false; - if (orderByClause != null && orderByClause.booleanValue()) { + if (orderByClause != null && orderByClause) { orderByAttrs = (List) context.cacheGet(CompiledValue.ORDERBY_ATTRIB); CompiledSortCriterion csc = (CompiledSortCriterion) orderByAttrs.get(0); asc = !csc.getCriterion(); @@ -1281,7 +1280,7 @@ public class RangeIndex extends AbstractIndex { List orderByAttrs = null; boolean multiColOrderBy = false; - if (orderByClause != null && orderByClause.booleanValue()) { + if (orderByClause != null && orderByClause) { orderByAttrs = (List) context.cacheGet(CompiledValue.ORDERBY_ATTRIB); CompiledSortCriterion csc = (CompiledSortCriterion) orderByAttrs.get(0); asc = !csc.getCriterion(); @@ -1360,7 +1359,7 @@ public class RangeIndex extends AbstractIndex { boolean multiColOrderBy = false; List orderByAttrs = null; boolean asc = true; - if (orderByClause != null && orderByClause.booleanValue()) { + if (orderByClause != null && orderByClause) { orderByAttrs = (List) context.cacheGet(CompiledValue.ORDERBY_ATTRIB); CompiledSortCriterion csc = (CompiledSortCriterion) orderByAttrs.get(0); asc = !csc.getCriterion(); @@ -1397,7 +1396,7 @@ public class RangeIndex extends AbstractIndex { } public String dump() { - StringBuffer sb = new StringBuffer(toString()).append(" {\n"); + StringBuilder sb = new StringBuilder(toString()).append(" {\n"); sb.append("Null Values\n"); Iterator nI = nullMappedEntries.entrySet().iterator(); while (nI.hasNext()) { @@ -1429,11 +1428,12 @@ public class RangeIndex extends AbstractIndex { Iterator i1 = this.valueToEntriesMap.entrySet().iterator(); while (i1.hasNext()) { Map.Entry indexEntry = (Map.Entry) i1.next(); - sb.append(" Key = " + indexEntry.getKey()).append("\n"); - sb.append(" Value Type = ").append(" " + indexEntry.getValue().getClass().getName()) + sb.append(" Key = ").append(indexEntry.getKey()).append("\n"); + sb.append(" Value Type = ").append(" ").append(indexEntry.getValue().getClass().getName()) .append("\n"); if (indexEntry.getValue() instanceof Map) { - sb.append(" Value Size = ").append(" " + ((Map) indexEntry.getValue()).size()).append("\n"); + sb.append(" Value Size = ").append(" ").append(((Map) indexEntry.getValue()).size()) + .append("\n"); } Iterator i2 = ((RegionEntryToValuesMap) indexEntry.getValue()).entrySet().iterator(); while (i2.hasNext()) { @@ -1454,7 +1454,6 @@ public class 
RangeIndex extends AbstractIndex { return sb.toString(); } - public static void setTestHook(TestHook hook) { RangeIndex.testHook = hook; } @@ -1470,10 +1469,8 @@ public class RangeIndex extends AbstractIndex { public RangeIndexStatistics(String indexName) { this.vsdStats = new IndexStats(getRegion().getCache().getDistributedSystem(), indexName); - } - /** * Return the total number of times this index has been updated */ @@ -1521,15 +1518,10 @@ public class RangeIndex extends AbstractIndex { this.vsdStats.incUsesInProgress(delta); } - public void incReadLockCount(int delta) { this.vsdStats.incReadLockCount(delta); } - public long getUseTime() { - return this.vsdStats.getUseTime(); - } - /** * Returns the total amount of time (in nanoseconds) spent updating this index. */ @@ -1586,7 +1578,7 @@ public class RangeIndex extends AbstractIndex { @Override public String toString() { - StringBuffer sb = new StringBuffer(); + StringBuilder sb = new StringBuilder(); sb.append("No Keys = ").append(getNumberOfKeys()).append("\n"); sb.append("No Values = ").append(getNumberOfValues()).append("\n"); sb.append("No Uses = ").append(getTotalUses()).append("\n"); http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java b/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java index c96732c..459123f 100755 --- a/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/ServerLauncher.java @@ -55,6 +55,7 @@ import org.apache.geode.internal.cache.AbstractCacheServer; import org.apache.geode.internal.cache.CacheConfig; import org.apache.geode.internal.cache.CacheServerLauncher; import org.apache.geode.internal.cache.GemFireCacheImpl; +import org.apache.geode.internal.cache.InternalCache; import org.apache.geode.internal.cache.PartitionedRegion; import org.apache.geode.internal.cache.tier.sockets.CacheServerHelper; import org.apache.geode.internal.i18n.LocalizedStrings; @@ -101,6 +102,7 @@ public class ServerLauncher extends AbstractLauncher { * @deprecated This is specific to the internal implementation and may go away in a future * release. */ + @Deprecated protected static final Integer DEFAULT_SERVER_PORT = getDefaultServerPort(); private static final Map helpMap = new HashMap<>(); @@ -156,6 +158,7 @@ public class ServerLauncher extends AbstractLauncher { * @deprecated This is specific to the internal implementation and may go away in a future * release. 
*/ + @Deprecated public static final String DEFAULT_SERVER_PID_FILE = "vf.gf.server.pid"; private static final String DEFAULT_SERVER_LOG_EXT = ".log"; @@ -167,9 +170,9 @@ public class ServerLauncher extends AbstractLauncher { private static final ServerLauncherCacheProvider DEFAULT_CACHE_PROVIDER = new DefaultServerLauncherCacheProvider(); - private volatile transient boolean debug; + private volatile boolean debug; - private final transient ControlNotificationHandler controlHandler; + private final ControlNotificationHandler controlHandler; private final AtomicBoolean starting = new AtomicBoolean(false); @@ -180,9 +183,9 @@ public class ServerLauncher extends AbstractLauncher { private final boolean rebalance; private final boolean redirectOutput; - private volatile transient Cache cache; + private volatile Cache cache; - private final transient CacheConfig cacheConfig; + private final CacheConfig cacheConfig; private final Command command; @@ -198,7 +201,7 @@ public class ServerLauncher extends AbstractLauncher { private final String workingDirectory; // NOTE in addition to debug, the other shared, mutable state - private volatile transient String statusMessage; + private volatile String statusMessage; private final Float criticalHeapPercentage; private final Float evictionHeapPercentage; @@ -214,9 +217,9 @@ public class ServerLauncher extends AbstractLauncher { private final Integer maxThreads; - private volatile transient ControllableProcess process; + private volatile ControllableProcess process; - private final transient ServerControllerParameters controllerParameters; + private final ServerControllerParameters controllerParameters; /** * Launches a GemFire Server from the command-line configured with the given arguments. @@ -459,8 +462,8 @@ public class ServerLauncher extends AbstractLauncher { * * @return a String indicating the name of the member (this Server) in the GemFire distributed * system. - * @see AbstractLauncher#getMemberName() */ + @Override public String getMemberName() { return StringUtils.defaultIfBlank(this.memberName, super.getMemberName()); } @@ -653,12 +656,12 @@ public class ServerLauncher extends AbstractLauncher { } else { info(StringUtils.wrap(helpMap.get(command.getName()), 80, "")); info("\n\nusage: \n\n"); - info(StringUtils.wrap("> java ... " + getClass().getName() + " " + usageMap.get(command), 80, + info(StringUtils.wrap("> java ... " + getClass().getName() + ' ' + usageMap.get(command), 80, "\t\t")); info("\n\noptions: \n\n"); for (final String option : command.getOptions()) { - info(StringUtils.wrap("--" + option + ": " + helpMap.get(option) + "\n", 80, "\t")); + info(StringUtils.wrap("--" + option + ": " + helpMap.get(option) + '\n', 80, "\t")); } info("\n\n"); @@ -904,7 +907,7 @@ public class ServerLauncher extends AbstractLauncher { /** * Causes the calling Thread to block until the GemFire Cache Server/Data Member stops. */ - public void waitOnServer() { + void waitOnServer() { assert getCache() != null : "The Cache Server must first be started with a call to start!"; if (!isServing(getCache())) { @@ -1012,8 +1015,8 @@ public class ServerLauncher extends AbstractLauncher { * @return a boolean indicating if bucket assignment is both enabled and allowed. 
* @see #isAssignBuckets() */ - protected boolean isAssignBucketsAllowed(final Cache cache) { - return (isAssignBuckets() && (cache instanceof GemFireCacheImpl)); + private boolean isAssignBucketsAllowed(final Cache cache) { + return isAssignBuckets() && cache instanceof GemFireCacheImpl; } /** @@ -1022,9 +1025,9 @@ public class ServerLauncher extends AbstractLauncher { * @param cache the Cache who's Partitioned Regions are accessed to assign buckets to. * @see PartitionRegionHelper#assignBucketsToPartitions(org.apache.geode.cache.Region) */ - final void assignBuckets(final Cache cache) { + private void assignBuckets(final Cache cache) { if (isAssignBucketsAllowed(cache)) { - for (PartitionedRegion region : ((GemFireCacheImpl) cache).getPartitionedRegions()) { + for (PartitionedRegion region : ((InternalCache) cache).getPartitionedRegions()) { PartitionRegionHelper.assignBucketsToPartitions(region); } } @@ -1192,7 +1195,7 @@ public class ServerLauncher extends AbstractLauncher { * process with an embedded Server). */ private boolean isStoppable() { - return (isRunning() && getCache() != null); + return isRunning() && getCache() != null; } /** @@ -1388,9 +1391,7 @@ public class ServerLauncher extends AbstractLauncher { public ObjectName getNamePattern() { try { return ObjectName.getInstance("GemFire:type=Member,*"); - } catch (MalformedObjectNameException e) { - return null; - } catch (NullPointerException e) { + } catch (MalformedObjectNameException | NullPointerException ignore) { return null; } } @@ -1545,12 +1546,12 @@ public class ServerLauncher extends AbstractLauncher { * @param args the array of arguments used to configure this Builder and create an instance of * ServerLauncher. */ - protected void parseArguments(final String... args) { + void parseArguments(final String... args) { try { OptionSet options = getParser().parse(args); parseCommand(args); - parseMemberName(args); // TODO:KIRK: need to get the name to LogService for log file name + parseMemberName(args); setAssignBuckets(options.has("assign-buckets")); setDebug(options.has("debug")); @@ -2424,8 +2425,8 @@ public class ServerLauncher extends AbstractLauncher { * * @see org.apache.geode.distributed.ServerLauncher.Command#START */ - protected void validateOnStart() { - if (Command.START.equals(getCommand())) { + void validateOnStart() { + if (Command.START == getCommand()) { if (StringUtils.isBlank(getMemberName()) && !isSet(System.getProperties(), DistributionConfig.GEMFIRE_PREFIX + NAME) && !isSet(getDistributedSystemProperties(), NAME) @@ -2448,8 +2449,8 @@ public class ServerLauncher extends AbstractLauncher { * * @see org.apache.geode.distributed.ServerLauncher.Command#STATUS */ - protected void validateOnStatus() { - if (Command.STATUS.equals(getCommand())) { + void validateOnStatus() { + if (Command.STATUS == getCommand()) { // do nothing } } @@ -2459,8 +2460,8 @@ public class ServerLauncher extends AbstractLauncher { * * @see org.apache.geode.distributed.ServerLauncher.Command#STOP */ - protected void validateOnStop() { - if (Command.STOP.equals(getCommand())) { + void validateOnStop() { + if (Command.STOP == getCommand()) { // do nothing } } @@ -2482,7 +2483,7 @@ public class ServerLauncher extends AbstractLauncher { /** * An enumerated type representing valid commands to the Server launcher. 
*/ - public static enum Command { + public enum Command { START("start", "assign-buckets", "disable-default-server", "rebalance", SERVER_BIND_ADDRESS, "server-port", "force", "debug", "help"), STATUS("status", "member", "pid", "dir", "debug", "help"), @@ -2688,10 +2689,10 @@ public class ServerLauncher extends AbstractLauncher { @SuppressWarnings("unchecked") private static String getServerBindAddressAsString(final ServerLauncher launcher) { - final GemFireCacheImpl gemfireCache = GemFireCacheImpl.getInstance(); + final InternalCache internalCache = GemFireCacheImpl.getInstance(); - if (gemfireCache != null) { - final List csList = gemfireCache.getCacheServers(); + if (internalCache != null) { + final List csList = internalCache.getCacheServers(); if (csList != null && !csList.isEmpty()) { final CacheServer cs = csList.get(0); final String serverBindAddressAsString = cs.getBindAddress(); @@ -2706,10 +2707,10 @@ public class ServerLauncher extends AbstractLauncher { @SuppressWarnings("unchecked") private static String getServerPortAsString(final ServerLauncher launcher) { - final GemFireCacheImpl gemfireCache = GemFireCacheImpl.getInstance(); + final InternalCache internalCache = GemFireCacheImpl.getInstance(); - if (gemfireCache != null) { - final List csList = gemfireCache.getCacheServers(); + if (internalCache != null) { + final List csList = internalCache.getCacheServers(); if (csList != null && !csList.isEmpty()) { final CacheServer cs = csList.get(0); final String portAsString = String.valueOf(cs.getPort()); http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterConfigurationService.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterConfigurationService.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterConfigurationService.java index 95d1a5b..10623b4 100644 --- a/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterConfigurationService.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterConfigurationService.java @@ -14,26 +14,56 @@ */ package org.apache.geode.distributed.internal; -import static org.apache.geode.distributed.ConfigurationProperties.CLUSTER_CONFIGURATION_DIR; -import static org.apache.geode.distributed.ConfigurationProperties.SECURITY_MANAGER; -import static org.apache.geode.distributed.ConfigurationProperties.SECURITY_POST_PROCESSOR; +import static org.apache.geode.distributed.ConfigurationProperties.*; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileFilter; +import java.io.FileWriter; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.nio.file.Path; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactoryConfigurationError; import org.apache.commons.io.FileUtils; import org.apache.commons.io.FilenameUtils; import org.apache.commons.io.filefilter.DirectoryFileFilter; 
+import org.apache.logging.log4j.Logger; +import org.w3c.dom.Document; +import org.xml.sax.SAXException; + import org.apache.geode.CancelException; import org.apache.geode.cache.AttributesFactory; -import org.apache.geode.cache.Cache; +import org.apache.geode.cache.CacheLoaderException; import org.apache.geode.cache.DataPolicy; import org.apache.geode.cache.DiskStore; import org.apache.geode.cache.Region; import org.apache.geode.cache.Scope; +import org.apache.geode.cache.TimeoutException; import org.apache.geode.cache.execute.ResultCollector; import org.apache.geode.distributed.DistributedLockService; import org.apache.geode.distributed.DistributedMember; import org.apache.geode.distributed.DistributedSystem; +import org.apache.geode.distributed.LeaseExpiredException; import org.apache.geode.distributed.internal.locks.DLockService; -import org.apache.geode.internal.cache.GemFireCacheImpl; +import org.apache.geode.internal.cache.InternalCache; import org.apache.geode.internal.cache.InternalRegionArguments; import org.apache.geode.internal.cache.persistence.PersistentMemberID; import org.apache.geode.internal.cache.persistence.PersistentMemberManager; @@ -51,44 +81,18 @@ import org.apache.geode.management.internal.configuration.messages.Configuration import org.apache.geode.management.internal.configuration.messages.ConfigurationResponse; import org.apache.geode.management.internal.configuration.messages.SharedConfigurationStatusResponse; import org.apache.geode.management.internal.configuration.utils.XmlUtils; -import org.apache.logging.log4j.Logger; -import org.w3c.dom.Document; -import org.xml.sax.SAXException; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileFilter; -import java.io.FileWriter; -import java.io.IOException; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.nio.file.Path; -import java.text.SimpleDateFormat; -import java.util.Arrays; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Properties; -import java.util.Set; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; -import javax.xml.parsers.ParserConfigurationException; -import javax.xml.transform.TransformerException; -import javax.xml.transform.TransformerFactoryConfigurationError; @SuppressWarnings({"deprecation", "unchecked"}) public class ClusterConfigurationService { - private static final Logger logger = LogService.getLogger(); /** * Name of the directory where the shared configuration artifacts are stored */ public static final String CLUSTER_CONFIG_ARTIFACTS_DIR_NAME = "cluster_config"; + private static final String CLUSTER_CONFIG_DISK_STORE_NAME = "cluster_config"; + public static final String CLUSTER_CONFIG_DISK_DIR_PREFIX = "ConfigDiskDir_"; public static final String CLUSTER_CONFIG = "cluster"; @@ -101,7 +105,7 @@ public class ClusterConfigurationService { /** * Name of the lock for locking the shared configuration */ - public static final String SHARED_CONFIG_LOCK_NAME = "__CLUSTER_CONFIG_LOCK"; + private static final String SHARED_CONFIG_LOCK_NAME = "__CLUSTER_CONFIG_LOCK"; /** * Name of the region which is used to store the configuration information @@ -114,11 +118,11 @@ public class ClusterConfigurationService { private final Set newerSharedConfigurationLocatorInfo = new HashSet<>(); private final AtomicReference status = new AtomicReference<>(); - private GemFireCacheImpl cache; + private final 
InternalCache cache; private final DistributedLockService sharedConfigLockingService; - public ClusterConfigurationService(Cache cache) throws IOException { - this.cache = (GemFireCacheImpl) cache; + public ClusterConfigurationService(InternalCache cache) throws IOException { + this.cache = cache; Properties properties = cache.getDistributedSystem().getProperties(); // resolve the cluster config dir String clusterConfigRootDir = properties.getProperty(CLUSTER_CONFIGURATION_DIR); @@ -137,10 +141,11 @@ public class ClusterConfigurationService { String configDiskDirName = CLUSTER_CONFIG_DISK_DIR_PREFIX + cache.getDistributedSystem().getName(); - configDirPath = FilenameUtils.concat(clusterConfigRootDir, CLUSTER_CONFIG_ARTIFACTS_DIR_NAME); - configDiskDirPath = FilenameUtils.concat(clusterConfigRootDir, configDiskDirName); - sharedConfigLockingService = getSharedConfigLockService(cache.getDistributedSystem()); - status.set(SharedConfigurationStatus.NOT_STARTED); + this.configDirPath = + FilenameUtils.concat(clusterConfigRootDir, CLUSTER_CONFIG_ARTIFACTS_DIR_NAME); + this.configDiskDirPath = FilenameUtils.concat(clusterConfigRootDir, configDiskDirName); + this.sharedConfigLockingService = getSharedConfigLockService(cache.getDistributedSystem()); + this.status.set(SharedConfigurationStatus.NOT_STARTED); } /** @@ -154,7 +159,7 @@ public class ClusterConfigurationService { sharedConfigDls = DLockService.create(SHARED_CONFIG_LOCK_SERVICE_NAME, (InternalDistributedSystem) ds, true, true); } - } catch (IllegalArgumentException e) { + } catch (IllegalArgumentException ignore) { return DLockService.getServiceNamed(SHARED_CONFIG_LOCK_SERVICE_NAME); } return sharedConfigDls; @@ -172,7 +177,7 @@ public class ClusterConfigurationService { groups = new String[] {ClusterConfigurationService.CLUSTER_CONFIG}; } for (String group : groups) { - Configuration configuration = (Configuration) configRegion.get(group); + Configuration configuration = configRegion.get(group); if (configuration == null) { configuration = new Configuration(group); } @@ -189,7 +194,7 @@ public class ClusterConfigurationService { configuration.setCacheXmlContent(XmlUtils.prettyXml(doc)); configRegion.put(group, configuration); } catch (Exception e) { - logger.error("error updating cluster configuration for group " + group, e); + logger.error("error updating cluster configuration for group {}", group, e); } } } finally { @@ -210,7 +215,7 @@ public class ClusterConfigurationService { groups = groupSet.toArray(new String[groupSet.size()]); } for (String group : groups) { - Configuration configuration = (Configuration) configRegion.get(group); + Configuration configuration = configRegion.get(group); if (configuration != null) { String xmlContent = configuration.getCacheXmlContent(); try { @@ -221,7 +226,7 @@ public class ClusterConfigurationService { configRegion.put(group, configuration); } } catch (Exception e) { - logger.error("error updating cluster configuration for group " + group, e); + logger.error("error updating cluster configuration for group {}", group, e); } } } @@ -230,8 +235,10 @@ public class ClusterConfigurationService { } } - // we don't need to trigger the change listener for this modification, so it's ok to - // operate on the original configuration object + /** + * we don't need to trigger the change listener for this modification, so it's ok to operate on + * the original configuration object + */ public void modifyXmlAndProperties(Properties properties, XmlEntity xmlEntity, String[] groups) { lockSharedConfiguration(); 
try { @@ -260,7 +267,7 @@ public class ClusterConfigurationService { // Change the xml content of the configuration and put it the config region configuration.setCacheXmlContent(XmlUtils.prettyXml(doc)); } catch (Exception e) { - logger.error("error updating cluster configuration for group " + group, e); + logger.error("error updating cluster configuration for group {}", group, e); } } @@ -274,7 +281,6 @@ public class ClusterConfigurationService { } } - /** * Add jar information into the shared configuration and save the jars in the file system used * when deploying jars @@ -282,8 +288,8 @@ public class ClusterConfigurationService { * @return true on success */ public boolean addJarsToThisLocator(String[] jarNames, byte[][] jarBytes, String[] groups) { - boolean success = true; lockSharedConfiguration(); + boolean success = true; try { if (groups == null) { groups = new String[] {ClusterConfigurationService.CLUSTER_CONFIG}; @@ -297,11 +303,11 @@ public class ClusterConfigurationService { createConfigDirIfNecessary(group); } - String groupDir = FilenameUtils.concat(configDirPath, group); + String groupDir = FilenameUtils.concat(this.configDirPath, group); for (int i = 0; i < jarNames.length; i++) { String filePath = FilenameUtils.concat(groupDir, jarNames[i]); - File jarFile = new File(filePath); try { + File jarFile = new File(filePath); FileUtils.writeByteArrayToFile(jarFile, jarBytes[i]); } catch (IOException e) { logger.info(e); @@ -333,8 +339,8 @@ public class ClusterConfigurationService { * @return true on success. */ public boolean removeJars(final String[] jarNames, String[] groups) { - boolean success = true; lockSharedConfiguration(); + boolean success = true; try { Region configRegion = getConfigurationRegion(); if (groups == null) { @@ -360,10 +366,10 @@ public class ClusterConfigurationService { /** * read the jar bytes in the file system + *

+ * used when creating cluster config response and used when uploading the jars to another locator */ - // used when creating cluster config response - // and used when uploading the jars to another locator - public byte[] getJarBytesFromThisLocator(String group, String jarName) throws Exception { + public byte[] getJarBytesFromThisLocator(String group, String jarName) throws IOException { Configuration configuration = getConfiguration(group); File jar = getPathToJarOnThisLocator(group, jarName).toFile(); @@ -376,10 +382,11 @@ public class ClusterConfigurationService { } // used in the cluster config change listener when jarnames are changed in the internal region - public void downloadJarFromOtherLocators(String groupName, String jarName) throws Exception { + public void downloadJarFromOtherLocators(String groupName, String jarName) + throws IllegalStateException, IOException { logger.info("Getting Jar files from other locators"); - DM dm = cache.getDistributionManager(); - DistributedMember me = cache.getMyId(); + DM dm = this.cache.getDistributionManager(); + DistributedMember me = this.cache.getMyId(); Set locators = new HashSet<>(dm.getAllHostedLocatorsWithSharedConfiguration().keySet()); locators.remove(me); @@ -396,7 +403,7 @@ public class ClusterConfigurationService { } // used when creating cluster config response - public Map getAllJarsFromThisLocator(Set groups) throws Exception { + public Map getAllJarsFromThisLocator(Set groups) throws IOException { Map jarNamesToJarBytes = new HashMap<>(); for (String group : groups) { @@ -421,8 +428,10 @@ public class ClusterConfigurationService { * @param loadSharedConfigFromDir when set to true, loads the configuration from the share_config * directory */ - public void initSharedConfiguration(boolean loadSharedConfigFromDir) throws Exception { - status.set(SharedConfigurationStatus.STARTED); + void initSharedConfiguration(boolean loadSharedConfigFromDir) + throws CacheLoaderException, TimeoutException, IllegalStateException, IOException, + TransformerException, SAXException, ParserConfigurationException { + this.status.set(SharedConfigurationStatus.STARTED); Region configRegion = this.getConfigurationRegion(); lockSharedConfiguration(); try { @@ -434,12 +443,12 @@ public class ClusterConfigurationService { persistSecuritySettings(configRegion); // for those groups that have jar files, need to download the jars from other locators // if it doesn't exist yet - Set groups = configRegion.keySet(); - for (String group : groups) { - Configuration config = configRegion.get(group); + for (Entry stringConfigurationEntry : configRegion.entrySet()) { + Configuration config = stringConfigurationEntry.getValue(); for (String jar : config.getJarNames()) { - if (!(getPathToJarOnThisLocator(group, jar).toFile()).exists()) { - downloadJarFromOtherLocators(group, jar); + if (!getPathToJarOnThisLocator(stringConfigurationEntry.getKey(), jar).toFile() + .exists()) { + downloadJarFromOtherLocators(stringConfigurationEntry.getKey(), jar); } } } @@ -448,11 +457,11 @@ public class ClusterConfigurationService { unlockSharedConfiguration(); } - status.set(SharedConfigurationStatus.RUNNING); + this.status.set(SharedConfigurationStatus.RUNNING); } private void persistSecuritySettings(final Region configRegion) { - Properties securityProps = cache.getDistributedSystem().getSecurityProperties(); + Properties securityProps = this.cache.getDistributedSystem().getSecurityProperties(); Configuration clusterPropertiesConfig = 
configRegion.get(ClusterConfigurationService.CLUSTER_CONFIG); @@ -476,13 +485,13 @@ public class ClusterConfigurationService { * Creates a ConfigurationResponse based on the configRequest, configuration response contains the * requested shared configuration This method locks the ClusterConfigurationService */ - public ConfigurationResponse createConfigurationReponse(final ConfigurationRequest configRequest) - throws Exception { + public ConfigurationResponse createConfigurationResponse(final ConfigurationRequest configRequest) + throws LeaseExpiredException, IOException { ConfigurationResponse configResponse = new ConfigurationResponse(); for (int i = 0; i < configRequest.getNumAttempts(); i++) { - boolean isLocked = sharedConfigLockingService.lock(SHARED_CONFIG_LOCK_NAME, 5000, 5000); + boolean isLocked = this.sharedConfigLockingService.lock(SHARED_CONFIG_LOCK_NAME, 5000, 5000); try { if (isLocked) { Set groups = configRequest.getGroups(); @@ -504,7 +513,7 @@ public class ClusterConfigurationService { return configResponse; } } finally { - sharedConfigLockingService.unlock(SHARED_CONFIG_LOCK_NAME); + this.sharedConfigLockingService.unlock(SHARED_CONFIG_LOCK_NAME); } } @@ -519,16 +528,16 @@ public class ClusterConfigurationService { * @return {@link SharedConfigurationStatusResponse} containing the * {@link SharedConfigurationStatus} */ - public SharedConfigurationStatusResponse createStatusResponse() { + SharedConfigurationStatusResponse createStatusResponse() { SharedConfigurationStatusResponse response = new SharedConfigurationStatusResponse(); response.setStatus(getStatus()); - response.addWaitingLocatorInfo(newerSharedConfigurationLocatorInfo); + response.addWaitingLocatorInfo(this.newerSharedConfigurationLocatorInfo); return response; } /** * For tests only. TODO: clean this up and remove from production code - *

+ *

* Throws {@code AssertionError} wrapping any exception thrown by operation. */ public void destroySharedConfiguration() { @@ -540,25 +549,24 @@ public class ClusterConfigurationService { DiskStore configDiskStore = this.cache.findDiskStore(CLUSTER_CONFIG_ARTIFACTS_DIR_NAME); if (configDiskStore != null) { configDiskStore.destroy(); - File file = new File(configDiskDirPath); + File file = new File(this.configDiskDirPath); FileUtils.deleteDirectory(file); } - FileUtils.deleteDirectory(new File(configDirPath)); + FileUtils.deleteDirectory(new File(this.configDirPath)); } catch (Exception exception) { throw new AssertionError(exception); } } public Path getPathToJarOnThisLocator(String groupName, String jarName) { - return new File(configDirPath).toPath().resolve(groupName).resolve(jarName); + return new File(this.configDirPath).toPath().resolve(groupName).resolve(jarName); } public Configuration getConfiguration(String groupName) { - Configuration configuration = getConfigurationRegion().get(groupName); - return configuration; + return getConfigurationRegion().get(groupName); } - public Map getEntireConfiguration() throws Exception { + public Map getEntireConfiguration() { Set keys = getConfigurationRegion().keySet(); return getConfigurationRegion().getAll(keys); } @@ -581,7 +589,7 @@ public class ClusterConfigurationService { public SharedConfigurationStatus getStatus() { SharedConfigurationStatus scStatus = this.status.get(); if (scStatus == SharedConfigurationStatus.STARTED) { - PersistentMemberManager pmm = cache.getPersistentMemberManager(); + PersistentMemberManager pmm = this.cache.getPersistentMemberManager(); Map> waitingRegions = pmm.getWaitingRegions(); if (!waitingRegions.isEmpty()) { this.status.compareAndSet(SharedConfigurationStatus.STARTED, @@ -589,7 +597,7 @@ public class ClusterConfigurationService { Set persMemIds = waitingRegions.get(Region.SEPARATOR_CHAR + CONFIG_REGION_NAME); for (PersistentMemberID persMemId : persMemIds) { - newerSharedConfigurationLocatorInfo.add(new PersistentMemberPattern(persMemId)); + this.newerSharedConfigurationLocatorInfo.add(new PersistentMemberPattern(persMemId)); } } } @@ -599,18 +607,19 @@ public class ClusterConfigurationService { /** * Loads the internal region with the configuration in the configDirPath */ - public void loadSharedConfigurationFromDisk() throws Exception { + public void loadSharedConfigurationFromDisk() + throws SAXException, ParserConfigurationException, TransformerException, IOException { lockSharedConfiguration(); File[] groupNames = - new File(configDirPath).listFiles((FileFilter) DirectoryFileFilter.INSTANCE); - Map sharedConfiguration = new HashMap(); + new File(this.configDirPath).listFiles((FileFilter) DirectoryFileFilter.INSTANCE); try { + Map sharedConfiguration = new HashMap<>(); for (File groupName : groupNames) { Configuration configuration = readConfiguration(groupName); sharedConfiguration.put(groupName.getName(), configuration); } - Region clusterRegion = getConfigurationRegion(); + Region clusterRegion = getConfigurationRegion(); clusterRegion.clear(); clusterRegion.putAll(sharedConfiguration); @@ -624,12 +633,12 @@ public class ClusterConfigurationService { } public void renameExistingSharedConfigDirectory() { - File configDirFile = new File(configDirPath); + File configDirFile = new File(this.configDirPath); if (configDirFile.exists()) { String configDirFileName2 = CLUSTER_CONFIG_ARTIFACTS_DIR_NAME - + new SimpleDateFormat("yyyyMMddhhmm").format(new Date()) + "." 
+ System.nanoTime(); - File configDirFile2 = new File(configDirFile.getParent(), configDirFileName2); + + new SimpleDateFormat("yyyyMMddhhmm").format(new Date()) + '.' + System.nanoTime(); try { + File configDirFile2 = new File(configDirFile.getParent(), configDirFileName2); FileUtils.moveDirectory(configDirFile, configDirFile2); } catch (IOException e) { logger.info(e); @@ -639,7 +648,7 @@ public class ClusterConfigurationService { // Write the content of xml and properties into the file system for exporting purpose - public void writeConfigToFile(final Configuration configuration) throws Exception { + public void writeConfigToFile(final Configuration configuration) throws IOException { File configDir = createConfigDirIfNecessary(configuration.getConfigName()); File propsFile = new File(configDir, configuration.getPropertiesFileName()); @@ -651,15 +660,15 @@ public class ClusterConfigurationService { FileUtils.writeStringToFile(xmlFile, configuration.getCacheXmlContent(), "UTF-8"); } + // TODO: return value is never used private boolean lockSharedConfiguration() { - return sharedConfigLockingService.lock(SHARED_CONFIG_LOCK_NAME, -1, -1); + return this.sharedConfigLockingService.lock(SHARED_CONFIG_LOCK_NAME, -1, -1); } private void unlockSharedConfiguration() { - sharedConfigLockingService.unlock(SHARED_CONFIG_LOCK_NAME); + this.sharedConfigLockingService.unlock(SHARED_CONFIG_LOCK_NAME); } - private byte[] downloadJarFromLocator(DistributedMember locator, String groupName, String jarName) { ResultCollector> rc = (ResultCollector>) CliUtil @@ -679,24 +688,24 @@ public class ClusterConfigurationService { * @return {@link Region} ConfigurationRegion, this should never be null */ private Region getConfigurationRegion() { - Region configRegion = cache.getRegion(CONFIG_REGION_NAME); + Region configRegion = this.cache.getRegion(CONFIG_REGION_NAME); try { if (configRegion == null) { - File diskDir = new File(configDiskDirPath); + File diskDir = new File(this.configDiskDirPath); if (!diskDir.exists()) { if (!diskDir.mkdirs()) { - throw new IOException("Cannot create directory at " + configDiskDirPath); + // TODO: throw caught by containing try statement + throw new IOException("Cannot create directory at " + this.configDiskDirPath); } } File[] diskDirs = {diskDir}; - cache.createDiskStoreFactory().setDiskDirs(diskDirs).setAutoCompact(true) + this.cache.createDiskStoreFactory().setDiskDirs(diskDirs).setAutoCompact(true) .setMaxOplogSize(10).create(CLUSTER_CONFIG_DISK_STORE_NAME); - AttributesFactory regionAttrsFactory = - new AttributesFactory(); + AttributesFactory regionAttrsFactory = new AttributesFactory<>(); regionAttrsFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE); regionAttrsFactory.setCacheListener(new ConfigurationChangeListener(this)); regionAttrsFactory.setDiskStoreName(CLUSTER_CONFIG_DISK_STORE_NAME); @@ -705,15 +714,16 @@ public class ClusterConfigurationService { internalArgs.setIsUsedForMetaRegion(true); internalArgs.setMetaRegionWithTransactions(false); - configRegion = - cache.createVMRegion(CONFIG_REGION_NAME, regionAttrsFactory.create(), internalArgs); + configRegion = this.cache.createVMRegion(CONFIG_REGION_NAME, regionAttrsFactory.create(), + internalArgs); } } catch (CancelException e) { if (configRegion == null) { this.status.set(SharedConfigurationStatus.STOPPED); } - throw e; // CONFIG: don't rethrow as Exception, keep it a subclass of CancelException + // CONFIG: don't rethrow as Exception, keep it a subclass of CancelException + throw e; } catch (Exception e) { if 
(configRegion == null) { @@ -750,7 +760,7 @@ public class ClusterConfigurationService { /** * Creates a directory for this configuration if it doesn't already exist. */ - private File createConfigDirIfNecessary(final String configName) throws Exception { + private File createConfigDirIfNecessary(final String configName) throws IOException { File clusterConfigDir = new File(getSharedConfigurationDirPath()); if (!clusterConfigDir.exists()) { if (!clusterConfigDir.mkdirs()) { @@ -769,20 +779,4 @@ public class ClusterConfigurationService { return configDir; } - // check if it's ok from populate the properties from one member to another - public static boolean isMisConfigured(Properties fromProps, Properties toProps, String key) { - String fromPropValue = fromProps.getProperty(key); - String toPropValue = toProps.getProperty(key); - - // if this to prop is not specified, this is always OK. - if (org.apache.commons.lang.StringUtils.isBlank(toPropValue)) - return false; - - // to props is not blank, but from props is blank, NOT OK. - if (org.apache.commons.lang.StringUtils.isBlank(fromPropValue)) - return true; - - // at this point check for eqality - return !fromPropValue.equals(toPropValue); - } } http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/distributed/internal/DSClock.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DSClock.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DSClock.java index 9694394..70bf2c2 100644 --- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DSClock.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DSClock.java @@ -18,6 +18,7 @@ import org.apache.geode.distributed.DistributedMember; import org.apache.geode.internal.SystemTimer; import org.apache.geode.internal.SystemTimer.SystemTimerTask; import org.apache.geode.internal.cache.GemFireCacheImpl; +import org.apache.geode.internal.cache.InternalCache; import org.apache.geode.internal.i18n.LocalizedStrings; import org.apache.geode.internal.logging.DateFormatter; import org.apache.geode.internal.logging.LogService; @@ -203,12 +204,10 @@ public class DSClock implements CacheTime { /** * Cancel the previous slow down task (If it exists) and schedule a new one. 
- * - * @param offset */ private void cancelAndScheduleNewCacheTimerTask(long offset) { - GemFireCacheImpl cache = GemFireCacheImpl.getInstance(); + InternalCache cache = GemFireCacheImpl.getInstance(); if (cache != null && !cache.isClosed()) { if (this.cacheTimeTask != null) { @@ -288,7 +287,7 @@ public class DSClock implements CacheTime { @Override public boolean cancel() { - GemFireCacheImpl cache = GemFireCacheImpl.getInstance(); + InternalCache cache = GemFireCacheImpl.getInstance(); if (cache != null && !cache.isClosed()) { suspendCacheTimeMillis(false); } http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java index 3418c58..873ec4b 100644 --- a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalDistributedSystem.java @@ -80,6 +80,7 @@ import org.apache.geode.internal.cache.CacheConfig; import org.apache.geode.internal.cache.CacheServerImpl; import org.apache.geode.internal.cache.EventID; import org.apache.geode.internal.cache.GemFireCacheImpl; +import org.apache.geode.internal.cache.InternalCache; import org.apache.geode.internal.cache.execute.FunctionServiceStats; import org.apache.geode.internal.cache.execute.FunctionStats; import org.apache.geode.internal.cache.tier.sockets.HandShake; @@ -1311,7 +1312,7 @@ public class InternalDistributedSystem extends DistributedSystem // // However, make sure cache is completely closed before starting // the distributed system close. 
- GemFireCacheImpl currentCache = GemFireCacheImpl.getInstance(); + InternalCache currentCache = GemFireCacheImpl.getInstance(); if (currentCache != null && !currentCache.isClosed()) { disconnectListenerThread.set(Boolean.TRUE); // bug #42663 - this must be set while // closing the cache @@ -1541,7 +1542,7 @@ public class InternalDistributedSystem extends DistributedSystem StringTokenizer st = new StringTokenizer(locators, ","); while (st.hasMoreTokens()) { String l = st.nextToken(); - StringBuffer canonical = new StringBuffer(); + StringBuilder canonical = new StringBuilder(); DistributionLocatorId locId = new DistributionLocatorId(l); String addr = locId.getBindAddress(); if (addr != null && addr.trim().length() > 0) { @@ -1555,7 +1556,7 @@ public class InternalDistributedSystem extends DistributedSystem sorted.add(canonical.toString()); } - StringBuffer sb = new StringBuffer(); + StringBuilder sb = new StringBuilder(); for (Iterator iter = sorted.iterator(); iter.hasNext();) { sb.append((String) iter.next()); if (iter.hasNext()) { @@ -1678,7 +1679,7 @@ public class InternalDistributedSystem extends DistributedSystem */ @Override public String toString() { - StringBuffer sb = new StringBuffer(); + StringBuilder sb = new StringBuilder(); sb.append("Connected "); String name = this.getName(); if (name != null && !name.equals("")) { @@ -2481,7 +2482,7 @@ public class InternalDistributedSystem extends DistributedSystem * * @param oldCache cache that has apparently failed */ - public boolean tryReconnect(boolean forcedDisconnect, String reason, GemFireCacheImpl oldCache) { + public boolean tryReconnect(boolean forcedDisconnect, String reason, InternalCache oldCache) { final boolean isDebugEnabled = logger.isDebugEnabled(); if (this.isReconnectingDS && forcedDisconnect) { return false; @@ -2490,7 +2491,7 @@ public class InternalDistributedSystem extends DistributedSystem // cache synchronized (GemFireCacheImpl.class) { // bug 39329: must lock reconnectLock *after* the cache - synchronized (reconnectLock) { + synchronized (this.reconnectLock) { if (!forcedDisconnect && !oldCache.isClosed() && oldCache.getCachePerfStats().getReliableRegionsMissing() == 0) { if (isDebugEnabled) { @@ -2503,7 +2504,7 @@ public class InternalDistributedSystem extends DistributedSystem logger.debug("tryReconnect: forcedDisconnect={}", forcedDisconnect); } if (forcedDisconnect) { - if (config.getDisableAutoReconnect()) { + if (this.config.getDisableAutoReconnect()) { if (isDebugEnabled) { logger.debug("tryReconnect: auto reconnect after forced disconnect is disabled"); } @@ -2511,7 +2512,7 @@ public class InternalDistributedSystem extends DistributedSystem } } reconnect(forcedDisconnect, reason); - return (this.reconnectDS != null && this.reconnectDS.isConnected()); + return this.reconnectDS != null && this.reconnectDS.isConnected(); } // synchronized reconnectLock } // synchronized cache } // synchronized CacheFactory.class @@ -2557,7 +2558,7 @@ public class InternalDistributedSystem extends DistributedSystem String cacheXML = null; List cacheServerCreation = null; - GemFireCacheImpl cache = GemFireCacheImpl.getInstance(); + InternalCache cache = GemFireCacheImpl.getInstance(); if (cache != null) { cacheXML = cache.getCacheConfig().getCacheXMLDescription(); cacheServerCreation = cache.getCacheConfig().getCacheServerCreation(); @@ -2827,7 +2828,7 @@ public class InternalDistributedSystem extends DistributedSystem * after an auto-reconnect we may need to recreate a cache server and start it */ public void 
createAndStartCacheServers(List cacheServerCreation, - GemFireCacheImpl cache) { + InternalCache cache) { List servers = cache.getCacheServers(); @@ -2861,11 +2862,11 @@ public class InternalDistributedSystem extends DistributedSystem * * @param propsToCheck the Properties instance to compare with the existing Properties * - * @throws java.lang.IllegalStateException when the configuration is not the same other returns + * @throws IllegalStateException when the configuration is not the same other returns */ public void validateSameProperties(Properties propsToCheck, boolean isConnected) { if (!this.sameAs(propsToCheck, isConnected)) { - StringBuffer sb = new StringBuffer(); + StringBuilder sb = new StringBuilder(); DistributionConfig wanted = DistributionConfigImpl.produce(propsToCheck);
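
The theme of this commit -- callers declare their dependency as the InternalCache interface, with the concrete GemFireCacheImpl appearing only at the lookup site -- can be summarized in a short sketch. This is illustrative only and not part of the patch; the helper class name is hypothetical, and it assumes the Geode internals already imported in the diff above.

    import java.util.List;

    import org.apache.geode.internal.cache.GemFireCacheImpl;
    import org.apache.geode.internal.cache.InternalCache;

    // Hypothetical helper, for illustration only (not part of this commit).
    class CacheServerCounter {
      /** Returns the number of cache servers, or 0 if no cache is running. */
      int countCacheServers() {
        // Depend on the interface; the concrete class is used only to obtain the instance.
        final InternalCache cache = GemFireCacheImpl.getInstance();
        if (cache == null || cache.isClosed()) {
          return 0;
        }
        final List servers = cache.getCacheServers();
        return servers == null ? 0 : servers.size();
      }
    }

Separately, the TODO comments added in RangeIndex ("non-atomic operation on volatile int") flag that ++valueToEntriesMapSize is a read-modify-write and can lose updates under concurrent access even though the field is volatile. A conventional remedy -- not applied by this commit -- is java.util.concurrent.atomic.AtomicInteger; a minimal sketch with a hypothetical stand-in for the counter:

    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical stand-in for RangeIndex.valueToEntriesMapSize, for illustration only.
    class KeyCounter {
      private final AtomicInteger size = new AtomicInteger();

      void keyAdded() {
        size.incrementAndGet(); // atomic, unlike ++ on a volatile int
      }

      void keyRemoved() {
        size.decrementAndGet();
      }

      int current() {
        return size.get();
      }
    }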