lucene-commits mailing list archives

From mikemcc...@apache.org
Subject svn commit: r1578144 [23/37] - in /lucene/dev/branches/lucene5376_2: ./ dev-tools/ dev-tools/idea/.idea/libraries/ dev-tools/idea/solr/contrib/dataimporthandler/ dev-tools/idea/solr/contrib/map-reduce/ dev-tools/idea/solr/core/src/test/ dev-tools/scrip...
Date Sun, 16 Mar 2014 19:39:37 GMT
Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/expression/ExpressionFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/expression/ExpressionFactory.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/expression/ExpressionFactory.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/expression/ExpressionFactory.java Sun Mar 16 19:39:10 2014
@@ -158,7 +158,7 @@ public class ExpressionFactory {
     String[] strings = new String[1];
     int stack = 0;
     int start = 0;
-    List<String> arguments = new ArrayList<String>();
+    List<String> arguments = new ArrayList<>();
     char[] chars = expression.toCharArray();
     for (int count = 0; count < expression.length(); count++) {
       char c = chars[count];

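Nearly every hunk in this commit makes the same mechanical change: redundant generic type arguments on the constructor side are replaced by the Java 7 diamond operator ("<>"), letting the compiler infer them from the declared type. A minimal, self-contained sketch of the before/after (variable names here are illustrative, not taken from the Solr sources):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondExample {
      public static void main(String[] args) {
        // Pre-Java 7: the type arguments are spelled out on both sides.
        List<String> before = new ArrayList<String>();
        // Java 7+: the diamond infers <String> from the declared type.
        List<String> after = new ArrayList<>();
        // Inference also covers nested generics, as in AnalyticsRequestFactory below.
        Map<String, Map<String, List<String>>> nested = new HashMap<>();
        before.add("a");
        after.add("b");
        nested.put("req", new HashMap<String, List<String>>());
        System.out.println(before + " " + after + " " + nested);
      }
    }

Both forms produce the same type at compile time; the change is purely syntactic, which is why it can be applied across the whole branch without behavioral risk.
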
Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java Sun Mar 16 19:39:10 2014
@@ -88,7 +88,7 @@ public class AnalyticsStatisticsCollecto
   }
 
   public NamedList<Object> getStatistics() {
-    NamedList<Object> lst = new SimpleOrderedMap<Object>();
+    NamedList<Object> lst = new SimpleOrderedMap<>();
     Snapshot snapshot = requestTimes.getSnapshot();
     lst.add("requests", numRequests.longValue());
     lst.add("analyticsRequests", numAnalyticsRequests.longValue());

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java Sun Mar 16 19:39:10 2014
@@ -177,28 +177,28 @@ public class AnalyticsContentHandler imp
           }
           
           // Initiate Range Facet classes
-          gaps = new ArrayList<String>();
+          gaps = new ArrayList<>();
           includeBoundaries = EnumSet.noneOf(FacetRangeInclude.class);
           otherRanges = EnumSet.noneOf(FacetRangeOther.class);
           inRangeFacet = true;
         } else if (localName.equals(QUERY_FACET)) {
           // Start a Query Facet Request
-          queries = new ArrayList<String>();
+          queries = new ArrayList<>();
           inQueryFacet = true;
         }
       } else if (localName.equals(ANALYTICS_REQUEST)){
         // Start an Analytics Request
         
         // Renew each list.
-        fieldFacetList = new ArrayList<FieldFacetRequest>();
-        rangeFacetList = new ArrayList<RangeFacetRequest>();
-        queryFacetList = new ArrayList<QueryFacetRequest>();
-        expressionList = new ArrayList<ExpressionRequest>();
+        fieldFacetList = new ArrayList<>();
+        rangeFacetList = new ArrayList<>();
+        queryFacetList = new ArrayList<>();
+        expressionList = new ArrayList<>();
         inRequest = true;
       }
     } else if (localName.equals(ANALYTICS_REQUEST_ENVELOPE)){
       //Begin the parsing of the Analytics Requests
-      requests = new ArrayList<AnalyticsRequest>();
+      requests = new ArrayList<>();
       inEnvelope = true;
     }
   }

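The handler above is a classic SAX state machine: boolean flags such as inEnvelope, inRequest, inRangeFacet and inQueryFacet record where in the element tree the parser currently is, and the per-request lists are renewed whenever the corresponding opening element is seen. A stripped-down sketch of the same pattern (element names and fields here are hypothetical, not the real AnalyticsContentHandler constants):

    import java.util.ArrayList;
    import java.util.List;

    import org.xml.sax.Attributes;
    import org.xml.sax.helpers.DefaultHandler;

    public class EnvelopeHandler extends DefaultHandler {
      private boolean inEnvelope, inRequest;
      private List<String> requests;

      @Override
      public void startElement(String uri, String localName, String qName, Attributes attrs) {
        if (localName.equals("requestEnvelope")) {
          requests = new ArrayList<>(); // renew the list for each envelope
          inEnvelope = true;
        } else if (inEnvelope && localName.equals("request")) {
          inRequest = true;
        }
      }

      @Override
      public void endElement(String uri, String localName, String qName) {
        if (inRequest && localName.equals("request")) {
          requests.add("one parsed request"); // placeholder for accumulated state
          inRequest = false;
        } else if (localName.equals("requestEnvelope")) {
          inEnvelope = false;
        }
      }
    }
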
Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequest.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequest.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequest.java Sun Mar 16 19:39:10 2014
@@ -38,11 +38,11 @@ public class AnalyticsRequest {
   
   public AnalyticsRequest(String name) {
     this.name = name;
-    expressions = new ArrayList<ExpressionRequest>();
-    hiddenExpressions = new HashSet<String>();
-    fieldFacets = new ArrayList<FieldFacetRequest>();
-    rangeFacets = new ArrayList<RangeFacetRequest>();
-    queryFacets = new ArrayList<QueryFacetRequest>();
+    expressions = new ArrayList<>();
+    hiddenExpressions = new HashSet<>();
+    fieldFacets = new ArrayList<>();
+    rangeFacets = new ArrayList<>();
+    queryFacets = new ArrayList<>();
   }
   
   public String getName() {

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequestFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequestFactory.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequestFactory.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequestFactory.java Sun Mar 16 19:39:10 2014
@@ -51,14 +51,14 @@ public class AnalyticsRequestFactory imp
   public static final Pattern queryFacetParamPattern = Pattern.compile("^o(?:lap)?\\.([^\\.]+)\\.(?:"+QUERY_FACET+")\\.([^\\.]+)\\.("+QUERY+"|"+DEPENDENCY+")$", Pattern.CASE_INSENSITIVE);
   
   public static List<AnalyticsRequest> parse(IndexSchema schema, SolrParams params) {
-    Map<String, AnalyticsRequest> requestMap = new HashMap<String, AnalyticsRequest>();
-    Map<String, Map<String,FieldFacetRequest>> fieldFacetMap = new HashMap<String, Map<String,FieldFacetRequest>>();
-    Map<String, Set<String>> fieldFacetSet = new HashMap<String,Set<String>>();
-    Map<String, Map<String,RangeFacetRequest>> rangeFacetMap = new HashMap<String, Map<String,RangeFacetRequest>>();
-    Map<String, Set<String>> rangeFacetSet = new HashMap<String,Set<String>>();
-    Map<String, Map<String,QueryFacetRequest>> queryFacetMap = new HashMap<String, Map<String,QueryFacetRequest>>();
-    Map<String, Set<String>> queryFacetSet = new HashMap<String,Set<String>>();
-    List<AnalyticsRequest> requestList = new ArrayList<AnalyticsRequest>();
+    Map<String, AnalyticsRequest> requestMap = new HashMap<>();
+    Map<String, Map<String,FieldFacetRequest>> fieldFacetMap = new HashMap<>();
+    Map<String, Set<String>> fieldFacetSet = new HashMap<>();
+    Map<String, Map<String,RangeFacetRequest>> rangeFacetMap = new HashMap<>();
+    Map<String, Set<String>> rangeFacetSet = new HashMap<>();
+    Map<String, Map<String,QueryFacetRequest>> queryFacetMap = new HashMap<>();
+    Map<String, Set<String>> queryFacetSet = new HashMap<>();
+    List<AnalyticsRequest> requestList = new ArrayList<>();
     
     Iterator<String> paramsIterator = params.getParameterNamesIterator();
     while (paramsIterator.hasNext()) {
@@ -115,7 +115,7 @@ public class AnalyticsRequestFactory imp
     }
     for (String reqName : requestMap.keySet()) {
       AnalyticsRequest ar = requestMap.get(reqName);
-      List<FieldFacetRequest> ffrs = new ArrayList<FieldFacetRequest>();
+      List<FieldFacetRequest> ffrs = new ArrayList<>();
       if (fieldFacetSet.get(reqName)!=null) {
         for (String field : fieldFacetSet.get(reqName)) {
           ffrs.add(fieldFacetMap.get(reqName).get(field));
@@ -123,7 +123,7 @@ public class AnalyticsRequestFactory imp
       }
       ar.setFieldFacets(ffrs);
       
-      List<RangeFacetRequest> rfrs = new ArrayList<RangeFacetRequest>();
+      List<RangeFacetRequest> rfrs = new ArrayList<>();
       if (rangeFacetSet.get(reqName)!=null) {
         for (String field : rangeFacetSet.get(reqName)) {
           RangeFacetRequest rfr = rangeFacetMap.get(reqName).get(field);
@@ -134,7 +134,7 @@ public class AnalyticsRequestFactory imp
       }
       ar.setRangeFacets(rfrs);
       
-      List<QueryFacetRequest> qfrs = new ArrayList<QueryFacetRequest>();
+      List<QueryFacetRequest> qfrs = new ArrayList<>();
       if (queryFacetSet.get(reqName)!=null) {
         for (String name : queryFacetSet.get(reqName)) {
           QueryFacetRequest qfr = queryFacetMap.get(reqName).get(name);
@@ -157,12 +157,12 @@ public class AnalyticsRequestFactory imp
   private static void makeFieldFacet(IndexSchema schema, Map<String, Map<String, FieldFacetRequest>> fieldFacetMap, Map<String, Set<String>> fieldFacetSet, String requestName, String[] fields) {
     Map<String, FieldFacetRequest> facetMap = fieldFacetMap.get(requestName);
     if (facetMap == null) {
-      facetMap = new HashMap<String, FieldFacetRequest>();
+      facetMap = new HashMap<>();
       fieldFacetMap.put(requestName, facetMap);
     }
     Set<String> set = fieldFacetSet.get(requestName);
     if (set == null) {
-      set = new HashSet<String>();
+      set = new HashSet<>();
       fieldFacetSet.put(requestName, set);
     }
     for (String field : fields) {
@@ -176,7 +176,7 @@ public class AnalyticsRequestFactory imp
   private static void setFieldFacetParam(IndexSchema schema, Map<String, Map<String, FieldFacetRequest>> fieldFacetMap, String requestName, String field, String paramType, String[] params) {
     Map<String, FieldFacetRequest> facetMap = fieldFacetMap.get(requestName);
     if (facetMap == null) {
-      facetMap = new HashMap<String, FieldFacetRequest>();
+      facetMap = new HashMap<>();
       fieldFacetMap.put(requestName, facetMap);
     }
     FieldFacetRequest fr = facetMap.get(field);
@@ -202,7 +202,7 @@ public class AnalyticsRequestFactory imp
   private static void makeRangeFacet(IndexSchema schema, Map<String, Set<String>> rangeFacetSet, String requestName, String[] fields) {
     Set<String> set = rangeFacetSet.get(requestName);
     if (set == null) {
-      set = new HashSet<String>();
+      set = new HashSet<>();
       rangeFacetSet.put(requestName, set);
     }
     for (String field : fields) {
@@ -213,7 +213,7 @@ public class AnalyticsRequestFactory imp
   private static void setRangeFacetParam(IndexSchema schema, Map<String, Map<String, RangeFacetRequest>> rangeFacetMap, String requestName, String field, String paramType, String[] params) {
     Map<String, RangeFacetRequest> facetMap = rangeFacetMap.get(requestName);
     if (facetMap == null) {
-      facetMap = new HashMap<String, RangeFacetRequest>();
+      facetMap = new HashMap<>();
       rangeFacetMap.put(requestName, facetMap);
     }
     RangeFacetRequest rr = facetMap.get(field);
@@ -243,7 +243,7 @@ public class AnalyticsRequestFactory imp
   private static void makeQueryFacet(IndexSchema schema,Map<String, Set<String>> queryFacetSet, String requestName, String[] names) {
     Set<String> set = queryFacetSet.get(requestName);
     if (set == null) {
-      set = new HashSet<String>();
+      set = new HashSet<>();
       queryFacetSet.put(requestName, set);
     }
     for (String name : names) {
@@ -254,7 +254,7 @@ public class AnalyticsRequestFactory imp
   private static void setQueryFacetParam(IndexSchema schema, Map<String, Map<String, QueryFacetRequest>> queryFacetMap, String requestName, String name, String paramType, String[] params) {
     Map<String, QueryFacetRequest> facetMap = queryFacetMap.get(requestName);
     if (facetMap == null) {
-      facetMap = new HashMap<String, QueryFacetRequest>();
+      facetMap = new HashMap<>();
       queryFacetMap.put(requestName, facetMap);
     }
     QueryFacetRequest qr = facetMap.get(name);

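makeFieldFacet, setFieldFacetParam, makeRangeFacet, makeQueryFacet and their siblings above all repeat the same Java 7 get-or-create idiom for nested maps and sets. A minimal sketch of that idiom, plus the Java 8 computeIfAbsent form that later collapses it to one line (not available to this branch at the time of the commit):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class GetOrCreateExample {
      public static void main(String[] args) {
        Map<String, Set<String>> fieldFacetSet = new HashMap<>();

        // The idiom used throughout AnalyticsRequestFactory:
        Set<String> set = fieldFacetSet.get("request1");
        if (set == null) {
          set = new HashSet<>();
          fieldFacetSet.put("request1", set);
        }
        set.add("fieldA");

        // Java 8+ equivalent:
        fieldFacetSet.computeIfAbsent("request2", k -> new HashSet<>()).add("fieldB");

        System.out.println(fieldFacetSet);
      }
    }
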
Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java Sun Mar 16 19:39:10 2014
@@ -60,7 +60,7 @@ public class AnalyticsStats {
    */
   public NamedList<?> execute() throws IOException {
     statsCollector.startRequest();
-    NamedList<Object> res = new NamedList<Object>();
+    NamedList<Object> res = new NamedList<>();
     List<AnalyticsRequest> requests;
     
     requests = AnalyticsRequestFactory.parse(searcher.getSchema(), params);

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/QueryFacetRequest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/QueryFacetRequest.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/QueryFacetRequest.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/request/QueryFacetRequest.java Sun Mar 16 19:39:10 2014
@@ -31,13 +31,13 @@ public class QueryFacetRequest implement
   private Set<String> dependencies;
   
   public QueryFacetRequest() {
-    dependencies = new HashSet<String>();
+    dependencies = new HashSet<>();
   }
 
   public QueryFacetRequest(String name) {
     this.name = name;
-    this.queries = new ArrayList<String>();
-    dependencies = new HashSet<String>();
+    this.queries = new ArrayList<>();
+    dependencies = new HashSet<>();
   }
  
   public List<String> getQueries() {

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/MedianStatsCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/MedianStatsCollector.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/MedianStatsCollector.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/MedianStatsCollector.java Sun Mar 16 19:39:10 2014
@@ -28,7 +28,7 @@ import org.apache.solr.analytics.util.Me
  */
 public class MedianStatsCollector extends AbstractDelegatingStatsCollector{
 
-  private final List<Double> values = new ArrayList<Double>();
+  private final List<Double> values = new ArrayList<>();
   protected double median;
   
   public MedianStatsCollector(StatsCollector delegate) {

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/PercentileStatsCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/PercentileStatsCollector.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/PercentileStatsCollector.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/PercentileStatsCollector.java Sun Mar 16 19:39:10 2014
@@ -30,7 +30,7 @@ import com.google.common.collect.Iterabl
  */
 @SuppressWarnings("rawtypes")
 public class PercentileStatsCollector extends AbstractDelegatingStatsCollector{
-  public final List<Comparable> values = new ArrayList<Comparable>();
+  public final List<Comparable> values = new ArrayList<>();
   public static final Pattern PERCENTILE_PATTERN = Pattern.compile("perc(?:entile)?_(\\d+)",Pattern.CASE_INSENSITIVE);
   protected final double[] percentiles;
   protected final String[] percentileNames;

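PERCENTILE_PATTERN above accepts both the short ("perc_25") and long ("percentile_25") spellings, case-insensitively, and captures the numeric percentile in group 1. A small sketch of how such a pattern classifies statistic names (the sample strings are illustrative only):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class PercentilePatternExample {
      // Same expression as PercentileStatsCollector.PERCENTILE_PATTERN.
      static final Pattern PERCENTILE_PATTERN =
          Pattern.compile("perc(?:entile)?_(\\d+)", Pattern.CASE_INSENSITIVE);

      public static void main(String[] args) {
        for (String stat : new String[] {"perc_25", "Percentile_99", "median"}) {
          Matcher m = PERCENTILE_PATTERN.matcher(stat);
          if (m.matches()) {
            // group(1) holds the requested percentile, e.g. 25 or 99.
            System.out.println(stat + " -> percentile " + Integer.parseInt(m.group(1)));
          } else {
            System.out.println(stat + " -> not a percentile statistic");
          }
        }
      }
    }
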
Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java Sun Mar 16 19:39:10 2014
@@ -88,9 +88,9 @@ public class StatsCollectorSupplierFacto
    */
   @SuppressWarnings("unchecked")
   public static Supplier<StatsCollector[]> create(IndexSchema schema, AnalyticsRequest request) {
-    final Map<String, Set<String>> collectorStats =  new HashMap<String, Set<String>>();
-    final Map<String, Set<Integer>> collectorPercs =  new HashMap<String, Set<Integer>>();
-    final Map<String, ValueSource> collectorSources =  new HashMap<String, ValueSource>();
+    final Map<String, Set<String>> collectorStats =  new HashMap<>();
+    final Map<String, Set<Integer>> collectorPercs =  new HashMap<>();
+    final Map<String, ValueSource> collectorSources =  new HashMap<>();
     
    // Iterate through all expression requests to make a list of ValueSource strings
     // and statistics that need to be calculated on those ValueSources.
@@ -121,7 +121,7 @@ public class StatsCollectorSupplierFacto
           source = arguments[1];
           Set<Integer> percs = collectorPercs.get(source);
           if (percs == null) {
-            percs = new HashSet<Integer>();
+            percs = new HashSet<>();
             collectorPercs.put(source, percs);
           }
           try {
@@ -143,7 +143,7 @@ public class StatsCollectorSupplierFacto
         // each ValueSource, even across different expression requests
         Set<String> stats = collectorStats.get(source);
         if (stats == null) {
-          stats = new HashSet<String>();
+          stats = new HashSet<>();
           collectorStats.put(source, stats);
         }
         stats.add(stat);
@@ -244,7 +244,7 @@ public class StatsCollectorSupplierFacto
    * @return The set of statistics (sum, mean, median, etc.) found in the expression
    */
   public static Set<String> getStatistics(String expression) {
-    HashSet<String> set = new HashSet<String>();
+    HashSet<String> set = new HashSet<>();
     int firstParen = expression.indexOf('(');
     if (firstParen>0) {
       String topOperation = expression.substring(0,firstParen).trim();
@@ -511,7 +511,7 @@ public class StatsCollectorSupplierFacto
     } else if (operation.equals(AnalyticsParams.FILTER)) {
       return buildFilterSource(schema, operands, NUMBER_TYPE);
     }
-    List<ValueSource> subExpressions = new ArrayList<ValueSource>();
+    List<ValueSource> subExpressions = new ArrayList<>();
     for (String argument : arguments) {
       ValueSource argSource = buildNumericSource(schema, argument);
       if (argSource == null) {
@@ -577,7 +577,7 @@ public class StatsCollectorSupplierFacto
       return buildFilterSource(schema, operands, DATE_TYPE);
     }
     if (operation.equals(AnalyticsParams.DATE_MATH)) {
-      List<ValueSource> subExpressions = new ArrayList<ValueSource>();
+      List<ValueSource> subExpressions = new ArrayList<>();
       boolean first = true;
       for (String argument : arguments) {
         ValueSource argSource;
@@ -632,7 +632,7 @@ public class StatsCollectorSupplierFacto
       }
       return new ReverseStringFunction(buildStringSource(schema, operands));
     }
-    List<ValueSource> subExpressions = new ArrayList<ValueSource>();
+    List<ValueSource> subExpressions = new ArrayList<>();
     for (String argument : arguments) {
       subExpressions.add(buildSourceTree(schema, argument));
     }

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/UniqueStatsCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/UniqueStatsCollector.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/UniqueStatsCollector.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/statistics/UniqueStatsCollector.java Sun Mar 16 19:39:10 2014
@@ -24,7 +24,7 @@ import java.util.Set;
  * <code>UniqueStatsCollector</code> computes the number of unique values.
  */
 public class UniqueStatsCollector extends AbstractDelegatingStatsCollector{
-  private final Set<Object> uniqueValues = new HashSet<Object>();
+  private final Set<Object> uniqueValues = new HashSet<>();
   
   public UniqueStatsCollector(StatsCollector delegate) {
     super(delegate);

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/util/PercentileCalculator.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/util/PercentileCalculator.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/util/PercentileCalculator.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/util/PercentileCalculator.java Sun Mar 16 19:39:10 2014
@@ -46,7 +46,7 @@ public class PercentileCalculator {
       throw new IllegalArgumentException();
     }
 
-    List<T> results = new ArrayList<T>(percs.length);
+    List<T> results = new ArrayList<>(percs.length);
 
     distributeAndFind(list, percentiles, 0, percentiles.length - 1);
 

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/util/RangeEndpointCalculator.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/util/RangeEndpointCalculator.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/util/RangeEndpointCalculator.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/analytics/util/RangeEndpointCalculator.java Sun Mar 16 19:39:10 2014
@@ -151,7 +151,7 @@ public abstract class RangeEndpointCalcu
         
     T low = start;
     
-    List<FacetRange> ranges = new ArrayList<FacetRange>();
+    List<FacetRange> ranges = new ArrayList<>();
     
     int gapCounter = 0;
     

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java Sun Mar 16 19:39:10 2014
@@ -90,7 +90,7 @@ public class JettySolrRunner {
   private String coreNodeName;
 
   /** Maps servlet holders (i.e. factories: class + init params) to path specs */
-  private SortedMap<ServletHolder,String> extraServlets = new TreeMap<ServletHolder,String>();
+  private SortedMap<ServletHolder,String> extraServlets = new TreeMap<>();
   private SortedMap<Class,String> extraRequestFilters;
   private LinkedList<FilterHolder> extraFilters;
 
@@ -106,7 +106,7 @@ public class JettySolrRunner {
     }
 
     // TODO: keep track of certain number of last requests
-    private LinkedList<HttpServletRequest> requests = new LinkedList<HttpServletRequest>();
+    private LinkedList<HttpServletRequest> requests = new LinkedList<>();
 
 
     @Override
@@ -188,7 +188,7 @@ public class JettySolrRunner {
       SortedMap<Class,String> extraRequestFilters) {
     if (null != extraServlets) { this.extraServlets.putAll(extraServlets); }
     if (null != extraRequestFilters) {
-      this.extraRequestFilters = new TreeMap<Class,String>(extraRequestFilters.comparator());
+      this.extraRequestFilters = new TreeMap<>(extraRequestFilters.comparator());
       this.extraRequestFilters.putAll(extraRequestFilters);
     }
     this.solrConfigFilename = solrConfigFilename;
@@ -316,7 +316,7 @@ public class JettySolrRunner {
 //        FilterHolder fh = new FilterHolder(filter);
         debugFilter = root.addFilter(DebugFilter.class, "*", EnumSet.of(DispatcherType.REQUEST) );
         if (extraRequestFilters != null) {
-          extraFilters = new LinkedList<FilterHolder>();
+          extraFilters = new LinkedList<>();
           for (Class filterClass : extraRequestFilters.keySet()) {
             extraFilters.add(root.addFilter(filterClass, extraRequestFilters.get(filterClass),
               EnumSet.of(DispatcherType.REQUEST)));

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/Assign.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/Assign.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/Assign.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/Assign.java Sun Mar 16 19:39:10 2014
@@ -86,7 +86,7 @@ public class Assign {
       return "shard1";
     }
 
-    List<String> shardIdNames = new ArrayList<String>(sliceMap.keySet());
+    List<String> shardIdNames = new ArrayList<>(sliceMap.keySet());
 
     if (shardIdNames.size() < numShards) {
       return "shard" + (shardIdNames.size() + 1);
@@ -95,7 +95,7 @@ public class Assign {
     // TODO: don't need to sort to find shard with fewest replicas!
 
     // else figure out which shard needs more replicas
-    final Map<String, Integer> map = new HashMap<String, Integer>();
+    final Map<String, Integer> map = new HashMap<>();
     for (String shardId : shardIdNames) {
       int cnt = sliceMap.get(shardId).getReplicasMap().size();
       map.put(shardId, cnt);
@@ -135,12 +135,12 @@ public class Assign {
 
     Set<String> nodes = clusterState.getLiveNodes();
 
-    List<String> nodeList = new ArrayList<String>(nodes.size());
+    List<String> nodeList = new ArrayList<>(nodes.size());
     nodeList.addAll(nodes);
     if (createNodeList != null) nodeList.retainAll(createNodeList);
 
 
-    HashMap<String,Node> nodeNameVsShardCount =  new HashMap<String, Node>();
+    HashMap<String,Node> nodeNameVsShardCount =  new HashMap<>();
     for (String s : nodeList) nodeNameVsShardCount.put(s,new Node(s));
     for (String s : clusterState.getCollections()) {
       DocCollection c = clusterState.getCollection(s);

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java Sun Mar 16 19:39:10 2014
@@ -82,7 +82,7 @@ public class DistributedQueue {
    */
   private TreeMap<Long,String> orderedChildren(Watcher watcher)
       throws KeeperException, InterruptedException {
-    TreeMap<Long,String> orderedChildren = new TreeMap<Long,String>();
+    TreeMap<Long,String> orderedChildren = new TreeMap<>();
     
     List<String> childNames = null;
     try {

Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java Sun Mar 16 19:39:10 2014
@@ -2,6 +2,7 @@ package org.apache.solr.cloud;
 
 import java.io.IOException;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.solr.common.SolrException;
@@ -14,6 +15,8 @@ import org.apache.solr.common.cloud.ZkCm
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.RetryUtil;
+import org.apache.solr.common.util.RetryUtil.RetryCmd;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.search.SolrIndexSearcher;
@@ -22,6 +25,7 @@ import org.apache.solr.util.RefCounted;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -71,7 +75,7 @@ public abstract class ElectionContext {
     }
   }
 
-  abstract void runLeaderProcess(boolean weAreReplacement, int pauseTime) throws KeeperException, InterruptedException, IOException;
+  abstract void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStartMs) throws KeeperException, InterruptedException, IOException;
 
   public void checkIfIamLeaderFired() {}
 
@@ -79,24 +83,28 @@ public abstract class ElectionContext {
 }
 
 class ShardLeaderElectionContextBase extends ElectionContext {
-  private static Logger log = LoggerFactory.getLogger(ShardLeaderElectionContextBase.class);
+  private static Logger log = LoggerFactory
+      .getLogger(ShardLeaderElectionContextBase.class);
   protected final SolrZkClient zkClient;
   protected String shardId;
   protected String collection;
   protected LeaderElector leaderElector;
-
-  public ShardLeaderElectionContextBase(LeaderElector leaderElector, final String shardId,
-      final String collection, final String coreNodeName, ZkNodeProps props, ZkStateReader zkStateReader) {
-    super(coreNodeName, ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/leader_elect/"
-        + shardId, ZkStateReader.getShardLeadersPath(collection, shardId),
-        props, zkStateReader.getZkClient());
+  
+  public ShardLeaderElectionContextBase(LeaderElector leaderElector,
+      final String shardId, final String collection, final String coreNodeName,
+      ZkNodeProps props, ZkStateReader zkStateReader) {
+    super(coreNodeName, ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection
+        + "/leader_elect/" + shardId, ZkStateReader.getShardLeadersPath(
+        collection, shardId), props, zkStateReader.getZkClient());
     this.leaderElector = leaderElector;
     this.zkClient = zkStateReader.getZkClient();
     this.shardId = shardId;
     this.collection = collection;
     
     try {
-      new ZkCmdExecutor(zkStateReader.getZkClient().getZkClientTimeout()).ensureExists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, zkClient);
+      new ZkCmdExecutor(zkStateReader.getZkClient().getZkClientTimeout())
+          .ensureExists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection,
+              zkClient);
     } catch (KeeperException e) {
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
     } catch (InterruptedException e) {
@@ -104,24 +112,40 @@ class ShardLeaderElectionContextBase ext
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
     }
   }
-
+  
   @Override
-  void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStart) throws KeeperException,
-      InterruptedException, IOException {
+  void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStartMs)
+      throws KeeperException, InterruptedException, IOException {
+    // register as leader - if an ephemeral is already there, wait just a bit
+    // to see if it goes away
+    try {
+      RetryUtil.retryOnThrowable(NodeExistsException.class, 15000, 1000,
+          new RetryCmd() {
+            
+            @Override
+            public void execute() throws Throwable {
+              zkClient.makePath(leaderPath, ZkStateReader.toJSON(leaderProps),
+                  CreateMode.EPHEMERAL, true);
+            }
+          });
+    } catch (Throwable t) {
+      if (t instanceof OutOfMemoryError) {
+        throw (OutOfMemoryError) t;
+      }
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Could not register as the leader because creating the ephemeral registration node in ZooKeeper failed", t);
+    }
     
-    zkClient.makePath(leaderPath, ZkStateReader.toJSON(leaderProps),
-        CreateMode.EPHEMERAL, true);
     assert shardId != null;
-    ZkNodeProps m = ZkNodeProps.fromKeyVals(Overseer.QUEUE_OPERATION, ZkStateReader.LEADER_PROP,
-        ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP,
-        collection, ZkStateReader.BASE_URL_PROP, leaderProps.getProperties()
-            .get(ZkStateReader.BASE_URL_PROP), ZkStateReader.CORE_NAME_PROP,
+    ZkNodeProps m = ZkNodeProps.fromKeyVals(Overseer.QUEUE_OPERATION,
+        ZkStateReader.LEADER_PROP, ZkStateReader.SHARD_ID_PROP, shardId,
+        ZkStateReader.COLLECTION_PROP, collection, ZkStateReader.BASE_URL_PROP,
+        leaderProps.getProperties().get(ZkStateReader.BASE_URL_PROP),
+        ZkStateReader.CORE_NAME_PROP,
         leaderProps.getProperties().get(ZkStateReader.CORE_NAME_PROP),
         ZkStateReader.STATE_PROP, ZkStateReader.ACTIVE);
     Overseer.getInQueue(zkClient).offer(ZkStateReader.toJSON(m));
-    
   }
-
+  
 }
 
 // add core container and stop passing core around...
@@ -334,9 +358,8 @@ final class ShardLeaderElectionContext e
     return false;
   }
 
-  private void waitForReplicasToComeUp(boolean weAreReplacement,
-      int timeout) throws InterruptedException {
-    long timeoutAt = System.currentTimeMillis() + timeout;
+  private void waitForReplicasToComeUp(boolean weAreReplacement, int timeoutms) throws InterruptedException {
+    long timeoutAt = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeoutms, TimeUnit.MILLISECONDS);
     final String shardsElectZkPath = electionPath + LeaderElector.ELECTION_NODE;
     
     Slice slices = zkController.getClusterState().getSlice(collection, shardId);
@@ -360,11 +383,11 @@ final class ShardLeaderElectionContext e
           if (cnt % 40 == 0) {
             log.info("Waiting until we see more replicas up for shard " + shardId + ": total="
               + slices.getReplicasMap().size() + " found=" + found
-              + " timeoutin=" + (timeoutAt - System.currentTimeMillis()));
+              + " timeoutin=" + ((timeoutAt - System.nanoTime()) / (float) 1000000) + "ms");
           }
         }
         
-        if (System.currentTimeMillis() > timeoutAt) {
+        if (System.nanoTime() > timeoutAt) {
           log.info("Was waiting for replicas to come up, but they are taking too long - assuming they won't come back till later");
           return;
         }
@@ -449,7 +472,7 @@ final class OverseerElectionContext exte
   }
 
   @Override
-  void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStart) throws KeeperException,
+  void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStartMs) throws KeeperException,
       InterruptedException {
     log.info("I am going to be the leader {}", id);
     final String id = leaderSeqPath
@@ -458,10 +481,11 @@ final class OverseerElectionContext exte
 
     zkClient.makePath(leaderPath, ZkStateReader.toJSON(myProps),
         CreateMode.EPHEMERAL, true);
-    if(pauseBeforeStart >0){
+    if(pauseBeforeStartMs >0){
       try {
-        Thread.sleep(pauseBeforeStart);
+        Thread.sleep(pauseBeforeStartMs);
       } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
         log.warn("Wait interrupted ", e);
       }
     }

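The most substantive change in ElectionContext is the leader-registration retry: if the previous leader's ephemeral znode has not yet expired, makePath fails with NodeExistsException, and the new code retries for up to 15 seconds at 1-second intervals instead of failing outright. A self-contained sketch of a retry helper in the spirit of RetryUtil.retryOnThrowable (the real org.apache.solr.common.util.RetryUtil may differ in detail; the simulated exception below stands in for ZooKeeper's NodeExistsException):

    import java.util.concurrent.TimeUnit;

    public class RetrySketch {

      interface RetryCmd {
        void execute() throws Throwable;
      }

      static void retryOnThrowable(Class<? extends Throwable> clazz, long timeoutMs,
          long intervalMs, RetryCmd cmd) throws Throwable {
        long timeoutAt = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        while (true) {
          try {
            cmd.execute();
            return; // success
          } catch (Throwable t) {
            // Retry only the expected throwable, and only until the deadline.
            if (!clazz.isInstance(t) || System.nanoTime() > timeoutAt) {
              throw t;
            }
            Thread.sleep(intervalMs); // give the stale ephemeral node time to expire
          }
        }
      }

      public static void main(String[] args) throws Throwable {
        final int[] attempts = {0};
        retryOnThrowable(IllegalStateException.class, 15000, 1000, new RetryCmd() {
          @Override
          public void execute() {
            if (++attempts[0] < 3) {
              throw new IllegalStateException("node still exists");
            }
          }
        });
        System.out.println("succeeded after " + attempts[0] + " attempts");
      }
    }

Note also the OutOfMemoryError re-throw in the caller above: wrapping an OutOfMemoryError in a SolrException would hide a condition the JVM cannot recover from, so it is propagated as-is.
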
Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java Sun Mar 16 19:39:10 2014
@@ -212,7 +212,7 @@ public  class LeaderElector {
    * @return int seqs
    */
   private List<Integer> getSeqs(List<String> seqs) {
-    List<Integer> intSeqs = new ArrayList<Integer>(seqs.size());
+    List<Integer> intSeqs = new ArrayList<>(seqs.size());
     for (String seq : seqs) {
       intSeqs.add(getSeq(seq));
     }

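Both ElectionContext above and Overseer below move their timeout bookkeeping from System.currentTimeMillis() to System.nanoTime(). nanoTime() is monotonic, so a deadline computed from it cannot be thrown off by NTP corrections or manual wall-clock changes. A minimal sketch of the deadline pattern, with a hypothetical condition standing in for "enough replicas have come up":

    import java.util.concurrent.TimeUnit;

    public class MonotonicDeadlineExample {
      public static void main(String[] args) throws InterruptedException {
        int timeoutMs = 500; // illustrative value, not taken from the commit
        long timeoutAt = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);

        while (!conditionMet()) {
          if (System.nanoTime() > timeoutAt) {
            System.out.println("gave up after " + timeoutMs + "ms");
            return;
          }
          Thread.sleep(50); // poll interval
        }
        System.out.println("condition met before the deadline");
      }

      // Hypothetical stand-in for the real readiness check.
      private static boolean conditionMet() {
        return false;
      }
    }
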
Modified: lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/Overseer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/Overseer.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/Overseer.java (original)
+++ lucene/dev/branches/lucene5376_2/solr/core/src/java/org/apache/solr/cloud/Overseer.java Sun Mar 16 19:39:10 2014
@@ -17,6 +17,11 @@ package org.apache.solr.cloud;
  * the License.
  */
 
+import static java.util.Collections.singletonMap;
+import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CLUSTERPROP;
+
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -26,6 +31,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClosableThread;
@@ -46,11 +52,6 @@ import org.apache.zookeeper.KeeperExcept
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static java.util.Collections.singletonMap;
-import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CLUSTERPROP;
-
 /**
  * Cluster leader. Responsible for node assignments and the cluster state file.
  */
@@ -80,13 +81,23 @@ public class Overseer {
     //Internal queue where overseer stores events that have not yet been published into cloudstate
     //If Overseer dies while extracting the main queue a new overseer will start from this queue 
     private final DistributedQueue workQueue;
-    private volatile boolean isClosed;
+    // Internal map which holds the information about running tasks.
+    private final DistributedMap runningMap;
+    // Internal map which holds the information about successfully completed tasks.
+    private final DistributedMap completedMap;
+    // Internal map which holds the information about failed tasks.
+    private final DistributedMap failureMap;
+
     private Map clusterProps;
+    private boolean isClosed = false;
 
     public ClusterStateUpdater(final ZkStateReader reader, final String myId) {
       this.zkClient = reader.getZkClient();
       this.stateUpdateQueue = getInQueue(zkClient);
       this.workQueue = getInternalQueue(zkClient);
+      this.failureMap = getFailureMap(zkClient);
+      this.runningMap = getRunningMap(zkClient);
+      this.completedMap = getCompletedMap(zkClient);
       this.myId = myId;
       this.reader = reader;
       clusterProps = reader.getClusterProps();
@@ -122,10 +133,19 @@ public class Overseer {
                 else if (LeaderStatus.YES == isLeader) {
                   final ZkNodeProps message = ZkNodeProps.load(head);
                   final String operation = message.getStr(QUEUE_OPERATION);
-                  clusterState = processMessage(clusterState, message, operation);
+                  try {
+                    clusterState = processMessage(clusterState, message, operation);
+                  } catch (Exception e) {
+                    // generally there is nothing we can do - in most cases, we have
+                    // an issue that will fail again on retry or we cannot communicate with
+                    // ZooKeeper in which case another Overseer should take over
+                    // TODO: if ordering for the message is not important, we could
+                    // track retries and put it back on the end of the queue
+                    log.error("Overseer could not process the current cluster state update message, skipping the message.", e);
+                  }
                   zkClient.setData(ZkStateReader.CLUSTER_STATE,
                       ZkStateReader.toJSON(clusterState), true);
-                  
+
                   workQueue.poll(); // poll-ing removes the element we got by peek-ing
                 }
                 else {
@@ -188,18 +208,26 @@ public class Overseer {
             while (head != null) {
               final ZkNodeProps message = ZkNodeProps.load(head.getBytes());
               final String operation = message.getStr(QUEUE_OPERATION);
-
-              clusterState = processMessage(clusterState, message, operation);
+              try {
+                clusterState = processMessage(clusterState, message, operation);
+              } catch (Exception e) {
+                // generally there is nothing we can do - in most cases, we have
+                // an issue that will fail again on retry or we cannot communicate with
+                // ZooKeeper in which case another Overseer should take over
+                // TODO: if ordering for the message is not important, we could
+                // track retries and put it back on the end of the queue
+                log.error("Overseer could not process the current cluster state update message, skipping the message.", e);
+              }
               workQueue.offer(head.getBytes());
 
               stateUpdateQueue.poll();
 
-              if (System.currentTimeMillis() - lastUpdatedTime > STATE_UPDATE_DELAY) break;
+              if (System.nanoTime() - lastUpdatedTime > TimeUnit.NANOSECONDS.convert(STATE_UPDATE_DELAY, TimeUnit.MILLISECONDS)) break;
               
               // if an event comes in the next 100ms batch it together
               head = stateUpdateQueue.peek(100); 
             }
-            lastUpdatedTime = System.currentTimeMillis();
+            lastUpdatedTime = System.nanoTime();
             zkClient.setData(ZkStateReader.CLUSTER_STATE,
                 ZkStateReader.toJSON(clusterState), true);
             // clean work queue
@@ -293,6 +321,7 @@ public class Overseer {
     private ClusterState createReplica(ClusterState clusterState, ZkNodeProps message) {
       log.info("createReplica() {} ", message);
       String coll = message.getStr(ZkStateReader.COLLECTION_PROP);
+      if (!checkCollectionKeyExistence(message)) return clusterState;
       String slice = message.getStr(ZkStateReader.SHARD_ID_PROP);
       Slice sl = clusterState.getSlice(coll, slice);
       if(sl == null){
@@ -318,7 +347,7 @@ public class Overseer {
         return clusterState;
       }
 
-      ArrayList<String> shardNames = new ArrayList<String>();
+      ArrayList<String> shardNames = new ArrayList<>();
 
       if(ImplicitDocRouter.NAME.equals( message.getStr("router.name",DocRouter.DEFAULT_NAME))){
         getShardNames(shardNames,message.getStr("shards",DocRouter.DEFAULT_NAME));
@@ -333,6 +362,7 @@ public class Overseer {
 
     private ClusterState updateShardState(ClusterState clusterState, ZkNodeProps message) {
       String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
+      if (!checkCollectionKeyExistence(message)) return clusterState;
       log.info("Update shard state invoked for collection: " + collection + " with message: " + message);
       for (String key : message.keySet()) {
         if (ZkStateReader.COLLECTION_PROP.equals(key)) continue;
@@ -357,6 +387,7 @@ public class Overseer {
 
     private ClusterState addRoutingRule(ClusterState clusterState, ZkNodeProps message) {
       String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
+      if (!checkCollectionKeyExistence(message)) return clusterState;
       String shard = message.getStr(ZkStateReader.SHARD_ID_PROP);
       String routeKey = message.getStr("routeKey");
       String range = message.getStr("range");
@@ -371,10 +402,10 @@ public class Overseer {
 
       Map<String, RoutingRule> routingRules = slice.getRoutingRules();
       if (routingRules == null)
-        routingRules = new HashMap<String, RoutingRule>();
+        routingRules = new HashMap<>();
       RoutingRule r = routingRules.get(routeKey);
       if (r == null) {
-        Map<String, Object> map = new HashMap<String, Object>();
+        Map<String, Object> map = new HashMap<>();
         map.put("routeRanges", range);
         map.put("targetCollection", targetCollection);
         map.put("expireAt", expireAt);
@@ -396,8 +427,22 @@ public class Overseer {
       return clusterState;
     }
 
+    private boolean checkCollectionKeyExistence(ZkNodeProps message) {
+      return checkKeyExistence(message, ZkStateReader.COLLECTION_PROP);
+    }
+    
+    private boolean checkKeyExistence(ZkNodeProps message, String key) {
+      String value = message.getStr(key);
+      if (value == null || value.trim().length() == 0) {
+        log.error("Skipping invalid Overseer message because it has no " + key + " specified: " + message);
+        return false;
+      }
+      return true;
+    }
+
     private ClusterState removeRoutingRule(ClusterState clusterState, ZkNodeProps message) {
       String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
+      if (!checkCollectionKeyExistence(message)) return clusterState;
       String shard = message.getStr(ZkStateReader.SHARD_ID_PROP);
       String routeKeyStr = message.getStr("routeKey");
 
@@ -423,11 +468,12 @@ public class Overseer {
 
     private ClusterState createShard(ClusterState clusterState, ZkNodeProps message) {
       String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
+      if (!checkCollectionKeyExistence(message)) return clusterState;
       String shardId = message.getStr(ZkStateReader.SHARD_ID_PROP);
       Slice slice = clusterState.getSlice(collection, shardId);
       if (slice == null)  {
         Map<String, Replica> replicas = Collections.EMPTY_MAP;
-        Map<String, Object> sliceProps = new HashMap<String, Object>();
+        Map<String, Object> sliceProps = new HashMap<>();
         String shardRange = message.getStr(ZkStateReader.SHARD_RANGE_PROP);
         String shardState = message.getStr(ZkStateReader.SHARD_STATE_PROP);
         String shardParent = message.getStr(ZkStateReader.SHARD_PARENT_PROP);
@@ -469,6 +515,7 @@ public class Overseer {
 
     private ClusterState updateStateNew(ClusterState clusterState, ZkNodeProps message) {
       String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
+      if (!checkCollectionKeyExistence(message)) return clusterState;
       String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
 
       if(collection==null || sliceName == null){
@@ -487,32 +534,30 @@ public class Overseer {
       /**
        * Try to assign core to the cluster. 
        */
-      private ClusterState updateState(ClusterState state, final ZkNodeProps message) {
+      private ClusterState updateState(ClusterState clusterState, final ZkNodeProps message) {
         final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
-        assert collection.length() > 0 : message;
-        
-
+        if (!checkCollectionKeyExistence(message)) return clusterState;
         Integer numShards = message.getInt(ZkStateReader.NUM_SHARDS_PROP, null);
         log.info("Update state numShards={} message={}", numShards, message);
 
-        List<String> shardNames  = new ArrayList<String>();
+        List<String> shardNames  = new ArrayList<>();
 
         //collection does not yet exist, create placeholders if num shards is specified
-        boolean collectionExists = state.hasCollection(collection);
+        boolean collectionExists = clusterState.hasCollection(collection);
         if (!collectionExists && numShards!=null) {
           getShardNames(numShards, shardNames);
-          state = createCollection(state, collection, shardNames, message);
+          clusterState = createCollection(clusterState, collection, shardNames, message);
         }
         String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
 
         String coreNodeName = message.getStr(ZkStateReader.CORE_NODE_NAME_PROP);
         if (coreNodeName == null) {
-          coreNodeName = getAssignedCoreNodeName(state, message);
+          coreNodeName = getAssignedCoreNodeName(clusterState, message);
           if (coreNodeName != null) {
             log.info("node=" + coreNodeName + " is already registered");
           } else {
             // if coreNodeName is null, auto assign one
-            coreNodeName = Assign.assignNode(collection, state);
+            coreNodeName = Assign.assignNode(collection, clusterState);
           }
           message.getProperties().put(ZkStateReader.CORE_NODE_NAME_PROP,
               coreNodeName);
@@ -521,7 +566,7 @@ public class Overseer {
         // use the provided non null shardId
         if (sliceName == null) {
           //get shardId from ClusterState
-          sliceName = getAssignedId(state, coreNodeName, message);
+          sliceName = getAssignedId(clusterState, coreNodeName, message);
           if (sliceName != null) {
             log.info("shard=" + sliceName + " is already registered");
           }
@@ -530,16 +575,16 @@ public class Overseer {
           //request new shardId 
           if (collectionExists) {
             // use existing numShards
-            numShards = state.getCollection(collection).getSlices().size();
+            numShards = clusterState.getCollection(collection).getSlices().size();
             log.info("Collection already exists with " + ZkStateReader.NUM_SHARDS_PROP + "=" + numShards);
           }
-          sliceName = Assign.assignShard(collection, state, numShards);
+          sliceName = Assign.assignShard(collection, clusterState, numShards);
           log.info("Assigning new node to shard shard=" + sliceName);
         }
 
-        Slice slice = state.getSlice(collection, sliceName);
+        Slice slice = clusterState.getSlice(collection, sliceName);
         
-        Map<String,Object> replicaProps = new LinkedHashMap<String,Object>();
+        Map<String,Object> replicaProps = new LinkedHashMap<>();
 
         replicaProps.putAll(message.getProperties());
         // System.out.println("########## UPDATE MESSAGE: " + JSONUtil.toJSON(message));
@@ -559,7 +604,7 @@ public class Overseer {
           
           // remove any props with null values
           Set<Entry<String,Object>> entrySet = replicaProps.entrySet();
-          List<String> removeKeys = new ArrayList<String>();
+          List<String> removeKeys = new ArrayList<>();
           for (Entry<String,Object> entry : entrySet) {
             if (entry.getValue() == null) {
               removeKeys.add(entry.getKey());
@@ -583,14 +628,14 @@ public class Overseer {
           Map<String,Replica> replicas;
 
           if (slice != null) {
-            state = checkAndCompleteShardSplit(state, collection, coreNodeName, sliceName, replicaProps);
+            clusterState = checkAndCompleteShardSplit(clusterState, collection, coreNodeName, sliceName, replicaProps);
             // get the current slice again because it may have been updated due to checkAndCompleteShardSplit method
-            slice = state.getSlice(collection, sliceName);
+            slice = clusterState.getSlice(collection, sliceName);
             sliceProps = slice.getProperties();
             replicas = slice.getReplicasCopy();
           } else {
-            replicas = new HashMap<String, Replica>(1);
-            sliceProps = new HashMap<String, Object>();
+            replicas = new HashMap<>(1);
+            sliceProps = new HashMap<>();
             sliceProps.put(Slice.RANGE, shardRange);
             sliceProps.put(Slice.STATE, shardState);
             sliceProps.put(Slice.PARENT, shardParent);
@@ -599,7 +644,7 @@ public class Overseer {
           replicas.put(replica.getName(), replica);
           slice = new Slice(sliceName, replicas, sliceProps);
 
-          ClusterState newClusterState = updateSlice(state, collection, slice);
+          ClusterState newClusterState = updateSlice(clusterState, collection, slice);
           return newClusterState;
       }
 
@@ -626,8 +671,8 @@ public class Overseer {
           if (allActive)  {
             log.info("Shard: {} - all replicas are active. Finding status of fellow sub-shards", sliceName);
             // find out about other sub shards
-            Map<String, Slice> allSlicesCopy = new HashMap<String, Slice>(state.getSlicesMap(collection));
-            List<Slice> subShardSlices = new ArrayList<Slice>();
+            Map<String, Slice> allSlicesCopy = new HashMap<>(state.getSlicesMap(collection));
+            List<Slice> subShardSlices = new ArrayList<>();
             outer:
             for (Entry<String, Slice> entry : allSlicesCopy.entrySet()) {
               if (sliceName.equals(entry.getKey()))
@@ -653,7 +698,7 @@ public class Overseer {
               log.info("Shard: {} - All replicas across all fellow sub-shards are now ACTIVE. Preparing to switch shard states.", sliceName);
               String parentSliceName = (String) sliceProps.remove(Slice.PARENT);
 
-              Map<String, Object> propMap = new HashMap<String, Object>();
+              Map<String, Object> propMap = new HashMap<>();
               propMap.put(Overseer.QUEUE_OPERATION, "updateshardstate");
               propMap.put(parentSliceName, Slice.INACTIVE);
               propMap.put(sliceName, Slice.ACTIVE);
@@ -682,7 +727,7 @@ public class Overseer {
 //        Map<String, DocCollection> newCollections = new LinkedHashMap<String,DocCollection>();
 
 
-        Map<String, Slice> newSlices = new LinkedHashMap<String,Slice>();
+        Map<String, Slice> newSlices = new LinkedHashMap<>();
 //        newCollections.putAll(state.getCollectionStates());
         for (int i = 0; i < shards.size(); i++) {
           String sliceName = shards.get(i);
@@ -690,14 +735,14 @@ public class Overseer {
         for (int i = 0; i < numShards; i++) {
           final String sliceName = "shard" + (i+1);*/
 
-          Map<String, Object> sliceProps = new LinkedHashMap<String, Object>(1);
+          Map<String, Object> sliceProps = new LinkedHashMap<>(1);
          sliceProps.put(Slice.RANGE, ranges == null ? null : ranges.get(i));
 
           newSlices.put(sliceName, new Slice(sliceName, null, sliceProps));
         }
 
         // TODO: fill in with collection properties read from the /collections/<collectionName> node
-        Map<String,Object> collectionProps = new HashMap<String,Object>();
+        Map<String,Object> collectionProps = new HashMap<>();
 
         for (Entry<String, Object> e : OverseerCollectionProcessor.COLL_PROPS.entrySet()) {
           Object val = message.get(e.getKey());
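
The loop over OverseerCollectionProcessor.COLL_PROPS is truncated by the hunk; the pattern it implements is defaults-with-override: every known collection property receives a value, taken from the message when present and from the defaults map otherwise. An assumed completion of the loop body:

    Map<String, Object> collectionProps = new HashMap<>();
    for (Entry<String, Object> e : OverseerCollectionProcessor.COLL_PROPS.entrySet()) {
      Object val = message.get(e.getKey());
      // fall back to the shipped default when the message does not override it
      collectionProps.put(e.getKey(), val == null ? e.getValue() : val);
    }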
@@ -756,7 +801,7 @@ public class Overseer {
       private ClusterState updateSlice(ClusterState state, String collectionName, Slice slice) {
         // System.out.println("###!!!### OLD CLUSTERSTATE: " + JSONUtil.toJSON(state.getCollectionStates()));
         // System.out.println("Updating slice:" + slice);
-        Map<String, DocCollection> newCollections = new LinkedHashMap<String,DocCollection>(state.getCollectionStates());  // make a shallow copy
+        Map<String, DocCollection> newCollections = new LinkedHashMap<>(state.getCollectionStates());  // make a shallow copy
         DocCollection coll = newCollections.get(collectionName);
         Map<String,Slice> slices;
         Map<String,Object> props;
@@ -765,14 +810,14 @@ public class Overseer {
         if (coll == null) {
          //  when updateSlice is called on a collection that doesn't exist, it is currently because a core is publishing itself
          //  without explicitly creating the collection. In that case we assume custom sharding with an "implicit" router.
-          slices = new HashMap<String, Slice>(1);
-          props = new HashMap<String,Object>(1);
+          slices = new HashMap<>(1);
+          props = new HashMap<>(1);
           props.put(DocCollection.DOC_ROUTER, ZkNodeProps.makeMap("name",ImplicitDocRouter.NAME));
           router = new ImplicitDocRouter();
         } else {
           props = coll.getProperties();
           router = coll.getRouter();
-          slices = new LinkedHashMap<String, Slice>(coll.getSlicesMap()); // make a shallow copy
+          slices = new LinkedHashMap<>(coll.getSlicesMap()); // make a shallow copy
         }
         slices.put(slice.getName(), slice);
         DocCollection newCollection = new DocCollection(collectionName, slices, props, router);
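
updateSlice never mutates the existing state: it shallow-copies the one map it needs to change, swaps in the new entry, and rebuilds the immutable wrappers outward (Slice, DocCollection, ClusterState). Shallow copies are safe precisely because the contained objects are themselves immutable. The idiom generalizes to a small helper, sketched here with illustrative names:

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class CopyOnWrite {
      // Return a shallow copy of 'original' with a single entry added or replaced.
      static <K, V> Map<K, V> withEntry(Map<K, V> original, K key, V value) {
        Map<K, V> copy = new LinkedHashMap<>(original);
        copy.put(key, value);
        return copy;
      }
    }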
@@ -785,7 +830,7 @@ public class Overseer {
       
       private ClusterState setShardLeader(ClusterState state, String collectionName, String sliceName, String leaderUrl) {
 
-        final Map<String, DocCollection> newCollections = new LinkedHashMap<String,DocCollection>(state.getCollectionStates());
+        final Map<String, DocCollection> newCollections = new LinkedHashMap<>(state.getCollectionStates());
         DocCollection coll = newCollections.get(collectionName);
         if(coll == null) {
           log.error("Could not mark shard leader for non existing collection:" + collectionName);
@@ -794,7 +839,7 @@ public class Overseer {
 
         Map<String, Slice> slices = coll.getSlicesMap();
         // make a shallow copy and add it to the new collection
-        slices = new LinkedHashMap<String,Slice>(slices);
+        slices = new LinkedHashMap<>(slices);
 
         Slice slice = slices.get(sliceName);
         if (slice == null) {
@@ -809,7 +854,7 @@ public class Overseer {
 
           Replica oldLeader = slice.getLeader();
 
-          final Map<String,Replica> newReplicas = new LinkedHashMap<String,Replica>();
+          final Map<String,Replica> newReplicas = new LinkedHashMap<>();
 
           for (Replica replica : slice.getReplicas()) {
 
@@ -817,11 +862,11 @@ public class Overseer {
             String coreURL = ZkCoreNodeProps.getCoreUrl(replica.getStr(ZkStateReader.BASE_URL_PROP), replica.getStr(ZkStateReader.CORE_NAME_PROP));
 
             if (replica == oldLeader && !coreURL.equals(leaderUrl)) {
-              Map<String,Object> replicaProps = new LinkedHashMap<String,Object>(replica.getProperties());
+              Map<String,Object> replicaProps = new LinkedHashMap<>(replica.getProperties());
               replicaProps.remove(Slice.LEADER);
               replica = new Replica(replica.getName(), replicaProps);
             } else if (coreURL.equals(leaderUrl)) {
-              Map<String,Object> replicaProps = new LinkedHashMap<String,Object>(replica.getProperties());
+              Map<String,Object> replicaProps = new LinkedHashMap<>(replica.getProperties());
               replicaProps.put(Slice.LEADER, "true");  // TODO: allow booleans instead of strings
               replica = new Replica(replica.getName(), replicaProps);
             }
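
The two branches above are the same immutable update in opposite directions: a Replica is never modified in place; its property map is copied, the Slice.LEADER flag is added or dropped, and a replacement Replica is constructed. Condensed into one sketch (the `isNewLeader` flag is illustrative):

    Map<String, Object> props = new LinkedHashMap<>(replica.getProperties());
    if (isNewLeader) {
      props.put(Slice.LEADER, "true");   // the source carries a TODO: booleans, not strings
    } else {
      props.remove(Slice.LEADER);
    }
    replica = new Replica(replica.getName(), props);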
@@ -848,13 +893,9 @@ public class Overseer {
        * Remove collection from cloudstate
        */
       private ClusterState removeCollection(final ClusterState clusterState, ZkNodeProps message) {
-
         final String collection = message.getStr("name");
+        if (!checkKeyExistence(message, "name")) return clusterState;
 
-//        final Map<String, DocCollection> newCollections = new LinkedHashMap<String,DocCollection>(clusterState.getCollectionStates()); // shallow copy
-//        newCollections.remove(collection);
-
-//        ClusterState newState = new ClusterState(clusterState.getLiveNodes(), newCollections);
         return clusterState.copyWith(singletonMap(collection, (DocCollection)null));
       }
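
Two details are worth calling out in the slimmed-down removeCollection. First, the new checkKeyExistence guard turns a malformed message into a no-op instead of a downstream NullPointerException; the helper itself is not shown in this patch, but a plausible shape is:

    private boolean checkKeyExistence(ZkNodeProps message, String key) {
      String value = message.getStr(key);
      if (value == null || value.trim().length() == 0) {
        log.error("Skipping invalid Overseer message because it has no " + key
            + " specified: " + message);
        return false;
      }
      return true;
    }

Second, copyWith(singletonMap(collection, (DocCollection) null)) uses the null value as a tombstone: mapping a collection name to null in the overlay removes it from the resulting ClusterState.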
 
@@ -862,34 +903,30 @@ public class Overseer {
      * Remove collection slice from cloudstate
      */
     private ClusterState removeShard(final ClusterState clusterState, ZkNodeProps message) {
-      final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
       final String sliceId = message.getStr(ZkStateReader.SHARD_ID_PROP);
+      final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
+      if (!checkCollectionKeyExistence(message)) return clusterState;
 
       log.info("Removing collection: " + collection + " shard: " + sliceId + " from clusterstate");
 
-//      final Map<String, DocCollection> newCollections = new LinkedHashMap<String,DocCollection>(clusterState.getCollectionStates()); // shallow copy
       DocCollection coll = clusterState.getCollection(collection);
 
-      Map<String, Slice> newSlices = new LinkedHashMap<String, Slice>(coll.getSlicesMap());
+      Map<String, Slice> newSlices = new LinkedHashMap<>(coll.getSlicesMap());
       newSlices.remove(sliceId);
 
       DocCollection newCollection = new DocCollection(coll.getName(), newSlices, coll.getProperties(), coll.getRouter());
-//      newCollections.put(newCollection.getName(), newCollection);
       return newState(clusterState, singletonMap(collection,newCollection));
-
-//     return new ClusterState(clusterState.getLiveNodes(), newCollections);
     }
 
     /*
        * Remove core from cloudstate
        */
       private ClusterState removeCore(final ClusterState clusterState, ZkNodeProps message) {
-        
-        String cnn = message.getStr(ZkStateReader.CORE_NODE_NAME_PROP);
-
+        final String cnn = message.getStr(ZkStateReader.CORE_NODE_NAME_PROP);
         final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
+        if (!checkCollectionKeyExistence(message)) return clusterState;
 
-//        final Map<String, DocCollection> newCollections = new LinkedHashMap<String,DocCollection>(clusterState.getCollectionStates()); // shallow copy
+//        final Map<String, DocCollection> newCollections = new LinkedHashMap<>(clusterState.getCollectionStates()); // shallow copy
 //        DocCollection coll = newCollections.get(collection);
        DocCollection coll = clusterState.getCollectionOrNull(collection);
         if (coll == null) {
@@ -906,7 +943,7 @@ public class Overseer {
           return clusterState;
         }
 
-        Map<String, Slice> newSlices = new LinkedHashMap<String, Slice>();
+        Map<String, Slice> newSlices = new LinkedHashMap<>();
         boolean lastSlice = false;
         for (Slice slice : coll.getSlices()) {
           Replica replica = slice.getReplica(cnn);
@@ -1003,20 +1040,22 @@ public class Overseer {
 
   class OverseerThread extends Thread implements ClosableThread {
 
-    private volatile boolean isClosed;
+    protected volatile boolean isClosed;
+    private ClosableThread thread;
 
-    public OverseerThread(ThreadGroup tg,
-        ClusterStateUpdater clusterStateUpdater) {
-      super(tg, clusterStateUpdater);
+    public OverseerThread(ThreadGroup tg, ClosableThread thread) {
+      super(tg, (Runnable) thread);
+      this.thread = thread;
     }
 
-    public OverseerThread(ThreadGroup ccTg,
-        OverseerCollectionProcessor overseerCollectionProcessor, String string) {
-      super(ccTg, overseerCollectionProcessor, string);
+    public OverseerThread(ThreadGroup ccTg, ClosableThread thread, String name) {
+      super(ccTg, (Runnable) thread, name);
+      this.thread = thread;
     }
 
     @Override
     public void close() {
+      thread.close();
       this.isClosed = true;
     }
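
The reworked OverseerThread trades two concrete-type constructors for composition: any ClosableThread can be wrapped, and close() is delegated to the wrapped instance before the local flag is set. The `(Runnable)` casts suggest that, in this version, ClosableThread does not itself extend Runnable, so implementors are expected to provide both. A sketch of the contract as it appears from this diff:

    // Assumed shape of the interface, inferred from the casts above.
    interface ClosableThread {
      void close();
      boolean isClosed();
    }

    // Implementors such as ClusterStateUpdater and OverseerCollectionProcessor
    // would then implement Runnable and ClosableThread side by side:
    class ExampleProcessor implements Runnable, ClosableThread {
      private volatile boolean isClosed;
      @Override public void run() { while (!isClosed) { /* poll the work queue */ } }
      @Override public void close() { isClosed = true; }
      @Override public boolean isClosed() { return isClosed; }
    }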
 
@@ -1057,8 +1096,7 @@ public class Overseer {
     ThreadGroup ccTg = new ThreadGroup("Overseer collection creation process.");
 
     ocp = new OverseerCollectionProcessor(reader, id, shardHandler, adminPath);
-    ccThread = new OverseerThread(ccTg, ocp,
-        "Overseer-" + id);
+    ccThread = new OverseerThread(ccTg, ocp, "Overseer-" + id);
     ccThread.setDaemon(true);
     
     updaterThread.start();
@@ -1107,6 +1145,24 @@ public class Overseer {
     createOverseerNode(zkClient);
     return new DistributedQueue(zkClient, "/overseer/queue-work", null);
   }
+
+  /* Internal map for currently running tasks, not to be used outside of the Overseer */
+  static DistributedMap getRunningMap(final SolrZkClient zkClient) {
+    createOverseerNode(zkClient);
+    return new DistributedMap(zkClient, "/overseer/collection-map-running", null);
+  }
+
+  /* Internal map for successfully completed tasks, not to be used outside of the Overseer */
+  static DistributedMap getCompletedMap(final SolrZkClient zkClient) {
+    createOverseerNode(zkClient);
+    return new DistributedMap(zkClient, "/overseer/collection-map-completed", null);
+  }
+
+  /* Internal map for failed tasks, not to be used outside of the Overseer */
+  static DistributedMap getFailureMap(final SolrZkClient zkClient) {
+    createOverseerNode(zkClient);
+    return new DistributedMap(zkClient, "/overseer/collection-map-failure", null);
+  }
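+
The three new ZooKeeper-backed maps give collection-API tasks a persisted lifecycle: a task is tracked in the running map while in flight and ends up in exactly one of the completed or failure maps. A minimal sketch of obtaining them (the DistributedMap API beyond construction is not part of this hunk, so no bookkeeping calls are shown):

    static void openTaskMaps(final SolrZkClient zkClient) {
      // Each getter first ensures the /overseer node exists, then opens its map.
      DistributedMap running   = Overseer.getRunningMap(zkClient);   // /overseer/collection-map-running
      DistributedMap completed = Overseer.getCompletedMap(zkClient); // /overseer/collection-map-completed
      DistributedMap failure   = Overseer.getFailureMap(zkClient);   // /overseer/collection-map-failure
    }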
   
   /* Collection creation queue */
   static DistributedQueue getCollectionQueue(final SolrZkClient zkClient) {
@@ -1132,4 +1188,8 @@ public class Overseer {
     return !"false".equals(clusterProps.get(ZkStateReader.LEGACY_CLOUD));
   }
 
+  public ZkStateReader getZkStateReader() {
+    return reader;
+  }
+
 }


