lucene-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From yo...@apache.org
Subject svn commit: r1420992 [6/7] - in /lucene/dev/branches/branch_4x: ./ dev-tools/ lucene/ lucene/analysis/ lucene/analysis/icu/src/java/org/apache/lucene/collation/ lucene/backwards/ lucene/benchmark/ lucene/codecs/ lucene/core/ lucene/core/src/test/org/ap...
Date Wed, 12 Dec 2012 21:41:26 GMT
Modified: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java Wed Dec 12 21:41:06 2012
@@ -18,6 +18,7 @@ package org.apache.solr.common.cloud;
  */
 
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -30,7 +31,7 @@ import java.util.Set;
 import org.apache.noggit.JSONWriter;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.HashPartitioner.Range;
+import org.apache.solr.common.cloud.DocRouter.Range;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
@@ -39,29 +40,23 @@ import org.slf4j.LoggerFactory;
 /**
  * Immutable state of the cloud. Normally you can get the state by using
  * {@link ZkStateReader#getClusterState()}.
+ * @lucene.experimental
  */
 public class ClusterState implements JSONWriter.Writable {
   private static Logger log = LoggerFactory.getLogger(ClusterState.class);
   
   private Integer zkClusterStateVersion;
   
-  private final Map<String, Map<String,Slice>> collectionStates;  // Map<collectionName, Map<sliceName,Slice>>
+  private final Map<String, DocCollection> collectionStates;  // Map<collectionName, Map<sliceName,Slice>>
   private final Set<String> liveNodes;
-  
-  private final HashPartitioner hp = new HashPartitioner();
-  
-  private final Map<String,RangeInfo> rangeInfos = new HashMap<String,RangeInfo>();
-  private final Map<String,Map<String,ZkNodeProps>> leaders = new HashMap<String,Map<String,ZkNodeProps>>();
-
 
-  
   /**
    * Use this constr when ClusterState is meant for publication.
    * 
    * hashCode and equals will only depend on liveNodes and not clusterStateVersion.
    */
   public ClusterState(Set<String> liveNodes,
-      Map<String, Map<String,Slice>> collectionStates) {
+      Map<String, DocCollection> collectionStates) {
     this(null, liveNodes, collectionStates);
   }
   
@@ -69,87 +64,75 @@ public class ClusterState implements JSO
    * Use this constr when ClusterState is meant for consumption.
    */
   public ClusterState(Integer zkClusterStateVersion, Set<String> liveNodes,
-      Map<String, Map<String,Slice>> collectionStates) {
+      Map<String, DocCollection> collectionStates) {
     this.zkClusterStateVersion = zkClusterStateVersion;
     this.liveNodes = new HashSet<String>(liveNodes.size());
     this.liveNodes.addAll(liveNodes);
-    this.collectionStates = new HashMap<String, Map<String,Slice>>(collectionStates.size());
+    this.collectionStates = new HashMap<String, DocCollection>(collectionStates.size());
     this.collectionStates.putAll(collectionStates);
-    addRangeInfos(collectionStates.keySet());
-    getShardLeaders();
   }
 
-  private void getShardLeaders() {
-    Set<Entry<String,Map<String,Slice>>> collections = collectionStates.entrySet();
-    for (Entry<String,Map<String,Slice>> collection : collections) {
-      Map<String,Slice> state = collection.getValue();
-      Set<Entry<String,Slice>> slices = state.entrySet();
-      for (Entry<String,Slice> sliceEntry : slices) {
-        Slice slice = sliceEntry.getValue();
-        Map<String,Replica> shards = slice.getReplicasMap();
-        Set<Entry<String,Replica>> shardsEntries = shards.entrySet();
-        for (Entry<String,Replica> shardEntry : shardsEntries) {
-          ZkNodeProps props = shardEntry.getValue();
-          if (props.containsKey(ZkStateReader.LEADER_PROP)) {
-            Map<String,ZkNodeProps> leadersForCollection = leaders.get(collection.getKey());
-            if (leadersForCollection == null) {
-              leadersForCollection = new HashMap<String,ZkNodeProps>();
-              leaders.put(collection.getKey(), leadersForCollection);
-            }
-            leadersForCollection.put(sliceEntry.getKey(), props);
-            break; // we found the leader for this shard
-          }
-        }
-      }
-    }
-  }
 
   /**
-   * Get properties of a shard leader for specific collection.
+   * Get the lead replica for specific collection, or null if one currently doesn't exist.
    */
-  public ZkNodeProps getLeader(String collection, String shard) {
-    Map<String,ZkNodeProps> collectionLeaders = leaders.get(collection);
-    if (collectionLeaders == null) return null;
-    return collectionLeaders.get(shard);
+  public Replica getLeader(String collection, String sliceName) {
+    DocCollection coll = collectionStates.get(collection);
+    if (coll == null) return null;
+    Slice slice = coll.getSlice(sliceName);
+    if (slice == null) return null;
+    return slice.getLeader();
   }
   
   /**
-   * Get shard properties or null if shard is not found.
-   */
-  public Replica getShardProps(final String collection, final String coreNodeName) {
-    Map<String, Slice> slices = getSlices(collection);
-    if (slices == null) return null;
-    for(Slice slice: slices.values()) {
-      if(slice.getReplicasMap().get(coreNodeName)!=null) {
-        return slice.getReplicasMap().get(coreNodeName);
-      }
-    }
-    return null;
+   * Gets the replica by the core name (assuming the slice is unknown) or null if replica is not found.
+   * If the slice is known, do not use this method.
+   * coreNodeName is the same as replicaName
+   */
+  public Replica getReplica(final String collection, final String coreNodeName) {
+    return getReplica(collectionStates.get(collection), coreNodeName);
   }
 
-  private void addRangeInfos(Set<String> collections) {
-    for (String collection : collections) {
-      addRangeInfo(collection);
+  private Replica getReplica(DocCollection coll, String replicaName) {
+    if (coll == null) return null;
+    for(Slice slice: coll.getSlices()) {
+      Replica replica = slice.getReplica(replicaName);
+      if (replica != null) return replica;
     }
+    return null;
   }
 
+
   /**
-   * Get the index Slice for collection.
+   * Get the named Slice for collection, or null if not found.
    */
-  public Slice getSlice(String collection, String slice) {
-    if (collectionStates.containsKey(collection)
-        && collectionStates.get(collection).containsKey(slice))
-      return collectionStates.get(collection).get(slice);
-    return null;
+  public Slice getSlice(String collection, String sliceName) {
+    DocCollection coll = collectionStates.get(collection);
+    if (coll == null) return null;
+    return coll.getSlice(sliceName);
+  }
+
+  public Map<String, Slice> getSlicesMap(String collection) {
+    DocCollection coll = collectionStates.get(collection);
+    if (coll == null) return null;
+    return coll.getSlicesMap();
+  }
+
+  public Collection<Slice> getSlices(String collection) {
+    DocCollection coll = collectionStates.get(collection);
+    if (coll == null) return null;
+    return coll.getSlices();
   }
 
   /**
-   * Get all slices for collection.
+   * Get the named DocCollection object, or throw an exception if it doesn't exist.
    */
-  public Map<String, Slice> getSlices(String collection) {
-    if(!collectionStates.containsKey(collection))
-      return null;
-    return Collections.unmodifiableMap(collectionStates.get(collection));
+  public DocCollection getCollection(String collection) {
+    DocCollection coll = collectionStates.get(collection);
+    if (coll == null) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection:" + collection);
+    }
+    return coll;
   }
 
   /**
@@ -162,7 +145,7 @@ public class ClusterState implements JSO
   /**
    * @return Map&lt;collectionName, Map&lt;sliceName,Slice&gt;&gt;
    */
-  public Map<String, Map<String, Slice>> getCollectionStates() {
+  public Map<String, DocCollection> getCollectionStates() {
     return Collections.unmodifiableMap(collectionStates);
   }
 
@@ -174,17 +157,14 @@ public class ClusterState implements JSO
   }
 
   /**
-   * Get shardId for core.
-   * @param coreNodeName in the form of nodeName_coreName
+   * Get the slice/shardId for a core.
+   * @param coreNodeName in the form of nodeName_coreName (the name of the replica)
    */
   public String getShardId(String coreNodeName) {
-    for (Entry<String, Map<String, Slice>> states: collectionStates.entrySet()){
-      for(Entry<String, Slice> slices: states.getValue().entrySet()) {
-        for(Entry<String, Replica> shards: slices.getValue().getReplicasMap().entrySet()){
-          if(coreNodeName.equals(shards.getKey())) {
-            return slices.getKey();
-          }
-        }
+     // System.out.println("###### getShardId("+coreNodeName+") in " + collectionStates);
+    for (DocCollection coll : collectionStates.values()) {
+      for (Slice slice : coll.getSlices()) {
+        if (slice.getReplicasMap().containsKey(coreNodeName)) return slice.getName();
       }
     }
     return null;
@@ -196,56 +176,6 @@ public class ClusterState implements JSO
   public boolean liveNodesContain(String name) {
     return liveNodes.contains(name);
   }
-  
-  public RangeInfo getRanges(String collection) {
-    // TODO: store this in zk
-    RangeInfo rangeInfo = rangeInfos.get(collection);
-
-    return rangeInfo;
-  }
-
-  private RangeInfo addRangeInfo(String collection) {
-    List<Range> ranges;
-    RangeInfo rangeInfo;
-    rangeInfo = new RangeInfo();
-
-    Map<String,Slice> slices = getSlices(collection);
-    
-    if (slices == null) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Can not find collection "
-          + collection + " in " + this);
-    }
-    
-    Set<String> shards = slices.keySet();
-    ArrayList<String> shardList = new ArrayList<String>(shards.size());
-    shardList.addAll(shards);
-    Collections.sort(shardList);
-    
-    ranges = hp.partitionRange(shards.size(), Integer.MIN_VALUE, Integer.MAX_VALUE);
-    
-    rangeInfo.ranges = ranges;
-    rangeInfo.shardList = shardList;
-    rangeInfos.put(collection, rangeInfo);
-    return rangeInfo;
-  }
-
-  /**
-   * Get shard id for hash. This is used when determining which Slice the
-   * document is to be submitted to.
-   */
-  public String getShard(int hash, String collection) {
-    RangeInfo rangInfo = getRanges(collection);
-    
-    int cnt = 0;
-    for (Range range : rangInfo.ranges) {
-      if (range.includes(hash)) {
-        return rangInfo.shardList.get(cnt);
-      }
-      cnt++;
-    }
-    
-    throw new IllegalStateException("The HashPartitioner failed");
-  }
 
   @Override
   public String toString() {
@@ -278,35 +208,52 @@ public class ClusterState implements JSO
    * @return the ClusterState
    */
   public static ClusterState load(Integer version, byte[] bytes, Set<String> liveNodes) {
+    // System.out.println("######## ClusterState.load:" + (bytes==null ? null : new String(bytes)));
     if (bytes == null || bytes.length == 0) {
-      return new ClusterState(version, liveNodes, Collections.<String, Map<String,Slice>>emptyMap());
+      return new ClusterState(version, liveNodes, Collections.<String, DocCollection>emptyMap());
+    }
+    Map<String, Object> stateMap = (Map<String, Object>) ZkStateReader.fromJSON(bytes);
+    Map<String,DocCollection> collections = new LinkedHashMap<String,DocCollection>(stateMap.size());
+    for (Entry<String, Object> entry : stateMap.entrySet()) {
+      String collectionName = entry.getKey();
+      DocCollection coll = collectionFromObjects(collectionName, (Map<String,Object>)entry.getValue());
+      collections.put(collectionName, coll);
     }
-    // System.out.println("########## Loading ClusterState:" + new String(bytes));
-    LinkedHashMap<String, Object> stateMap = (LinkedHashMap<String, Object>) ZkStateReader.fromJSON(bytes);
-    HashMap<String,Map<String, Slice>> state = new HashMap<String,Map<String,Slice>>();
-
-    for(String collectionName: stateMap.keySet()){
-      Map<String, Object> collection = (Map<String, Object>)stateMap.get(collectionName);
-      Map<String, Slice> slices = new LinkedHashMap<String,Slice>();
-
-      for (Entry<String,Object> sliceEntry : collection.entrySet()) {
-        Slice slice = new Slice(sliceEntry.getKey(), null, (Map<String,Object>)sliceEntry.getValue());
-        slices.put(slice.getName(), slice);
+
+    // System.out.println("######## ClusterState.load result:" + collections);
+    return new ClusterState(version, liveNodes, collections);
+  }
+
+  private static DocCollection collectionFromObjects(String name, Map<String,Object> objs) {
+    Map<String,Object> props = (Map<String,Object>)objs.get(DocCollection.PROPERTIES);
+    if (props == null) props = Collections.emptyMap();
+    DocRouter router = DocRouter.getDocRouter(props.get(DocCollection.DOC_ROUTER));
+    Map<String,Slice> slices = makeSlices(objs);
+    return new DocCollection(name, slices, props, router);
+  }
+
+  private static Map<String,Slice> makeSlices(Map<String,Object> genericSlices) {
+    if (genericSlices == null) return Collections.emptyMap();
+    Map<String,Slice> result = new LinkedHashMap<String, Slice>(genericSlices.size());
+    for (Map.Entry<String,Object> entry : genericSlices.entrySet()) {
+      String name = entry.getKey();
+      if (DocCollection.PROPERTIES.equals(name)) continue;  // skip special properties entry
+      Object val = entry.getValue();
+      Slice s;
+      if (val instanceof Slice) {
+        s = (Slice)val;
+      } else {
+        s = new Slice(name, null, (Map<String,Object>)val);
       }
-      state.put(collectionName, slices);
+      result.put(name, s);
     }
-    return new ClusterState(version, liveNodes, state);
+    return result;
   }
 
   @Override
   public void write(JSONWriter jsonWriter) {
     jsonWriter.write(collectionStates);
   }
-  
-  private class RangeInfo {
-    private List<Range> ranges;
-    private ArrayList<String> shardList;
-  }
 
   /**
    * The version of clusterstate.json in ZooKeeper.

Added: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java?rev=1420992&view=auto
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java (added)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java Wed Dec 12 21:41:06 2012
@@ -0,0 +1,198 @@
+package org.apache.solr.common.cloud;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.Hash;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+//
+// user!uniqueid
+// user/4!uniqueid
+//
+public class CompositeIdRouter extends HashBasedRouter {
+  public static final String NAME = "compositeId";
+
+  private int separator = '!';
+
+  // separator used to optionally specify number of bits to allocate toward first part.
+  private int bitsSepartor = '/';
+  private int bits = 16;
+  private int mask1 = 0xffff0000;
+  private int mask2 = 0x0000ffff;
+
+  protected void setBits(int firstBits) {
+    this.bits = firstBits;
+    // java can't shift 32 bits
+    mask1 = firstBits==0 ? 0 : (-1 << (32-firstBits));
+    mask2 = firstBits==32 ? 0 : (-1 >>> firstBits);
+  }
+
+  protected int getBits(String firstPart, int commaIdx) {
+    int v = 0;
+    for (int idx = commaIdx + 1; idx<firstPart.length(); idx++) {
+      char ch = firstPart.charAt(idx);
+      if (ch < '0' || ch > '9') return -1;
+      v = v * 10 + (ch - '0');
+    }
+    return v > 32 ? -1 : v;
+  }
+
+  @Override
+  protected int sliceHash(String id, SolrInputDocument doc, SolrParams params) {
+    int idx = id.indexOf(separator);
+    if (idx < 0) {
+      return Hash.murmurhash3_x86_32(id, 0, id.length(), 0);
+    }
+
+    int m1 = mask1;
+    int m2 = mask2;
+
+    String part1 = id.substring(0,idx);
+    int commaIdx = part1.indexOf(bitsSepartor);
+    if (commaIdx > 0) {
+      int firstBits = getBits(part1, commaIdx);
+      if (firstBits >= 0) {
+        m1 = firstBits==0 ? 0 : (-1 << (32-firstBits));
+        m2 = firstBits==32 ? 0 : (-1 >>> firstBits);
+        part1 = part1.substring(0, commaIdx);
+      }
+    }
+
+    String part2 = id.substring(idx+1);
+
+    int hash1 = Hash.murmurhash3_x86_32(part1, 0, part1.length(), 0);
+    int hash2 = Hash.murmurhash3_x86_32(part2, 0, part2.length(), 0);
+    return (hash1 & m1) | (hash2 & m2);
+  }
+
+  @Override
+  public Collection<Slice> getSearchSlicesSingle(String shardKey, SolrParams params, DocCollection collection) {
+    if (shardKey == null) {
+      // search across whole collection
+      // TODO: this may need modification in the future when shard splitting could cause an overlap
+      return collection.getSlices();
+    }
+    String id = shardKey;
+
+    int idx = shardKey.indexOf(separator);
+    if (idx < 0) {
+      // shardKey is a simple id, so don't do a range
+      return Collections.singletonList(hashToSlice(Hash.murmurhash3_x86_32(id, 0, id.length(), 0), collection));
+    }
+
+    int m1 = mask1;
+    int m2 = mask2;
+
+    String part1 = id.substring(0,idx);
+    int bitsSepIdx = part1.indexOf(bitsSepartor);
+    if (bitsSepIdx > 0) {
+      int firstBits = getBits(part1, bitsSepIdx);
+      if (firstBits >= 0) {
+        m1 = firstBits==0 ? 0 : (-1 << (32-firstBits));
+        m2 = firstBits==32 ? 0 : (-1 >>> firstBits);
+        part1 = part1.substring(0, bitsSepIdx);
+      }
+    }
+
+    //  If the upper bits are 0xF0000000, the range we want to cover is
+    //  0xF0000000 0xFfffffff
+
+    int hash1 = Hash.murmurhash3_x86_32(part1, 0, part1.length(), 0);
+    int upperBits = hash1 & m1;
+    int lowerBound = upperBits;
+    int upperBound = upperBits | m2;
+
+    if (m1 == 0) {
+      // no bits used from first part of key.. the code above will produce 0x000000000->0xffffffff which only works on unsigned space, but we're using signed space.
+      lowerBound = Integer.MIN_VALUE;
+      upperBound = Integer.MAX_VALUE;
+    }
+
+    Range completeRange = new Range(lowerBound, upperBound);
+
+    List<Slice> targetSlices = new ArrayList<Slice>(1);
+    for (Slice slice : collection.getSlices()) {
+      Range range = slice.getRange();
+      if (range != null && range.overlaps(completeRange)) {
+        targetSlices.add(slice);
+      }
+    }
+
+    return targetSlices;
+  }
+
+
+  @Override
+  public List<Range> partitionRange(int partitions, Range range) {
+    int min = range.min;
+    int max = range.max;
+
+    assert max >= min;
+    if (partitions == 0) return Collections.EMPTY_LIST;
+    long rangeSize = (long)max - (long)min;
+    long rangeStep = Math.max(1, rangeSize / partitions);
+
+    List<Range> ranges = new ArrayList<Range>(partitions);
+
+    long start = min;
+    long end = start;
+
+    // keep track of the idealized target to avoid accumulating rounding errors
+    long targetStart = min;
+    long targetEnd = targetStart;
+
+    // Round to avoid splitting hash domains across ranges if such rounding is not significant.
+    // With default bits==16, one would need to create more than 4000 shards before this
+    // becomes false by default.
+    boolean round = rangeStep >= (1<<bits)*16;
+
+    while (end < max) {
+      targetEnd = targetStart + rangeStep;
+      end = targetEnd;
+
+      if (round && ((end & mask2) != mask2)) {
+        // round up or down?
+        int increment = 1 << bits;  // 0x00010000
+        long roundDown = (end | mask2) - increment ;
+        long roundUp = (end | mask2) + increment;
+        if (end - roundDown < roundUp - end && roundDown > start) {
+          end = roundDown;
+        } else {
+          end = roundUp;
+        }
+      }
+
+      // make last range always end exactly on MAX_VALUE
+      if (ranges.size() == partitions - 1) {
+        end = max;
+      }
+      ranges.add(new Range((int)start, (int)end));
+      start = end + 1L;
+      targetStart = targetEnd + 1L;
+    }
+
+    return ranges;
+  }
+
+}

Added: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java?rev=1420992&view=auto
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java (added)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java Wed Dec 12 21:41:06 2012
@@ -0,0 +1,90 @@
+package org.apache.solr.common.cloud;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.noggit.JSONUtil;
+import org.apache.noggit.JSONWriter;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * Models a Collection in zookeeper (but that Java name is obviously taken, hence "DocCollection")
+ */
+public class DocCollection extends ZkNodeProps {
+  public static final String PROPERTIES = "properties";
+  public static final String DOC_ROUTER = "router";
+
+  private final String name;
+  private final Map<String, Slice> slices;
+  private final DocRouter router;
+
+  public DocCollection(String name, Map<String, Slice> slices, Map<String, Object> props, DocRouter router) {
+    super(props == null ? new HashMap<String,Object>(1) : props);
+    this.name = name;
+    this.slices = slices;
+    this.router = router;
+    assert name != null && slices != null;
+  }
+
+
+  /**
+   * Return collection name.
+   */
+  public String getName() {
+    return name;
+  }
+
+  public Slice getSlice(String sliceName) {
+    return slices.get(sliceName);
+  }
+
+  /**
+   * Gets the list of slices for this collection.
+   */
+  public Collection<Slice> getSlices() {
+    return slices.values();
+  }
+
+  /**
+   * Get the map of slices (sliceName->Slice) for this collection.
+   */
+  public Map<String, Slice> getSlicesMap() {
+    return slices;
+  }
+
+  public DocRouter getRouter() {
+    return router;
+  }
+
+  @Override
+  public String toString() {
+    return "DocCollection("+name+")=" + JSONUtil.toJSON(this);
+  }
+
+  @Override
+  public void write(JSONWriter jsonWriter) {
+    // write out the properties under "properties"
+    LinkedHashMap<String,Object> all = new LinkedHashMap<String,Object>(slices.size()+1);
+    all.put(PROPERTIES, propMap);
+    all.putAll(slices);
+    jsonWriter.write(all);
+  }
+}

Added: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java?rev=1420992&view=auto
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java (added)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java Wed Dec 12 21:41:06 2012
@@ -0,0 +1,187 @@
+package org.apache.solr.common.cloud;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.noggit.JSONWriter;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.Hash;
+import org.apache.solr.common.util.StrUtils;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Class to partition int range into n ranges.
+ * @lucene.experimental
+ */
+public abstract class DocRouter {
+  public static final String DEFAULT_NAME = CompositeIdRouter.NAME;
+  public static final DocRouter DEFAULT = new CompositeIdRouter();
+
+  public static DocRouter getDocRouter(Object routerSpec) {
+    DocRouter router = routerMap.get(routerSpec);
+    if (router != null) return router;
+    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown document router '"+ routerSpec + "'");
+  }
+
+  // currently just an implementation detail...
+  private final static Map<String, DocRouter> routerMap;
+  static {
+    routerMap = new HashMap<String, DocRouter>();
+    PlainIdRouter plain = new PlainIdRouter();
+    // instead of doing back compat this way, we could always convert the clusterstate on first read to "plain" if it doesn't have any properties.
+    routerMap.put(null, plain);     // back compat with 4.0
+    routerMap.put(PlainIdRouter.NAME, plain);
+    routerMap.put(CompositeIdRouter.NAME, DEFAULT_NAME.equals(CompositeIdRouter.NAME) ? DEFAULT : new CompositeIdRouter());
+    routerMap.put(ImplicitDocRouter.NAME, new ImplicitDocRouter());
+    // NOTE: careful that the map keys (the static .NAME members) are filled in by making them final
+  }
+
+
+  // Hash ranges can't currently "wrap" - i.e. max must be greater or equal to min.
+  // TODO: ranges may not be all contiguous in the future (either that or we will
+  // need an extra class to model a collection of ranges)
+  public static class Range implements JSONWriter.Writable {
+    public int min;  // inclusive
+    public int max;  // inclusive
+
+    public Range(int min, int max) {
+      assert min <= max;
+      this.min = min;
+      this.max = max;
+    }
+
+    public boolean includes(int hash) {
+      return hash >= min && hash <= max;
+    }
+
+    public boolean isSubsetOf(Range superset) {
+      return superset.min <= min && superset.max >= max;
+    }
+
+    public boolean overlaps(Range other) {
+      return includes(other.min) || includes(other.max) || isSubsetOf(other);
+    }
+
+    @Override
+    public String toString() {
+      return Integer.toHexString(min) + '-' + Integer.toHexString(max);
+    }
+
+
+    @Override
+    public int hashCode() {
+      // difficult numbers to hash... only the highest bits will tend to differ.
+      // ranges will only overlap during a split, so we can just hash the lower range.
+      return (min>>28) + (min>>25) + (min>>21) + min;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (obj.getClass() != getClass()) return false;
+      Range other = (Range)obj;
+      return this.min == other.min && this.max == other.max;
+    }
+
+    @Override
+    public void write(JSONWriter writer) {
+      writer.write(toString());
+    }
+  }
+
+  public Range fromString(String range) {
+    int middle = range.indexOf('-');
+    String minS = range.substring(0, middle);
+    String maxS = range.substring(middle+1);
+    long min = Long.parseLong(minS, 16);  // use long to prevent the parsing routines from potentially worrying about overflow
+    long max = Long.parseLong(maxS, 16);
+    return new Range((int)min, (int)max);
+  }
+
+  public Range fullRange() {
+    return new Range(Integer.MIN_VALUE, Integer.MAX_VALUE);
+  }
+
+  /**
+   * Returns the range for each partition
+   */
+  public List<Range> partitionRange(int partitions, Range range) {
+    int min = range.min;
+    int max = range.max;
+
+    assert max >= min;
+    if (partitions == 0) return Collections.EMPTY_LIST;
+    long rangeSize = (long)max - (long)min;
+    long rangeStep = Math.max(1, rangeSize / partitions);
+
+    List<Range> ranges = new ArrayList<Range>(partitions);
+
+    long start = min;
+    long end = start;
+
+    while (end < max) {
+      end = start + rangeStep;
+      // make last range always end exactly on MAX_VALUE
+      if (ranges.size() == partitions - 1) {
+        end = max;
+      }
+      ranges.add(new Range((int)start, (int)end));
+      start = end + 1L;
+    }
+
+    return ranges;
+  }
+
+  /** Returns the Slice that the document should reside on, or null if there is not enough information */
+  public abstract Slice getTargetSlice(String id, SolrInputDocument sdoc, SolrParams params, DocCollection collection);
+
+  /** This method is consulted to determine what slices should be queried for a request when
+   *  an explicit shards parameter was not used.
+   *  This method only accepts a single shard key (or null).  If you have a comma separated list of shard keys,
+   *  call getSearchSlices
+   **/
+  public abstract Collection<Slice> getSearchSlicesSingle(String shardKey, SolrParams params, DocCollection collection);
+
+
+  /** This method is consulted to determine what slices should be queried for a request when
+   *  an explicit shards parameter was not used.
+   *  This method accepts a multi-valued shardKeys parameter (normally comma separated from the shard.keys request parameter)
+   *  and aggregates the slices returned by getSearchSlicesSingle for each shardKey.
+   **/
+  public Collection<Slice> getSearchSlices(String shardKeys, SolrParams params, DocCollection collection) {
+    if (shardKeys == null || shardKeys.indexOf(',') < 0) {
+      return getSearchSlicesSingle(shardKeys, params, collection);
+    }
+
+    List<String> shardKeyList = StrUtils.splitSmart(shardKeys, ",", true);
+    HashSet<Slice> allSlices = new HashSet<Slice>();
+    for (String shardKey : shardKeyList) {
+      allSlices.addAll( getSearchSlicesSingle(shardKey, params, collection) );
+    }
+    return allSlices;
+  }
+
+}
+

Added: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/HashBasedRouter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/HashBasedRouter.java?rev=1420992&view=auto
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/HashBasedRouter.java (added)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/HashBasedRouter.java Wed Dec 12 21:41:06 2012
@@ -0,0 +1,68 @@
+package org.apache.solr.common.cloud;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.Hash;
+
+import java.util.Collection;
+import java.util.Collections;
+
+public abstract class HashBasedRouter extends DocRouter {
+
+  @Override
+  public Slice getTargetSlice(String id, SolrInputDocument sdoc, SolrParams params, DocCollection collection) {
+    if (id == null) id = getId(sdoc, params);
+    int hash = sliceHash(id, sdoc, params);
+    return hashToSlice(hash, collection);
+  }
+
+  protected int sliceHash(String id, SolrInputDocument sdoc, SolrParams params) {
+    return Hash.murmurhash3_x86_32(id, 0, id.length(), 0);
+  }
+
+  protected String getId(SolrInputDocument sdoc, SolrParams params) {
+    Object  idObj = sdoc.getFieldValue("id");  // blech
+    String id = idObj != null ? idObj.toString() : "null";  // should only happen on client side
+    return id;
+  }
+
+  protected Slice hashToSlice(int hash, DocCollection collection) {
+    for (Slice slice : collection.getSlices()) {
+      Range range = slice.getRange();
+      if (range != null && range.includes(hash)) return slice;
+    }
+    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No slice servicing hash code " + Integer.toHexString(hash) + " in " + collection);
+  }
+
+
+  @Override
+  public Collection<Slice> getSearchSlicesSingle(String shardKey, SolrParams params, DocCollection collection) {
+    if (shardKey == null) {
+      // search across whole collection
+      // TODO: this may need modification in the future when shard splitting could cause an overlap
+      return collection.getSlices();
+    }
+
+    // use the shardKey as an id for plain hashing
+    Slice slice = getTargetSlice(shardKey, null, params, collection);
+    return slice == null ? Collections.<Slice>emptyList() : Collections.singletonList(slice);
+  }
+}

Added: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ImplicitDocRouter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ImplicitDocRouter.java?rev=1420992&view=auto
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ImplicitDocRouter.java (added)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ImplicitDocRouter.java Wed Dec 12 21:41:06 2012
@@ -0,0 +1,72 @@
+package org.apache.solr.common.cloud;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.params.SolrParams;
+
+import java.util.Collection;
+import java.util.Collections;
+
+/** This document router is for custom sharding
+ */
+public class ImplicitDocRouter extends DocRouter {
+  public static final String NAME = "implicit";
+
+  @Override
+  public Slice getTargetSlice(String id, SolrInputDocument sdoc, SolrParams params, DocCollection collection) {
+    String shard = null;
+    if (sdoc != null) {
+      Object o = sdoc.getFieldValue("_shard_");
+      if (o != null) {
+        shard = o.toString();
+      }
+    }
+
+    if (shard == null) {
+      shard = params.get("_shard_");
+    }
+
+    if (shard != null) {
+      Slice slice = collection.getSlice(shard);
+      if (slice == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No _shard_=" + shard + " in " + collection);
+      }
+      return slice;
+    }
+
+    return null;  // no shard specified... use default.
+  }
+
+  @Override
+  public Collection<Slice> getSearchSlicesSingle(String shardKey, SolrParams params, DocCollection collection) {
+    if (shardKey == null) {
+      return collection.getSlices();
+    }
+
+    // assume the shardKey is just a slice name
+    Slice slice = collection.getSlice(shardKey);
+    if (slice == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "implicit router can't find shard " + shardKey + " in collection " + collection.getName());
+    }
+
+    return Collections.singleton(slice);
+  }
+
+}

Added: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/PlainIdRouter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/PlainIdRouter.java?rev=1420992&view=auto
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/PlainIdRouter.java (added)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/PlainIdRouter.java Wed Dec 12 21:41:06 2012
@@ -0,0 +1,23 @@
+package org.apache.solr.common.cloud;
+
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class PlainIdRouter extends HashBasedRouter {
+  public static final String NAME = "plain";
+}

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java Wed Dec 12 21:41:06 2012
@@ -24,11 +24,12 @@ import java.util.Map;
 
 public class Replica extends ZkNodeProps {
   private final String name;
+  private final String nodeName;
 
   public Replica(String name, Map<String,Object> propMap) {
     super(propMap);
     this.name = name;
-    String nodeName = (String)propMap.get(ZkStateReader.NODE_NAME_PROP);
+    nodeName = (String)propMap.get(ZkStateReader.NODE_NAME_PROP);
     assert nodeName == null || name.startsWith(nodeName);
   }
 
@@ -36,9 +37,13 @@ public class Replica extends ZkNodeProps
     return name;
   }
 
+  /** The name of the node this replica resides on */
+  public String getNodeName() {
+    return nodeName;
+  }
+
   @Override
   public String toString() {
     return name + ':' + JSONUtil.toJSON(propMap, -1); // small enough, keep it on one line (i.e. no indent)
   }
-
 }

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java Wed Dec 12 21:41:06 2012
@@ -34,7 +34,7 @@ public class Slice extends ZkNodeProps {
   public static String LEADER = "leader";       // FUTURE: do we want to record the leader as a slice property in the JSON (as opposed to isLeader as a replica property?)
 
   private final String name;
-  private final HashPartitioner.Range range;
+  private final DocRouter.Range range;
   private final Integer replicationFactor;      // FUTURE: optional per-slice override of the collection replicationFactor
   private final Map<String,Replica> replicas;
   private final Replica leader;
@@ -49,15 +49,21 @@ public class Slice extends ZkNodeProps {
     this.name = name;
 
     Object rangeObj = propMap.get(RANGE);
-    HashPartitioner.Range tmpRange = null;
-    if (rangeObj instanceof HashPartitioner.Range) {
-      tmpRange = (HashPartitioner.Range)rangeObj;
+    DocRouter.Range tmpRange = null;
+    if (rangeObj instanceof DocRouter.Range) {
+      tmpRange = (DocRouter.Range)rangeObj;
     } else if (rangeObj != null) {
-      HashPartitioner hp = new HashPartitioner();
-      tmpRange = hp.fromString(rangeObj.toString());
+      // Doesn't support custom implementations of Range, but currently not needed.
+      tmpRange = DocRouter.DEFAULT.fromString(rangeObj.toString());
     }
     range = tmpRange;
 
+    /** debugging.  this isn't an error condition for custom sharding.
+    if (range == null) {
+      System.out.println("###### NO RANGE for " + name + " props=" + props);
+    }
+    **/
+
     replicationFactor = null;  // future
 
     // add the replicas *after* the other properties (for aesthetics, so it's easy to find slice properties in the JSON output)
@@ -121,6 +127,14 @@ public class Slice extends ZkNodeProps {
     return leader;
   }
 
+  public Replica getReplica(String replicaName) {
+    return replicas.get(replicaName);
+  }
+
+  public DocRouter.Range getRange() {
+    return range;
+  }
+
   @Override
   public String toString() {
     return name + ':' + JSONUtil.toJSON(propMap);

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZkClient.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZkClient.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZkClient.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZkClient.java Wed Dec 12 21:41:06 2012
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.io.StringReader;
 import java.io.StringWriter;
 import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
@@ -461,6 +462,28 @@ public class SolrZkClient {
   }
 
   /**
+   * Returns the baseURL corresponding to a given node's nodeName -- 
+   * NOTE: does not (currently) imply that the nodeName (or resulting 
+   * baseURL) exists in the cluster.
+   * @lucene.experimental
+   */
+  public String getBaseUrlForNodeName(final String nodeName) {
+    final int _offset = nodeName.indexOf("_");
+    if (_offset < 0) {
+      throw new IllegalArgumentException("nodeName does not contain expected '_' seperator: " + nodeName);
+    }
+    final String hostAndPort = nodeName.substring(0,_offset);
+    try {
+      final String path = URLDecoder.decode(nodeName.substring(1+_offset),
+                                            "UTF-8");
+      return "http://" + hostAndPort + "/" + path;
+    } catch (UnsupportedEncodingException e) {
+      throw new IllegalStateException("JVM Does not seem to support UTF-8", e);
+    }
+  }
+
+
+  /**
    * Fills string with printout of current ZooKeeper layout.
    */
   public void printLayout(String path, int indent, StringBuilder string)

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java Wed Dec 12 21:41:06 2012
@@ -39,6 +39,8 @@ public class ZkNodeProps implements JSON
    */
   public ZkNodeProps(Map<String,Object> propMap) {
     this.propMap = propMap;
+    // TODO: store an unmodifiable map, but in a way that guarantees not to wrap more than once.
+    // Always wrapping introduces a memory leak.
   }
 
 
@@ -70,14 +72,14 @@ public class ZkNodeProps implements JSON
    * Get property keys.
    */
   public Set<String> keySet() {
-    return Collections.unmodifiableSet(propMap.keySet());
+    return propMap.keySet();
   }
 
   /**
    * Get all properties as map.
    */
   public Map<String, Object> getProperties() {
-    return Collections.unmodifiableMap(propMap);
+    return propMap;
   }
 
   /** Returns a shallow writable copy of the properties */

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java Wed Dec 12 21:41:06 2012
@@ -187,7 +187,7 @@ public class ZkStateReader {
           if (EventType.None.equals(event.getType())) {
             return;
           }
-          log.info("A cluster state change has occurred - updating... ({})", ZkStateReader.this.clusterState == null ? 0 : ZkStateReader.this.clusterState.getLiveNodes().size());
+          log.info("A cluster state change: {}, has occurred - updating... (live nodes size: {})", (event) , ZkStateReader.this.clusterState == null ? 0 : ZkStateReader.this.clusterState.getLiveNodes().size());
           try {
             
             // delayed approach
@@ -393,25 +393,28 @@ public class ZkStateReader {
   
   public String getLeaderUrl(String collection, String shard, int timeout)
       throws InterruptedException, KeeperException {
-    ZkCoreNodeProps props = new ZkCoreNodeProps(getLeaderProps(collection,
+    ZkCoreNodeProps props = new ZkCoreNodeProps(getLeaderRetry(collection,
         shard, timeout));
     return props.getCoreUrl();
   }
   
   /**
-   * Get shard leader properties.
+   * Get shard leader properties, with retry if none exist.
    */
-  public ZkNodeProps getLeaderProps(String collection, String shard) throws InterruptedException {
-    return getLeaderProps(collection, shard, 1000);
+  public Replica getLeaderRetry(String collection, String shard) throws InterruptedException {
+    return getLeaderRetry(collection, shard, 1000);
   }
-  
-  public ZkNodeProps getLeaderProps(String collection, String shard, int timeout) throws InterruptedException {
+
+  /**
+   * Get shard leader properties, with retry if none exist.
+   */
+  public Replica getLeaderRetry(String collection, String shard, int timeout) throws InterruptedException {
     long timeoutAt = System.currentTimeMillis() + timeout;
     while (System.currentTimeMillis() < timeoutAt) {
       if (clusterState != null) {    
-        final ZkNodeProps nodeProps = clusterState.getLeader(collection, shard);     
-        if (nodeProps != null && getClusterState().liveNodesContain((String) nodeProps.get(ZkStateReader.NODE_NAME_PROP))) {
-          return nodeProps;
+        Replica replica = clusterState.getLeader(collection, shard);
+        if (replica != null && getClusterState().liveNodesContain(replica.getNodeName())) {
+          return replica;
         }
       }
       Thread.sleep(50);
@@ -452,7 +455,7 @@ public class ZkStateReader {
     if (clusterState == null) {
       return null;
     }
-    Map<String,Slice> slices = clusterState.getSlices(collection);
+    Map<String,Slice> slices = clusterState.getSlicesMap(collection);
     if (slices == null) {
       throw new ZooKeeperException(ErrorCode.BAD_REQUEST,
           "Could not find collection in zk: " + collection + " "

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/params/ShardParams.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/params/ShardParams.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/params/ShardParams.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/java/org/apache/solr/common/params/ShardParams.java Wed Dec 12 21:41:06 2012
@@ -45,4 +45,7 @@ public interface ShardParams {
 
   /** Should things fail if there is an error? (true/false) */
   public static final String SHARDS_TOLERANT = "shards.tolerant";
+
+  /** The shard keys at which to route the request (comma separated), from the shard.keys request parameter */
+  public static final String SHARD_KEYS = "shard.keys";
 }

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/shared/solr.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/shared/solr.xml?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/shared/solr.xml (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/shared/solr.xml Wed Dec 12 21:41:06 2012
@@ -30,7 +30,7 @@
   adminPath: RequestHandler path to manage cores.  
     If 'null' (or absent), cores will not be manageable via REST
   -->
-  <cores adminPath="/admin/cores" defaultCoreName="core0" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="solr" zkClientTimeout="8000">
+  <cores adminPath="/admin/cores" defaultCoreName="core0" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="${hostContext:}" zkClientTimeout="8000">
     <core name="collection1" instanceDir="." />
     <core name="core0" instanceDir="${theInstanceDir:./}" collection="${collection:acollection}">
       <property name="version" value="3.5"/>

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/solr.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/solr.xml?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/solr.xml (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/test-files/solrj/solr/solr.xml Wed Dec 12 21:41:06 2012
@@ -28,7 +28,7 @@
   adminPath: RequestHandler path to manage cores.  
     If 'null' (or absent), cores will not be manageable via request handler
   -->
-  <cores adminPath="/admin/cores" defaultCoreName="collection1" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="solr" zkClientTimeout="8000" numShards="${numShards:3}">
+  <cores adminPath="/admin/cores" defaultCoreName="collection1" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="${hostContext:}" zkClientTimeout="8000" numShards="${numShards:3}">
     <core name="collection1" instanceDir="collection1" shard="${shard:}" collection="${collection:collection1}" config="${solrconfig:solrconfig.xml}" schema="${schema:schema.xml}"/>
   </cores>
 </solr>

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java Wed Dec 12 21:41:06 2012
@@ -20,6 +20,7 @@ package org.apache.solr.client.solrj;
 import java.io.File;
 import java.io.IOException;
 
+import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
@@ -29,6 +30,7 @@ import org.apache.solr.core.CoreContaine
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.util.ExternalPaths;
 import org.junit.AfterClass;
+import org.junit.BeforeClass;
 
 /**
  * Abstract base class for testing merge indexes command
@@ -37,7 +39,6 @@ import org.junit.AfterClass;
  *
  */
 public abstract class MergeIndexesExampleTestBase extends SolrExampleTestBase {
-  // protected static final CoreContainer cores = new CoreContainer();
   protected static CoreContainer cores;
   private String saveProp;
   private File dataDir2;
@@ -47,28 +48,31 @@ public abstract class MergeIndexesExampl
     return ExternalPaths.EXAMPLE_MULTICORE_HOME;
   }
 
-  @Override
-  public String getSchemaFile() {
-    return getSolrHome() + "/core0/conf/schema.xml";
+  @BeforeClass
+  public static void beforeClass2() throws Exception {
+    if (dataDir == null) {
+      createTempDir();
+    }
+    cores = new CoreContainer();
   }
-
-  @Override
-  public String getSolrConfigFile() {
-    return getSolrHome() + "/core0/conf/solrconfig.xml";
+  
+  @AfterClass
+  public static void afterClass() {
+    cores.shutdown();
+    cores = null;
   }
-
+  
   @Override
   public void setUp() throws Exception {
     saveProp = System.getProperty("solr.directoryFactory");
     System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory");
     super.setUp();
 
-    cores = h.getCoreContainer();
     SolrCore.log.info("CORES=" + cores + " : " + cores.getCoreNames());
     cores.setPersistent(false);
     
     // setup datadirs
-    System.setProperty( "solr.core0.data.dir", this.dataDir.getCanonicalPath() ); 
+    System.setProperty( "solr.core0.data.dir", SolrTestCaseJ4.dataDir.getCanonicalPath() ); 
     
     dataDir2 = new File(TEMP_DIR, getClass().getName() + "-"
         + System.currentTimeMillis());
@@ -83,21 +87,16 @@ public abstract class MergeIndexesExampl
     
     String skip = System.getProperty("solr.test.leavedatadir");
     if (null != skip && 0 != skip.trim().length()) {
-      System.err.println("NOTE: per solr.test.leavedatadir, dataDir will not be removed: " + dataDir.getAbsolutePath());
+      System.err.println("NOTE: per solr.test.leavedatadir, dataDir will not be removed: " + dataDir2.getAbsolutePath());
     } else {
-      if (!recurseDelete(dataDir)) {
-        System.err.println("!!!! WARNING: best effort to remove " + dataDir.getAbsolutePath() + " FAILED !!!!!");
+      if (!recurseDelete(dataDir2)) {
+        System.err.println("!!!! WARNING: best effort to remove " + dataDir2.getAbsolutePath() + " FAILED !!!!!");
       }
     }
     
     if (saveProp == null) System.clearProperty("solr.directoryFactory");
     else System.setProperty("solr.directoryFactory", saveProp);
   }
-  
-  @AfterClass
-  public static void afterClass() {
-    cores = null;
-  }
 
   @Override
   protected final SolrServer getSolrServer() {

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/MultiCoreExampleTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/MultiCoreExampleTestBase.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/MultiCoreExampleTestBase.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/MultiCoreExampleTestBase.java Wed Dec 12 21:41:06 2012
@@ -19,16 +19,19 @@ package org.apache.solr.client.solrj;
 
 import java.io.File;
 
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.request.AbstractUpdateRequest.ACTION;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.request.AbstractUpdateRequest.ACTION;
 import org.apache.solr.client.solrj.response.CoreAdminResponse;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.util.ExternalPaths;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 
@@ -38,18 +41,26 @@ import org.junit.Test;
  */
 public abstract class MultiCoreExampleTestBase extends SolrExampleTestBase 
 {
-  // protected static final CoreContainer cores = new CoreContainer();
-  protected CoreContainer cores;
+  protected static CoreContainer cores;
+
   private File dataDir2;
 
   @Override public String getSolrHome() { return ExternalPaths.EXAMPLE_MULTICORE_HOME; }
+
   
-  @Override public String getSchemaFile()     { return getSolrHome()+"/core0/conf/schema.xml";     }
-  @Override public String getSolrConfigFile() { return getSolrHome()+"/core0/conf/solrconfig.xml"; }
+  @BeforeClass
+  public static void beforeThisClass2() throws Exception {
+    cores = new CoreContainer();
+  }
+  
+  @AfterClass
+  public static void afterClass() {
+    cores.shutdown();
+  }
   
   @Override public void setUp() throws Exception {
     super.setUp();
-    cores = h.getCoreContainer();
+
     SolrCore.log.info("CORES=" + cores + " : " + cores.getCoreNames());
     cores.setPersistent(false);
     
@@ -57,7 +68,7 @@ public abstract class MultiCoreExampleTe
         + System.currentTimeMillis());
     dataDir2.mkdirs();
     
-    System.setProperty( "solr.core0.data.dir", this.dataDir.getCanonicalPath() ); 
+    System.setProperty( "solr.core0.data.dir", SolrTestCaseJ4.dataDir.getCanonicalPath() ); 
     System.setProperty( "solr.core1.data.dir", this.dataDir2.getCanonicalPath() ); 
   }
   
@@ -73,8 +84,6 @@ public abstract class MultiCoreExampleTe
         System.err.println("!!!! WARNING: best effort to remove " + dataDir2.getAbsolutePath() + " FAILED !!!!!");
       }
     }
-    
-    cores = null;
   }
 
   @Override
@@ -208,6 +217,12 @@ public abstract class MultiCoreExampleTe
     NamedList<Object> response = getSolrCore("corefoo").query(new SolrQuery().setRequestHandler("/admin/system")).getResponse();
     NamedList<Object> coreInfo = (NamedList<Object>) response.get("core");
     String indexDir = (String) ((NamedList<Object>) coreInfo.get("directory")).get("index");
+    
+    
+
+    System.out.println( (String) ((NamedList<Object>) coreInfo.get("directory")).get("dirimpl"));
+
+    
     // test delete index on core
     CoreAdminRequest.unloadCore("corefoo", true, coreadmin);
     File dir = new File(indexDir);

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestBase.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestBase.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestBase.java Wed Dec 12 21:41:06 2012
@@ -19,6 +19,7 @@ package org.apache.solr.client.solrj;
 
 
 import org.apache.solr.util.AbstractSolrTestCase;
+import org.junit.BeforeClass;
 
 /**
  * This should include tests against the example solr config
@@ -32,9 +33,13 @@ abstract public class SolrExampleTestBas
 {
   @Override
   public String getSolrHome() { return "../../../example/solr/"; }
-  
-  @Override public String getSchemaFile()     { return getSolrHome()+"conf/schema.xml";     }
-  @Override public String getSolrConfigFile() { return getSolrHome()+"conf/solrconfig.xml"; }
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    if (dataDir == null) {
+      createTempDir();
+    }
+  }
  
   @Override
   public void setUp() throws Exception

Modified: lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MultiCoreExampleJettyTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MultiCoreExampleJettyTest.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MultiCoreExampleJettyTest.java (original)
+++ lucene/dev/branches/branch_4x/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MultiCoreExampleJettyTest.java Wed Dec 12 21:41:06 2012
@@ -56,7 +56,7 @@ public class MultiCoreExampleJettyTest e
     jetty.start(false);
     port = jetty.getLocalPort();
 
-    h.getCoreContainer().setPersistent(false);    
+    cores.setPersistent(false);    
   }
 
   @Override public void tearDown() throws Exception 

Modified: lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java (original)
+++ lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java Wed Dec 12 21:41:06 2012
@@ -33,6 +33,7 @@ import java.util.Set;
 import junit.framework.Assert;
 
 import org.apache.lucene.search.FieldCache;
 +import org.apache.lucene.util._TestUtil;
 import org.apache.lucene.util.Constants;
 import org.apache.solr.client.solrj.SolrServer;
 import org.apache.solr.client.solrj.SolrServerException;
@@ -50,6 +51,7 @@ import org.apache.solr.common.util.Named
 import org.apache.solr.schema.TrieDateField;
 import org.apache.solr.util.AbstractSolrTestCase;
 import org.junit.BeforeClass;
+import org.junit.AfterClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -68,6 +70,100 @@ public abstract class BaseDistributedSea
     assumeFalse("SOLR-4147: ibm 64bit has jvm bugs!", Constants.JRE_IS_64BIT && Constants.JAVA_VENDOR.startsWith("IBM"));
     r = new Random(random().nextLong());
   }
+  
+  /**
 +   * Sets the value of the "hostContext" system property to a random path 
+   * like string (which may or may not contain sub-paths).  This is used 
+   * in the default constructor for this test to help ensure no code paths have
+   * hardcoded assumptions about the servlet context used to run solr.
+   * <p>
+   * Test configs may use the <code>${hostContext}</code> variable to access 
+   * this system property.
+   * </p>
+   * @see #BaseDistributedSearchTestCase()
+   * @see #clearHostContext
+   */
+  @BeforeClass
+  public static void initHostContext() {
+    // Can't use randomRealisticUnicodeString because unescaped unicode is 
+    // not allowed in URL paths
+    // Can't use URLEncoder.encode(randomRealisticUnicodeString) because
+    // Jetty freaks out and returns 404's when the context uses escapes
+
+    StringBuilder hostContext = new StringBuilder("/");
+    if (random().nextBoolean()) {
+      // half the time we use the root context, the other half...
+
+      // Remember: randomSimpleString might be the empty string
+      hostContext.append(_TestUtil.randomSimpleString(random(), 2));
+      if (random().nextBoolean()) {
+        hostContext.append("_");
+      }
+      hostContext.append(_TestUtil.randomSimpleString(random(), 3));
+      if ( ! "/".equals(hostContext)) {
+        // if our random string is empty, this might add a trailing slash, 
+        // but our code should be ok with that
+        hostContext.append("/").append(_TestUtil.randomSimpleString(random(), 2));
+      } else {
+        // we got 'lucky' and still just have the root context,
+        // NOOP: don't try to add a subdir to nothing (ie "//" is bad)
+      }
+    }
+
+    log.info("Setting hostContext system property: " + hostContext.toString());
+    System.setProperty("hostContext", hostContext.toString());
+  }
+
+  /**
+   * Clears the "hostContext" system property
+   * @see #initHostContext
+   */
+  @AfterClass
+  public static void clearHostContext() throws Exception {
+    System.clearProperty("hostContext");
+  }
+
+  private static String getHostContextSuitableForServletContext() {
+    String ctx = System.getProperty("hostContext","/solr");
+    if ("".equals(ctx)) ctx = "/solr";
+    if (ctx.endsWith("/")) ctx = ctx.substring(0,ctx.length()-1);;
+    if (!ctx.startsWith("/")) ctx = "/" + ctx;
+    return ctx;
+  }
+
+  /**
+   * Constructs a test in which the jetty+solr instances as well as the 
+   * solr clients all use the value of the "hostContext" system property.
+   * <p>
+   * If the system property is not set, or is set to the empty string 
+   * (neither of which should normally happen unless a subclass explicitly 
+   * modifies the property set by {@link #initHostContext} prior to calling 
+   * this constructor) a servlet context of "/solr" is used. (this is for 
+   * consistency with the default behavior of solr.xml parsing when using 
+   * <code>hostContext="${hostContext:}"</code>
+   * </p>
+   * <p>
+   * If the system property is set to a value which does not begin with a 
+   * "/" (which should normally happen unless a subclass explicitly 
+   * modifies the property set by {@link #initHostContext} prior to calling 
+   * this constructor) a leading "/" will be prepended.
+   * </p>
+   *
+   * @see #initHostContext
+   */
+  protected BaseDistributedSearchTestCase() {
+    this(getHostContextSuitableForServletContext());
+  }
+
+  /**
+   * @param context explicit servlet context path to use (eg: "/solr")
+   */
+  protected BaseDistributedSearchTestCase(final String context) {
+    this.context = context;
+    this.deadServers = new String[] {"[ff01::114]:33332" + context, 
+                                     "[ff01::083]:33332" + context, 
+                                     "[ff01::213]:33332" + context};
+  }
 
   protected int shardCount = 4;      // the actual number of solr cores that will be created in the cluster
 
@@ -84,12 +180,10 @@ public abstract class BaseDistributedSea
   protected List<SolrServer> clients = new ArrayList<SolrServer>();
   protected List<JettySolrRunner> jettys = new ArrayList<JettySolrRunner>();
   
-  protected String context = "/solr";
+  protected String context;
+  protected String[] deadServers;
   protected String shards;
   protected String[] shardsArr;
-  // Some ISPs redirect to their own web site for domains that don't exist, causing this to fail
-  // protected String[] deadServers = {"does_not_exist_54321.com:33331/solr","127.0.0.1:33332/solr"};
-  protected String[] deadServers = {"[ff01::114]:33332/solr", "[ff01::083]:33332/solr", "[ff01::213]:33332/solr"};
   protected File testDir;
   protected SolrServer controlClient;
 
@@ -258,7 +352,7 @@ public abstract class BaseDistributedSea
   
   public JettySolrRunner createJetty(File solrHome, String dataDir, String shardList, String solrConfigOverride, String schemaOverride) throws Exception {
 
-    JettySolrRunner jetty = new JettySolrRunner(solrHome.getAbsolutePath(), "/solr", 0, solrConfigOverride, schemaOverride);
+    JettySolrRunner jetty = new JettySolrRunner(solrHome.getAbsolutePath(), context, 0, solrConfigOverride, schemaOverride);
     jetty.setShards(shardList);
     jetty.setDataDir(dataDir);
     jetty.start();

Modified: lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java (original)
+++ lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java Wed Dec 12 21:41:06 2012
@@ -178,6 +178,7 @@ public abstract class SolrTestCaseJ4 ext
   /** Call initCore in @BeforeClass to instantiate a solr core in your test class.
    * deleteCore will be called for you via SolrTestCaseJ4 @AfterClass */
   public static void initCore(String config, String schema, String solrHome) throws Exception {
+    assertNotNull(solrHome);
     configString = config;
     schemaString = schema;
     testSolrHome = solrHome;
@@ -378,6 +379,7 @@ public abstract class SolrTestCaseJ4 ext
   }
 
   public static void createCore() {
+    assertNotNull(testSolrHome);
     solrConfig = TestHarness.createConfig(testSolrHome, coreName, getSolrConfigFile());
     h = new TestHarness( dataDir.getAbsolutePath(),
             solrConfig,

Modified: lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java?rev=1420992&r1=1420991&r2=1420992&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java (original)
+++ lucene/dev/branches/branch_4x/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java Wed Dec 12 21:41:06 2012
@@ -44,7 +44,8 @@ public abstract class AbstractDistribZkT
 
   @BeforeClass
   public static void beforeThisClass() throws Exception {
-    useFactory(null);
+    // Only For Manual Testing: this will force an fs based dir factory
+    //useFactory(null);
   }
 
 
@@ -63,8 +64,9 @@ public abstract class AbstractDistribZkT
     System.setProperty("enable.update.log", "true");
     System.setProperty("remove.version.field", "true");
 
-
-    AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), "solrconfig.xml", "schema.xml");
+    String schema = getSchemaFile();
+    if (schema == null) schema = "schema.xml";
+    AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), "solrconfig.xml", schema);
 
     // set some system properties for use by tests
     System.setProperty("solr.test.sys.prop1", "propone");
@@ -78,8 +80,16 @@ public abstract class AbstractDistribZkT
     FileUtils.copyDirectory(new File(getSolrHome()), controlHome);
     
     System.setProperty("collection", "control_collection");
-    controlJetty = createJetty(controlHome, null, "control_shard");
+    String numShardsS = System.getProperty(ZkStateReader.NUM_SHARDS_PROP);
+    System.setProperty(ZkStateReader.NUM_SHARDS_PROP, "1");
+    controlJetty = createJetty(controlHome, null);      // let the shardId default to shard1
     System.clearProperty("collection");
+    if(numShardsS != null) {
+      System.setProperty(ZkStateReader.NUM_SHARDS_PROP, numShardsS);
+    } else {
+      System.clearProperty(ZkStateReader.NUM_SHARDS_PROP);
+    }
+
     controlClient = createNewSolrServer(controlJetty.getLocalPort());
 
     StringBuilder sb = new StringBuilder();
@@ -101,7 +111,7 @@ public abstract class AbstractDistribZkT
       ZkStateReader zkStateReader = ((SolrDispatchFilter) jettys.get(0)
           .getDispatchFilter().getFilter()).getCores().getZkController()
           .getZkStateReader();
-      zkStateReader.getLeaderProps("collection1", "shard" + (i + 2), 15000);
+      zkStateReader.getLeaderRetry("collection1", "shard" + (i + 2), 15000);
     }
   }
   
@@ -127,7 +137,7 @@ public abstract class AbstractDistribZkT
       boolean sawLiveRecovering = false;
       zkStateReader.updateClusterState(true);
       ClusterState clusterState = zkStateReader.getClusterState();
-      Map<String,Slice> slices = clusterState.getSlices(collection);
+      Map<String,Slice> slices = clusterState.getSlicesMap(collection);
       assertNotNull("Could not find collection:" + collection, slices);
       for (Map.Entry<String,Slice> entry : slices.entrySet()) {
         Map<String,Replica> shards = entry.getValue().getReplicasMap();
@@ -173,6 +183,8 @@ public abstract class AbstractDistribZkT
       }
       cnt++;
     }
+
+    log.info("Recoveries finished - collection: " + collection);
   }
 
   protected void assertAllActive(String collection,ZkStateReader zkStateReader)
@@ -180,7 +192,7 @@ public abstract class AbstractDistribZkT
 
       zkStateReader.updateClusterState(true);
       ClusterState clusterState = zkStateReader.getClusterState();
-      Map<String,Slice> slices = clusterState.getSlices(collection);
+      Map<String,Slice> slices = clusterState.getSlicesMap(collection);
       if (slices == null) {
         throw new IllegalArgumentException("Cannot find collection:" + collection);
       }



Mime
View raw message