From: markrmiller@apache.org
To: commits@lucene.apache.org
Subject: svn commit: r1385136 - in /lucene/dev/trunk/solr: ./ core/src/java/org/apache/solr/cloud/ core/src/java/org/apache/solr/update/processor/ core/src/test/org/apache/solr/cloud/ test-framework/src/java/org/apache/solr/ test-framework/src/java/org/apache/s...
Date: Sat, 15 Sep 2012 17:39:29 -0000
Message-Id: <20120915173929.CA1BB23889D7@eris.apache.org>

Author: markrmiller
Date: Sat Sep 15 17:39:28 2012
New Revision: 1385136

URL: http://svn.apache.org/viewvc?rev=1385136&view=rev
Log:
SOLR-3831: Atomic updates do not distribute correctly to other nodes.

Modified:
    lucene/dev/trunk/solr/CHANGES.txt
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
    lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
    lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
    lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java

Modified: lucene/dev/trunk/solr/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/CHANGES.txt?rev=1385136&r1=1385135&r2=1385136&view=diff
==============================================================================
--- lucene/dev/trunk/solr/CHANGES.txt (original)
+++ lucene/dev/trunk/solr/CHANGES.txt Sat Sep 15 17:39:28 2012
@@ -225,6 +225,9 @@ Bug Fixes
 * SOLR-3844: SolrCore reload can fail because it tries to remove the index
   write lock while already holding it. (Mark Miller)
 
+* SOLR-3831: Atomic updates do not distribute correctly to other nodes.
+  (Jim Musil, Mark Miller)
+
 Other Changes
 ----------------------

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java?rev=1385136&r1=1385135&r2=1385136&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java Sat Sep 15 17:39:28 2012
@@ -266,7 +266,7 @@ public class OverseerCollectionProcessor
       sreq.shards = new String[] {replica};
       sreq.actualShards = sreq.shards;
       sreq.params = params;
-      
+      log.info("Collection Admin sending CoreAdmin cmd to " + replica);
       shardHandler.submit(sreq, replica, sreq.params);
     }
   }

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java?rev=1385136&r1=1385135&r2=1385136&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java Sat Sep 15 17:39:28 2012
@@ -321,10 +321,7 @@ public class DistributedUpdateProcessor
     boolean dropCmd = false;
     if (!forwardToLeader) {
-      // clone the original doc
-      SolrInputDocument clonedDoc = cmd.solrDoc.deepCopy();
-      dropCmd = versionAdd(cmd, clonedDoc);
-      cmd.solrDoc = clonedDoc;
+      dropCmd = versionAdd(cmd);
     }
 
     if (dropCmd) {
@@ -441,11 +438,10 @@ public class DistributedUpdateProcessor
   /**
    * @param cmd
-   * @param cloneDoc needs the version if it's assigned
    * @return whether or not to drop this cmd
    * @throws IOException
    */
-  private boolean versionAdd(AddUpdateCommand cmd, SolrInputDocument cloneDoc) throws IOException {
+  private boolean versionAdd(AddUpdateCommand cmd) throws IOException {
     BytesRef idBytes = cmd.getIndexedId();
 
     if (vinfo == null || idBytes == null) {
@@ -518,7 +514,6 @@ public class DistributedUpdateProcessor
             long version = vinfo.getNewClock();
             cmd.setVersion(version);
             cmd.getSolrInputDocument().setField(VersionInfo.VERSION_FIELD, version);
-            cloneDoc.setField(VersionInfo.VERSION_FIELD, version);
             bucket.updateHighest(version);
           } else {
             // The leader forwarded us this update.
@@ -550,9 +545,20 @@ public class DistributedUpdateProcessor
               }
             }
           }
-          
+
+          boolean willDistrib = isLeader && nodes != null && nodes.size() > 0;
+
+          SolrInputDocument clonedDoc = null;
+          if (willDistrib) {
+            clonedDoc = cmd.solrDoc.deepCopy();
+          }
+
           // TODO: possibly set checkDeleteByQueries as a flag on the command?
           doLocalAdd(cmd);
+
+          if (willDistrib) {
+            cmd.solrDoc = clonedDoc;
+          }
 
         }  // end synchronized (bucket)
       } finally {
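
The core of the fix is the DistributedUpdateProcessor change above: the deep copy of the
SolrInputDocument now happens inside the version bucket, only when the update will actually be
forwarded (willDistrib: a leader with live replica nodes), and only after the leader has resolved
the update - the version is assigned and, per the issue title, the atomic update has been applied
on the leader. The pristine clone is then swapped back in after doLocalAdd, so replicas receive
the resolved document rather than whatever the local add chain left behind. A minimal,
self-contained sketch of that clone-before-local-add pattern follows; the class and the
indexLocally/forwardToReplicas helpers are hypothetical stand-ins, not the actual Solr code path:

import org.apache.solr.common.SolrInputDocument;

public class CloneBeforeDistribSketch {

  // Hypothetical stand-in for doLocalAdd(cmd): the local update chain may
  // mutate the document it indexes.
  static void indexLocally(SolrInputDocument doc) {
    doc.setField("processed_b", true);
  }

  // Hypothetical stand-in for forwarding the command to replicas.
  static void forwardToReplicas(SolrInputDocument doc) {
    System.out.println("forwarding: " + doc);
  }

  public static void main(String[] args) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.setField("id", "1");
    doc.setField("_version_", 1234L);

    // As in the patch: only a leader with live replicas pays for the copy.
    boolean willDistrib = true;

    // Deep-copy after the version is assigned but before the local add.
    SolrInputDocument clonedDoc = willDistrib ? doc.deepCopy() : null;

    indexLocally(doc); // may mutate doc

    // Replicas get the pristine copy, not the locally mutated one.
    forwardToReplicas(willDistrib ? clonedDoc : doc);
  }
}

Taking the copy under the bucket lock also means the clone and the locally indexed document
cannot diverge between version assignment and the local add.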
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java?rev=1385136&r1=1385135&r2=1385136&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java Sat Sep 15 17:39:28 2012
@@ -70,13 +70,14 @@ import org.apache.solr.common.params.Upd
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.update.SolrCmdDistributor.Request;
 import org.apache.solr.util.DefaultSolrThreadFactory;
+import org.junit.Before;
 
 /**
  * This test simply does a bunch of basic things in solrcloud mode and asserts things
  * work as expected.
  */
 @Slow
-public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
+public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
   
   private static final String DEFAULT_COLLECTION = "collection1";
   private static final boolean DEBUG = false;
@@ -109,8 +110,19 @@ public class BasicDistributedZkTest exte
   CompletionService completionService;
   Set> pending;
   
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    System.setProperty("numShards", Integer.toString(sliceCount));
+  }
+  
   public BasicDistributedZkTest() {
     fixShardCount = true;
+    
+    sliceCount = 2;
     shardCount = 3;
     completionService = new ExecutorCompletionService(executor);
     pending = new HashSet>();
@@ -138,6 +150,14 @@ public class BasicDistributedZkTest exte
   public void doTest() throws Exception {
     // setLoggingLevel(null);
+    
+    // make sure we have leaders for each shard
+    for (int j = 1; j < sliceCount; j++) {
+      zkStateReader.getLeaderProps(DEFAULT_COLLECTION, "shard" + j, 10000);
+    }      // make sure we again have leaders for each shard
+    
+    waitForRecoveriesToFinish(false);
+    
     del("*:*");
 
     indexr(id,1, i1, 100, tlong, 100,t1,"now is the time for all good men"
@@ -180,74 +200,74 @@ public class BasicDistributedZkTest exte
 
     // random value sort
     for (String f : fieldNames) {
-      query("q","*:*", "sort",f+" desc");
-      query("q","*:*", "sort",f+" asc");
+      query(false, new String[] {"q","*:*", "sort",f+" desc"});
+      query(false, new String[] {"q","*:*", "sort",f+" asc"});
     }
 
     // these queries should be exactly ordered and scores should exactly match
-    query("q","*:*", "sort",i1+" desc");
-    query("q","*:*", "sort",i1+" asc");
-    query("q","*:*", "sort",i1+" desc", "fl","*,score");
-    query("q","*:*", "sort","n_tl1 asc", "fl","*,score");
-    query("q","*:*", "sort","n_tl1 desc");
+    query(false, new String[] {"q","*:*", "sort",i1+" desc"});
+    query(false, new String[] {"q","*:*", "sort",i1+" asc"});
+    query(false, new String[] {"q","*:*", "sort",i1+" desc", "fl","*,score"});
+    query(false, new String[] {"q","*:*", "sort","n_tl1 asc", "fl","*,score"});
+    query(false, new String[] {"q","*:*", "sort","n_tl1 desc"});
     handle.put("maxScore", SKIPVAL);
-    query("q","{!func}"+i1);// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList()
+    query(false, new String[] {"q","{!func}"+i1});// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList()
     //is agnostic of request params.
     handle.remove("maxScore");
-    query("q","{!func}"+i1, "fl","*,score");  // even scores should match exactly here
+    query(false, new String[] {"q","{!func}"+i1, "fl","*,score"});  // even scores should match exactly here
 
     handle.put("highlighting", UNORDERED);
     handle.put("response", UNORDERED);
 
     handle.put("maxScore", SKIPVAL);
-    query("q","quick");
-    query("q","all","fl","id","start","0");
-    query("q","all","fl","foofoofoo","start","0");  // no fields in returned docs
-    query("q","all","fl","id","start","100");
+    query(false, new String[] {"q","quick"});
+    query(false, new String[] {"q","all","fl","id","start","0"});
+    query(false, new String[] {"q","all","fl","foofoofoo","start","0"});  // no fields in returned docs
+    query(false, new String[] {"q","all","fl","id","start","100"});
 
     handle.put("score", SKIPVAL);
-    query("q","quick","fl","*,score");
-    query("q","all","fl","*,score","start","1");
-    query("q","all","fl","*,score","start","100");
-
-    query("q","now their fox sat had put","fl","*,score",
-          "hl","true","hl.fl",t1);
-
-    query("q","now their fox sat had put","fl","foofoofoo",
-          "hl","true","hl.fl",t1);
-
-    query("q","matchesnothing","fl","*,score");
-    
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1);
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count");
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count", "facet.mincount",2);
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index");
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index", "facet.mincount",2);
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1,"facet.limit",1);
-    query("q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*");
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.offset",1);
-    query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.mincount",2);
+    query(false, new String[] {"q","quick","fl","*,score"});
+    query(false, new String[] {"q","all","fl","*,score","start","1"});
+    query(false, new String[] {"q","all","fl","*,score","start","100"});
+
+    query(false, new String[] {"q","now their fox sat had put","fl","*,score",
+          "hl","true","hl.fl",t1});
+
+    query(false, new String[] {"q","now their fox sat had put","fl","foofoofoo",
+          "hl","true","hl.fl",t1});
+
+    query(false, new String[] {"q","matchesnothing","fl","*,score"});
+    
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1});
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count"});
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count", "facet.mincount",2});
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index"});
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index", "facet.mincount",2});
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1,"facet.limit",1});
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"});
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.offset",1});
+    query(false,
+        new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.mincount",2});
 
     // test faceting multiple things at once
-    query("q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"
-    ,"facet.field",t1);
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"
+    ,"facet.field",t1});
 
     // test filter tagging, facet exclusion, and naming (multi-select facet support)
-    query("q","*:*", "rows",100, "facet","true", "facet.query","{!key=myquick}quick", "facet.query","{!key=myall ex=a}all", "facet.query","*:*"
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.query","{!key=myquick}quick", "facet.query","{!key=myall ex=a}all", "facet.query","*:*"
     ,"facet.field","{!key=mykey ex=a}"+t1
     ,"facet.field","{!key=other ex=b}"+t1
     ,"facet.field","{!key=again ex=a,b}"+t1
     ,"facet.field",t1
-    ,"fq","{!tag=a}id:[1 TO 7]", "fq","{!tag=b}id:[3 TO 9]"
+    ,"fq","{!tag=a}id:[1 TO 7]", "fq","{!tag=b}id:[3 TO 9]"}
     );
-    query("q", "*:*", "facet", "true", "facet.field", "{!ex=t1}SubjectTerms_mfacet", "fq", "{!tag=t1}SubjectTerms_mfacet:(test 1)", "facet.limit", "10", "facet.mincount", "1");
+    query(false, new Object[] {"q", "*:*", "facet", "true", "facet.field", "{!ex=t1}SubjectTerms_mfacet", "fq", "{!tag=t1}SubjectTerms_mfacet:(test 1)", "facet.limit", "10", "facet.mincount", "1"});
 
     // test field that is valid in schema but missing in all shards
-    query("q","*:*", "rows",100, "facet","true", "facet.field",missingField, "facet.mincount",2);
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",missingField, "facet.mincount",2});
     // test field that is valid in schema and missing in some shards
-    query("q","*:*", "rows",100, "facet","true", "facet.field",oddField, "facet.mincount",2);
+    query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",oddField, "facet.mincount",2});
 
-    query("q","*:*", "sort",i1+" desc", "stats", "true", "stats.field", i1);
+    query(false, new Object[] {"q","*:*", "sort",i1+" desc", "stats", "true", "stats.field", i1});
 
     /*** TODO: the failure may come back in "exception"
     try {
@@ -263,9 +283,9 @@ public class BasicDistributedZkTest exte
     // This makes it much more likely that we may not get the top facet values and hence
    // we turn of that checking.
     handle.put("facet_fields", SKIPVAL);
-    query("q","*:*", "rows",0, "facet","true", "facet.field",t1,"facet.limit",5, "facet.shard.limit",5);
+    query(false, new Object[] {"q","*:*", "rows",0, "facet","true", "facet.field",t1,"facet.limit",5, "facet.shard.limit",5});
     // check a complex key name
-    query("q","*:*", "rows",0, "facet","true", "facet.field","{!key='a b/c \\' \\} foo'}"+t1,"facet.limit",5, "facet.shard.limit",5);
+    query(false, new Object[] {"q","*:*", "rows",0, "facet","true", "facet.field","{!key='a b/c \\' \\} foo'}"+t1,"facet.limit",5, "facet.shard.limit",5});
     handle.remove("facet_fields");
 
     
@@ -277,20 +297,20 @@ public class BasicDistributedZkTest exte
       index_specific(i, id,100, i1, 107 ,t1,"oh no, a duplicate!");
     }
     commit();
-    query("q","duplicate", "hl","true", "hl.fl", t1);
-    query("q","fox duplicate horses", "hl","true", "hl.fl", t1);
-    query("q","*:*", "rows",100);
+    query(false, new Object[] {"q","duplicate", "hl","true", "hl.fl", t1});
+    query(false, new Object[] {"q","fox duplicate horses", "hl","true", "hl.fl", t1});
+    query(false, new Object[] {"q","*:*", "rows",100});
     }
 
     // test debugging
     handle.put("explain", SKIPVAL);
     handle.put("debug", UNORDERED);
     handle.put("time", SKIPVAL);
-    query("q","now their fox sat had put","fl","*,score",CommonParams.DEBUG_QUERY, "true");
-    query("q", "id:[1 TO 5]", CommonParams.DEBUG_QUERY, "true");
-    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.TIMING);
-    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.RESULTS);
-    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.QUERY);
+    query(false, new Object[] {"q","now their fox sat had put","fl","*,score",CommonParams.DEBUG_QUERY, "true"});
+    query(false, new Object[] {"q", "id:[1 TO 5]", CommonParams.DEBUG_QUERY, "true"});
+    query(false, new Object[] {"q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.TIMING});
+    query(false, new Object[] {"q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.RESULTS});
+    query(false, new Object[] {"q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.QUERY});
 
     // TODO: This test currently fails because debug info is obtained only
     // on shards with matches.
@@ -343,9 +363,15 @@ public class BasicDistributedZkTest exte
       SolrRequest request = new QueryRequest(params);
       request.setPath("/admin/collections");
 
-      clients.get(clientIndex).request(request);
+      final String baseUrl = ((HttpSolrServer) clients.get(clientIndex)).getBaseURL().substring(
+          0,
+          ((HttpSolrServer) clients.get(clientIndex)).getBaseURL().length()
+              - DEFAULT_COLLECTION.length() - 1);
+      
+      createNewSolrServer("", baseUrl).request(request);
     }
     
+    Set>> collectionInfosEntrySet = collectionInfos.entrySet();
     for (Entry> entry : collectionInfosEntrySet) {
       String collection = entry.getKey();
@@ -357,7 +383,11 @@ public class BasicDistributedZkTest exte
       HttpSolrServer collectionClient = new HttpSolrServer(url);
       
       // poll for a second - it can take a moment before we are ready to serve
-      waitForNon404or503(collectionClient);
+      waitForNon403or404or503(collectionClient);
+    }
+    
+    for (int i = 0; i < cnt; i++) {
+      waitForRecoveriesToFinish("awholynewcollection_" + i, zkStateReader, false);
     }
     
     List collectionNameList = new ArrayList();
@@ -400,7 +430,12 @@ public class BasicDistributedZkTest exte
     request.setPath("/admin/collections");
 
     // we can use this client because we just want base url
-    clients.get(0).request(request);
+    final String baseUrl = ((HttpSolrServer) clients.get(0)).getBaseURL().substring(
+        0,
+        ((HttpSolrServer) clients.get(0)).getBaseURL().length()
+            - DEFAULT_COLLECTION.length() - 1);
+    
+    createNewSolrServer("", baseUrl).request(request);
     
     // reloads make take a short while
     boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
@@ -413,7 +448,7 @@ public class BasicDistributedZkTest exte
     request = new QueryRequest(params);
     request.setPath("/admin/collections");
 
-    clients.get(0).request(request);
+    createNewSolrServer("", baseUrl).request(request);
    
     // ensure its out of the state
     checkForMissingCollection(collectionName);
@@ -502,7 +537,7 @@ public class BasicDistributedZkTest exte
     throw new RuntimeException("Could not find a live node for collection:" + collection);
   }
 
-  private void waitForNon404or503(HttpSolrServer collectionClient)
+  private void waitForNon403or404or503(HttpSolrServer collectionClient)
       throws Exception {
     SolrException exp = null;
     long timeoutAt = System.currentTimeMillis() + 30000;
@@ -513,7 +548,7 @@ public class BasicDistributedZkTest exte
       try {
         collectionClient.query(new SolrQuery("*:*"));
       } catch (SolrException e) {
-        if (!(e.code() == 403 || e.code() == 503)) {
+        if (!(e.code() == 403 || e.code() == 503 || e.code() == 404)) {
          throw e;
        }
        exp = e;
@@ -535,7 +570,6 @@ public class BasicDistributedZkTest exte
     boolean found = false;
     boolean sliceMatch = false;
     while (System.currentTimeMillis() < timeoutAt) {
-      solrj.getZkStateReader().updateClusterState(true);
      ClusterState clusterState = solrj.getZkStateReader().getClusterState();
      Map> collections = clusterState
          .getCollectionStates();
@@ -544,35 +578,16 @@ public class BasicDistributedZkTest exte
        // did we find expectedSlices slices/shards?
        if (slices.size() == expectedSlices) {
          sliceMatch = true;
-          found = true;
-          // also make sure each are active
-          Iterator> it = slices.entrySet().iterator();
-          while (it.hasNext()) {
-            Entry sliceEntry = it.next();
-            Map sliceShards = sliceEntry.getValue()
-                .getReplicasMap();
-            Iterator> shardIt = sliceShards
-                .entrySet().iterator();
-            while (shardIt.hasNext()) {
-              Entry shardEntry = shardIt.next();
-              if (!shardEntry.getValue().getStr(ZkStateReader.STATE_PROP)
-                  .equals(ZkStateReader.ACTIVE)) {
-                found = false;
-                break;
-              }
-            }
-          }
-          if (found) break;
+        }
+        found = true;
+        break;
       }
-      Thread.sleep(100);
+      Thread.sleep(500);
     }
     if (!found) {
-      printLayout();
       if (!sliceMatch) {
         fail("Could not find new " + expectedSlices + " slice collection called " + collectionName);
-      } else {
-        fail("Found expected # of slices, but some nodes are not active for collection called " + collectionName);
       }
     }
   }
@@ -648,6 +663,8 @@ public class BasicDistributedZkTest exte
   // cloud level test mainly needed just to make sure that versions and errors are propagated correctly
   private void doOptimisticLockingAndUpdating() throws Exception {
+    printLayout();
+    
     SolrInputDocument sd = sdoc("id", 1000, "_version_", -1);
     indexDoc(sd);
@@ -717,10 +734,14 @@ public class BasicDistributedZkTest exte
   }
 
   private void testANewCollectionInOneInstanceWithManualShardAssignement() throws Exception {
+    System.clearProperty("numShards");
     List collectionClients = new ArrayList();
     SolrServer client = clients.get(0);
     otherCollectionClients.put(oneInstanceCollection2, collectionClients);
-    String baseUrl = ((HttpSolrServer) client).getBaseURL();
+    final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
+        0,
+        ((HttpSolrServer) client).getBaseURL().length()
+            - DEFAULT_COLLECTION.length() - 1);
     createCollection(oneInstanceCollection2, collectionClients, baseUrl, 1, "slice1");
     createCollection(oneInstanceCollection2, collectionClients, baseUrl, 2, "slice2");
     createCollection(oneInstanceCollection2, collectionClients, baseUrl, 3, "slice2");
@@ -743,6 +764,8 @@ public class BasicDistributedZkTest exte
     
     assertAllActive(oneInstanceCollection2, solrj.getZkStateReader());
     
+    printLayout();
+    
     // TODO: enable when we don't falsely get slice1...
     // solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice1", 30000);
     // solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice2", 30000);
@@ -784,7 +807,10 @@ public class BasicDistributedZkTest exte
   private void testSearchByCollectionName() throws SolrServerException {
     SolrServer client = clients.get(0);
-    String baseUrl = ((HttpSolrServer) client).getBaseURL();
+    final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
+        0,
+        ((HttpSolrServer) client).getBaseURL().length()
+            - DEFAULT_COLLECTION.length() - 1);
     
     // the cores each have different names, but if we add the collection name to the url
     // we should get mapped to the right core
@@ -798,7 +824,10 @@ public class BasicDistributedZkTest exte
     List collectionClients = new ArrayList();
     SolrServer client = clients.get(0);
     otherCollectionClients.put(oneInstanceCollection , collectionClients);
-    String baseUrl = ((HttpSolrServer) client).getBaseURL();
+    final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
+        0,
+        ((HttpSolrServer) client).getBaseURL().length()
+            - DEFAULT_COLLECTION.length() - 1);
     createCollection(oneInstanceCollection, collectionClients, baseUrl, 1);
     createCollection(oneInstanceCollection, collectionClients, baseUrl, 2);
     createCollection(oneInstanceCollection, collectionClients, baseUrl, 3);
@@ -961,13 +990,17 @@ public class BasicDistributedZkTest exte
     int unique = 0;
     for (final SolrServer client : clients) {
       unique++;
+      final String baseUrl = ((HttpSolrServer) client).getBaseURL()
+          .substring(
+              0,
+              ((HttpSolrServer) client).getBaseURL().length()
+                  - DEFAULT_COLLECTION.length() -1);
      final int frozeUnique = unique;
      Callable call = new Callable() {
        public Object call() {
          HttpSolrServer server;
          try {
-            server = new HttpSolrServer(
-                ((HttpSolrServer) client).getBaseURL());
+            server = new HttpSolrServer(baseUrl);
            
            Create createCmd = new Create();
            createCmd.setCoreName(collection);
@@ -983,8 +1016,7 @@ public class BasicDistributedZkTest exte
        }
      };
     
-      collectionClients.add(createNewSolrServer(collection,
-          ((HttpSolrServer) client).getBaseURL()));
+      collectionClients.add(createNewSolrServer(collection, baseUrl));
      pending.add(completionService.submit(call));
     
      while (pending != null && pending.size() > 0) {
@@ -1043,6 +1075,7 @@ public class BasicDistributedZkTest exte
     if (solrj != null) {
       solrj.shutdown();
     }
+    System.clearProperty("numShards");
     System.clearProperty("zkHost");
   }
 }
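
A recurring edit throughout this test replaces direct use of a core-level client with a server
created against the container root: the test clients point at .../solr/collection1, while the
Collections and CoreAdmin APIs live one path segment up, so the trailing "/" plus
DEFAULT_COLLECTION is stripped from the base URL before calling createNewSolrServer("", baseUrl).
A standalone sketch of that substring idiom (the class and helper name are invented for
illustration):

public class BaseUrlSketch {

  // Mirrors the substring arithmetic in the hunks above: drop the trailing
  // "/" + collection segment to get the container root.
  static String stripCollection(String baseUrl, String collection) {
    return baseUrl.substring(0, baseUrl.length() - collection.length() - 1);
  }

  public static void main(String[] args) {
    String coreUrl = "http://127.0.0.1:8983/solr/collection1";
    // Prints http://127.0.0.1:8983/solr - the root the admin APIs expect.
    System.out.println(stripCollection(coreUrl, "collection1"));
  }
}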
Modified: lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java?rev=1385136&r1=1385135&r2=1385136&view=diff
==============================================================================
--- lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java (original)
+++ lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java Sat Sep 15 17:39:28 2012
@@ -373,6 +373,10 @@ public abstract class BaseDistributedSea
   }
 
   protected void query(Object... q) throws Exception {
+    query(true, q);
+  }
+  
+  protected void query(boolean setDistribParams, Object[] q) throws Exception {
     
     final ModifiableSolrParams params = new ModifiableSolrParams();
@@ -385,7 +389,7 @@ public abstract class BaseDistributedSea
     validateControlData(controlRsp);
 
     params.remove("distrib");
-    setDistributedParams(params);
+    if (setDistribParams) setDistributedParams(params);
 
     QueryResponse rsp = queryServer(params);
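
The test-framework change above keeps every existing query(...) call site working unchanged: the
varargs method delegates with setDistribParams=true, while callers that route requests themselves -
like the SolrCloud test in this commit - pass false so setDistributedParams(params) is skipped.
A compact sketch of that delegation shape (the harness class and println bodies are illustrative,
not the real implementation):

public class QueryHarnessSketch {

  // Existing entry point: behavior unchanged for old call sites.
  protected void query(Object... q) throws Exception {
    query(true, q);
  }

  // New overload: SolrCloud callers pass false because the cluster routes
  // requests across shards itself, with no explicit shards= parameter.
  protected void query(boolean setDistribParams, Object[] q) throws Exception {
    if (setDistribParams) {
      System.out.println("adding distributed (shards=...) params");
    }
    System.out.println("running query with " + q.length + " params");
  }

  public static void main(String[] args) throws Exception {
    QueryHarnessSketch h = new QueryHarnessSketch();
    h.query("q", "*:*");                       // old style: distrib params set
    h.query(false, new Object[] {"q", "*:*"}); // new style: caller opts out
  }
}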
Modified: lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java?rev=1385136&r1=1385135&r2=1385136&view=diff
==============================================================================
--- lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java (original)
+++ lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java Sat Sep 15 17:39:28 2012
@@ -128,6 +128,7 @@ public abstract class AbstractDistribZkT
       zkStateReader.updateClusterState(true);
       ClusterState clusterState = zkStateReader.getClusterState();
       Map slices = clusterState.getSlices(collection);
+      assertNotNull("Could not find collection:" + collection, slices);
       for (Map.Entry entry : slices.entrySet()) {
         Map shards = entry.getValue().getReplicasMap();
         for (Map.Entry shard : shards.entrySet()) {

Modified: lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java?rev=1385136&r1=1385135&r2=1385136&view=diff
==============================================================================
--- lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java (original)
+++ lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java Sat Sep 15 17:39:28 2012
@@ -288,8 +288,13 @@ public abstract class AbstractFullDistri
     StringBuilder sb = new StringBuilder();
     for (int i = 1; i <= numJettys; i++) {
       if (sb.length() > 0) sb.append(',');
-      JettySolrRunner j = createJetty(new File(getSolrHome()), testDir + "/jetty"
-          + this.jettyIntCntr.incrementAndGet(), null, "solrconfig.xml", null);
+      int cnt = this.jettyIntCntr.incrementAndGet();
+      File jettyDir = new File(TEMP_DIR,
+          "solrtest-" + "jetty" + cnt + "-" + System.currentTimeMillis());
+      jettyDir.mkdirs();
+      org.apache.commons.io.FileUtils.copyDirectory(new File(getSolrHome()), jettyDir);
+      JettySolrRunner j = createJetty(jettyDir, testDir + "/jetty"
+          + cnt, null, "solrconfig.xml", null);
       jettys.add(j);
       SolrServer client = createNewSolrServer(j.getLocalPort());
       clients.add(client);
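
The final hunk stops pointing every Jetty at the shared solr home and instead gives each instance
its own timestamped copy under TEMP_DIR, so concurrently running instances cannot interfere
through shared files. A standalone sketch of the same idea (the template path is hypothetical;
commons-io's FileUtils.copyDirectory is the call the patch itself uses):

import java.io.File;

import org.apache.commons.io.FileUtils;

public class PerInstanceHomeSketch {

  public static void main(String[] args) throws Exception {
    // Hypothetical template location; the patch copies from getSolrHome().
    File template = new File("solr-template-home");
    File tempDir = new File(System.getProperty("java.io.tmpdir"));

    for (int cnt = 1; cnt <= 3; cnt++) {
      File jettyDir = new File(tempDir,
          "solrtest-jetty" + cnt + "-" + System.currentTimeMillis());
      jettyDir.mkdirs();
      // Each instance works against a private copy of the template home.
      FileUtils.copyDirectory(template, jettyDir);
      System.out.println("instance " + cnt + " home: " + jettyDir);
    }
  }
}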