lucene-commits mailing list archives

From mikemcc...@apache.org
Subject svn commit: r1649347 [14/31] - in /lucene/dev/branches/lucene6005: ./ dev-tools/ dev-tools/idea/solr/contrib/dataimporthandler-extras/ dev-tools/idea/solr/contrib/extraction/ dev-tools/idea/solr/contrib/map-reduce/ dev-tools/idea/solr/contrib/velocity/...
Date Sun, 04 Jan 2015 14:53:21 GMT
Modified: lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/schema.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/schema.xml?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/schema.xml (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/schema.xml Sun Jan  4 14:53:12 2015
@@ -264,7 +264,7 @@
 
 
     <!--Binary data type. The data should be sent/retrieved as Base64 encoded Strings -->
-    <fieldtype name="binary" class="solr.BinaryField"/>
+    <fieldType name="binary" class="solr.BinaryField"/>
 
     <!-- The "RandomSortField" is not used to store or search any
          data.  You can declare fields of this type in your schema
@@ -496,14 +496,14 @@
       </analyzer>
     </fieldType>
     
-    <fieldtype name="phonetic" stored="false" indexed="true" class="solr.TextField" >
+    <fieldType name="phonetic" stored="false" indexed="true" class="solr.TextField" >
       <analyzer>
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.DoubleMetaphoneFilterFactory" inject="false"/>
       </analyzer>
-    </fieldtype>
+    </fieldType>
 
-    <fieldtype name="payloads" stored="false" indexed="true" class="solr.TextField" >
+    <fieldType name="payloads" stored="false" indexed="true" class="solr.TextField" >
       <analyzer>
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <!--
@@ -519,7 +519,7 @@
          -->
         <filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
       </analyzer>
-    </fieldtype>
+    </fieldType>
 
     <!-- lowercases the entire field value, keeping it as a single token.  -->
     <fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100">
@@ -556,7 +556,7 @@
 
     <!-- since fields of this type are by default not stored or indexed,
          any data added to them will be ignored outright.  --> 
-    <fieldtype name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
+    <fieldType name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
 
     <!-- This point type indexes the coordinates as separate fields (subFields)
       If subFieldType is defined, it references a type, and a dynamic field

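For illustration, a minimal SolrJ sketch of feeding the "payloads" type declared above; the core URL and document values are hypothetical. DelimitedPayloadTokenFilterFactory with encoder="float" reads an optional "|<float>" suffix on each whitespace-separated token and stores it as that term's payload:

    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.common.SolrInputDocument;

    public class PayloadIndexExample {
      public static void main(String[] args) throws Exception {
        // Hypothetical core URL, for illustration only.
        HttpSolrClient client = new HttpSolrClient("http://localhost:8983/solr/collection1");
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "1");
        // WhitespaceTokenizer splits on spaces; the payload filter strips
        // "|2.0" and attaches 2.0 as a float payload to the term "bread".
        doc.addField("payloads", "rice|1.0 bread|2.0 butter|3.0");
        client.add(doc);
        client.commit();
        client.shutdown();
      }
    }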
Modified: lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/solrconfig.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/solrconfig.xml?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/solrconfig.xml (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/solrconfig.xml Sun Jan  4 14:53:12 2015
@@ -140,7 +140,7 @@
        index format, but hooks into the schema to provide per-field customization of
        the postings lists and per-document values in the fieldType element
        (postingsFormat/docValuesFormat). Note that most of the alternative implementations
-       are experimental, so if you choose to customize the index format, its a good
+       are experimental, so if you choose to customize the index format, it's a good
        idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
        before upgrading to a newer version to avoid unnecessary reindexing.
   -->
@@ -866,104 +866,6 @@
      </lst>
   </requestHandler>
 
- 
-  <!-- A Robust Example 
-       
-       This example SearchHandler declaration shows off usage of the
-       SearchHandler with many defaults declared
-
-       Note that multiple instances of the same Request Handler
-       (SearchHandler) can be registered multiple times with different
-       names (and different init parameters)
-    -->
-  <requestHandler name="/browse" class="solr.SearchHandler">
-     <lst name="defaults">
-       <str name="echoParams">explicit</str>
-
-       <!-- VelocityResponseWriter settings -->
-       <str name="wt">velocity</str>
-       <str name="v.template">browse</str>
-       <str name="v.layout">layout</str>
-       <str name="title">Solritas</str>
-
-       <!-- Query settings -->
-       <str name="defType">edismax</str>
-       <str name="qf">
-          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
-          title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
-       </str>
-       <str name="df">text</str>
-       <str name="mm">100%</str>
-       <str name="q.alt">*:*</str>
-       <str name="rows">10</str>
-       <str name="fl">*,score</str>
-
-       <str name="mlt.qf">
-         text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
-         title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
-       </str>
-       <str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
-       <int name="mlt.count">3</int>
-
-       <!-- Faceting defaults -->
-       <str name="facet">on</str>
-       <str name="facet.field">cat</str>
-       <str name="facet.field">manu_exact</str>
-       <str name="facet.field">content_type</str>
-       <str name="facet.field">author_s</str>
-       <str name="facet.query">ipod</str>
-       <str name="facet.query">GB</str>
-       <str name="facet.mincount">1</str>
-       <str name="facet.pivot">cat,inStock</str>
-       <str name="facet.range.other">after</str>
-       <str name="facet.range">price</str>
-       <int name="f.price.facet.range.start">0</int>
-       <int name="f.price.facet.range.end">600</int>
-       <int name="f.price.facet.range.gap">50</int>
-       <str name="facet.range">popularity</str>
-       <int name="f.popularity.facet.range.start">0</int>
-       <int name="f.popularity.facet.range.end">10</int>
-       <int name="f.popularity.facet.range.gap">3</int>
-       <str name="facet.range">manufacturedate_dt</str>
-       <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
-       <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
-       <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
-       <str name="f.manufacturedate_dt.facet.range.other">before</str>
-       <str name="f.manufacturedate_dt.facet.range.other">after</str>
-
-       <!-- Highlighting defaults -->
-       <str name="hl">on</str>
-       <str name="hl.fl">content features title name</str>
-       <str name="hl.encoder">html</str>
-       <str name="hl.simple.pre">&lt;b&gt;</str>
-       <str name="hl.simple.post">&lt;/b&gt;</str>
-       <str name="f.title.hl.fragsize">0</str>
-       <str name="f.title.hl.alternateField">title</str>
-       <str name="f.name.hl.fragsize">0</str>
-       <str name="f.name.hl.alternateField">name</str>
-       <str name="f.content.hl.snippets">3</str>
-       <str name="f.content.hl.fragsize">200</str>
-       <str name="f.content.hl.alternateField">content</str>
-       <str name="f.content.hl.maxAlternateFieldLength">750</str>
-
-       <!-- Spell checking defaults -->
-       <str name="spellcheck">on</str>
-       <str name="spellcheck.extendedResults">false</str>       
-       <str name="spellcheck.count">5</str>
-       <str name="spellcheck.alternativeTermCount">2</str>
-       <str name="spellcheck.maxResultsForSuggest">5</str>       
-       <str name="spellcheck.collate">true</str>
-       <str name="spellcheck.collateExtendedResults">true</str>  
-       <str name="spellcheck.maxCollationTries">5</str>
-       <str name="spellcheck.maxCollations">3</str>           
-     </lst>
-
-     <!-- append spellchecking to our list of components -->
-     <arr name="last-components">
-       <str>spellcheck</str>
-     </arr>
-  </requestHandler>
-
 
   <!-- Field Analysis Request Handler
 

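The /browse handler removed above mostly packaged query-side defaults; the same behavior can be approximated per request. A hedged sketch with a trimmed-down parameter set (field boosts abbreviated for brevity):

    import org.apache.solr.client.solrj.SolrQuery;

    public class BrowseStyleQuery {
      public static void main(String[] args) {
        SolrQuery q = new SolrQuery("ipod");
        q.set("defType", "edismax");
        q.set("qf", "text^0.5 name^1.2 id^10.0"); // abbreviated boost list
        q.set("mm", "100%");                      // require all terms
        q.set("q.alt", "*:*");                    // fallback when q is empty
        q.setRows(10);
        q.setFields("*", "score");
        System.out.println(q);                    // encoded parameter set
      }
    }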
Modified: lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/schema.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/schema.xml?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/schema.xml (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/schema.xml Sun Jan  4 14:53:12 2015
@@ -216,7 +216,7 @@
 
 
     <!--Binary data type. The data should be sent/retrieved as Base64 encoded Strings -->
-    <fieldtype name="binary" class="solr.BinaryField"/>
+    <fieldType name="binary" class="solr.BinaryField"/>
 
     <!-- The "RandomSortField" is not used to store or search any
          data.  You can declare fields of this type in your schema
@@ -449,15 +449,15 @@
     </fieldType>
 
     <!--    
-    <fieldtype name="phonetic" stored="false" indexed="true" class="solr.TextField" >
+    <fieldType name="phonetic" stored="false" indexed="true" class="solr.TextField" >
       <analyzer>
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.DoubleMetaphoneFilterFactory" inject="false"/>
       </analyzer>
-    </fieldtype>
+    </fieldType>
     -->
     
-    <fieldtype name="payloads" stored="false" indexed="true" class="solr.TextField" >
+    <fieldType name="payloads" stored="false" indexed="true" class="solr.TextField" >
       <analyzer>
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <!--
@@ -473,7 +473,7 @@
          -->
         <filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
       </analyzer>
-    </fieldtype>
+    </fieldType>
 
     <!-- lowercases the entire field value, keeping it as a single token.  -->
     <fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100">
@@ -510,7 +510,7 @@
 
     <!-- since fields of this type are by default not stored or indexed,
          any data added to them will be ignored outright.  --> 
-    <fieldtype name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
+    <fieldType name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
 
     <!-- This point type indexes the coordinates as separate fields (subFields)
       If subFieldType is defined, it references a type, and a dynamic field
@@ -532,7 +532,7 @@
     A Geohash is a compact representation of a latitude longitude pair in a single field.
     See http://wiki.apache.org/solr/SpatialSearch
    -->
-    <fieldtype name="geohash" class="solr.GeoHashField"/>
+    <fieldType name="geohash" class="solr.GeoHashField"/>
 
    <!-- Money/currency field type. See http://wiki.apache.org/solr/MoneyFieldType
         Parameters:

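As the "binary" comments above note, BinaryField values travel as Base64 strings over XML; SolrJ performs the encoding when handed a byte[]. A standalone sketch of the wire encoding (java.util.Base64 requires Java 8):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class BinaryFieldWireFormat {
      public static void main(String[] args) {
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        String wireValue = Base64.getEncoder().encodeToString(payload);
        System.out.println(wireValue); // aGVsbG8= -- the form sent in XML
      }
    }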
Modified: lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/solrconfig.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/solrconfig.xml?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/solrconfig.xml (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/solrconfig.xml Sun Jan  4 14:53:12 2015
@@ -122,7 +122,7 @@
        index format, but hooks into the schema to provide per-field customization of
        the postings lists and per-document values in the fieldType element
        (postingsFormat/docValuesFormat). Note that most of the alternative implementations
-       are experimental, so if you choose to customize the index format, its a good
+       are experimental, so if you choose to customize the index format, it's a good
        idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
        before upgrading to a newer version to avoid unnecessary reindexing.
   -->
@@ -843,104 +843,6 @@
      </lst>
   </requestHandler>
 
- 
-  <!-- A Robust Example 
-       
-       This example SearchHandler declaration shows off usage of the
-       SearchHandler with many defaults declared
-
-       Note that multiple instances of the same Request Handler
-       (SearchHandler) can be registered multiple times with different
-       names (and different init parameters)
-    -->
-  <requestHandler name="/browse" class="solr.SearchHandler">
-     <lst name="defaults">
-       <str name="echoParams">explicit</str>
-
-       <!-- VelocityResponseWriter settings -->
-       <str name="wt">velocity</str>
-       <str name="v.template">browse</str>
-       <str name="v.layout">layout</str>
-       <str name="title">Solritas</str>
-
-       <!-- Query settings -->
-       <str name="defType">edismax</str>
-       <str name="qf">
-          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
-          title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
-       </str>
-       <str name="df">text</str>
-       <str name="mm">100%</str>
-       <str name="q.alt">*:*</str>
-       <str name="rows">10</str>
-       <str name="fl">*,score</str>
-
-       <str name="mlt.qf">
-         text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
-         title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
-       </str>
-       <str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
-       <int name="mlt.count">3</int>
-
-       <!-- Faceting defaults -->
-       <str name="facet">on</str>
-       <str name="facet.field">cat</str>
-       <str name="facet.field">manu_exact</str>
-       <str name="facet.field">content_type</str>
-       <str name="facet.field">author_s</str>
-       <str name="facet.query">ipod</str>
-       <str name="facet.query">GB</str>
-       <str name="facet.mincount">1</str>
-       <str name="facet.pivot">cat,inStock</str>
-       <str name="facet.range.other">after</str>
-       <str name="facet.range">price</str>
-       <int name="f.price.facet.range.start">0</int>
-       <int name="f.price.facet.range.end">600</int>
-       <int name="f.price.facet.range.gap">50</int>
-       <str name="facet.range">popularity</str>
-       <int name="f.popularity.facet.range.start">0</int>
-       <int name="f.popularity.facet.range.end">10</int>
-       <int name="f.popularity.facet.range.gap">3</int>
-       <str name="facet.range">manufacturedate_dt</str>
-       <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
-       <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
-       <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
-       <str name="f.manufacturedate_dt.facet.range.other">before</str>
-       <str name="f.manufacturedate_dt.facet.range.other">after</str>
-
-       <!-- Highlighting defaults -->
-       <str name="hl">on</str>
-       <str name="hl.fl">content features title name</str>
-       <str name="hl.encoder">html</str>
-       <str name="hl.simple.pre">&lt;b&gt;</str>
-       <str name="hl.simple.post">&lt;/b&gt;</str>
-       <str name="f.title.hl.fragsize">0</str>
-       <str name="f.title.hl.alternateField">title</str>
-       <str name="f.name.hl.fragsize">0</str>
-       <str name="f.name.hl.alternateField">name</str>
-       <str name="f.content.hl.snippets">3</str>
-       <str name="f.content.hl.fragsize">200</str>
-       <str name="f.content.hl.alternateField">content</str>
-       <str name="f.content.hl.maxAlternateFieldLength">750</str>
-
-       <!-- Spell checking defaults -->
-       <str name="spellcheck">on</str>
-       <str name="spellcheck.extendedResults">false</str>       
-       <str name="spellcheck.count">5</str>
-       <str name="spellcheck.alternativeTermCount">2</str>
-       <str name="spellcheck.maxResultsForSuggest">5</str>       
-       <str name="spellcheck.collate">true</str>
-       <str name="spellcheck.collateExtendedResults">true</str>  
-       <str name="spellcheck.maxCollationTries">5</str>
-       <str name="spellcheck.maxCollations">3</str>           
-     </lst>
-
-     <!-- append spellchecking to our list of components -->
-     <arr name="last-components">
-       <str>spellcheck</str>
-     </arr>
-  </requestHandler>
-
 
   <!-- Update Request Handler.  
        
@@ -1101,7 +1003,7 @@
 
        http://wiki.apache.org/solr/SolrReplication 
 
-       It is also neccessary for SolrCloud to function (in Cloud mode, the 
+       It is also necessary for SolrCloud to function (in Cloud mode, the
        replication handler is used to bulk transfer segments when nodes 
        are added or need to recover).
 
@@ -1110,7 +1012,7 @@
   <requestHandler name="/replication" class="solr.ReplicationHandler" > 
     <!--
        To enable simple master/slave replication, uncomment one of the 
-       sections below, depending on wether this solr instance should be 
+       sections below, depending on whether this solr instance should be
        the "master" or a "slave".  If this instance is a "slave" you will 
        also need to fill in the masterUrl to point to a real machine.
     -->

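A hedged sketch of talking to the replication handler described above from SolrJ; the URL is hypothetical and "indexversion" is one of the handler's standard commands:

    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.QueryRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;
    import org.apache.solr.common.util.NamedList;

    public class ReplicationPollSketch {
      public static void main(String[] args) throws Exception {
        HttpSolrClient master = new HttpSolrClient("http://master:8983/solr/core1");
        try {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("command", "indexversion");
          QueryRequest req = new QueryRequest(params);
          req.setPath("/replication");          // route to the handler
          NamedList<Object> rsp = master.request(req);
          System.out.println(rsp.get("indexversion"));
        } finally {
          master.shutdown();
        }
      }
    }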
Modified: lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcloud/conf/solrconfig.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcloud/conf/solrconfig.xml?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcloud/conf/solrconfig.xml (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/solr/solrcloud/conf/solrconfig.xml Sun Jan  4 14:53:12 2015
@@ -141,7 +141,7 @@
        index format, but hooks into the schema to provide per-field customization of
        the postings lists and per-document values in the fieldType element
        (postingsFormat/docValuesFormat). Note that most of the alternative implementations
-       are experimental, so if you choose to customize the index format, its a good
+       are experimental, so if you choose to customize the index format, it's a good
        idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
        before upgrading to a newer version to avoid unnecessary reindexing.
   -->
@@ -865,103 +865,6 @@
      </lst>
   </requestHandler>
 
- 
-  <!-- A Robust Example 
-       
-       This example SearchHandler declaration shows off usage of the
-       SearchHandler with many defaults declared
-
-       Note that multiple instances of the same Request Handler
-       (SearchHandler) can be registered multiple times with different
-       names (and different init parameters)
-    -->
-  <requestHandler name="/browse" class="solr.SearchHandler">
-     <lst name="defaults">
-       <str name="echoParams">explicit</str>
-
-       <!-- VelocityResponseWriter settings -->
-       <str name="wt">velocity</str>
-       <str name="v.template">browse</str>
-       <str name="v.layout">layout</str>
-       <str name="title">Solritas</str>
-
-       <!-- Query settings -->
-       <str name="defType">edismax</str>
-       <str name="qf">
-          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
-          title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
-       </str>
-       <str name="df">text</str>
-       <str name="mm">100%</str>
-       <str name="q.alt">*:*</str>
-       <str name="rows">10</str>
-       <str name="fl">*,score</str>
-
-       <str name="mlt.qf">
-         text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
-         title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
-       </str>
-       <str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
-       <int name="mlt.count">3</int>
-
-       <!-- Faceting defaults -->
-       <str name="facet">on</str>
-       <str name="facet.field">cat</str>
-       <str name="facet.field">manu_exact</str>
-       <str name="facet.field">content_type</str>
-       <str name="facet.field">author_s</str>
-       <str name="facet.query">ipod</str>
-       <str name="facet.query">GB</str>
-       <str name="facet.mincount">1</str>
-       <str name="facet.pivot">cat,inStock</str>
-       <str name="facet.range.other">after</str>
-       <str name="facet.range">price</str>
-       <int name="f.price.facet.range.start">0</int>
-       <int name="f.price.facet.range.end">600</int>
-       <int name="f.price.facet.range.gap">50</int>
-       <str name="facet.range">popularity</str>
-       <int name="f.popularity.facet.range.start">0</int>
-       <int name="f.popularity.facet.range.end">10</int>
-       <int name="f.popularity.facet.range.gap">3</int>
-       <str name="facet.range">manufacturedate_dt</str>
-       <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
-       <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
-       <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
-       <str name="f.manufacturedate_dt.facet.range.other">before</str>
-       <str name="f.manufacturedate_dt.facet.range.other">after</str>
-
-       <!-- Highlighting defaults -->
-       <str name="hl">on</str>
-       <str name="hl.fl">content features title name</str>
-       <str name="hl.encoder">html</str>
-       <str name="hl.simple.pre">&lt;b&gt;</str>
-       <str name="hl.simple.post">&lt;/b&gt;</str>
-       <str name="f.title.hl.fragsize">0</str>
-       <str name="f.title.hl.alternateField">title</str>
-       <str name="f.name.hl.fragsize">0</str>
-       <str name="f.name.hl.alternateField">name</str>
-       <str name="f.content.hl.snippets">3</str>
-       <str name="f.content.hl.fragsize">200</str>
-       <str name="f.content.hl.alternateField">content</str>
-       <str name="f.content.hl.maxAlternateFieldLength">750</str>
-
-       <!-- Spell checking defaults -->
-       <str name="spellcheck">on</str>
-       <str name="spellcheck.extendedResults">false</str>       
-       <str name="spellcheck.count">5</str>
-       <str name="spellcheck.alternativeTermCount">2</str>
-       <str name="spellcheck.maxResultsForSuggest">5</str>       
-       <str name="spellcheck.collate">true</str>
-       <str name="spellcheck.collateExtendedResults">true</str>  
-       <str name="spellcheck.maxCollationTries">5</str>
-       <str name="spellcheck.maxCollations">3</str>           
-     </lst>
-
-     <!-- append spellchecking to our list of components -->
-     <arr name="last-components">
-       <str>spellcheck</str>
-     </arr>
-  </requestHandler>
 
   <!-- Field Analysis Request Handler
 

Modified: lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/test-morphlines/tutorialReadAvroContainer.conf
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/test-morphlines/tutorialReadAvroContainer.conf?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/test-morphlines/tutorialReadAvroContainer.conf (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test-files/test-morphlines/tutorialReadAvroContainer.conf Sun Jan  4 14:53:12 2015
@@ -34,7 +34,7 @@ SOLR_LOCATOR : {
 # transformation chain. A morphline consists of one or more (potentially 
 # nested) commands. A morphline is a way to consume records (e.g. Flume events, 
 # HDFS files or blocks), turn them into a stream of records, and pipe the stream 
-# of records through a set of easily configurable transformations on it's way to 
+# of records through a set of easily configurable transformations on its way to 
 # Solr.
 morphlines : [
   {

Modified: lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/AbstractSolrMorphlineTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/AbstractSolrMorphlineTestBase.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/AbstractSolrMorphlineTestBase.java (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/AbstractSolrMorphlineTestBase.java Sun Jan  4 14:53:12 2015
@@ -34,9 +34,9 @@ import java.util.concurrent.atomic.Atomi
 import org.apache.commons.io.FileUtils;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.impl.XMLResponseParser;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
@@ -65,7 +65,7 @@ public class AbstractSolrMorphlineTestBa
   private static Locale savedLocale;
   protected Collector collector;
   protected Command morphline;
-  protected SolrServer solrServer;
+  protected SolrClient solrClient;
   protected DocumentLoader testServer;
   
   protected static final boolean TEST_WITH_EMBEDDED_SOLR_SERVER = true;
@@ -119,19 +119,19 @@ public class AbstractSolrMorphlineTestBa
     if (EXTERNAL_SOLR_SERVER_URL != null) {
       //solrServer = new ConcurrentUpdateSolrServer(EXTERNAL_SOLR_SERVER_URL, 2, 2);
       //solrServer = new SafeConcurrentUpdateSolrServer(EXTERNAL_SOLR_SERVER_URL, 2, 2);
-      solrServer = new HttpSolrServer(EXTERNAL_SOLR_SERVER_URL);
-      ((HttpSolrServer)solrServer).setParser(new XMLResponseParser());
+      solrClient = new HttpSolrClient(EXTERNAL_SOLR_SERVER_URL);
+      ((HttpSolrClient) solrClient).setParser(new XMLResponseParser());
     } else {
       if (TEST_WITH_EMBEDDED_SOLR_SERVER) {
-        solrServer = new EmbeddedTestSolrServer(h.getCoreContainer(), "");
+        solrClient = new EmbeddedTestSolrServer(h.getCoreContainer(), "");
       } else {
         throw new RuntimeException("Not yet implemented");
-        //solrServer = new TestSolrServer(getSolrServer());
+        //solrServer = new TestSolrServer(getSolrClient());
       }
     }
 
     int batchSize = SEQ_NUM2.incrementAndGet() % 2 == 0 ? 100 : 1; //SolrInspector.DEFAULT_SOLR_SERVER_BATCH_SIZE : 1;
-    testServer = new SolrServerDocumentLoader(solrServer, batchSize);
+    testServer = new SolrClientDocumentLoader(solrClient, batchSize);
     deleteAllDocuments();
     
     tempDir = createTempDir().toFile().getAbsolutePath();
@@ -140,8 +140,8 @@ public class AbstractSolrMorphlineTestBa
   @After
   public void tearDown() throws Exception {
     collector = null;
-    solrServer.shutdown();
-    solrServer = null;
+    solrClient.shutdown();
+    solrClient = null;
     super.tearDown();
   }
 
@@ -201,8 +201,8 @@ public class AbstractSolrMorphlineTestBa
 //    return collector.getRecords().size();
     try {
       testServer.commitTransaction();
-      solrServer.commit(false, true, true);
-      QueryResponse rsp = solrServer.query(new SolrQuery(query).setRows(Integer.MAX_VALUE));
+      solrClient.commit(false, true, true);
+      QueryResponse rsp = solrClient.query(new SolrQuery(query).setRows(Integer.MAX_VALUE));
       LOGGER.debug("rsp: {}", rsp);
       int i = 0;
       for (SolrDocument doc : rsp.getResults()) {
@@ -217,7 +217,7 @@ public class AbstractSolrMorphlineTestBa
   
   private void deleteAllDocuments() throws SolrServerException, IOException {
     collector.reset();
-    SolrServer s = solrServer;
+    SolrClient s = solrClient;
     s.deleteByQuery("*:*"); // delete everything!
     s.commit();
   }
@@ -255,7 +255,7 @@ public class AbstractSolrMorphlineTestBa
 
   protected void testDocumentContent(HashMap<String, ExpectedResult> expectedResultMap)
   throws Exception {
-    QueryResponse rsp = solrServer.query(new SolrQuery("*:*").setRows(Integer.MAX_VALUE));
+    QueryResponse rsp = solrClient.query(new SolrQuery("*:*").setRows(Integer.MAX_VALUE));
     // Check that every expected field/values shows up in the actual query
     for (Entry<String, ExpectedResult> current : expectedResultMap.entrySet()) {
       String field = current.getKey();

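The changes above are the mechanical half of the SolrServer-to-SolrClient rename in SolrJ; the old and new spellings map one to one. A minimal sketch, with a hypothetical URL:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;

    public class ClientRenameSketch {
      static SolrClient open(String url) {
        // Before the rename: SolrServer server = new HttpSolrServer(url);
        return new HttpSolrClient(url); // same wire behavior, new names
      }
    }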
Modified: lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/EmbeddedTestSolrServer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/EmbeddedTestSolrServer.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/EmbeddedTestSolrServer.java (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/EmbeddedTestSolrServer.java Sun Jan  4 14:53:12 2015
@@ -16,13 +16,13 @@
  */
 package org.apache.solr.morphlines.solr;
 
-import java.io.IOException;
-
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
 import org.apache.solr.client.solrj.response.UpdateResponse;
 import org.apache.solr.core.CoreContainer;
 
+import java.io.IOException;
+
 /**
  * An EmbeddedSolrServer that suppresses close and rollback requests as
  * necessary for testing

Modified: lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml Sun Jan  4 14:53:12 2015
@@ -76,7 +76,7 @@
       Binary data type. The data should be sent/retrieved as Base64
       encoded Strings
     -->
-    <fieldtype name="binary" class="solr.BinaryField" />
+    <fieldType name="binary" class="solr.BinaryField" />
 
     <!--
       If sortMissingLast="true", then a sort on this field will cause
@@ -368,15 +368,15 @@
       </analyzer>
     </fieldType>
 
-    <fieldtype name="phonetic" stored="false" indexed="true"
+    <fieldType name="phonetic" stored="false" indexed="true"
       class="solr.TextField">
       <analyzer>
         <tokenizer class="solr.StandardTokenizerFactory" />
         <filter class="solr.DoubleMetaphoneFilterFactory" inject="false" />
       </analyzer>
-    </fieldtype>
+    </fieldType>
 
-    <fieldtype name="payloads" stored="false" indexed="true"
+    <fieldType name="payloads" stored="false" indexed="true"
       class="solr.TextField">
       <analyzer>
         <tokenizer class="solr.MockTokenizerFactory" />
@@ -395,7 +395,7 @@
         <filter class="solr.DelimitedPayloadTokenFilterFactory"
           encoder="float" />
       </analyzer>
-    </fieldtype>
+    </fieldType>
 
     <!--
       lowercases the entire field value, keeping it as a single token.
@@ -413,7 +413,7 @@
       since fields of this type are by default not stored or indexed,
       any data added to them will be ignored outright.
     -->
-    <fieldtype name="ignored" stored="false" indexed="false"
+    <fieldType name="ignored" stored="false" indexed="false"
       multiValued="true" class="solr.StrField" />
 
   </types>

Modified: lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/solrconfig.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/solrconfig.xml?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/solrconfig.xml (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/solrconfig.xml Sun Jan  4 14:53:12 2015
@@ -398,7 +398,7 @@
 
   <!--
     DisMaxRequestHandler allows easy searching across multiple fields
-    for simple user-entered phrases. It's implementation is now just the
+    for simple user-entered phrases. Its implementation is now just the
     standard SearchHandler with a default query parser of "dismax". see
     http://wiki.apache.org/solr/DisMaxRequestHandler
   -->

Modified: lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml Sun Jan  4 14:53:12 2015
@@ -76,7 +76,7 @@
       Binary data type. The data should be sent/retrieved as Base64
       encoded Strings
     -->
-    <fieldtype name="binary" class="solr.BinaryField" />
+    <fieldType name="binary" class="solr.BinaryField" />
 
     <!--
       If sortMissingLast="true", then a sort on this field will cause
@@ -364,15 +364,15 @@
       </analyzer>
     </fieldType>
 
-    <fieldtype name="phonetic" stored="false" indexed="true"
+    <fieldType name="phonetic" stored="false" indexed="true"
       class="solr.TextField">
       <analyzer>
         <tokenizer class="solr.StandardTokenizerFactory" />
         <filter class="solr.DoubleMetaphoneFilterFactory" inject="false" />
       </analyzer>
-    </fieldtype>
+    </fieldType>
 
-    <fieldtype name="payloads" stored="false" indexed="true"
+    <fieldType name="payloads" stored="false" indexed="true"
       class="solr.TextField">
       <analyzer>
         <tokenizer class="solr.MockTokenizerFactory" />
@@ -391,7 +391,7 @@
         <filter class="solr.DelimitedPayloadTokenFilterFactory"
           encoder="float" />
       </analyzer>
-    </fieldtype>
+    </fieldType>
 
     <!--
       lowercases the entire field value, keeping it as a single token.
@@ -409,7 +409,7 @@
       since fields of this type are by default not stored or indexed,
       any data added to them will be ignored outright.
     -->
-    <fieldtype name="ignored" stored="false" indexed="false"
+    <fieldType name="ignored" stored="false" indexed="false"
       multiValued="true" class="solr.StrField" />
 
   </types>

Modified: lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/uima-tokenizers-solrconfig.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/uima-tokenizers-solrconfig.xml?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/uima-tokenizers-solrconfig.xml (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/uima/src/test-files/uima/uima-tokenizers-solrconfig.xml Sun Jan  4 14:53:12 2015
@@ -397,7 +397,7 @@
 
   <!--
     DisMaxRequestHandler allows easy searching across multiple fields
-    for simple user-entered phrases. It's implementation is now just the
+    for simple user-entered phrases. Its implementation is now just the
     standard SearchHandler with a default query parser of "dismax". see
     http://wiki.apache.org/solr/DisMaxRequestHandler
   -->

Modified: lucene/dev/branches/lucene6005/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java (original)
+++ lucene/dev/branches/lucene6005/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java Sun Jan  4 14:53:12 2015
@@ -35,6 +35,7 @@ import org.apache.velocity.VelocityConte
 import org.apache.velocity.app.VelocityEngine;
 import org.apache.velocity.runtime.RuntimeConstants;
 import org.apache.velocity.tools.generic.ComparisonDateTool;
+import org.apache.velocity.tools.generic.DisplayTool;
 import org.apache.velocity.tools.generic.EscapeTool;
 import org.apache.velocity.tools.generic.ListTool;
 import org.apache.velocity.tools.generic.MathTool;
@@ -87,6 +88,7 @@ public class VelocityResponseWriter impl
     context.put("math", new MathTool());
     context.put("number", new NumberTool());
     context.put("sort", new SortTool());
+    context.put("display", new DisplayTool());
 
     context.put("engine", engine);  // for $engine.resourceExists(...)
 

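The new DisplayTool joins the other generic Velocity tools registered on the context; templates can then reach it under the "display" key (for example $display.list(...), assuming the standard velocity-tools API). A minimal sketch of the registration pattern:

    import org.apache.velocity.VelocityContext;
    import org.apache.velocity.tools.generic.DisplayTool;

    public class ToolContextSketch {
      public static void main(String[] args) {
        VelocityContext context = new VelocityContext();
        context.put("display", new DisplayTool());
        System.out.println(context.containsKey("display")); // true
      }
    }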
Modified: lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java Sun Jan  4 14:53:12 2015
@@ -17,13 +17,8 @@
 
 package org.apache.solr.client.solrj.embedded;
 
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServer;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.StreamingResponseCallback;
 import org.apache.solr.common.SolrDocument;
@@ -45,8 +40,13 @@ import org.apache.solr.response.ResultCo
 import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.servlet.SolrRequestParsers;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
 /**
- * SolrServer that connects directly to SolrCore.
+ * SolrClient that connects directly to SolrCore.
  * <p>
  * TODO -- this implementation sends the response to XML and then parses it.  
  * It *should* be able to convert the response directly into a named list.
@@ -54,7 +54,7 @@ import org.apache.solr.servlet.SolrReque
  *
  * @since solr 1.3
  */
-public class EmbeddedSolrServer extends SolrServer
+public class EmbeddedSolrServer extends SolrClient
 {
   protected final CoreContainer coreContainer;
   protected final String coreName;
@@ -65,7 +65,7 @@ public class EmbeddedSolrServer extends
    * @deprecated use {@link #EmbeddedSolrServer(CoreContainer, String)} instead.
    */
   @Deprecated
-  public EmbeddedSolrServer( SolrCore core )
+  public EmbeddedSolrServer(SolrCore core)
   {
     if ( core == null ) {
       throw new NullPointerException("SolrCore instance required");
@@ -88,7 +88,7 @@ public class EmbeddedSolrServer extends
    * @param coreContainer the core container
    * @param coreName the core name
    */
-  public EmbeddedSolrServer(  CoreContainer coreContainer, String coreName )
+  public EmbeddedSolrServer(CoreContainer coreContainer, String coreName)
   {
     if ( coreContainer == null ) {
       throw new NullPointerException("CoreContainer instance required");

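With the supertype changed, an embedded server can be handed to any code written against SolrClient. A sketch; the core name is a placeholder, not from this commit:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
    import org.apache.solr.core.CoreContainer;

    public class EmbeddedClientSketch {
      static SolrClient open(CoreContainer container) {
        return new EmbeddedSolrServer(container, "collection1");
      }
    }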
Modified: lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java Sun Jan  4 14:53:12 2015
@@ -17,28 +17,6 @@
 
 package org.apache.solr.client.solrj.embedded;
 
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.EnumSet;
-import java.util.LinkedList;
-import java.util.Random;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import javax.servlet.DispatcherType;
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
 import org.apache.solr.servlet.SolrDispatchFilter;
 import org.eclipse.jetty.server.Connector;
 import org.eclipse.jetty.server.Server;
@@ -57,6 +35,27 @@ import org.eclipse.jetty.util.log.Logger
 import org.eclipse.jetty.util.ssl.SslContextFactory;
 import org.eclipse.jetty.util.thread.QueuedThreadPool;
 
+import javax.servlet.DispatcherType;
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.EnumSet;
+import java.util.LinkedList;
+import java.util.Random;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
 /**
  * Run solr using jetty
  * 
@@ -254,7 +253,7 @@ public class JettySolrRunner {
       // Connectors by default inherit server's thread pool.
       QueuedThreadPool qtp = new QueuedThreadPool();
       qtp.setMaxThreads(10000);
-      qtp.setMaxIdleTimeMs((int) TimeUnit.SECONDS.toMillis(5));
+      qtp.setMaxIdleTimeMs((int) TimeUnit.MILLISECONDS.toMillis(200));
       qtp.setMaxStopTimeMs((int) TimeUnit.MINUTES.toMillis(1));
       server.setThreadPool(qtp);
 

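The connector change above drops the test thread pool's idle timeout from five seconds to 200 ms; note that TimeUnit.MILLISECONDS.toMillis(200) is an identity conversion, so the new value is simply 200. A quick check:

    import java.util.concurrent.TimeUnit;

    public class IdleTimeCheck {
      public static void main(String[] args) {
        System.out.println(TimeUnit.SECONDS.toMillis(5));        // 5000 (old)
        System.out.println(TimeUnit.MILLISECONDS.toMillis(200)); // 200 (new)
      }
    }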
Modified: lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/Assign.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/Assign.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/Assign.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/Assign.java Sun Jan  4 14:53:12 2015
@@ -174,7 +174,7 @@ public class Assign {
           + collectionName
           + " is higher than or equal to the number of Solr instances currently live or part of your " + CREATE_NODE_SET + "("
           + nodeList.size()
-          + "). Its unusual to run two replica of the same slice on the same Solr-instance.");
+          + "). It's unusual to run two replica of the same slice on the same Solr-instance.");
     }
 
     int maxCoresAllowedToCreate = maxShardsPerNode * nodeList.size();

Modified: lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java Sun Jan  4 14:53:12 2015
@@ -25,6 +25,7 @@ import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkCmdExecutor;
@@ -86,7 +87,7 @@ public  class LeaderElector {
    *
    * @param replacement has someone else been the leader already?
    */
-  private void checkIfIamLeader(final int seq, final ElectionContext context, boolean replacement) throws KeeperException,
+  private void checkIfIamLeader(final ElectionContext context, boolean replacement) throws KeeperException,
       InterruptedException, IOException {
     context.checkIfIamLeaderFired();
     // get all other numbers...
@@ -99,10 +100,44 @@ public  class LeaderElector {
       log.warn("Our node is no longer in line to be leader");
       return;
     }
+    // We can't really rely on the sequence number stored in the old watcher; it may be stale, hence this check.
+
+    int seq = -1;
+
+    // See if we've already been re-added and this is an old context, in which case we use our current sequence number.
+    String newLeaderSeq = "";
+    for (String elec : seqs) {
+      if (getNodeName(elec).equals(getNodeName(context.leaderSeqPath)) && seq < getSeq(elec)) {
+        seq = getSeq(elec); // so use the current sequence number.
+        newLeaderSeq = elec;
+        break;
+      }
+    }
+
+    // Now, if we've been re-added, presumably we've also set up watchers and all that kind of thing, so we're done
+    if (StringUtils.isNotBlank(newLeaderSeq) && seq > getSeq(context.leaderSeqPath)) {
+      log.info("Node " + context.leaderSeqPath + " already in queue as " + newLeaderSeq + " nothing to do.");
+      return;
+    }
+
+    // Fallback in case we're all coming in here fresh and there is no node for this core already in the election queue.
+    if (seq == -1) {
+      seq = getSeq(context.leaderSeqPath);
+    }
+
     if (seq <= intSeqs.get(0)) {
-      if(seq == intSeqs.get(0) && !context.leaderSeqPath.equals(holdElectionPath+"/"+seqs.get(0)) ) {//somebody else already  became the leader with the same sequence id , not me
-        log.info("was going be leader {} , seq(0) {}",context.leaderSeqPath,holdElectionPath+"/"+seqs.get(0));//but someone else jumped the line
-        retryElection(context,false);//join at the tail again
+      if (seq == intSeqs.get(0) && !context.leaderSeqPath.equals(holdElectionPath + "/" + seqs.get(0))) {//somebody else already  became the leader with the same sequence id , not me
+        log.info("was going to be leader {} , seq(0) {}", context.leaderSeqPath, holdElectionPath + "/" + seqs.get(0));//but someone else jumped the line
+
+        // The problem is that deleting the ZK node that's watched by others
+        // results in an unpredictable sequencing of the events, and sometimes the context that comes in for checking
+        // this happens to be after the node has already taken over leadership. So we just bail out here.
+        // This caused one of the tests to fail on having two nodes with the same name in the queue. I'm not sure
+        // the assumption that this is a bad state is valid.
+        if (getNodeName(context.leaderSeqPath).equals(getNodeName(seqs.get(0)))) {
+          return;
+        }
+        retryElection(context, false);//join at the tail again
         return;
       }
       // first we delete the node advertising the old leader in case the ephem is still there
@@ -129,21 +164,22 @@ public  class LeaderElector {
       }
     } else {
       // I am not the leader - watch the node below me
-      int i = 1;
-      for (; i < intSeqs.size(); i++) {
-        int s = intSeqs.get(i);
-        if (seq < s) {
-          // we found who we come before - watch the guy in front
+      int toWatch = -1;
+      for (int idx = 0; idx < intSeqs.size(); idx++) {
+        if (intSeqs.get(idx) < seq && ! getNodeName(context.leaderSeqPath).equals(getNodeName(seqs.get(idx)))) {
+          toWatch = idx;
+        }
+        if (intSeqs.get(idx) >= seq) {
           break;
         }
       }
-      int index = i - 2;
-      if (index < 0) {
+      if (toWatch < 0) {
         log.warn("Our node is no longer in line to be leader");
         return;
       }
       try {
-        String watchedNode = holdElectionPath + "/" + seqs.get(index);
+        String watchedNode = holdElectionPath + "/" + seqs.get(toWatch);
+
         zkClient.getData(watchedNode, watcher = new ElectionWatcher(context.leaderSeqPath , watchedNode,seq, context) , null, true);
       } catch (KeeperException.SessionExpiredException e) {
         throw e;
@@ -151,7 +187,7 @@ public  class LeaderElector {
         log.warn("Failed setting watch", e);
         // we couldn't set our watch - the node before us may already be down?
         // we need to check if we are the leader again
-        checkIfIamLeader(seq, context, true);
+        checkIfIamLeader(context, true);
       }
     }
   }
@@ -309,15 +345,13 @@ public  class LeaderElector {
         }
       }
     }
-    int seq = getSeq(leaderSeqPath);
-    checkIfIamLeader(seq, context, replacement);
-    
-    return seq;
+    checkIfIamLeader(context, replacement);
+
+    return getSeq(context.leaderSeqPath);
   }
 
   private class ElectionWatcher implements Watcher {
     final String myNode,watchedNode;
-    final int seq;
     final ElectionContext context;
 
     private boolean canceled = false;
@@ -325,11 +359,10 @@ public  class LeaderElector {
     private ElectionWatcher(String myNode, String watchedNode, int seq, ElectionContext context) {
       this.myNode = myNode;
       this.watchedNode = watchedNode;
-      this.seq = seq;
       this.context = context;
     }
 
-    void cancel(String leaderSeqPath){
+    void cancel() {
       canceled = true;
 
     }
@@ -354,7 +387,7 @@ public  class LeaderElector {
       }
       try {
         // am I the next leader?
-        checkIfIamLeader(seq, context, true);
+        checkIfIamLeader(context, true);
       } catch (Exception e) {
         log.warn("", e);
       }
@@ -390,7 +423,7 @@ public  class LeaderElector {
   void retryElection(ElectionContext context, boolean joinAtHead) throws KeeperException, InterruptedException, IOException {
     ElectionWatcher watcher = this.watcher;
     ElectionContext ctx = context.copy();
-    if(watcher!= null) watcher.cancel(this.context.leaderSeqPath);
+    if (watcher != null) watcher.cancel();
     this.context.cancelElection();
     this.context = ctx;
     joinElection(ctx, true, joinAtHead);

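The reworked checkIfIamLeader above replaces the old "index minus two" arithmetic with an explicit scan for the closest predecessor, skipping stale entries that belong to our own node. A self-contained sketch of that selection loop, with illustrative data:

    import java.util.Arrays;
    import java.util.List;

    public class PredecessorPick {
      static int pickToWatch(List<Integer> sortedSeqs, List<String> nodeNames,
                             int mySeq, String myName) {
        int toWatch = -1;
        for (int idx = 0; idx < sortedSeqs.size(); idx++) {
          if (sortedSeqs.get(idx) < mySeq && !myName.equals(nodeNames.get(idx))) {
            toWatch = idx;   // best predecessor seen so far
          }
          if (sortedSeqs.get(idx) >= mySeq) {
            break;           // reached our own position; stop scanning
          }
        }
        return toWatch;      // -1 means we are no longer in line
      }

      public static void main(String[] args) {
        List<Integer> seqs = Arrays.asList(1, 2, 4);
        List<String> names = Arrays.asList("nodeA", "nodeB", "nodeC");
        System.out.println(pickToWatch(seqs, names, 4, "nodeC")); // 1 (nodeB)
      }
    }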
Modified: lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java Sun Jan  4 14:53:12 2015
@@ -1,12 +1,8 @@
 package org.apache.solr.cloud;
 
-import java.net.ConnectException;
-import java.net.SocketException;
-import java.util.List;
-
 import org.apache.http.NoHttpResponseException;
 import org.apache.http.conn.ConnectTimeoutException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestRecovery;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
@@ -18,6 +14,10 @@ import org.apache.solr.core.CoreContaine
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.net.ConnectException;
+import java.net.SocketException;
+import java.util.List;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -114,12 +114,12 @@ public class LeaderInitiatedRecoveryThre
         log.info("Asking core={} coreNodeName={} on " + recoveryUrl + " to recover", coreNeedingRecovery, replicaCoreNodeName);
       }
       
-      HttpSolrServer server = new HttpSolrServer(recoveryUrl);
+      HttpSolrClient client = new HttpSolrClient(recoveryUrl);
       try {
-        server.setSoTimeout(60000);
-        server.setConnectionTimeout(15000);
+        client.setSoTimeout(60000);
+        client.setConnectionTimeout(15000);
         try {
-          server.request(recoverRequestCmd);
+          client.request(recoverRequestCmd);
           
           log.info("Successfully sent " + CoreAdminAction.REQUESTRECOVERY +
               " command to core={} coreNodeName={} on " + recoveryUrl, coreNeedingRecovery, replicaCoreNodeName);
@@ -140,7 +140,7 @@ public class LeaderInitiatedRecoveryThre
           }                                                
         }
       } finally {
-        server.shutdown();
+        client.shutdown();
       }
       
       // wait a few seconds

Modified: lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/Overseer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/Overseer.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/Overseer.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/Overseer.java Sun Jan  4 14:53:12 2015
@@ -167,7 +167,7 @@ public class Overseer implements Closeab
                 else if (LeaderStatus.YES == isLeader) {
                   final ZkNodeProps message = ZkNodeProps.load(head);
                   log.info("processMessage: queueSize: {}, message = {}", workQueue.getStats().getQueueLength(), message);
-                  clusterState = processQueueItem(message, clusterState, zkStateWriter);
+                  clusterState = processQueueItem(message, clusterState, zkStateWriter, false, null);
                   workQueue.poll(); // poll-ing removes the element we got by peek-ing
                 }
                 else {
@@ -242,7 +242,9 @@ public class Overseer implements Closeab
                 while (data != null)  {
                   final ZkNodeProps message = ZkNodeProps.load(data);
                   log.info("processMessage: queueSize: {}, message = {}", workQueue.getStats().getQueueLength(), message);
-                  clusterState = processQueueItem(message, clusterState, zkStateWriter);
+                  // force flush to ZK after each message because there is no fallback: if an item
+                  // is removed from the workQueue but its ZK write fails, the update is lost
+                  clusterState = processQueueItem(message, clusterState, zkStateWriter, false, null);
                   workQueue.poll(); // poll-ing removes the element we got by peek-ing
                   data = workQueue.peek();
                 }
@@ -253,11 +255,25 @@ public class Overseer implements Closeab
               }
 
               while (head != null) {
+                final byte[] data = head.getBytes();
                 final ZkNodeProps message = ZkNodeProps.load(head.getBytes());
                 log.info("processMessage: queueSize: {}, message = {} current state version: {}", stateUpdateQueue.getStats().getQueueLength(), message, clusterState.getZkClusterStateVersion());
-                clusterState = processQueueItem(message, clusterState, zkStateWriter);
-                workQueue.offer(head.getBytes());
+                // we can batch here because the workQueue is our fallback in case a ZK write fails
+                clusterState = processQueueItem(message, clusterState, zkStateWriter, true, new ZkStateWriter.ZkWriteCallback() {
+                  @Override
+                  public void onEnqueue() throws Exception {
+                    workQueue.offer(data);
+                  }
+
+                  @Override
+                  public void onWrite() throws Exception {
+                    // remove everything from workQueue
+                    while (workQueue.poll() != null);
+                  }
+                });
 
+                // it is safer to keep this poll here because an invalid message might never reach
+                // the workQueue, so we can't rely on the ZkWriteCallback to remove the item
                 stateUpdateQueue.poll();
 
                 if (isClosed) break;
@@ -299,7 +315,7 @@ public class Overseer implements Closeab
       }
     }
 
-    private ClusterState processQueueItem(ZkNodeProps message, ClusterState clusterState, ZkStateWriter zkStateWriter) throws KeeperException, InterruptedException {
+    private ClusterState processQueueItem(ZkNodeProps message, ClusterState clusterState, ZkStateWriter zkStateWriter, boolean enableBatching, ZkStateWriter.ZkWriteCallback callback) throws Exception {
       final String operation = message.getStr(QUEUE_OPERATION);
       ZkWriteCommand zkWriteCommand = null;
       final TimerContext timerContext = stats.time(operation);
@@ -318,7 +334,10 @@ public class Overseer implements Closeab
         timerContext.stop();
       }
       if (zkWriteCommand != null) {
-        clusterState = zkStateWriter.enqueueUpdate(clusterState, zkWriteCommand);
+        clusterState = zkStateWriter.enqueueUpdate(clusterState, zkWriteCommand, callback);
+        if (!enableBatching)  {
+          clusterState = zkStateWriter.writePendingUpdates();
+        }
       }
       return clusterState;
     }
@@ -379,10 +398,11 @@ public class Overseer implements Closeab
             return new SliceMutator(getZkStateReader()).addReplica(clusterState, message);
           case CLUSTERPROP:
             handleProp(message);
+            break;
           case ADDREPLICAPROP:
             return new ReplicaMutator(getZkStateReader()).addReplicaProperty(clusterState, message);
           case DELETEREPLICAPROP:
-            return new ReplicaMutator(getZkStateReader()).removeReplicaProperty(clusterState, message);
+            return new ReplicaMutator(getZkStateReader()).deleteReplicaProperty(clusterState, message);
           case BALANCESHARDUNIQUE:
             ExclusiveSliceProperty dProp = new ExclusiveSliceProperty(clusterState, message);
             if (dProp.balanceProperty()) {
@@ -451,8 +471,8 @@ public class Overseer implements Closeab
       else m.put(name,val);
 
       try {
-        if(reader.getZkClient().exists(ZkStateReader.CLUSTER_PROPS,true))
-          reader.getZkClient().setData(ZkStateReader.CLUSTER_PROPS,ZkStateReader.toJSON(m),true);
+        if (reader.getZkClient().exists(ZkStateReader.CLUSTER_PROPS, true))
+          reader.getZkClient().setData(ZkStateReader.CLUSTER_PROPS, ZkStateReader.toJSON(m), true);
         else
           reader.getZkClient().create(ZkStateReader.CLUSTER_PROPS, ZkStateReader.toJSON(m),CreateMode.PERSISTENT, true);
         clusterProps = reader.getClusterProps();

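The substantive change in Overseer.java is the split between two write disciplines: the recovery paths flush to ZK after every message because the workQueue entry is already gone, while the main loop batches writes and uses the workQueue as its crash fallback via the new callback. Below is a compact, self-contained analogue of that contract; BatchingWriter and WriteCallback are names invented for this sketch (the real types are ZkStateWriter and ZkStateWriter.ZkWriteCallback, whose exact signatures may differ):

    import java.util.ArrayList;
    import java.util.List;

    class BatchingWriter {
      interface WriteCallback {
        void onEnqueue() throws Exception; // fired when an update is buffered
        void onWrite() throws Exception;   // fired once the batch has reached ZK
      }

      private final List<byte[]> pending = new ArrayList<>();

      void enqueueUpdate(byte[] update, WriteCallback cb) throws Exception {
        pending.add(update);
        if (cb != null) cb.onEnqueue(); // fallback copy lands on the workQueue first
      }

      void writePendingUpdates(WriteCallback cb) throws Exception {
        // ... one batched write of everything in pending to ZooKeeper ...
        pending.clear();
        if (cb != null) cb.onWrite();   // fallback copies can now be discarded
      }
    }

If the process dies between onEnqueue and onWrite, the items survive on the workQueue and are replayed with forced flushes on the next run, which is exactly why the recovery loop above disables batching.
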
Modified: lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/OverseerAutoReplicaFailoverThread.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/OverseerAutoReplicaFailoverThread.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/OverseerAutoReplicaFailoverThread.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/OverseerAutoReplicaFailoverThread.java Sun Jan  4 14:53:12 2015
@@ -30,7 +30,7 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
@@ -128,7 +128,7 @@ public class OverseerAutoReplicaFailover
         doWork();
       } catch (Exception e) {
         SolrException.log(log, this.getClass().getSimpleName()
-            + " had an error it's thread work loop.", e);
+            + " had an error in its thread work loop.", e);
       }
       
       if (!this.isClosed) {
@@ -145,6 +145,11 @@ public class OverseerAutoReplicaFailover
     
     // TODO: extract to configurable strategy class ??
     ClusterState clusterState = zkStateReader.getClusterState();
+    // check whether autoAddReplicas has been disabled cluster-wide
+    String autoAddReplicas = (String) zkStateReader.getClusterProps().get(ZkStateReader.AUTO_ADD_REPLICAS);
+    if (autoAddReplicas != null && autoAddReplicas.equals("false")) {
+      return;
+    }
     if (clusterState != null) {
       if (lastClusterStateVersion == clusterState.getZkClusterStateVersion() && baseUrlForBadNodes.size() == 0) {
         // nothing has changed, no work to do
@@ -242,7 +247,7 @@ public class OverseerAutoReplicaFailover
       });
       
       // wait to see state for core we just created
-      boolean success = ClusterStateUtil.waitToSeeLive(zkStateReader, collection, coreNodeName, createUrl, 30);
+      boolean success = ClusterStateUtil.waitToSeeLive(zkStateReader, collection, coreNodeName, createUrl, 30000);
       if (!success) {
         log.error("Creating new replica appears to have failed, timed out waiting to see created SolrCore register in the clusterstate.");
         return false;
@@ -413,10 +418,10 @@ public class OverseerAutoReplicaFailover
   private boolean createSolrCore(final String collection,
       final String createUrl, final String dataDir, final String ulogDir,
       final String coreNodeName, final String coreName) {
-    HttpSolrServer server = null;
+    HttpSolrClient server = null;
     try {
       log.debug("create url={}", createUrl);
-      server = new HttpSolrServer(createUrl);
+      server = new HttpSolrClient(createUrl);
       server.setConnectionTimeout(30000);
       server.setSoTimeout(60000);
       Create createCmd = new Create();

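The new early return means a single cluster property now disables the auto-add-replicas machinery everywhere. A hedged sketch of flipping that switch from SolrJ; the base URL is a placeholder, and QueryRequest with setPath is simply one common way to reach the collections API endpoint:

    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.QueryRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    HttpSolrClient client = new HttpSolrClient("http://127.0.0.1:8983/solr"); // placeholder
    try {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", "CLUSTERPROP");
      params.set("name", "autoAddReplicas"); // added to KNOWN_CLUSTER_PROPS below
      params.set("val", "false");            // the failover thread now returns early on "false"
      QueryRequest req = new QueryRequest(params);
      req.setPath("/admin/collections");
      client.request(req);
    } finally {
      client.shutdown();
    }
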
Modified: lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java Sun Jan  4 14:53:12 2015
@@ -17,50 +17,11 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import static org.apache.solr.cloud.Assign.getNodesForNewShard;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICAPROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CLUSTERSTATUS;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICAPROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
+import com.google.common.collect.ImmutableSet;
 import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
@@ -110,7 +71,48 @@ import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.ImmutableSet;
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.solr.cloud.Assign.getNodesForNewShard;
+import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICAPROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CLUSTERSTATUS;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICAPROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
 
 
 public class OverseerCollectionProcessor implements Runnable, Closeable {
@@ -123,6 +125,8 @@ public class OverseerCollectionProcessor
   // @Deprecated- see on ZkStateReader
   public static final String MAX_SHARDS_PER_NODE = "maxShardsPerNode";
   
+  static final boolean CREATE_NODE_SET_SHUFFLE_DEFAULT = true;
+  public static final String CREATE_NODE_SET_SHUFFLE = "createNodeSet.shuffle";
   public static final String CREATE_NODE_SET = "createNodeSet";
 
   /**
@@ -163,7 +167,8 @@ public class OverseerCollectionProcessor
 
   public int maxParallelThreads = 10;
 
-  public static final Set<String> KNOWN_CLUSTER_PROPS = ImmutableSet.of(ZkStateReader.LEGACY_CLOUD, ZkStateReader.URL_SCHEME);
+  public static final Set<String> KNOWN_CLUSTER_PROPS = ImmutableSet.of(ZkStateReader.LEGACY_CLOUD, ZkStateReader.URL_SCHEME,
+      ZkStateReader.AUTO_ADD_REPLICAS);
 
   public static final Map<String,Object> COLL_PROPS = ZkNodeProps.makeMap(
       ROUTER, DocRouter.DEFAULT_NAME,
@@ -171,7 +176,7 @@ public class OverseerCollectionProcessor
       ZkStateReader.MAX_SHARDS_PER_NODE, "1",
       ZkStateReader.AUTO_ADD_REPLICAS, "false");
 
-  private static final Random RANDOM;
+  static final Random RANDOM;
   static {
     // We try to make things reproducible in the context of our tests by initializing the random instance
     // based on the current seed
@@ -656,7 +661,7 @@ public class OverseerCollectionProcessor
             balanceProperty(message);
             break;
           case REBALANCELEADERS:
-            processAssignLeaders(message);
+            processRebalanceLeaders(message);
             break;
           default:
             throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:"
@@ -684,42 +689,36 @@ public class OverseerCollectionProcessor
   }
 
   @SuppressWarnings("unchecked")
-  // re-purpose BALANCELEADERS to reassign a single leader over here
-  private void processAssignLeaders(ZkNodeProps message) throws KeeperException, InterruptedException {
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String shardId = message.getStr(SHARD_ID_PROP);
-    String baseURL = message.getStr(BASE_URL_PROP);
-    String coreName = message.getStr(CORE_NAME_PROP);
+  private void processRebalanceLeaders(ZkNodeProps message) throws KeeperException, InterruptedException {
+    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, CORE_NAME_PROP, ELECTION_NODE_PROP,
+        NODE_NAME_PROP, BASE_URL_PROP, REJOIN_AT_HEAD_PROP);
 
-    if (StringUtils.isBlank(collectionName) || StringUtils.isBlank(shardId) || StringUtils.isBlank(baseURL) ||
-        StringUtils.isBlank(coreName)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          String.format(Locale.ROOT, "The '%s', '%s', '%s' and '%s' parameters are required when assigning a leader",
-              COLLECTION_PROP, SHARD_ID_PROP, BASE_URL_PROP, CORE_NAME_PROP));
-    }
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    DistributedQueue inQueue = Overseer.getInQueue(zkClient);
-    Map<String, Object> propMap = new HashMap<>();
-    propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.LEADER.toLower());
-    propMap.put(COLLECTION_PROP, collectionName);
-    propMap.put(SHARD_ID_PROP, shardId);
-    propMap.put(BASE_URL_PROP, baseURL);
-    propMap.put(CORE_NAME_PROP, coreName);
-    inQueue.offer(zkStateReader.toJSON(propMap));
-  }
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set(COLLECTION_PROP, message.getStr(COLLECTION_PROP));
+    params.set(SHARD_ID_PROP, message.getStr(SHARD_ID_PROP));
+    params.set(REJOIN_AT_HEAD_PROP, message.getStr(REJOIN_AT_HEAD_PROP));
+    params.set(CoreAdminParams.ACTION, CoreAdminAction.REJOINLEADERELECTION.toString());
+    params.set(CORE_NAME_PROP, message.getStr(CORE_NAME_PROP));
+    params.set(NODE_NAME_PROP, message.getStr(NODE_NAME_PROP));
+    params.set(ELECTION_NODE_PROP, message.getStr(ELECTION_NODE_PROP));
+    params.set(BASE_URL_PROP, message.getStr(BASE_URL_PROP));
 
+    String baseUrl = message.getStr(BASE_URL_PROP);
+    ShardRequest sreq = new ShardRequest();
+    sreq.nodeName = message.getStr(ZkStateReader.CORE_NAME_PROP);
+    // yes, they must use the same admin handler path everywhere...
+    params.set("qt", adminPath);
+    sreq.purpose = ShardRequest.PURPOSE_PRIVATE;
+    sreq.shards = new String[] {baseUrl};
+    sreq.actualShards = sreq.shards;
+    sreq.params = params;
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+    shardHandler.submit(sreq, baseUrl, sreq.params);
+  }
 
   @SuppressWarnings("unchecked")
   private void processReplicaAddPropertyCommand(ZkNodeProps message) throws KeeperException, InterruptedException {
-    if (StringUtils.isBlank(message.getStr(COLLECTION_PROP)) ||
-        StringUtils.isBlank(message.getStr(SHARD_ID_PROP)) ||
-        StringUtils.isBlank(message.getStr(REPLICA_PROP)) ||
-        StringUtils.isBlank(message.getStr(PROPERTY_PROP)) ||
-        StringUtils.isBlank(message.getStr(PROPERTY_VALUE_PROP))) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          String.format(Locale.ROOT, "The '%s', '%s', '%s', '%s', and '%s' parameters are required for all replica properties add/delete operations",
-              COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP, PROPERTY_VALUE_PROP));
-    }
+    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP, PROPERTY_VALUE_PROP);
     SolrZkClient zkClient = zkStateReader.getZkClient();
     DistributedQueue inQueue = Overseer.getInQueue(zkClient);
     Map<String, Object> propMap = new HashMap<>();
@@ -730,14 +729,7 @@ public class OverseerCollectionProcessor
   }
 
   private void processReplicaDeletePropertyCommand(ZkNodeProps message) throws KeeperException, InterruptedException {
-    if (StringUtils.isBlank(message.getStr(COLLECTION_PROP)) ||
-        StringUtils.isBlank(message.getStr(SHARD_ID_PROP)) ||
-        StringUtils.isBlank(message.getStr(REPLICA_PROP)) ||
-        StringUtils.isBlank(message.getStr(PROPERTY_PROP))) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          String.format(Locale.ROOT, "The '%s', '%s', '%s', and '%s' parameters are required for all replica properties add/delete operations",
-              COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP));
-    }
+    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP);
     SolrZkClient zkClient = zkStateReader.getZkClient();
     DistributedQueue inQueue = Overseer.getInQueue(zkClient);
     Map<String, Object> propMap = new HashMap<>();
@@ -1809,18 +1801,18 @@ public class OverseerCollectionProcessor
 
 
   static UpdateResponse softCommit(String url) throws SolrServerException, IOException {
-    HttpSolrServer server = null;
+    HttpSolrClient client = null;
     try {
-      server = new HttpSolrServer(url);
-      server.setConnectionTimeout(30000);
-      server.setSoTimeout(120000);
+      client = new HttpSolrClient(url);
+      client.setConnectionTimeout(30000);
+      client.setSoTimeout(120000);
       UpdateRequest ureq = new UpdateRequest();
       ureq.setParams(new ModifiableSolrParams());
       ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true, true);
-      return ureq.process(server);
+      return ureq.process(client);
     } finally {
-      if (server != null) {
-        server.shutdown();
+      if (client != null) {
+        client.shutdown();
       }
     }
   }
@@ -2297,6 +2289,30 @@ public class OverseerCollectionProcessor
     }
   }
 
+  private static List<String> getLiveOrLiveAndCreateNodeSetList(final Set<String> liveNodes, final ZkNodeProps message, final Random random) {
+    // TODO: add smarter options that look at the current number of cores per
+    // node?
+    // for now we just go random (except when createNodeSet and createNodeSet.shuffle=false are passed in)
+
+    List<String> nodeList;
+
+    final String createNodeSetStr = message.getStr(CREATE_NODE_SET);
+    final List<String> createNodeList = (createNodeSetStr == null) ? null : StrUtils.splitSmart(createNodeSetStr, ",", true);
+
+    if (createNodeList != null) {
+      nodeList = new ArrayList<>(createNodeList);
+      nodeList.retainAll(liveNodes);
+      if (message.getBool(CREATE_NODE_SET_SHUFFLE, CREATE_NODE_SET_SHUFFLE_DEFAULT)) {
+        Collections.shuffle(nodeList, random);
+      }
+    } else {
+      nodeList = new ArrayList<>(liveNodes);
+      Collections.shuffle(nodeList, random);
+    }
+    
+    return nodeList;
+  }
+
   private void createCollection(ClusterState clusterState, ZkNodeProps message, NamedList results) throws KeeperException, InterruptedException {
     String collectionName = message.getStr("name");
     if (clusterState.hasCollection(collectionName)) {
@@ -2328,8 +2344,6 @@ public class OverseerCollectionProcessor
       }
 
       int maxShardsPerNode = message.getInt(ZkStateReader.MAX_SHARDS_PER_NODE, 1);
-      String createNodeSetStr; 
-      List<String> createNodeList = ((createNodeSetStr = message.getStr(CREATE_NODE_SET)) == null)?null:StrUtils.splitSmart(createNodeSetStr, ",", true);
       
       if (repFactor <= 0) {
         throw new SolrException(ErrorCode.BAD_REQUEST, ZkStateReader.REPLICATION_FACTOR + " must be greater than 0");
@@ -2343,19 +2357,7 @@ public class OverseerCollectionProcessor
       // add our new cores to existing nodes serving the least number of cores
       // but (for now) require that each core goes on a distinct node.
       
-      // TODO: add smarter options that look at the current number of cores per
-      // node?
-      // for now we just go random
-      Set<String> nodes = clusterState.getLiveNodes();
-      List<String> nodeList = new ArrayList<>(nodes.size());
-      nodeList.addAll(nodes);
-      if (createNodeList != null) nodeList.retainAll(createNodeList);
-      Collections.shuffle(nodeList, RANDOM);
-      
-      if (nodeList.size() <= 0) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName
-            + ". No live Solr-instances" + ((createNodeList != null)?" among Solr-instances specified in " + CREATE_NODE_SET + ":" + createNodeSetStr:""));
-      }
+      final List<String> nodeList = getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, RANDOM);
       
       if (repFactor > nodeList.size()) {
         log.warn("Specified "
@@ -2364,9 +2366,9 @@ public class OverseerCollectionProcessor
             + repFactor
             + " on collection "
             + collectionName
-            + " is higher than or equal to the number of Solr instances currently live or part of your " + CREATE_NODE_SET + "("
+            + " is higher than or equal to the number of Solr instances currently live, or live and part of your " + CREATE_NODE_SET + " ("
             + nodeList.size()
-            + "). Its unusual to run two replica of the same slice on the same Solr-instance.");
+            + "). It's unusual to run two replicas of the same slice on the same Solr instance.");
       }
       
       int maxShardsAllowedToCreate = maxShardsPerNode * nodeList.size();
@@ -2374,7 +2376,7 @@ public class OverseerCollectionProcessor
       if (maxShardsAllowedToCreate < requestedShardsToCreate) {
         throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "
             + ZkStateReader.MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
-            + ", and the number of live nodes is " + nodeList.size()
+            + ", and the number of nodes currently live, or live and part of your " + CREATE_NODE_SET + ", is " + nodeList.size()
             + ". This allows a maximum of " + maxShardsAllowedToCreate
             + " to be created. Value of " + NUM_SLICES + " is " + numSlices
             + " and value of " + ZkStateReader.REPLICATION_FACTOR + " is " + repFactor

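The extracted getLiveOrLiveAndCreateNodeSetList helper concentrates the node-selection rule: intersect the requested createNodeSet with the live nodes, preserve the caller's order when createNodeSet.shuffle=false, and otherwise shuffle. A standalone illustration with made-up node names:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Random;
    import java.util.Set;

    public class NodePick {
      public static void main(String[] args) {
        Set<String> liveNodes = new HashSet<>(Arrays.asList(
            "n1:8983_solr", "n2:8983_solr", "n3:8983_solr"));
        List<String> createNodeSet = Arrays.asList(
            "n3:8983_solr", "n1:8983_solr", "n9:8983_solr"); // n9 is down

        List<String> nodeList = new ArrayList<>(createNodeSet);
        nodeList.retainAll(liveNodes);   // drops n9
        boolean shuffle = false;         // createNodeSet.shuffle=false keeps request order
        if (shuffle) {
          Collections.shuffle(nodeList, new Random());
        }
        System.out.println(nodeList);    // [n3:8983_solr, n1:8983_solr]
      }
    }

Keeping request order matters when a caller wants deterministic placement; shuffling remains the default so replicas stay spread across nodes.
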
Modified: lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java Sun Jan  4 14:53:12 2015
@@ -21,8 +21,8 @@ import org.apache.http.client.methods.Ht
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
-import org.apache.solr.client.solrj.impl.HttpSolrServer.HttpUriRequestResponse;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient.HttpUriRequestResponse;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.WaitForState;
 import org.apache.solr.client.solrj.request.UpdateRequest;
@@ -97,7 +97,7 @@ public class RecoveryStrategy extends Th
     this.cc = cc;
     this.coreName = cd.getName();
     this.recoveryListener = recoveryListener;
-    setName("RecoveryThread");
+    setName("RecoveryThread-" + this.coreName);
     zkController = cc.getZkController();
     zkStateReader = zkController.getZkStateReader();
     baseUrl = zkController.getBaseUrl();
@@ -200,7 +200,7 @@ public class RecoveryStrategy extends Th
 
   private void commitOnLeader(String leaderUrl) throws SolrServerException,
       IOException {
-    HttpSolrServer server = new HttpSolrServer(leaderUrl);
+    HttpSolrClient server = new HttpSolrClient(leaderUrl);
     try {
       server.setConnectionTimeout(30000);
       UpdateRequest ureq = new UpdateRequest();
@@ -594,7 +594,7 @@ public class RecoveryStrategy extends Th
   
   private void sendPrepRecoveryCmd(String leaderBaseUrl, String leaderCoreName, Slice slice)
       throws SolrServerException, IOException, InterruptedException, ExecutionException {
-    HttpSolrServer server = new HttpSolrServer(leaderBaseUrl);
+    HttpSolrClient server = new HttpSolrClient(leaderBaseUrl);
     try {
       server.setConnectionTimeout(30000);
       WaitForState prepCmd = new WaitForState();

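The per-core thread name is a small but useful diagnostic change: with many cores recovering at once, a thread dump full of identical "RecoveryThread" entries cannot be attributed to a core. The same idea in isolation, with a hypothetical core name:

    Thread recovery = new Thread(new Runnable() {
      @Override
      public void run() {
        // ... recovery work for one core ...
      }
    });
    recovery.setName("RecoveryThread-collection1_shard1_replica1"); // hypothetical
    recovery.start();
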
Modified: lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java Sun Jan  4 14:53:12 2015
@@ -17,20 +17,13 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-
 import org.apache.http.client.HttpClient;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestRecovery;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -49,6 +42,11 @@ import org.apache.solr.update.UpdateShar
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+
 public class SyncStrategy {
   protected final Logger log = LoggerFactory.getLogger(getClass());
 
@@ -269,18 +267,18 @@ public class SyncStrategy {
         recoverRequestCmd.setAction(CoreAdminAction.REQUESTRECOVERY);
         recoverRequestCmd.setCoreName(coreName);
         
-        HttpSolrServer server = new HttpSolrServer(baseUrl, client);
+        HttpSolrClient client = new HttpSolrClient(baseUrl, SyncStrategy.this.client);
         try {
-          server.setConnectionTimeout(30000);
-          server.setSoTimeout(120000);
-          server.request(recoverRequestCmd);
+          client.setConnectionTimeout(30000);
+          client.setSoTimeout(120000);
+          client.request(recoverRequestCmd);
         } catch (Throwable t) {
           SolrException.log(log, ZkCoreNodeProps.getCoreUrl(leaderProps) + ": Could not tell a replica to recover", t);
           if (t instanceof Error) {
             throw (Error) t;
           }
         } finally {
-          server.shutdown();
+          client.shutdown();
         }
       }
     };

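One detail worth calling out in the SyncStrategy hunk: renaming the local variable to client makes it shadow the enclosing instance's HttpClient field of the same name inside the anonymous class, which is why the constructor argument is written as SyncStrategy.this.client. A self-contained illustration of that qualified-this construct:

    public class Outer {
      private final String client = "field";

      Runnable task = new Runnable() {
        @Override
        public void run() {
          String client = "local";               // shadows the field
          System.out.println(client);            // prints "local"
          System.out.println(Outer.this.client); // prints "field"
        }
      };

      public static void main(String[] args) {
        new Outer().task.run();
      }
    }
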
Modified: lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java Sun Jan  4 14:53:12 2015
@@ -254,16 +254,27 @@ public class ZkCLI {
             System.out.println("-" + PUT + " requires two args - the path to create and the data string");
             System.exit(1);
           }
-          zkClient.create(arglist.get(0).toString(), arglist.get(1).toString().getBytes(StandardCharsets.UTF_8), CreateMode.PERSISTENT, true);
+          String path = arglist.get(0).toString();
+          if (zkClient.exists(path, true)) {
+            zkClient.setData(path, arglist.get(1).toString().getBytes(StandardCharsets.UTF_8), true);
+          } else {
+            zkClient.create(path, arglist.get(1).toString().getBytes(StandardCharsets.UTF_8), CreateMode.PERSISTENT, true);
+          }
         } else if (line.getOptionValue(CMD).equals(PUT_FILE)) {
           List arglist = line.getArgList();
           if (arglist.size() != 2) {
             System.out.println("-" + PUT_FILE + " requires two args - the path to create in ZK and the path to the local file");
             System.exit(1);
           }
+
+          String path = arglist.get(0).toString();
           InputStream is = new FileInputStream(arglist.get(1).toString());
           try {
-            zkClient.create(arglist.get(0).toString(), IOUtils.toByteArray(is), CreateMode.PERSISTENT, true);
+            if (zkClient.exists(path, true)) {
+              zkClient.setData(path, IOUtils.toByteArray(is), true);
+            } else {
+              zkClient.create(path, IOUtils.toByteArray(is), CreateMode.PERSISTENT, true);
+            }
           } finally {
             IOUtils.closeQuietly(is);
           }

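The -put and -putfile changes make both ZkCLI commands idempotent: an existing znode is updated instead of the command failing with a NodeExists error. The same logic as a free-standing helper (a sketch; note the inherent check-then-act race if another client creates or deletes the path between the two calls):

    import org.apache.solr.common.cloud.SolrZkClient;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;

    class ZkUpsert {
      static void upsert(SolrZkClient zkClient, String path, byte[] data)
          throws KeeperException, InterruptedException {
        if (zkClient.exists(path, true)) {
          zkClient.setData(path, data, true);    // re-running -put now updates in place
        } else {
          zkClient.create(path, data, CreateMode.PERSISTENT, true);
        }
      }
    }
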

