lucene-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From rm...@apache.org
Subject svn commit: r1631928 [4/5] - in /lucene/dev/branches/lucene5969: ./ dev-tools/ dev-tools/idea/.idea/libraries/ lucene/ lucene/analysis/ lucene/analysis/common/ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ lucene/analysis/common/...
Date Wed, 15 Oct 2014 01:26:31 GMT
Modified: lucene/dev/branches/lucene5969/solr/bin/solr
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/bin/solr?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/bin/solr (original)
+++ lucene/dev/branches/lucene5969/solr/bin/solr Wed Oct 15 01:26:26 2014
@@ -48,6 +48,7 @@ SOLR_SCRIPT="$0"
 verbose=false
 THIS_OS=`uname -s`
 hasLsof=$(which lsof)
+stop_all=false
 
 # for now, we don't support running this script from cygwin due to problems
 # like not having lsof, ps waux, curl, and awkward directory handling
@@ -79,6 +80,23 @@ else
   DEFAULT_SERVER_DIR=$SOLR_TIP/example
 fi
 
+# If an include wasn't specified in the environment, then search for one...
+if [ "x$SOLR_INCLUDE" == "x" ]; then
+    # Locations (in order) to use when searching for an include file.
+    for include in "`dirname "$0"`/solr.in.sh" \
+                   "$HOME/.solr.in.sh" \
+                   /usr/share/solr/solr.in.sh \
+                   /usr/local/share/solr/solr.in.sh \
+                   /opt/solr/solr.in.sh; do
+        if [ -r "$include" ]; then
+            . "$include"
+            break
+        fi
+    done
+elif [ -r "$SOLR_INCLUDE" ]; then
+    . "$SOLR_INCLUDE"
+fi
+
 if [ "$SOLR_JAVA_HOME" != "" ]; then
   JAVA=$SOLR_JAVA_HOME/bin/java
 elif [ -n "$JAVA_HOME" ]; then
@@ -108,16 +126,20 @@ function print_usage() {
     echo "Usage: solr COMMAND OPTIONS"
     echo "       where COMMAND is one of: start, stop, restart, healthcheck"
     echo ""
-    echo "  Example: Start Solr running in the background on port 8984:" 
+    echo "  Standalone server example (start Solr running in the background on port 8984):"
     echo ""
     echo "    ./solr start -p 8984"
     echo ""
+    echo "  SolrCloud example (start Solr running in SolrCloud mode using localhost:2181 to connect to ZooKeeper, with 1g max heap size and remote Java debug options enabled):"
+    echo ""
+    echo "    ./solr start -c -m 1g -z localhost:2181 -a \"-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1044\""
+    echo ""
     echo "Pass -help after any COMMAND to see command-specific usage information,"
-    echo "  such as:    ./solr start -help" 
+    echo "  such as:    ./solr start -help or ./solr stop -help"
     echo ""
   elif [[ "$CMD" == "start" || "$CMD" == "restart" ]]; then
     echo ""
-    echo "Usage: solr $CMD [-f] [-c] [-h hostname] [-p port] [-d directory] [-z zkHost] [-m memory] [-e example] [-a \"additional-options\"] [-V]"
+    echo "Usage: solr $CMD [-f] [-c] [-h hostname] [-p port] [-d directory] [-z zkHost] [-m memory] [-e example] [-s solr.solr.home] [-a \"additional-options\"] [-V]"
     echo ""
     echo "  -f            Start Solr in foreground; default starts Solr in the background"
     echo "                  and sends stdout / stderr to solr-PORT-console.log"
@@ -137,6 +159,13 @@ function print_usage() {
     echo "  -m <memory>   Sets the min (-Xms) and max (-Xmx) heap size for the JVM, such as: -m 4g"
     echo "                  results in: -Xms4g -Xmx4g; by default, this script sets the heap size to 512m"
     echo ""
+    echo "  -s <dir>      Sets the solr.solr.home system property; Solr will create core directories under"
+    echo "                  this directory. This allows you to run multiple Solr instances on the same host"
+    echo "                  while reusing the same server directory set using the -d parameter. If set, the"
+    echo "                  specified directory should contain a solr.xml file. The default value is example/solr."
+    echo "                  This parameter is ignored when running examples (-e), as the solr.solr.home depends"
+    echo "                  on which example is run."
+    echo ""
     echo "  -e <example>  Name of the example to run; available examples:"
     echo "      cloud:         SolrCloud example"
     echo "      default:       Solr default example"
@@ -161,6 +190,8 @@ function print_usage() {
     echo ""
     echo "  -p <port>     Specify the port the Solr HTTP listener is bound to; default is 8983"
     echo ""
+    echo "  -all          Find and stop all running Solr servers on this host"
+    echo ""
     echo "  -V            Verbose messages from this script"
     echo ""
     echo "NOTE: If port is not specified, then all running Solr servers are stopped."
@@ -192,6 +223,19 @@ spinner()
   printf "    \b\b\b\b"
 }
 
+# given a port, find the pid for a Solr process
+function solr_pid_by_port() {
+  THE_PORT="$1"
+  if [ -e "$SOLR_TIP/bin/solr-$THE_PORT.pid" ]; then
+    PID=`cat $SOLR_TIP/bin/solr-$THE_PORT.pid`
+    CHECK_PID=`ps waux | awk '{print $2}' | grep $PID | sort -r | tr -d ' '`
+    if [ "$CHECK_PID" != "" ]; then
+      local solrPID=$PID
+    fi
+  fi
+  echo "$solrPID"
+}
+
 # extract the value of the -Djetty.port parameter from a running Solr process 
 function jetty_port() {
   SOLR_PID="$1"
@@ -213,7 +257,7 @@ function jetty_port() {
 function run_tool() {
   
   # Extract the solr.war if it hasn't been done already (so we can access the SolrCLI class)
-  if [ ! -d "$DEFAULT_SERVER_DIR/solr-webapp/webapp" ]; then
+  if [[ -e $DEFAULT_SERVER_DIR/webapps/solr.war && ! -d "$DEFAULT_SERVER_DIR/solr-webapp/webapp" ]]; then
     (mkdir -p $DEFAULT_SERVER_DIR/solr-webapp/webapp && cd $DEFAULT_SERVER_DIR/solr-webapp/webapp && jar xf $DEFAULT_SERVER_DIR/webapps/solr.war)    
   fi
   
@@ -226,11 +270,12 @@ function run_tool() {
 # get information about any Solr nodes running on this host
 function get_info() {
   # first, see if Solr is running
-  numSolrs=`ps waux | grep java | grep start.jar | wc -l | sed -e 's/^[ \t]*//'`
+  numSolrs=`find $SOLR_TIP/bin -name "solr-*.pid" -type f | wc -l | tr -d ' '`
   if [ "$numSolrs" != "0" ]; then
     echo -e "\nFound $numSolrs Solr nodes: "
-    for ID in `ps waux | grep java | grep start.jar | awk '{print $2}' | sort -r`
+    for PIDF in `find $SOLR_TIP/bin -name "solr-*.pid" -type f`
       do
+        ID=`cat $PIDF`
         port=`jetty_port "$ID"`
         if [ "$port" != "" ]; then
           echo ""
@@ -240,7 +285,23 @@ function get_info() {
        fi
     done
   else
-    echo -e "\nNo Solr nodes are running.\n"
+    # no pid files but check using ps just to be sure
+    numSolrs=`ps waux | grep java | grep start.jar | wc -l | sed -e 's/^[ \t]*//'`
+    if [ "$numSolrs" != "0" ]; then
+      echo -e "\nFound $numSolrs Solr nodes: "
+      for ID in `ps waux | grep java | grep start.jar | awk '{print $2}' | sort -r`
+        do
+          port=`jetty_port "$ID"`
+          if [ "$port" != "" ]; then
+            echo ""
+            echo "Found Solr process $ID running on port $port"
+            run_tool status -solr http://localhost:$port/solr
+            echo ""
+          fi
+      done
+    else
+      echo -e "\nNo Solr nodes are running.\n"
+    fi
   fi
   
 } # end get_info
@@ -253,27 +314,30 @@ function stop_solr() {
   SOLR_PORT="$2"
   STOP_PORT="79${SOLR_PORT: -2}"
   STOP_KEY="$3"
-  
-  SOLR_PID=`ps waux | grep start.jar | grep $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+  SOLR_PID="$4"
+
   if [ "$SOLR_PID" != "" ]; then
     echo -e "Sending stop command to Jetty stop port $STOP_PORT ... waiting 5 seconds to allow process $SOLR_PID to stop gracefully."
     $JAVA -jar $DIR/start.jar STOP.PORT=$STOP_PORT STOP.KEY=$STOP_KEY --stop || true
     (sleep 5) &
     spinner $!
+    rm -f $SOLR_TIP/bin/solr-$SOLR_PORT.pid
   else
     echo -e "No Solr nodes found to stop."
     exit 0
   fi
 
-  SOLR_PID=`ps waux | grep start.jar | grep $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
-  if [ "$SOLR_PID" != "" ]; then
+  CHECK_PID=`ps waux | awk '{print $2}' | grep $SOLR_PID | sort -r | tr -d ' '`
+  if [ "$CHECK_PID" != "" ]; then
     echo -e "Solr process $SOLR_PID is still running; forcefully killing it now."
     kill -9 $SOLR_PID
     echo "Killed process $SOLR_PID"
+    rm -f $SOLR_TIP/bin/solr-$SOLR_PORT.pid
+    sleep 1
   fi
 
-  SOLR_PID=`ps waux | grep start.jar | grep $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
-  if [ "$SOLR_PID" != "" ]; then
+  CHECK_PID=`ps waux | awk '{print $2}' | grep $SOLR_PID | sort -r | tr -d ' '`
+  if [ "$CHECK_PID" != "" ]; then
     echo "ERROR: Failed to kill previous Solr Java process $SOLR_PID ... script fails."
     exit 1
   fi
@@ -370,23 +434,6 @@ if [ "$SCRIPT_CMD" != "stop" ] && [ "$SC
   exit 1
 fi
 
-# If an include wasn't specified in the environment, then search for one...
-if [ "x$SOLR_INCLUDE" == "x" ]; then
-    # Locations (in order) to use when searching for an include file.
-    for include in "`dirname "$0"`/solr.in.sh" \
-                   "$HOME/.solr.in.sh" \
-                   /usr/share/solr/solr.in.sh \
-                   /usr/local/share/solr/solr.in.sh \
-                   /opt/solr/solr.in.sh; do
-        if [ -r "$include" ]; then
-            . "$include"
-            break
-        fi
-    done
-elif [ -r "$SOLR_INCLUDE" ]; then
-    . "$SOLR_INCLUDE"
-fi
-
 # Run in foreground (default is to run in the background)
 FG="false"
 noprompt=false
@@ -405,13 +452,22 @@ if [ $# -gt 0 ]; then
             fi
 
             # see if the arg value is relative to the tip vs full path
-            if [ -d "$SOLR_TIP/$2" ]; then
-              SOLR_SERVER_DIR="$SOLR_TIP/$2"              
-            else            
+            if [[ $2 != /* ]] && [[ -d "$SOLR_TIP/$2" ]]; then
+              SOLR_SERVER_DIR="$SOLR_TIP/$2"
+            else
               SOLR_SERVER_DIR="$2"
             fi
             shift 2
         ;;
+        -s|-solr.home)
+            if [ "${2:0:1}" == "-" ]; then
+              print_usage "$SCRIPT_CMD" "Expected directory but found $2 instead!"
+              exit 1
+            fi
+
+            SOLR_HOME="$2"
+            shift 2
+        ;;
         -e|-example)
             if [ "${2:0:1}" == "-" ]; then
               print_usage "$SCRIPT_CMD" "Expected example name but found $2 instead!"
@@ -461,6 +517,10 @@ if [ $# -gt 0 ]; then
             ADDITIONAL_CMD_OPTS="$2"
             shift 2
         ;;
+        -k|-key)
+            STOP_KEY="$2"
+            shift 2
+        ;;
         -help|-usage)
             print_usage "$SCRIPT_CMD"
             exit 0
@@ -473,6 +533,10 @@ if [ $# -gt 0 ]; then
             verbose=true
             shift
         ;;
+        -all)
+            stop_all=true
+            shift
+        ;;
         --)
             shift
             break
@@ -611,28 +675,32 @@ if [ "$EXAMPLE" != "" ]; then
     esac
 fi
 
-if [ "$SOLR_HOME" == "" ]; then
-  SOLR_HOME="$SOLR_SERVER_DIR/solr"
-fi
-
 if [ "$STOP_KEY" == "" ]; then
   STOP_KEY="solrrocks"
 fi
 
 # stop all if no port specified
 if [[ "$SCRIPT_CMD" == "stop" && "$SOLR_PORT" == "" ]]; then
-  numSolrs=`ps waux | grep java | grep start.jar | wc -l | sed -e 's/^[ \t]*//'`
-  if [ "$numSolrs" != "0" ]; then
-    echo -e "\nFound $numSolrs Solr nodes to stop."
-    for ID in `ps waux | grep java | grep start.jar | awk '{print $2}' | sort -r`
+  if $stop_all; then
+    none_stopped=true
+    for PIDF in `find $SOLR_TIP/bin -name "solr-*.pid" -type f`
       do
-        port=`jetty_port "$ID"`
-        stop_solr "$SOLR_SERVER_DIR" "$port" "$STOP_KEY"      
-      done
+        NEXT_PID=`cat $PIDF`
+        port=`jetty_port "$NEXT_PID"`
+        if [ "$port" != "" ]; then
+          stop_solr "$SOLR_SERVER_DIR" "$port" "$STOP_KEY" "$NEXT_PID"
+          none_stopped=false
+        fi
+        rm -f $PIDF
+    done
+    if $none_stopped; then
+      echo -e "\nNo Solr nodes found to stop.\n"
+    fi
   else
-    echo -e "\nNo Solr nodes found to stop.\n"
+    echo -e "\nERROR: Must either specify a port using -p or -all to stop all Solr nodes on this host.\n"
+    exit 1
   fi
-  exit  
+  exit
 fi
 
 if [ "$SOLR_PORT" == "" ]; then
@@ -645,7 +713,13 @@ fi
 
 if [[ "$SCRIPT_CMD" == "start" ]]; then
   # see if Solr is already running
-  SOLR_PID=`ps waux | grep start.jar | grep $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+  SOLR_PID=`solr_pid_by_port "$SOLR_PORT"`
+
+  if [ "$SOLR_PID" == "" ]; then
+    # not found using the pid file ... but use ps to ensure not found
+    SOLR_PID=`ps waux | grep start.jar | grep $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+  fi
+
   if [ "$SOLR_PID" != "" ]; then
     echo -e "\nSolr already running on port $SOLR_PORT (pid: $SOLR_PID)!"
     echo -e "Please use the 'restart' command if you want to restart this node.\n"
@@ -653,7 +727,17 @@ if [[ "$SCRIPT_CMD" == "start" ]]; then
   fi
 else
   # either stop or restart
-  stop_solr "$SOLR_SERVER_DIR" "$SOLR_PORT" "$STOP_KEY"
+  # see if Solr is already running
+  SOLR_PID=`solr_pid_by_port "$SOLR_PORT"`
+  if [ "$SOLR_PID" == "" ]; then
+    # not found using the pid file ... but use ps to ensure not found
+    SOLR_PID=`ps waux | grep start.jar | grep $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+  fi
+  if [ "$SOLR_PID" != "" ]; then
+    stop_solr "$SOLR_SERVER_DIR" "$SOLR_PORT" "$STOP_KEY" "$SOLR_PID"
+  else
+    echo -e "No process found for Solr node running on port $SOLR_PORT"
+  fi
 fi
 
 # backup the log files
@@ -677,6 +761,22 @@ if [ "$SCRIPT_CMD" == "stop" ]; then
 fi
 
 # if we get here, then we're starting a new node up ...
+if [ "$SOLR_HOME" == "" ]; then
+  SOLR_HOME="$SOLR_SERVER_DIR/solr"
+else
+  if [[ $SOLR_HOME != /* ]] && [[ -d "$SOLR_SERVER_DIR/$SOLR_HOME" ]]; then
+    SOLR_HOME="$SOLR_SERVER_DIR/$SOLR_HOME"
+  fi
+fi
+
+if [ ! -e "$SOLR_HOME" ]; then
+  echo -e "\nSolr home directory $SOLR_HOME not found!\n"
+  exit 1
+fi
+if [ ! -e "$SOLR_HOME/solr.xml" ]; then
+  echo -e "\nSolr home directory $SOLR_HOME must contain a solr.xml file!\n"
+  exit 1
+fi
 
 # if verbose gc logging enabled, setup the location of the log file
 if [ "$GC_LOG_OPTS" != "" ]; then
@@ -789,7 +889,7 @@ $SOLR_HOST_ARG -Djetty.port=$SOLR_PORT \
     $JAVA $SOLR_START_OPTS $SOLR_ADDL_ARGS -XX:OnOutOfMemoryError="$SOLR_TIP/bin/oom_solr.sh $SOLR_PORT" -jar start.jar
   else
     # run Solr in the background
-    nohup $JAVA $SOLR_START_OPTS $SOLR_ADDL_ARGS -XX:OnOutOfMemoryError="$SOLR_TIP/bin/oom_solr.sh $SOLR_PORT" -jar start.jar 1>$SOLR_SERVER_DIR/logs/solr-$SOLR_PORT-console.log 2>&1 &
+    nohup $JAVA $SOLR_START_OPTS $SOLR_ADDL_ARGS -XX:OnOutOfMemoryError="$SOLR_TIP/bin/oom_solr.sh $SOLR_PORT" -jar start.jar 1>$SOLR_SERVER_DIR/logs/solr-$SOLR_PORT-console.log 2>&1 & echo $! > $SOLR_TIP/bin/solr-$SOLR_PORT.pid
   
     # no lsof on cygwin though
     if [ "$hasLsof" != "" ]; then
@@ -832,15 +932,26 @@ else
   SOLR_SERVER_DIR=$SOLR_TIP/node1
   SOLR_HOME=$SOLR_TIP/node1/solr
   SOLR_PORT=${CLOUD_PORTS[0]}
+
   if [ "$ZK_HOST" != "" ]; then
     DASHZ="-z $ZK_HOST"
   fi
 
+  if [ "$SOLR_HEAP" != "" ]; then
+    DASHM="-m $SOLR_HEAP"
+  fi
+
+  if [ "$ADDITIONAL_CMD_OPTS" != "" ]; then
+    DASHA="-a $ADDITIONAL_CMD_OPTS"
+  fi
+
   echo -e "\nStarting up SolrCloud node1 on port ${CLOUD_PORTS[0]} using command:\n"
-  echo -e "solr start -cloud -d node1 -p $SOLR_PORT $DASHZ\n\n"
+  echo -e "solr start -cloud -d node1 -p $SOLR_PORT $DASHZ $DASHM $DASHA\n\n"
     
   # can't launch this node in the foreground else we can't run anymore commands
-  launch_solr "false" ""
+  launch_solr "false" "$ADDITIONAL_CMD_OPTS"
+
+  sleep 5
 
   # if user did not define a specific -z parameter, assume embedded in first cloud node we launched above
   zk_host=$ZK_HOST
@@ -848,15 +959,15 @@ else
     zk_port=$[$SOLR_PORT+1000]
     zk_host=localhost:$zk_port
   fi
-  
+
   for (( s=1; s<$CLOUD_NUM_NODES; s++ ))
   do
     ndx=$[$s+1]
     next_port=${CLOUD_PORTS[$s]}
     echo -e "\n\nStarting node$ndx on port $next_port using command:\n"
-    echo -e "solr start -cloud -d node$ndx -p $next_port -z $zk_host \n\n"
+    echo -e "solr start -cloud -d node$ndx -p $next_port -z $zk_host $DASHM $DASHA \n\n"
     # call this script again with correct args for next node    
-    $SOLR_TIP/bin/solr start -cloud -d node$ndx -p $next_port -z $zk_host
+    $SOLR_TIP/bin/solr start -cloud -d node$ndx -p $next_port -z $zk_host $DASHM $DASHA
   done
   
   # TODO: better (shorter) name??

Modified: lucene/dev/branches/lucene5969/solr/bin/solr.cmd
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/bin/solr.cmd?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/bin/solr.cmd (original)
+++ lucene/dev/branches/lucene5969/solr/bin/solr.cmd Wed Oct 15 01:26:26 2014
@@ -30,8 +30,16 @@ REM Used to report errors before exiting
 set SCRIPT_ERROR=
 set NO_USER_PROMPT=0
 
+REM Allow user to import vars from an include file
+REM vars set in the include file can be overridden with
+REM command line args
+IF "%SOLR_INCLUDE%"=="" set SOLR_INCLUDE=solr.in.cmd
+IF EXIST "%SOLR_INCLUDE%" CALL "%SOLR_INCLUDE%"
+
 REM Verify Java is available
-if NOT DEFINED JAVA_HOME goto need_java_home
+IF DEFINED SOLR_JAVA_HOME set "JAVA_HOME=%SOLR_JAVA_HOME%"
+IF NOT DEFINED JAVA_HOME goto need_java_home
+set JAVA_HOME=%JAVA_HOME:"=%
 "%JAVA_HOME%"\bin\java -version:1.8 -version > nul 2>&1
 IF ERRORLEVEL 1 "%JAVA_HOME%"\bin\java -version:1.7 -version > nul 2>&1
 IF ERRORLEVEL 1 goto need_java_vers
@@ -59,11 +67,11 @@ IF "%1"=="start" goto set_script_cmd
 IF "%1"=="stop" goto set_script_cmd
 IF "%1"=="restart" goto set_script_cmd
 IF "%1"=="healthcheck" (
-REM healthcheck uses different arg parsing strategy
-SHIFT
-goto parse_healthcheck_args
+  REM healthcheck uses different arg parsing strategy
+  SHIFT
+  goto parse_healthcheck_args
 )
-goto include_vars
+goto parse_args
 
 :usage
 IF NOT "%SCRIPT_ERROR%"=="" ECHO %SCRIPT_ERROR%
@@ -81,18 +89,22 @@ goto done
 @echo Usage: solr COMMAND OPTIONS
 @echo        where COMMAND is one of: start, stop, restart, healthcheck
 @echo.
-@echo   Example: Start Solr running in the background on port 8984:
+@echo   Standalone server example (start Solr running in the background on port 8984):
+@echo.
+@echo     solr start -p 8984
+@echo.
+@echo   SolrCloud example (start Solr running in SolrCloud mode using localhost:2181 to connect to ZooKeeper, with 1g max heap size and remote Java debug options enabled):
 @echo.
-@echo     ./solr start -p 8984
+@echo     solr start -c -m 1g -z localhost:2181 -a "-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1044"
 @echo.
 @echo Pass -help after any COMMAND to see command-specific usage information,
-@echo   such as:    ./solr start -help
+@echo   such as:    solr start -help or solr stop -help
 @echo.
 goto done
 
 :start_usage
 @echo.
-@echo Usage: solr %SCRIPT_CMD% [-f] [-c] [-h hostname] [-p port] [-d directory] [-z zkHost] [-m memory] [-e example] [-a "additional-options"] [-V]
+@echo Usage: solr %SCRIPT_CMD% [-f] [-c] [-h hostname] [-p port] [-d directory] [-z zkHost] [-m memory] [-e example] [-s solr.solr.home] [-a "additional-options"] [-V]
 @echo.
 @echo   -f            Start Solr in foreground; default starts Solr in the background
 @echo                   and sends stdout / stderr to solr-PORT-console.log
@@ -112,6 +124,13 @@ goto done
 @echo   -m memory     Sets the min (-Xms) and max (-Xmx) heap size for the JVM, such as: -m 4g
 @echo                   results in: -Xms4g -Xmx4g; by default, this script sets the heap size to 512m
 @echo.
+@echo   -s dir        Sets the solr.solr.home system property; Solr will create core directories under
+@echo                   this directory. This allows you to run multiple Solr instances on the same host
+@echo                   while reusing the same server directory set using the -d parameter. If set, the
+@echo                   specified directory should contain a solr.xml file. The default value is example/solr.
+@echo                   This parameter is ignored when running examples (-e), as the solr.solr.home depends
+@echo                   on which example is run.
+@echo.
 @echo   -e example    Name of the example to run; available examples:
 @echo       cloud:          SolrCloud example
 @echo       default:        Solr default example
@@ -154,14 +173,6 @@ goto done
 @echo.
 goto done
 
-REM Allow user to import vars from an include file
-REM vars set in the include file can be overridden with
-REM command line args
-:include_vars
-IF "%SOLR_INCLUDE%"=="" set SOLR_INCLUDE=solr.in.cmd
-IF EXIST "%SOLR_INCLUDE%" CALL "%SOLR_INCLUDE%"
-goto parse_args
-
 REM Really basic command-line arg parsing
 :parse_args
 IF "%SCRIPT_CMD%"=="" set SCRIPT_CMD=start
@@ -177,6 +188,8 @@ IF "%1"=="-c" goto set_cloud_mode
 IF "%1"=="-cloud" goto set_cloud_mode
 IF "%1"=="-d" goto set_server_dir
 IF "%1"=="-dir" goto set_server_dir
+IF "%1"=="-s" goto set_solr_home_dir
+IF "%1"=="-solr.home" goto set_solr_home_dir
 IF "%1"=="-e" goto set_example
 IF "%1"=="-example" goto set_example
 IF "%1"=="-h" goto set_host
@@ -190,13 +203,14 @@ IF "%1"=="-zkhost" goto set_zookeeper
 IF "%1"=="-a" goto set_addl_opts
 IF "%1"=="-addlopts" goto set_addl_opts
 IF "%1"=="-noprompt" goto set_noprompt
+IF "%1"=="-k" goto set_stop_key
+IF "%1"=="-key" goto set_stop_key
 IF NOT "%1"=="" goto invalid_cmd_line
-process_script_cmd
 
 :set_script_cmd
 set SCRIPT_CMD=%1
 SHIFT
-goto include_vars
+goto parse_args
 
 :set_foreground_mode
 set FG=1
@@ -232,6 +246,19 @@ SHIFT
 SHIFT
 goto parse_args
 
+:set_solr_home_dir
+
+set "arg=%~2"
+set firstChar=%arg:~0,1%
+IF "%firstChar%"=="-" (
+  set SCRIPT_ERROR=Expected directory but found %2 instead!
+  goto invalid_cmd_line
+)
+set "SOLR_HOME=%~2"
+SHIFT
+SHIFT
+goto parse_args
+
 :set_example
 
 set "arg=%~2"
@@ -256,7 +283,6 @@ IF "%firstChar%"=="-" (
 )
 
 set SOLR_HEAP=%~2
-@echo SOLR_HEAP=%SOLR_HEAP%
 SHIFT
 SHIFT
 goto parse_args
@@ -287,6 +313,19 @@ SHIFT
 SHIFT
 goto parse_args
 
+:set_stop_key
+set "arg=%~2"
+set firstChar=%arg:~0,1%
+IF "%firstChar%"=="-" (
+  set SCRIPT_ERROR=Expected port but found %2 instead!
+  goto invalid_cmd_line
+)
+
+set STOP_KEY=%~2
+SHIFT
+SHIFT
+goto parse_args
+
 :set_zookeeper
 
 set "arg=%~2"
@@ -302,7 +341,6 @@ SHIFT
 goto parse_args
 
 :set_addl_opts
-
 set "arg=%~2"
 set "SOLR_ADDL_ARGS=%~2"
 SHIFT
@@ -320,7 +358,7 @@ REM Perform the requested command after 
 IF "%verbose%"=="1" (
   @echo Using Solr root directory: %SOLR_TIP%
   @echo Using Java: %JAVA%
-  %JAVA% -version
+  "%JAVA%" -version
 )
 
 IF NOT "%SOLR_HOST%"=="" (
@@ -359,6 +397,19 @@ IF "%EXAMPLE%"=="" (
 
 :start_solr
 IF "%SOLR_HOME%"=="" set "SOLR_HOME=%SOLR_SERVER_DIR%\solr"
+IF NOT EXIST "%SOLR_HOME%\" (
+  IF EXIST "%SOLR_SERVER_DIR%\%SOLR_HOME%" (
+    set "SOLR_HOME=%SOLR_SERVER_DIR%\%SOLR_HOME%"
+  ) ELSE (
+    set SCRIPT_ERROR=Solr home directory %SOLR_HOME% not found!
+    goto err
+  )
+)
+
+IF NOT EXIST "%SOLR_HOME%\solr.xml" (
+  set SCRIPT_ERROR=Solr home directory %SOLR_HOME% must contain solr.xml!
+  goto err
+)
 
 IF "%STOP_KEY%"=="" set STOP_KEY=solrrocks
 
@@ -541,7 +592,13 @@ for /l %%x in (1, 1, !CLOUD_NUM_NODES!) 
     @echo Cloning %DEFAULT_SERVER_DIR% into %SOLR_TIP%\node%%x
     xcopy /Q /E /I "%DEFAULT_SERVER_DIR%" "%SOLR_TIP%\node%%x"
   )
-  
+
+  IF NOT "!SOLR_HEAP!"=="" (
+    set "DASHM=-m !SOLR_HEAP!"
+  ) ELSE (
+    set "DASHM="
+  )
+
   IF %%x EQU 1 (
     set EXAMPLE=
     IF NOT "!ZK_HOST!"=="" (
@@ -550,8 +607,8 @@ for /l %%x in (1, 1, !CLOUD_NUM_NODES!) 
       set "DASHZ="
     )
     @echo Starting node1 on port !NODE_PORT! using command:
-    @echo solr -cloud -p !NODE_PORT! -d node1 !DASHZ!
-    START "" "%SDIR%\solr" -f -cloud -p !NODE_PORT! -d node1 !DASHZ!
+    @echo solr -cloud -p !NODE_PORT! -d node1 !DASHZ! !DASHM!
+    START "" "%SDIR%\solr" -f -cloud -p !NODE_PORT! -d node1 !DASHZ! !DASHM!
     set NODE1_PORT=!NODE_PORT!
   ) ELSE (
     IF "!ZK_HOST!"=="" (
@@ -559,8 +616,8 @@ for /l %%x in (1, 1, !CLOUD_NUM_NODES!) 
       set "ZK_HOST=localhost:!ZK_PORT!"
     )
     @echo Starting node%%x on port !NODE_PORT! using command:
-    @echo solr -cloud -p !NODE_PORT! -d node%%x -z !ZK_HOST!
-    START "" "%SDIR%\solr" -f -cloud -p !NODE_PORT! -d node%%x -z !ZK_HOST!
+    @echo solr -cloud -p !NODE_PORT! -d node%%x -z !ZK_HOST! !DASHM!
+    START "" "%SDIR%\solr" -f -cloud -p !NODE_PORT! -d node%%x -z !ZK_HOST! !DASHM!
   )
 
   timeout /T 10

Modified: lucene/dev/branches/lucene5969/solr/bin/solr.in.cmd
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/bin/solr.in.cmd?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/bin/solr.in.cmd (original)
+++ lucene/dev/branches/lucene5969/solr/bin/solr.in.cmd Wed Oct 15 01:26:26 2014
@@ -25,7 +25,7 @@ REM Increase Java Min/Max Heap as needed
 set SOLR_JAVA_MEM=-Xms512m -Xmx512m -XX:MaxPermSize=256m -XX:PermSize=256m
 
 REM Enable verbose GC logging
-set GC_LOG_OPTS=-verbose:gc -XX:+PrintHeapAtGC -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution
+set GC_LOG_OPTS=-verbose:gc -XX:+PrintHeapAtGC -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime
 
 REM These GC settings have shown to work well for a number of common Solr workloads
 set GC_TUNE=-XX:-UseSuperWord ^
@@ -34,11 +34,13 @@ set GC_TUNE=-XX:-UseSuperWord ^
  -XX:TargetSurvivorRatio=90 ^
  -XX:MaxTenuringThreshold=8 ^
  -XX:+UseConcMarkSweepGC ^
+ -XX:+UseParNewGC ^
+ -XX:ConcGCThreads=4 -XX:ParallelGCThreads=4 ^
  -XX:+CMSScavengeBeforeRemark ^
  -XX:PretenureSizeThreshold=64m ^
  -XX:CMSFullGCsBeforeCompaction=1 ^
  -XX:+UseCMSInitiatingOccupancyOnly ^
- -XX:CMSInitiatingOccupancyFraction=70 ^
+ -XX:CMSInitiatingOccupancyFraction=50 ^
  -XX:CMSTriggerPermRatio=80 ^
  -XX:CMSMaxAbortablePrecleanTime=6000 ^
  -XX:+CMSParallelRemarkEnabled ^

Modified: lucene/dev/branches/lucene5969/solr/bin/solr.in.sh
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/bin/solr.in.sh?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/bin/solr.in.sh (original)
+++ lucene/dev/branches/lucene5969/solr/bin/solr.in.sh Wed Oct 15 01:26:26 2014
@@ -23,7 +23,7 @@ SOLR_JAVA_MEM="-Xms512m -Xmx512m -XX:Max
 
 # Enable verbose GC logging
 GC_LOG_OPTS="-verbose:gc -XX:+PrintHeapAtGC -XX:+PrintGCDetails \
--XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution"
+-XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime"
 
 # These GC settings have shown to work well for a number of common Solr workloads
 GC_TUNE="-XX:-UseSuperWord \
@@ -32,11 +32,13 @@ GC_TUNE="-XX:-UseSuperWord \
 -XX:TargetSurvivorRatio=90 \
 -XX:MaxTenuringThreshold=8 \
 -XX:+UseConcMarkSweepGC \
+-XX:+UseParNewGC \
+-XX:ConcGCThreads=4 -XX:ParallelGCThreads=4 \
 -XX:+CMSScavengeBeforeRemark \
 -XX:PretenureSizeThreshold=64m \
 -XX:CMSFullGCsBeforeCompaction=1 \
 -XX:+UseCMSInitiatingOccupancyOnly \
--XX:CMSInitiatingOccupancyFraction=70 \
+-XX:CMSInitiatingOccupancyFraction=50 \
 -XX:CMSTriggerPermRatio=80 \
 -XX:CMSMaxAbortablePrecleanTime=6000 \
 -XX:+CMSParallelRemarkEnabled \

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/Overseer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/Overseer.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/Overseer.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/Overseer.java Wed Oct 15 01:26:26 2014
@@ -18,7 +18,11 @@ package org.apache.solr.cloud;
  */
 
 import static java.util.Collections.singletonMap;
+import static org.apache.solr.cloud.OverseerCollectionProcessor.SLICE_UNIQUE;
 import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
+import static org.apache.solr.cloud.OverseerCollectionProcessor.ONLY_ACTIVE_NODES;
+import static org.apache.solr.cloud.OverseerCollectionProcessor.COLL_PROP_PREFIX;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESLICEUNIQUE;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -26,12 +30,15 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.ListIterator;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -448,6 +455,13 @@ public class Overseer implements Closeab
           case DELETEREPLICAPROP:
             clusterState = deleteReplicaProp(clusterState, message);
             break;
+          case BALANCESLICEUNIQUE:
+            ExclusiveSliceProperty dProp = new ExclusiveSliceProperty(this, clusterState, message);
+            if (dProp.balanceProperty()) {
+              String collName = message.getStr(ZkStateReader.COLLECTION_PROP);
+              clusterState = newState(clusterState, singletonMap(collName, dProp.getDocCollection()));
+            }
+            break;
           default:
             throw new RuntimeException("unknown operation:" + operation
                 + " contents:" + message.getProperties());
@@ -532,9 +546,10 @@ public class Overseer implements Closeab
       String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
       String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
       String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
-      if (property.startsWith(OverseerCollectionProcessor.COLL_PROP_PREFIX) == false) {
+      if (StringUtils.startsWith(property, COLL_PROP_PREFIX) == false) {
         property = OverseerCollectionProcessor.COLL_PROP_PREFIX + property;
       }
+      property = property.toLowerCase(Locale.ROOT);
       String propVal = message.getStr(ZkStateReader.PROPERTY_VALUE_PROP);
       String sliceUnique = message.getStr(OverseerCollectionProcessor.SLICE_UNIQUE);
 
@@ -593,7 +608,7 @@ public class Overseer implements Closeab
       String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
       String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
       String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
-      if (property.startsWith(OverseerCollectionProcessor.COLL_PROP_PREFIX) == false) {
+      if (StringUtils.startsWith(property, COLL_PROP_PREFIX) == false) {
         property = OverseerCollectionProcessor.COLL_PROP_PREFIX + property;
       }
 
@@ -934,8 +949,16 @@ public class Overseer implements Closeab
         // System.out.println("########## UPDATE MESSAGE: " + JSONUtil.toJSON(message));
         if (slice != null) {
           Replica oldReplica = slice.getReplicasMap().get(coreNodeName);
-          if (oldReplica != null && oldReplica.containsKey(ZkStateReader.LEADER_PROP)) {
-            replicaProps.put(ZkStateReader.LEADER_PROP, oldReplica.get(ZkStateReader.LEADER_PROP));
+          if (oldReplica != null) {
+            if (oldReplica.containsKey(ZkStateReader.LEADER_PROP)) {
+              replicaProps.put(ZkStateReader.LEADER_PROP, oldReplica.get(ZkStateReader.LEADER_PROP));
+            }
+            // Move custom props over.
+            for (Map.Entry<String, Object> ent : oldReplica.getProperties().entrySet()) {
+              if (ent.getKey().startsWith(COLL_PROP_PREFIX)) {
+                replicaProps.put(ent.getKey(), ent.getValue());
+              }
+            }
           }
         }
 
@@ -1146,7 +1169,7 @@ public class Overseer implements Closeab
         return null;
       }
 
-      private ClusterState updateSlice(ClusterState state, String collectionName, Slice slice) {
+    ClusterState updateSlice(ClusterState state, String collectionName, Slice slice) {
         // System.out.println("###!!!### OLD CLUSTERSTATE: " + JSONUtil.toJSON(state.getCollectionStates()));
         // System.out.println("Updating slice:" + slice);
         DocCollection newCollection = null;
@@ -1374,6 +1397,307 @@ public class Overseer implements Closeab
 
   }
 
+  // Class to encapsulate processing replica properties that have at most one replica hosting a property per slice.
+  private class ExclusiveSliceProperty {
+    private ClusterStateUpdater updater;
+    private ClusterState clusterState;
+    private final boolean onlyActiveNodes;
+    private final String property;
+    private final DocCollection collection;
+    private final String collectionName;
+
+    // Key structure. For each node, list all replicas on it regardless of whether they have the property or not.
+    private final Map<String, List<SliceReplica>> nodesHostingReplicas = new HashMap<>();
+    // Key structure. For each node, a list of the replicas _currently_ hosting the property.
+    private final Map<String, List<SliceReplica>> nodesHostingProp = new HashMap<>();
+    Set<String> shardsNeedingHosts = new HashSet<String>();
+    Map<String, Slice> changedSlices = new HashMap<>(); // Work on copies rather than the underlying cluster state.
+
+    private int origMaxPropPerNode = 0;
+    private int origModulo = 0;
+    private int tmpMaxPropPerNode = 0;
+    private int tmpModulo = 0;
+    Random rand = new Random();
+
+    private int assigned = 0;
+
+    ExclusiveSliceProperty(ClusterStateUpdater updater, ClusterState clusterState, ZkNodeProps message) {
+      this.updater = updater;
+      this.clusterState = clusterState;
+      String tmp = message.getStr(ZkStateReader.PROPERTY_PROP);
+      if (StringUtils.startsWith(tmp, OverseerCollectionProcessor.COLL_PROP_PREFIX) == false) {
+        tmp = OverseerCollectionProcessor.COLL_PROP_PREFIX + tmp;
+      }
+      this.property = tmp.toLowerCase(Locale.ROOT);
+      collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
+
+      if (StringUtils.isBlank(collectionName) || StringUtils.isBlank(property)) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            "Overseer '" + message.getStr(QUEUE_OPERATION) + "'  requires both the '" + ZkStateReader.COLLECTION_PROP + "' and '" +
+                ZkStateReader.PROPERTY_PROP + "' parameters. No action taken ");
+      }
+
+      Boolean sliceUnique = Boolean.parseBoolean(message.getStr(SLICE_UNIQUE));
+      if (sliceUnique == false &&
+          Overseer.sliceUniqueBooleanProperties.contains(this.property) == false) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
+            + " the property be a pre-defined property (e.g. 'preferredLeader') or that 'sliceUnique' be set to 'true' " +
+            " Property: " + this.property + " sliceUnique: " + Boolean.toString(sliceUnique));
+      }
+
+      collection = clusterState.getCollection(collectionName);
+      if (collection == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            "Could not find collection ' " + collectionName + "' for overseer operation '" +
+                message.getStr(QUEUE_OPERATION) + "'. No action taken.");
+      }
+      onlyActiveNodes = Boolean.parseBoolean(message.getStr(ONLY_ACTIVE_NODES, "true"));
+    }
+
+
+    private DocCollection getDocCollection() {
+      return collection;
+    }
+
+    private boolean isActive(Replica replica) {
+      return ZkStateReader.ACTIVE.equals(replica.getStr(ZkStateReader.STATE_PROP));
+    }
+
+    // Collect a list of all the nodes that _can_ host the indicated property. Along the way, also collect any of
+    // the replicas on that node that _already_ host the property as well as any slices that do _not_ have the
+    // property hosted.
+    //
+    // Return true if any node needs its property reassigned. False if the property is already balanced for
+    // the collection.
+
+    private boolean collectCurrentPropStats() {
+      int maxAssigned = 0;
+      // Get a list of potential replicas that can host the property _and_ their counts
+      // Move any obvious entries to a list of replicas to change the property on
+      Set<String> allHosts = new HashSet<>();
+      for (Slice slice : collection.getSlices()) {
+        boolean sliceHasProp = false;
+        for (Replica replica : slice.getReplicas()) {
+          if (onlyActiveNodes && isActive(replica) == false) {
+            if (StringUtils.isNotBlank(replica.getStr(property))) {
+              removeProp(slice, replica.getName()); // Note, we won't be committing this to ZK until later.
+            }
+            continue;
+          }
+          allHosts.add(replica.getNodeName());
+          String nodeName = replica.getNodeName();
+          if (StringUtils.isNotBlank(replica.getStr(property))) {
+            if (sliceHasProp) {
+              throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                  "'" + BALANCESLICEUNIQUE + "' should only be called for properties that have at most one member " +
+                      "in any slice with the property set. No action taken.");
+            }
+            if (nodesHostingProp.containsKey(nodeName) == false) {
+              nodesHostingProp.put(nodeName, new ArrayList<SliceReplica>());
+            }
+            nodesHostingProp.get(nodeName).add(new SliceReplica(slice, replica));
+            ++assigned;
+            maxAssigned = Math.max(maxAssigned, nodesHostingProp.get(nodeName).size());
+            sliceHasProp = true;
+          }
+          if (nodesHostingReplicas.containsKey(nodeName) == false) {
+            nodesHostingReplicas.put(nodeName, new ArrayList<SliceReplica>());
+          }
+          nodesHostingReplicas.get(nodeName).add(new SliceReplica(slice, replica));
+        }
+      }
+
+      // If the total number of already-hosted properties assigned to nodes
+      // that have potential to host leaders is equal to the slice count _AND_ none of the current nodes has more than
+      // the max number of properties, there's nothing to do.
+      origMaxPropPerNode = collection.getSlices().size() / allHosts.size();
+
+      // Some nodes can have one more of the property if the numbers aren't exactly even.
+      origModulo = collection.getSlices().size() % allHosts.size();
+      if (origModulo > 0) {
+        origMaxPropPerNode++;  // have to have some nodes with 1 more property.
+      }
+
+      // We can say for sure that we need to rebalance if we don't have as many assigned properties as slices.
+      if (assigned != collection.getSlices().size()) {
+        return true;
+      }
+
+      // Make sure there are no more slices at the limit than the "leftovers"
+      // Let's say there's 7 slices and 3 nodes. We need to distribute the property as 3 on node1, 2 on node2 and 2 on node3
+      // (3, 2, 2) We need to be careful to not distribute them as 3, 3, 1. that's what this check is all about.
+      int counter = origModulo;
+      for (List<SliceReplica> list : nodesHostingProp.values()) {
+        if (list.size() == origMaxPropPerNode) --counter;
+      }
+      if (counter == 0) return false; // nodes with 1 extra leader are exactly the needed number
+
+      return true;
+    }
+
+    private void removeSliceAlreadyHostedFromPossibles(String sliceName) {
+      for (Map.Entry<String, List<SliceReplica>> entReplica : nodesHostingReplicas.entrySet()) {
+
+        ListIterator<SliceReplica> iter = entReplica.getValue().listIterator();
+        while (iter.hasNext()) {
+          SliceReplica sr = iter.next();
+          if (sr.slice.getName().equals(sliceName))
+            iter.remove();
+        }
+      }
+    }
+
+    private void balanceUnassignedReplicas() {
+      tmpMaxPropPerNode = origMaxPropPerNode; // A bit clumsy, but don't want to duplicate code.
+      tmpModulo = origModulo;
+
+      // Get the nodeName and shardName for the node that has the least room for this
+
+      while (shardsNeedingHosts.size() > 0) {
+        String nodeName = "";
+        int minSize = Integer.MAX_VALUE;
+        SliceReplica srToChange = null;
+        for (String slice : shardsNeedingHosts) {
+          for (Map.Entry<String, List<SliceReplica>> ent : nodesHostingReplicas.entrySet()) {
+            // A little tricky. If we don't set this to something below, then it means all possible places to
+            // put this property are full up, so just put it somewhere.
+            if (srToChange == null && ent.getValue().size() > 0) {
+              srToChange = ent.getValue().get(0);
+            }
+            ListIterator<SliceReplica> iter = ent.getValue().listIterator();
+            while (iter.hasNext()) {
+              SliceReplica sr = iter.next();
+              if (StringUtils.equals(slice, sr.slice.getName()) == false) {
+                continue;
+              }
+              if (nodesHostingProp.containsKey(ent.getKey()) == false) {
+                nodesHostingProp.put(ent.getKey(), new ArrayList<SliceReplica>());
+              }
+              if (minSize > nodesHostingReplicas.get(ent.getKey()).size() && nodesHostingProp.get(ent.getKey()).size() < tmpMaxPropPerNode) {
+                minSize = nodesHostingReplicas.get(ent.getKey()).size();
+                srToChange = sr;
+                nodeName = ent.getKey();
+              }
+            }
+          }
+        }
+        // Now, you have a slice and node to put it on
+        shardsNeedingHosts.remove(srToChange.slice.getName());
+        if (nodesHostingProp.containsKey(nodeName) == false) {
+          nodesHostingProp.put(nodeName, new ArrayList<SliceReplica>());
+        }
+        nodesHostingProp.get(nodeName).add(srToChange);
+        adjustLimits(nodesHostingProp.get(nodeName));
+        removeSliceAlreadyHostedFromPossibles(srToChange.slice.getName());
+        addProp(srToChange.slice, srToChange.replica.getName());
+      }
+    }
+
+    // Adjust the min/max counts allowed per node. Special handling here for dealing with the fact
+    // that no node should have more than 1 more replica with this property than any other.
+    private void adjustLimits(List<SliceReplica> changeList) {
+      if (changeList.size() == tmpMaxPropPerNode) {
+        if (tmpModulo < 0) return;
+
+        --tmpModulo;
+        if (tmpModulo == 0) {
+          --tmpMaxPropPerNode;
+          --tmpModulo;  // Prevent dropping tmpMaxPropPerNode again.
+        }
+      }
+    }
+
+    // Go through the list of presently-hosted properties and remove any that have too many replicas that host the property
+    private void removeOverallocatedReplicas() {
+      tmpMaxPropPerNode = origMaxPropPerNode; // A bit clumsy, but don't want to duplicate code.
+      tmpModulo = origModulo;
+
+      for (Map.Entry<String, List<SliceReplica>> ent : nodesHostingProp.entrySet()) {
+        while (ent.getValue().size() > tmpMaxPropPerNode) { // remove delta nodes
+          ent.getValue().remove(rand.nextInt(ent.getValue().size()));
+        }
+        adjustLimits(ent.getValue());
+      }
+    }
+
+    private void removeProp(Slice origSlice, String replicaName) {
+      getReplicaFromChanged(origSlice, replicaName).getProperties().remove(property);
+    }
+
+    private void addProp(Slice origSlice, String replicaName) {
+      getReplicaFromChanged(origSlice, replicaName).getProperties().put(property, "true");
+    }
+
+    // Just a place to encapsulate the fact that we need to have new slices (copy) to update before we
+    // put this all in the cluster state.
+    private Replica getReplicaFromChanged(Slice origSlice, String replicaName) {
+      Slice newSlice = changedSlices.get(origSlice.getName());
+      Replica replica;
+      if (newSlice != null) {
+        replica = newSlice.getReplica(replicaName);
+      } else {
+        newSlice = new Slice(origSlice.getName(), origSlice.getReplicasCopy(), origSlice.shallowCopy());
+        changedSlices.put(origSlice.getName(), newSlice);
+        replica = newSlice.getReplica(replicaName);
+      }
+      if (replica == null) {
+        throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Should have been able to find replica '" +
+            replicaName + "' in slice '" + origSlice.getName() + "'. No action taken");
+      }
+      return replica;
+
+    }
+    // Main entry point for carrying out the action. Returns "true" if we have actually moved properties around.
+
+    private boolean balanceProperty() {
+      if (collectCurrentPropStats() == false) {
+        return false;
+      }
+
+      // we have two lists based on nodeName
+      // 1> all the nodes that _could_ host a property for the slice
+      // 2> all the nodes that _currently_ host a property for the slice.
+
+      // So, remove a replica from the nodes that have too many
+      removeOverallocatedReplicas();
+
+      // prune replicas belonging to a slice that have the property currently assigned from the list of replicas
+      // that could host the property.
+      for (Map.Entry<String, List<SliceReplica>> entProp : nodesHostingProp.entrySet()) {
+        for (SliceReplica srHosting : entProp.getValue()) {
+          removeSliceAlreadyHostedFromPossibles(srHosting.slice.getName());
+        }
+      }
+
+      // Assemble the list of slices that do not have any replica hosting the property:
+      for (Map.Entry<String, List<SliceReplica>> ent : nodesHostingReplicas.entrySet()) {
+        ListIterator<SliceReplica> iter = ent.getValue().listIterator();
+        while (iter.hasNext()) {
+          SliceReplica sr = iter.next();
+          shardsNeedingHosts.add(sr.slice.getName());
+        }
+      }
+
+      // At this point, nodesHostingProp should contain _only_ lists of replicas that belong to slices that do _not_
+      // have any replica hosting the property. So let's assign them.
+
+      balanceUnassignedReplicas();
+      for (Slice newSlice : changedSlices.values()) {
+        clusterState = updater.updateSlice(clusterState, collectionName, newSlice);
+      }
+      return true;
+    }
+  }
+
+  private class SliceReplica {
+    private Slice slice;
+    private Replica replica;
+
+    SliceReplica(Slice slice, Replica replica) {
+      this.slice = slice;
+      this.replica = replica;
+    }
+  }
   static void getShardNames(Integer numShards, List<String> shardNames) {
     if(numShards == null)
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "numShards" + " is a required param");

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java Wed Oct 15 01:26:26 2014
@@ -23,10 +23,10 @@ import static org.apache.solr.common.clo
 import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICAPROP;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESLICEUNIQUE;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.CLUSTERSTATUS;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
@@ -48,6 +48,7 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.SynchronousQueue;
@@ -154,6 +155,8 @@ public class OverseerCollectionProcessor
 
   public static final String SLICE_UNIQUE = "sliceUnique";
 
+  public static final String ONLY_ACTIVE_NODES = "onlyactivenodes";
+
   public int maxParallelThreads = 10;
 
   public static final Set<String> KNOWN_CLUSTER_PROPS = ImmutableSet.of(ZkStateReader.LEGACY_CLOUD, ZkStateReader.URL_SCHEME);
@@ -164,6 +167,18 @@ public class OverseerCollectionProcessor
       ZkStateReader.MAX_SHARDS_PER_NODE, "1",
       ZkStateReader.AUTO_ADD_REPLICAS, "false");
 
+  private static final Random RANDOM;
+  static {
+    // We try to make things reproducible in the context of our tests by initializing the random instance
+    // based on the current seed
+    String seed = System.getProperty("tests.seed");
+    if (seed == null) {
+      RANDOM = new Random();
+    } else {
+      RANDOM = new Random(seed.hashCode());
+    }
+  }
+
   public ExecutorService tpe ;
   
   private static Logger log = LoggerFactory
@@ -633,6 +648,9 @@ public class OverseerCollectionProcessor
           case DELETEREPLICAPROP:
             processReplicaDeletePropertyCommand(message);
             break;
+          case BALANCESLICEUNIQUE:
+            balanceProperty(message);
+            break;
           default:
             throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:"
                 + operation);
@@ -696,6 +714,21 @@ public class OverseerCollectionProcessor
     inQueue.offer(ZkStateReader.toJSON(m));
   }
 
+  private void balanceProperty(ZkNodeProps message) throws KeeperException, InterruptedException {
+    if (StringUtils.isBlank(message.getStr(COLLECTION_PROP)) || StringUtils.isBlank(message.getStr(PROPERTY_PROP))) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "The '" + COLLECTION_PROP + "' and '" + PROPERTY_PROP +
+              "' parameters are required for the BALANCESLICEUNIQUE operation, no action taken");
+    }
+    SolrZkClient zkClient = zkStateReader.getZkClient();
+    DistributedQueue inQueue = Overseer.getInQueue(zkClient);
+    Map<String, Object> propMap = new HashMap<>();
+    propMap.put(Overseer.QUEUE_OPERATION, BALANCESLICEUNIQUE.toLower());
+    propMap.putAll(message.getProperties());
+    inQueue.offer(ZkStateReader.toJSON(new ZkNodeProps(propMap)));
+  }
+
+
   @SuppressWarnings("unchecked")
   private void getOverseerStatus(ZkNodeProps message, NamedList results) throws KeeperException, InterruptedException {
     String leaderNode = getLeaderNode(zkStateReader.getZkClient());
@@ -1623,8 +1656,8 @@ public class OverseerCollectionProcessor
       Set<String> nodes = clusterState.getLiveNodes();
       List<String> nodeList = new ArrayList<>(nodes.size());
       nodeList.addAll(nodes);
-      
-      Collections.shuffle(nodeList);
+
+      Collections.shuffle(nodeList, RANDOM);
 
       // TODO: Have maxShardsPerNode param for this operation?
 
@@ -1634,7 +1667,7 @@ public class OverseerCollectionProcessor
       // TODO: change this to handle sharding a slice into > 2 sub-shards.
 
       for (int i = 1; i <= subSlices.size(); i++) {
-        Collections.shuffle(nodeList);
+        Collections.shuffle(nodeList, RANDOM);
         String sliceName = subSlices.get(i - 1);
         for (int j = 2; j <= repFactor; j++) {
           String subShardNodeName = nodeList.get((repFactor * (i - 1) + (j - 2)) % nodeList.size());
@@ -2284,7 +2317,7 @@ public class OverseerCollectionProcessor
       List<String> nodeList = new ArrayList<>(nodes.size());
       nodeList.addAll(nodes);
       if (createNodeList != null) nodeList.retainAll(createNodeList);
-      Collections.shuffle(nodeList);
+      Collections.shuffle(nodeList, RANDOM);
       
       if (nodeList.size() <= 0) {
         throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/ZkController.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/ZkController.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/ZkController.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/cloud/ZkController.java Wed Oct 15 01:26:26 2014
@@ -20,8 +20,11 @@ package org.apache.solr.cloud;
 import java.io.File;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
+import java.net.ConnectException;
 import java.net.InetAddress;
 import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
 import java.net.URLEncoder;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
@@ -30,6 +33,7 @@ import java.util.Collections;
 import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -41,6 +45,8 @@ import java.util.concurrent.TimeoutExcep
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
+import org.apache.http.NoHttpResponseException;
+import org.apache.http.conn.ConnectTimeoutException;
 import org.apache.solr.client.solrj.impl.HttpSolrServer;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.WaitForState;
 import org.apache.solr.common.SolrException;
@@ -78,6 +84,8 @@ import org.apache.zookeeper.KeeperExcept
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.apache.zookeeper.KeeperException.SessionExpiredException;
 import org.apache.zookeeper.data.Stat;
+import org.noggit.JSONParser;
+import org.noggit.ObjectBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -1623,6 +1631,23 @@ public final class ZkController {
               server.request(prepCmd);
               break;
             } catch (Exception e) {
+
+              // if the core container is shutdown, don't wait
+              if (cc.isShutDown()) {
+                throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
+                    "Core container is shutdown.");
+              }
+
+              Throwable rootCause = SolrException.getRootCause(e);
+              if (rootCause instanceof IOException) {
+                // if there was a communication error talking to the leader, see if the leader is even alive
+                if (!zkStateReader.getClusterState().liveNodesContain(leaderProps.getNodeName())) {
+                  throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
+                      "Node "+leaderProps.getNodeName()+" hosting leader for "+
+                          shard+" in "+collection+" is not live!");
+                }
+              }
+
               SolrException.log(log,
                   "There was a problem making a request to the leader", e);
               try {
@@ -1930,16 +1955,19 @@ public final class ZkController {
   }  
   
   public String getLeaderInitiatedRecoveryState(String collection, String shardId, String coreNodeName) {
-    
+    Map<String,Object> stateObj = getLeaderInitiatedRecoveryStateObject(collection, shardId, coreNodeName);
+    return (stateObj != null) ? (String)stateObj.get("state") : null;
+  }
+
+  public Map<String,Object> getLeaderInitiatedRecoveryStateObject(String collection, String shardId, String coreNodeName) {
+
     if (collection == null || shardId == null || coreNodeName == null)
       return null; // if we don't have complete data about a core in cloud mode, return null
     
     String znodePath = getLeaderInitiatedRecoveryZnodePath(collection, shardId, coreNodeName);
-    String state = null;
+    byte[] stateData = null;
     try {
-      byte[] data = zkClient.getData(znodePath, null, new Stat(), false);
-      if (data != null && data.length > 0)
-        state = new String(data, "UTF-8");
+      stateData = zkClient.getData(znodePath, null, new Stat(), false);
     } catch (NoNodeException ignoreMe) {
       // safe to ignore as this znode will only exist if the leader initiated recovery
     } catch (ConnectionLossException cle) {
@@ -1950,8 +1978,6 @@ public final class ZkController {
       // sort of safe to ignore ??? Usually these are seen when the core is going down
       // or there are bigger issues to deal with than reading this znode
       log.warn("Unable to read "+znodePath+" due to: "+see);
-    } catch (UnsupportedEncodingException e) {
-      throw new Error("JVM Does not seem to support UTF-8", e);
     } catch (Exception exc) {
       log.error("Failed to read data from znode "+znodePath+" due to: "+exc);
       if (exc instanceof SolrException) {
@@ -1961,7 +1987,22 @@ public final class ZkController {
             "Failed to read data from znodePath: "+znodePath, exc);
       }
     }
-    return state;
+
+    Map<String,Object> stateObj = null;
+    if (stateData != null && stateData.length > 0) {
+      Object parsedJson = ZkStateReader.fromJSON(stateData);
+      if (parsedJson instanceof Map) {
+        stateObj = (Map<String,Object>)parsedJson;
+      } else if (parsedJson instanceof String) {
+        // old format still in ZK
+        stateObj = new LinkedHashMap<>();
+        stateObj.put("state", (String)parsedJson);
+      } else {
+        throw new SolrException(ErrorCode.SERVER_ERROR, "Leader-initiated recovery state data is invalid! "+parsedJson);
+      }
+    }
+
+    return stateObj;
   }
   
   private void updateLeaderInitiatedRecoveryState(String collection, String shardId, String coreNodeName, String state) {
@@ -1982,14 +2023,22 @@ public final class ZkController {
       }
       return;
     }
-    
-    byte[] znodeData = null;
+
+    Map<String,Object> stateObj = null;
     try {
-      znodeData = state.getBytes("UTF-8");
-    } catch (UnsupportedEncodingException e) {
-      throw new Error("JVM Does not seem to support UTF-8", e);
+      stateObj = getLeaderInitiatedRecoveryStateObject(collection, shardId, coreNodeName);
+    } catch (Exception exc) {
+      log.warn(exc.getMessage(), exc);
     }
+    if (stateObj == null)
+      stateObj = new LinkedHashMap<>();
+
+    stateObj.put("state", state);
+    // only update the createdBy value if its not set
+    if (stateObj.get("createdByNodeName") == null)
+      stateObj.put("createdByNodeName", String.valueOf(this.nodeName));
 
+    byte[] znodeData = ZkStateReader.toJSON(stateObj);
     boolean retryOnConnLoss = true; // be a little more robust when trying to write data
     try {
       if (zkClient.exists(znodePath, retryOnConnLoss)) {

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/core/SolrCore.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/core/SolrCore.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/core/SolrCore.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/core/SolrCore.java Wed Oct 15 01:26:26 2014
@@ -1527,8 +1527,7 @@ public final class SolrCore implements S
         // (caches take a little while to instantiate)
         final boolean useCaches = !realtime;
         final String newName = realtime ? "realtime" : "main";
-        tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), 
-                                    getSolrConfig().indexConfig, newName,
+        tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), newName,
                                     newReader, true, useCaches, true, directoryFactory);
 
       } else {
@@ -1539,7 +1538,7 @@ public final class SolrCore implements S
           // so that we pick up any uncommitted changes and so we don't go backwards
           // in time on a core reload
           DirectoryReader newReader = newReaderCreator.call();
-          tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), getSolrConfig().indexConfig, 
+          tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), 
               (realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
         } else if (solrConfig.nrtMode) {
           RefCounted<IndexWriter> writer = getUpdateHandler().getSolrCoreState().getIndexWriter(this);
@@ -1549,7 +1548,7 @@ public final class SolrCore implements S
           } finally {
             writer.decref();
           }
-          tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), getSolrConfig().indexConfig, 
+          tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(),
               (realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
         } else {
          // normal open that happens at startup

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java Wed Oct 15 01:26:26 2014
@@ -88,8 +88,8 @@ public class DumpRequestHandler extends 
   }
 
   @Override
-  public SolrRequestHandler getSubHandler(String path) {
-    if(subpaths !=null && subpaths.contains(path)) return this;
+  public SolrRequestHandler getSubHandler(String subPath) {
+    if(subpaths !=null && subpaths.contains(subPath)) return this;
     return null;
   }
   private List<String> subpaths;

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java Wed Oct 15 01:26:26 2014
@@ -54,6 +54,8 @@ import org.apache.solr.search.SolrReturn
 import org.apache.solr.search.SortSpec;
 import org.apache.solr.search.SyntaxError;
 import org.apache.solr.util.SolrPluginUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.io.Reader;
@@ -77,6 +79,8 @@ public class MoreLikeThisHandler extends
 {
   // Pattern is thread safe -- TODO? share this with general 'fl' param
   private static final Pattern splitList = Pattern.compile(",| ");
+
+  protected static Logger log = LoggerFactory.getLogger(MoreLikeThisHandler.class);
   
   @Override
   public void init(NamedList args) {
@@ -267,8 +271,7 @@ public class MoreLikeThisHandler extends
           }
         }
       } catch (ExitableDirectoryReader.ExitingReaderException ex) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "MLTHandler Request took too long during query expansion. Terminating request.");
+        log.warn( "Query: " + req.getParamString() + "; " + ex.getMessage());
       } finally {
         SolrQueryTimeoutImpl.reset();
       }

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/NestedRequestHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/NestedRequestHandler.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/NestedRequestHandler.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/NestedRequestHandler.java Wed Oct 15 01:26:26 2014
@@ -23,6 +23,7 @@ import org.apache.solr.request.SolrReque
 /**An interface for RequestHandlers that need to handle all paths under their registered path
  */
 public interface NestedRequestHandler {
-
-  public SolrRequestHandler getSubHandler(String path);
+  /** Return a RequestHandler to handle a subpath of the path this handler is registered at.
+   */
+  public SolrRequestHandler getSubHandler(String subPath);
 }

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java Wed Oct 15 01:26:26 2014
@@ -197,7 +197,7 @@ public abstract class RequestHandlerBase
 
 
   @Override
-  public SolrRequestHandler getSubHandler(String path) {
+  public SolrRequestHandler getSubHandler(String subPath) {
     return null;
   }
 

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/SnapPuller.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/SnapPuller.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/SnapPuller.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/SnapPuller.java Wed Oct 15 01:26:26 2014
@@ -1228,7 +1228,7 @@ public class SnapPuller {
           //read the size of the packet
           int packetSize = readInt(intbytes);
           if (packetSize <= 0) {
-            LOG.warn("No content recieved for file: " + currentFile);
+            LOG.warn("No content received for file: " + currentFile);
             return NO_CONTENT;
           }
           if (buf.length < packetSize)
@@ -1496,7 +1496,7 @@ public class SnapPuller {
           //read the size of the packet
           int packetSize = readInt(intbytes);
           if (packetSize <= 0) {
-            LOG.warn("No content recieved for file: " + currentFile);
+            LOG.warn("No content received for file: " + currentFile);
             return NO_CONTENT;
           }
           if (buf.length < packetSize)

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java Wed Oct 15 01:26:26 2014
@@ -23,6 +23,7 @@ import static org.apache.solr.cloud.Over
 import static org.apache.solr.cloud.OverseerCollectionProcessor.CREATE_NODE_SET;
 import static org.apache.solr.cloud.OverseerCollectionProcessor.SLICE_UNIQUE;
 import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
+import static org.apache.solr.cloud.OverseerCollectionProcessor.ONLY_ACTIVE_NODES;
 import static org.apache.solr.cloud.OverseerCollectionProcessor.ONLY_IF_DOWN;
 import static org.apache.solr.cloud.OverseerCollectionProcessor.REPLICATION_FACTOR;
 import static org.apache.solr.cloud.OverseerCollectionProcessor.REQUESTID;
@@ -37,6 +38,7 @@ import static org.apache.solr.common.clo
 import static org.apache.solr.common.cloud.ZkStateReader.AUTO_ADD_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESLICEUNIQUE;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICAPROP;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.CLUSTERPROP;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
@@ -246,7 +248,10 @@ public class CollectionsHandler extends 
         this.handleDeleteReplicaProp(req, rsp);
         break;
       }
-
+      case BALANCESLICEUNIQUE: {
+        this.handleBalanceSliceUnique(req, rsp);
+        break;
+      }
       default: {
           throw new RuntimeException("Unknown action: " + action);
       }
@@ -294,6 +299,27 @@ public class CollectionsHandler extends 
 
 
 
+  private void handleBalanceSliceUnique(SolrQueryRequest req, SolrQueryResponse rsp) throws KeeperException, InterruptedException {
+    req.getParams().required().check(COLLECTION_PROP, PROPERTY_PROP);
+    Boolean sliceUnique = Boolean.parseBoolean(req.getParams().get(SLICE_UNIQUE));
+    String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
+    if (StringUtils.startsWith(prop, OverseerCollectionProcessor.COLL_PROP_PREFIX) == false) {
+      prop = OverseerCollectionProcessor.COLL_PROP_PREFIX + prop;
+    }
+
+    if (sliceUnique == false &&
+        Overseer.sliceUniqueBooleanProperties.contains(prop) == false) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
+      + " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'sliceUnique' be set to 'true'. " +
+      " Property: " + prop + " sliceUnique: " + Boolean.toString(sliceUnique));
+    }
+
+    Map<String, Object> map = ZkNodeProps.makeMap(Overseer.QUEUE_OPERATION, BALANCESLICEUNIQUE.toLower());
+    copyIfNotNull(req.getParams(), map, COLLECTION_PROP, PROPERTY_PROP, ONLY_ACTIVE_NODES, SLICE_UNIQUE);
+
+    handleResponse(BALANCESLICEUNIQUE.toLower(), new ZkNodeProps(map), rsp);
+  }
+
   private void handleOverseerStatus(SolrQueryRequest req, SolrQueryResponse rsp) throws KeeperException, InterruptedException {
     Map<String, Object> props = ZkNodeProps.makeMap(
         Overseer.QUEUE_OPERATION, OVERSEERSTATUS.toLower());

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java Wed Oct 15 01:26:26 2014
@@ -23,6 +23,7 @@ import org.apache.solr.common.util.Named
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.handler.RequestHandlerBase;
 import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.request.SolrRequestHandler;
 import org.apache.solr.response.SolrQueryResponse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -136,4 +137,9 @@ public class InfoHandler extends Request
   protected void setSystemInfoHandler(SystemInfoHandler systemInfoHandler) {
     this.systemInfoHandler = systemInfoHandler;
   }
+
+  @Override
+  public SolrRequestHandler getSubHandler(String subPath) {
+    return this;
+  }
 }

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java Wed Oct 15 01:26:26 2014
@@ -69,6 +69,7 @@ public class HttpShardHandlerFactory ext
   int soTimeout = 0; 
   int connectionTimeout = 0; 
   int maxConnectionsPerHost = 20;
+  int maxConnections = 10000;
   int corePoolSize = 0;
   int maximumPoolSize = Integer.MAX_VALUE;
   int keepAliveTime = 5;
@@ -122,6 +123,7 @@ public class HttpShardHandlerFactory ext
     }
     this.connectionTimeout = getParameter(args, HttpClientUtil.PROP_CONNECTION_TIMEOUT, connectionTimeout);
     this.maxConnectionsPerHost = getParameter(args, HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, maxConnectionsPerHost);
+    this.maxConnections = getParameter(args, HttpClientUtil.PROP_MAX_CONNECTIONS, maxConnections);
     this.corePoolSize = getParameter(args, INIT_CORE_POOL_SIZE, corePoolSize);
     this.maximumPoolSize = getParameter(args, INIT_MAX_POOL_SIZE, maximumPoolSize);
     this.keepAliveTime = getParameter(args, MAX_THREAD_IDLE_TIME, keepAliveTime);
@@ -148,7 +150,7 @@ public class HttpShardHandlerFactory ext
 
     ModifiableSolrParams clientParams = new ModifiableSolrParams();
     clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, maxConnectionsPerHost);
-    clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS, 10000);
+    clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS, maxConnections);
     clientParams.set(HttpClientUtil.PROP_SO_TIMEOUT, soTimeout);
     clientParams.set(HttpClientUtil.PROP_CONNECTION_TIMEOUT, connectionTimeout);
     clientParams.set(HttpClientUtil.PROP_USE_RETRY, false);

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java Wed Oct 15 01:26:26 2014
@@ -804,7 +804,8 @@ public class QueryComponent extends Sear
     boolean distribSinglePass = rb.req.getParams().getBool(ShardParams.DISTRIB_SINGLE_PASS, false);
 
     if(distribSinglePass || (fields != null && fields.wantsField(keyFieldName)
-        && fields.getRequestedFieldNames() != null && Arrays.asList(keyFieldName, "score").containsAll(fields.getRequestedFieldNames()))) {
+        && fields.getRequestedFieldNames() != null  
+        && (!fields.hasPatternMatching() && Arrays.asList(keyFieldName, "score").containsAll(fields.getRequestedFieldNames())))) {
       sreq.purpose |= ShardRequest.PURPOSE_GET_FIELDS;
       rb.onePassDistributedQuery = true;
     }

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java Wed Oct 15 01:26:26 2014
@@ -244,8 +244,6 @@ public class SearchHandler extends Reque
         }
       } catch (ExitableDirectoryReader.ExitingReaderException ex) {
         log.warn( "Query: " + req.getParamString() + "; " + ex.getMessage());
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Request took too long during query expansion. Terminating request.");
       } finally {
         SolrQueryTimeoutImpl.reset();
       }

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java Wed Oct 15 01:26:26 2014
@@ -25,6 +25,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.solr.common.params.SolrParams;
@@ -191,7 +192,7 @@ public class JsonLoader extends ContentS
 
     private void handleSplitMode(String split, String[] fields) throws IOException {
       if(split == null) split = "/";
-      if(fields == null || fields.length ==0) fields = new String[]{"/**"};
+      if(fields == null || fields.length ==0) fields = new String[]{"$FQN:/**"};
       final boolean echo = "true".equals( req.getParams().get("echo"));
       JsonRecordReader jsonRecordReader = JsonRecordReader.getInst(split, Arrays.asList(fields));
       jsonRecordReader.streamRecords(parser,new JsonRecordReader.Handler() {
@@ -222,7 +223,7 @@ public class JsonLoader extends ContentS
       });
     }
 
-    private void handleStreamingSingleDocs() throws IOException
+    /*private void handleStreamingSingleDocs() throws IOException
     {
       while( true ) {
         int ev = parser.nextEvent();
@@ -240,7 +241,7 @@ public class JsonLoader extends ContentS
           throw  new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unexpected event :"+ev);
         }
       }
-    }
+    }*/
 
     //
     // "delete":"id"

Modified: lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/schema/SchemaManager.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/schema/SchemaManager.java?rev=1631928&r1=1631927&r2=1631928&view=diff
==============================================================================
--- lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/schema/SchemaManager.java (original)
+++ lucene/dev/branches/lucene5969/solr/core/src/java/org/apache/solr/schema/SchemaManager.java Wed Oct 15 01:26:26 2014
@@ -24,29 +24,22 @@ import org.apache.solr.core.CoreDescript
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.rest.BaseSolrResource;
-import org.noggit.JSONParser;
-import org.noggit.ObjectBuilder;
+import org.apache.solr.util.CommandOperation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
 import java.io.Reader;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
 import static java.util.Collections.EMPTY_LIST;
 import static java.util.Collections.EMPTY_MAP;
-import static java.util.Collections.emptyMap;
 import static java.util.Collections.singletonList;
 import static java.util.Collections.singletonMap;
-import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
 import static org.apache.solr.schema.FieldType.CLASS_NAME;
 import static org.apache.solr.schema.IndexSchema.DESTINATION;
 import static org.apache.solr.schema.IndexSchema.NAME;
@@ -87,20 +80,20 @@ public class SchemaManager {
   * @return List of errors. If the List is empty then the operation is successful.
    */
   public List performOperations(Reader rdr)  {
-    List<Operation> ops = null;
+    List<CommandOperation> ops = null;
     try {
-      ops = SchemaManager.parse(rdr);
+      ops = CommandOperation.parse(rdr);
     } catch (Exception e) {
       String msg= "Error parsing schema operations ";
       log.warn(msg  ,e );
-      return Collections.singletonList(singletonMap(ERR_MSGS, msg + ":" + e.getMessage()));
+      return Collections.singletonList(singletonMap(CommandOperation.ERR_MSGS, msg + ":" + e.getMessage()));
     }
-    List errs = captureErrors(ops);
+    List errs = CommandOperation.captureErrors(ops);
     if(!errs.isEmpty()) return errs;
 
     IndexSchema schema = req.getCore().getLatestSchema();
     if (!(schema instanceof ManagedIndexSchema)) {
-      return singletonList( singletonMap(ERR_MSGS,"schema is not editable"));
+      return singletonList( singletonMap(CommandOperation.ERR_MSGS,"schema is not editable"));
     }
 
     synchronized (schema.getSchemaUpdateLock()) {
@@ -109,14 +102,14 @@ public class SchemaManager {
 
   }
 
-  private List<String> doOperations(List<Operation> operations){
+  private List doOperations(List<CommandOperation> operations){
     int timeout = req.getParams().getInt(BaseSolrResource.UPDATE_TIMEOUT_SECS, -1);
     long startTime = System.nanoTime();
     long endTime = timeout >0  ? System.nanoTime()+ (timeout * 1000*1000) : Long.MAX_VALUE;
     SolrCore core = req.getCore();
     for(;System.nanoTime() < endTime ;) {
       managedIndexSchema = (ManagedIndexSchema) core.getLatestSchema();
-      for (Operation op : operations) {
+      for (CommandOperation op : operations) {
         if (ADD_FIELD.equals(op.name) || ADD_DYNAMIC_FIELD.equals(op.name)) {
           applyAddField(op);
         } else if(ADD_COPY_FIELD.equals(op.name)) {
@@ -128,7 +121,7 @@ public class SchemaManager {
           op.addError("No such operation : " + op.name);
         }
       }
-      List errs = captureErrors(operations);
+      List errs = CommandOperation.captureErrors(operations);
       if (!errs.isEmpty()) return errs;
 
       try {
@@ -169,13 +162,13 @@ public class SchemaManager {
     }
   }
 
-  private boolean applyAddType(Operation op) {
+  private boolean applyAddType(CommandOperation op) {
     String name = op.getStr(NAME);
     String clz = op.getStr(CLASS_NAME);
     if(op.hasError())
       return false;
     try {
-      FieldType fieldType = managedIndexSchema.newFieldType(name, clz, (Map<String, ?>) op.commandData);
+      FieldType fieldType = managedIndexSchema.newFieldType(name, clz, op.getDataMap());
       managedIndexSchema = managedIndexSchema.addFieldTypes(singletonList(fieldType), false);
       return true;
     } catch (Exception e) {
@@ -184,7 +177,7 @@ public class SchemaManager {
     }
   }
 
-  private String getErrorStr(Exception e) {
+  public static String getErrorStr(Exception e) {
     StringBuilder sb = new StringBuilder();
     Throwable cause= e;
     for(int i =0;i<5;i++) {
@@ -195,7 +188,7 @@ public class SchemaManager {
     return sb.toString();
   }
 
-  private boolean applyAddCopyField(Operation op) {
+  private boolean applyAddCopyField(CommandOperation op) {
     String src  = op.getStr(SOURCE);
     List<String> dest = op.getStrs(DESTINATION);
     if(op.hasError())
@@ -210,7 +203,7 @@ public class SchemaManager {
   }
 
 
-  private boolean applyAddField( Operation op) {
+  private boolean applyAddField( CommandOperation op) {
     String name = op.getStr(NAME);
     String type = op.getStr(TYPE);
     if(op.hasError())
@@ -238,142 +231,4 @@ public class SchemaManager {
     return true;
   }
 
-
-  public static class Operation {
-    public final String name;
-    private Object commandData;//this is most often a map
-    private List<String> errors = new ArrayList<>();
-
-    Operation(String operationName, Object metaData) {
-      commandData = metaData;
-      this.name = operationName;
-      if(!KNOWN_OPS.contains(this.name)) errors.add("Unknown Operation :"+this.name);
-    }
-
-    public String getStr(String key, String def){
-      String s = (String) getMapVal(key);
-      return s == null ? def : s;
-    }
-
-    private Object getMapVal(String key) {
-      if (commandData instanceof Map) {
-        Map metaData = (Map) commandData;
-        return metaData.get(key);
-      } else {
-        String msg= " value has to be an object for operation :"+name;
-        if(!errors.contains(msg)) errors.add(msg);
-        return null;
-      }
-    }
-
-    public List<String> getStrs(String key){
-      List<String> val = getStrs(key, null);
-      if(val == null) errors.add("'"+key + "' is a required field");
-      return val;
-
-    }
-
-    /**Get collection of values for a key. If only one val is present a
-     * single value collection is returned
-     */
-    public List<String> getStrs(String key, List<String> def){
-      Object v = getMapVal(key);
-      if(v == null){
-        return def;
-      } else {
-        if (v instanceof List) {
-          ArrayList<String> l =  new ArrayList<>();
-          for (Object o : (List)v) {
-            l.add(String.valueOf(o));
-          }
-          if(l.isEmpty()) return def;
-          return  l;
-        } else {
-          return singletonList(String.valueOf(v));
-        }
-      }
-
-    }
-
-    /**Get a required field. If missing it adds to the errors
-     */
-    public String getStr(String key){
-      String s = getStr(key,null);
-      if(s==null) errors.add("'"+key + "' is a required field");
-      return s;
-    }
-
-    private Map errorDetails(){
-       return makeMap(name, commandData, ERR_MSGS, errors);
-    }
-
-    public boolean hasError() {
-      return !errors.isEmpty();
-    }
-
-    public void addError(String s) {
-      errors.add(s);
-    }
-
-    /**Get all the values from the metadata for the command
-     * without the specified keys
-     */
-    public Map getValuesExcluding(String... keys) {
-      getMapVal(null);
-      if(hasError()) return emptyMap();//just to verify the type is Map
-      LinkedHashMap<String, Object> cp = new LinkedHashMap<>((Map<String,?>) commandData);
-      if(keys == null) return cp;
-      for (String key : keys) {
-        cp.remove(key);
-      }
-      return cp;
-    }
-
-
-    public List<String> getErrors() {
-      return errors;
-    }
-  }
-
-  /**Parse the command operations into command objects
-   */
-  static List<Operation> parse(Reader rdr ) throws IOException {
-    JSONParser parser = new JSONParser(rdr);
-
-    ObjectBuilder ob = new ObjectBuilder(parser);
-
-    if(parser.lastEvent() != JSONParser.OBJECT_START) {
-      throw new RuntimeException("The JSON must be an Object of the form {\"command\": {...},...");
-    }
-    List<Operation> operations = new ArrayList<>();
-    for(;;) {
-      int ev = parser.nextEvent();
-      if (ev==JSONParser.OBJECT_END) return operations;
-      Object key =  ob.getKey();
-      ev = parser.nextEvent();
-      Object val = ob.getVal();
-      if (val instanceof List) {
-        List list = (List) val;
-        for (Object o : list) {
-          operations.add(new Operation(String.valueOf(key), o));
-        }
-      } else {
-        operations.add(new Operation(String.valueOf(key), val));
-      }
-    }
-
-  }
-
-  static List<Map> captureErrors(List<Operation> ops){
-    List<Map> errors = new ArrayList<>();
-    for (SchemaManager.Operation op : ops) {
-      if(op.hasError()) {
-        errors.add(op.errorDetails());
-      }
-    }
-    return errors;
-  }
-  public static final String ERR_MSGS = "errorMessages";
-
-
 }



Mime
View raw message