activemq-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From andytay...@apache.org
Subject [48/51] [partial] activemq-6 git commit: ACTIVEMQ6-2 Update to HQ master
Date Tue, 11 Nov 2014 11:01:18 GMT
http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/hornetq/src/main/resources/config/trunk/clustered/hornetq-users.xml
----------------------------------------------------------------------
diff --git a/distribution/hornetq/src/main/resources/config/trunk/clustered/hornetq-users.xml b/distribution/hornetq/src/main/resources/config/trunk/clustered/hornetq-users.xml
deleted file mode 100644
index 934306c..0000000
--- a/distribution/hornetq/src/main/resources/config/trunk/clustered/hornetq-users.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<configuration xmlns="urn:hornetq" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-            xsi:schemaLocation="urn:hornetq /schema/hornetq-users.xsd">
-   <!-- the default user.  this is used where username is null-->
-   <defaultuser name="guest" password="guest">
-      <role name="guest"/>
-   </defaultuser>
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/hornetq/src/main/resources/config/trunk/clustered/jndi.properties
----------------------------------------------------------------------
diff --git a/distribution/hornetq/src/main/resources/config/trunk/clustered/jndi.properties b/distribution/hornetq/src/main/resources/config/trunk/clustered/jndi.properties
deleted file mode 100644
index e2a9832..0000000
--- a/distribution/hornetq/src/main/resources/config/trunk/clustered/jndi.properties
+++ /dev/null
@@ -1,2 +0,0 @@
-java.naming.factory.initial=org.jnp.interfaces.NamingContextFactory
-java.naming.factory.url.pkgs=org.jboss.naming:org.jnp.interfaces
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/hornetq/src/main/resources/config/trunk/clustered/logging.properties
----------------------------------------------------------------------
diff --git a/distribution/hornetq/src/main/resources/config/trunk/clustered/logging.properties b/distribution/hornetq/src/main/resources/config/trunk/clustered/logging.properties
deleted file mode 100644
index dd49ead..0000000
--- a/distribution/hornetq/src/main/resources/config/trunk/clustered/logging.properties
+++ /dev/null
@@ -1,34 +0,0 @@
-############################################################
-#  	Default Logging Configuration File
-#
-# You can use a different file by specifying a filename
-# with the java.util.logging.config.file system property.
-# For example java -Djava.util.logging.config.file=myfile
-############################################################
-
-############################################################
-#  	Global properties
-############################################################
-
-# "handlers" specifies a comma separated list of log Handler
-# classes.  These handlers will be installed during VM startup.
-# Note that these classes must be on the system classpath.
-# By default we only configure a ConsoleHandler, which will only
-# show messages at the INFO and above levels.
-handlers=java.util.logging.ConsoleHandler,java.util.logging.FileHandler
-java.util.logging.ConsoleHandler.formatter=org.hornetq.integration.logging.HornetQLoggerFormatter
-java.util.logging.FileHandler.level=INFO
-java.util.logging.FileHandler.pattern=logs/hornetq.log
-java.util.logging.FileHandler.formatter=org.hornetq.integration.logging.HornetQLoggerFormatter
-# Default global logging level.
-# This specifies which kinds of events are logged across
-# all loggers.  For any given facility this global level
-# can be overriden by a facility specific level
-# Note that the ConsoleHandler also has a separate level
-# setting to limit messages printed to the console.
-.level= INFO
-
-############################################################
-# Handler specific properties.
-# Describes specific configuration info for Handlers.
-############################################################

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-beans.xml
----------------------------------------------------------------------
diff --git a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-beans.xml b/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-beans.xml
deleted file mode 100644
index 195019f..0000000
--- a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-beans.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<deployment xmlns="urn:jboss:bean-deployer:2.0">
-
-   <bean name="Naming" class="org.jnp.server.NamingBeanImpl"/>
-
-   <!-- JNDI server. Disable this if you don't want JNDI -->
-   <bean name="JNDIServer" class="org.jnp.server.Main">
-      <property name="namingInfo">
-         <inject bean="Naming"/>
-      </property>
-      <property name="port">1099</property>
-      <property name="bindAddress">localhost</property>
-      <property name="rmiPort">1098</property>
-      <property name="rmiBindAddress">localhost</property>
-   </bean>
-   
-   <!-- MBean server -->
-   <bean name="MBeanServer" class="javax.management.MBeanServer">
-      <constructor factoryClass="java.lang.management.ManagementFactory"
-                   factoryMethod="getPlatformMBeanServer"/>
-   </bean> 
-
-   <!-- The core configuration -->
-   <bean name="Configuration" class="org.hornetq.core.config.impl.FileConfiguration">
-   </bean>
-
-	<!-- The security manager -->
-   <bean name="HornetQSecurityManager" class="org.hornetq.spi.core.security.HornetQSecurityManagerImpl">
-      <start ignored="true"/>
-      <stop ignored="true"/>
-   </bean>
-
-	<!-- The core server -->
-   <bean name="HornetQServer" class="org.hornetq.core.server.impl.HornetQServerImpl">
-      <constructor>
-         <parameter>
-            <inject bean="Configuration"/>
-         </parameter>
-         <parameter>
-            <inject bean="MBeanServer"/>
-         </parameter>
-         <parameter>
-            <inject bean="HornetQSecurityManager"/>
-         </parameter>        
-      </constructor>
-      <start ignored="true"/>
-      <stop ignored="true"/>
-   </bean>
-   
-   <!-- The JMS server -->
-   <bean name="JMSServerManager" class="org.hornetq.jms.server.impl.JMSServerManagerImpl">
-      <constructor>         
-         <parameter>
-            <inject bean="HornetQServer"/>
-         </parameter>         
-      </constructor>
-   </bean>
-
-</deployment>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-configuration.xml
----------------------------------------------------------------------
diff --git a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-configuration.xml b/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-configuration.xml
deleted file mode 100644
index d6788f3..0000000
--- a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-configuration.xml
+++ /dev/null
@@ -1,59 +0,0 @@
-<configuration xmlns="urn:hornetq"
-	       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-	       xsi:schemaLocation="urn:hornetq /schema/hornetq-configuration.xsd">
-	
-	<journal-min-files>10</journal-min-files>
-	
-	<connectors>
-		<connector name="netty">
-			<factory-class>org.hornetq.core.remoting.impl.netty.NettyConnectorFactory</factory-class>
-			<param key="host"  value="${hornetq.remoting.netty.host:localhost}"/>
-			<param key="port"  value="${hornetq.remoting.netty.port:5445}"/>
-		</connector>
-		
-		<connector name="netty-throughput">
-            <factory-class>org.hornetq.core.remoting.impl.netty.NettyConnectorFactory</factory-class>
-            <param key="host"  value="${hornetq.remoting.netty.host:localhost}"/>
-            <param key="port"  value="${hornetq.remoting.netty.port:5455}"/>
-            <param key="batch-delay" value="50"/>
-      </connector>
-	</connectors>
-	
-	<acceptors>
-		<acceptor name="netty">
-			<factory-class>org.hornetq.core.remoting.impl.netty.NettyAcceptorFactory</factory-class>
-			<param key="host"  value="${hornetq.remoting.netty.host:localhost}"/>
-			<param key="port"  value="${hornetq.remoting.netty.port:5445}"/>
-		</acceptor>
-		
-        <acceptor name="netty-throughput">
-            <factory-class>org.hornetq.core.remoting.impl.netty.NettyAcceptorFactory</factory-class>
-            <param key="host"  value="${jboss.bind.address:localhost}"/>
-            <param key="port"  value="${hornetq.remoting.netty.port:5455}"/>
-            <param key="batch-delay" value="50"/>
-            <param key="direct-deliver" value="false"/>
-        </acceptor>
-	</acceptors>
-	
-	<security-settings>
-		<security-setting match="#">
-			<permission type="createNonDurableQueue" roles="guest"/>
-			<permission type="deleteNonDurableQueue" roles="guest"/>
-			<permission type="consume" roles="guest"/>
-			<permission type="send" roles="guest"/>
-		</security-setting>
-	</security-settings>
-	
-    <address-settings>
-       <!--default for catch all-->
-       <address-setting match="#">
-          <dead-letter-address>jms.queue.DLQ</dead-letter-address>
-          <expiry-address>jms.queue.ExpiryQueue</expiry-address>
-          <redelivery-delay>0</redelivery-delay>
-          <max-size-bytes>10485760</max-size-bytes>       
-          <message-counter-history-day-limit>10</message-counter-history-day-limit>
-          <address-full-policy>BLOCK</address-full-policy>
-       </address-setting>
-    </address-settings>
-	
-</configuration>

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-jms.xml
----------------------------------------------------------------------
diff --git a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-jms.xml b/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-jms.xml
deleted file mode 100644
index 3a3dbeb..0000000
--- a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-jms.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<configuration xmlns="urn:hornetq"
-	       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-	       xsi:schemaLocation="urn:hornetq /schema/hornetq-jms.xsd">
-	
-	<connection-factory name="ConnectionFactory">
-	   <connectors>
-         <connector-ref connector-name="netty"/>
-	   </connectors>
-		<entries>
-			<entry name="/ConnectionFactory"/>
-			<entry name="/XAConnectionFactory"/>
-		</entries>
-	</connection-factory>
-	
-	<!--
-	<connection-factory name="NettyThroughputConnectionFactory">
-	   <connectors>
-         <connector-ref connector-name="netty-throughput"/>
-	   </connectors>
-		<entries>
-			<entry name="/ThroughputConnectionFactory"/>
-			<entry name="/XAThroughputConnectionFactory"/>
-		</entries>
-	</connection-factory>
-	-->
-	
-	<queue name="DLQ">
-		<entry name="/queue/DLQ"/>
-	</queue>
-	<queue name="ExpiryQueue">
-		<entry name="/queue/ExpiryQueue"/>
-	</queue>   
-	<queue name="ExampleQueue">
-		<entry name="/queue/ExampleQueue"/>
-	</queue>
-	<topic name="ExampleTopic">
-		<entry name="/topic/ExampleTopic"/>
-	</topic>
-	
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-users.xml
----------------------------------------------------------------------
diff --git a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-users.xml b/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-users.xml
deleted file mode 100644
index 934306c..0000000
--- a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/hornetq-users.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<configuration xmlns="urn:hornetq" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-            xsi:schemaLocation="urn:hornetq /schema/hornetq-users.xsd">
-   <!-- the default user.  this is used where username is null-->
-   <defaultuser name="guest" password="guest">
-      <role name="guest"/>
-   </defaultuser>
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/hornetq/src/main/resources/config/trunk/non-clustered/jndi.properties
----------------------------------------------------------------------
diff --git a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/jndi.properties b/distribution/hornetq/src/main/resources/config/trunk/non-clustered/jndi.properties
deleted file mode 100644
index e2a9832..0000000
--- a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/jndi.properties
+++ /dev/null
@@ -1,2 +0,0 @@
-java.naming.factory.initial=org.jnp.interfaces.NamingContextFactory
-java.naming.factory.url.pkgs=org.jboss.naming:org.jnp.interfaces
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/hornetq/src/main/resources/config/trunk/non-clustered/logging.properties
----------------------------------------------------------------------
diff --git a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/logging.properties b/distribution/hornetq/src/main/resources/config/trunk/non-clustered/logging.properties
deleted file mode 100644
index 00f9c3d..0000000
--- a/distribution/hornetq/src/main/resources/config/trunk/non-clustered/logging.properties
+++ /dev/null
@@ -1,38 +0,0 @@
-############################################################
-#  	Default Logging Configuration File
-#
-# You can use a different file by specifying a filename
-# with the java.util.logging.config.file system property.
-# For example java -Djava.util.logging.config.file=myfile
-############################################################
-
-############################################################
-#  	Global properties
-############################################################
-
-# "handlers" specifies a comma separated list of log Handler
-# classes.  These handlers will be installed during VM startup.
-# Note that these classes must be on the system classpath.
-# By default we only configure a ConsoleHandler, which will only
-# show messages at the INFO and above levels.
-handlers=java.util.logging.ConsoleHandler,java.util.logging.FileHandler
-java.util.logging.ConsoleHandler.formatter=org.hornetq.integration.logging.HornetQLoggerFormatter
-java.util.logging.FileHandler.level=INFO
-java.util.logging.FileHandler.formatter=org.hornetq.integration.logging.HornetQLoggerFormatter
-# cycle through 10 files of 20MiB max which append logs
-java.util.logging.FileHandler.count=10
-java.util.logging.FileHandler.limit=20971520
-java.util.logging.FileHandler.append=true
-java.util.logging.FileHandler.pattern=logs/hornetq.%g.log
-# Default global logging level.
-# This specifies which kinds of events are logged across
-# all loggers.  For any given facility this global level
-# can be overriden by a facility specific level
-# Note that the ConsoleHandler also has a separate level
-# setting to limit messages printed to the console.
-.level= INFO
-
-############################################################
-# Handler specific properties.
-# Describes specific configuration info for Handlers.
-############################################################

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/hornetq/src/main/resources/examples/common/config/ant.properties
----------------------------------------------------------------------
diff --git a/distribution/hornetq/src/main/resources/examples/common/config/ant.properties b/distribution/hornetq/src/main/resources/examples/common/config/ant.properties
deleted file mode 100644
index 88ef1a7..0000000
--- a/distribution/hornetq/src/main/resources/examples/common/config/ant.properties
+++ /dev/null
@@ -1,4 +0,0 @@
-hornetq.example.logserveroutput=true
-hornetq.jars.dir=${imported.basedir}/../../lib
-jars.dir=${imported.basedir}/../../lib
-aio.library.path=${imported.basedir}/../../bin
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/jboss-mc/pom.xml
----------------------------------------------------------------------
diff --git a/distribution/jboss-mc/pom.xml b/distribution/jboss-mc/pom.xml
deleted file mode 100644
index 1a3fc0a..0000000
--- a/distribution/jboss-mc/pom.xml
+++ /dev/null
@@ -1,123 +0,0 @@
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-   <modelVersion>4.0.0</modelVersion>
-
-
-   <parent>
-      <groupId>org.hornetq</groupId>
-      <artifactId>hornetq-distribution</artifactId>
-      <version>2.5.0-SNAPSHOT</version>
-   </parent>
-
-   <artifactId>jboss-mc</artifactId>
-   <packaging>jar</packaging>
-   <name>JBoss Microcontainer jar</name>
-
-   <dependencies>
-      <!--<dependency>
-          <groupId>org.jboss.logging</groupId>
-          <artifactId>jboss-logging-spi</artifactId>
-      </dependency>-->
-      <dependency>
-          <groupId>org.jboss.microcontainer</groupId>
-          <artifactId>jboss-kernel</artifactId>
-      </dependency>
-      <dependency>
-          <groupId>org.jboss.microcontainer</groupId>
-          <artifactId>jboss-dependency</artifactId>
-      </dependency>
-      <dependency>
-          <groupId>org.jboss</groupId>
-          <artifactId>jboss-reflect</artifactId>
-      </dependency>
-      <dependency>
-          <groupId>org.jboss</groupId>
-          <artifactId>jboss-common-core</artifactId>
-      </dependency>
-      <dependency>
-          <groupId>org.jboss</groupId>
-          <artifactId>jboss-mdr</artifactId>
-      </dependency>
-      <dependency>
-          <groupId>org.jboss</groupId>
-          <artifactId>jbossxb</artifactId>
-      </dependency>
-      <dependency>
-          <groupId>sun-jaxb</groupId>
-          <artifactId>jaxb-api</artifactId>
-      </dependency>
-      <dependency>
-            <groupId>org.jboss.logging</groupId>
-            <artifactId>jboss-logging</artifactId>
-        </dependency>
-      <dependency>
-            <groupId>org.jboss.logmanager</groupId>
-            <artifactId>jboss-logmanager</artifactId>
-        </dependency>
-   </dependencies>
-
-   <build>
-      <resources>
-         <resource>
-            <directory>src/main/resources</directory>
-            <filtering>true</filtering>
-         </resource>
-      </resources>
-      <plugins>
-         <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-shade-plugin</artifactId>
-            <executions>
-               <execution>
-                  <phase>package</phase>
-                  <goals>
-                     <goal>shade</goal>
-                  </goals>
-                  <configuration>
-                     <artifactSet>
-                        <excludes>
-                           <exclude>org.jboss.netty:netty</exclude>
-                           <exclude>org.jboss.logging:jboss-logging-spi</exclude>
-                        </excludes>
-                     </artifactSet>
-                     <filters>
-                        <!--<filter>
-                           <artifact>org.jboss.logging:jboss-logging-spi</artifact>
-                        </filter>-->
-                        <filter>
-                           <artifact>org.jboss.microcontainer:jboss-kernel</artifact>
-                        </filter>
-                        <filter>
-                           <artifact>org.jboss.microcontainer:jboss-dependency</artifact>
-                        </filter>
-                        <filter>
-                           <artifact>org.jboss:jboss-reflect</artifact>
-                        </filter>
-                        <filter>
-                           <artifact>org.jboss:jboss-common-core</artifact>
-                        </filter>
-                        <filter>
-                           <artifact>org.jboss:jboss-mdr</artifact>
-                        </filter>
-                        <filter>
-                           <artifact>org.jboss:jbossxb</artifact>
-                        </filter>
-                        <filter>
-                           <artifact>sun-jaxb:jaxb-api</artifact>
-                        </filter>
-                        <filter>
-                           <artifact>org.jboss.logging:jboss-logging</artifact>
-                        </filter>
-                        <filter>
-                           <artifact>org.jboss.logmanager:jboss-logmanager</artifact>
-                        </filter>
-                     </filters>
-                  </configuration>
-               </execution>
-
-            </executions>
-         </plugin>
-      </plugins>
-   </build>
-
-</project>

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/distribution/pom.xml
----------------------------------------------------------------------
diff --git a/distribution/pom.xml b/distribution/pom.xml
index 498d761..538e685 100644
--- a/distribution/pom.xml
+++ b/distribution/pom.xml
@@ -34,7 +34,6 @@
 
    <modules>
       <module>jnp-client</module>
-      <module>jboss-mc</module>
       <module>hornetq</module>
    </modules>
 

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/quickstart-guide/en/running.xml
----------------------------------------------------------------------
diff --git a/docs/quickstart-guide/en/running.xml b/docs/quickstart-guide/en/running.xml
index 39f8797..de0ba4f 100644
--- a/docs/quickstart-guide/en/running.xml
+++ b/docs/quickstart-guide/en/running.xml
@@ -26,21 +26,21 @@
    <section id="running.standalone">
       <title>Standalone HornetQ</title>
       <para>To run a stand-alone server, open up a shell or command prompt and navigate into the
-            <literal>bin</literal> directory. Then execute <literal>./run.sh</literal> (or <literal
-            >run.bat</literal> on Windows) and you should see the following output </para>
+            <literal>bin</literal> directory. Then execute <literal>./hornetq run</literal> (or <literal
+            >./hornetq.cmd run</literal> on Windows) and you should see the following output </para>
       <programlisting>
-         bin$ ./run.sh
-         
-         15:05:54,108 INFO  @main [HornetQBootstrapServer] Starting HornetQ server
+         bin$ ./hornetq run
+
+         11:05:06,589 INFO  [org.hornetq.integration.bootstrap] HQ101000: Starting HornetQ Server
          ...
-         15:06:02,566 INFO  @main [HornetQServerImpl] HornetQ Server version 
-         2.0.0.CR3 (yellowjacket, 111) started
+         11:05:10,848 INFO  [org.hornetq.core.server] HQ221001: HornetQ Server version 2.5.0.SNAPSHOT (Wild Hornet, 125) [e32ae252-52ee-11e4-a716-7785dc3013a3]
       </programlisting>
       <para>HornetQ is now running.</para>
       <para>Both the run and the stop scripts use the config under <literal
-            >config/stand-alone/non-clustered</literal> by default. The configuration can be changed
-         by running <literal>./run.sh ../config/stand-alone/clustered</literal> or another config of
-         your choosing. This is the same for the stop script and the windows bat files.</para>
+            >config/non-clustered</literal> by default. The configuration can be changed
+         by running <literal>./hornetq run xml:../config/non-clustered/bootstrap.xml</literal> or another config of
+         your choosing.</para>
+      <para>The server can be stopped by running <literal>./hornetq stop</literal></para>
    </section>
    <section id="running.jboss.Wildfly">
       <title>HornetQ In Wildfly</title>

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/HornetQ_User_Manual.xml
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/HornetQ_User_Manual.xml b/docs/user-manual/en/HornetQ_User_Manual.xml
index 3ddf954..a726cfc 100644
--- a/docs/user-manual/en/HornetQ_User_Manual.xml
+++ b/docs/user-manual/en/HornetQ_User_Manual.xml
@@ -36,6 +36,7 @@
   <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="persistence.xml"/>
   <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="configuring-transports.xml"/>
   <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="connection-ttl.xml"/>
+  <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="slow-consumers.xml"/>
   <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="transaction-config.xml"/>
   <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="flow-control.xml"/>
   <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="send-guarantees.xml"/>

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/clusters.xml
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/clusters.xml b/docs/user-manual/en/clusters.xml
index 25e79b2..2dd1896 100644
--- a/docs/user-manual/en/clusters.xml
+++ b/docs/user-manual/en/clusters.xml
@@ -594,11 +594,13 @@ ClientSession session = factory.createSession();</programlisting>
                shows all the available configuration options</para>
             <itemizedlist>
                 <listitem id="clusters.address">
-                    <para><literal>address</literal>. Each cluster connection only applies to
-                        messages sent to an address that starts with this value. Note: this does
-                        not use wild-card matching.</para>
-                    <para>In this case, this cluster connection will load balance messages sent to
-                        address that start with <literal>jms</literal>. This cluster connection,
+                    <para><literal>address</literal> Each cluster connection only applies to addresses that match the
+                        specified address field.  An address is matched on the cluster connection when it begins with the
+                        string specified in this field.  The address field on a cluster connection also supports comma
+                        separated lists and an exclude syntax '!'.  To prevent an address from being matched on this
+                        cluster connection, prepend a cluster connection address string with '!'.</para>
+                    <para>In the case shown above the cluster connection will load balance messages sent to
+                        addresses that start with <literal>jms</literal>. This cluster connection,
                         will, in effect apply to all JMS queues and topics since they map to core
                         queues that start with the substring "jms".</para>
                     <para>The address can be any value and you can have many cluster connections
@@ -611,6 +613,24 @@ ClientSession session = factory.createSession();</programlisting>
                         values of <literal>address</literal>, e.g. "europe" and "europe.news" since
                         this could result in the same messages being distributed between more than
                         one cluster connection, possibly resulting in duplicate deliveries.</para>
+                   <para>
+                      Examples:
+                      <itemizedlist>
+                         <listitem><literal>'jms.eu'</literal> matches all addresses starting with 'jms.eu'</listitem>
+                         <listitem><literal>'!jms.eu'</literal>  matches all addresses except for those starting with
+                             'jms.eu'</listitem>
+                         <listitem><literal>'jms.eu.uk,jms.eu.de'</literal>  matches all addresses starting with either
+                             'jms.eu.uk' or 'jms.eu.de'</listitem>
+                         <listitem><literal>'jms.eu,!jms.eu.uk'</literal>  matches all addresses starting with 'jms.eu'
+                              but not those starting with 'jms.eu.uk'</listitem>
+                      </itemizedlist>
+                      Notes:
+                      <itemizedlist>
+                         <listitem>Address exclusion will always take precedence over address inclusion.</listitem>
+                         <listitem>Address matching on cluster connections does not support wild-card matching.
+                         </listitem>
+                      </itemizedlist>
+                   </para>
                     <para>This parameter is mandatory.</para>
                 </listitem>
                 <listitem>

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/configuring-transports.xml
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/configuring-transports.xml b/docs/user-manual/en/configuring-transports.xml
index 066e783..d2f4143 100644
--- a/docs/user-manual/en/configuring-transports.xml
+++ b/docs/user-manual/en/configuring-transports.xml
@@ -177,10 +177,10 @@ etc</programlisting>
             Java IO, or NIO (non-blocking), also to use straightforward TCP sockets, SSL, or to
             tunnel over HTTP or HTTPS..</para>
         <para>We believe this caters for the vast majority of transport requirements.</para>
-        <section>
+        <section id="configuring-transports.single-port">
             <title>Single Port Support</title>
             <para>As of version 2.4 HornetQ now supports using a single port for all protocols, HornetQ will automatically
-            detect which protocol is being used CORE, AMQP or STOMP and use the appropriate HornetQ handler. It will also detect
+            detect which protocol is being used (CORE, AMQP, STOMP or OPENWIRE) and use the appropriate HornetQ handler. It will also detect
             whether protocols such as HTTP or Web Sockets are being used and also use the appropriate decoders</para>
             <para>It is possible to limit which protocols are supported by using the <literal>protocols</literal> parameter
             on the Acceptor like so:</para>

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/core-bridges.xml
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/core-bridges.xml b/docs/user-manual/en/core-bridges.xml
index 9004e7f..822510a 100644
--- a/docs/user-manual/en/core-bridges.xml
+++ b/docs/user-manual/en/core-bridges.xml
@@ -197,11 +197,13 @@
                     connection used to forward messages to the target node. This attribute is
                     described in section <xref linkend="client-reconnection"/></para>
 
-                 <warning><para>When using the bridge to forward messages from a queue which has a
-                    max-size-bytes set it's important that confirmation-window-size is less than
-                    or equal to <literal>max-size-bytes</literal> to prevent the flow of
-                    messages from ceasing.</para>
-                 </warning>
+                <warning><para>When using the bridge to forward messages to an address which uses
+                    the <literal>BLOCK</literal> <literal>address-full-policy</literal> from a
+                    queue which has a <literal>max-size-bytes</literal> set it's important that
+                    <literal>confirmation-window-size</literal> is less than or equal to
+                    <literal>max-size-bytes</literal> to prevent the flow of messages from
+                    ceasing.</para>
+                </warning>
 
             </listitem>
             <listitem>

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/diagrams/ha-colocated.odg
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/diagrams/ha-colocated.odg b/docs/user-manual/en/diagrams/ha-colocated.odg
new file mode 100644
index 0000000..e464bb7
Binary files /dev/null and b/docs/user-manual/en/diagrams/ha-colocated.odg differ

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/diagrams/ha-scaledown.odg
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/diagrams/ha-scaledown.odg b/docs/user-manual/en/diagrams/ha-scaledown.odg
new file mode 100644
index 0000000..933829f
Binary files /dev/null and b/docs/user-manual/en/diagrams/ha-scaledown.odg differ

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/examples.xml
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/examples.xml b/docs/user-manual/en/examples.xml
index cec4021..d202c89 100644
--- a/docs/user-manual/en/examples.xml
+++ b/docs/user-manual/en/examples.xml
@@ -401,6 +401,11 @@
                  sessions are used, once and only once message delivery is not guaranteed and it is possible
                  that some messages will be lost or delivered twice.</para>
         </section>
+        <section id="examples.openwire">
+            <title>OpenWire</title>
+            <para>The <literal>Openwire</literal> example shows how to configure a HornetQ
+            server to communicate with an ActiveMQ JMS client that uses open-wire protocol.</para>
+        </section>
         <section id="examples.paging">
             <title>Paging</title>
             <para>The <literal>paging</literal> example shows how HornetQ can support huge queues

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/ha.xml
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/ha.xml b/docs/user-manual/en/ha.xml
index f4b2d2b..f6772f4 100644
--- a/docs/user-manual/en/ha.xml
+++ b/docs/user-manual/en/ha.xml
@@ -30,7 +30,6 @@
     <para>A part of high availability is <emphasis>failover</emphasis> which we define as the
        <emphasis>ability for client connections to migrate from one server to another in event of
           server failure so client applications can continue to operate</emphasis>.</para>
-
     <section>
         <title>Live - Backup Groups</title>
 
@@ -48,14 +47,71 @@
            live server goes down, if the current live server is configured to allow automatic failback
            then it will detect the live server coming back up and automatically stop.</para>
 
-        <section id="ha.mode">
-            <title>HA modes</title>
+        <section id="ha.policies">
+            <title>HA Policies</title>
             <para>HornetQ supports two different strategies for backing up a server <emphasis>shared
-               store</emphasis> and <emphasis>replication</emphasis>.</para>
+               store</emphasis> and <emphasis>replication</emphasis>. Which is configured via the
+               <literal>ha-policy</literal> configuration element.</para>
+           <programlisting>
+&lt;ha-policy>
+  &lt;replication/>
+&lt;/ha-policy>
+           </programlisting>
+           <para>
+              or
+           </para>
+           <programlisting>
+&lt;ha-policy>
+   &lt;shared-store/>
+&lt;/ha-policy>
+           </programlisting>
+           <para>
+              As well as these 2 strategies there is also a 3rd called <literal>live-only</literal>. This of course means there
+              will be no Backup Strategy and is the default if none is provided, however this is used to configure
+              <literal>scale-down</literal> which we will cover in a later chapter.
+           </para>
+           <note>
+              <para>
+                 The <literal>ha-policy</literal> configuration replaces any current HA configuration in the root of the
+                 <literal>hornetq-configuration.xml</literal> configuration. All old configuration is now deprecated, although
+                 best efforts will be made to honour it if configured this way.
+              </para>
+           </note>
             <note>
                 <para>Only persistent message data will survive failover. Any non persistent message
                    data will not be available after failover.</para>
             </note>
+           <para>The <literal>ha-policy</literal> type configures which strategy a cluster should use to provide the
+              backing up of a servers data. Within this configuration element is configured how a server should behave
+              within the cluster, either as a master (live), slave (backup) or colocated (both live and backup). This
+              would look something like: </para>
+           <programlisting>
+&lt;ha-policy>
+   &lt;replication>
+      &lt;master/>
+   &lt;/replication>
+&lt;/ha-policy>
+           </programlisting>
+           <para>
+              or
+           </para>
+           <programlisting>
+&lt;ha-policy>
+   &lt;shared-store>
+      &lt;slave/>
+   &lt;/shared-store>
+&lt;/ha-policy>
+           </programlisting>
+           <para>
+              or
+           </para>
+           <programlisting>
+&lt;ha-policy>
+   &lt;replication>
+      &lt;colocated/>
+   &lt;/replication>
+&lt;/ha-policy>
+           </programlisting>
         </section>
 
         <section id="ha.mode.replicated">
@@ -81,7 +137,7 @@
                the one at the live's storage. If you configure your live server to perform a
                <xref linkend="ha.allow-fail-back">'fail-back'</xref> when restarted, it will synchronize
                its data with the backup's. If both servers are shutdown, the administrator will have
-               to determine which one has the lastest data.</para>
+               to determine which one has the latest data.</para>
 
             <para>The replicating live and backup pair must be part of a cluster.  The Cluster
                Connection also defines how backup servers will find the remote live servers to pair
@@ -104,39 +160,40 @@
             <itemizedlist>
                 <listitem>
                     <para><literal>specifying a node group</literal>. You can specify a group of live servers that a backup
-                       server can connect to. This is done by configuring <literal>backup-group-name</literal> in the main
+                       server can connect to. This is done by configuring <literal>group-name</literal> in either the <literal>master</literal>
+                       or the <literal>slave</literal> element of the
                        <literal>hornetq-configuration.xml</literal>. A Backup server will only connect to a live server that
                        shares the same node group name</para>
                 </listitem>
                 <listitem>
-                   <para><literal>connecting to any live</literal>. Simply put not configuring <literal>backup-group-name</literal>
-                      will allow a backup server to connect to any live server</para>
+                   <para><literal>connecting to any live</literal>. This will be the behaviour if <literal>group-name</literal>
+                      is not configured allowing a backup server to connect to any live server</para>
                 </listitem>
             </itemizedlist>
             <note>
-                <para>A <literal>backup-group-name</literal> example: suppose you have 5 live servers and 6 backup
+                <para>A <literal>group-name</literal> example: suppose you have 5 live servers and 6 backup
                    servers:</para>
                 <itemizedlist>
                     <listitem>
                         <para><literal>live1</literal>, <literal>live2</literal>, <literal>live3</literal>: with
-                           <literal>backup-group-name=fish</literal></para>
+                           <literal>group-name=fish</literal></para>
                     </listitem>
                     <listitem>
-                       <para><literal>live4</literal>, <literal>live5</literal>: with <literal>backup-group-name=bird</literal></para>
+                       <para><literal>live4</literal>, <literal>live5</literal>: with <literal>group-name=bird</literal></para>
                     </listitem>
                     <listitem>
                        <para><literal>backup1</literal>, <literal>backup2</literal>, <literal>backup3</literal>,
-                          <literal>backup4</literal>: with <literal>backup-group-name=fish</literal></para>
+                          <literal>backup4</literal>: with <literal>group-name=fish</literal></para>
                     </listitem>
                     <listitem>
                        <para><literal>backup5</literal>, <literal>backup6</literal>: with
-                          <literal>backup-group-name=bird</literal></para>
+                          <literal>group-name=bird</literal></para>
                     </listitem>
                 </itemizedlist>
-                <para>After joining the cluster the backups with <literal>backup-group-name=fish</literal> will
-                   search for live servers with <literal>backup-group-name=fish</literal> to pair with. Since there
+                <para>After joining the cluster the backups with <literal>group-name=fish</literal> will
+                   search for live servers with <literal>group-name=fish</literal> to pair with. Since there
                    is one backup too many, the <literal>fish</literal> will remain with one spare backup.</para>
-                <para>The 2 backups with <literal>backup-group-name=bird</literal> (<literal>backup5</literal> and
+                <para>The 2 backups with <literal>group-name=bird</literal> (<literal>backup5</literal> and
                    <literal>backup6</literal>) will pair with live servers <literal>live4</literal> and
                    <literal>live5</literal>.</para>
             </note>
@@ -145,14 +202,14 @@
                configured. If no live server is available it will wait until the cluster topology changes and
                repeats the process.</para>
             <note>
-               <para>This is an important distinction from a shared-store backup, as in that case if
-                  the backup starts and does not find its live server, the server will just activate
-                  and start to serve client requests. In the replication case, the backup just keeps
-                  waiting for a live server to pair with. Notice that in replication the backup server
+               <para>This is an important distinction from a shared-store backup, if a backup starts and does not find
+                  a live server, the server will just activate and start to serve client requests.
+                  In the replication case, the backup just keeps
+                  waiting for a live server to pair with. Note that in replication the backup server
                   does not know whether any data it might have is up to date, so it really cannot
                   decide to activate automatically. To activate a replicating backup server using the data
-                  it has, the administrator must change its configuration to make a live server of it,
-                  that change <literal>backup=true</literal> to <literal>backup=false</literal>.</para>
+                  it has, the administrator must change its configuration to make it a live server by changing
+                  <literal>slave</literal> to <literal>master</literal>.</para>
             </note>
 
             <para>Much like in the shared-store case, when the live server stops or crashes,
@@ -169,12 +226,14 @@
                 <title>Configuration</title>
 
                 <para>To configure the live and backup servers to be a replicating pair, configure
-                   both servers' <literal>hornetq-configuration.xml</literal> to have:</para>
+                   the live server in <literal>hornetq-configuration.xml</literal> to have:</para>
 
                 <programlisting>
-&lt;!-- FOR BOTH LIVE AND BACKUP SERVERS' -->
-&lt;shared-store>false&lt;/shared-store>
-.
+&lt;ha-policy>
+   &lt;replication>
+      &lt;master/>
+   &lt;/replication>
+&lt;/ha-policy>
 .
 &lt;cluster-connections>
    &lt;cluster-connection name="my-cluster">
@@ -183,12 +242,95 @@
 &lt;/cluster-connections>
                 </programlisting>
 
-                <para>The backup server must also be configured as a backup.</para>
+                <para>The backup server must be similarly configured but as a <literal>slave</literal></para>
 
                 <programlisting>
-&lt;backup>true&lt;/backup>
-</programlisting>
+&lt;ha-policy>
+   &lt;replication>
+      &lt;slave/>
+   &lt;/replication>
+&lt;/ha-policy></programlisting>
             </section>
+           <section>
+              <title>All Replication Configuration</title>
+
+              <para>The following table lists all the <literal>ha-policy</literal> configuration elements for HA strategy
+                 Replication for <literal>master</literal>:</para>
+              <table>
+                 <tgroup cols="2">
+                    <colspec colname="c1" colnum="1"/>
+                    <colspec colname="c2" colnum="2"/>
+                    <thead>
+                       <row>
+                          <entry>name</entry>
+                          <entry>Description</entry>
+                       </row>
+                    </thead>
+                    <tbody>
+                       <row>
+                          <entry><literal>check-for-live-server</literal></entry>
+                          <entry>Whether to check the cluster for a (live) server using our own server ID when starting
+                             up. This option is only necessary for performing 'fail-back' on replicating servers.</entry>
+                       </row>
+                       <row>
+                          <entry><literal>cluster-name</literal></entry>
+                          <entry>Name of the cluster configuration to use for replication. This setting is only necessary if you
+                             configure multiple cluster connections. If configured then the connector configuration of the
+                             cluster configuration with this name will be used when connecting to the cluster to discover
+                          if a live server is already running, see <literal>check-for-live-server</literal>. If unset then
+                          the default cluster connections configuration is used (the first one configured)</entry>
+                       </row>
+                       <row>
+                          <entry><literal>group-name</literal></entry>
+                          <entry>If set, backup servers will only pair with live servers with matching group-name</entry>
+                       </row>
+                    </tbody>
+                 </tgroup>
+              </table>
+              <para>The following table lists all the <literal>ha-policy</literal> configuration elements for HA strategy
+                 Replication for <literal>slave</literal>:</para>
+              <table>
+                 <tgroup cols="2">
+                    <colspec colname="c1" colnum="1"/>
+                    <colspec colname="c2" colnum="2"/>
+                    <thead>
+                       <row>
+                          <entry>name</entry>
+                          <entry>Description</entry>
+                       </row>
+                    </thead>
+                    <tbody>
+                       <row>
+                          <entry><literal>cluster-name</literal></entry>
+                          <entry>Name of the cluster configuration to use for replication. This setting is only necessary if you
+                             configure multiple cluster connections. If configured then the connector configuration of the
+                             cluster configuration with this name will be used when connecting to the cluster to discover
+                             if a live server is already running, see <literal>check-for-live-server</literal>. If unset then
+                             the default cluster connections configuration is used (the first one configured)</entry>
+                       </row>
+                       <row>
+                          <entry><literal>group-name</literal></entry>
+                          <entry>If set, backup servers will only pair with live servers with matching group-name</entry>
+                       </row>
+                       <row>
+                          <entry><literal>max-saved-replicated-journals-size</literal></entry>
+                          <entry>This specifies how many times a replicated backup server can restart after moving its files on start.
+                             Once there are this number of backup journal files the server will stop permanently after it fails
+                             back.</entry>
+                       </row>
+                       <row>
+                          <entry><literal>allow-failback</literal></entry>
+                          <entry>Whether a server will automatically stop when another server places a request to take over
+                             its place. The use case is when the backup has failed over </entry>
+                       </row>
+                       <row>
+                          <entry><literal>failback-delay</literal></entry>
+                          <entry>delay to wait before fail-back occurs on (failed over live's) restart</entry>
+                       </row>
+                    </tbody>
+                 </tgroup>
+              </table>
+           </section>
         </section>
 
         <section id="ha.mode.shared">
@@ -213,20 +355,37 @@
                the shared store which can take some time depending on the amount of data in the
                store.</para>
             <para>If you require the highest performance during normal operation, have access to
-               a fast SAN, and can live with a slightly slower failover (depending on amount of
-               data), we recommend shared store high availability</para>
+               a fast SAN, and can live with a slightly slower failover (depending on amount of
+               data), then shared store high availability is recommended.</para>
             <graphic fileref="images/ha-shared-store.png" align="center"/>
 
             <section id="ha/mode.shared.configuration">
                 <title>Configuration</title>
                 <para>To configure the live and backup servers to share their store, configure
-                   all <literal>hornetq-configuration.xml</literal>:</para>
-                <programlisting>
-&lt;shared-store>true&lt;/shared-store>
-                </programlisting>
-                <para>Additionally, each backup server must be flagged explicitly as a backup:</para>
-                <programlisting>
-&lt;backup>true&lt;/backup></programlisting>
+                   it via the <literal>ha-policy</literal> configuration in <literal>hornetq-configuration.xml</literal>:</para>
+               <programlisting>
+&lt;ha-policy>
+   &lt;shared-store>
+      &lt;master/>
+   &lt;/shared-store>
+&lt;/ha-policy>
+.
+&lt;cluster-connections>
+   &lt;cluster-connection name="my-cluster">
+...
+   &lt;/cluster-connection>
+&lt;/cluster-connections>
+               </programlisting>
+
+               <para>The backup server must also be configured as a backup.</para>
+
+               <programlisting>
+&lt;ha-policy>
+   &lt;shared-store>
+      &lt;slave/>
+   &lt;/shared-store>
+&lt;/ha-policy>
+               </programlisting>
                 <para>In order for live - backup groups to operate properly with a shared store,
                    both servers must have configured the location of journal directory to point
                    to the <emphasis>same shared location</emphasis> (as explained in
@@ -244,14 +403,57 @@
             <title>Failing Back to live Server</title>
             <para>After a live server has failed and a backup taken has taken over its duties, you may want to
                restart the live server and have clients fail back.</para>
-            <para>In case of "shared disk", simply restart the original live
-               server and kill the new live server. You can do this by killing the process itself or just waiting for the server to crash naturally.</para>
-            <para>In case of a replicating live server that has been replaced by a remote backup you will need to also set <link linkend="hq.check-for-live-server">check-for-live-server</link>. This option is necessary because a starting server cannot know whether there is a (remote) server running in its place, so with this option set, the server will check the cluster for another server using its node-ID and if it finds one it will try initiate a fail-back. This option only applies to live servers that are restarting, it is ignored by backup servers.</para>
-            <para>It is also possible to cause failover to occur on normal server shutdown, to enable
-               this set the following property to true in the <literal>hornetq-configuration.xml</literal>
-               configuration file like so:</para>
+            <para>In case of "shared disk", simply restart the original live server and kill the new live server. You can
+               do this by killing the process itself. Alternatively you can set <literal>allow-fail-back</literal> to
+               <literal>true</literal> on the slave config which will force the backup that has become live to automatically
+               stop. This configuration would look like:</para>
+           <programlisting>
+&lt;ha-policy>
+   &lt;shared-store>
+      &lt;slave>
+         &lt;allow-failback>true&lt;/allow-failback>
+         &lt;failback-delay>5000&lt;/failback-delay>
+      &lt;/slave>
+   &lt;/shared-store>
+&lt;/ha-policy>
+           </programlisting>
+           <para>The <literal>failback-delay</literal> configures how long the backup must wait after automatically
+              stopping before it restarts. This is to give the live server time to start and obtain its lock.</para>
+           <para id="hq.check-for-live-server">In replication HA mode you need to set an extra property <literal>check-for-live-server</literal>
+              to <literal>true</literal> in the <literal>master</literal> configuration. If set to true, during start-up
+              a live server will first search the cluster for another server using its nodeID. If it finds one, it will
+              contact this server and try to "fail-back". Since this is a remote replication scenario, the "starting live"
+              will have to synchronize its data with the server running with its ID, once they are in sync, it will
+              request the other server (which it assumes is a backup that has assumed its duties) to shutdown for it to
+              take over. This is necessary because otherwise the live server has no means to know whether there was a
+              fail-over or not, and, if there was, whether the server that took its duties is still running or not.
+              To configure this option at your <literal>hornetq-configuration.xml</literal> configuration file as follows:</para>
+           <programlisting>
+&lt;ha-policy>
+   &lt;replication>
+      &lt;master>
+         &lt;check-for-live-server>true&lt;/check-for-live-server>
+      &lt;/master>
+   &lt;/replication>
+&lt;/ha-policy></programlisting>
+           <warning>
+              <para>
+                 Be aware that if you restart a live server after failover has occurred then this value must be
+                 set to <literal><emphasis role="bold">true</emphasis></literal>. If not, the live server will restart and serve the same
+                 messages that the backup has already handled, causing duplicates.
+              </para>
+           </warning>
+            <para>It is also possible, in the case of shared store, to cause failover to occur on normal server shutdown,
+               to enable this set the following property to true in the <literal>ha-policy</literal> configuration on either
+               the <literal>master</literal> or <literal>slave</literal> like so:</para>
             <programlisting>
-&lt;failover-on-shutdown>true&lt;/failover-on-shutdown></programlisting>
+&lt;ha-policy>
+   &lt;shared-store>
+      &lt;master>
+         &lt;failover-on-shutdown>true&lt;/failover-on-shutdown>
+      &lt;/master>
+   &lt;/shared-store>
+&lt;/ha-policy></programlisting>
             <para>By default this is set to false, if by some chance you have set this to false but still
                want to stop the server normally and cause failover then you can do this by using the management
                API as explained at <xref linkend="management.core.server"/></para>
@@ -259,39 +461,284 @@
                the original live server to take over automatically by setting the following property in the
                <literal>hornetq-configuration.xml</literal> configuration file as follows:</para>
             <programlisting>
-&lt;allow-failback>true&lt;/allow-failback></programlisting>
-            <para id="hq.check-for-live-server">In replication HA mode you need to set an extra property <literal>check-for-live-server</literal>
-               to <literal>true</literal>. If set to true, during start-up a live server will first search the cluster for another server using its nodeID. If it finds one, it will contact this server and try to "fail-back". Since this is a remote replication scenario, the "starting live" will have to synchronize its data with the server running with its ID, once they are in sync, it will request the other server (which it assumes it is a back that has assumed its duties) to shutdown for it to take over. This is necessary because otherwise the live server has no means to know whether there was a fail-over or not, and if there was if the server that took its duties is still running or not. To configure this option at your <literal>hornetq-configuration.xml</literal> configuration file as follows:</para>
-            <programlisting>
-&lt;check-for-live-server>true&lt;/check-for-live-server></programlisting>
+&lt;ha-policy>
+   &lt;shared-store>
+      &lt;slave>
+         &lt;allow-failback>true&lt;/allow-failback>
+      &lt;/slave>
+   &lt;/shared-store>
+&lt;/ha-policy></programlisting>
+
+           <section>
+              <title>All Shared Store Configuration</title>
+
+              <para>The following table lists all the <literal>ha-policy</literal> configuration elements for HA strategy
+                 shared store for <literal>master</literal>:</para>
+              <table>
+                 <tgroup cols="2">
+                    <colspec colname="c1" colnum="1"/>
+                    <colspec colname="c2" colnum="2"/>
+                    <thead>
+                       <row>
+                          <entry>name</entry>
+                          <entry>Description</entry>
+                       </row>
+                    </thead>
+                    <tbody>
+                       <row>
+                          <entry><literal>failback-delay</literal></entry>
+                          <entry>If a backup server is detected as being live, via the lock file, then the live server
+                          will announce itself as a backup and wait this amount of time (in ms) before starting as
+                          a live</entry>
+                       </row>
+                       <row>
+                          <entry><literal>failover-on-server-shutdown</literal></entry>
+                          <entry>If set to true then when this server is stopped normally the backup will become live,
+                          assuming failover. If false then the backup server will remain passive. Note that if this is false
+                             and you want failover to occur then you can use the management API as explained at <xref linkend="management.core.server"/></entry>
+                       </row>
+                    </tbody>
+                 </tgroup>
+              </table>
+              <para>The following table lists all the <literal>ha-policy</literal> configuration elements for HA strategy
+                 Shared Store for <literal>slave</literal>:</para>
+              <table>
+                 <tgroup cols="2">
+                    <colspec colname="c1" colnum="1"/>
+                    <colspec colname="c2" colnum="2"/>
+                    <thead>
+                       <row>
+                          <entry>name</entry>
+                          <entry>Description</entry>
+                       </row>
+                    </thead>
+                    <tbody>
+                       <row>
+                          <entry><literal>failover-on-server-shutdown</literal></entry>
+                          <entry>In the case of a backup that has become live: when set to true, then when this server
+                             is stopped normally the backup will become live, assuming failover. If false then the backup
+                             server will remain passive. Note that if this is false and you want failover to occur then you can use
+                             the management API as explained at <xref linkend="management.core.server"/></entry>
+                       </row>
+                       <row>
+                          <entry><literal>allow-failback</literal></entry>
+                          <entry>Whether a server will automatically stop when another server places a request to take over
+                             its place. The use case is when the backup has failed over.</entry>
+                       </row>
+                       <row>
+                          <entry><literal>failback-delay</literal></entry>
+                          <entry>After failover, when the slave has become live, this is set on the new live server.
+                             When starting, if a backup server is detected as being live, via the lock file, then the live server
+                             will announce itself as a backup and wait this amount of time (in ms) before starting as
+                             a live; however this is unlikely since this backup has just stopped anyway. It is also used
+                          as the delay after failback before this backup will restart (if <literal>allow-failback</literal>
+                          is set to true).</entry>
+                       </row>
+                    </tbody>
+                 </tgroup>
+              </table>
+           </section>
+
         </section>
         <section id="ha.colocated">
             <title>Colocated Backup Servers</title>
             <para>It is also possible when running standalone to colocate backup servers in the same
-                JVM as another live server.The colocated backup will become a backup for another live
-                server in the cluster but not the one it shares the vm with. To configure a colocated
-                backup server simply add the following to the <literal>hornetq-configuration.xml</literal> file</para>
+                JVM as another live server. Live Servers can be configured to request another live server in the cluster
+                to start a backup server in the same JVM either using shared store or replication. The new backup server
+                will inherit its configuration from the live server creating it apart from its name, which will be set to
+                <literal>colocated_backup_n</literal> where n is the number of backups the server has created, and any directories
+                 and its Connectors and Acceptors which are discussed later on in this chapter. A live server can also
+                be configured to allow requests from backups and also how many backups a live server can start. This way
+                you can evenly distribute backups around the cluster. This is configured via the <literal>ha-policy</literal>
+                element in the <literal>hornetq-configuration.xml</literal> file like so:</para>
             <programlisting>
-&lt;backup-servers>
-    &lt;backup-server name="backup2" inherit-configuration="true" port-offset="1000">
-        &lt;configuration>
-            &lt;bindings-directory>target/server1/data/messaging/bindings&lt;/bindings-directory>
-            &lt;journal-directory>target/server1/data/messaging/journal&lt;/journal-directory>
-            &lt;large-messages-directory>target/server1/data/messaging/largemessages&lt;/large-messages-directory>
-            &lt;paging-directory>target/server1/data/messaging/paging&lt;/paging-directory>
-        &lt;/configuration>
-    &lt;/backup-server>
-&lt;/backup-servers>
+&lt;ha-policy>
+   &lt;replication>
+      &lt;colocated>
+         &lt;request-backup>true&lt;/request-backup>
+         &lt;max-backups>1&lt;/max-backups>
+         &lt;backup-request-retries>-1&lt;/backup-request-retries>
+         &lt;backup-request-retry-interval>5000&lt;/backup-request-retry-interval>
+         &lt;master/>
+         &lt;slave/>
+      &lt;/colocated>
+   &lt;/replication>
+&lt;/ha-policy>
             </programlisting>
-            <para> you will notice 3 attributes on the <literal>backup-server</literal>, <literal>name</literal>
-                which is a unique name used to identify the backup server, <literal>inherit-configuration</literal>
-            which if set to true means the server will inherit the configuration of its parent server
-            and <literal>port-offset</literal> which is what the port for any netty connectors or
-            acceptors will be increased by if the configuration is inherited.</para>
-            <para>it is also possible to configure the backup server in the normal way, in this example you will
-            notice we have changed the journal directories.</para>
+            <para>The above example is configured to use replication; in this case the <literal>master</literal> and
+            <literal>slave</literal> configurations must match those for normal replication as in the previous chapter.
+            <literal>shared-store</literal> is also supported.</para>
+
+           <graphic fileref="images/ha-colocated.png" align="center"/>
+           <section id="ha.colocated.connectorsandacceptors">
+              <title>Configuring Connectors and Acceptors</title>
+              <para>If the HA Policy is colocated then connectors and acceptors will be inherited from the live server
+                 creating it and offset depending on the setting of <literal>backup-port-offset</literal> configuration element.
+                 If this is set to say 100 (which is the default) and a connector is using port 5445 then this will be
+                 set to 5545 for the first server created, 5645 for the second and so on.</para>
+              <note><para>For INVM connectors and Acceptors the id will have <literal>colocated_backup_n</literal> appended,
+              where n is the backup server number.</para></note>
+              <section id="ha.colocated.connectorsandacceptors.remote">
+                 <title>Remote Connectors</title>
+                 <para>It may be that some of the Connectors configured are for external servers and hence should be excluded from the offset,
+                 for instance a Connector used by the cluster connection to do quorum voting for a replicated backup server.
+                  These can be omitted from being offset by adding them to the <literal>ha-policy</literal> configuration like so:</para>
+                 <programlisting>
+&lt;ha-policy>
+   &lt;replication>
+      &lt;colocated>
+         &lt;excludes>
+            &lt;connector-ref>remote-connector&lt;/connector-ref>
+         &lt;/excludes>
+.........
+&lt;/ha-policy>
+                 </programlisting>
+              </section>
+           </section>
+           <section id="ha.colocated.directories">
+              <title>Configuring Directories</title>
+              <para>Directories for the Journal, Large messages and Paging will be set according to what the HA strategy is.
+              If shared store then the requesting server will notify the target server of which directories to use. If replication
+              is configured then directories will be inherited from the creating server but have the new backup's name
+              appended.</para>
+           </section>
+
+           <para>The following table lists all the <literal>ha-policy</literal> configuration elements:</para>
+           <table>
+              <tgroup cols="2">
+                 <colspec colname="c1" colnum="1"/>
+                 <colspec colname="c2" colnum="2"/>
+                 <thead>
+                    <row>
+                       <entry>name</entry>
+                       <entry>Description</entry>
+                    </row>
+                 </thead>
+                 <tbody>
+                    <row>
+                       <entry><literal>request-backup</literal></entry>
+                       <entry>If true then the server will request a backup on another node</entry>
+                    </row>
+                    <row>
+                       <entry><literal>backup-request-retries</literal></entry>
+                       <entry>How many times the live server will try to request a backup, -1 means forever.</entry>
+                    </row>
+                    <row>
+                       <entry><literal>backup-request-retry-interval</literal></entry>
+                       <entry>How long to wait for retries between attempts to request a backup server.</entry>
+                    </row>
+                    <row>
+                       <entry><literal>max-backups</literal></entry>
+                       <entry>How many backup servers this live server will create in response to requests from other live servers.</entry>
+                    </row>
+                    <row>
+                       <entry><literal>backup-port-offset</literal></entry>
+                       <entry>The offset to use for the Connectors and Acceptors when creating a new backup server.</entry>
+                    </row>
+                 </tbody>
+              </tgroup>
+           </table>
         </section>
     </section>
+   <section id="ha.scaledown">
+      <title>Scaling Down</title>
+      <para>An alternative to using Live/Backup groups is to configure scale down. When configured for scale down, a server
+      can copy all its messages and transaction state to another live server. The advantage of this is that you don't need
+      full backups to provide some form of HA, however there are disadvantages with this approach, the first being that it
+         only deals with a server being stopped and not a server crash. The caveat here is if you configure a backup to scale down. </para>
+      <para>Another disadvantage is that it is possible to lose message ordering. This happens in the following scenario,
+      say you have 2 live servers and messages are distributed evenly between the servers from a single producer, if one
+         of the servers scales down then the messages sent back to the other server will be in the queue after the ones
+         already there, so server 1 could have messages 1,3,5,7,9 and server 2 would have 2,4,6,8,10, if server 2 scales
+         down the order in server 1 would be 1,3,5,7,9,2,4,6,8,10.</para>
+      <graphic fileref="images/ha-scaledown.png" align="center"/>
+      <para>The configuration for a live server to scale down would be something like:</para>
+      <programlisting>
+&lt;ha-policy>
+   &lt;live-only>
+      &lt;scale-down>
+         &lt;connectors>
+            &lt;connector-ref>server1-connector&lt;/connector-ref>
+         &lt;/connectors>
+      &lt;/scale-down>
+   &lt;/live-only>
+&lt;/ha-policy>
+      </programlisting>
+      <para>In this instance the server is configured to use a specific connector to scale down, if a connector is not
+         specified then the first INVM connector is chosen, this is to make scale down from a backup server easy to configure.
+         It is also possible to use discovery to scale down, this would look like:</para>
+      <programlisting>
+&lt;ha-policy>
+   &lt;live-only>
+      &lt;scale-down>
+         &lt;discovery-group>my-discovery-group&lt;/discovery-group>
+      &lt;/scale-down>
+   &lt;/live-only>
+&lt;/ha-policy>
+      </programlisting>
+      <section id="ha.scaledown.group">
+         <title>Scale Down with groups</title>
+         <para>It is also possible to configure servers to only scale down to servers that belong in the same group. This
+         is done by configuring the group like so:</para>
+         <programlisting>
+&lt;ha-policy>
+   &lt;live-only>
+      &lt;scale-down>
+         ...
+         &lt;group-name>my-group&lt;/group-name>
+      &lt;/scale-down>
+   &lt;/live-only>
+&lt;/ha-policy>
+         </programlisting>
+         <para>In this scenario only servers that belong to the group <literal>my-group</literal> will be scaled down to.</para>
+      </section>
+      <section>
+         <title>Scale Down and Backups</title>
+         <para>It is also possible to mix scale down with HA via backup servers. If a slave is configured to scale down
+         then after failover has occurred, instead of starting fully the backup server will immediately scale down to
+         another live server. The most appropriate configuration for this is using the <literal>colocated</literal> approach.
+         It means as you bring up live servers they will automatically be backed up, and as live servers are
+         shut down, their messages are made available on another live server. A typical configuration would look like:</para>
+         <programlisting>
+&lt;ha-policy>
+   &lt;replication>
+      &lt;colocated>
+         &lt;backup-request-retries>44&lt;/backup-request-retries>
+         &lt;backup-request-retry-interval>33&lt;/backup-request-retry-interval>
+         &lt;max-backups>3&lt;/max-backups>
+         &lt;request-backup>false&lt;/request-backup>
+         &lt;backup-port-offset>33&lt;/backup-port-offset>
+         &lt;master>
+            &lt;group-name>purple&lt;/group-name>
+            &lt;check-for-live-server>true&lt;/check-for-live-server>
+            &lt;cluster-name>abcdefg&lt;/cluster-name>
+         &lt;/master>
+         &lt;slave>
+            &lt;group-name>tiddles&lt;/group-name>
+            &lt;max-saved-replicated-journals-size>22&lt;/max-saved-replicated-journals-size>
+            &lt;cluster-name>33rrrrr&lt;/cluster-name>
+            &lt;restart-backup>false&lt;/restart-backup>
+            &lt;scale-down>
+               &lt;!--a grouping of servers that can be scaled down to-->
+               &lt;group-name>boo!&lt;/group-name>
+               &lt;!--either a discovery group-->
+               &lt;discovery-group>wahey&lt;/discovery-group>
+            &lt;/scale-down>
+         &lt;/slave>
+      &lt;/colocated>
+   &lt;/replication>
+&lt;/ha-policy>
+         </programlisting>
+      </section>
+   <section id="ha.scaledown.client">
+      <title>Scale Down and Clients</title>
+      <para>When a server is stopping and preparing to scale down it will send a message to all its clients informing them
+      which server it is scaling down to before disconnecting them. At this point the client will reconnect however this
+      will only succeed once the server has completed scaledown. This is to ensure that any state such as queues or transactions
+      are there for the client when it reconnects. The normal reconnect settings apply when the client is reconnecting so
+      these should be high enough to deal with the time needed to scale down.</para>
+      </section>
+   </section>
     <section id="failover">
         <title>Failover Modes</title>
         <para>HornetQ defines two types of client failover:</para>

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/images/ha-colocated.png
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/images/ha-colocated.png b/docs/user-manual/en/images/ha-colocated.png
new file mode 100644
index 0000000..e7b2d30
Binary files /dev/null and b/docs/user-manual/en/images/ha-colocated.png differ

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/images/ha-scaledown.png
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/images/ha-scaledown.png b/docs/user-manual/en/images/ha-scaledown.png
new file mode 100644
index 0000000..b33f5ce
Binary files /dev/null and b/docs/user-manual/en/images/ha-scaledown.png differ

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/interoperability.xml
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/interoperability.xml b/docs/user-manual/en/interoperability.xml
index e4261d7..68e2962 100644
--- a/docs/user-manual/en/interoperability.xml
+++ b/docs/user-manual/en/interoperability.xml
@@ -285,4 +285,23 @@ java.naming.factory.url.pkgs=org.jboss.naming:org.jnp.interfaces</programlisting
                 however in this version HornetQ will only support single transactions per session</para></note>
         </section>
     </section>
+    <section>
+        <title>OpenWire</title>
+        <para>HornetQ now supports the <ulink url="http://activemq.apache.org/openwire.html">OpenWire</ulink>
+        protocol so that an ActiveMQ JMS client can talk directly to a HornetQ server. To enable OpenWire support
+        you must configure a Netty Acceptor, like so:</para>
+        <programlisting>
+&lt;acceptor name="openwire-acceptor">
+&lt;factory-class>org.hornetq.core.remoting.impl.netty.NettyAcceptorFactory&lt;/factory-class>
+&lt;param key="protocols"  value="OPENWIRE"/>
+&lt;param key="port"  value="61616"/>
+&lt;/acceptor>
+        </programlisting>
+        <para>The HornetQ server will then listen on port 61616 for incoming openwire commands. Please note the "protocols" is not mandatory here. 
+        The openwire configuration conforms to HornetQ's "Single Port" feature. Please refer to 
+        <link linkend="configuring-transports.single-port">Configuring Single Port</link> for details.</para>
+        <para>Please refer to the openwire example for more coding details.</para>
+        <para>Currently we support ActiveMQ clients that use standard JMS APIs. In the future we will add more support
+        for some advanced, ActiveMQ specific features into HornetQ.</para>
+    </section>
 </chapter>

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/management.xml
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/management.xml b/docs/user-manual/en/management.xml
index be5cd32..c8ec3a3 100644
--- a/docs/user-manual/en/management.xml
+++ b/docs/user-manual/en/management.xml
@@ -148,15 +148,16 @@
                         >core.server</literal>).</para>
                </listitem>
                <listitem>
-                   <para>It is possible to stop the server and force failover to occur with any currently attached clients.</para>
-                   <para>to do this use the <literal>forceFailover()</literal> on the <literal
+                  <para>It is possible to stop the server and force failover to occur with any currently attached clients.</para>
+                  <para>to do this use the <literal>forceFailover()</literal> on the <literal
                         >HornetQServerControl</literal> (with the ObjectName <literal
                         >org.hornetq:module=Core,type=Server</literal> or the resource name <literal
                         >core.server</literal>) </para>
-                   <para>
-                       <note>Since this method actually stops the server you will probably receive some sort of error
-                       depending on which management service you use to call it.</note>
-                   </para>
+                  <note>
+                     <para>Since this method actually stops the server you will probably receive some sort of error
+                        depending on which management service you use to call it.
+                     </para>
+                  </note>
                </listitem>
             </itemizedlist>
          </section>
@@ -834,7 +835,7 @@ notificationConsumer.setMessageListener(new MessageListener()
                how to use a JMS <literal>MessageListener</literal> to receive management notifications
                from HornetQ server.</para>
        </section>
-       <section>
+       <section id="notification.types.and.headers">
            <title>Notification Types and Headers</title>
            <para>Below is a list of all the different kinds of notifications as well as which headers are
                 on the messages.  Every notification has a <literal>_HQ_NotifType</literal> (value noted in parentheses)
@@ -966,6 +967,14 @@ notificationConsumer.setMessageListener(new MessageListener()
                        <literal>_HQ_Address</literal>, <literal>_HQ_Distance</literal></para>
                </listitem>
            </itemizedlist>
+           <itemizedlist>
+               <listitem>
+                   <para><literal>CONSUMER_SLOW</literal> (21)</para>
+                   <para><literal>_HQ_Address</literal>, <literal>_HQ_ConsumerCount</literal>,
+                       <literal>_HQ_RemoteAddress</literal>, <literal>_HQ_ConnectionName</literal>,
+                       <literal>_HQ_ConsumerName</literal>, <literal>_HQ_SessionName</literal></para>
+                </listitem>
+           </itemizedlist>
        </section>
    </section>
    <section id="management.message-counters">

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/queue-attributes.xml
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/queue-attributes.xml b/docs/user-manual/en/queue-attributes.xml
index 8ec06b5..f9b2fd8 100644
--- a/docs/user-manual/en/queue-attributes.xml
+++ b/docs/user-manual/en/queue-attributes.xml
@@ -109,6 +109,9 @@
       &lt;redistribution-delay>0&lt;/redistribution-delay>
       &lt;send-to-dla-on-no-route>true&lt;/send-to-dla-on-no-route>
       &lt;address-full-policy>PAGE&lt;/address-full-policy>
+      &lt;slow-consumer-threshold>-1&lt;/slow-consumer-threshold>
+      &lt;slow-consumer-policy>NOTIFY&lt;/slow-consumer-policy>
+      &lt;slow-consumer-check-period>5&lt;/slow-consumer-check-period>
    &lt;/address-setting>
 &lt;/address-settings></programlisting>
         <para>The idea with address settings, is you can provide a block of settings which will be
@@ -154,7 +157,16 @@
         
         See the following chapters for more info <xref linkend="flow-control"/>, <xref linkend="paging"/>.
         </para>
-        
-   
+       <para><literal>slow-consumer-threshold</literal>. The minimum rate of message consumption allowed before a
+          consumer is considered "slow." Measured in messages-per-second. Default is -1 (i.e. disabled); any other valid
+          value must be greater than 0.</para>
+       <para><literal>slow-consumer-policy</literal>. What should happen when a slow consumer is detected.
+          <literal>KILL</literal> will kill the consumer's connection (which will obviously impact any other client
+          threads using that same connection). <literal>NOTIFY</literal> will send a CONSUMER_SLOW management
+          notification which an application could receive and take action with. See
+          <xref linkend="notification.types.and.headers"/> for more details on this notification.</para>
+       <para><literal>slow-consumer-check-period</literal>. How often to check for slow consumers on a particular queue.
+          Measured in minutes. Default is 5. See <xref linkend="slow-consumers"/> for more information about slow
+          consumer detection.</para>
     </section>
 </chapter>

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/docs/user-manual/en/slow-consumers.xml
----------------------------------------------------------------------
diff --git a/docs/user-manual/en/slow-consumers.xml b/docs/user-manual/en/slow-consumers.xml
new file mode 100644
index 0000000..aef287d
--- /dev/null
+++ b/docs/user-manual/en/slow-consumers.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- ============================================================================= -->
+<!-- Copyright © 2009 Red Hat, Inc. and others.                                    -->
+<!--                                                                               -->
+<!-- The text of and illustrations in this document are licensed by Red Hat under  -->
+<!-- a Creative Commons Attribution–Share Alike 3.0 Unported license ("CC-BY-SA"). -->
+<!--                                                                               -->
+<!-- An explanation of CC-BY-SA is available at                                    -->
+<!--                                                                               -->
+<!--            http://creativecommons.org/licenses/by-sa/3.0/.                    -->
+<!--                                                                               -->
+<!-- In accordance with CC-BY-SA, if you distribute this document or an adaptation -->
+<!-- of it, you must provide the URL for the original version.                     -->
+<!--                                                                               -->
+<!-- Red Hat, as the licensor of this document, waives the right to enforce,       -->
+<!-- and agrees not to assert, Section 4d of CC-BY-SA to the fullest extent        -->
+<!-- permitted by applicable law.                                                  -->
+<!-- ============================================================================= -->
+
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "HornetQ_User_Manual.ent">
+%BOOK_ENTITIES;
+]>
+<chapter id="slow-consumers">
+   <title>Detecting Slow Consumers</title>
+   <para>In this section we will discuss how HornetQ can be configured to deal with slow consumers. A slow consumer with
+      a server-side queue (e.g. JMS topic subscriber) can pose a significant problem for broker performance. If messages
+      build up in the consumer's server-side queue then memory will begin filling up and the broker may enter paging
+      mode which would impact performance negatively. However, criteria can be set so that consumers which don't
+      acknowledge messages quickly enough can potentially be disconnected from the broker which in the case of a
+      non-durable JMS subscriber would allow the broker to remove the subscription and all of its messages freeing up
+      valuable server resources.
+   </para>
+   <section id="slow.consumer.configuration">
+      <title>Configuration required for detecting slow consumers</title>
+      <para>By default the server will not detect slow consumers. If slow consumer detection is desired then see
+      <xref linkend="queue-attributes.address-settings"/>
+         for more details.
+      </para>
+      <para>The calculation to determine whether or not a consumer is slow only inspects the number of messages a
+         particular consumer has <emphasis>acknowledged</emphasis>. It doesn't take into account whether or not flow
+         control has been enabled on the consumer, whether or not the consumer is streaming a large message, etc. Keep
+         this in mind when configuring slow consumer detection.
+      </para>
+      <para>Please note that slow consumer checks are performed using the scheduled thread pool and that each queue on
+         the broker with slow consumer detection enabled will cause a new entry in the internal
+         <literal>java.util.concurrent.ScheduledThreadPoolExecutor</literal> instance. If there are a high number of
+         queues and the <literal>slow-consumer-check-period</literal> is relatively low then there may be delays in
+         executing some of the checks. However, this will not impact the accuracy of the calculations used by the
+         detection algorithm. See <xref linkend="server.scheduled.thread.pool"/> for more details about this pool.
+      </para>
+   </section>
+</chapter>


Mime
View raw message