activemq-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From andytay...@apache.org
Subject [16/51] [partial] activemq-6 git commit: ACTIVEMQ6-2 Update to HQ master
Date Tue, 11 Nov 2014 11:00:46 GMT
http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/hornetq-server/src/main/resources/schema/hornetq-configuration.xsd
----------------------------------------------------------------------
diff --git a/hornetq-server/src/main/resources/schema/hornetq-configuration.xsd b/hornetq-server/src/main/resources/schema/hornetq-configuration.xsd
index f134057..3ae9384 100644
--- a/hornetq-server/src/main/resources/schema/hornetq-configuration.xsd
+++ b/hornetq-server/src/main/resources/schema/hornetq-configuration.xsd
@@ -1,5 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <xsd:schema xmlns="urn:hornetq" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:hq="urn:org.hornetq"
+            xmlns:csd="http://www.w3.org/2001/XMLSchema"
             attributeFormDefault="unqualified" elementFormDefault="qualified" targetNamespace="urn:hornetq"
             version="1.0">
 
@@ -224,7 +225,7 @@
             </xsd:annotation>
          </xsd:element>
 
-         <xsd:element name="jmx-domain" type="xsd:string" default="jboss.as" maxOccurs="1" minOccurs="0">
+         <xsd:element name="jmx-domain" type="xsd:string" default="org.hornetq" maxOccurs="1" minOccurs="0">
             <xsd:annotation hq:linkend="management.jmx.configuration" hq:field_name="DEFAULT_JMX_DOMAIN">
                <xsd:documentation>
                   the JMX domain used to register HornetQ MBeans in the MBeanServer
@@ -1230,8 +1231,7 @@
             </xsd:annotation>
          </xsd:element>
 
-         <xsd:element name="use-duplicate-detection" type="xsd:boolean" default="true"
-                      maxOccurs="1" minOccurs="0">
+         <xsd:element name="use-duplicate-detection" type="xsd:boolean" default="true" maxOccurs="1" minOccurs="0">
             <xsd:annotation hq:field_name="DEFAULT_BRIDGE_DUPLICATE_DETECTION">
                <xsd:documentation>
                   should duplicate detection headers be inserted in forwarded messages?
@@ -1239,8 +1239,8 @@
             </xsd:annotation>
          </xsd:element>
 
-         <xsd:element name="confirmation-window-size" type="xsd:int" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:default="(bytes, 1024 * 1024)">
+         <xsd:element name="confirmation-window-size" type="xsd:int" maxOccurs="1" minOccurs="0" default="1048576">
+            <xsd:annotation hq:default="(bytes)" hq:field_name="DEFAULT_BRIDGE_CONFIRMATION_WINDOW_SIZE">
                <xsd:documentation>
                   Once the bridge has received this many bytes, it sends a confirmation
                </xsd:documentation>
@@ -1263,8 +1263,7 @@
             </xsd:annotation>
          </xsd:element>
 
-         <xsd:element name="reconnect-attempts-same-node" default="10" type="xsd:int"
-                      maxOccurs="1" minOccurs="0">
+         <xsd:element name="reconnect-attempts-same-node" default="10" type="xsd:int" maxOccurs="1" minOccurs="0">
             <xsd:annotation hq:default="(int, 10)" hq:field_name="DEFAULT_BRIDGE_CONNECT_SAME_NODE">
                <xsd:documentation>
                   Upon reconnection this configures the number of time the same node on the topology will be retried
@@ -1326,8 +1325,7 @@
          </xsd:element>
 
          <xsd:element name="check-period" type="xsd:long" default="30000" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:linkend="connection-ttl" hq:default="(ms)"
-                            hq:field_name="DEFAULT_CLUSTER_FAILURE_CHECK_PERIOD">
+            <xsd:annotation hq:linkend="connection-ttl" hq:default="(ms)" hq:field_name="DEFAULT_CLUSTER_FAILURE_CHECK_PERIOD">
                <xsd:documentation>
                   The period (in milliseconds) used to check if the cluster connection has failed to receive pings from
                   another server
@@ -1336,8 +1334,7 @@
          </xsd:element>
 
          <xsd:element name="connection-ttl" type="xsd:long" default="60000" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:linkend="connection-ttl" hq:default="(ms)"
-                            hq:field_name="DEFAULT_CLUSTER_CONNECTION_TTL">
+            <xsd:annotation hq:linkend="connection-ttl" hq:default="(ms)" hq:field_name="DEFAULT_CLUSTER_CONNECTION_TTL">
                <xsd:documentation>
                   how long to keep a connection alive in the absence of any data arriving from the client
                </xsd:documentation>
@@ -1353,7 +1350,7 @@
          </xsd:element>
 
          <xsd:element name="call-timeout" type="xsd:long" default="30000" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:default="(ms)">
+            <xsd:annotation hq:default="(ms)" hq:field_name="DEFAULT_CLUSTER_CALL_TIMEOUT">
                <xsd:documentation>
                   How long to wait for a reply
                </xsd:documentation>
@@ -1425,7 +1422,7 @@
          </xsd:element>
 
          <xsd:element name="confirmation-window-size" type="xsd:int" default="1048576" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:linkend="client-reconnection">
+            <xsd:annotation hq:field_name="DEFAULT_CLUSTER_CONFIRMATION_WINDOW_SIZE" hq:linkend="client-reconnection">
                <xsd:documentation>
                   The size (in bytes) of the window used for confirming data from the server connected to.
                </xsd:documentation>
@@ -1433,7 +1430,7 @@
          </xsd:element>
 
          <xsd:element name="call-failover-timeout" type="xsd:long" default="-1" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:linkend="clusters.cluster-connections" hq:default="(ms)">
+            <xsd:annotation hq:linkend="clusters.cluster-connections" hq:default="(ms)" hq:field_name="DEFAULT_CLUSTER_CALL_FAILOVER_TIMEOUT">
                <xsd:documentation>
                   How long to wait for a reply if in the middle of a fail-over. -1 means wait forever.
                </xsd:documentation>
@@ -1471,9 +1468,8 @@
                   <xsd:sequence>
                      <xsd:element name="connector-ref" type="xsd:string" maxOccurs="unbounded" minOccurs="0"/>
                   </xsd:sequence>
-                  <xsd:attribute name="allow-direct-connections-only" default="false" type="xsd:boolean"
-                                 use="optional">
-                     <xsd:annotation>
+                  <xsd:attribute name="allow-direct-connections-only" default="false" type="xsd:boolean" use="optional">
+                     <xsd:annotation hq:field_name="DEFAULT_CLUSTER_ALLOW_DIRECT_CONNECTIONS">
                         <xsd:documentation>
                            restricts cluster connections to the listed connector-ref's
                         </xsd:documentation>
@@ -1558,27 +1554,60 @@
       </xsd:attribute>
 
    </xsd:complexType>
-
    <xsd:complexType name="haPolicyType">
-      <xsd:all>
-         <xsd:element name="policy-type" minOccurs="0" maxOccurs="1" default="NONE">
+      <xsd:choice>
+         <xsd:element name="live-only" type="haLiveOnlyPolicyType" minOccurs="0" maxOccurs="1">
             <xsd:annotation>
                <xsd:documentation>
-                  what kind of HA Policy should we use
+                  A live only server with no HA capabilities apart from scale down.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="replication" type="haReplicationType" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  Configuration for a replicated server, either master, slave or colocated.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="shared-store" type="haSharedStoreType" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  Configuration for a shared store server, either master, slave or colocated.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+      </xsd:choice>
+   </xsd:complexType>
+
+   <xsd:complexType name="haReplicationType">
+      <xsd:choice>
+         <xsd:element name="master" type="replicatedPolicyType" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  A live server configured to replicate.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="slave" type="replicaPolicyType" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  A backup server configured to replicate.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="colocated" type="haColocationReplicationType" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  a replicated live server that will allow requests to create colocated replicated backup servers.
                </xsd:documentation>
             </xsd:annotation>
-            <xsd:simpleType>
-               <xsd:restriction base="xsd:string">
-                  <xsd:enumeration value="NONE"/>
-                  <xsd:enumeration value="REPLICATED"/>
-                  <xsd:enumeration value="SHARED_STORE"/>
-                  <xsd:enumeration value="BACKUP_REPLICATED"/>
-                  <xsd:enumeration value="BACKUP_SHARED_STORE"/>
-                  <xsd:enumeration value="COLOCATED_REPLICATED"/>
-                  <xsd:enumeration value="COLOCATED_SHARED_STORE"/>
-               </xsd:restriction>
-            </xsd:simpleType>
          </xsd:element>
+      </xsd:choice>
+   </xsd:complexType>
+
+   <xsd:complexType name="haColocationReplicationType">
+      <xsd:all>
          <xsd:element name="request-backup" type="xsd:boolean" minOccurs="0" maxOccurs="1" default="false">
             <xsd:annotation hq:field_name="DEFAULT_HAPOLICY_REQUEST_BACKUP">
                <xsd:documentation>
@@ -1614,24 +1643,11 @@
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
-         <xsd:element name="backup-strategy" minOccurs="0" maxOccurs="1">
-            <xsd:annotation>
-               <xsd:documentation>
-                  The backup strategy to use if we are a backup or for any colocated backups.
-               </xsd:documentation>
-            </xsd:annotation>
-            <xsd:simpleType>
-               <xsd:restriction base="xsd:string">
-                  <xsd:enumeration value="FULL"/>
-                  <xsd:enumeration value="SCALE_DOWN"/>
-               </xsd:restriction>
-            </xsd:simpleType>
-         </xsd:element>
-         <xsd:element name="scale-down-connectors" minOccurs="0" maxOccurs="1">
+         <xsd:element name="excludes" maxOccurs="1" minOccurs="0">
             <xsd:annotation>
                <xsd:documentation>
-                  A list of connectors to use for scaling down, if not supplied then the scale-down-discovery-group or
-                  first invm connector will be used
+                  the connectors that shouldn't have their ports offset, typically remote connectors or the
+                  connector used in the cluster connection if scaling down
                </xsd:documentation>
             </xsd:annotation>
             <xsd:complexType>
@@ -1640,41 +1656,132 @@
                </xsd:sequence>
             </xsd:complexType>
          </xsd:element>
-         <xsd:element name="scale-down-discovery-group" type="xsd:string" minOccurs="0" maxOccurs="1">
+         <xsd:element name="master" type="replicatedPolicyType" minOccurs="0" maxOccurs="1">
             <xsd:annotation>
                <xsd:documentation>
-                  The discovery group to use for scale down, if not supplied then the scale-down-connectors or first
-                  invm connector will be used
+                  The configuration for the live replicated server.
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
-         <xsd:element name="scale-down-group-name" type="xsd:string" minOccurs="0" maxOccurs="1">
+         <xsd:element name="slave" type="replicaPolicyType" minOccurs="0" maxOccurs="1">
             <xsd:annotation>
                <xsd:documentation>
-                  The scale down group to scale down to, a server will only scale down to a server within the same group
+                  The configuration for any slaves created.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+      </xsd:all>
+   </xsd:complexType>
+
+   <xsd:complexType name="haColocationSharedStoreType">
+      <xsd:all>
+         <xsd:element name="request-backup" type="xsd:boolean" minOccurs="0" maxOccurs="1" default="false">
+            <xsd:annotation>
+               <xsd:documentation>
+                  If true then the server will request a backup on another node
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="backup-request-retries" type="xsd:int" minOccurs="0" maxOccurs="1" default="-1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  How many times the live server will try to request a backup, -1 means forever.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="backup-request-retry-interval" type="xsd:long" minOccurs="0" maxOccurs="1" default="5000">
+            <xsd:annotation>
+               <xsd:documentation>
+                  How long to wait for retries between attempts to request a backup server.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="max-backups" type="xsd:int" minOccurs="0" maxOccurs="1" default="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  Whether or not this live server will accept backup requests from other live servers.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="backup-port-offset" type="xsd:int" minOccurs="0" maxOccurs="1" default="100">
+            <xsd:annotation>
+               <xsd:documentation>
+                  The offset to use for the Connectors and Acceptors when creating a new backup server.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="master" type="sharedStoreMasterPolicyType" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  The configuration for the live shared store server.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="slave" type="sharedStoreSlavePolicyType" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  The configuration for any shared store backups created.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+      </xsd:all>
+   </xsd:complexType>
+
+   <xsd:complexType name="haSharedStoreType">
+      <xsd:choice>
+         <xsd:element name="master" type="sharedStoreMasterPolicyType" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  A shared store live server configuration.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="slave" type="sharedStoreSlavePolicyType" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  A shared store backup server configuration.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="colocated" type="haColocationSharedStoreType" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  A shared store colocated configuration
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
-         <xsd:element name="backup-group-name" type="xsd:string" minOccurs="0" maxOccurs="1">
+      </xsd:choice>
+   </xsd:complexType>
+
+   <xsd:complexType name="haLiveOnlyPolicyType">
+      <xsd:all>
+         <xsd:element name="scale-down" type="scaleDownType" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  The scale down configuration of this live server.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+      </xsd:all>
+   </xsd:complexType>
+   <xsd:complexType name="replicatedPolicyType">
+      <xsd:all>
+         <xsd:element name="group-name" type="xsd:string" minOccurs="0" maxOccurs="1">
             <xsd:annotation>
                <xsd:documentation>
                   used for replication, if set, (remote) backup servers will only pair with live servers with matching
-                  backup-group-name
+                  group-name
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
-         <xsd:element name="remote-connectors" maxOccurs="1" minOccurs="0">
+         <xsd:element name="cluster-name" type="xsd:string" maxOccurs="1" minOccurs="0">
             <xsd:annotation>
                <xsd:documentation>
-                  the remote connectors that shouldn't have their ports offset, typically remote connectors or the
-                  connector used in the cluster connection if scalinmg down
+                  Name of the cluster configuration to use for replication. This setting is only necessary in case you
+                  configure multiple cluster connections. It is used by a replicating backups and by live servers that
+                  may attempt fail-back.
                </xsd:documentation>
             </xsd:annotation>
-            <xsd:complexType>
-               <xsd:sequence>
-                  <xsd:element name="connector-ref" type="xsd:string" maxOccurs="unbounded" minOccurs="1"/>
-               </xsd:sequence>
-            </xsd:complexType>
          </xsd:element>
          <xsd:element name="check-for-live-server" type="xsd:boolean" default="false" maxOccurs="1" minOccurs="0">
             <xsd:annotation hq:linkend="hq.check-for-live-server" hq:field_name="DEFAULT_CHECK_FOR_LIVE_SERVER">
@@ -1686,6 +1793,50 @@
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
+      </xsd:all>
+   </xsd:complexType>
+   <xsd:complexType name="replicaPolicyType">
+      <xsd:all>
+         <xsd:element name="group-name" type="xsd:string" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  used for replication, if set, (remote) backup servers will only pair with live servers with matching
+                  group-name
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="cluster-name" type="xsd:string" maxOccurs="1" minOccurs="0">
+            <xsd:annotation>
+               <xsd:documentation>
+                  Name of the cluster configuration to use for replication. This setting is only necessary in case you
+                  configure multiple cluster connections. It is used by a replicating backups and by live servers that
+                  may attempt fail-back.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="max-saved-replicated-journals-size" type="xsd:int" default="2" maxOccurs="1" minOccurs="0">
+            <xsd:annotation hq:field_name="DEFAULT_MAX_SAVED_REPLICATED_JOURNALS_SIZE">
+               <xsd:documentation>
+                  This specifies how many times a replicated backup server can restart after moving its files on start.
+                  Once there are this number of backup journal files the server will stop permanently after it fails
+                  back.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="scale-down" type="scaleDownType" maxOccurs="1" minOccurs="0">
+            <xsd:annotation>
+               <xsd:documentation>
+                  if provided then this backup will scale down rather than becoming live after fail over.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="restart-backup" type="xsd:boolean" default="false" maxOccurs="1" minOccurs="0">
+            <xsd:annotation hq:linkend="ha.restart-backup" hq:field_name="DEFAULT_RESTART_BACKUP">
+               <xsd:documentation>
+                  Will this server, if a backup, restart once it has been stopped because of failback or scaling down.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
          <xsd:element name="allow-failback" type="xsd:boolean" default="true" maxOccurs="1" minOccurs="0">
             <xsd:annotation hq:linkend="ha.allow-fail-back" hq:field_name="DEFAULT_ALLOW_AUTO_FAILBACK">
                <xsd:documentation>
@@ -1697,21 +1848,25 @@
             </xsd:annotation>
          </xsd:element>
          <xsd:element name="failback-delay" type="xsd:long" default="5000" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:linkend="ha.allow-fail-back" hq:default="(in milliseconds)"
-                            hq:field_name="DEFAULT_FAILBACK_DELAY">
+            <xsd:annotation hq:linkend="ha.allow-fail-back" hq:default="(in milliseconds)" hq:field_name="DEFAULT_FAILBACK_DELAY">
                <xsd:documentation>
-                  delay to wait before fail-back occurs on (live's) restart
+                  if we have to start as a replicated server this is the delay to wait before fail-back occurs
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
-         <xsd:element name="failover-on-shutdown" type="xsd:boolean" default="false" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:linkend="ha.allow-fail-back" hq:field_name="DEFAULT_FAILOVER_ON_SERVER_SHUTDOWN">
+      </xsd:all>
+   </xsd:complexType>
+   <xsd:complexType name="colocatedReplicaPolicyType">
+      <xsd:all>
+         <xsd:element name="group-name" type="xsd:string" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
                <xsd:documentation>
-                  Will this backup server come live on a normal server shutdown
+                  used for replication, if set, (remote) backup servers will only pair with live servers with matching
+                  group-name
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
-         <xsd:element name="replication-clustername" type="xsd:string" maxOccurs="1" minOccurs="0">
+         <xsd:element name="cluster-name" type="xsd:string" maxOccurs="1" minOccurs="0">
             <xsd:annotation>
                <xsd:documentation>
                   Name of the cluster configuration to use for replication. This setting is only necessary in case you
@@ -1720,57 +1875,173 @@
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
-         <xsd:element name="scale-down-clustername" type="xsd:string" maxOccurs="1" minOccurs="0">
+         <xsd:element name="max-saved-replicated-journals-size" type="xsd:int" default="2" maxOccurs="1" minOccurs="0">
             <xsd:annotation>
                <xsd:documentation>
-                  Name of the cluster configuration to use for scaling down. This setting is only necessary in case you
-                  configure multiple cluster connections.
+                  This specifies how many times a replicated backup server can restart after moving its files on start.
+                  Once there are this number of backup journal files the server will stop permanently after it fails
+                  back.
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
-         <xsd:element name="max-saved-replicated-journals-size" type="xsd:int" default="2" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:field_name="DEFAULT_MAX_SAVED_REPLICATED_JOURNALS_SIZE">
+         <xsd:element name="scale-down" type="scaleDownType" maxOccurs="1" minOccurs="0">
+            <xsd:annotation>
                <xsd:documentation>
-                  This specifies how many times a replicated backup server can restart after moving its files on start.
-                  Once there are this number of backup journal files the server will stop permanently after if fails
-                  back.
+                  if provided then this backup will scale down rather than becoming live after fail over.
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
-         <xsd:element name="scale-down" type="xsd:boolean" default="false" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:linkend="ha.scale-down" hq:field_name="DEFAULT_SCALE_DOWN">
+         <xsd:element name="restart-backup" type="xsd:boolean" default="false" maxOccurs="1" minOccurs="0">
+            <xsd:annotation hq:linkend="ha.restart-backup">
+               <xsd:documentation>
+                  Will this server, if a backup, restart once it has been stopped because of failback or scaling down.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+      </xsd:all>
+   </xsd:complexType>
+   <xsd:complexType name="sharedStoreMasterPolicyType">
+      <xsd:all>
+         <xsd:element name="failback-delay" type="xsd:long" default="5000" maxOccurs="1" minOccurs="0">
+            <xsd:annotation>
+               <xsd:documentation>
+                  delay to wait before fail-back occurs on (live's) restart
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="failover-on-shutdown" type="xsd:boolean" default="false" maxOccurs="1" minOccurs="0">
+            <xsd:annotation hq:linkend="ha.allow-fail-back" hq:field_name="DEFAULT_FAILOVER_ON_SERVER_SHUTDOWN">
                <xsd:documentation>
-                  Will this server send its messages to another live server in the cluster when shut-down cleanly.
+                  Will this backup server come live on a normal server shutdown
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+      </xsd:all>
+   </xsd:complexType>
+   <xsd:complexType name="sharedStoreSlavePolicyType">
+      <xsd:all>
+         <xsd:element name="allow-failback" type="xsd:boolean" default="true" maxOccurs="1" minOccurs="0">
+            <xsd:annotation hq:linkend="ha.allow-fail-back">
+               <xsd:documentation>
+                  Whether a server will automatically stop when another server places a request to take over
+                  its place. The use case is when a regular server stops and its backup takes over its
+                  duties, later the main server restarts and requests the server (the former backup) to
+                  stop operating.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="failback-delay" type="xsd:long" default="5000" maxOccurs="1" minOccurs="0">
+            <xsd:annotation>
+               <xsd:documentation>
+                  delay to wait before fail-back occurs on (live's) restart
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="failover-on-shutdown" type="xsd:boolean" default="false" maxOccurs="1" minOccurs="0">
+            <xsd:annotation>
+               <xsd:documentation>
+                  Will this backup server come live on a normal server shutdown
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="scale-down" type="scaleDownType" maxOccurs="1" minOccurs="0">
+            <xsd:annotation>
+               <xsd:documentation>
+                  if provided then this backup will scale down rather than becoming live after fail over.
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
          <xsd:element name="restart-backup" type="xsd:boolean" default="false" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:linkend="ha.restart-backup" hq:field_name="DEFAULT_RESTART_BACKUP">
+            <xsd:annotation hq:linkend="ha.restart-backup">
                <xsd:documentation>
                   Will this server, if a backup, restart once it has been stopped because of failback or scaling down.
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
       </xsd:all>
-      <xsd:attribute name="template" use="optional">
-         <xsd:annotation>
-            <xsd:documentation>
-               if true then the backup will use the same configuration as the live server.
-            </xsd:documentation>
-         </xsd:annotation>
-         <xsd:simpleType>
-            <xsd:restriction base="xsd:string">
-               <xsd:enumeration value="NONE"/>
-               <xsd:enumeration value="REPLICATED"/>
-               <xsd:enumeration value="SHARED_STORE"/>
-               <xsd:enumeration value="BACKUP_REPLICATED"/>
-               <xsd:enumeration value="BACKUP_SHARED_STORE"/>
-               <xsd:enumeration value="COLOCATED_REPLICATED"/>
-               <xsd:enumeration value="COLOCATED_SHARED_STORE"/>
-            </xsd:restriction>
-         </xsd:simpleType>
-      </xsd:attribute>
    </xsd:complexType>
+   <xsd:complexType name="colocatedPolicyType">
+      <xsd:all>
+         <xsd:element name="request-backup" type="xsd:boolean" minOccurs="0" maxOccurs="1" default="false">
+            <xsd:annotation>
+               <xsd:documentation>
+                  If true then the server will request a backup on another node
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="backup-request-retries" type="xsd:int" minOccurs="0" maxOccurs="1" default="-1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  How many times the live server will try to request a backup, -1 means forever.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="backup-request-retry-interval" type="xsd:long" minOccurs="0" maxOccurs="1" default="5000">
+            <xsd:annotation>
+               <xsd:documentation>
+                  How long to wait for retries between attempts to request a backup server.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="max-backups" type="xsd:int" minOccurs="0" maxOccurs="1" default="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  Whether or not this live server will accept backup requests from other live servers.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="backup-port-offset" type="xsd:int" minOccurs="0" maxOccurs="1" default="100">
+            <xsd:annotation>
+               <xsd:documentation>
+                  The offset to use for the Connectors and Acceptors when creating a new backup server.
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+      </xsd:all>
+   </xsd:complexType>
+   <xsd:complexType name="scaleDownType">
+      <xsd:sequence>
+         <xsd:element name="enabled" type="xsd:boolean" default="true" maxOccurs="1" minOccurs="0">
+            <xsd:annotation hq:linkend="ha.scale-down" hq:field_name="DEFAULT_SCALE_DOWN_ENABLED">
+               <xsd:documentation>
+                  it's possible that you only want a server to partake in scale down as a receiver, via a group.
+                  In this case set scale-down to false
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="group-name" type="xsd:string" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  The scale down group to scale down to, a server will only scale down to a server within the same group
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:choice>
+         <xsd:element name="discovery-group" type="xsd:string" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  The discovery group to use for scale down, if not supplied then the scale-down-connectors or first
+                  invm connector will be used
+               </xsd:documentation>
+            </xsd:annotation>
+         </xsd:element>
+         <xsd:element name="connectors" minOccurs="0" maxOccurs="1">
+            <xsd:annotation>
+               <xsd:documentation>
+                  A list of connectors to use for scaling down, if not supplied then the scale-down-discovery-group or
+                  first invm connector will be used
+               </xsd:documentation>
+            </xsd:annotation>
+            <xsd:complexType>
+               <xsd:sequence>
+                  <xsd:element name="connector-ref" type="xsd:string" maxOccurs="unbounded" minOccurs="1"/>
+               </xsd:sequence>
+            </xsd:complexType>
+         </xsd:element>
+         </xsd:choice>
+      </xsd:sequence>
+   </xsd:complexType>
+
    <xsd:complexType name="groupingHandlerType">
       <xsd:all>
          <xsd:element name="type" maxOccurs="1" minOccurs="1">
@@ -1795,14 +2066,14 @@
             </xsd:annotation>
          </xsd:element>
          <xsd:element name="timeout" type="xsd:int" default="5000" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:default="(ms)">
+            <xsd:annotation hq:default="(ms)" hq:field_name="DEFAULT_GROUPING_HANDLER_TIMEOUT">
                <xsd:documentation>
                   How long to wait for a decision
                </xsd:documentation>
             </xsd:annotation>
          </xsd:element>
          <xsd:element name="group-timeout" type="xsd:int" default="-1" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:default="(ms)">
+            <xsd:annotation hq:default="(ms)" hq:field_name="DEFAULT_GROUPING_HANDLER_GROUP_TIMEOUT">
                <xsd:documentation>
                   How long a group binding will be used, -1 means for ever. Bindings are removed after this wait
                   elapses. On the remote node this is used to determine how often you should re-query the main
@@ -1811,7 +2082,7 @@
             </xsd:annotation>
          </xsd:element>
          <xsd:element name="reaper-period" type="xsd:long" default="30000" maxOccurs="1" minOccurs="0">
-            <xsd:annotation hq:default="(ms)">
+            <xsd:annotation hq:default="(ms)" hq:field_name="DEFAULT_GROUPING_HANDLER_REAPER_PERIOD">
                <xsd:documentation>
                   How often the reaper will be run to check for timed out group bindings. Only valid for LOCAL handlers
                </xsd:documentation>
@@ -1966,6 +2237,37 @@
                   </xsd:documentation>
                </xsd:annotation>
             </xsd:element>
+
+            <xsd:element name="slow-consumer-threshold" type="xsd:long" maxOccurs="1" minOccurs="0">
+               <xsd:annotation hq:default="-1">
+                  <xsd:documentation>
+                     The minimum rate of message consumption allowed before a consumer is considered "slow." Measured
+                     in messages-per-second.
+                  </xsd:documentation>
+               </xsd:annotation>
+            </xsd:element>
+
+            <xsd:element name="slow-consumer-policy" default="NOTIFY" maxOccurs="1" minOccurs="0">
+               <xsd:annotation>
+                  <xsd:documentation>
+                     What happens when a slow consumer is identified.
+                  </xsd:documentation>
+               </xsd:annotation>
+               <xsd:simpleType>
+                  <xsd:restriction base="xsd:string">
+                     <xsd:enumeration value="KILL"/>
+                     <xsd:enumeration value="NOTIFY"/>
+                  </xsd:restriction>
+               </xsd:simpleType>
+            </xsd:element>
+
+            <xsd:element name="slow-consumer-check-period" type="xsd:long" maxOccurs="1" minOccurs="0">
+               <xsd:annotation hq:default="5">
+                  <xsd:documentation>
+                     How often to check for slow consumers on a particular queue. Measured in minutes.
+                  </xsd:documentation>
+               </xsd:annotation>
+            </xsd:element>
          </xsd:all>
 
          <xsd:attribute name="match" type="xsd:string" use="required">

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/hornetq-server/src/test/java/org/hornetq/core/config/impl/ConfigurationImplTest.java
----------------------------------------------------------------------
diff --git a/hornetq-server/src/test/java/org/hornetq/core/config/impl/ConfigurationImplTest.java b/hornetq-server/src/test/java/org/hornetq/core/config/impl/ConfigurationImplTest.java
index aa396c3..1718125 100644
--- a/hornetq-server/src/test/java/org/hornetq/core/config/impl/ConfigurationImplTest.java
+++ b/hornetq-server/src/test/java/org/hornetq/core/config/impl/ConfigurationImplTest.java
@@ -11,7 +11,7 @@
  * permissions and limitations under the License.
  */
 package org.hornetq.core.config.impl;
-import org.hornetq.core.server.cluster.ha.HAPolicy;
+import org.hornetq.core.config.ha.LiveOnlyPolicyConfiguration;
 import org.junit.Before;
 
 import org.junit.Test;
@@ -45,7 +45,6 @@ public class ConfigurationImplTest extends UnitTestCase
    @Test
    public void testDefaults()
    {
-      Assert.assertEquals(HornetQDefaultConfiguration.isDefaultSharedStore(), conf.getHAPolicy().isSharedStore());
       Assert.assertEquals(HornetQDefaultConfiguration.getDefaultScheduledThreadPoolMaxSize(),
                           conf.getScheduledThreadPoolMaxSize());
       Assert.assertEquals(HornetQDefaultConfiguration.getDefaultSecurityInvalidationInterval(),
@@ -110,10 +109,6 @@ public class ConfigurationImplTest extends UnitTestCase
    {
       for (int j = 0; j < 100; j++)
       {
-         HAPolicy.POLICY_TYPE p = HAPolicy.POLICY_TYPE.SHARED_STORE;
-         conf.getHAPolicy().setPolicyType(p);
-         Assert.assertEquals(p, conf.getHAPolicy().getPolicyType());
-
          int i = RandomUtil.randomInt();
          conf.setScheduledThreadPoolMaxSize(i);
          Assert.assertEquals(i, conf.getScheduledThreadPoolMaxSize());
@@ -324,7 +319,7 @@ public class ConfigurationImplTest extends UnitTestCase
    {
       boolean b = RandomUtil.randomBoolean();
 
-      conf.getHAPolicy().setPolicyType(HAPolicy.POLICY_TYPE.SHARED_STORE);
+      conf.setHAPolicyConfiguration(new LiveOnlyPolicyConfiguration());
 
       int i = RandomUtil.randomInt();
       conf.setScheduledThreadPoolMaxSize(i);

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/hornetq-server/src/test/java/org/hornetq/core/config/impl/DefaultsFileConfigurationTest.java
----------------------------------------------------------------------
diff --git a/hornetq-server/src/test/java/org/hornetq/core/config/impl/DefaultsFileConfigurationTest.java b/hornetq-server/src/test/java/org/hornetq/core/config/impl/DefaultsFileConfigurationTest.java
index 359b3f8..15e9f72 100644
--- a/hornetq-server/src/test/java/org/hornetq/core/config/impl/DefaultsFileConfigurationTest.java
+++ b/hornetq-server/src/test/java/org/hornetq/core/config/impl/DefaultsFileConfigurationTest.java
@@ -12,11 +12,9 @@
  */
 package org.hornetq.core.config.impl;
 
-import org.hornetq.core.config.BackupStrategy;
-import org.hornetq.core.server.cluster.ha.HAPolicy;
+import org.hornetq.core.config.ha.LiveOnlyPolicyConfiguration;
 import org.junit.Test;
 
-import java.util.ArrayList;
 import java.util.Collections;
 
 import org.junit.Assert;
@@ -39,9 +37,6 @@ public class DefaultsFileConfigurationTest extends ConfigurationImplTest
    @Test
    public void testDefaults()
    {
-      Assert.assertEquals(HornetQDefaultConfiguration.isDefaultBackup(), conf.getHAPolicy().isBackup());
-
-      Assert.assertEquals(HornetQDefaultConfiguration.isDefaultSharedStore(), conf.getHAPolicy().isSharedStore());
 
       Assert.assertEquals(HornetQDefaultConfiguration.getDefaultScheduledThreadPoolMaxSize(),
                           conf.getScheduledThreadPoolMaxSize());
@@ -142,28 +137,8 @@ public class DefaultsFileConfigurationTest extends ConfigurationImplTest
 
       Assert.assertEquals(HornetQDefaultConfiguration.getDefaultMessageExpiryThreadPriority(),
                           conf.getMessageExpiryThreadPriority());
-      Assert.assertEquals("replication cluster name", null, conf.getHAPolicy().getReplicationClustername());
-      Assert.assertEquals("scale-down cluster name", null, conf.getHAPolicy().getScaleDownClustername());
-      Assert.assertEquals("ha-policy type", HAPolicy.POLICY_TYPE.valueOf(HornetQDefaultConfiguration.getDefaultHapolicyType()), conf.getHAPolicy().getPolicyType());
-      Assert.assertEquals("check-for-live-server", HornetQDefaultConfiguration.isDefaultCheckForLiveServer(), conf.getHAPolicy().isCheckForLiveServer());
-      Assert.assertEquals("scale-down", HornetQDefaultConfiguration.isDefaultScaleDown(), conf.getHAPolicy().isScaleDown());
-      Assert.assertEquals("max-saved-replicated-journals-size", HornetQDefaultConfiguration.getDefaultMaxSavedReplicatedJournalsSize(), conf.getHAPolicy().getMaxSavedReplicatedJournalsSize());
-      Assert.assertEquals("failover-on-shutdown", HornetQDefaultConfiguration.isDefaultFailoverOnServerShutdown(), conf.getHAPolicy().isFailoverOnServerShutdown());
-      Assert.assertEquals("backup-group-name", null, conf.getHAPolicy().getBackupGroupName());
-      Assert.assertEquals("backup-port-offset", HornetQDefaultConfiguration.getDefaultHapolicyBackupPortOffset(), conf.getHAPolicy().getBackupPortOffset());
-      Assert.assertEquals("backup-request-retries", HornetQDefaultConfiguration.getDefaultHapolicyBackupRequestRetries(), conf.getHAPolicy().getBackupRequestRetries());
-      Assert.assertEquals("backup-request-retry-interval", HornetQDefaultConfiguration.getDefaultHapolicyBackupRequestRetryInterval(), conf.getHAPolicy().getBackupRequestRetryInterval());
-      Assert.assertEquals("backup-strategy", BackupStrategy.valueOf(HornetQDefaultConfiguration.getDefaultHapolicyBackupStrategy()), conf.getHAPolicy().getBackupStrategy());
-      Assert.assertEquals("failback-delay", HornetQDefaultConfiguration.getDefaultFailbackDelay(), conf.getHAPolicy().getFailbackDelay());
-      Assert.assertEquals("max-backups", HornetQDefaultConfiguration.getDefaultHapolicyMaxBackups(), conf.getHAPolicy().getMaxBackups());
-      Assert.assertEquals("remote-connectors for ha-policy", new ArrayList<>(), conf.getHAPolicy().getRemoteConnectors());
-      Assert.assertEquals("scale-down-connectors for ha-policy", new ArrayList<>(), conf.getHAPolicy().getScaleDownConnectors());
-      Assert.assertEquals("scale-down-discovery-group", null, conf.getHAPolicy().getScaleDownDiscoveryGroup());
-      Assert.assertEquals("scale-down-group-name", null, conf.getHAPolicy().getScaleDownGroupName());
-      Assert.assertEquals("allow-failback", HornetQDefaultConfiguration.isDefaultAllowAutoFailback(), conf.getHAPolicy().isAllowAutoFailBack());
-      Assert.assertEquals("backup", false, conf.getHAPolicy().isBackup());
-      Assert.assertEquals("request-backup", HornetQDefaultConfiguration.isDefaultHapolicyRequestBackup(), conf.getHAPolicy().isRequestBackup());
-      Assert.assertEquals("shared-store", false, conf.getHAPolicy().isSharedStore());
+
+      Assert.assertTrue(conf.getHAPolicyConfiguration() instanceof LiveOnlyPolicyConfiguration);
    }
 
    // Protected ---------------------------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/hornetq-server/src/test/java/org/hornetq/core/config/impl/FileConfigurationParserTest.java
----------------------------------------------------------------------
diff --git a/hornetq-server/src/test/java/org/hornetq/core/config/impl/FileConfigurationParserTest.java b/hornetq-server/src/test/java/org/hornetq/core/config/impl/FileConfigurationParserTest.java
index ccafbad..a88a79c 100644
--- a/hornetq-server/src/test/java/org/hornetq/core/config/impl/FileConfigurationParserTest.java
+++ b/hornetq-server/src/test/java/org/hornetq/core/config/impl/FileConfigurationParserTest.java
@@ -124,8 +124,6 @@ public class FileConfigurationParserTest extends UnitTestCase
       config = parser.parseMainConfig(new ByteArrayInputStream(configStr.getBytes(StandardCharsets.UTF_8)));
 
       assertEquals("newpassword", config.getClusterPassword());
-
-      assertEquals("abackupgroupname", config.getHAPolicy().getBackupGroupName());
    }
 
    private static String firstPart =

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/hornetq-server/src/test/java/org/hornetq/core/config/impl/FileConfigurationTest.java
----------------------------------------------------------------------
diff --git a/hornetq-server/src/test/java/org/hornetq/core/config/impl/FileConfigurationTest.java b/hornetq-server/src/test/java/org/hornetq/core/config/impl/FileConfigurationTest.java
index 381c08d..fc60af0 100644
--- a/hornetq-server/src/test/java/org/hornetq/core/config/impl/FileConfigurationTest.java
+++ b/hornetq-server/src/test/java/org/hornetq/core/config/impl/FileConfigurationTest.java
@@ -13,21 +13,21 @@
 package org.hornetq.core.config.impl;
 
 import java.util.Collections;
-import java.util.List;
 
 import org.hornetq.api.core.BroadcastGroupConfiguration;
 import org.hornetq.api.core.DiscoveryGroupConfiguration;
 import org.hornetq.api.core.SimpleString;
 import org.hornetq.api.core.TransportConfiguration;
 import org.hornetq.api.core.UDPBroadcastGroupConfiguration;
-import org.hornetq.core.config.BackupStrategy;
 import org.hornetq.core.config.BridgeConfiguration;
 import org.hornetq.core.config.ClusterConnectionConfiguration;
 import org.hornetq.core.config.Configuration;
 import org.hornetq.core.config.DivertConfiguration;
+import org.hornetq.core.config.HAPolicyConfiguration;
+import org.hornetq.core.config.ha.LiveOnlyPolicyConfiguration;
 import org.hornetq.core.security.Role;
 import org.hornetq.core.server.JournalType;
-import org.hornetq.core.server.cluster.ha.HAPolicy;
+import org.hornetq.core.settings.impl.SlowConsumerPolicy;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -68,8 +68,6 @@ public class FileConfigurationTest extends ConfigurationImplTest
       Assert.assertEquals(8, conf.getMessageExpiryThreadPriority());
       Assert.assertEquals(127, conf.getIDCacheSize());
       Assert.assertEquals(true, conf.isPersistIDCache());
-      Assert.assertEquals(false, conf.getHAPolicy().isBackup());
-      Assert.assertEquals(false, conf.getHAPolicy().isSharedStore());
       Assert.assertEquals(true, conf.isPersistDeliveryCountBeforeDelivery());
       Assert.assertEquals("pagingdir", conf.getPagingDirectory());
       Assert.assertEquals("somedir", conf.getBindingsDirectory());
@@ -234,32 +232,13 @@ public class FileConfigurationTest extends ConfigurationImplTest
 
       Assert.assertEquals(2, conf.getClusterConfigurations().size());
 
-      HAPolicy haPolicy = conf.getHAPolicy();
-      assertEquals(HAPolicy.POLICY_TYPE.COLOCATED_REPLICATED, haPolicy.getPolicyType());
-      assertEquals(true, haPolicy.isRequestBackup());
-      assertEquals(33, haPolicy.getBackupRequestRetries());
-      assertEquals(1234, haPolicy.getBackupRequestRetryInterval());
-      assertEquals(12, haPolicy.getMaxBackups());
-      assertEquals(1002, haPolicy.getBackupPortOffset());
-      assertEquals(BackupStrategy.SCALE_DOWN, haPolicy.getBackupStrategy());
-      assertEquals("wahey!", haPolicy.getScaleDownDiscoveryGroup());
-      assertEquals("boo!", haPolicy.getScaleDownGroupName());
-      List<String> scaleDownConnectors = haPolicy.getScaleDownConnectors();
-      assertEquals(2, scaleDownConnectors.size());
-      assertEquals("sd-connector1", scaleDownConnectors.get(0));
-      assertEquals("sd-connector2", scaleDownConnectors.get(1));
-      List<String> remoteConnectors = haPolicy.getRemoteConnectors();
-      assertEquals(2, remoteConnectors.size());
-      assertEquals("remote-connector1", remoteConnectors.get(0));
-      assertEquals("remote-connector2", remoteConnectors.get(1));
-      assertEquals(true, haPolicy.isCheckForLiveServer());
-      assertEquals(false, haPolicy.isAllowAutoFailBack());
-      assertEquals(10000, haPolicy.getFailbackDelay());
-      assertEquals(true, haPolicy.isFailoverOnServerShutdown());
-      assertEquals("replicationClustername", haPolicy.getReplicationClustername());
-      assertEquals("scaleDownClustername", haPolicy.getScaleDownClustername());
-      assertEquals(3, haPolicy.getMaxSavedReplicatedJournalsSize());
-      assertEquals(true, haPolicy.isScaleDown());
+      HAPolicyConfiguration pc = conf.getHAPolicyConfiguration();
+      assertNotNull(pc);
+      assertTrue(pc instanceof LiveOnlyPolicyConfiguration);
+      LiveOnlyPolicyConfiguration lopc = (LiveOnlyPolicyConfiguration) pc;
+      assertNotNull(lopc.getScaleDownConfiguration());
+      assertEquals(lopc.getScaleDownConfiguration().getGroupName(), "boo!");
+      assertEquals(lopc.getScaleDownConfiguration().getDiscoveryGroup(), "wahey");
 
       for (ClusterConnectionConfiguration ccc : conf.getClusterConfigurations())
       {
@@ -311,6 +290,9 @@ public class FileConfigurationTest extends ConfigurationImplTest
       assertEquals(81738173872337L, conf.getAddressesSettings().get("a1").getPageSizeBytes());
       assertEquals(10, conf.getAddressesSettings().get("a1").getPageCacheMaxSize());
       assertEquals(4, conf.getAddressesSettings().get("a1").getMessageCounterHistoryDayLimit());
+      assertEquals(10, conf.getAddressesSettings().get("a1").getSlowConsumerThreshold());
+      assertEquals(5, conf.getAddressesSettings().get("a1").getSlowConsumerCheckPeriod());
+      assertEquals(SlowConsumerPolicy.NOTIFY, conf.getAddressesSettings().get("a1").getSlowConsumerPolicy());
 
       assertEquals("a2.1", conf.getAddressesSettings().get("a2").getDeadLetterAddress().toString());
       assertEquals("a2.2", conf.getAddressesSettings().get("a2").getExpiryAddress().toString());
@@ -319,6 +301,9 @@ public class FileConfigurationTest extends ConfigurationImplTest
       assertEquals(7126716262626L, conf.getAddressesSettings().get("a2").getPageSizeBytes());
       assertEquals(20, conf.getAddressesSettings().get("a2").getPageCacheMaxSize());
       assertEquals(8, conf.getAddressesSettings().get("a2").getMessageCounterHistoryDayLimit());
+      assertEquals(20, conf.getAddressesSettings().get("a2").getSlowConsumerThreshold());
+      assertEquals(15, conf.getAddressesSettings().get("a2").getSlowConsumerCheckPeriod());
+      assertEquals(SlowConsumerPolicy.KILL, conf.getAddressesSettings().get("a2").getSlowConsumerPolicy());
 
 
       assertEquals(2, conf.getQueueConfigurations().size());

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/hornetq-server/src/test/java/org/hornetq/core/config/impl/HAPolicyConfigurationTest.java
----------------------------------------------------------------------
diff --git a/hornetq-server/src/test/java/org/hornetq/core/config/impl/HAPolicyConfigurationTest.java b/hornetq-server/src/test/java/org/hornetq/core/config/impl/HAPolicyConfigurationTest.java
new file mode 100644
index 0000000..f0dde3a
--- /dev/null
+++ b/hornetq-server/src/test/java/org/hornetq/core/config/impl/HAPolicyConfigurationTest.java
@@ -0,0 +1,434 @@
+/*
+ * Copyright 2005-2014 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.  See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package org.hornetq.core.config.impl;
+
+import org.hornetq.core.server.cluster.ha.ColocatedPolicy;
+import org.hornetq.core.server.cluster.ha.HAPolicy;
+import org.hornetq.core.server.cluster.ha.LiveOnlyPolicy;
+import org.hornetq.core.server.cluster.ha.ReplicaPolicy;
+import org.hornetq.core.server.cluster.ha.ReplicatedPolicy;
+import org.hornetq.core.server.cluster.ha.ScaleDownPolicy;
+import org.hornetq.core.server.cluster.ha.SharedStoreMasterPolicy;
+import org.hornetq.core.server.cluster.ha.SharedStoreSlavePolicy;
+import org.hornetq.core.server.impl.ColocatedActivation;
+import org.hornetq.core.server.impl.LiveOnlyActivation;
+import org.hornetq.core.server.impl.SharedNothingBackupActivation;
+import org.hornetq.core.server.impl.SharedNothingLiveActivation;
+import org.hornetq.core.config.Configuration;
+import org.hornetq.core.server.impl.Activation;
+import org.hornetq.core.server.impl.HornetQServerImpl;
+import org.hornetq.core.server.impl.SharedStoreBackupActivation;
+import org.hornetq.core.server.impl.SharedStoreLiveActivation;
+import org.hornetq.tests.util.UnitTestCase;
+import org.junit.Test;
+
+import java.util.List;
+
+public class HAPolicyConfigurationTest extends UnitTestCase
+{
+   @Test
+   public void liveOnlyTest() throws Exception
+   {
+      Configuration configuration = createConfiguration("live-only-hapolicy-config.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof LiveOnlyActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof LiveOnlyPolicy);
+         LiveOnlyPolicy liveOnlyPolicy = (LiveOnlyPolicy) haPolicy;
+         ScaleDownPolicy scaleDownPolicy = liveOnlyPolicy.getScaleDownPolicy();
+         assertNotNull(scaleDownPolicy);
+         assertEquals(scaleDownPolicy.getGroupName(), "boo!");
+         assertEquals(scaleDownPolicy.getDiscoveryGroup(), "wahey");
+         List<String> connectors = scaleDownPolicy.getConnectors();
+         assertNotNull(connectors);
+         assertEquals(connectors.size(), 0);
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+   @Test
+   public void liveOnlyTest2() throws Exception
+   {
+      Configuration configuration = createConfiguration("live-only-hapolicy-config2.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof LiveOnlyActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof LiveOnlyPolicy);
+         LiveOnlyPolicy liveOnlyPolicy = (LiveOnlyPolicy) haPolicy;
+         ScaleDownPolicy scaleDownPolicy = liveOnlyPolicy.getScaleDownPolicy();
+         assertNotNull(scaleDownPolicy);
+         assertFalse(scaleDownPolicy.isEnabled());
+         assertEquals(scaleDownPolicy.getGroupName(), "boo!");
+         assertEquals(scaleDownPolicy.getDiscoveryGroup(), null);
+         List<String> connectors = scaleDownPolicy.getConnectors();
+         assertNotNull(connectors);
+         assertEquals(connectors.size(), 2);
+         assertTrue(connectors.contains("sd-connector1"));
+         assertTrue(connectors.contains("sd-connector2"));
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+   @Test
+   public void liveOnlyTest3() throws Exception
+   {
+      liveOnlyTest("live-only-hapolicy-config3.xml");
+   }
+
+   @Test
+   public void liveOnlyTest4() throws Exception
+   {
+      liveOnlyTest("live-only-hapolicy-config4.xml");
+   }
+   @Test
+   public void liveOnlyTest5() throws Exception
+   {
+      liveOnlyTest("live-only-hapolicy-config5.xml");
+   }
+
+   @Test
+   public void ReplicatedTest() throws Exception
+   {
+      Configuration configuration = createConfiguration("replicated-hapolicy-config.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof SharedNothingLiveActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof ReplicatedPolicy);
+         ReplicatedPolicy replicatedPolicy = (ReplicatedPolicy) haPolicy;
+         assertEquals(replicatedPolicy.getGroupName(), "purple");
+         assertTrue(replicatedPolicy.isCheckForLiveServer());
+         assertEquals(replicatedPolicy.getClusterName(), "abcdefg");
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+   @Test
+   public void ReplicaTest() throws Exception
+   {
+      Configuration configuration = createConfiguration("replica-hapolicy-config.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof SharedNothingBackupActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof ReplicaPolicy);
+         ReplicaPolicy replicaPolicy = (ReplicaPolicy) haPolicy;
+         assertEquals(replicaPolicy.getGroupName(), "tiddles");
+         assertEquals(replicaPolicy.getMaxSavedReplicatedJournalsSize(), 22);
+         assertEquals(replicaPolicy.getClusterName(), "33rrrrr");
+         assertFalse(replicaPolicy.isRestartBackup());
+         ScaleDownPolicy scaleDownPolicy = replicaPolicy.getScaleDownPolicy();
+         assertNotNull(scaleDownPolicy);
+         assertEquals(scaleDownPolicy.getGroupName(), "boo!");
+         assertEquals(scaleDownPolicy.getDiscoveryGroup(), "wahey");
+         List<String> connectors = scaleDownPolicy.getConnectors();
+         assertNotNull(connectors);
+         assertEquals(connectors.size(), 0);
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+   @Test
+   public void ReplicaTest2() throws Exception
+   {
+      Configuration configuration = createConfiguration("replica-hapolicy-config2.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof SharedNothingBackupActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof ReplicaPolicy);
+         ReplicaPolicy replicaPolicy = (ReplicaPolicy) haPolicy;
+         assertEquals(replicaPolicy.getGroupName(), "tiddles");
+         assertEquals(replicaPolicy.getMaxSavedReplicatedJournalsSize(), 22);
+         assertEquals(replicaPolicy.getClusterName(), "33rrrrr");
+         assertFalse(replicaPolicy.isRestartBackup());
+         ScaleDownPolicy scaleDownPolicy = replicaPolicy.getScaleDownPolicy();
+         assertNotNull(scaleDownPolicy);
+         assertEquals(scaleDownPolicy.getGroupName(), "boo!");
+         assertEquals(scaleDownPolicy.getDiscoveryGroup(), null);
+         List<String> connectors = scaleDownPolicy.getConnectors();
+         assertNotNull(connectors);
+         assertEquals(connectors.size(), 2);
+         assertTrue(connectors.contains("sd-connector1"));
+         assertTrue(connectors.contains("sd-connector2"));
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+   @Test
+   public void ReplicaTest3() throws Exception
+   {
+      Configuration configuration = createConfiguration("replica-hapolicy-config3.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof SharedNothingBackupActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof ReplicaPolicy);
+         ReplicaPolicy replicaPolicy = (ReplicaPolicy) haPolicy;
+         assertEquals(replicaPolicy.getGroupName(), "tiddles");
+         assertEquals(replicaPolicy.getMaxSavedReplicatedJournalsSize(), 22);
+         assertEquals(replicaPolicy.getClusterName(), "33rrrrr");
+         assertFalse(replicaPolicy.isRestartBackup());
+         ScaleDownPolicy scaleDownPolicy = replicaPolicy.getScaleDownPolicy();
+         assertNull(scaleDownPolicy);
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+   @Test
+   public void SharedStoreMasterTest() throws Exception
+   {
+      Configuration configuration = createConfiguration("shared-store-master-hapolicy-config.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof SharedStoreLiveActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof SharedStoreMasterPolicy);
+         SharedStoreMasterPolicy masterPolicy = (SharedStoreMasterPolicy) haPolicy;
+         assertEquals(masterPolicy.getFailbackDelay(), 3456);
+         assertFalse(masterPolicy.isFailoverOnServerShutdown());
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+   @Test
+   public void SharedStoreSlaveTest() throws Exception
+   {
+      Configuration configuration = createConfiguration("shared-store-slave-hapolicy-config.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof SharedStoreBackupActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof SharedStoreSlavePolicy);
+         SharedStoreSlavePolicy replicaPolicy = (SharedStoreSlavePolicy) haPolicy;
+         assertEquals(replicaPolicy.getFailbackDelay(), 9876);
+         assertFalse(replicaPolicy.isFailoverOnServerShutdown());
+         assertFalse(replicaPolicy.isRestartBackup());
+         ScaleDownPolicy scaleDownPolicy = replicaPolicy.getScaleDownPolicy();
+         assertNotNull(scaleDownPolicy);
+         assertEquals(scaleDownPolicy.getGroupName(), "boo!");
+         assertEquals(scaleDownPolicy.getDiscoveryGroup(), "wahey");
+         List<String> connectors = scaleDownPolicy.getConnectors();
+         assertNotNull(connectors);
+         assertEquals(connectors.size(), 0);
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+   @Test
+   public void SharedStoreSlaveTest2() throws Exception
+   {
+      Configuration configuration = createConfiguration("shared-store-slave-hapolicy-config2.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof SharedStoreBackupActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof SharedStoreSlavePolicy);
+         SharedStoreSlavePolicy replicaPolicy = (SharedStoreSlavePolicy) haPolicy;
+         assertEquals(replicaPolicy.getFailbackDelay(), 5678);
+         assertTrue(replicaPolicy.isFailoverOnServerShutdown());
+         assertTrue(replicaPolicy.isRestartBackup());
+         ScaleDownPolicy scaleDownPolicy = replicaPolicy.getScaleDownPolicy();
+         assertNotNull(scaleDownPolicy);
+         assertEquals(scaleDownPolicy.getGroupName(), "boo!");
+         assertEquals(scaleDownPolicy.getDiscoveryGroup(), null);
+         List<String> connectors = scaleDownPolicy.getConnectors();
+         assertNotNull(connectors);
+         assertEquals(connectors.size(), 2);
+         assertTrue(connectors.contains("sd-connector1"));
+         assertTrue(connectors.contains("sd-connector2"));
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+   @Test
+   public void SharedStoreSlaveTest3() throws Exception
+   {
+      // Verifies that a shared-store slave configured WITHOUT a scale-down
+      // section activates as a shared-store backup and exposes no
+      // ScaleDownPolicy (contrast with the scale-down variants above).
+      Configuration configuration = createConfiguration("shared-store-slave-hapolicy-config3.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof SharedStoreBackupActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof SharedStoreSlavePolicy);
+         SharedStoreSlavePolicy sharedStoreSlavePolicy = (SharedStoreSlavePolicy) haPolicy;
+         // JUnit convention: expected value first, actual second, so a
+         // failure message reads "expected:<5678> but was:<...>".
+         assertEquals(5678, sharedStoreSlavePolicy.getFailbackDelay());
+         assertTrue(sharedStoreSlavePolicy.isFailoverOnServerShutdown());
+         assertTrue(sharedStoreSlavePolicy.isRestartBackup());
+         ScaleDownPolicy scaleDownPolicy = sharedStoreSlavePolicy.getScaleDownPolicy();
+         assertNull(scaleDownPolicy);
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+   @Test
+   public void colocatedTest() throws Exception
+   {
+      // Colocated HA with replication: the live is a ReplicatedPolicy and the
+      // in-VM backup is a ReplicaPolicy, each with its own group/cluster name
+      // taken from the XML config.
+      Configuration configuration = createConfiguration("colocated-hapolicy-config.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof ColocatedActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof ColocatedPolicy);
+         ColocatedPolicy colocatedPolicy = (ColocatedPolicy) haPolicy;
+         ReplicatedPolicy livePolicy = (ReplicatedPolicy) colocatedPolicy.getLivePolicy();
+         assertNotNull(livePolicy);
+
+         // JUnit convention: expected value first, actual second.
+         assertEquals("purple", livePolicy.getGroupName());
+         assertTrue(livePolicy.isCheckForLiveServer());
+         assertEquals("abcdefg", livePolicy.getClusterName());
+         ReplicaPolicy backupPolicy = (ReplicaPolicy) colocatedPolicy.getBackupPolicy();
+         assertNotNull(backupPolicy);
+         assertEquals("tiddles", backupPolicy.getGroupName());
+         assertEquals(22, backupPolicy.getMaxSavedReplicatedJournalsSize());
+         assertEquals("33rrrrr", backupPolicy.getClusterName());
+         assertFalse(backupPolicy.isRestartBackup());
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+
+   @Test
+   public void colocatedTest2() throws Exception
+   {
+      // Colocated HA backed by a shared store: the live is a shared-store
+      // master and the colocated backup is a shared-store slave, each with
+      // its own failback delay from the XML config.
+      Configuration configuration = createConfiguration("colocated-hapolicy-config2.xml");
+      HornetQServerImpl server = new HornetQServerImpl(configuration);
+      try
+      {
+         server.start();
+         Activation activation = server.getActivation();
+         assertTrue(activation instanceof ColocatedActivation);
+         HAPolicy haPolicy = server.getHAPolicy();
+         assertTrue(haPolicy instanceof ColocatedPolicy);
+         ColocatedPolicy colocatedPolicy = (ColocatedPolicy) haPolicy;
+         SharedStoreMasterPolicy livePolicy = (SharedStoreMasterPolicy) colocatedPolicy.getLivePolicy();
+         assertNotNull(livePolicy);
+
+         // JUnit convention: expected value first, actual second.
+         assertFalse(livePolicy.isFailoverOnServerShutdown());
+         assertEquals(1234, livePolicy.getFailbackDelay());
+         SharedStoreSlavePolicy backupPolicy = (SharedStoreSlavePolicy) colocatedPolicy.getBackupPolicy();
+         assertNotNull(backupPolicy);
+         assertEquals(44, backupPolicy.getFailbackDelay());
+         assertFalse(backupPolicy.isFailoverOnServerShutdown());
+         assertFalse(backupPolicy.isRestartBackup());
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+   private void liveOnlyTest(String file) throws Exception
+   {
+      // Shared driver for the live-only variants: boots a server from the
+      // given config file and checks it activates as a plain live-only
+      // server with no scale-down policy configured.
+      HornetQServerImpl server = new HornetQServerImpl(createConfiguration(file));
+      try
+      {
+         server.start();
+         assertTrue(server.getActivation() instanceof LiveOnlyActivation);
+         HAPolicy policy = server.getHAPolicy();
+         assertTrue(policy instanceof LiveOnlyPolicy);
+         assertNull(((LiveOnlyPolicy) policy).getScaleDownPolicy());
+      }
+      finally
+      {
+         server.stop();
+      }
+   }
+
+
+   /**
+    * Loads and starts a FileConfiguration from the given classpath file name,
+    * then re-roots every data directory under the per-test directory.
+    */
+   protected Configuration createConfiguration(String fileName) throws Exception
+   {
+      FileConfiguration config = new FileConfiguration(fileName);
+
+      config.start();
+
+      // Redirect all data folders into the temporary test directory,
+      // otherwise the data folder would be located under hornetq-server.
+      String base = getTestDir() + "/";
+      config.setPagingDirectory(base + config.getPagingDirectory());
+      config.setLargeMessagesDirectory(base + config.getLargeMessagesDirectory());
+      config.setJournalDirectory(base + config.getJournalDirectory());
+      config.setBindingsDirectory(base + config.getBindingsDirectory());
+
+      return config;
+   }
+}

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/hornetq-server/src/test/java/org/hornetq/core/server/group/impl/ClusteredResetMockTest.java
----------------------------------------------------------------------
diff --git a/hornetq-server/src/test/java/org/hornetq/core/server/group/impl/ClusteredResetMockTest.java b/hornetq-server/src/test/java/org/hornetq/core/server/group/impl/ClusteredResetMockTest.java
index ecd998d..e98c86b 100644
--- a/hornetq-server/src/test/java/org/hornetq/core/server/group/impl/ClusteredResetMockTest.java
+++ b/hornetq-server/src/test/java/org/hornetq/core/server/group/impl/ClusteredResetMockTest.java
@@ -20,8 +20,6 @@ import java.util.Set;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import junit.framework.Assert;
-
 import org.hornetq.api.core.BroadcastGroupConfiguration;
 import org.hornetq.api.core.SimpleString;
 import org.hornetq.api.core.TransportConfiguration;
@@ -56,6 +54,7 @@ import org.hornetq.spi.core.remoting.Acceptor;
 import org.hornetq.tests.util.UnitTestCase;
 import org.hornetq.utils.ConcurrentHashSet;
 import org.hornetq.utils.ReusableLatch;
+import org.junit.Assert;
 import org.junit.Test;
 
 /**

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/hornetq-server/src/test/java/org/hornetq/core/server/impl/EmbeddedServerTest.java
----------------------------------------------------------------------
diff --git a/hornetq-server/src/test/java/org/hornetq/core/server/impl/EmbeddedServerTest.java b/hornetq-server/src/test/java/org/hornetq/core/server/impl/EmbeddedServerTest.java
index cf8bb86..78ac525 100644
--- a/hornetq-server/src/test/java/org/hornetq/core/server/impl/EmbeddedServerTest.java
+++ b/hornetq-server/src/test/java/org/hornetq/core/server/impl/EmbeddedServerTest.java
@@ -38,11 +38,11 @@ public class EmbeddedServerTest
    @Before
    public void setup()
    {
-      configuration = new ConfigurationImpl();
-      configuration.setJournalDirectory(SERVER_JOURNAL_DIR);
-      configuration.setPersistenceEnabled(false);
-      configuration.setSecurityEnabled(false);
-      configuration.getAcceptorConfigurations().add(new TransportConfiguration(InVMAcceptorFactory.class.getName()));
+      configuration = new ConfigurationImpl()
+         .setJournalDirectory(SERVER_JOURNAL_DIR)
+         .setPersistenceEnabled(false)
+         .setSecurityEnabled(false)
+         .addAcceptorConfiguration(new TransportConfiguration(InVMAcceptorFactory.class.getName()));
 
       server = HornetQServers.newHornetQServer(configuration);
       try

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/hornetq-server/src/test/java/org/hornetq/core/server/impl/ScheduledDeliveryHandlerTest.java
----------------------------------------------------------------------
diff --git a/hornetq-server/src/test/java/org/hornetq/core/server/impl/ScheduledDeliveryHandlerTest.java b/hornetq-server/src/test/java/org/hornetq/core/server/impl/ScheduledDeliveryHandlerTest.java
index f2613a4..efca9bf 100644
--- a/hornetq-server/src/test/java/org/hornetq/core/server/impl/ScheduledDeliveryHandlerTest.java
+++ b/hornetq-server/src/test/java/org/hornetq/core/server/impl/ScheduledDeliveryHandlerTest.java
@@ -329,8 +329,9 @@ public class ScheduledDeliveryHandlerTest extends Assert
       }
 
       @Override
-      public void setMessageID(long id)
+      public FakeMessage setMessageID(long id)
       {
+         return this;
       }
 
       @Override
@@ -574,9 +575,9 @@ public class ScheduledDeliveryHandlerTest extends Assert
       }
 
       @Override
-      public void setUserID(UUID userID)
+      public FakeMessage setUserID(UUID userID)
       {
-
+         return this;
       }
 
       @Override
@@ -604,9 +605,9 @@ public class ScheduledDeliveryHandlerTest extends Assert
       }
 
       @Override
-      public void setDurable(boolean durable)
+      public FakeMessage setDurable(boolean durable)
       {
-
+         return this;
       }
 
       @Override
@@ -622,9 +623,9 @@ public class ScheduledDeliveryHandlerTest extends Assert
       }
 
       @Override
-      public void setExpiration(long expiration)
+      public FakeMessage setExpiration(long expiration)
       {
-
+         return this;
       }
 
       @Override
@@ -634,9 +635,9 @@ public class ScheduledDeliveryHandlerTest extends Assert
       }
 
       @Override
-      public void setTimestamp(long timestamp)
+      public FakeMessage setTimestamp(long timestamp)
       {
-
+         return this;
       }
 
       @Override
@@ -646,9 +647,9 @@ public class ScheduledDeliveryHandlerTest extends Assert
       }
 
       @Override
-      public void setPriority(byte priority)
+      public FakeMessage setPriority(byte priority)
       {
-
+         return this;
       }
 
       @Override
@@ -974,6 +975,18 @@ public class ScheduledDeliveryHandlerTest extends Assert
       {
          return null;
       }
+
+      @Override
+      public FakeMessage writeBodyBufferBytes(byte[] bytes)
+      {
+         return this;
+      }
+
+      @Override
+      public FakeMessage writeBodyBufferString(String string)
+      {
+         return this;
+      }
    }
 
 
@@ -1178,18 +1191,6 @@ public class ScheduledDeliveryHandlerTest extends Assert
       }
 
       @Override
-      public long getMessageCount(long timeout)
-      {
-         return 0;
-      }
-
-      @Override
-      public long getInstantMessageCount()
-      {
-         return 0;
-      }
-
-      @Override
       public int getDeliveringCount()
       {
          return 0;
@@ -1226,13 +1227,7 @@ public class ScheduledDeliveryHandlerTest extends Assert
       }
 
       @Override
-      public long getMessagesAdded(long timeout)
-      {
-         return 0;
-      }
-
-      @Override
-      public long getInstantMessagesAdded()
+      public long getMessagesAcknowledged()
       {
          return 0;
       }
@@ -1472,6 +1467,12 @@ public class ScheduledDeliveryHandlerTest extends Assert
       }
 
       @Override
+      public void resetMessagesAcknowledged()
+      {
+
+      }
+
+      @Override
       public void incrementMesssagesAdded()
       {
 
@@ -1494,5 +1495,17 @@ public class ScheduledDeliveryHandlerTest extends Assert
       {
 
       }
+
+      @Override
+      public void postAcknowledge(MessageReference ref)
+      {
+
+      }
+
+      @Override
+      public float getRate()
+      {
+         return 0.0f;
+      }
    }
 }

http://git-wip-us.apache.org/repos/asf/activemq-6/blob/177e6820/hornetq-server/src/test/java/org/hornetq/core/settings/AddressSettingsTest.java
----------------------------------------------------------------------
diff --git a/hornetq-server/src/test/java/org/hornetq/core/settings/AddressSettingsTest.java b/hornetq-server/src/test/java/org/hornetq/core/settings/AddressSettingsTest.java
new file mode 100644
index 0000000..32c7a3a
--- /dev/null
+++ b/hornetq-server/src/test/java/org/hornetq/core/settings/AddressSettingsTest.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2005-2014 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.  See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package org.hornetq.core.settings;
+
+import org.hornetq.api.core.SimpleString;
+import org.hornetq.core.settings.impl.AddressFullMessagePolicy;
+import org.hornetq.core.settings.impl.AddressSettings;
+import org.hornetq.tests.util.UnitTestCase;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * @author <a href="ataylor@redhat.com">Andy Taylor</a>
+ */
+public class AddressSettingsTest extends UnitTestCase
+{
+   /**
+    * A freshly constructed AddressSettings must expose every documented
+    * default value.
+    */
+   @Test
+   public void testDefaults()
+   {
+      AddressSettings addressSettings = new AddressSettings();
+      // JUnit convention throughout this class: expected value first,
+      // actual second, so failure messages read correctly.
+      Assert.assertEquals(null, addressSettings.getDeadLetterAddress());
+      Assert.assertEquals(null, addressSettings.getExpiryAddress());
+      Assert.assertEquals(AddressSettings.DEFAULT_MAX_DELIVERY_ATTEMPTS, addressSettings.getMaxDeliveryAttempts());
+      Assert.assertEquals(AddressSettings.DEFAULT_MAX_SIZE_BYTES, addressSettings.getMaxSizeBytes());
+      Assert.assertEquals(AddressSettings.DEFAULT_PAGE_SIZE, addressSettings.getPageSizeBytes());
+      Assert.assertEquals(AddressSettings.DEFAULT_MESSAGE_COUNTER_HISTORY_DAY_LIMIT,
+                          addressSettings.getMessageCounterHistoryDayLimit());
+      Assert.assertEquals(AddressSettings.DEFAULT_REDELIVER_DELAY, addressSettings.getRedeliveryDelay());
+      Assert.assertEquals(AddressSettings.DEFAULT_REDELIVER_MULTIPLIER,
+                          addressSettings.getRedeliveryMultiplier(),
+                          0.000001);
+      Assert.assertEquals(AddressSettings.DEFAULT_SLOW_CONSUMER_THRESHOLD, addressSettings.getSlowConsumerThreshold());
+      Assert.assertEquals(AddressSettings.DEFAULT_SLOW_CONSUMER_CHECK_PERIOD, addressSettings.getSlowConsumerCheckPeriod());
+      Assert.assertEquals(AddressSettings.DEFAULT_SLOW_CONSUMER_POLICY, addressSettings.getSlowConsumerPolicy());
+   }
+
+   /**
+    * Merging a populated settings object into an empty one must copy every
+    * set value across.
+    */
+   @Test
+   public void testSingleMerge()
+   {
+      AddressSettings addressSettings = new AddressSettings();
+      AddressSettings addressSettingsToMerge = new AddressSettings();
+      SimpleString DLQ = new SimpleString("testDLQ");
+      SimpleString exp = new SimpleString("testExpiryQueue");
+      addressSettingsToMerge.setDeadLetterAddress(DLQ);
+      addressSettingsToMerge.setExpiryAddress(exp);
+      addressSettingsToMerge.setMaxDeliveryAttempts(1000);
+      addressSettingsToMerge.setAddressFullMessagePolicy(AddressFullMessagePolicy.DROP);
+      addressSettingsToMerge.setMaxSizeBytes(1001);
+      addressSettingsToMerge.setMessageCounterHistoryDayLimit(1002);
+      addressSettingsToMerge.setRedeliveryDelay(1003);
+      addressSettingsToMerge.setPageSizeBytes(1004);
+      addressSettings.merge(addressSettingsToMerge);
+      Assert.assertEquals(DLQ, addressSettings.getDeadLetterAddress());
+      Assert.assertEquals(exp, addressSettings.getExpiryAddress());
+      Assert.assertEquals(1000, addressSettings.getMaxDeliveryAttempts());
+      Assert.assertEquals(1001, addressSettings.getMaxSizeBytes());
+      Assert.assertEquals(1002, addressSettings.getMessageCounterHistoryDayLimit());
+      Assert.assertEquals(1003, addressSettings.getRedeliveryDelay());
+      Assert.assertEquals(1004, addressSettings.getPageSizeBytes());
+      Assert.assertEquals(AddressFullMessagePolicy.DROP, addressSettings.getAddressFullMessagePolicy());
+   }
+
+   /**
+    * Values adopted from a first merge must win over a second merge; values
+    * the first merge left unset are taken from the second.
+    */
+   @Test
+   public void testMultipleMerge()
+   {
+      AddressSettings addressSettings = new AddressSettings();
+      AddressSettings addressSettingsToMerge = new AddressSettings();
+      SimpleString DLQ = new SimpleString("testDLQ");
+      SimpleString exp = new SimpleString("testExpiryQueue");
+      addressSettingsToMerge.setDeadLetterAddress(DLQ);
+      addressSettingsToMerge.setExpiryAddress(exp);
+      addressSettingsToMerge.setMaxDeliveryAttempts(1000);
+      addressSettingsToMerge.setMaxSizeBytes(1001);
+      addressSettingsToMerge.setMessageCounterHistoryDayLimit(1002);
+      addressSettingsToMerge.setAddressFullMessagePolicy(AddressFullMessagePolicy.DROP);
+      addressSettings.merge(addressSettingsToMerge);
+
+      AddressSettings addressSettingsToMerge2 = new AddressSettings();
+      SimpleString exp2 = new SimpleString("testExpiryQueue2");
+      addressSettingsToMerge2.setExpiryAddress(exp2);
+      addressSettingsToMerge2.setMaxSizeBytes(2001);
+      addressSettingsToMerge2.setRedeliveryDelay(2003);
+      addressSettingsToMerge2.setRedeliveryMultiplier(2.5);
+      addressSettings.merge(addressSettingsToMerge2);
+
+      // Expiry address and max size were set by the first merge and must not
+      // be overwritten; redelivery delay/multiplier come from the second.
+      Assert.assertEquals(DLQ, addressSettings.getDeadLetterAddress());
+      Assert.assertEquals(exp, addressSettings.getExpiryAddress());
+      Assert.assertEquals(1000, addressSettings.getMaxDeliveryAttempts());
+      Assert.assertEquals(1001, addressSettings.getMaxSizeBytes());
+      Assert.assertEquals(1002, addressSettings.getMessageCounterHistoryDayLimit());
+      Assert.assertEquals(2003, addressSettings.getRedeliveryDelay());
+      Assert.assertEquals(2.5, addressSettings.getRedeliveryMultiplier(), 0.000001);
+      Assert.assertEquals(AddressFullMessagePolicy.DROP, addressSettings.getAddressFullMessagePolicy());
+   }
+
+   /**
+    * As testMultipleMerge, but with every attribute involved in one of the
+    * two merges.
+    */
+   @Test
+   public void testMultipleMergeAll()
+   {
+      AddressSettings addressSettings = new AddressSettings();
+      AddressSettings addressSettingsToMerge = new AddressSettings();
+      SimpleString DLQ = new SimpleString("testDLQ");
+      SimpleString exp = new SimpleString("testExpiryQueue");
+      addressSettingsToMerge.setDeadLetterAddress(DLQ);
+      addressSettingsToMerge.setExpiryAddress(exp);
+      addressSettingsToMerge.setMaxSizeBytes(1001);
+      addressSettingsToMerge.setRedeliveryDelay(1003);
+      addressSettingsToMerge.setRedeliveryMultiplier(1.0);
+      addressSettingsToMerge.setAddressFullMessagePolicy(AddressFullMessagePolicy.DROP);
+      addressSettings.merge(addressSettingsToMerge);
+
+      AddressSettings addressSettingsToMerge2 = new AddressSettings();
+      SimpleString exp2 = new SimpleString("testExpiryQueue2");
+      SimpleString DLQ2 = new SimpleString("testDlq2");
+      addressSettingsToMerge2.setExpiryAddress(exp2);
+      addressSettingsToMerge2.setDeadLetterAddress(DLQ2);
+      addressSettingsToMerge2.setMaxDeliveryAttempts(2000);
+      addressSettingsToMerge2.setMaxSizeBytes(2001);
+      addressSettingsToMerge2.setMessageCounterHistoryDayLimit(2002);
+      addressSettingsToMerge2.setRedeliveryDelay(2003);
+      addressSettingsToMerge2.setRedeliveryMultiplier(2.0);
+      addressSettingsToMerge2.setMaxRedeliveryDelay(5000);
+      // NOTE(review): this was previously set on addressSettingsToMerge,
+      // which had already been merged, making the call a no-op - apparently
+      // a typo for the second merge source. Per the merge semantics shown by
+      // the other attributes (first-set value wins: exp vs exp2, DLQ vs
+      // DLQ2), PAGE here must not override the DROP adopted earlier.
+      addressSettingsToMerge2.setAddressFullMessagePolicy(AddressFullMessagePolicy.PAGE);
+      addressSettings.merge(addressSettingsToMerge2);
+
+      Assert.assertEquals(DLQ, addressSettings.getDeadLetterAddress());
+      Assert.assertEquals(exp, addressSettings.getExpiryAddress());
+      Assert.assertEquals(2000, addressSettings.getMaxDeliveryAttempts());
+      Assert.assertEquals(1001, addressSettings.getMaxSizeBytes());
+      Assert.assertEquals(2002, addressSettings.getMessageCounterHistoryDayLimit());
+      Assert.assertEquals(1003, addressSettings.getRedeliveryDelay());
+      Assert.assertEquals(1.0, addressSettings.getRedeliveryMultiplier(), 0.000001);
+      Assert.assertEquals(5000, addressSettings.getMaxRedeliveryDelay());
+      Assert.assertEquals(AddressFullMessagePolicy.DROP, addressSettings.getAddressFullMessagePolicy());
+   }
+}


Mime
View raw message