incubator-cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From wid...@apache.org
Subject [45/50] [abbrv] adding docs
Date Tue, 24 Jul 2012 21:20:37 GMT
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/manage-cloud.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/manage-cloud.xml b/docs/en-US/manage-cloud.xml
new file mode 100644
index 0000000..564e621
--- /dev/null
+++ b/docs/en-US/manage-cloud.xml
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="manage-cloud">
+    <title>Managing the Cloud</title>
+    <para><xref linkend="change-database-config" /></para>
+    <para><xref linkend="admin-alerts" /></para>
+    <para><xref linkend="customizing-dns" /></para>
+    <para><xref linkend="stop-start-management-server" /></para>
+   </section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/management-server-install-flow.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/management-server-install-flow.xml b/docs/en-US/management-server-install-flow.xml
new file mode 100644
index 0000000..a828d67
--- /dev/null
+++ b/docs/en-US/management-server-install-flow.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="management-server-install-flow">
+	<title>Management Server Installation</title>
+	<xi:include href="management-server-installation-overview.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+	<xi:include href="prepare-os.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+	<xi:include href="install-management-server.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="install-database-on-management-server-node.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+	<xi:include href="install-database-on-separate-node.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+	<xi:include href="prepare-nfs-shares.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+	<xi:include href="install-management-server-multi-nodes.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+	<xi:include href="prepare-system-vm-template.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+	<xi:include href="installation-complete.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/management-server-installation-overview.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/management-server-installation-overview.xml b/docs/en-US/management-server-installation-overview.xml
new file mode 100644
index 0000000..f733141
--- /dev/null
+++ b/docs/en-US/management-server-installation-overview.xml
@@ -0,0 +1,24 @@
+	<section id="management-server-installation-overview">
+		<title>Management Server Installation Overview</title>
+		<para>
+			This section describes installing the Management Server. There are two slightly different installation flows,
+			depending on how many Management Server nodes will be in your cloud:</para>
+		<itemizedlist>
+			<listitem><para>A single Management Server node, with MySQL on the same node.</para></listitem>
+			<listitem><para>Multiple Management Server nodes, with MySQL on a node separate from the Management Servers.</para></listitem>
+		</itemizedlist>
+		<para>In either case, each machine must meet the system requirements described in System Requirements.</para> 
+		<warning><para>For the sake of security, be sure the public Internet can not access port 8096 or port 8250 on the Management Server.</para></warning>
+		<para>
+			The procedure for installing the Management Server is:
+		</para>
+			<orderedlist>
+				<listitem><para>Prepare the Operating System</para></listitem>
+				<listitem><para>Install the First Management Server</para></listitem>
+				<listitem><para>Install and Configure the Database</para></listitem>
+				<listitem><para>Prepare NFS Shares</para></listitem>
+				<listitem><para>Prepare and Start Additional Management Servers (optional)</para></listitem>
+				<listitem><para>Prepare the System VM Template</para></listitem>
+			</orderedlist>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/management-server-overview.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/management-server-overview.xml b/docs/en-US/management-server-overview.xml
new file mode 100644
index 0000000..40cbd53
--- /dev/null
+++ b/docs/en-US/management-server-overview.xml
@@ -0,0 +1,57 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="management-server-overview">
+		<title>Management Server Overview</title>
+		<para>
+			The Management Server is the &PRODUCT; software that manages cloud resources. By interacting with the Management Server through its UI or API, you can configure and manage your cloud infrastructure.
+		</para>
+		<para>
+			The Management Server runs on a dedicated server or VM. It controls allocation of virtual machines to hosts and assigns storage and IP addresses to the virtual machine instances. The Management Server runs in a Tomcat container and requires a MySQL database for persistence.
+		</para>
+		<para>
+			The machine must meet the system requirements described in System Requirements.
+		</para>
+		<para>
+			The Management Server:
+		</para>
+			<itemizedlist>
+				<listitem>
+					<para>
+						Provides the web user interface for the administrator and a reference user interface for end users.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Provides the APIs for &PRODUCT;.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Manages the assignment of guest VMs to particular hosts.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Manages the assignment of public and private IP addresses to particular accounts.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Manages the allocation of storage to guests as virtual disks.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Manages snapshots, templates, and ISO images, possibly replicating them across data centers.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Provides a single point of configuration for the cloud.
+					</para>
+				</listitem>
+			</itemizedlist>
+	</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/management_server_multi_node_install.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/management_server_multi_node_install.xml b/docs/en-US/management_server_multi_node_install.xml
new file mode 100644
index 0000000..ba925bd
--- /dev/null
+++ b/docs/en-US/management_server_multi_node_install.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<chapter id="management_server_multi_node_install">
+	<title>Management Server Multi-Node Installation</title>
+		<xi:include href="multi_node_overview.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+		<xi:include href="prepare_os.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+		<xi:include href="first_ms_node_install.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+</chapter>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/management_server_overview.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/management_server_overview.xml b/docs/en-US/management_server_overview.xml
new file mode 100644
index 0000000..19b32d7
--- /dev/null
+++ b/docs/en-US/management_server_overview.xml
@@ -0,0 +1,52 @@
+	<section id="management_server_overview">
+		<title>Management Server Overview</title>
+		<para>
+			The Management Server is the CloudStack software that manages cloud resources. By interacting with the Management Server through its UI or API, you can configure and manage your cloud infrastructure.
+		</para>
+		<para>
+			The Management Server runs on a dedicated server or VM. It controls allocation of virtual machines to hosts and assigns storage and IP addresses to the virtual machine instances. The CloudStack Management Server runs in a Tomcat container and requires a MySQL database for persistence.
+		</para>
+		<para>
+			The machine must meet the system requirements described in System Requirements.
+		</para>
+		<para>
+			The Management Server:
+		</para>
+			<itemizedlist>
+				<listitem>
+					<para>
+						Provides the web user interface for the administrator and a reference user interface for end users.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Provides the APIs for the CloudStack platform.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Manages the assignment of guest VMs to particular hosts.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Manages the assignment of public and private IP addresses to particular accounts.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Manages the allocation of storage to guests as virtual disks.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Manages snapshots, templates, and ISO images, possibly replicating them across data centers.
+					</para>
+				</listitem>
+				<listitem>
+					<para>
+						Provides a single point of configuration for the cloud.
+					</para>
+				</listitem>
+			</itemizedlist>
+	</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/manual-live-migration.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/manual-live-migration.xml b/docs/en-US/manual-live-migration.xml
new file mode 100644
index 0000000..63a80e8
--- /dev/null
+++ b/docs/en-US/manual-live-migration.xml
@@ -0,0 +1,28 @@
+<section id="manual-live-migration">
+	<title>Moving VMs Between Hosts (Manual Live Migration)</title>
+		<para>The CloudPlatform administrator can move a running VM from one host to another without interrupting service to users or going into maintenance mode. This is called manual live migration, and can be done under the following conditions:</para>
+		<itemizedlist>
+			<listitem><para>The root administrator is logged in. Domain admins and users can not perform manual live migration of VMs.</para></listitem>
+			<listitem><para>The VM is running. Stopped VMs can not be live migrated.</para></listitem>
+			<listitem><para>The destination host must be in the same cluster as the original host.</para></listitem>
+			<listitem><para>The VM must not be using local disk storage.</para></listitem>	
+			<listitem><para>The destination host must have enough available capacity. If not, the VM will remain in the "migrating" state until memory becomes available.</para></listitem>
+			<listitem><para>(OVM) If the VM is running on the OVM hypervisor, it must not have an ISO attached. Live migration of a VM with attached ISO is not supported in OVM.</para></listitem>				
+		</itemizedlist>
+		<para>To manually live migrate a virtual machine</para>
+		<orderedlist>
+			<listitem><para>Log in to the CloudPlatform UI as a user or admin.</para></listitem>
+			<listitem><para>In the left navigation, click Instances.</para></listitem>
+			<listitem><para>Choose the VM that you want to migrate.</para></listitem>
+			<listitem><para>Click the Migrate Instance button <inlinemediaobject>
+				<imageobject>
+					<imagedata fileref="./images/migrate-instance.png" />
+				</imageobject>
+				<textobject><phrase>Migrateinstance.png: button to migrate an instance</phrase></textobject>
+			</inlinemediaobject>
+				</para></listitem>
+			<listitem><para>From the list of hosts, choose the one to which you want to move the VM.</para></listitem>
+			<listitem><para>Click OK.</para></listitem>
+		</orderedlist>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/max-result-page-returned.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/max-result-page-returned.xml b/docs/en-US/max-result-page-returned.xml
new file mode 100644
index 0000000..ffbe60b
--- /dev/null
+++ b/docs/en-US/max-result-page-returned.xml
@@ -0,0 +1,14 @@
+	<section id="max-result-page-returned">
+		<title>Maximum Result Pages Returned</title>
+		<para>
+			For each cloud, there is a default upper limit on the number of results that any API command will return in a single page. This is to help prevent overloading the cloud servers and prevent DoS attacks. For example, if the page size limit is 500 and a command returns 10,000 results, the command will return 20 pages.
+		</para>
+		<para>The default page size limit can be different for each cloud. It is set in the global configuration parameter default.page.size. If your cloud has many users with lots of VMs, you might need to increase the value of this parameter. At the same time, be careful not to set it so high that your site can be taken down by an enormous return from an API call. For more information about how to set global configuration parameters, see "Describe Your Deployment" in the Installation Guide.</para>
+		<para>To decrease the page size limit for an individual API command, override the global setting with the page and pagesize parameters, which are available in any list* command (listCapabilities, listDiskOfferings, etc.).</para>
+		<itemizedlist>
+			<listitem><para>Both parameters must be specified together.</para></listitem>
+			<listitem><para>The value of the pagesize parameter must be smaller than the value of default.page.size. That is, you can not increase the number of possible items in a result page, only decrease it.</para></listitem>
+		</itemizedlist>
+		<para>For syntax information on the list* commands, see the API Reference.</para>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/migrate-datadisk-volume-new-storage-pool.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/migrate-datadisk-volume-new-storage-pool.xml b/docs/en-US/migrate-datadisk-volume-new-storage-pool.xml
new file mode 100644
index 0000000..417eb71
--- /dev/null
+++ b/docs/en-US/migrate-datadisk-volume-new-storage-pool.xml
@@ -0,0 +1,16 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="migrate-datadisk-volume-new-storage-pool">
+	<title>Migrating a Data Disk Volume to a New Storage Pool</title>
+			<orderedlist>
+				<listitem><para>Log in to the CloudPlatform UI as a user or admin.</para></listitem>
+				<listitem><para>Detach the data disk from the VM. See Detaching and Moving Volumes <xref linkend="detach-move-volumes"/>  (but skip the “reattach” step at the end. You will do that after migrating to new storage).</para></listitem>
+				<listitem><para>Call the CloudPlatform API command migrateVolume and pass in the volume ID and the ID of any storage pool in the zone.</para></listitem>
+				<listitem><para>Watch for the volume status to change to Migrating, then back to Ready.</para></listitem>
+				<listitem><para>Attach the volume to any desired VM running in the same cluster as the new storage server. See Attaching a Volume <xref linkend="attaching-volume"/> </para></listitem>
+			</orderedlist>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/migrate-vm-rootvolume-volume-new-storage-pool.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/migrate-vm-rootvolume-volume-new-storage-pool.xml b/docs/en-US/migrate-vm-rootvolume-volume-new-storage-pool.xml
new file mode 100644
index 0000000..dbf6138
--- /dev/null
+++ b/docs/en-US/migrate-vm-rootvolume-volume-new-storage-pool.xml
@@ -0,0 +1,18 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="migrate-vm-rootvolume-volume-new-storage-pool">
+	<title>Migrating a VM Root Volume to a New Storage Pool</title>
+	<para>When migrating the root disk volume, the VM must first be stopped, and users can not access the VM. After migration is complete, the VM can be restarted.</para>
+			<orderedlist>
+				<listitem><para>Log in to the CloudPlatform UI as a user or admin.</para></listitem>
+				<listitem><para>Detach the data disk from the VM. See Detaching and Moving Volumes <xref linkend="detach-move-volumes"/>  (but skip the “reattach” step at the end. You will do that after migrating to new storage).</para></listitem>
+				<listitem><para>Stop the VM.</para></listitem>
+				<listitem><para>Call the CloudPlatform API command migrateVirtualMachine with the ID of the VM to migrate and the IDs of a destination host and destination storage pool in the same zone.</para></listitem>
+				<listitem><para>Watch for the VM status to change to Migrating, then back to Stopped.</para></listitem>
+				<listitem><para>Restart the VM.</para></listitem>
+			</orderedlist>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/minimum-system-requirements.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/minimum-system-requirements.xml b/docs/en-US/minimum-system-requirements.xml
new file mode 100644
index 0000000..3973c30
--- /dev/null
+++ b/docs/en-US/minimum-system-requirements.xml
@@ -0,0 +1,60 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="minimum-system-requirements">
+		<title>Minimum System Requirements</title>
+		<section id="management-server-system-requirements">
+			<title>Management Server, Database, and Storage System Requirements</title>
+		<para>
+			The machines that will run the Management Server and MySQL database must meet the following requirements. 
+			The same machines can also be used to provide primary and secondary storage, such as via localdisk or NFS. 
+			The Management Server may be placed on a virtual machine.
+		</para>
+		<itemizedlist>
+			<listitem><para>Operating system:</para>
+				<itemizedlist>
+					<listitem><para>Preferred: RHEL 6.2+ 64-bit (https://access.redhat.com/downloads) or CentOS 6.2+ 64-bit (http://isoredirect.centos.org/centos/6/isos/x86_64/).</para></listitem>
+					<listitem><para>Also supported (v3.0.3 and greater): RHEL and CentOS 5.4-5.x 64-bit</para></listitem>
+					<listitem><para>It is highly recommended that you purchase a RHEL support license.
+						Citrix support can not be responsible for helping fix issues with the underlying OS.</para></listitem>
+				</itemizedlist>
+			</listitem>
+			<listitem><para>64-bit x86 CPU (more cores results in better performance)</para></listitem>
+			<listitem><para>4 GB of memory</para></listitem>
+			<listitem><para>250 GB of local disk (more results in better capability; 500 GB recommended)</para></listitem>
+			<listitem><para>At least 1 NIC</para></listitem>
+			<listitem><para>Statically allocated IP address</para></listitem>
+			<listitem><para>Fully qualified domain name as returned by the hostname command</para></listitem>
+		</itemizedlist>
+		</section>
+		<section id="hypervisor-system-requirements">
+		<title>Host/Hypervisor System Requirements</title>
+		<para>The host is where the cloud services run in the form of guest virtual machines. Each host is one machine that meets the following requirements:</para>
+		<itemizedlist>
+			<listitem><para>Must be 64-bit and must support HVM (Intel-VT or AMD-V enabled).</para></listitem> 
+			<listitem><para>64-bit x86 CPU (more cores results in better performance)</para></listitem>
+			<listitem><para>Hardware virtualization support required</para></listitem>
+			<listitem><para>4 GB of memory</para></listitem>
+			<listitem><para>36 GB of local disk</para></listitem>
+			<listitem><para>At least 1 NIC</para></listitem>
+			<listitem><para>Statically allocated IP Address</para></listitem>
+			<listitem><para>Latest hotfixes applied to hypervisor software</para></listitem>
+			<listitem><para>When you deploy &PRODUCT;, the hypervisor host must not have any VMs already running</para></listitem>
+		</itemizedlist>
+		<para>Hosts have additional requirements depending on the hypervisor. See the requirements listed at the top of the Installation section for your chosen hypervisor:</para>
+		<itemizedlist>
+			<listitem><para>Citrix XenServer Installation</para></listitem>
+			<listitem><para>VMware vSphere Installation and Configuration</para></listitem>
+			<listitem><para>KVM Installation and Configuration</para></listitem>
+			<listitem><para>Oracle VM (OVM) Installation and Configuration</para></listitem>
+		</itemizedlist>
+		<warning>
+			<para>
+				Be sure you fulfill the additional hypervisor requirements and installation steps provided in this Guide. Hypervisor hosts must be properly prepared to work with CloudStack. For example, the requirements for XenServer are listed under Citrix XenServer Installation.
+			</para>
+		</warning>
+		</section>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/modify-delete-service-offerings.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/modify-delete-service-offerings.xml b/docs/en-US/modify-delete-service-offerings.xml
new file mode 100644
index 0000000..601a8bd
--- /dev/null
+++ b/docs/en-US/modify-delete-service-offerings.xml
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="modify-delete-service-offerings">
+    <title>Modifying or Deleting a Service Offering</title>
+    <para>Service offerings cannot be changed once created. This applies to both compute offerings and disk offerings.</para>
+    <para>A service offering can be deleted. If it is no longer in use, it is deleted immediately and permanently. If the service offering is still in use, it will remain in the database until all the virtual machines referencing it have been deleted. After deletion by the administrator, a service offering will not be available to end users that are creating new instances.</para>
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/multi_node_overview.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/multi_node_overview.xml b/docs/en-US/multi_node_overview.xml
new file mode 100644
index 0000000..b753340
--- /dev/null
+++ b/docs/en-US/multi_node_overview.xml
@@ -0,0 +1,19 @@
+	<section id="multi_node_installation_overview">
+		<title>Management Server Multi-Node Installation Overview</title>
+		<para>
+			This section describes installing multiple Management Servers and installing MySQL on a node separate from the Management Servers. The machines must meet the system requirements described in System Requirements.
+		</para>
+		<warning><para>For the sake of security, be sure the public Internet can not access port 8096 or port 8250 on the Management Server.</para></warning>
+		<para>
+			The procedure for a multi-node installation is:
+		</para>
+			<orderedlist>
+				<listitem><para>Prepare the Operating System</para></listitem>
+				<listitem><para>Install the First Management Server</para></listitem>
+				<listitem><para>Install and Configure the Database</para></listitem>
+				<listitem><para>Prepare NFS Shares</para></listitem>
+				<listitem><para>Prepare and Start Additional Management Servers</para></listitem>
+				<listitem><para>Prepare the System VM Template</para></listitem>
+			</orderedlist>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/multiple-system-vm-vmware.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/multiple-system-vm-vmware.xml b/docs/en-US/multiple-system-vm-vmware.xml
new file mode 100644
index 0000000..a809b67
--- /dev/null
+++ b/docs/en-US/multiple-system-vm-vmware.xml
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="multiple-system-vm-vmware">
+    <title>Multiple System VM Support for VMware</title>
+    <para>Every CloudPlatform zone has a single System VM for template processing tasks such as downloading templates, uploading templates, and uploading ISOs. In a zone where VMware is being used, additional System VMs can be launched to process VMware-specific tasks such as taking snapshots and creating private templates. The CloudPlatform management server launches additional System VMs for VMware-specific tasks as the load increases. The management server monitors and weights all commands sent to these System VMs and performs dynamic load balancing and scaling-up of more System VMs.</para>
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/network-offering-usage-record-format.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/network-offering-usage-record-format.xml b/docs/en-US/network-offering-usage-record-format.xml
new file mode 100644
index 0000000..8535544
--- /dev/null
+++ b/docs/en-US/network-offering-usage-record-format.xml
@@ -0,0 +1,18 @@
+<section id="network-offering-usage-record-format">
+	<title>Network Offering Usage Record Format</title>
+	<itemizedlist>
+			<listitem><para>account – name of the account</para></listitem>
+			<listitem><para>accountid – ID of the account</para></listitem>
+			<listitem><para>domainid – ID of the domain in which this account resides</para></listitem>
+			<listitem><para>zoneid – Zone where the usage occurred</para></listitem>
+			<listitem><para>description – A string describing what the usage record is tracking</para></listitem>
+			<listitem><para>usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for hours)</para></listitem>
+			<listitem><para>usagetype – A number representing the usage type (see Usage Types)</para></listitem>
+			<listitem><para>rawusage – A number representing the actual usage in hours</para></listitem>
+			<listitem><para>usageid – ID of the network offering</para></listitem>
+			<listitem><para>offeringid – Network offering ID</para></listitem>
+			<listitem><para>virtualMachineId – The ID of the virtual machine</para></listitem>
+			<listitem><para>startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record</para></listitem>
+		</itemizedlist>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/network-offerings.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/network-offerings.xml b/docs/en-US/network-offerings.xml
new file mode 100644
index 0000000..87af7eb
--- /dev/null
+++ b/docs/en-US/network-offerings.xml
@@ -0,0 +1,28 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="network-offerings">
+    <title>Network Offerings</title>
+    <note><para>For the most up-to-date list of supported network services, see the CloudPlatform UI or call listNetworkServices.</para></note>
+    <para>A network offering is a named set of network services, such as:</para>
+    <itemizedlist>
+        <listitem><para>DHCP</para></listitem>
+        <listitem><para>DNS</para></listitem>
+        <listitem><para>Source NAT</para></listitem>
+        <listitem><para>Static NAT</para></listitem>
+        <listitem><para>Port Forwarding</para></listitem>
+        <listitem><para>Load Balancing</para></listitem>
+        <listitem><para>Firewall</para></listitem>
+        <listitem><para>VPN</para></listitem>
+        <listitem><para>(Optional) Name one of several available providers to use for a given service, such as Juniper for the firewall</para></listitem>
+        <listitem><para>(Optional) Network tag to specify which physical network to use</para></listitem>
+    </itemizedlist>
+    <para>When creating a new VM, the user chooses one of the available network offerings, and that determines which network services the VM can use.</para>
+    <para>The CloudPlatform administrator can create any number of custom network offerings, in addition to the default network offerings provided by CloudPlatform. By creating multiple custom network offerings, you can set up your cloud to offer different classes of service on a single multi-tenant physical network.  For example, while the underlying physical wiring may be the same for two tenants, tenant A may only need simple firewall protection for their website, while tenant B may be running a web server farm and require a scalable firewall solution, load balancing solution, and alternate networks for accessing the database backend.</para>
+    <note><para>If you create load balancing rules while using a network service offering that includes an external load balancer device such as NetScaler, and later change the network service offering to one that uses the CloudPlatform virtual router, you must create a firewall rule on the virtual router for each of your existing load balancing rules so that they continue to function.</para></note>
+    <para>When creating a new virtual network, the CloudPlatform administrator chooses which network offering to enable for that network. Each virtual network is associated with one network offering. A virtual network can be upgraded or downgraded by changing its associated network offering.  If you do this, be sure to reprogram the physical network to match.</para>
+    <para>CloudPlatform also has internal network offerings for use by CloudPlatform system VMs. These network offerings are not visible to users but can be modified by administrators.</para>
+    
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/network-service-providers.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/network-service-providers.xml b/docs/en-US/network-service-providers.xml
new file mode 100644
index 0000000..2c711ca
--- /dev/null
+++ b/docs/en-US/network-service-providers.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="network-service-providers">
+    <title>Network Service Providers</title>
+    <note><para>For the most up-to-date list of supported network service providers, see the CloudPlatform UI or call listNetworkServiceProviders.</para></note>
+    <para>A service provider (also called a network element) is hardware or a virtual appliance that makes a network service possible; for example, a firewall appliance can be installed in the cloud to provide firewall service. On a single network, multiple providers can provide the same network service. For example, a firewall service may be provided by Cisco or Juniper devices in the same physical network.</para>
+    <para>You can have multiple instances of the same service provider in a network (say, more than one Juniper SRX device).</para>
+    <para>If different providers are set up to provide the same service on the network, the administrator can create network offerings so users can specify which network service provider they prefer (along with the other choices offered in network offerings). Otherwise, CloudPlatform will choose which provider to use whenever the service is called for. </para>
+    <formalpara>
+        <title>Supported Network Service Providers</title>
+        <para>CloudPlatform ships with an internal list of the supported service providers, and you can choose from this list when creating a network offering.</para>
+    </formalpara>
+    
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/network-usage-record-format.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/network-usage-record-format.xml b/docs/en-US/network-usage-record-format.xml
new file mode 100644
index 0000000..fbcc883
--- /dev/null
+++ b/docs/en-US/network-usage-record-format.xml
@@ -0,0 +1,17 @@
+<section id="network-usage-record-format">
+	<title>Network Usage Record Format</title>
+	<para>For network usage (bytes sent/received), the following fields exist in a usage record.</para>
+	<itemizedlist>
+			<listitem><para>account – name of the account</para></listitem>
+			<listitem><para>accountid – ID of the account</para></listitem>
+			<listitem><para>domainid – ID of the domain in which this account resides</para></listitem>
+			<listitem><para>zoneid – Zone where the usage occurred</para></listitem>
+			<listitem><para>description – A string describing what the usage record is tracking</para></listitem>
+			<listitem><para>usagetype – A number representing the usage type (see Usage Types)</para></listitem>
+			<listitem><para>rawusage – A number representing the actual usage in hours</para></listitem>
+			<listitem><para>usageid – Device ID (virtual router ID or external device ID)</para></listitem>
+			<listitem><para>type – Device type (domain router, external load balancer, etc.)</para></listitem>
+			<listitem><para>startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record</para></listitem>
+		</itemizedlist>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/networking-in-a-pod.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/networking-in-a-pod.xml b/docs/en-US/networking-in-a-pod.xml
new file mode 100644
index 0000000..08cf36f
--- /dev/null
+++ b/docs/en-US/networking-in-a-pod.xml
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="networking-in-a-pod">
+	<title>Networking in a Pod</title>
+	<para>Figure 2 illustrates network setup within a single pod. The hosts are connected to a pod-level switch. At a minimum, the hosts should have one physical uplink to each switch. Bonded NICs are supported as well. Pod-level switching is provided by a pair of redundant gigabit switches with 10 G uplinks.</para>
+	<mediaobject>
+		<imageobject>
+			<!-- TODO: image filename is missing from fileref -->
+			<imagedata fileref="./images/.png" />
+		</imageobject>
+		<textobject><phrase>Diagram of network setup within a single pod</phrase></textobject>
+	</mediaobject>
+	<para>Servers are connected as follows:</para>
+	<itemizedlist>
+	<listitem><para>Storage devices are connected to only the network that carries management traffic.</para></listitem>
+	<listitem><para>Hosts are connected to networks for both management traffic and public traffic. </para></listitem>
+	<listitem><para>Hosts are also connected to one or more networks carrying guest traffic.</para></listitem>
+	</itemizedlist>
+	<para>We recommend the use of multiple physical Ethernet cards to implement each network interface as well as redundant switch fabric in order to maximize throughput and improve reliability.</para>
+						
+	</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/networking-in-a-zone.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/networking-in-a-zone.xml b/docs/en-US/networking-in-a-zone.xml
new file mode 100644
index 0000000..75dba08
--- /dev/null
+++ b/docs/en-US/networking-in-a-zone.xml
@@ -0,0 +1,18 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="networking-in-a-zone">
+	<title>Networking in a Zone</title>
+	<para>Figure 3 illustrates the network setup within a single zone.</para>
+	<mediaobject>
+		<imageobject>
+			<!-- TODO: image filename is missing from fileref -->
+			<imagedata fileref="./images/.png" />
+		</imageobject>
+		<textobject><phrase>Diagram of network setup within a single zone</phrase></textobject>
+	</mediaobject>
+	<para>A firewall for management traffic operates in the NAT mode. The network typically is assigned IP addresses in the 192.168.0.0/16 Class B private address space. Each pod is assigned IP addresses in the 192.168.*.0/24 Class C private address space.</para>
+	<para>Each zone has its own set of public IP addresses. Public IP addresses from different zones do not overlap.</para>
+						
+	</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/networking-overview.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/networking-overview.xml b/docs/en-US/networking-overview.xml
new file mode 100644
index 0000000..bff714f
--- /dev/null
+++ b/docs/en-US/networking-overview.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="networking-overview">
+		<title>Networking Overview</title>
+		<para>
+			&PRODUCT; offers two types of networking scenarios:
+		</para>
+		<itemizedlist>
+			<listitem><para>Basic. For AWS-style networking. Provides a single network where guest isolation can be provided through layer-3 means such as security groups (IP address source filtering).</para></listitem>
+			<listitem><para>Advanced. For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks.</para></listitem>
+		</itemizedlist>
+		<para>For more details, see Network Setup.</para>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/networking_overview.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/networking_overview.xml b/docs/en-US/networking_overview.xml
new file mode 100644
index 0000000..1080ed7
--- /dev/null
+++ b/docs/en-US/networking_overview.xml
@@ -0,0 +1,12 @@
+	<section id="networking_overview">
+		<title>Networking Overview</title>
+		<para>
+			CloudStack offers two types of networking scenarios:
+		</para>
+		<itemizedlist>
+			<listitem><para>Basic. For AWS-style networking. Provides a single network where guest isolation can be provided through layer-3 means such as security groups (IP address source filtering).</para></listitem>
+			<listitem><para>Advanced. For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks.</para></listitem>
+		</itemizedlist>
+		<para>For more details, see Network Setup.</para>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/nfs-shares-on-management-server.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/nfs-shares-on-management-server.xml b/docs/en-US/nfs-shares-on-management-server.xml
new file mode 100644
index 0000000..dcddc47
--- /dev/null
+++ b/docs/en-US/nfs-shares-on-management-server.xml
@@ -0,0 +1,95 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="nfs-shares-on-management-server">
+	<title>Using the Management Server As the NFS Server</title>
+	<para>This section tells how to set up NFS shares for primary and secondary storage on the same node with the Management Server. 
+		This is more typical of a trial installation, but is technically possible in a larger deployment.
+		It is assumed that you will have less than 16TB of storage on the host.</para> 
+	<para>The exact commands for the following steps may vary depending on your operating system version.</para>
+	<orderedlist>
+		<listitem><para>On the Management Server host, create two directories that you will use for primary and secondary storage. For example:</para>
+		<programlisting>
+# mkdir -p /export/primary
+# mkdir -p /export/secondary
+		</programlisting>
+		</listitem>
+		<listitem><para>To configure the new directories as NFS exports, edit /etc/exports. Export the NFS share(s) with rw,async,no_root_squash. For example:</para>
+			<programlisting># vi /etc/exports</programlisting>
+			<para>Insert the following line.</para>
+			<programlisting>/export  *(rw,async,no_root_squash)</programlisting>
+		</listitem>
+		<listitem><para>Export the /export directory.</para>
+			<programlisting># exportfs -a</programlisting>
+		</listitem>
+		<listitem><para>Edit the /etc/sysconfig/nfs file.</para>
+			<programlisting># vi /etc/sysconfig/nfs</programlisting>
+			<para>Uncomment the following lines:</para>
+			<programlisting>
+LOCKD_TCPPORT=32803
+LOCKD_UDPPORT=32769
+MOUNTD_PORT=892
+RQUOTAD_PORT=875
+STATD_PORT=662
+STATD_OUTGOING_PORT=2020
+			</programlisting>
+		</listitem>
+		<listitem><para>Edit the /etc/sysconfig/iptables file.</para>
+			<programlisting># vi /etc/sysconfig/iptables</programlisting>
+			<para>Add the following lines at the beginning of the INPUT chain:</para>
+			<programlisting>
+-A INPUT -m state --state NEW -p udp --dport 111 -j ACCEPT
+-A INPUT -m state --state NEW -p tcp --dport 111 -j ACCEPT
+-A INPUT -m state --state NEW -p tcp --dport 2049 -j ACCEPT
+-A INPUT -m state --state NEW -p tcp --dport 32803 -j ACCEPT
+-A INPUT -m state --state NEW -p udp --dport 32769 -j ACCEPT
+-A INPUT -m state --state NEW -p tcp --dport 892 -j ACCEPT
+-A INPUT -m state --state NEW -p udp --dport 892 -j ACCEPT
+-A INPUT -m state --state NEW -p tcp --dport 875 -j ACCEPT
+-A INPUT -m state --state NEW -p udp --dport 875 -j ACCEPT
+-A INPUT -m state --state NEW -p tcp --dport 662 -j ACCEPT
+-A INPUT -m state --state NEW -p udp --dport 662 -j ACCEPT				
+			</programlisting>
+		</listitem>
+		<listitem><para>Run the following commands:</para>
+			<programlisting>
+# service iptables restart
+# service iptables save
+			</programlisting>
+		</listitem>
+		<listitem><para>If NFS v4 communication is used between client and server, add your domain to /etc/idmapd.conf on both the hypervisor host and Management Server.</para>
+			<programlisting># vi /etc/idmapd.conf</programlisting>
+			<para>Remove the character # from the beginning of the Domain line in idmapd.conf and replace the value in the file with your own domain. In the example below, the domain is company.com.</para>
+			<programlisting>Domain = company.com</programlisting>
+		</listitem>
+		<listitem><para>Reboot the Management Server host.</para>
+			<para>Two NFS shares called /export/primary and /export/secondary are now set up.</para>
+		</listitem>
+		<listitem><para>It is recommended that you test to be sure the previous steps have been successful.</para>
+			<orderedlist numeration="loweralpha">
+				<listitem><para>Log in to the hypervisor host.</para></listitem>
+				<listitem><para>Be sure NFS and rpcbind are running. The commands might be different depending on your OS. For example:</para>
+					<programlisting>
+# service rpcbind start
+# service nfs start
+# chkconfig nfs on
+# chkconfig rpcbind on
+# reboot						
+					</programlisting>
+				</listitem>
+				<listitem><para>Log back in to the hypervisor host and try to mount the /export directories. For example (substitute your own management server name):</para>
+					<programlisting>
+# mkdir /primarymount
+# mount -t nfs &lt;management-server-name&gt;:/export/primary /primarymount
+# umount /primarymount
+# mkdir /secondarymount
+# mount -t nfs &lt;management-server-name&gt;:/export/secondary /secondarymount
+# umount /secondarymount						
+					</programlisting>
+				</listitem>
+			</orderedlist>
+		</listitem>
+	</orderedlist>
+</section>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/nfs-shares-on-separate-server.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/nfs-shares-on-separate-server.xml b/docs/en-US/nfs-shares-on-separate-server.xml
new file mode 100644
index 0000000..7d70d8c
--- /dev/null
+++ b/docs/en-US/nfs-shares-on-separate-server.xml
@@ -0,0 +1,28 @@
+<section id="nfs-shares-on-separate-server">
+	<title>Using a Separate NFS Server</title>
+	<para>This section tells how to set up NFS shares for secondary and (optionally) primary storage on an NFS server running on a separate node from the Management Server.</para> 
+	<para>The exact commands for the following steps may vary depending on your operating system version.</para>
+	<warning><para>(KVM only) Ensure that no volume is already mounted at your NFS mount point.</para></warning>
+	<orderedlist>
+		<listitem><para>On the storage server, create an NFS share for secondary storage and, if you are using NFS for primary storage as well, create a second NFS share. For example:</para>
+		<programlisting>
+# mkdir -p /export/primary
+# mkdir -p /export/secondary
+		</programlisting>
+		</listitem>
+		<listitem><para>To configure the new directories as NFS exports, edit /etc/exports. Export the NFS share(s) with rw,async,no_root_squash. For example:</para>
+			<programlisting># vi /etc/exports</programlisting>
+			<para>Insert the following line.</para>
+			<programlisting>/export  *(rw,async,no_root_squash)</programlisting>
+		</listitem>
+		<listitem><para>Export the /export directory.</para>
+			<programlisting># exportfs -a</programlisting>
+		</listitem>
+		<listitem><para>On the management server, create a mount point for secondary storage. For example:</para>
+			<programlisting># mkdir -p /mnt/secondary</programlisting>
+		</listitem>
+		<listitem><para>Mount the secondary storage on your Management Server. Replace the example NFS server name and NFS share paths below with your own.</para>
+			<programlisting># mount -t nfs nfsservername:/nfs/share/secondary /mnt/secondary</programlisting>
+		</listitem>
+	</orderedlist>
+</section>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/ongoing-configuration-of-external-firewalls-loadbalancer.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/ongoing-configuration-of-external-firewalls-loadbalancer.xml b/docs/en-US/ongoing-configuration-of-external-firewalls-loadbalancer.xml
new file mode 100644
index 0000000..444a715
--- /dev/null
+++ b/docs/en-US/ongoing-configuration-of-external-firewalls-loadbalancer.xml
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="ongoing-configuration-of-external-firewalls-loadbalancer">
+    <title>Ongoing Configuration of External Firewalls and Load Balancers</title>
+    <para>Additional user actions (e.g. setting a port forward) will cause further programming of the firewall and load balancer.  A user may request additional public IP addresses and forward traffic received at these IPs to specific VMs. This is accomplished by enabling static NAT for a public IP address, assigning the IP to a VM, and specifying a set of protocols and port ranges to open. When a static NAT rule is created, CloudPlatform programs the zone's external firewall with the following objects:</para>
+    <itemizedlist>
+        <listitem><para>A static NAT rule that maps the public IP address to the private IP address of a VM.</para></listitem>
+        <listitem><para>A security policy that allows traffic within the set of protocols and port ranges that are specified.</para></listitem>
+        <listitem><para>A firewall filter counter that measures the number of bytes of incoming traffic to the public IP.</para></listitem>
+    </itemizedlist>
+    <para>The number of incoming and outgoing bytes through source NAT, static NAT, and load balancing rules is measured and saved on each external element. This data is collected on a regular basis and stored in the CloudPlatform database.</para>
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/over-provisioning-service-offering-limits.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/over-provisioning-service-offering-limits.xml b/docs/en-US/over-provisioning-service-offering-limits.xml
new file mode 100644
index 0000000..17704bc
--- /dev/null
+++ b/docs/en-US/over-provisioning-service-offering-limits.xml
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="over-provisioning-service-offering-limits">
+    <title>Over-Provisioning and Service Offering Limits</title>
+    <para>CloudPlatform performs CPU over-provisioning based on an over-provisioning ratio configured by the administrator.  This is defined by the cpu.overprovisioning.factor global configuration variable.</para>
+    <para>Service offering limits (e.g. 1 GHz, 1 core) are strictly enforced for core count.  For example, a guest with a service offering of one core will have only one core available to it regardless of other activity on the Host.  </para>
+    <para>Service offering limits for gigahertz are enforced only in the presence of contention for CPU resources.  For example, suppose that a guest was created with a service offering of 1 GHz on a Host that has 2 GHz cores, and that guest is the only guest running on the Host.  The guest will have the full 2 GHz available to it.  When multiple guests are attempting to use the CPU a weighting factor is used to schedule CPU resources.  The weight is based on the clock speed in the service offering.  Guests receive a CPU allocation that is proportionate to the GHz in the service offering.   For example, a guest created from a 2 GHz service offering will receive twice the CPU allocation as a guest created from a 1 GHz service offering. CloudPlatform does not perform memory over-provisioning.</para>
+    </section>     
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/per-domain-limits.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/per-domain-limits.xml b/docs/en-US/per-domain-limits.xml
new file mode 100644
index 0000000..af770a5
--- /dev/null
+++ b/docs/en-US/per-domain-limits.xml
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="per-domain-limits">
+    <title>Per-Domain Limits</title>
+    <para>CloudPlatform allows the configuration of limits on a domain basis.   With a domain limit in place, all users still have their account limits.  They are additionally limited, as a group, to not exceed the resource limits set on their domain.  Domain limits aggregate the usage of all accounts in the domain as well as all accounts in all subdomains of that domain. Limits set at the root domain level apply to the sum of resource usage by the accounts in all domains and sub-domains below that root domain.</para>
+    <para>To set a domain limit:</para>
+    <orderedlist>
+        <listitem><para>Log in to the CloudPlatform UI.</para></listitem>
+        <listitem><para>In the left navigation tree, click Domains.</para></listitem>
+        <listitem><para>Select the domain you want to modify. The current domain limits are displayed. A value of -1 shows that there is no limit in place.</para></listitem>
+        <listitem><para>Click the Edit button<inlinemediaobject>
+            <imageobject>
+                <imagedata fileref="./images/edit-icon.png" />
+            </imageobject>
+            <textobject><phrase>editbutton.png: edits the settings.</phrase></textobject>
+        </inlinemediaobject></para></listitem>
+    </orderedlist>    
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/performance-monitoring.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/performance-monitoring.xml b/docs/en-US/performance-monitoring.xml
new file mode 100644
index 0000000..d1c3fda
--- /dev/null
+++ b/docs/en-US/performance-monitoring.xml
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="performance-monitoring">
+    <title>Performance Monitoring</title>
+    <para>Host and guest performance monitoring is available to end users and administrators. This allows the user to monitor their utilization of resources and determine when it is appropriate to choose a more powerful service offering or larger disk.</para>
+   </section>
+      

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/pod-add.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/pod-add.xml b/docs/en-US/pod-add.xml
new file mode 100644
index 0000000..d6f018a
--- /dev/null
+++ b/docs/en-US/pod-add.xml
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="pod-add">
+	<title>Adding a Pod</title>
+	<para>TODO</para>
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/port-forwarding.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/port-forwarding.xml b/docs/en-US/port-forwarding.xml
new file mode 100644
index 0000000..06beaf3
--- /dev/null
+++ b/docs/en-US/port-forwarding.xml
@@ -0,0 +1,33 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="port-forwarding">
+	<title>Port Forwarding</title>
+	<para>A port forward service is a set of port forwarding rules that define a policy. A port forward service is then applied to one or more guest VMs. The guest VM then has its inbound network access managed according to the policy defined by the port forwarding service.  You can optionally specify one or more CIDRs to filter the source IPs. This is useful when you want to allow only incoming requests from certain IP addresses to be forwarded.</para>
+	<para>A guest VM can be in any number of port forward services. Port forward services can be defined but have no members. If a guest VM is part of more than one network, port forwarding rules will function only if they are defined on the default network.</para>
+	<para>You cannot use port forwarding to open ports for an elastic IP address. When elastic IP is used, outside access is instead controlled through the use of security groups. See Security Groups.</para>
+	<para>To set up port forwarding:</para>
+	<orderedlist>
+		<listitem><para>Log in to the CloudPlatform UI as an administrator or end user.</para></listitem>
+		<listitem><para>If you have not already done so, add a public IP address range to a zone in CloudPlatform. See Adding a Zone and Pod in the Installation Guide.</para></listitem>
+		<listitem><para>Add one or more VM instances to CloudPlatform.</para></listitem>
+		<listitem><para>In the left navigation bar, click Network.</para></listitem>
+		<listitem><para>Click the name of the guest network where the VMs are running.</para>
+		</listitem>
+		<listitem><para>Choose an existing IP address or acquire a new IP address. (See Acquiring a New IP Address on page 73.) Click the name of the IP address in the list.</para></listitem>
+		<listitem><para>Click the Configuration tab.</para></listitem>
+		<listitem><para>In the Port Forwarding node of the diagram, click View All.</para></listitem>
+		<listitem><para>Fill in the following:</para>
+		<itemizedlist>
+			<listitem><para><emphasis role="bold">Public Port</emphasis>. The port to which public traffic will be
+						addressed on the IP address you acquired in the previous step.</para></listitem>
+			<listitem><para><emphasis role="bold">Private Port</emphasis>. The port on which the instance is listening for
+						forwarded public traffic.</para></listitem>
+			<listitem><para><emphasis role="bold">Protocol</emphasis>. The communication protocol in use between the two
+						ports.</para></listitem>
+		</itemizedlist></listitem>
+		<listitem><para>Click Add.</para></listitem>		
+	</orderedlist>
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/prepare-nfs-shares.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/prepare-nfs-shares.xml b/docs/en-US/prepare-nfs-shares.xml
new file mode 100644
index 0000000..b0b0e38
--- /dev/null
+++ b/docs/en-US/prepare-nfs-shares.xml
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="prepare-nfs-shares">
+	<title>Prepare NFS Shares</title>
+	<para>&PRODUCT; needs a place to keep primary and secondary storage (see Cloud Infrastructure Overview). Both of these can be NFS shares. This section tells how to set up the NFS shares before adding the storage to &PRODUCT;.</para>
+	<para>For primary storage, you can use iSCSI instead.</para> 
+	<para>The requirements for primary and secondary storage are described in:</para>
+	<itemizedlist>
+		<listitem><para>About Primary Storage</para></listitem>
+		<listitem><para>About Secondary Storage</para></listitem>
+	</itemizedlist>
+	<para>A production installation typically uses a separate NFS server. See <xref linkend="nfs-shares-on-separate-server"/>.</para>
+	<para>You can also use the Management Server node as the NFS server. This is more typical of a trial installation, but is technically possible in a larger deployment.
+		See <xref linkend="nfs-shares-on-management-server"/>.
+	</para>
+	<xi:include href="nfs-shares-on-separate-server.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+	<xi:include href="nfs-shares-on-management-server.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+</section>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/prepare-os.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/prepare-os.xml b/docs/en-US/prepare-os.xml
new file mode 100644
index 0000000..0e23315
--- /dev/null
+++ b/docs/en-US/prepare-os.xml
@@ -0,0 +1,86 @@
+	<section id="prepare-os">
+		<title>Prepare the Operating System</title>
+		<para>
+			The OS must be prepared to host the Management Server using the following steps. These steps must be performed on each Management Server node.
+		</para>
+			<orderedlist>
+				<listitem><para>Log in to your OS as root.</para></listitem>
+				<listitem><para>Check for a fully qualified hostname.</para>
+					<programlisting># hostname --fqdn</programlisting>
+					<para>This should return a fully qualified hostname such as "kvm1.lab.example.org".  If it does not, edit /etc/hosts so that it does.</para>
+				</listitem>
+				<listitem><para>Set SELinux to be permissive by default.</para>
+					<orderedlist numeration="loweralpha">
+						<listitem><para>Check to see whether SELinux is installed on your machine. If not, you can skip to step 4.</para>
+							<para>In RHEL or CentOS, SELinux is installed and enabled by default. You can verify this with:</para>
+							<programlisting># rpm -qa | grep selinux</programlisting>
+							<para condition="community">In Ubuntu, SELinux is not installed by default. You can verify this with:</para>
+							<programlisting condition="community"># dpkg --list 'selinux'</programlisting>
+						</listitem>
+						<listitem><para>Set the SELINUX variable in /etc/selinux/config to “permissive”. This ensures that the permissive setting will be maintained after a system reboot.</para>
+							<para condition="community">In RHEL or CentOS:</para>
+							<programlisting># vi /etc/selinux/config</programlisting>
+							<para condition="community">In Ubuntu (do this step only if SELinux was found on the machine in the previous step):</para>
+							<programlisting condition="community"># selinux-config-enforcing permissive</programlisting>					
+						</listitem>
+						<listitem><para>Then set SELinux to permissive starting immediately, without requiring a system reboot.</para>
+							<para>In CentOS:</para>
+							<programlisting># setenforce permissive</programlisting>
+							<para>In RHEL:</para>
+							<programlisting># setenforce 0</programlisting>
+							<para condition="community">In Ubuntu (do this step only if SELinux was found on the machine):</para>
+							<programlisting condition="community"># setenforce permissive</programlisting>
+						</listitem>
+					</orderedlist>
+				</listitem>
+				<listitem><para>Make sure that the machine can reach the Internet.</para>
+					<programlisting># ping www.google.com</programlisting>
+				</listitem>
+				<listitem><para>(RHEL 6.2) If you do not have a Red Hat Network account, you need to prepare a local Yum repository.</para>
+					<orderedlist numeration="loweralpha">
+						<listitem><para>If you are working with a physical host, insert the RHEL 6.2 installation CD. If you are using a VM, attach the RHEL6 ISO.</para></listitem>
+						<listitem><para>Mount the CDROM to /media.</para></listitem>
+						<listitem><para>Create a repo file at /etc/yum.repos.d/rhel6.repo. In the file, insert the following lines:</para>
+							<programlisting>
+[rhel]
+name=rhel6
+baseurl=file:///media
+enabled=1
+gpgcheck=0	
+							</programlisting>
+						</listitem>
+					</orderedlist>
+				</listitem>
+				<listitem><para>Turn on NTP for time synchronization.</para>
+					<note><para>NTP is required to synchronize the clocks of the servers in your cloud.</para></note>
+					<orderedlist numeration="loweralpha">
+						<listitem><para>Install NTP.</para>
+							<para condition="community">On RHEL or CentOS:</para>
+							<programlisting># yum install ntp</programlisting>
+							<para condition="community">On Ubuntu:</para>
+							<programlisting condition="community"># apt-get install ntp</programlisting>
+						</listitem>
+						<listitem><para>Edit the NTP configuration file to point to your NTP server.</para>
+							<programlisting># vi /etc/ntp.conf</programlisting>
+							<para>Add one or more server lines in this file with the names of the NTP servers 
+								you want to use. For example:</para>
+							<programlisting>server 0.xenserver.pool.ntp.org
+server 1.xenserver.pool.ntp.org
+server 2.xenserver.pool.ntp.org
+server 3.xenserver.pool.ntp.org
+</programlisting>
+						</listitem>
+						<listitem><para>Restart the NTP client.</para>
+							<programlisting># service ntpd restart</programlisting>
+						</listitem>
+						<listitem><para>Make sure NTP will start again upon reboot.</para>
+							<para condition="community">On RHEL or CentOS:</para>
+							<programlisting># chkconfig ntpd on</programlisting>
+							<para condition="community">On Ubuntu:</para>
+							<programlisting># chkconfig ntp on</programlisting>
+						</listitem>
+					</orderedlist>
+				</listitem>
+				<listitem><para>Repeat all of these steps on every host where the Management Server will be installed.</para></listitem>
+			</orderedlist>
+	</section>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/prepare-system-vm-template.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/prepare-system-vm-template.xml b/docs/en-US/prepare-system-vm-template.xml
new file mode 100644
index 0000000..060b892
--- /dev/null
+++ b/docs/en-US/prepare-system-vm-template.xml
@@ -0,0 +1,37 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="prepare-system-vm-template">
+	<title>Prepare the System VM Template</title>
+	<para>Secondary storage must be seeded with a template that is used for &PRODUCT; system VMs.</para>
+	<note><para>When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text.</para></note>
+	<orderedlist>
+		<listitem><para>On the Management Server, run one or more of the following cloud-install-sys-tmplt commands to retrieve and decompress the system VM template. Run the command for each hypervisor type that you expect end users to run in this Zone.</para>
+			<para>If your secondary storage mount point is not named /mnt/secondary, substitute your own mount point name.</para>
+			<para>If you set the &PRODUCT; database encryption type to "web" when you set up the database, you must now add the parameter -s &lt;management-server-secret-key&gt;. See About Password and Key Encryption.</para>
+			<para>This process will require approximately 5 GB of free space on the local file system and up to 30 minutes each time it runs.</para>
+			<itemizedlist>
+				<listitem><para>For XenServer:</para>
+					<programlisting># /usr/lib64/cloud/agent/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2 -h xenserver -s &lt;optional-management-server-secret-key&gt; -F</programlisting>
+				</listitem>
+				<listitem><para>For vSphere:</para>
+					<programlisting># /usr/lib64/cloud/agent/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.ova -h vmware -s &lt;optional-management-server-secret-key&gt;  -F</programlisting>
+				</listitem>
+				<listitem><para>For KVM:</para>
+					<programlisting># /usr/lib64/cloud/agent/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -s &lt;optional-management-server-secret-key&gt; -F</programlisting>
+				</listitem>
+			</itemizedlist>
+		</listitem>
+		<listitem><para>If you are using a separate NFS server, perform this step. If you are using the Management Server as the NFS server, you MUST NOT perform this step.</para>
+			<para>When the script has finished, unmount secondary storage and remove the created directory.</para>
+			<programlisting>
+# umount /mnt/secondary 
+# rmdir /mnt/secondary
+			</programlisting>
+		</listitem>
+		<listitem><para>Repeat these steps for each secondary storage server.</para>
+		</listitem>
+	</orderedlist>
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/prepare_os.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/prepare_os.xml b/docs/en-US/prepare_os.xml
new file mode 100644
index 0000000..782a0b2
--- /dev/null
+++ b/docs/en-US/prepare_os.xml
@@ -0,0 +1,84 @@
+	<section id="prepare_os">
+		<title>Prepare the Operating System</title>
+		<para>
+			The OS must be prepared to host the Management Server using the following steps. These steps must be performed on each Management Server node.
+		</para>
+			<orderedlist>
+				<listitem><para>Log in to your OS as root.</para></listitem>
+				<listitem><para>Check for a fully qualified hostname.</para>
+					<programlisting># hostname --fqdn</programlisting>
+					<para>This should return a fully qualified hostname such as "kvm1.lab.example.org".  If it does not, edit /etc/hosts so that it does.</para>
+				</listitem>
+				<listitem><para>Set SELinux to be permissive by default.</para>
+					<orderedlist numeration="loweralpha">
+						<listitem><para>Check to see whether SELinux is installed on your machine. If not, you can skip to step 4.</para>
+							<para>In RHEL or CentOS, SELinux is installed and enabled by default. You can verify this with:</para>
+							<programlisting># rpm -qa | grep selinux</programlisting>
+							<para>In Ubuntu, SELinux is not installed by default. You can verify this with:</para>
+							<programlisting># dpkg --list 'selinux'</programlisting>
+						</listitem>
+						<listitem><para>Set the SELINUX variable in /etc/selinux/config to “permissive”. This ensures that the permissive setting will be maintained after a system reboot.</para>
+							<para>In RHEL or CentOS:</para>
+							<programlisting># vi /etc/selinux/config</programlisting>
+							<para>In Ubuntu (do this step only if SELinux was found on the machine in the previous step):</para>
+							<programlisting># selinux-config-enforcing permissive</programlisting>					
+						</listitem>
+						<listitem><para>Then set SELinux to permissive starting immediately, without requiring a system reboot.</para>
+							<para>In CentOS:</para>
+							<programlisting># setenforce permissive</programlisting>
+							<para>In RHEL:</para>
+							<programlisting># setenforce 0</programlisting>
+							<para>In Ubuntu (do this step only if SELinux was found on the machine):</para>
+							<programlisting># setenforce permissive</programlisting>
+						</listitem>
+					</orderedlist>
+				</listitem>
+				<listitem><para>Make sure that the Management Server can reach the Internet.</para>
+					<programlisting># ping www.google.com</programlisting>
+				</listitem>
+				<listitem><para>(RHEL 6.2) If you do not have a Red Hat Network account, you need to prepare a local Yum repository.</para>
+					<orderedlist numeration="loweralpha">
+						<listitem><para>If you are working with a physical host, insert the RHEL 6.2 installation CD. If you are using a VM, attach the RHEL6 ISO.</para></listitem>
+						<listitem><para>Mount the CDROM to /media.</para></listitem>
+						<listitem><para>Create a repo file at /etc/yum.repos.d/rhel6.repo. In the file, insert the following lines:</para>
+							<programlisting>
+[rhel]
+name=rhel6
+baseurl=file:///media
+enabled=1
+gpgcheck=0	
+							</programlisting>
+						</listitem>
+					</orderedlist>
+				</listitem>
+				<listitem><para>Turn on NTP for time synchronization.</para>
+					<note><para>NTP is required to synchronize the clocks of the servers in your cloud.</para></note>
+					<orderedlist numeration="loweralpha">
+						<listitem><para>Install NTP.</para>
+							<para>On RHEL or CentOS:</para>
+							<programlisting># yum install ntp</programlisting>
+							<para>On Ubuntu:</para>
+							<programlisting># apt-get install ntp</programlisting>
+						</listitem>
+						<listitem><para>Edit the NTP configuration file to point to your NTP server.</para>
+							<programlisting># vi /etc/ntp.conf</programlisting>
+							<para>For example, you can use one of the following:</para>
+							<programlisting>0.xenserver.pool.ntp.org
+1.xenserver.pool.ntp.org
+2.xenserver.pool.ntp.org
+3.xenserver.pool.ntp.org
+</programlisting>
+						</listitem>
+						<listitem><para>Restart the NTP client.</para>
+							<programlisting># service ntpd restart</programlisting>
+						</listitem>
+						<listitem><para>Make sure NTP will start again upon reboot.</para>
+							<para>On RHEL or CentOS:</para>
+							<programlisting># chkconfig ntpd on</programlisting>
+							<para>On Ubuntu:</para>
+							<programlisting># chkconfig ntp on</programlisting>
+						</listitem>
+					</orderedlist>
+				</listitem>
+			</orderedlist>
+	</section>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/primary-storage-add.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/primary-storage-add.xml b/docs/en-US/primary-storage-add.xml
new file mode 100644
index 0000000..f8b49a9
--- /dev/null
+++ b/docs/en-US/primary-storage-add.xml
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="primary-storage-add">
+	<title>Adding Primary Storage</title>
+	<para>TODO</para>
+</section>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/primary-storage-outage-and-data-loss.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/primary-storage-outage-and-data-loss.xml b/docs/en-US/primary-storage-outage-and-data-loss.xml
new file mode 100644
index 0000000..1e2e588
--- /dev/null
+++ b/docs/en-US/primary-storage-outage-and-data-loss.xml
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="primary-storage-outage-and-data-loss">
+    <title>Primary Storage Outage and Data Loss</title>
+    <para>When a primary storage outage occurs the hypervisor immediately stops all VMs stored on that storage device. Guests that are marked for HA will be restarted as soon as practical when the primary storage comes back on line. With NFS, the hypervisor may allow the virtual machines to continue running depending on the nature of the issue. For example, an NFS hang will cause the guest VMs to be suspended until storage connectivity is restored. Primary storage is not designed to be backed up. Individual volumes in primary storage can be backed up using snapshots.</para>
+   </section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/primary-storage.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/primary-storage.xml b/docs/en-US/primary-storage.xml
new file mode 100644
index 0000000..25afc21
--- /dev/null
+++ b/docs/en-US/primary-storage.xml
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="primary-storage">
+		<title>Primary Storage</title>
+		<para>This section gives concepts and technical details about CloudPlatform primary storage. For information about how to install and configure primary storage through the CloudPlatform UI, see the Advanced Installation Guide.</para>
+	</section>
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/private-public-template.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/private-public-template.xml b/docs/en-US/private-public-template.xml
new file mode 100644
index 0000000..1acc91a
--- /dev/null
+++ b/docs/en-US/private-public-template.xml
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="private-public-template">
+	<title>Private and Public Templates</title>
+	<para>When a user creates a template, it can be designated private or public.</para>
+	<para>Private templates are only available to the user who created them. By default, an uploaded template is private.</para>
+	<para>When a user marks a template as “public,” the template becomes available to all users in all accounts in the user's domain, as well as users in any other domains that have access to the Zone where the template is stored. This depends on whether the Zone, in turn, was defined as private or public. A private Zone is assigned to a single domain, and a public Zone is accessible to any domain. If a public template is created in a private Zone, it is available only to users in the domain assigned to that Zone. If a public template is created in a public Zone, it is available to all users in all domains.</para>
+	
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/projects.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/projects.xml b/docs/en-US/projects.xml
new file mode 100644
index 0000000..37ca7b1
--- /dev/null
+++ b/docs/en-US/projects.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="projects">
+    <title>Projects</title>
+    <para>Projects are used to organize people and resources. CloudPlatform users within a single domain can group themselves into project teams so they can collaborate and share virtual resources such as VMs, snapshots, templates, data disks, and IP addresses. CloudPlatform tracks resource usage per project as well as per user, so the usage can be billed to either a user account or a project. For example, a private cloud within a software company might have all members of the QA department assigned to one project, so the company can track the resources used in testing while the project members can more easily isolate their efforts from other users of the same cloud.</para>
+    <para>You can configure CloudPlatform to allow any user to create a new project, or you can restrict that ability to just CloudPlatform administrators. Once you have created a project, you become that project’s administrator, and you can add others within your domain to the project. CloudPlatform can be set up either so that you can add people directly to a project, or so that you have to send an invitation which the recipient must accept. Project members can view and manage all virtual resources created by anyone in the project (for example, share VMs). A user can be a member of any number of projects and can switch views in the CloudPlatform UI to show only project-related information, such as project VMs, fellow project members, project-related alerts, and so on.</para>
+    <para>The project administrator can pass on the role to another project member.  The project administrator can also add more members, remove members from the project, set new resource limits (as long as they are below the global defaults set by the CloudPlatform administrator), and delete the project. When the administrator removes a member from the project, resources created by that user, such as VM instances, remain with the project. This brings us to the subject of resource ownership and which resources can be used by a project.</para>
+    <para>Resources created within a project are owned by the project, not by any particular CloudPlatform account, and they can be used only within the project. A user who belongs to one or more projects can still create resources outside of those projects, and those resources belong to the user’s account; they will not be counted against the project’s usage or resource limits. You can create project-level networks to isolate traffic within the project and provide network services such as port forwarding, load balancing, VPN, and static NAT. A project can also make use of certain types of resources from outside the project, if those resources are shared. For example, a shared network or public template is available to any project in the domain. A project can get access to a private template if the template’s owner will grant permission. A project can use any service offering or disk offering available in its domain; however, you cannot create private service and disk offerings at the project level.</para>
+</section>
+      

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/provisioning-auth-api.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/provisioning-auth-api.xml b/docs/en-US/provisioning-auth-api.xml
new file mode 100644
index 0000000..d7c5184
--- /dev/null
+++ b/docs/en-US/provisioning-auth-api.xml
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="provisioning-auth-api">
+    <title>Provisioning and Authentication API</title>
+    <para>CloudPlatform expects that a customer will have their own user provisioning infrastructure. It provides APIs to integrate with these existing systems where the systems call out to CloudPlatform to add/remove users.</para>
+    <para>CloudPlatform supports pluggable authenticators. By default, CloudPlatform assumes it is provisioned with the user’s password, and as a result authentication is done locally. However, external authentication is possible as well. For example, see Using an LDAP Server for User Authentication.</para>
+   </section>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/provisioning-steps-overview.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/provisioning-steps-overview.xml b/docs/en-US/provisioning-steps-overview.xml
new file mode 100644
index 0000000..3c1d465
--- /dev/null
+++ b/docs/en-US/provisioning-steps-overview.xml
@@ -0,0 +1,27 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="provisioning-steps-overview">
+	<title>Overview of Provisioning Steps</title>
+	<para>After the Management Server is installed and running, you can add the compute resources for it to manage. For an overview of how a &PRODUCT; cloud infrastructure is organized, see <xref linkend="cloud-infrastructure-overview" />.</para>
+	<para>To provision the cloud infrastructure, or to scale it up at any time, follow these procedures:</para>
+	<orderedlist>
+		<listitem><para>Change the root password. See <xref linkend="changing-root-password" />.</para></listitem>
+		<listitem><para>Add a zone. See <xref linkend="zone-add" />.</para></listitem>
+		<listitem><para>Add more pods (optional). See <xref linkend="pod-add" />.</para></listitem>
+		<listitem><para>Add more clusters (optional). See <xref linkend="cluster-add" />.</para></listitem>
+		<listitem><para>Add more hosts (optional). See <xref linkend="host-add" />.</para></listitem>
+		<listitem><para>Add primary storage. See <xref linkend="primary-storage-add" />.</para></listitem>
+		<listitem><para>Add secondary storage. See <xref linkend="secondary-storage-add" />.</para></listitem>
+		<listitem><para>Initialize and test the new cloud. See <xref linkend="initialize-and-test" />.</para></listitem>
+	</orderedlist>
+	<para>When you have finished these steps, you will have a deployment with the following basic structure:</para>
+	<mediaobject>
+		<imageobject>
+			<imagedata fileref="./images/provisioning-overview.png" />
+		</imageobject>
+		<textobject><phrase>provisioning-overview.png: Conceptual overview of a basic deployment</phrase></textobject>
+	</mediaobject>
+</section>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e874fdb2/docs/en-US/provisioning-steps.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/provisioning-steps.xml b/docs/en-US/provisioning-steps.xml
new file mode 100644
index 0000000..62ab9fa
--- /dev/null
+++ b/docs/en-US/provisioning-steps.xml
@@ -0,0 +1,18 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "file:///C:/Program%20Files%20(x86)/Publican/DocBook_DTD/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+<section id="provisioning-steps">
+	<title>Steps to Provisioning Your Cloud Infrastructure</title>
+	<para>This section tells how to add zones, pods, clusters, hosts, storage, and networks to your cloud. If you are unfamiliar with these entities, please begin by looking through <xref linkend="cloud-infrastructure-concepts" />.</para>
+    <xi:include href="provisioning-steps-overview.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="changing-root-password.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="zone-add.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="pod-add.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="cluster-add.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="host-add.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="primary-storage-add.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="secondary-storage-add.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="initialize-and-test.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+</section>
\ No newline at end of file


Mime
View raw message