hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sur...@apache.org
Subject svn commit: r1413992 [2/3] - in /hadoop/common/branches/branch-1-win: ./ bin/ conf/ src/packages/win/ src/packages/win/HadoopServiceHost/ src/packages/win/HadoopServiceHost/Properties/ src/packages/win/resources/ src/packages/win/scripts/ src/packages/...
Date Tue, 27 Nov 2012 04:13:26 GMT
Added: hadoop/common/branches/branch-1-win/src/packages/win/scripts/InstallApi.psm1
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/packages/win/scripts/InstallApi.psm1?rev=1413992&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/packages/win/scripts/InstallApi.psm1 (added)
+++ hadoop/common/branches/branch-1-win/src/packages/win/scripts/InstallApi.psm1 Tue Nov 27 04:13:19 2012
@@ -0,0 +1,1325 @@
+### Licensed to the Apache Software Foundation (ASF) under one or more
+### contributor license agreements.  See the NOTICE file distributed with
+### this work for additional information regarding copyright ownership.
+### The ASF licenses this file to You under the Apache License, Version 2.0
+### (the "License"); you may not use this file except in compliance with
+### the License.  You may obtain a copy of the License at
+###
+###     http://www.apache.org/licenses/LICENSE-2.0
+###
+### Unless required by applicable law or agreed to in writing, software
+### distributed under the License is distributed on an "AS IS" BASIS,
+### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+### See the License for the specific language governing permissions and
+### limitations under the License.
+
+###
+### A set of basic PowerShell routines that can be used to install and
+### manage Hadoop services on a single node. For use-case see install.ps1.
+###
+
###
### Global variables
###

### Folder containing this script module; used to locate install resources.
$ScriptDir = Resolve-Path (Split-Path $MyInvocation.MyCommand.Path)

### Hadoop version string; the "@version@" token is substituted with the real
### version at package build time.
$HadoopCoreVersion = "@version@"

###
### Uncomment and update below section for testing from sources
###
#$HadoopCoreVersion = "1.1.0-SNAPSHOT"
###
### End of testing section
###

### core-site.xml properties for folders that should be ACLed and deleted on
### uninstall
$CorePropertyFolderList = @("fs.checkpoint.dir", "fs.checkpoint.edits.dir")

### hdfs-site.xml properties for folders (ACLed on configure, deleted on
### uninstall)
$HdfsPropertyFolderList = @("dfs.name.dir", "dfs.data.dir")

### mapred-site.xml properties for folders (ACLed on configure, deleted on
### uninstall)
$MapRedPropertyFolderList = @("mapred.local.dir")
+
### Returns the value of the given propertyName from the given Hadoop-style
### xml configuration file, or $null when the file or the property does not
### exist.
###
### Arguments:
###     xmlFileName: Xml file full path
###     propertyName: Name of the property to retrieve
function FindXmlPropertyValue(
    [string]
    [parameter( Position=0, Mandatory=$true )]
    $xmlFileName,
    [string]
    [parameter( Position=1, Mandatory=$true )]
    $propertyName)
{
    $value = $null

    if ( Test-Path $xmlFileName )
    {
        $xml = [xml] (Get-Content $xmlFileName)
        ### Last matching <property> wins, mirroring Hadoop's later-definition-
        ### overrides-earlier semantics.
        $xml.SelectNodes('/configuration/property') | ? { $_.name -eq $propertyName } | % { $value = $_.value }
        ### Removed "$xml.ReleasePath": XmlDocument has no such member, so the
        ### bare expression evaluated to $null and emitted it into the output
        ### stream, making this function return ($null, $value) instead of a
        ### scalar. Get-Content has already closed the file; no cleanup needed.
    }

    $value
}
+
### Helper that filters out $null so callers can foreach over a possibly-$null
### collection; without it, foreach over $null iterates once with a $null item.
function empty-null($obj)
{
   if ($null -ne $obj) { $obj }
}
+
### Grants the given user or group full control over the given folder,
### inherited by all child objects and containers, via icacls.
function GiveFullPermissions(
    [String]
    [Parameter( Position=0, Mandatory=$true )]
    $folder,
    [String]
    [Parameter( Position=1, Mandatory=$true )]
    $username)
{
    Write-Log "Giving user/group `"$username`" full permissions to `"$folder`""
    ### (OI)(CI)F = object-inherit + container-inherit, Full access
    $aclCmd = "icacls `"$folder`" /grant ${username}:(OI)(CI)F"
    Invoke-CmdChk $aclCmd
}
+
### Validates that every space separated role in $roles appears in the
### $supportedRoles array; throws on the first role outside of that set.
function CheckRole(
    [string]
    [parameter( Position=0, Mandatory=$true )]
    $roles,
    [array]
    [parameter( Position=1, Mandatory=$true )]
    $supportedRoles
    )
{
    foreach ( $candidate in $roles.Split(" ") )
    {
        if ( $supportedRoles -notcontains $candidate )
        {
            throw "CheckRole: Passed in role `"$candidate`" is outside of the supported set `"$supportedRoles`""
        }
    }
}
+
###############################################################################
###
### Installs Hadoop Core component.
###
### Extracts the Hadoop distribution under nodeInstallRoot, copies the
### streaming jar and template configs/scripts into place, sets HADOOP_HOME
### at machine scope, and ACLs the install and logs folders.
###
### Arguments:
###     nodeInstallRoot: Target install folder (for example "C:\Hadoop")
###     serviceCredential: Credential object used for service creation
###
###############################################################################
function InstallCore(
    [String]
    [Parameter( Position=0, Mandatory=$true )]
    $nodeInstallRoot,
    [System.Management.Automation.PSCredential]
    [Parameter( Position=1, Mandatory=$true )]
    $serviceCredential
    )
{
    $username = $serviceCredential.UserName
    
    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $ScriptDir "hadoop-@version@.winpkg.log"
    Test-JavaHome

    ### $hadoopInstallToDir: the directory that contains the application, after unzipping
    $hadoopInstallToDir = Join-Path "$nodeInstallRoot" "hadoop-$HadoopCoreVersion"
    $hadoopInstallToBin = Join-Path "$hadoopInstallToDir" "bin"

    Write-Log "hadoopInstallToDir: $hadoopInstallToDir"
    Write-Log "hadoopInstallToBin: $hadoopInstallToBin" 
    Write-Log "Username: $username"

    ###
    ### Set HADOOP_HOME environment variable
    ###
    ### Set at machine scope for other processes and services, and also in the
    ### current process so the rest of this installation can use it right away.
    Write-Log "Setting the HADOOP_HOME environment variable at machine scope to `"$hadoopInstallToDir`""
    [Environment]::SetEnvironmentVariable("HADOOP_HOME", $hadoopInstallToDir, [EnvironmentVariableTarget]::Machine)
    $ENV:HADOOP_HOME = "$hadoopInstallToDir"

    ###
    ### Begin install
    ###
    Write-Log "Installing Apache Hadoop Core hadoop-$HadoopCoreVersion to $nodeInstallRoot"
    
    ### Create Node Install Root directory
    if( -not (Test-Path "$nodeInstallRoot"))
    {
        Write-Log "Creating Node Install Root directory: `"$nodeInstallRoot`""
        $cmd = "mkdir `"$nodeInstallRoot`""
        Invoke-CmdChk $cmd
    }

    ###
    ###  Unzip Hadoop distribution from compressed archive
    ###
    Write-Log "Extracting Hadoop Core archive into $hadoopInstallToDir"
    if ( Test-Path ENV:UNZIP_CMD )
    {
        ### Use external unzip command if given. @SRC/@DEST placeholders in
        ### UNZIP_CMD are replaced with the quoted archive and target paths.
        $unzipExpr = $ENV:UNZIP_CMD.Replace("@SRC", "`"$HDP_RESOURCES_DIR\hadoop-$HadoopCoreVersion.zip`"")
        $unzipExpr = $unzipExpr.Replace("@DEST", "`"$nodeInstallRoot`"")
        ### We ignore the error code of the unzip command for now to be
        ### consistent with prior behavior.
        Invoke-Ps $unzipExpr
    }
    else
    {
        ### Fall back to the Windows Shell COM object for extraction.
        ### CopyHere option 20 = 4 + 16, presumably "no progress dialog" +
        ### "yes to all" per Shell.Application docs — TODO confirm.
        $shellApplication = new-object -com shell.application
        $zipPackage = $shellApplication.NameSpace("$HDP_RESOURCES_DIR\hadoop-$HadoopCoreVersion.zip")
        $destinationFolder = $shellApplication.NameSpace($nodeInstallRoot)
        $destinationFolder.CopyHere($zipPackage.Items(), 20)
    }

    ###
    ### Copy the streaming jar to the Hadoop lib directory
    ###
    Write-Log "Copying the streaming jar to the Hadoop lib directory"
    $xcopyStreaming_cmd = "copy /Y `"$hadoopInstallToDir\contrib\streaming\hadoop-streaming-$HadoopCoreVersion.jar`" `"$hadoopInstallToDir\lib\hadoop-streaming.jar`""
    Invoke-CmdChk $xcopyStreaming_cmd
    
    ###
    ###  Copy template config files
    ###
    $xcopy_cmd = "xcopy /EIYF `"$HDP_INSTALL_PATH\..\template\conf\*.xml`" `"$hadoopInstallToDir\conf`""
    Invoke-CmdChk $xcopy_cmd

    $xcopy_cmd = "xcopy /EIYF `"$HDP_INSTALL_PATH\..\template\conf\*.properties`" `"$hadoopInstallToDir\bin`""
    Invoke-CmdChk $xcopy_cmd

    $xcopy_cmd = "xcopy /EIYF `"$HDP_INSTALL_PATH\..\template\bin`" `"$hadoopInstallToDir\bin`""
    Invoke-CmdChk $xcopy_cmd
    
    ###
    ### Grant Hadoop user access to $hadoopInstallToDir
    ###
    GiveFullPermissions $hadoopInstallToDir $username

    ###
    ### ACL Hadoop logs directory such that machine users can write to it
    ###
    if( -not (Test-Path "$hadoopInstallToDir\logs"))
    {
        Write-Log "Creating Hadoop logs folder"
        $cmd = "mkdir `"$hadoopInstallToDir\logs`""
        Invoke-CmdChk $cmd
    }
    GiveFullPermissions "$hadoopInstallToDir\logs" "Users"

    Write-Log "Installation of Apache Hadoop Core complete"
}
+
###############################################################################
###
### Uninstalls Hadoop Core component.
###
### Removes the checkpoint folders named in core-site.xml, the install tree,
### and the machine-scope HADOOP_HOME environment variable.
###
### Arguments:
###     nodeInstallRoot: Target install folder (for example "C:\Hadoop")
###
### Dev Note: We use non-Chk Invoke methods on uninstall, since uninstall should
###           not fail.
###
###############################################################################
function UninstallCore(
    [String]
    [Parameter( Position=0, Mandatory=$true )]
    $nodeInstallRoot)
{
    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $ScriptDir "hadoop-$HadoopCoreVersion.winpkg.log" $ENV:WINPKG_BIN
    $hadoopInstallToDir = Join-Path "$nodeInstallRoot" "hadoop-$HadoopCoreVersion"
    
    ### If Hadoop Core root does not exist exit early
    if ( -not (Test-Path $hadoopInstallToDir) )
    {
        return
    }
    
    ###
    ### Remove Hadoop Core folders defined in configuration files
    ###
    $xmlFile = Join-Path $hadoopInstallToDir "conf\core-site.xml"
    foreach ($property in $CorePropertyFolderList)
    {
        [String]$folder = FindXmlPropertyValue $xmlFile $property
        $folder = $folder.Trim()
        ### The [String] cast above converts a $null lookup result into "",
        ### so compare against the empty string: the previous "-ne $null"
        ### check was always true, and Test-Path then errored on "".
        if ( ( $folder -ne "" ) -and ( Test-Path $folder ) )
        {
            Write-Log "Removing Hadoop `"$property`" located under `"$folder`""
            $cmd = "rd /s /q `"$folder`""
            Invoke-Cmd $cmd
        }
    }

    ###
    ### Remove all Hadoop binaries
    ###    
    Write-Log "Removing Hadoop `"$hadoopInstallToDir`""
    $cmd = "rd /s /q `"$hadoopInstallToDir`""
    Invoke-Cmd $cmd

    ### Removing HADOOP_HOME environment variable (setting it to $null deletes it)
    Write-Log "Removing the HADOOP_HOME environment variable"
    [Environment]::SetEnvironmentVariable( "HADOOP_HOME", $null, [EnvironmentVariableTarget]::Machine )
}
+
### Creates and configures the service.
### Copies HadoopServiceHost.exe to <serviceBinDir>\<service>.exe, creates an
### event log source, registers an auto-start Windows service running under
### serviceCredential with a restart-on-failure policy, and ACLs the service.
### No-op when a service with the given name already exists.
function CreateAndConfigureHadoopService(
    [String]
    [Parameter( Position=0, Mandatory=$true )]
    $service,
    [String]
    [Parameter( Position=1, Mandatory=$true )]
    $serviceBinDir,
    [System.Management.Automation.PSCredential]
    [Parameter( Position=2, Mandatory=$true )]
    $serviceCredential
)
{
    if ( -not ( Get-Service "$service" -ErrorAction SilentlyContinue ) )
    {
        ### Each role gets its own copy of the service host binary, named
        ### after the service, so it shows up distinctly in process lists.
        Write-Log "Creating service `"$service`" as $serviceBinDir\$service.exe"
        $xcopyServiceHost_cmd = "copy /Y `"$ENV:HADOOP_HOME\bin\HadoopServiceHost.exe`" `"$serviceBinDir\$service.exe`""
        Invoke-CmdChk $xcopyServiceHost_cmd

        #HadoopServiceHost.exe will write to this log but does not create it
        #Creating the event log needs to be done from an elevated process, so we do it here
        if( -not ([Diagnostics.EventLog]::SourceExists( "$service" )))
        {
            [Diagnostics.EventLog]::CreateEventSource( "$service", "" )
        }

        Write-Log "Adding service $service"
        $s = New-Service -Name "$service" -BinaryPathName "$serviceBinDir\$service.exe" -Credential $serviceCredential -DisplayName "Apache Hadoop $service"
        if ( $s -eq $null )
        {
            throw "CreateAndConfigureHadoopService: Service `"$service`" creation failed"
        }

        ### Failure policy: restart the service 5000ms after a failure and
        ### reset the failure counter after 30 seconds.
        $cmd="$ENV:WINDIR\system32\sc.exe failure $service reset= 30 actions= restart/5000"
        Invoke-CmdChk $cmd

        ### Start the service automatically at boot.
        $cmd="$ENV:WINDIR\system32\sc.exe config $service start= auto"
        Invoke-CmdChk $cmd

        Set-ServiceAcl $service
    }
    else
    {
        Write-Log "Service `"$service`" already exists, skipping service creation"
    }
}
+
### Stops the given Hadoop service, when present, and deletes its Windows
### service registration. Silently does nothing for an unknown service.
function StopAndDeleteHadoopService(
    [String]
    [Parameter( Position=0, Mandatory=$true )]
    $service
)
{
    Write-Log "Stopping $service"
    $existing = Get-Service $service -ErrorAction SilentlyContinue 

    if( $existing -ne $null )
    {
        Stop-Service $service
        Invoke-Cmd "sc.exe delete $service"
    }
}
+
###############################################################################
###
### Installs Hadoop HDFS component.
###
### Arguments:
###     nodeInstallRoot: Target install folder (for example "C:\Hadoop")
###     serviceCredential: Credential object used for service creation
###     hdfsRole: Space separated list of HDFS roles that should be installed.
###               (for example, "namenode secondarynamenode"). Defaults to all
###               roles when omitted or empty.
###
###############################################################################
function InstallHdfs(
    [String]
    [Parameter( Position=0, Mandatory=$true )]
    $nodeInstallRoot,
    [System.Management.Automation.PSCredential]
    [Parameter( Position=1, Mandatory=$true )]
    $serviceCredential,
    [String]
    [Parameter( Position=2, Mandatory=$false )]
    $hdfsRole
    )
{
    $username = $serviceCredential.UserName
   
    ###
    ### Setup defaults if not specified
    ###
    
    ### A [String] parameter is never $null in PowerShell (an omitted argument
    ### binds as ""), so test for the empty string; the previous "-eq $null"
    ### comparison never applied the default and CheckRole then threw on "".
    if( [String]::IsNullOrEmpty($hdfsRole) )
    {
        $hdfsRole = "namenode datanode secondarynamenode"
    }

    ### Verify that hdfsRoles are in the supported set
    CheckRole $hdfsRole @("namenode","datanode","secondarynamenode")

    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $ScriptDir "hadoop-@version@.winpkg.log"
    Test-JavaHome

    ### $hadoopInstallToDir: the directory that contains the application, after unzipping
    $hadoopInstallToDir = Join-Path "$nodeInstallRoot" "hadoop-$HadoopCoreVersion"
    $hadoopInstallToBin = Join-Path "$hadoopInstallToDir" "bin"
    
    ### Hadoop Core must be installed before HDFS
    if( -not (Test-Path $hadoopInstallToDir ))
    {
        throw "InstallHdfs: InstallCore must be called before InstallHdfs"
    }

    Write-Log "HdfsRole: $hdfsRole"

    ###
    ### Copy hdfs configs
    ###
    Write-Log "Copying HDFS configs"
    $xcopy_cmd = "xcopy /EIYF `"$HDP_INSTALL_PATH\..\template\conf\hdfs-site.xml`" `"$hadoopInstallToDir\conf`""
    Invoke-CmdChk $xcopy_cmd

    ###
    ### Create Hadoop Windows Services and grant user ACLS to start/stop
    ###

    Write-Log "Node HDFS Role Services: $hdfsRole"
    $allServices = $hdfsRole

    Write-Log "Installing services $allServices"

    foreach( $service in empty-null $allServices.Split(' '))
    {
        CreateAndConfigureHadoopService $service $hadoopInstallToBin $serviceCredential
    }

    ###
    ### Setup HDFS service config
    ###
    Write-Log "Copying configuration for $hdfsRole"

    foreach( $service in empty-null $hdfsRole.Split( ' ' ))
    {
        ### Generate the per-service xml consumed by the service host.
        Write-Log "Creating service config ${hadoopInstallToBin}\$service.xml"
        $cmd = "$hadoopInstallToBin\hdfs.cmd --service $service > `"$hadoopInstallToBin\$service.xml`""
        Invoke-CmdChk $cmd
    }

    Write-Log "Installation of Hadoop HDFS complete"
}
+
###############################################################################
###
### Uninstalls Hadoop HDFS component.
###
### Stops/deletes all HDFS services and removes the data folders named in
### hdfs-site.xml.
###
### Arguments:
###     nodeInstallRoot: Target install folder (for example "C:\Hadoop")
###
### Dev Note: We use non-Chk Invoke methods on uninstall, since uninstall should
###           not fail.
###
###############################################################################
function UninstallHdfs(
    [String]
    [Parameter( Position=0, Mandatory=$true )]
    $nodeInstallRoot)
{
    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $ScriptDir "hadoop-$HadoopCoreVersion.winpkg.log" $ENV:WINPKG_BIN
    $hadoopInstallToDir = Join-Path "$nodeInstallRoot" "hadoop-$HadoopCoreVersion"
    
    ###
    ### Stop and delete services
    ###
    foreach( $service in ("namenode", "datanode", "secondarynamenode"))
    {
        StopAndDeleteHadoopService $service
    }
    
    ### If Hadoop Core root does not exist exit early
    if ( -not (Test-Path $hadoopInstallToDir) )
    {
        return
    }
    
    ###
    ### Remove Hadoop HDFS folders defined in configuration files
    ###
    $xmlFile = Join-Path $hadoopInstallToDir "conf\hdfs-site.xml"
    foreach ($property in $HdfsPropertyFolderList)
    {
        [String]$folder = FindXmlPropertyValue $xmlFile $property
        $folder = $folder.Trim()
        ### TODO: Support for JBOD and NN replication
        ### The [String] cast above converts a $null lookup result into "",
        ### so compare against the empty string: the previous "-ne $null"
        ### check was always true, and Test-Path then errored on "".
        if ( ( $folder -ne "" ) -and ( Test-Path $folder ) )
        {
            Write-Log "Removing Hadoop `"$property`" located under `"$folder`""
            $cmd = "rd /s /q `"$folder`""
            Invoke-Cmd $cmd
        }
    }
}
+
###############################################################################
###
### Installs Hadoop MapReduce component.
###
### Arguments:
###     nodeInstallRoot: Target install folder (for example "C:\Hadoop")
###     serviceCredential: Credential object used for service creation
###     mapredRole: Space separated list of MapRed roles that should be installed.
###                 (for example, "jobtracker historyserver"). Defaults to all
###                 roles when omitted or empty.
###
###############################################################################
function InstallMapRed(
    [String]
    [Parameter( Position=0, Mandatory=$true )]
    $nodeInstallRoot,
    [System.Management.Automation.PSCredential]
    [Parameter( Position=1, Mandatory=$true )]
    $serviceCredential,
    [String]
    [Parameter( Position=2, Mandatory=$false )]
    $mapredRole
    )
{
    $username = $serviceCredential.UserName
   
    ###
    ### Setup defaults if not specified
    ###
    
    ### A [String] parameter is never $null in PowerShell (an omitted argument
    ### binds as ""), so test for the empty string; the previous "-eq $null"
    ### comparison never applied the default and CheckRole then threw on "".
    if( [String]::IsNullOrEmpty($mapredRole) )
    {
        $mapredRole = "jobtracker tasktracker historyserver"
    }
    
    ### Verify that mapredRoles are in the supported set
    CheckRole $mapredRole @("jobtracker","tasktracker","historyserver")

    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $ScriptDir "hadoop-@version@.winpkg.log"
    Test-JavaHome

    ### $hadoopInstallToDir: the directory that contains the application, after unzipping
    $hadoopInstallToDir = Join-Path "$nodeInstallRoot" "hadoop-$HadoopCoreVersion"
    $hadoopInstallToBin = Join-Path "$hadoopInstallToDir" "bin"
    
    ### Hadoop Core must be installed before MapRed
    if( -not (Test-Path $hadoopInstallToDir ))
    {
        throw "InstallMapRed: InstallCore must be called before InstallMapRed"
    }

    Write-Log "MapRedRole: $mapredRole"

    ###
    ### Copy mapred configs
    ###
    Write-Log "Copying MapRed configs"
    $xcopy_cmd = "xcopy /EIYF `"$HDP_INSTALL_PATH\..\template\conf\mapred-site.xml`" `"$hadoopInstallToDir\conf`""
    Invoke-CmdChk $xcopy_cmd

    ###
    ### Create Hadoop Windows Services and grant user ACLS to start/stop
    ###

    Write-Log "Node MapRed Role Services: $mapredRole"
    $allServices = $mapredRole

    Write-Log "Installing services $allServices"

    foreach( $service in empty-null $allServices.Split(' '))
    {
        CreateAndConfigureHadoopService $service $hadoopInstallToBin $serviceCredential
    }

    ###
    ### Setup MapRed service config
    ###
    Write-Log "Copying configuration for $mapredRole"

    foreach( $service in empty-null $mapredRole.Split( ' ' ))
    {
        ### Generate the per-service xml consumed by the service host.
        Write-Log "Creating service config ${hadoopInstallToBin}\$service.xml"
        $cmd = "$hadoopInstallToBin\mapred.cmd --service $service > `"$hadoopInstallToBin\$service.xml`""
        Invoke-CmdChk $cmd
    }

    Write-Log "Installation of Hadoop MapReduce complete"
}
+
###############################################################################
###
### Uninstalls Hadoop MapRed component.
###
### Stops/deletes all MapRed services and removes the local folders named in
### mapred-site.xml.
###
### Arguments:
###     nodeInstallRoot: Target install folder (for example "C:\Hadoop")
###
### Dev Note: We use non-Chk Invoke methods on uninstall, since uninstall should
###           not fail.
###
###############################################################################
function UninstallMapRed(
    [String]
    [Parameter( Position=0, Mandatory=$true )]
    $nodeInstallRoot)
{
    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $ScriptDir "hadoop-$HadoopCoreVersion.winpkg.log" $ENV:WINPKG_BIN
    $hadoopInstallToDir = Join-Path "$nodeInstallRoot" "hadoop-$HadoopCoreVersion"
    
    ###
    ### Stop and delete services
    ###
    foreach( $service in ("jobtracker", "tasktracker", "historyserver"))
    {
        StopAndDeleteHadoopService $service
    }
    
    ### If Hadoop Core root does not exist exit early
    if ( -not (Test-Path $hadoopInstallToDir) )
    {
        return
    }
    
    ###
    ### Remove Hadoop MapRed folders defined in configuration files
    ###
    $xmlFile = Join-Path $hadoopInstallToDir "conf\mapred-site.xml"
    foreach ($property in $MapRedPropertyFolderList)
    {
        [String]$folder = FindXmlPropertyValue $xmlFile $property
        $folder = $folder.Trim()
        ### The [String] cast above converts a $null lookup result into "",
        ### so compare against the empty string: the previous "-ne $null"
        ### check was always true, and Test-Path then errored on "".
        if ( ( $folder -ne "" ) -and ( Test-Path $folder ) )
        {
            Write-Log "Removing Hadoop `"$property`" located under `"$folder`""
            $cmd = "rd /s /q `"$folder`""
            Invoke-Cmd $cmd
        }
    }
}
+
### Helper routine that updates the given fileName XML file with the given
### key/value configuration values. The XML file is expected to be in the
### Hadoop format. For example:
### <configuration>
###   <property>
###     <name.../><value.../>
###   </property>
### </configuration>
###
### Existing properties are updated in place; missing ones are appended.
function UpdateXmlConfig(
    [string]
    [parameter( Position=0, Mandatory=$true )]
    $fileName, 
    [hashtable]
    [parameter( Position=1 )]
    $config = @{} )
{
    $xml = [xml] (Get-Content $fileName)

    foreach( $key in empty-null $config.Keys )
    {
        $value = $config[$key]
        $found = $False
        ### Update every existing <property> whose <name> matches the key.
        $xml.SelectNodes('/configuration/property') | ? { $_.name -eq $key } | % { $_.value = $value; $found = $True }
        if ( -not $found )
        {
            ### Append a new <property><name/><value/></property> element.
            $newItem = $xml.CreateElement("property")
            $newItem.AppendChild($xml.CreateElement("name")) | Out-Null
            $newItem.AppendChild($xml.CreateElement("value")) | Out-Null
            $newItem.name = $key
            $newItem.value = $value
            $xml["configuration"].AppendChild($newItem) | Out-Null
        }
    }
    
    $xml.Save($fileName)
    ### Removed "$xml.ReleasePath": XmlDocument has no such member, so the
    ### bare expression evaluated to $null and leaked a spurious $null into
    ### the caller's pipeline. Save() leaves nothing to clean up.
}
+
### Helper routine that ACLs the folders defined in folderList properties.
### The routine will look for the property value in the given xml config file
### and give full permissions on that folder to the given username.
###
### Dev Note: All folders that need to be ACLed must be defined in *-site.xml
### files.
function AclFoldersForUser(
    [string]
    [parameter( Position=0, Mandatory=$true )]
    $xmlFileName,
    [string]
    [parameter( Position=1, Mandatory=$true )]
    $username,
    [array]
    [parameter( Position=2, Mandatory=$true )]
    $folderList )
{
    $xml = [xml] (Get-Content $xmlFileName)

    foreach( $key in empty-null $folderList )
    {
        $folderName = $null
        ### Last matching <property> wins, mirroring Hadoop config semantics.
        $xml.SelectNodes('/configuration/property') | ? { $_.name -eq $key } | % { $folderName = $_.value }
        if ( $folderName -eq $null )
        {
            throw "AclFoldersForUser: Trying to ACLs the folder $key which is not defined in $xmlFileName"
        }
        
        ### TODO: Support for JBOD and NN Replication
        ### NOTE(review): only the PARENT of the configured folder is created
        ### and ACLed, and only when that parent does not already exist; an
        ### existing parent is assumed to be ACLed correctly.
        $folderParent = Split-Path $folderName -parent

        if( -not (Test-Path $folderParent))
        {
            Write-Log "AclFoldersForUser: Creating Directory `"$folderParent`" for ACLing"
            mkdir $folderParent
            
            ### TODO: ACL only if the folder does not exist. Otherwise, assume that
            ### it is ACLed properly.
            GiveFullPermissions $folderParent $username
        }
    }
    
    ### NOTE(review): ReleasePath is not a member of XmlDocument; this line
    ### evaluates to $null and emits it into the function's output stream.
    $xml.ReleasePath
}
+
### Runs the given configs thru the alias transformation and returns back
### the new list of configs with all alias dependent options resolved.
###
### Supported aliases:
###  core-site:
###     hdfs_namenode_host -> (fs.default.name, hdfs://$value:8020)
###
###  hdfs-site:
###     hdfs_namenode_host -> (dfs.http.address, $value:50070)
###                           (dfs.https.address, $value:50470)
###     hdfs_secondary_namenode_host ->
###             (dfs.secondary.http.address, $value:50090)
###
###  mapred-site:
###     mapreduce_jobtracker_host -> (mapred.job.tracker, $value:50300)
###                                  (mapred.job.tracker.http.address, $value:50030)
###     mapreduce_historyservice_host ->
###             (mapreduce.history.server.http.address, $value:51111)
###
function ProcessAliasConfigOptions(
    [String]
    [parameter( Position=0 )]
    $component,
    [hashtable]
    [parameter( Position=1 )]
    $configs)
{
    $resolved = @{}
    Write-Log "ProcessAliasConfigOptions: Resolving `"$component`" configs"

    ### Reject unknown components up front; keys are resolved per component
    ### in a single flattened pass below.
    if ( ($component -ne "core") -and ($component -ne "hdfs") -and ($component -ne "mapreduce") )
    {
        throw "ProcessAliasConfigOptions: Unknown component name `"$component`""
    }

    foreach( $key in empty-null $configs.Keys )
    {
        $hostValue = $configs[$key]
        if ( ($component -eq "core") -and ($key -eq "hdfs_namenode_host") )
        {
            $resolved.Add("fs.default.name",  "hdfs://localhost:8020".Replace("localhost", $hostValue))
        }
        elseif ( ($component -eq "hdfs") -and ($key -eq "hdfs_namenode_host") )
        {
            $resolved.Add("dfs.http.address", "localhost:50070".Replace("localhost", $hostValue))
            $resolved.Add("dfs.https.address", "localhost:50470".Replace("localhost", $hostValue))
        }
        elseif ( ($component -eq "hdfs") -and ($key -eq "hdfs_secondary_namenode_host") )
        {
            $resolved.Add("dfs.secondary.http.address", "localhost:50090".Replace("localhost", $hostValue))
        }
        elseif ( ($component -eq "mapreduce") -and ($key -eq "mapreduce_jobtracker_host") )
        {
            $resolved.Add("mapred.job.tracker", "localhost:50300".Replace("localhost", $hostValue))
            $resolved.Add("mapred.job.tracker.http.address", "localhost:50030".Replace("localhost", $hostValue))
        }
        elseif ( ($component -eq "mapreduce") -and ($key -eq "mapreduce_historyservice_host") )
        {
            $resolved.Add("mapreduce.history.server.http.address",  "localhost:51111".Replace("localhost", $hostValue))
        }
        else
        {
            ### Non-alias keys pass through unchanged.
            $resolved.Add($key, $hostValue)
        }
    }

    return $resolved
}
+
###############################################################################
###
### Alters the configuration of the Hadoop Core component.
###
### Arguments:
###     nodeInstallRoot: Target install folder (for example "C:\Hadoop")
###     serviceCredential: Credential object used for service creation
###     configs: Configuration that should be applied.
###              For example, @{"fs.checkpoint.edits.dir" = "C:\Hadoop\hdfs\2nne"}
###     aclAllFolders: If true, all folders defined in core-site.xml will be ACLed
###                    If false, only the folders listed in configs will be ACLed.
###
###############################################################################
function ConfigureCore(
    [String]
    [Parameter( Position=0, Mandatory=$true )]
    $nodeInstallRoot,
    [System.Management.Automation.PSCredential]
    [Parameter( Position=1, Mandatory=$true )]
    $serviceCredential,
    [hashtable]
    [parameter( Position=2 )]
    $configs = @{},
    [bool]
    [parameter( Position=3 )]
    $aclAllFolders = $True
    )
{
    $username = $serviceCredential.UserName

    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $ScriptDir "hadoop-$HadoopCoreVersion.winpkg.log" $ENV:WINPKG_BIN
    $hadoopInstallToDir = Join-Path "$nodeInstallRoot" "hadoop-$HadoopCoreVersion"
    
    ### Hadoop Core must be installed before ConfigureCore is called
    if( -not ( Test-Path $hadoopInstallToDir ))
    {
        throw "ConfigureCore: InstallCore must be called before ConfigureCore"
    }
    
    ###
    ### Apply core-site.xml configuration changes
    ###
    $coreSiteXmlFile = Join-Path $hadoopInstallToDir "conf\core-site.xml"
    UpdateXmlConfig $coreSiteXmlFile $configs
    
    if ($aclAllFolders)
    {
        ###
        ### ACL all folders
        ###
        AclFoldersForUser $coreSiteXmlFile $username $CorePropertyFolderList
    }
    else
    {
        ###
        ### ACL only folders that were modified as part of the config updates
        ###
        ### Fixed: this loop iterated "$config.Keys" (an undefined variable,
        ### a typo for the $configs parameter), so the selective-ACL branch
        ### silently did nothing.
        foreach( $key in empty-null $configs.Keys )
        {
            $folderList = @()
            if ($CorePropertyFolderList -contains $key)
            {
                $folderList = $folderList + @($key)
            }
            
            AclFoldersForUser $coreSiteXmlFile $username $folderList
        }
    }
}
+
###############################################################################
###
### Alters the configuration of the Hadoop HDFS component.
###
### Arguments:
###   See ConfigureCore
###############################################################################
function ConfigureHdfs(
    [String]
    [Parameter( Position=0, Mandatory=$true )]
    $nodeInstallRoot,
    [System.Management.Automation.PSCredential]
    [Parameter( Position=1, Mandatory=$true )]
    $serviceCredential,
    [hashtable]
    [parameter( Position=2 )]
    $configs = @{},
    [bool]
    [parameter( Position=3 )]
    $aclAllFolders = $True
    )
{
    $username = $serviceCredential.UserName

    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $ScriptDir "hadoop-$HadoopCoreVersion.winpkg.log" $ENV:WINPKG_BIN
    $hadoopInstallToDir = Join-Path "$nodeInstallRoot" "hadoop-$HadoopCoreVersion"
    
    if( -not (Test-Path $hadoopInstallToDir ))
    {
        throw "ConfigureHdfs: InstallCore and InstallHdfs must be called before ConfigureHdfs"
    }
    
    ### TODO: Support JBOD and NN replication
    
    ###
    ### Apply configuration changes to hdfs-site.xml
    ###
    $xmlFile = Join-Path $hadoopInstallToDir "conf\hdfs-site.xml"
    UpdateXmlConfig $xmlFile $configs
    
    if ($aclAllFolders)
    {
        ###
        ### ACL all folders
        ###
        AclFoldersForUser $xmlFile $username $HdfsPropertyFolderList
    }
    else
    {
        ###
        ### ACL only folders that were modified as part of the config updates
        ###
        ### Fixed: this loop iterated "$config.Keys" (an undefined variable,
        ### a typo for the $configs parameter), so the selective-ACL branch
        ### silently did nothing.
        foreach( $key in empty-null $configs.Keys )
        {
            $folderList = @()
            if ($HdfsPropertyFolderList -contains $key)
            {
                $folderList = $folderList + @($key)
            }
            
            AclFoldersForUser $xmlFile $username $folderList
        }
    }
}
+
+###############################################################################
+###
+### Alters the configuration of the Hadoop MapRed component.
+###
+### Arguments:
+###   See ConfigureCore
+###############################################################################
+function ConfigureMapRed(
+    [String]
+    [Parameter( Position=0, Mandatory=$true )]
+    $nodeInstallRoot,
+    [System.Management.Automation.PSCredential]
+    [Parameter( Position=1, Mandatory=$true )]
+    $serviceCredential,
+    [hashtable]
+    [parameter( Position=2 )]
+    $configs = @{},
+    [bool]
+    [parameter( Position=3 )]
+    $aclAllFolders = $True
+    )
+{
+    $username = $serviceCredential.UserName
+
+    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $ScriptDir "hadoop-$HadoopCoreVersion.winpkg.log" $ENV:WINPKG_BIN
+    $hadoopInstallToDir = Join-Path "$nodeInstallRoot" "hadoop-$HadoopCoreVersion"
+
+    if( -not (Test-Path $hadoopInstallToDir ))
+    {
+        throw "ConfigureMapRed: InstallCore and InstallMapRed must be called before ConfigureMapRed"
+    }
+
+    ###
+    ### Apply configuration changes to mapred-site.xml
+    ###
+    $xmlFile = Join-Path $hadoopInstallToDir "conf\mapred-site.xml"
+    UpdateXmlConfig $xmlFile $configs
+
+    if ($aclAllFolders)
+    {
+        ###
+        ### ACL all folders
+        ###
+        AclFoldersForUser $xmlFile $username $MapRedPropertyFolderList
+    }
+    else
+    {
+        ###
+        ### ACL only folders that were modified as part of the config updates
+        ###
+        ### BUGFIX: the loop previously enumerated "$config.Keys" but the
+        ### parameter is named $configs; $config is undefined here, so no
+        ### modified folder was ever ACLed in this branch.
+        foreach( $key in empty-null $configs.Keys )
+        {
+            $folderList = @()
+            if ($MapRedPropertyFolderList -contains $key)
+            {
+                $folderList = $folderList + @($key)
+            }
+
+            AclFoldersForUser $xmlFile $username $folderList
+        }
+    }
+}
+
+###############################################################################
+###
+### Installs Hadoop component.
+###
+### Arguments:
+###     component: Component to be installed; it can be "core", "hdfs" or "mapreduce"
+###     nodeInstallRoot: Target install folder (for example "C:\Hadoop")
+###     serviceCredential: Credential object used for service creation
+###     role: Space separated list of roles that should be installed.
+###           (for example, "jobtracker historyserver" for mapreduce)
+###
+###############################################################################
+function Install(
+    [String]
+    [Parameter( Position=0, Mandatory=$true )]
+    $component,
+    [String]
+    [Parameter( Position=1, Mandatory=$true )]
+    $nodeInstallRoot,
+    [System.Management.Automation.PSCredential]
+    [Parameter( Position=2, Mandatory=$true )]
+    $serviceCredential,
+    [String]
+    [Parameter( Position=3, Mandatory=$false )]
+    $role
+    )
+{
+    ### Dispatch to the component-specific installer; string comparison with
+    ### -eq is case-insensitive, so "Core" and "core" are both accepted.
+    if ( $component -eq "core" )
+    {
+        InstallCore $nodeInstallRoot $serviceCredential
+    }
+    elseif ( $component -eq "hdfs" )
+    {
+        InstallHdfs $nodeInstallRoot $serviceCredential $role
+    }
+    elseif ( $component -eq "mapreduce" )
+    {
+        InstallMapRed $nodeInstallRoot $serviceCredential $role
+    }
+    else
+    {
+        ### BUGFIX: fixed "compoment" typo in the error message.
+        throw "Install: Unsupported component argument."
+    }
+}
+
+###############################################################################
+###
+### Uninstalls Hadoop component.
+###
+### Arguments:
+###     component: Component to be uninstalled; it can be "core", "hdfs" or "mapreduce"
+###     nodeInstallRoot: Install folder (for example "C:\Hadoop")
+###
+###############################################################################
+function Uninstall(
+    [String]
+    [Parameter( Position=0, Mandatory=$true )]
+    $component,
+    [String]
+    [Parameter( Position=1, Mandatory=$true )]
+    $nodeInstallRoot
+    )
+{
+    ### Dispatch to the component-specific uninstaller (case-insensitive).
+    if ( $component -eq "core" )
+    {
+        UninstallCore $nodeInstallRoot
+    }
+    elseif ( $component -eq "hdfs" )
+    {
+        UninstallHdfs $nodeInstallRoot
+    }
+    elseif ( $component -eq "mapreduce" )
+    {
+        UninstallMapRed $nodeInstallRoot
+    }
+    else
+    {
+        ### BUGFIX: fixed "compoment" typo in the error message.
+        throw "Uninstall: Unsupported component argument."
+    }
+}
+
+###############################################################################
+###
+### Alters the configuration of the given Hadoop component.
+###
+### Arguments:
+###     component: Component to be configured; it can be "core", "hdfs" or "mapreduce"
+###     nodeInstallRoot: Target install folder (for example "C:\Hadoop")
+###     serviceCredential: Credential object used for service creation
+###     configs: Configuration that should be applied.
+###              For example, @{"fs.checkpoint.edits.dir" = "C:\Hadoop\hdfs\2nne"}
+###              Some configuration parameters are aliased; see ProcessAliasConfigOptions
+###              for details.
+###     aclAllFolders: If true, all folders defined in core-site.xml will be ACLed
+###                    If false, only the folders listed in configs will be ACLed.
+###
+###############################################################################
+function Configure(
+    [String]
+    [Parameter( Position=0, Mandatory=$true )]
+    $component,
+    [String]
+    [Parameter( Position=1, Mandatory=$true )]
+    $nodeInstallRoot,
+    [System.Management.Automation.PSCredential]
+    [Parameter( Position=2, Mandatory=$true )]
+    $serviceCredential,
+    [hashtable]
+    [parameter( Position=3 )]
+    $configs = @{},
+    [bool]
+    [parameter( Position=4 )]
+    $aclAllFolders = $True
+    )
+{
+    ### Resolve alias config options (for example "hdfs_namenode_host") into
+    ### real Hadoop property names before dispatching to the component.
+    $configs = ProcessAliasConfigOptions $component $configs
+
+    if ( $component -eq "core" )
+    {
+        ConfigureCore $nodeInstallRoot $serviceCredential $configs $aclAllFolders
+    }
+    elseif ( $component -eq "hdfs" )
+    {
+        ConfigureHdfs $nodeInstallRoot $serviceCredential $configs $aclAllFolders
+    }
+    elseif ( $component -eq "mapreduce" )
+    {
+        ConfigureMapRed $nodeInstallRoot $serviceCredential $configs $aclAllFolders
+    }
+    else
+    {
+        ### BUGFIX: fixed "compoment" typo in the error message.
+        throw "Configure: Unsupported component argument."
+    }
+}
+
+###############################################################################
+###
+### Alters the configuration of the given Hadoop component using a configuration file.
+###
+### Arguments:
+###     component: Component to be configured; it can be "core", "hdfs" or "mapreduce"
+###     nodeInstallRoot: Target install folder (for example "C:\Hadoop")
+###     serviceCredential: Credential object used for service creation
+###     configFilePath: Configuration that will be copied to $HADOOP_HOME\conf
+###
+###############################################################################
+function ConfigureWithFile(
+    [String]
+    [Parameter( Position=0, Mandatory=$true )]
+    $component,
+    [String]
+    [Parameter( Position=1, Mandatory=$true )]
+    $nodeInstallRoot,
+    [System.Management.Automation.PSCredential]
+    [Parameter( Position=2, Mandatory=$true )]
+    $serviceCredential,
+    [String]
+    [parameter( Position=3 )]
+    $configFilePath
+    )
+{
+    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $ScriptDir "hadoop-$HadoopCoreVersion.winpkg.log" $ENV:WINPKG_BIN
+    $hadoopInstallToDir = Join-Path "$nodeInstallRoot" "hadoop-$HadoopCoreVersion"
+
+    ### Fail early with a clear message instead of letting xcopy fail on a
+    ### missing input file (partially addresses the old TODO about input
+    ### parameter checks).
+    if ( -not (Test-Path "$configFilePath") )
+    {
+        throw "ConfigureWithFile: Given configuration file `"$configFilePath`" does not exist"
+    }
+
+    ### Copy the given file over $HADOOP_HOME\conf
+    $xcopy_cmd = "xcopy /IYF `"$configFilePath`" `"$hadoopInstallToDir\conf`""
+    Invoke-CmdChk $xcopy_cmd
+
+    if ( $component -eq "core" )
+    {
+        ConfigureCore $nodeInstallRoot $serviceCredential
+    }
+    elseif ( $component -eq "hdfs" )
+    {
+        ConfigureHdfs $nodeInstallRoot $serviceCredential
+    }
+    elseif ( $component -eq "mapreduce" )
+    {
+        ConfigureMapRed $nodeInstallRoot $serviceCredential
+    }
+    else
+    {
+        ### BUGFIX: message previously said "Configure:" (wrong function
+        ### name) and misspelled "component".
+        throw "ConfigureWithFile: Unsupported component argument."
+    }
+}
+
+###############################################################################
+###
+### Start component services.
+###
+### Arguments:
+###     component: Component name
+###     roles: List of space separated service to start
+###
+###############################################################################
+function StartService(
+    [String]
+    [Parameter( Position=0, Mandatory=$true )]
+    $component,
+    [String]
+    [Parameter( Position=1, Mandatory=$true )]
+    $roles
+    )
+{
+    Write-Log "Starting `"$component`" `"$roles`" services"
+
+    if ( $component -eq "core" )
+    {
+        ### Core installs binaries/configuration only; nothing to start.
+        Write-Log "StartService: Hadoop Core does not have any services"
+        return
+    }
+
+    ### Verify that roles are in the supported set for the component, then
+    ### start each role as a Windows service. The per-role loop was
+    ### previously duplicated for hdfs and mapreduce; it is hoisted here.
+    if ( $component -eq "hdfs" )
+    {
+        CheckRole $roles @("namenode","datanode","secondarynamenode")
+    }
+    elseif ( $component -eq "mapreduce" )
+    {
+        CheckRole $roles @("jobtracker","tasktracker","historyserver")
+    }
+    else
+    {
+        ### BUGFIX: fixed "compoment" typo in the error message.
+        throw "StartService: Unsupported component argument."
+    }
+
+    foreach ( $role in $roles.Split(" ") )
+    {
+        Write-Log "Starting $role service"
+        Start-Service $role
+    }
+}
+
+###############################################################################
+###
+### Stop component services.
+###
+### Arguments:
+###     component: Component name
+###     roles: List of space separated service to stop
+###
+###############################################################################
+function StopService(
+    [String]
+    [Parameter( Position=0, Mandatory=$true )]
+    $component,
+    [String]
+    [Parameter( Position=1, Mandatory=$true )]
+    $roles
+    )
+{
+    Write-Log "Stopping `"$component`" `"$roles`" services"
+
+    if ( $component -eq "core" )
+    {
+        ### Core installs binaries/configuration only; nothing to stop.
+        Write-Log "StopService: Hadoop Core does not have any services"
+        return
+    }
+
+    ### Verify that roles are in the supported set for the component, then
+    ### stop each role's Windows service.
+    if ( $component -eq "hdfs" )
+    {
+        CheckRole $roles @("namenode","datanode","secondarynamenode")
+    }
+    elseif ( $component -eq "mapreduce" )
+    {
+        CheckRole $roles @("jobtracker","tasktracker","historyserver")
+    }
+    else
+    {
+        ### BUGFIX: the thrown message previously said "StartService:" and
+        ### misspelled "component".
+        throw "StopService: Unsupported component argument."
+    }
+
+    foreach ( $role in $roles.Split(" ") )
+    {
+        Write-Log "Stopping $role service"
+        Stop-Service $role
+    }
+}
+
+###############################################################################
+###
+### Formats the namenode.
+###
+### Arguments:
+###
+###############################################################################
+function FormatNamenode(
+    [bool]
+    [Parameter( Position=0, Mandatory=$false )]
+    $force = $false)
+{
+    Write-Log "Formatting Namenode"
+
+    if ( -not ( Test-Path ENV:HADOOP_HOME ) )
+    {
+        throw "FormatNamenode: HADOOP_HOME not set"
+    }
+    Write-Log "HADOOP_HOME set to `"$env:HADOOP_HOME`""
+
+    ### Build the format command; when forcing, pipe "Y" into the command so
+    ### the namenode format confirmation prompt does not block.
+    $formatCmd = "$ENV:HADOOP_HOME\bin\hadoop.cmd namenode -format"
+    if ( $force )
+    {
+        $formatCmd = "echo Y | " + $formatCmd
+    }
+    Invoke-CmdChk $formatCmd
+}
+
+###
+### Public API
+###
+Export-ModuleMember -Function Install
+Export-ModuleMember -Function Uninstall
+Export-ModuleMember -Function Configure
+Export-ModuleMember -Function ConfigureWithFile
+Export-ModuleMember -Function StartService
+Export-ModuleMember -Function StopService
+Export-ModuleMember -Function FormatNamenode
+
+###
+### Private API (exposed for test only)
+### NOTE: exported solely so TestInstallApi.ps1 can exercise them directly;
+### they are not part of the supported module surface.
+###
+Export-ModuleMember -Function UpdateXmlConfig
+Export-ModuleMember -Function ProcessAliasConfigOptions
\ No newline at end of file

Added: hadoop/common/branches/branch-1-win/src/packages/win/scripts/TestInstallApi.ps1
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/packages/win/scripts/TestInstallApi.ps1?rev=1413992&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/packages/win/scripts/TestInstallApi.ps1 (added)
+++ hadoop/common/branches/branch-1-win/src/packages/win/scripts/TestInstallApi.ps1 Tue Nov 27 04:13:19 2012
@@ -0,0 +1,538 @@
+### Licensed to the Apache Software Foundation (ASF) under one or more
+### contributor license agreements.  See the NOTICE file distributed with
+### this work for additional information regarding copyright ownership.
+### The ASF licenses this file to You under the Apache License, Version 2.0
+### (the "License"); you may not use this file except in compliance with
+### the License.  You may obtain a copy of the License at
+###
+###     http://www.apache.org/licenses/LICENSE-2.0
+###
+### Unless required by applicable law or agreed to in writing, software
+### distributed under the License is distributed on an "AS IS" BASIS,
+### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+### See the License for the specific language governing permissions and
+### limitations under the License.
+
+###
+### Global test variables
+###
+
+$ScriptDir = Resolve-Path (Split-Path $MyInvocation.MyCommand.Path)
+
+### Templates
+### The @...@ tokens below are replaced by the winpkg build ("ant winpkg");
+### when running straight from sources use the override section that follows.
+$Username = "@test.wininstaller.username@"
+$Password = "@test.wininstaller.password@"
+$HadoopCoreVersion = "@version@"
+
+###
+### Uncomment and update below section for testing from sources
+###
+#$Username = "hadoop"
+#$Password = "TestUser123"
+#$ENV:HADOOP_NODE_INSTALL_ROOT = "C:\Hadoop\test"
+#$ENV:WINPKG_LOG = "winpkg_core_install.log"
+#$HadoopCoreVersion = "1.1.0-SNAPSHOT"
+###
+### End of testing section
+###
+
+### Service-account credential shared by all install tests below.
+$NodeInstallRoot = "$ENV:HADOOP_NODE_INSTALL_ROOT"
+$SecurePassword = ConvertTo-SecureString $Password -AsPlainText -Force
+$ServiceCredential = New-Object System.Management.Automation.PSCredential ("$ENV:COMPUTERNAME\$Username", $SecurePassword)
+if ($ServiceCredential -eq $null)
+{
+    throw "Failed to create PSCredential object, please check your username/password parameters"
+}
+
+### Throws $message unless $condition is true; minimal test assertion helper.
+function Assert(
+    [String]
+    [parameter( Position=0 )]
+    $message,
+    [bool]
+    [parameter( Position=1 )]
+    $condition = $false
+    )
+{
+    if ( $condition )
+    {
+        return
+    }
+    throw $message
+}
+
+### Installs Core, checks that HADOOP_HOME is set and points at an existing
+### folder, then uninstalls.
+function CoreInstallTestBasic()
+{
+    Install "Core" $NodeInstallRoot $ServiceCredential ""
+    Assert "ENV:HADOOP_HOME must be set" (Test-Path ENV:HADOOP_HOME)
+    Assert "ENV:HADOOP_HOME folder must exist" (Test-Path $ENV:HADOOP_HOME)
+    Uninstall "Core" $NodeInstallRoot
+}
+
+### Installing Core twice in a row must succeed (idempotent install).
+function CoreInstallTestIdempotent()
+{
+    Install "Core" $NodeInstallRoot $ServiceCredential ""
+    Install "Core" $NodeInstallRoot $ServiceCredential ""
+    Uninstall "Core" $NodeInstallRoot
+}
+
+### Installs Core then Hdfs roles, and uninstalls in reverse order.
+function HdfsInstallTestBasic()
+{
+    Install "core" $NodeInstallRoot $ServiceCredential ""
+    Install "hdfs" $NodeInstallRoot $ServiceCredential "namenode datanode"
+    Uninstall "hdfs" $NodeInstallRoot
+    Uninstall "core" $NodeInstallRoot
+}
+
+### Installing Hdfs without installing Core first must throw.
+function HdfsInstallTestNoCore()
+{
+    $exceptionSeen = $false
+
+    try
+    {
+        Install "Hdfs" $NodeInstallRoot $ServiceCredential "namenode"
+    }
+    catch [Exception]
+    {
+        $exceptionSeen = $true
+    } finally
+    {
+        ### Cleanup
+        Uninstall "Hdfs" $NodeInstallRoot
+    }
+
+    if ( -not $exceptionSeen )
+    {
+        throw "InstallHdfs should fail if InstallCore was not called before"
+    }
+}
+
+### Installing Hdfs with an unknown role must throw.
+function HdfsInstallTestRoleNoSupported()
+{
+    $testFailed = $true
+
+    try
+    {
+        Install "Core" $NodeInstallRoot $ServiceCredential ""
+        Install "Hdfs" $NodeInstallRoot $ServiceCredential "namenode UNKNOWN"
+    }
+    catch [Exception]
+    {
+        $testFailed = $false
+    } finally
+    {
+        ### Cleanup. BUGFIX: Core is installed by this test but was never
+        ### uninstalled, leaking a Core install into subsequent tests.
+        Uninstall "Hdfs" $NodeInstallRoot
+        Uninstall "Core" $NodeInstallRoot
+    }
+
+    if ( $testFailed )
+    {
+        throw "InstallHdfs should fail if the given role is not supported"
+    }
+}
+
+### Installing Hdfs with a non-existent service account must throw.
+function HdfsInstallTestInvalidUser()
+{
+    $testFailed = $true
+
+    try
+    {
+        Install "Core" $NodeInstallRoot $ServiceCredential ""
+
+        $invalidCredential = New-Object System.Management.Automation.PSCredential ("$ENV:COMPUTERNAME\INVALIDUSER", $SecurePassword)
+
+        Install "Hdfs" $NodeInstallRoot $invalidCredential "namenode"
+    }
+    catch [Exception]
+    {
+        $testFailed = $false
+    } finally
+    {
+        ### Cleanup in reverse install order (component before Core), for
+        ### consistency with HdfsInstallTestBasic; the original removed Core
+        ### first.
+        Uninstall "Hdfs" $NodeInstallRoot
+        Uninstall "Core" $NodeInstallRoot
+    }
+
+    if ( $testFailed )
+    {
+        throw "InstallHdfs should fail if username is invalid"
+    }
+}
+
+### Installs Core then MapReduce jobtracker, and uninstalls in reverse order.
+function MapRedInstallTestBasic()
+{
+    Install "Core" $NodeInstallRoot $ServiceCredential ""
+    Install "MapReduce" $NodeInstallRoot $ServiceCredential "jobtracker"
+    Uninstall "MapReduce" $NodeInstallRoot
+    Uninstall "Core" $NodeInstallRoot
+}
+
+### Installing MapReduce without installing Core first must throw.
+function MapRedInstallTestNoCore()
+{
+    $exceptionSeen = $false
+
+    try
+    {
+        Install "MapReduce" $NodeInstallRoot $ServiceCredential "jobtracker"
+    }
+    catch [Exception]
+    {
+        $exceptionSeen = $true
+    } finally
+    {
+        ### Cleanup
+        Uninstall "MapReduce" $NodeInstallRoot
+    }
+
+    if ( -not $exceptionSeen )
+    {
+        throw "InstallMapRed should fail if InstallCore was not called before"
+    }
+}
+
+### Installing MapReduce with a non-existent service account must throw.
+function MapRedInstallTestInvalidUser()
+{
+    $testFailed = $true
+
+    try
+    {
+        Install "Core" $NodeInstallRoot $ServiceCredential ""
+
+        $invalidCredential = New-Object System.Management.Automation.PSCredential ("$ENV:COMPUTERNAME\INVALIDUSER", $SecurePassword)
+
+        Install "MapReduce" $NodeInstallRoot $invalidCredential "jobtracker"
+    }
+    catch [Exception]
+    {
+        $testFailed = $false
+    } finally
+    {
+        ### Cleanup in reverse install order (component before Core), for
+        ### consistency with MapRedInstallTestBasic; the original removed
+        ### Core first.
+        Uninstall "MapReduce" $NodeInstallRoot
+        Uninstall "Core" $NodeInstallRoot
+    }
+
+    if ( $testFailed )
+    {
+        throw "InstallMapRed should fail if username is invalid"
+    }
+}
+
+### Installing MapReduce with an unknown role must throw.
+function MapRedInstallTestRoleNoSupported()
+{
+    $testFailed = $true
+
+    try
+    {
+        Install "Core" $NodeInstallRoot $ServiceCredential ""
+        Install "MapReduce" $NodeInstallRoot $ServiceCredential "jobtracker INVALIDROLE"
+    }
+    catch [Exception]
+    {
+        $testFailed = $false
+    } finally
+    {
+        ### Cleanup. BUGFIX: Core is installed by this test but was never
+        ### uninstalled, leaking a Core install into subsequent tests.
+        Uninstall "MapReduce" $NodeInstallRoot
+        Uninstall "Core" $NodeInstallRoot
+    }
+
+    if ( $testFailed )
+    {
+        throw "InstallMapRed should fail if the given role is not supported"
+    }
+}
+
+### Installs all three components with all roles, then uninstalls in reverse
+### install order.
+function InstallAllTestBasic()
+{
+    Install "Core" $NodeInstallRoot $ServiceCredential ""
+    Install "Hdfs" $NodeInstallRoot $ServiceCredential "namenode datanode secondarynamenode"
+    Install "MapReduce" $NodeInstallRoot $ServiceCredential "jobtracker tasktracker historyserver"
+    
+    # Cleanup
+    Uninstall "MapReduce" $NodeInstallRoot
+    Uninstall "Hdfs" $NodeInstallRoot
+    Uninstall "Core" $NodeInstallRoot
+}
+
+### Installing all components twice in a row must succeed (idempotency).
+function InstallAllTestIdempotent()
+{
+    Install "Core" $NodeInstallRoot $ServiceCredential ""
+    Install "Hdfs" $NodeInstallRoot $ServiceCredential "namenode datanode secondarynamenode"
+    Install "MapReduce" $NodeInstallRoot $ServiceCredential "jobtracker tasktracker historyserver"
+    
+    # Install all services again
+    Install "Core" $NodeInstallRoot $ServiceCredential ""
+    Install "Hdfs" $NodeInstallRoot $ServiceCredential "namenode datanode secondarynamenode"
+    Install "MapReduce" $NodeInstallRoot $ServiceCredential "jobtracker tasktracker historyserver"
+    
+    # Cleanup
+    Uninstall "MapReduce" $NodeInstallRoot
+    Uninstall "Hdfs" $NodeInstallRoot
+    Uninstall "Core" $NodeInstallRoot
+}
+
+### Throws unless the Hadoop-style configuration xml at $xmlFileName contains
+### a /configuration/property entry whose name is $key and value is
+### $expectedValue.
+function ValidateXmlConfigValue($xmlFileName, $key, $expectedValue)
+{
+    $xml = [xml](Get-Content $xmlFileName)
+    $match = $xml.SelectNodes('/configuration/property') |
+        Where-Object { $_.name -eq $key }
+    if ( ($match -ne $null) -and ( $match.value -eq $expectedValue ) )
+    {
+        return
+    }
+    throw "TEST FAILED: Key/Value $key/$expectedValue not found in the configuration file"
+}
+
+### Exercises UpdateXmlConfig: create an empty configuration, add properties,
+### then update one and add another, validating after each step.
+### NOTE(review): testFile.xml is left on disk after the test — confirm
+### whether cleanup is intended.
+function TestUpdateXmlConfig()
+{
+    $xmlTemplate = "<?xml version=`"1.0`"?>`
+    <configuration>`
+    </configuration>"
+    $testFile = Join-Path $ScriptDir "testFile.xml"
+    write-output $xmlTemplate | out-file -encoding ascii $testFile
+    
+    ### Create empty configuration xml
+    UpdateXmlConfig $testFile
+    
+    ### Add two properties to it
+    UpdateXmlConfig $testFile @{"key1" = "value1";"key2" = "value2"}
+    
+    ### Verify that properties are present
+    ValidateXmlConfigValue $testFile "key1" "value1"
+    ValidateXmlConfigValue $testFile "key2" "value2"
+    
+    ### Update key1 property value and add key3 property
+    UpdateXmlConfig $testFile @{"key1" = "value1Updated";"key3" = "value3"}
+    
+    ### Verify updated values
+    ValidateXmlConfigValue $testFile "key1" "value1Updated"
+    ValidateXmlConfigValue $testFile "key2" "value2"
+    ValidateXmlConfigValue $testFile "key3" "value3"
+}
+
+### Configures Core with explicit properties and verifies core-site.xml is
+### updated (including the default fs.checkpoint.dir value beforehand).
+function CoreConfigureTestBasic()
+{
+    Install "Core" $NodeInstallRoot $ServiceCredential ""
+    Configure "Core" $NodeInstallRoot $ServiceCredential
+    
+    $coreSiteXml = Join-Path $NodeInstallRoot "hadoop-$HadoopCoreVersion\conf\core-site.xml"
+    
+    ### Verify default value for fs.checkpoint.dir
+    ValidateXmlConfigValue $coreSiteXml "fs.checkpoint.dir" "C:\Hadoop\hdfs\2nn"
+    
+    ### Change Hadoop core configuration
+    Configure "Core" $NodeInstallRoot $ServiceCredential @{
+        "fs.checkpoint.dir" = "$NodeInstallRoot\hdfs\2nn";
+        "fs.checkpoint.edits.dir" = "$NodeInstallRoot\hdfs\2nn";
+        "fs.default.name" = "asv://host:8000"}
+    
+    ### Verify that the update took place
+    ValidateXmlConfigValue $coreSiteXml "fs.checkpoint.dir" "$NodeInstallRoot\hdfs\2nn"
+    ValidateXmlConfigValue $coreSiteXml "fs.checkpoint.edits.dir" "$NodeInstallRoot\hdfs\2nn"
+    ValidateXmlConfigValue $coreSiteXml "fs.default.name" "asv://host:8000"
+    
+    Uninstall "Core" $NodeInstallRoot
+}
+
+### Configures Core from a copied configuration file via ConfigureWithFile
+### and verifies the new value lands in conf\core-site.xml.
+function CoreConfigureWithFileTestBasic()
+{
+    Install "Core" $NodeInstallRoot $ServiceCredential ""
+    
+    $coreSiteXml = Join-Path $NodeInstallRoot "hadoop-$HadoopCoreVersion\conf\core-site.xml"
+    $coreSiteXmlTmp = Join-Path $NodeInstallRoot "hadoop-$HadoopCoreVersion\core-site.xml"
+    
+    Copy-Item $coreSiteXml (Join-Path $NodeInstallRoot "hadoop-$HadoopCoreVersion")
+    UpdateXmlConfig $coreSiteXmlTmp @{"fs.checkpoint.dir" = "$NodeInstallRoot\hdfs2\2nn"}
+    
+    ### Configure Core with a new file
+    ConfigureWithFile "Core" $NodeInstallRoot $ServiceCredential $coreSiteXmlTmp
+    
+    ### Verify that new config took place
+    ValidateXmlConfigValue $coreSiteXml "fs.checkpoint.dir" "$NodeInstallRoot\hdfs2\2nn"
+    
+    Uninstall "Core" $NodeInstallRoot
+}
+
+### End-to-end install+configure of all three components, validating that
+### each component's site xml reflects the configured properties.
+function InstallAndConfigAllTestBasic()
+{
+    Install "core" $NodeInstallRoot $ServiceCredential ""
+    Configure "core" $NodeInstallRoot $ServiceCredential @{
+        "fs.checkpoint.dir" = "$NodeInstallRoot\hdfs\2nn";
+        "fs.checkpoint.edits.dir" = "$NodeInstallRoot\hdfs\2nn"}
+    
+    $hdfsSiteXml = Join-Path $NodeInstallRoot "hadoop-$HadoopCoreVersion\conf\hdfs-site.xml"
+    $mapRedSiteXml = Join-Path $NodeInstallRoot "hadoop-$HadoopCoreVersion\conf\mapred-site.xml"
+    
+    Install "hdfs" $NodeInstallRoot $ServiceCredential "namenode datanode secondarynamenode"
+    Configure "hdfs" $NodeInstallRoot $ServiceCredential @{
+        "dfs.name.dir" = "$NodeInstallRoot\hdfs\nn2";
+        "dfs.data.dir" = "$NodeInstallRoot\hdfs\dn2";
+        "dfs.webhdfs.enabled" = "false"}
+    
+    ### Verify that the update took place
+    ValidateXmlConfigValue $hdfsSiteXml "dfs.name.dir" "$NodeInstallRoot\hdfs\nn2"
+    ValidateXmlConfigValue $hdfsSiteXml "dfs.data.dir" "$NodeInstallRoot\hdfs\dn2"
+    ValidateXmlConfigValue $hdfsSiteXml "dfs.webhdfs.enabled" "false"
+    
+    Install "mapreduce" $NodeInstallRoot $ServiceCredential "jobtracker tasktracker historyserver"
+    Configure "mapreduce" $NodeInstallRoot $ServiceCredential @{
+        "mapred.job.tracker" = "host:port";
+        "mapred.local.dir" = "$NodeInstallRoot\hdfs\mapred\local2"}
+        
+    ### Verify that the update took place
+    ValidateXmlConfigValue $mapRedSiteXml "mapred.job.tracker" "host:port"
+    ValidateXmlConfigValue $mapRedSiteXml "mapred.local.dir" "$NodeInstallRoot\hdfs\mapred\local2"
+    
+    # Cleanup
+    Uninstall "mapreduce" $NodeInstallRoot
+    Uninstall "hdfs" $NodeInstallRoot
+    Uninstall "core" $NodeInstallRoot
+}
+
+### Each Start/StopService call with an invalid role must throw.
+### BUGFIX: $testFailed is now re-armed before every case; the original set
+### it to $true only once, so after the first case caught its exception the
+### flag stayed $false and the remaining three cases could never be reported
+### as failures. The stop-side failure messages also said "StartService".
+function TestStartStopServiceRoleNoSupported()
+{
+    ### Test starting services with invalid roles
+    $testFailed = $true
+    try
+    {
+        StartService "hdfs" "namenode INVALIDROLE"
+    }
+    catch [Exception]
+    {
+        $testFailed = $false
+    }
+
+    if ( $testFailed )
+    {
+        throw "StartService should fail if the given role is not supported"
+    }
+
+    $testFailed = $true
+    try
+    {
+        StartService "mapreduce" "jobtracker INVALIDROLE"
+    }
+    catch [Exception]
+    {
+        $testFailed = $false
+    }
+
+    if ( $testFailed )
+    {
+        throw "StartService should fail if the given role is not supported"
+    }
+
+    ### Test stopping services with invalid roles
+    $testFailed = $true
+    try
+    {
+        StopService "hdfs" "namenode INVALIDROLE"
+    }
+    catch [Exception]
+    {
+        $testFailed = $false
+    }
+
+    if ( $testFailed )
+    {
+        throw "StopService should fail if the given role is not supported"
+    }
+
+    $testFailed = $true
+    try
+    {
+        StopService "mapreduce" "jobtracker INVALIDROLE"
+    }
+    catch [Exception]
+    {
+        $testFailed = $false
+    }
+
+    if ( $testFailed )
+    {
+        throw "StopService should fail if the given role is not supported"
+    }
+}
+
+### Verifies that ProcessAliasConfigOptions expands alias keys (for example
+### "hdfs_namenode_host") into the corresponding real Hadoop properties while
+### passing non-alias keys through unchanged.
+function TestProcessAliasConfigOptions()
+{
+    $result = ProcessAliasConfigOptions "core" @{
+        "dfs.name.dir" = "$NodeInstallRoot\hdfs\nn2";
+        "hdfs_namenode_host" = "machine1"}
+
+    Assert "TestProcessAliasConfigOptions: hdfs_namenode_host not resolved correctly" ( $result["fs.default.name"] -eq "hdfs://machine1:8020" )
+    Assert "TestProcessAliasConfigOptions: dfs.name.dir not resolved correctly" ( $result["dfs.name.dir"] -eq "$NodeInstallRoot\hdfs\nn2" )
+    
+    $result = ProcessAliasConfigOptions "hdfs" @{
+        "hdfs_secondary_namenode_host" = "machine1";
+        "hdfs_namenode_host" = "machine2"}
+    Assert "TestProcessAliasConfigOptions: hdfs_secondary_namenode_host not resolved correctly" ( $result["dfs.secondary.http.address"] -eq "machine1:50090" )
+    Assert "TestProcessAliasConfigOptions: hdfs_namenode_host not resolved correctly" ( $result["dfs.http.address"] -eq "machine2:50070" )
+    Assert "TestProcessAliasConfigOptions: hdfs_namenode_host not resolved correctly" ( $result["dfs.https.address"] -eq "machine2:50470" )
+    
+    $result = ProcessAliasConfigOptions "mapreduce" @{
+        "mapreduce_jobtracker_host" = "machine1" }
+    Assert "TestProcessAliasConfigOptions: mapreduce_jobtracker_host not resolved correctly" ( $result["mapred.job.tracker"] -eq "machine1:50300" )
+    Assert "TestProcessAliasConfigOptions: mapreduce_jobtracker_host not resolved correctly" ( $result["mapred.job.tracker.http.address"] -eq "machine1:50030" )
+}
+
+### Configures Core mixing real properties with the hdfs_namenode_host alias
+### and verifies both land in core-site.xml.
+function CoreConfigureWithAliasesTest()
+{
+    Install "Core" $NodeInstallRoot $ServiceCredential ""
+    Configure "Core" $NodeInstallRoot $ServiceCredential
+    
+    $coreSiteXml = Join-Path $NodeInstallRoot "hadoop-$HadoopCoreVersion\conf\core-site.xml"
+
+    ### Change Hadoop core configuration
+    Configure "Core" $NodeInstallRoot $ServiceCredential @{
+        "fs.checkpoint.dir" = "$NodeInstallRoot\hdfs\2nn";
+        "fs.checkpoint.edits.dir" = "$NodeInstallRoot\hdfs\2nn";
+        "hdfs_namenode_host" = "machine1"}
+    
+    ### Verify that the update took place
+    ValidateXmlConfigValue $coreSiteXml "fs.checkpoint.dir" "$NodeInstallRoot\hdfs\2nn"
+    ValidateXmlConfigValue $coreSiteXml "fs.checkpoint.edits.dir" "$NodeInstallRoot\hdfs\2nn"
+    ValidateXmlConfigValue $coreSiteXml "fs.default.name" "hdfs://machine1:8020"
+    
+    Uninstall "Core" $NodeInstallRoot
+}
+
+
+try
+{
+    ###
+    ### Import dependencies
+    ###
+    $utilsModule = Import-Module -Name "$ScriptDir\..\resources\Winpkg.Utils.psm1" -ArgumentList ("HADOOP") -PassThru
+    $apiModule = Import-Module -Name "$ScriptDir\InstallApi.psm1" -PassThru
+
+    ###
+    ### Test methods
+    ###
+    CoreInstallTestBasic
+    CoreInstallTestIdempotent
+    HdfsInstallTestBasic
+    HdfsInstallTestNoCore
+    HdfsInstallTestInvalidUser
+    HdfsInstallTestRoleNoSupported
+    MapRedInstallTestBasic
+    MapRedInstallTestNoCore
+    MapRedInstallTestInvalidUser
+    MapRedInstallTestRoleNoSupported
+    InstallAllTestBasic
+    InstallAllTestIdempotent
+    TestUpdateXmlConfig
+    CoreConfigureTestBasic
+    CoreConfigureWithFileTestBasic
+    InstallAndConfigAllTestBasic
+    TestStartStopServiceRoleNoSupported
+    TestProcessAliasConfigOptions
+    CoreConfigureWithAliasesTest
+
+    # Start/StopService should be tested E2E as it requires all Hadoop binaries
+    # to be installed (this test only installs a small subset so that it runs
+    # faster).
+
+    Write-Host "TEST COMPLETED SUCCESSFULLY"
+}
+finally
+{
+    ### BUGFIX: remove each module under its own null-check. Previously both
+    ### removals were guarded only by $utilsModule, so a failure importing
+    ### InstallApi.psm1 would pass a null $apiModule to Remove-Module.
+    ### (Also normalized the tab/space indentation mix.)
+    if( $apiModule -ne $null )
+    {
+        Remove-Module $apiModule
+    }
+    if( $utilsModule -ne $null )
+    {
+        Remove-Module $utilsModule
+    }
+}
\ No newline at end of file

Added: hadoop/common/branches/branch-1-win/src/packages/win/scripts/install.cmd
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/packages/win/scripts/install.cmd?rev=1413992&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/packages/win/scripts/install.cmd (added)
+++ hadoop/common/branches/branch-1-win/src/packages/win/scripts/install.cmd Tue Nov 27 04:13:19 2012
@@ -0,0 +1,19 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Run the PowerShell installer located next to this script (%~dp0),
+@rem forwarding all command-line arguments (%*).
+powershell.exe -NoProfile -InputFormat none -ExecutionPolicy unrestricted -File %~dp0install.ps1 %*
+goto :eof
+

Added: hadoop/common/branches/branch-1-win/src/packages/win/scripts/install.ps1
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/packages/win/scripts/install.ps1?rev=1413992&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/packages/win/scripts/install.ps1 (added)
+++ hadoop/common/branches/branch-1-win/src/packages/win/scripts/install.ps1 Tue Nov 27 04:13:19 2012
@@ -0,0 +1,157 @@
+### Licensed to the Apache Software Foundation (ASF) under one or more
+### contributor license agreements.  See the NOTICE file distributed with
+### this work for additional information regarding copyright ownership.
+### The ASF licenses this file to You under the Apache License, Version 2.0
+### (the "License"); you may not use this file except in compliance with
+### the License.  You may obtain a copy of the License at
+###
+###     http://www.apache.org/licenses/LICENSE-2.0
+###
+### Unless required by applicable law or agreed to in writing, software
+### distributed under the License is distributed on an "AS IS" BASIS,
+### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+### See the License for the specific language governing permissions and
+### limitations under the License.
+
+###
+### Install script that can be used to install Hadoop as a Single-Node cluster.
+### To invoke the script, run the following command from PowerShell:
+###   install.ps1 -username <username> -password <password>
+###
+### where:
+###   <username> and <password> represent account credentials used to run
+###   Hadoop services as Windows services.
+###
+### Account must have the following two privileges, otherwise
+### installation/runtime will fail.
+###   SeServiceLogonRight
+###   SeCreateSymbolicLinkPrivilege
+###
+### By default, Hadoop is installed to "C:\Hadoop". To change this set
+### HADOOP_NODE_INSTALL_ROOT environment variable to a location where
+### you'd like Hadoop installed.
+###
+### Script pre-requisites:
+###   JAVA_HOME must be set to point to a valid Java location.
+###
+### To uninstall previously installed Single-Node cluster run:
+###   uninstall.ps1
+###
+### NOTE: Notice @version@ strings throughout the file. First compile
+### winpkg with "ant winpkg", that will replace the version string.
+### To install, use:
+###   build\hadoop-@version@.winpkg.zip#scripts\install.ps1
+###
+
+param(
+    [String]
+    [Parameter( Position=0, Mandatory=$true )]
+    $username,
+    [String]
+    [Parameter( Position=1, Mandatory=$true )]
+    $password,
+    [String]
+    $hdfsRoles = "namenode datanode secondarynamenode",
+    [String]
+    $mapredRoles = "jobtracker tasktracker historyserver",
+    [Switch]
+    $skipNamenodeFormat = $false
+    )
+
+function Main( $scriptDir )
+{
+    if ( -not (Test-Path ENV:WINPKG_LOG))
+    {
+        $ENV:WINPKG_LOG = "hadoop.core.winpkg.log"
+    }
+
+    $HadoopCoreVersion = "@version@"
+    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $scriptDir "hadoop-$hadoopCoreVersion.winpkg.log"
+    Test-JavaHome
+
+    ### $hadoopInstallDir: the directory that contains the application, after unzipping
+    $hadoopInstallToBin = Join-Path "$ENV:HADOOP_NODE_INSTALL_ROOT" "hadoop-$hadoopCoreVersion\bin"
+    $nodeInstallRoot = "$ENV:HADOOP_NODE_INSTALL_ROOT"
+    
+    Write-Log "nodeInstallRoot: $nodeInstallRoot"
+    Write-Log "hadoopInstallToBin: $hadoopInstallToBin"
+
+    Write-Log "Username: $username"
+
+    ###
+    ### Initialize root directory used for Core, HDFS and MapRed local folders
+    ###
+    if( -not (Test-Path ENV:HDFS_DATA_DIR))
+    {
+        $ENV:HDFS_DATA_DIR = Join-Path "$ENV:HADOOP_NODE_INSTALL_ROOT" "HDFS"
+    }
+    
+    ###
+    ### Create the Credential object from the given username and password
+    ###
+    $securePassword = ConvertTo-SecureString $password -AsPlainText -Force
+    $serviceCredential = New-Object System.Management.Automation.PSCredential ("$ENV:COMPUTERNAME\$username", $securePassword)
+
+    ###
+    ### Stop all services before proceeding with the install step, otherwise
+    ### files will be in-use and installation can fail
+    ###
+    Write-Log "Stopping MapRed services if already running before proceeding with install"
+    StopService "mapreduce" "jobtracker tasktracker historyserver"
+
+    Write-Log "Stopping HDFS services if already running before proceeding with install"
+    StopService "hdfs" "namenode datanode secondarynamenode"
+
+    ###
+    ### Install and Configure Core
+    ###
+    Install "Core" $NodeInstallRoot $serviceCredential ""
+    Configure "Core" $NodeInstallRoot $serviceCredential @{
+        "fs.checkpoint.dir" = "$ENV:HDFS_DATA_DIR\2nn";
+        "fs.checkpoint.edits.dir" = "$ENV:HDFS_DATA_DIR\2nn"}
+
+    ###
+    ### Install and Configure HDFS
+    ###
+    Install "Hdfs" $NodeInstallRoot $serviceCredential $hdfsRoles
+    Configure "Hdfs" $NodeInstallRoot $serviceCredential @{
+        "dfs.name.dir" = "$ENV:HDFS_DATA_DIR\nn";
+        "dfs.data.dir" = "$ENV:HDFS_DATA_DIR\dn"}
+
+    if ($skipNamenodeFormat -ne $true) 
+    {
+        ###
+        ### Format the namenode
+        ###
+        FormatNamenode $false
+    }
+    else
+    {
+        Write-Log "Skipping Namenode format"
+    }
+
+    ###
+    ### Install and Configure MapRed
+    ###
+    Install "MapReduce" $NodeInstallRoot $serviceCredential $mapRedRoles
+    Configure "MapReduce" $NodeInstallRoot $serviceCredential @{
+        "mapred.local.dir" = "$ENV:HDFS_DATA_DIR\mapred\local"}
+
+    Write-Log "Install of Hadoop Core, HDFS, MapRed completed successfully"
+}
+
+try
+{ 
+    $scriptDir = Resolve-Path (Split-Path $MyInvocation.MyCommand.Path)
+    $utilsModule = Import-Module -Name "$scriptDir\..\resources\Winpkg.Utils.psm1" -ArgumentList ("HADOOP") -PassThru
+    $apiModule = Import-Module -Name "$scriptDir\InstallApi.psm1" -PassThru
+    Main $scriptDir
+}
+finally
+{
+    if( $utilsModule -ne $null )
+    {
+        Remove-Module $apiModule
+        Remove-Module $utilsModule
+    }
+}
\ No newline at end of file

Added: hadoop/common/branches/branch-1-win/src/packages/win/scripts/uninstall.cmd
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/packages/win/scripts/uninstall.cmd?rev=1413992&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/packages/win/scripts/uninstall.cmd (added)
+++ hadoop/common/branches/branch-1-win/src/packages/win/scripts/uninstall.cmd Tue Nov 27 04:13:19 2012
@@ -0,0 +1,19 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+powershell.exe -NoProfile -InputFormat none -ExecutionPolicy unrestricted -File %~dp0uninstall.ps1 %*
+goto :eof
+

Added: hadoop/common/branches/branch-1-win/src/packages/win/scripts/uninstall.ps1
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/packages/win/scripts/uninstall.ps1?rev=1413992&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/packages/win/scripts/uninstall.ps1 (added)
+++ hadoop/common/branches/branch-1-win/src/packages/win/scripts/uninstall.ps1 Tue Nov 27 04:13:19 2012
@@ -0,0 +1,62 @@
+### Licensed to the Apache Software Foundation (ASF) under one or more
+### contributor license agreements.  See the NOTICE file distributed with
+### this work for additional information regarding copyright ownership.
+### The ASF licenses this file to You under the Apache License, Version 2.0
+### (the "License"); you may not use this file except in compliance with
+### the License.  You may obtain a copy of the License at
+###
+###     http://www.apache.org/licenses/LICENSE-2.0
+###
+### Unless required by applicable law or agreed to in writing, software
+### distributed under the License is distributed on an "AS IS" BASIS,
+### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+### See the License for the specific language governing permissions and
+### limitations under the License.
+
+function Main
+{
+    if ( -not (Test-Path ENV:WINPKG_LOG))
+    {
+        $ENV:WINPKG_LOG = "hadoop.core.winpkg.log"
+    }
+
+    $HDP_INSTALL_PATH, $HDP_RESOURCES_DIR = Initialize-InstallationEnv $scriptDir "hadoop-@version@.winpkg.log"
+    $nodeInstallRoot = "$ENV:HADOOP_NODE_INSTALL_ROOT"
+
+    if ( -not (Test-Path ENV:HDFS_DATA_DIR))
+    {
+        $ENV:HDFS_DATA_DIR = Join-Path "$ENV:HADOOP_NODE_INSTALL_ROOT" "HDFS"
+    }
+    
+    ###
+    ### Uninstall MapRed, Hdfs and Core
+    ###
+    Uninstall "MapReduce" $nodeInstallRoot
+    Uninstall "Hdfs" $nodeInstallRoot
+    Uninstall "Core" $nodeInstallRoot
+
+    ###
+    ### Cleanup any remaining content under HDFS data dir
+    ###
+    Write-Log "Removing HDFS_DATA_DIR `"$ENV:HDFS_DATA_DIR`""
+    $cmd = "rd /s /q `"$ENV:HDFS_DATA_DIR`""
+    Invoke-Cmd $cmd
+    
+    Write-Log "Uninstall of Hadoop Core, HDFS, MapRed completed successfully"
+}
+
+try
+{
+    $scriptDir = Resolve-Path (Split-Path $MyInvocation.MyCommand.Path)
+    $utilsModule = Import-Module -Name "$scriptDir\..\resources\Winpkg.Utils.psm1" -ArgumentList ("HADOOP") -PassThru
+    $apiModule = Import-Module -Name "$scriptDir\InstallApi.psm1" -PassThru
+    Main $scriptDir
+}
+finally
+{
+    if( $utilsModule -ne $null )
+    {
+        Remove-Module $apiModule
+        Remove-Module $utilsModule
+    }
+}

Added: hadoop/common/branches/branch-1-win/src/packages/win/template/bin/start_daemons.cmd
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/packages/win/template/bin/start_daemons.cmd?rev=1413992&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/packages/win/template/bin/start_daemons.cmd (added)
+++ hadoop/common/branches/branch-1-win/src/packages/win/template/bin/start_daemons.cmd Tue Nov 27 04:13:19 2012
@@ -0,0 +1,28 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+setlocal enabledelayedexpansion
+
+echo Starting Hadoop services
+
+@rem
+@rem  Start services 
+@rem
+for %%i in (namenode datanode secondarynamenode jobtracker tasktracker historyserver) do (
+  echo Starting %%i
+  "%windir%\system32\net.exe" start %%i
+)
+
+endlocal
\ No newline at end of file

Added: hadoop/common/branches/branch-1-win/src/packages/win/template/bin/stop_daemons.cmd
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/packages/win/template/bin/stop_daemons.cmd?rev=1413992&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/packages/win/template/bin/stop_daemons.cmd (added)
+++ hadoop/common/branches/branch-1-win/src/packages/win/template/bin/stop_daemons.cmd Tue Nov 27 04:13:19 2012
@@ -0,0 +1,28 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+setlocal enabledelayedexpansion
+
+echo Stopping Hadoop services
+
+@rem
+@rem  Stop services 
+@rem
+for %%i in (namenode datanode secondarynamenode jobtracker tasktracker historyserver) do (
+  echo Stopping %%i
+  "%windir%\system32\net.exe" stop %%i
+)
+
+endlocal
\ No newline at end of file

Added: hadoop/common/branches/branch-1-win/src/packages/win/template/conf/core-site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/packages/win/template/conf/core-site.xml?rev=1413992&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/packages/win/template/conf/core-site.xml (added)
+++ hadoop/common/branches/branch-1-win/src/packages/win/template/conf/core-site.xml Tue Nov 27 04:13:19 2012
@@ -0,0 +1,67 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <property>
+    <name>fs.default.name</name>
+    <!-- cluster variant -->
+    <value>hdfs://localhost:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>hadoop.tmp.dir</name>
+    <value>/hadoop/hdfs/tmp</value>
+    <description>A base for other temporary directories.</description>
+  </property>
+
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes between trash checkpoints.
+  If zero, the trash feature is disabled.
+  </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.dir</name>
+    <value>C:\Hadoop\hdfs\2nn</value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary images to merge.
+        If this is a comma-delimited list of directories then the image is
+        replicated in all of the directories for redundancy.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.edits.dir</name>
+    <value>C:\Hadoop\hdfs\2nn</value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary edits to merge.
+        If this is a comma-delimited list of directories then the edits are
+        replicated in all of the directories for redundancy.
+        Default value is same as fs.checkpoint.dir
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.period</name>
+    <value>86400</value>
+    <description>The number of seconds between two periodic checkpoints.
+  </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.size</name>
+    <value>2048000000</value>
+    <description>The size of the current edit log (in bytes) that triggers
+       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+  </description>
+  </property>
+
+</configuration>

Added: hadoop/common/branches/branch-1-win/src/packages/win/template/conf/hadoop-metrics2.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/packages/win/template/conf/hadoop-metrics2.properties?rev=1413992&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/packages/win/template/conf/hadoop-metrics2.properties (added)
+++ hadoop/common/branches/branch-1-win/src/packages/win/template/conf/hadoop-metrics2.properties Tue Nov 27 04:13:19 2012
@@ -0,0 +1,48 @@
+#syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+
+#namenode.sink.file.filename=namenode-metrics.out
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+#jobtracker.sink.file.filename=jobtracker-metrics.out
+
+#tasktracker.sink.file.filename=tasktracker-metrics.out
+
+#maptask.sink.file.filename=maptask-metrics.out
+
+#reducetask.sink.file.filename=reducetask-metrics.out
+
+
+#
+# Below are for sending metrics to Ganglia
+#
+# for Ganglia 3.0 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
+#
+# for Ganglia 3.1 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+
+# *.sink.ganglia.period=10
+
+# default for supportsparse is false
+# *.sink.ganglia.supportsparse=true
+
+#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#jobtracker.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#tasktracker.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#maptask.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#reducetask.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+*.period=60

Added: hadoop/common/branches/branch-1-win/src/packages/win/template/conf/hdfs-site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/packages/win/template/conf/hdfs-site.xml?rev=1413992&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/packages/win/template/conf/hdfs-site.xml (added)
+++ hadoop/common/branches/branch-1-win/src/packages/win/template/conf/hdfs-site.xml Tue Nov 27 04:13:19 2012
@@ -0,0 +1,99 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>dfs.name.dir</name>
+    <value>C:\Hadoop\hdfs\nn</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>true</value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>heartbeat.recheck.interval</name>
+    <value>1</value>
+    <description>None </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>C:\Hadoop\hdfs\dn</value>
+  </property>
+  
+  <property>
+    <name>dfs.replication</name>
+    <value>1</value>
+    <description>Default block replication.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>localhost:50070</value>
+    <description>The address and the base port where the dfs namenode
+       web ui will listen on.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+     </description>
+  </property>
+
+  <property>
+     <name>dfs.permissions</name>
+     <value>false</value>
+     <description>
+        If "true", enable permission checking in HDFS.
+        If "false", permission checking is turned off,
+        but all other behavior is unchanged.
+        Switching from one parameter value to the other does not change the mode,
+        owner or group of files or directories.
+     </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.http.address</name>
+    <value>localhost:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>localhost:50470</value>
+  <description>The https address where namenode binds</description>
+  </property>
+
+</configuration>



Mime
View raw message