ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From spose...@apache.org
Subject [1/2] ambari git commit: AMBARI-9658: Enhance parameter configs for Pig, Files and CapSch views
Date Tue, 17 Feb 2015 02:43:49 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk fd07a3e34 -> 976e4bcf7


AMBARI-9658: Enhance parameter configs for Pig, Files and CapSch views 

Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3e0a9c9c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3e0a9c9c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3e0a9c9c

Branch: refs/heads/trunk
Commit: 3e0a9c9ce06be2d73d84454b5a5753f51303aec4
Parents: d4f17d3
Author: sposetti <compoze1@yahoo.com>
Authored: Mon Feb 16 21:40:31 2015 -0500
Committer: sposetti <compoze1@yahoo.com>
Committed: Mon Feb 16 21:40:31 2015 -0500

----------------------------------------------------------------------
 contrib/views/capacity-scheduler/readme.md      |  8 +--
 .../src/main/resources/view.xml                 |  7 ++-
 contrib/views/files/readme.md                   | 18 +-----
 contrib/views/files/src/main/resources/view.xml |  8 ++-
 contrib/views/pig/readme.md                     | 64 ++------------------
 contrib/views/pig/src/main/resources/view.xml   | 32 +++++++---
 6 files changed, 45 insertions(+), 92 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3e0a9c9c/contrib/views/capacity-scheduler/readme.md
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/readme.md b/contrib/views/capacity-scheduler/readme.md
index adfd2d2..e772508 100644
--- a/contrib/views/capacity-scheduler/readme.md
+++ b/contrib/views/capacity-scheduler/readme.md
@@ -23,7 +23,7 @@ This View provides a UI to manage queues for the YARN Capacity Scheduler.
 Requirements
 -----
 
-- Ambari 1.7.0
+- Ambari 1.7.0 or later
 - YARN
 
 Build
@@ -59,9 +59,9 @@ From the Ambari Administration interface, create a view instance.
 | Details: Instance Name | CS_1 |
 | Details: Display Name | Queue Manager |
 | Details: Description | Browse and manage YARN Capacity Scheduler queues |
-| Properties: ambari.server.url | http://c6401.ambari.apache.org:8080/api/v1/clusters/MyCluster
|
-| Properties: ambari.server.username | admin |
-| Properties: ambari.server.password | password |
+| Properties: Ambari Cluster URL | http://c6401.ambari.apache.org:8080/api/v1/clusters/MyCluster
|
+| Properties: Operator Username | admin |
+| Properties: Operator Password | password |
 
 Login to Ambari and browse to the view instance.
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e0a9c9c/contrib/views/capacity-scheduler/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/src/main/resources/view.xml b/contrib/views/capacity-scheduler/src/main/resources/view.xml
index 49f7e67..a986dc4 100644
--- a/contrib/views/capacity-scheduler/src/main/resources/view.xml
+++ b/contrib/views/capacity-scheduler/src/main/resources/view.xml
@@ -18,24 +18,25 @@
     <name>CAPACITY-SCHEDULER</name>
     <label>Capacity Scheduler</label>
     <version>0.3.0</version>
+    <min-ambari-version>1.7.*</min-ambari-version>
 
     <parameter>
         <name>ambari.server.url</name>
-        <description>The Ambari REST API cluster resource.</description>
+        <description>Enter the Ambari REST API cluster resource.</description>
         <label>Ambari Cluster URL</label>
         <placeholder>http://ambari.server:8080/api/v1/clusters/MyCluster</placeholder>
         <required>true</required>
     </parameter>
     <parameter>
         <name>ambari.server.username</name>
-        <description>The Cluster Operator username (for example: admin).</description>
+        <description>Enter the Cluster Operator username (for example: admin).</description>
         <label>Operator Username</label>
         <placeholder>admin</placeholder>
         <required>true</required>
     </parameter>
     <parameter>
         <name>ambari.server.password</name>
-        <description>The Cluster Operator password (for example: password).</description>
+        <description>Enter the Cluster Operator password (for example: password).</description>
         <label>Operator Password</label>
         <required>true</required>
         <masked>true</masked>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e0a9c9c/contrib/views/files/readme.md
----------------------------------------------------------------------
diff --git a/contrib/views/files/readme.md b/contrib/views/files/readme.md
index 9f81147..53497fc 100644
--- a/contrib/views/files/readme.md
+++ b/contrib/views/files/readme.md
@@ -23,7 +23,7 @@ This View provides a UI to browse HDFS, create directories and upload +
download
 Requirements
 -----
 
-- Ambari 1.7.0
+- Ambari 1.7.0 or later
 - HDFS with WebHDFS configured
 
 Build
@@ -42,20 +42,6 @@ Place the view archive on the Ambari Server and restart to deploy.
     cp files-0.1.0-SNAPSHOT.jar /var/lib/ambari-server/resources/views/
     ambari-server restart
 
-View Definition
------
-
-    <parameter>
-        <name>webhdfs.url</name>
-        <description>WebHDFS FileSystem URI (example: webhdfs://namenode:50070)</description>
-        <required>true</required>
-    </parameter>
-    <parameter>
-        <name>webhdfs.username</name>
-        <description>User and doAs for proxy user for HDFS</description>
-        <required>false</required>
-    </parameter>
-
 Cluster Configuration
 -----
 
@@ -93,7 +79,7 @@ From the Ambari Administration interface, create a Files view instance.
 | Details: Instance Name | FILES_1 |
 | Details: Display Name | Files |
 | Details: Description | Browse HDFS files and directories |
-| Properties: webhdfs.url | webhdfs://c6401.ambari.apache.org:50070 |
+| Properties: WebHDFS FileSystem URI | webhdfs://c6401.ambari.apache.org:50070 |
 
 Login to Ambari as "admin" and browse to the view instance.
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e0a9c9c/contrib/views/files/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/files/src/main/resources/view.xml b/contrib/views/files/src/main/resources/view.xml
index 5bd72d5..7dbdfa9 100644
--- a/contrib/views/files/src/main/resources/view.xml
+++ b/contrib/views/files/src/main/resources/view.xml
@@ -18,15 +18,19 @@
     <name>FILES</name>
     <label>Files</label>
     <version>0.1.0</version>
+    <min-ambari-version>1.7.*</min-ambari-version>
 
     <parameter>
         <name>webhdfs.url</name>
-        <description>WebHDFS FileSystem URI (example: webhdfs://namenode:50070)</description>
+        <description>Enter the WebHDFS FileSystem URI. Typically this is the dfs.namenode.http-address
property in the hdfs-site.xml configuration. URL must be accessible from Ambari Server.</description>
+        <label>WebHDFS FileSystem URI</label>
+        <placeholder>webhdfs://namenode:50070</placeholder>
         <required>true</required>
     </parameter>
     <parameter>
         <name>webhdfs.username</name>
-        <description>User and doAs for proxy user for HDFS</description>
+        <description>User and doAs for proxy user for HDFS. By default, uses the currently
logged-in Ambari user.</description>
+        <label>WebHDFS Username</label>
         <required>false</required>
     </parameter>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e0a9c9c/contrib/views/pig/readme.md
----------------------------------------------------------------------
diff --git a/contrib/views/pig/readme.md b/contrib/views/pig/readme.md
index c9714bc..3ab5698 100644
--- a/contrib/views/pig/readme.md
+++ b/contrib/views/pig/readme.md
@@ -25,7 +25,7 @@ UDFs with your pig scripts.
 Requirements
 -----
 
-- Ambari 1.7.0
+- Ambari 1.7.0 or later
 - HDFS with WebHDFS configured
 - WebHCat with Pig configured
 
@@ -45,60 +45,6 @@ Place the view archive on the Ambari Server and restart to deploy.
     cp pig-0.1.0-SNAPSHOT.jar /var/lib/ambari-server/resources/views/
     ambari-server restart
 
-View Definition
------
-
-    <!-- HDFS Configs -->
-    <parameter>
-        <name>webhdfs.url</name>
-        <description>WebHDFS FileSystem URL (example: webhdfs://namenode.host:50070)</description>
-        <required>true</required>
-    </parameter>
-
-    <parameter>
-        <name>webhdfs.username</name>
-        <description>User and doAs for proxy user for HDFS</description>
-        <required>false</required>
-    </parameter>
-
-    <!-- WebHCat Configs -->
-    <parameter>
-        <name>webhcat.url</name>
-        <description>WebHCat URL (example: http://webhcat.host:50111/templeton/v1)</description>
-        <required>true</required>
-    </parameter>
-
-    <parameter>
-        <name>webhcat.username</name>
-        <description>User and doAs for proxy user for WebHCat</description>
-        <required>false</required>
-    </parameter>
-
-    <!-- General Configs -->
-    <parameter>
-        <name>dataworker.username</name>
-        <description>The username (defaults to ViewContext username)</description>
-        <required>false</required>
-    </parameter>
-
-    <parameter>
-        <name>scripts.dir</name>
-        <description>HDFS directory path to store Pig scripts (example: /tmp/${username}/scripts)</description>
-        <required>true</required>
-    </parameter>
-
-    <parameter>
-        <name>jobs.dir</name>
-        <description>HDFS directory path to store Pig job status (example: /tmp/${username}/jobs)</description>
-        <required>true</required>
-    </parameter>
-
-    <parameter>
-        <name>store.dir</name>
-        <description>HDFS directory to store meta information about Pig scripts and
jobs (example: /tmp/${username}/store)</description>
-        <required>false</required>
-    </parameter>
-
 Cluster Configuration
 -----
 Configure HDFS for a proxy user. In core-site.xml, add the following properties:
@@ -158,10 +104,10 @@ From the Ambari Administration interface, create a Pig view instance.
 | Details: Instance Name | PIG_1 |
 | Details: Display Name | Pig |
 | Details: Description | Save and execute Pig scripts |
-| Properties: webhdfs.url | webhdfs://c6401.ambari.apache.org:50070 |
-| Properties: webhcat.url | http://c6401.ambari.apache.org:50111/templeton/v1 |
-| Properties: scripts.dir | /tmp/${username}/scripts |
-| Properties: jobs.dir | /tmp/${username}/jobs |
+| Properties: WebHDFS FileSystem URI | webhdfs://c6401.ambari.apache.org:50070 |
+| Properties: WebHCat URL | http://c6401.ambari.apache.org:50111/templeton/v1 |
+| Properties: Scripts HDFS Directory | /tmp/${username}/scripts |
+| Properties: Jobs HDFS Directory | /tmp/${username}/jobs |
 
 Login to Ambari as "admin" and browse to the view instance.
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e0a9c9c/contrib/views/pig/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/pig/src/main/resources/view.xml b/contrib/views/pig/src/main/resources/view.xml
index 89e3c56..91b604e 100644
--- a/contrib/views/pig/src/main/resources/view.xml
+++ b/contrib/views/pig/src/main/resources/view.xml
@@ -18,55 +18,71 @@
     <name>PIG</name>
     <label>Pig</label>
     <version>0.1.0</version>
+    <min-ambari-version>1.7.*</min-ambari-version>
 
     <!-- HDFS Configs -->
     <parameter>
         <name>webhdfs.url</name>
-        <description>WebHDFS FileSystem URI (example: webhdfs://namenode:50070)</description>
+        <description>Enter the WebHDFS FileSystem URI. Typically this is the dfs.namenode.http-address
property in the hdfs-site.xml configuration. URL must be accessible from Ambari Server.</description>
+        <label>WebHDFS FileSystem URI</label>
+        <placeholder>webhdfs://namenode:50070</placeholder>
         <required>true</required>
     </parameter>
 
     <parameter>
         <name>webhdfs.username</name>
-        <description>User and doAs for proxy user for HDFS</description>
+        <description>User and doAs for proxy user for HDFS. By default, uses the currently
logged-in Ambari user.</description>
+        <label>WebHDFS Username</label>
         <required>false</required>
     </parameter>
 
     <!-- WebHCat Configs -->
     <parameter>
         <name>webhcat.url</name>
-        <description>WebHCat URL (example: http://webhcat.host:50111/templeton/v1)</description>
+        <description>Enter the WebHCat URL for accessing WebHCat. URL must be accessible
from Ambari Server.</description>
+        <label>WebHCat URL</label>
+        <placeholder>http://webhcat.host:50111/templeton/v1</placeholder>
         <required>true</required>
     </parameter>
 
     <parameter>
         <name>webhcat.username</name>
-        <description>User and doAs for proxy user for WebHCat</description>
+        <description>User and doAs for proxy user for WebHCat. By default, uses the
currently logged-in Ambari user.</description>
+        <label>WebHCat Username</label>
         <required>false</required>
     </parameter>
 
     <!-- General Configs -->
     <parameter>
         <name>dataworker.username</name>
-        <description>The username (defaults to ViewContext username)</description>
+        <description>The dataworker username. By default, uses the currently logged-in
Ambari user.</description>
+        <label>Dataworker Username</label>
         <required>false</required>
     </parameter>
 
     <parameter>
         <name>scripts.dir</name>
-        <description>HDFS directory to store Pig scripts (example: /user/${username}/scripts)</description>
+        <description>HDFS directory to store Pig scripts.</description>
+        <label>Scripts HDFS Directory</label>
+        <placeholder>/user/${username}/pig/scripts</placeholder>
+        <default-value>/user/${username}/pig/scripts</default-value>
         <required>true</required>
     </parameter>
 
     <parameter>
         <name>jobs.dir</name>
-        <description>HDFS directory to store Pig job status (example: /user/${username}/jobs)</description>
+        <description>HDFS directory to store Pig job status.</description>
+        <label>Jobs HDFS Directory</label>
+        <placeholder>/user/${username}/pig/jobs</placeholder>
+        <default-value>/user/${username}/pig/jobs</default-value>
         <required>true</required>
     </parameter>
 
     <parameter>
         <name>store.dir</name>
-        <description>HDFS directory to store meta information about Pig scripts and
jobs (example: /user/${username}/store)</description>
+        <description>HDFS directory to store meta information about Pig scripts and
jobs.</description>
+        <label>Meta HDFS Directory</label>
+        <placeholder>/user/${username}/pig/store</placeholder>
         <required>false</required>
     </parameter>
 


Mime
View raw message