ambari-commits mailing list archives

From jlun...@apache.org
Subject [22/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)
Date Wed, 17 Dec 2014 20:05:54 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/status_params.py
new file mode 100644
index 0000000..0c69ca9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['ganglia-env']['ganglia_runtime_dir']
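
For context, a status_params module like this is imported from a component's status() handler so that STATUS commands stay lightweight. A minimal sketch of that usage, assuming the stock resource_management helpers (the class name and gmetad.pid file name below are illustrative, not from this commit):

    from resource_management import Script
    from resource_management.libraries.functions.check_process_status import check_process_status

    class GangliaServer(Script):
      def status(self, env):
        import status_params
        # check_process_status raises ComponentIsNotRunning when the pid
        # file is missing or the process it names is not alive.
        check_process_status(status_params.pid_dir + '/gmetad.pid')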

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/ganglia.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/ganglia.conf.j2 b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/ganglia.conf.j2
new file mode 100644
index 0000000..a08fb31
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/ganglia.conf.j2
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+Alias /ganglia "{{ganglia_web_path}}"
+
+<Directory "{{ganglia_web_path}}">
+#  SSLRequireSSL
+   Options None
+   AllowOverride None
+   Order allow,deny
+   Allow from all
+#  Order deny,allow
+#  Deny from all
+#  Allow from 127.0.0.1
+#  AuthName "Ganglia Access"
+#  AuthType Basic
+#  AuthUserFile /etc/ganglia/htpasswd.users
+#  Require valid-user
+</Directory>
\ No newline at end of file
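
Templates such as ganglia.conf.j2 are rendered through resource_management's Template resource. A minimal sketch, with an assumed target path and mode (not taken from this commit):

    from resource_management import File, Template

    # Template() renders ganglia.conf.j2, filling {{ganglia_web_path}}
    # from the module's params; the path and mode here are illustrative.
    File("/etc/httpd/conf.d/ganglia.conf",
         content=Template("ganglia.conf.j2"),
         mode=0644)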

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaClusters.conf.j2
new file mode 100644
index 0000000..ffb4e84
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaClusters.conf.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#########################################################
+### ClusterName           GmondMasterHost   GmondPort ###
+#########################################################
+
+{% for x in ganglia_clusters %}
+    {{ x[0] }}       	  {{ganglia_server_host}}  {{ x[1] }}
+{% endfor %}
\ No newline at end of file
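
The for-loop at the end emits one row per configured cluster. A quick illustration of the rendered output, using made-up hosts and ports:

    import jinja2

    ctx = {
      'ganglia_server_host': 'ganglia.example.com',                      # hypothetical
      'ganglia_clusters': [['HDPSlaves', 8660], ['HDPNameNode', 8661]],  # hypothetical
    }
    env = jinja2.Environment(loader=jinja2.FileSystemLoader('package/templates'))
    print env.get_template('gangliaClusters.conf.j2').render(ctx)
    # ->    HDPSlaves         ganglia.example.com  8660
    #       HDPNameNode       ganglia.example.com  8661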

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaEnv.sh.j2
new file mode 100644
index 0000000..0b68623
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaEnv.sh.j2
@@ -0,0 +1,46 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Unix users and groups for the binaries we start up.
+RRD_ROOTDIR={{rrdcached_base_dir}}
+GMETAD_USER={{gmetad_user}};
+GMOND_USER={{gmond_user}};
+WEBSERVER_GROUP={{webserver_group}};
+MODULES_DIR={{modules_dir}}
+GANGLIA_WEB_PATH={{ganglia_web_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaLib.sh.j2
new file mode 100644
index 0000000..6c24c7f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaLib.sh.j2
@@ -0,0 +1,85 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+GANGLIA_CONF_DIR={{ganglia_conf_dir}};
+GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
+RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
+RRDCACHED_WRITE_THREADS={{rrdcached_write_threads}}
+RRDCACHED_TIMEOUT={{rrdcached_timeout}}
+RRDCACHED_FLUSH_TIMEOUT={{rrdcached_flush_timeout}}
+RRDCACHED_DELAY={{rrdcached_delay}}
+
+# This file contains all the info about each Ganglia Cluster in our Grid.
+GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
+
+function createDirectory()
+{
+    directoryPath=${1};
+
+    if [ "x" != "x${directoryPath}" ]
+    then
+        mkdir -p ${directoryPath};
+    fi
+}
+
+function getGangliaClusterInfo()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    else
+        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    fi
+}
+
+function getConfiguredGangliaClusterNames()
+{
+  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
+  # the subdirectory name from each.
+  if [ -e ${GANGLIA_CONF_DIR} ]
+  then  
+    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
+  fi
+}
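
The awk one-liners in getGangliaClusterInfo() are terse; the same filtering, sketched in Python for readers less fluent in awk (names are illustrative):

    def get_ganglia_cluster_info(cluster_name=None,
                                 conf_file='./gangliaClusters.conf'):
        for line in open(conf_file):
            fields = line.split()
            # skip blank lines and lines whose first field starts with '#',
            # mirroring the awk filters ($1 !~ /^#/) && (NF)
            if not fields or fields[0].startswith('#'):
                continue
            if cluster_name is None or fields[0] == cluster_name:
                print line.rstrip()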

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/rrd.py.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/rrd.py.j2 b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/rrd.py.j2
new file mode 100644
index 0000000..65d70e2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/rrd.py.j2
@@ -0,0 +1,361 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+# NOTE: This script is executed by Python 2.4 on CentOS 5.
+# Make sure your changes are compatible.
+
+import cgi
+import glob
+import os
+import re
+import rrdtool
+import sys
+import time
+import urlparse
+
+# place this script in /var/www/cgi-bin of the Ganglia collector
+# requires 'yum install rrdtool-python' on the Ganglia collector
+'''
+  Loads rrd file info
+'''
+def loadRRDData(file, cf, start, end, resolution):
+  args = [file, cf, "--daemon", "unix:{{ganglia_runtime_dir}}/rrdcached.limited.sock"]
+
+  if start is not None:
+    args.extend(["-s", start])
+  else:
+    args.extend(["-s", "now-10m"])
+
+  if end is not None:
+    args.extend(["-e", end])
+
+  if resolution is not None:
+    args.extend(["-r", resolution])
+
+  return rrdtool.fetch(args)
+
+'''
+  Collects metrics across several matching filenames.
+'''
+def collectStatMetrics(clusterName, hostName, metricName, files, cf, start, end, resolution):
+  if clusterName[0] != '/':
+    clusterName.insert(0, '/')
+
+  metricParts = metricName.split('.')
+
+  # already know there's at least one
+  metricStat = metricParts[-1]
+  metricName = '.'.join(metricParts[:-1])
+
+  isRate = False
+  if len(metricParts) > 1 and metricParts[-2] == '_rate':
+    isRate = True
+    metricName = '.'.join(metricParts[:-2])
+
+  pattern = re.compile(metricName + '\.rrd$')
+  matchedFiles = filter(pattern.match, files)
+
+  parentPath = os.path.join(*clusterName)
+
+  actualFiles = []
+  for matchedFile in matchedFiles:
+    if hostName != "__SummaryInfo__":
+      osFiles = glob.glob(os.path.join(parentPath, hostName, matchedFile))
+    else:
+      osFiles = glob.glob(os.path.join(parentPath, '*', matchedFile))
+
+    for f in osFiles:
+      if -1 == f.find("__SummaryInfo__"):
+        actualFiles.append(f)
+
+  if len(actualFiles) == 0:
+    return
+
+  '''
+  [
+    {
+      "step_value": update each iteration
+      "count": increase by 1 each iteration
+      "sum": increase by value each iteration
+      "avg": update each iteration as sum/count
+      "min": update each iteration if step_value < old min OR min is missing (first time)
+      "max": update each iteration if step_value > old max OR max is missing (first time)
+    }
+  ]
+  '''
+
+  timestamp = None
+  stepsize = None
+  concreteMetricName = None
+  vals = None # values across all files
+
+  for file in actualFiles:
+    rrdMetric = loadRRDData(file, cf, start, end, resolution)
+    
+    if timestamp is None and stepsize is None and concreteMetricName is None:
+      timestamp = rrdMetric[0][0]
+      stepsize = rrdMetric[0][2]
+      
+      if not isRate:
+        suffix = metricStat
+      else:
+        suffix = '_rate.' + metricStat
+      
+      concreteMetricName = file.split(os.sep).pop().replace('rrd', suffix)
+
+    metricValues = rrdMetric[2]
+
+    if vals is None:
+      vals = [None] * len(metricValues)
+
+    i = 0
+    for tuple in metricValues:
+      if vals[i] is None:
+        vals[i] = {}
+        vals[i]['count'] = 0
+        vals[i]['_sum'] = 0.0
+        vals[i]['_avg'] = 0.0
+        vals[i]['_min'] = 999999999999.99
+        vals[i]['_max'] = 0.0
+
+      rawValue = tuple[0]
+      vals[i]['step_value'] = rawValue
+      if rawValue is None:
+        i += 1
+        continue
+
+      if isRate:
+        if 0 == i:
+          rawValue = 0.0
+        elif vals[i-1]['step_value'] is None:
+          rawValue = 0.0
+        else:
+          rawValue = (rawValue - vals[i-1]['step_value']) / stepsize
+      
+      vals[i]['count'] += 1 
+      vals[i]['_sum'] += rawValue
+
+      vals[i]['_avg'] = vals[i]['_sum']/vals[i]['count']
+
+      if rawValue < vals[i]['_min']:
+        vals[i]['_min'] = rawValue
+
+      if rawValue > vals[i]['_max']:
+        vals[i]['_max'] = rawValue
+      
+      i += 1
+
+  sys.stdout.write("sum\n")
+  sys.stdout.write(clusterName[len(clusterName)-1] + "\n")
+  sys.stdout.write(hostName + "\n")
+  sys.stdout.write(concreteMetricName + "\n")
+  sys.stdout.write(str(timestamp) + "\n")
+  sys.stdout.write(str(stepsize) + "\n")
+
+  for val in vals:
+    if val['step_value'] is None:
+      sys.stdout.write("[~n]")
+    else:
+      sys.stdout.write(str(val[metricStat]))
+    sys.stdout.write("\n")
+
+  sys.stdout.write("[~EOM]\n")
+
+  return
+
+def printMetric(clusterName, hostName, metricName, file, cf, start, end,
+                resolution, pointInTime):
+  if clusterName.endswith("rrds"):
+    clusterName = ""
+ 
+  rrdMetric = loadRRDData(file, cf, start, end, resolution)
+
+  # ds_name
+  sys.stdout.write(rrdMetric[1][0])
+  sys.stdout.write("\n")
+
+  sys.stdout.write(clusterName)
+  sys.stdout.write("\n")
+  sys.stdout.write(hostName)
+  sys.stdout.write("\n")
+  sys.stdout.write(metricName)
+  sys.stdout.write("\n")
+
+  # write time
+  sys.stdout.write(str(rrdMetric[0][0]))
+  sys.stdout.write("\n")
+  # write step
+  sys.stdout.write(str(rrdMetric[0][2]))
+  sys.stdout.write("\n")
+
+  if not pointInTime:
+    valueCount = 0
+    lastValue = None
+
+    for tuple in rrdMetric[2]:
+
+      thisValue = tuple[0]
+
+      if valueCount > 0 and thisValue == lastValue:
+        valueCount += 1
+      else:
+        if valueCount > 1:
+          sys.stdout.write("[~r]")
+          sys.stdout.write(str(valueCount))
+          sys.stdout.write("\n")
+
+        if thisValue is None:
+          sys.stdout.write("[~n]\n")
+        else:
+          sys.stdout.write(str(thisValue))
+          sys.stdout.write("\n")
+
+        valueCount = 1
+        lastValue = thisValue
+  else:
+    value = None
+    idx = -1
+    tuple = rrdMetric[2]
+    tupleLastIdx = len(tuple) * -1
+
+    while value is None and idx >= tupleLastIdx:
+      value = tuple[idx][0]
+      idx -= 1
+
+    if value is not None:
+      sys.stdout.write(str(value))
+      sys.stdout.write("\n")
+
+  sys.stdout.write("[~EOM]\n")
+  return
+
+
+def stripList(l):
+  return ([x.strip() for x in l])
+
+
+sys.stdout.write("Content-type: text/plain\n\n")
+
+# write start time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+requestMethod = os.environ['REQUEST_METHOD']
+
+if requestMethod == 'POST':
+  postData = sys.stdin.readline()
+  queryString = cgi.parse_qs(postData)
+  queryString = dict((k, v[0]) for k, v in queryString.items())
+elif requestMethod == 'GET':
+  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']))
+
+if "m" in queryString:
+  metricParts = queryString["m"].split(",")
+else:
+  metricParts = [""]
+metricParts = stripList(metricParts)
+
+hostParts = []
+if "h" in queryString:
+  hostParts = queryString["h"].split(",")
+hostParts = stripList(hostParts)
+
+if "c" in queryString:
+  clusterParts = queryString["c"].split(",")
+else:
+  clusterParts = [""]
+clusterParts = stripList(clusterParts)
+
+if "p" in queryString:
+  rrdPath = queryString["p"]
+else:
+  rrdPath = "{{rrdcached_base_dir}}"
+
+start = None
+if "s" in queryString:
+  start = queryString["s"]
+
+end = None
+if "e" in queryString:
+  end = queryString["e"]
+
+resolution = None
+if "r" in queryString:
+  resolution = queryString["r"]
+
+if "cf" in queryString:
+  cf = queryString["cf"]
+else:
+  cf = "AVERAGE"
+
+if "pt" in queryString:
+  pointInTime = True
+else:
+  pointInTime = False
+
+
+def _walk(*args, **kwargs):
+  for root, dirs, files in os.walk(*args, **kwargs):
+    for dir in dirs:
+      qualified_dir = os.path.join(root, dir)
+      if os.path.islink(qualified_dir):
+        for x in os.walk(qualified_dir, **kwargs):
+          yield x
+    yield (root, dirs, files)
+
+
+for cluster in clusterParts:
+  for path, dirs, files in _walk(os.path.join(rrdPath,cluster)):
+    pathParts = path.split("/")
+    #Process only paths that contain files. If no host parameter was passed,
+    #process all host folders plus the summary info; if a host parameter was
+    #passed, process only that host's folder.
+    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
+      for metric in metricParts:
+        file = metric + ".rrd"
+        fileFullPath = os.path.join(path, file)
+        if os.path.exists(fileFullPath):
+          #Exact name of metric
+          printMetric(pathParts[-2], pathParts[-1], file[:-4],
+                      os.path.join(path, file), cf, start, end, resolution,
+                      pointInTime)
+        else:
+          need_stats = False
+          parts = metric.split(".")
+          if len(parts) > 0 and parts[-1] in ['_min', '_max', '_avg', '_sum']:
+              need_stats = True
+
+          if need_stats and not pointInTime:
+            collectStatMetrics(pathParts[:-1], pathParts[-1], metric, files, cf, start, end, resolution)
+          else:
+            #Regex as metric name
+            metricRegex = metric + '\.rrd$'
+            p = re.compile(metricRegex)
+            matchedFiles = filter(p.match, files)
+            for matchedFile in matchedFiles:
+              printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
+                         os.path.join(path, matchedFile), cf, start, end,
+                         resolution, pointInTime)
+
+sys.stdout.write("[~EOF]\n")
+# write end time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+sys.stdout.flush()
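
Since rrd.py runs as a CGI endpoint, it is exercised over HTTP. A hedged example of querying it; the parameter names match the queryString handling above, while the host, cluster, and metric values are made up:

    import urllib

    query = urllib.urlencode({
        'c': 'HDPSlaves',          # cluster
        'h': 'host1.example.com',  # host; omit to scan all host folders
        'm': 'cpu_user',           # metric (rrd file base name or regex)
        's': 'now-1h',             # start time, passed through to rrdtool
        'cf': 'AVERAGE',           # consolidation function
    })
    print urllib.urlopen('http://ganglia.example.com/cgi-bin/rrd.py?' + query).read()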

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/alerts.json b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/alerts.json
new file mode 100644
index 0000000..fa911e1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/alerts.json
@@ -0,0 +1,124 @@
+{
+  "HBASE": {
+    "service": [
+      {
+        "name": "hbase_regionserver_process_percent",
+        "label": "Percent RegionServers Available",
+        "description": "This service-level alert is triggered if the configured percentage of RegionServer processes cannot be determined to be up and listening on the network for the configured warning and critical thresholds. It aggregates the results of RegionServer process down checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "hbase_regionserver_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.1
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.3
+            }
+          }
+        }
+      }    
+    ],
+    "HBASE_MASTER": [
+      {
+        "name": "hbase_master_process",
+        "label": "HBase Master Process",
+        "description": "This alert is triggered if the HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.master.port}}",
+          "default_port": 60000,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "hbase_master_cpu",
+        "label": "HBase Maser CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the HBase Master exceeds certain warning and critical thresholds. It checks the HBase Master JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hbase-site/hbase.master.info.port}}",
+            "https": "{{hbase-site/hbase.master.info.port}}",
+            "https_property": "{{cluster-env/security_enabled}}",
+            "https_property_value": "true",
+            "default_port": 60010
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      }
+    ],
+    "HBASE_REGIONSERVER": [
+      {
+        "name": "hbase_regionserver_process",
+        "label": "HBase RegionServer Process",
+        "description": "This host-level alert is triggered if the RegionServer processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.regionserver.info.port}}",
+          "default_port": 60030,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
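
For the AGGREGATE alert above, the Ambari server compares the fraction of affected RegionServers against the warning and critical values. The threshold arithmetic, illustrated with made-up counts (the evaluation itself happens inside the server, not in any script from this commit):

    total, affected = 20, 3
    ratio = float(affected) / total   # 0.15
    # ratio >= 0.1 -> WARNING  ("affected: [3], total: [20]")
    # ratio <  0.3 -> not CRITICAL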

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
new file mode 100644
index 0000000..231baa6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/hbase</value>
+    <description>Log Directories for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/hbase</value>
+    <description>Pid Directory for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>1024</value>
+    <description>HBase RegionServer Heap Size.</description>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_max</name>
+    <value>512</value>
+    <description>HBase RegionServer maximum value for minimum heap size.</description>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_ratio</name>
+    <value>0.2</value>
+    <description>HBase RegionServer minimum heap size is calculated as a percentage of max heap size.</description>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>1024</value>
+    <description>HBase Master Heap Size</description>
+  </property>
+   <property>
+    <name>hbase_user</name>
+    <value>hbase</value>
+    <property-type>USER</property-type>
+    <description>HBase User Name.</description>
+  </property>
+
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hbase-env.sh file</description>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
+{% else %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+{% endif %}
+    </value>
+  </property>
+
+</configuration>
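
The hbase_regionserver_xmn_ratio and hbase_regionserver_xmn_max properties feed the -Xmn value in HBASE_REGIONSERVER_OPTS above. A sketch of the derivation, assuming a helper along these lines in params.py (the function name is illustrative):

    def calc_regionserver_xmn(heapsize_mb, xmn_ratio, xmn_max_mb):
        # new-generation size is a ratio of the heap, capped at xmn_max
        return min(int(heapsize_mb * xmn_ratio), xmn_max_mb)

    calc_regionserver_xmn(1024, 0.2, 512)   # -> 204, rendered as -Xmn204m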

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-log4j.xml
new file mode 100644
index 0000000..57b3845
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-log4j.xml
@@ -0,0 +1,143 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.security.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Rolling File Appender properties
+hbase.log.maxfilesize=256MB
+hbase.log.maxbackupindex=20
+
+# Rolling File Appender
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+#
+# Security audit appender
+#
+hbase.security.log.file=SecurityAuth.audit
+hbase.security.log.maxfilesize=256MB
+hbase.security.log.maxbackupindex=20
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
+log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.category.SecurityLogger=${hbase.security.logger}
+log4j.additivity.SecurityLogger=false
+#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=INFO
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only, otherwise it's OTT.
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the below if you want to remove logging of client region caching'
+# and scan of .META. messages
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
+
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-policy.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-policy.xml
new file mode 100644
index 0000000..2f12801
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HRegionInterface protocol implementations (i.e.,
+    clients talking to HRegionServers).
+    The ACL is a comma-separated list of user and group names. The user
+    and group lists are separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterInterface protocol implementation (i.e.,
+    clients talking to HMaster for admin operations).
+    The ACL is a comma-separated list of user and group names. The user
+    and group lists are separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.masterregion.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterRegionInterface protocol implementations
+    (for HRegionServers communicating with HMaster).
+    The ACL is a comma-separated list of user and group names. The user
+    and group lists are separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml
new file mode 100644
index 0000000..84900d1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml
@@ -0,0 +1,331 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://localhost:8020/apps/hbase/data</value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration, or else all data will be lost
+    on machine restart.
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>60000</value>
+    <description>The port the HBase Master should bind to.</description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/hadoop/hbase</value>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.local.dir</name>
+    <value>${hbase.tmp.dir}/local</value>
+    <description>Directory on the local filesystem to be used as a local storage
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>60010</value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>60030</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.4</value>
+    <description>Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>60</value>
+    <description>Count of RPC Listener instances spun up on RegionServers.
+    Same property is used by the Master for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>86400000</value>
+    <description>The time (in milliseconds) between 'major' compactions of all
+    HStoreFiles in a region.  Default: 1 day.
+    Set to 0 to disable automated major compactions.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.38</value>
+    <description>When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+      This value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>2</value>
+    <description>Block updates if the memstore reaches hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.flush.size bytes.  Useful for preventing a
+    runaway memstore during spikes in update traffic.  Without an
+    upper bound, the memstore fills such that when it flushes, the
+    resultant flush files take a long time to compact or split, or,
+    worse, we OOME.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+    Memstore will be flushed to disk if size of the memstore
+    exceeds this number of bytes.  Value is checked by a thread that runs
+    every hbase.server.thread.wakefrequency.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value>true</value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>10737418240</value>
+    <description>
+    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+    grown to exceed this value, the hosting HRegion is split in two.
+    Default: 1G.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory
+    and some calls of next may take longer and longer times when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout; i.e. hbase.regionserver.lease.period
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>30000</value>
+    <description>ZooKeeper session timeout.
+      HBase passes this to the zk quorum as suggested maximum time for a
+      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+      "The client sends a requested timeout, the server responds with the
+      timeout that it can give the client. " In milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>10485760</value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This is to set an upper boundary for a single entry saved in a
+    storage file. Since entries cannot be split, this helps avoid a region
+    becoming unsplittable because its data is too large. It seems wise
+    to set this to a fraction of the maximum region size. Setting it to zero
+    or less disables the check.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value>3</value>
+    <description>
+    If more than this number of HStoreFiles in any one HStore
+    (one HStoreFile is written per flush of memstore) then a compaction
+    is run to rewrite all HStoreFiles files as one.  Larger numbers
+    put off compaction but when it runs, it takes longer to complete.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.flush.retries.number</name>
+    <value>120</value>
+    <description>
+    The number of times the region flush operation will be retried.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value>10</value>
+    <description>
+    If more than this number of StoreFiles in any one Store
+    (one StoreFile is written per flush of MemStore) then updates are
+    blocked for this HRegion until a compaction is completed, or
+    until hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.40</value>
+    <description>
+        Percentage of maximum heap (-Xmx setting) to allocate to block cache
+        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+        Set to 0 to disable but it's not recommended.
+    </description>
+  </property>
+
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated), who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+    <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
+      (no authentication), and 'kerberos'.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value></value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+    default on all tables. For any override coprocessor method, these classes
+    will be called in order. After implementing your own Coprocessor, just put
+    it in HBase's classpath and add the fully qualified class name here.
+    A coprocessor can also be loaded on demand by setting HTableDescriptor.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <property>
+    <name>hbase.zookeeper.useMulti</name>
+    <value>true</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
+    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase-unsecure</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file path are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+    <description>Disables version verification.</description>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>Path to domain socket.</description>
+  </property>
+
+</configuration>
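
A worked example of how two of the values above interact: writes to a region block once its memstore reaches hbase.hregion.memstore.block.multiplier times hbase.hregion.memstore.flush.size.

    flush_size = 134217728        # hbase.hregion.memstore.flush.size (128 MB)
    multiplier = 2                # hbase.hregion.memstore.block.multiplier
    blocking_threshold = multiplier * flush_size
    # 268435456 bytes = 256 MB of memstore per region before updates block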

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
new file mode 100644
index 0000000..fd290df
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
@@ -0,0 +1,144 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <displayName>HBase</displayName>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+        synchronization
+      </comment>
+      <version>0.96.0.2.0</version>
+      <components>
+        <component>
+          <name>HBASE_MASTER</name>
+          <displayName>HBase Master</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HBASE/HBASE_MASTER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HBASE_REGIONSERVER</name>
+          <displayName>RegionServer</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HBASE_CLIENT</name>
+          <displayName>HBase Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/hbase_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-site.xml</fileName>
+              <dictionaryName>hbase-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hbase-env.sh</fileName>
+              <dictionaryName>hbase-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-policy.xml</fileName>
+              <dictionaryName>hbase-policy</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hbase-log4j</dictionaryName>
+            </configFile>            
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hbase</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>hbase-policy</config-type>
+        <config-type>hbase-site</config-type>
+        <config-type>hbase-env</config-type>
+        <config-type>hbase-log4j</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>
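
Each commandScript above names a Python class that the Ambari agent drives. A minimal sketch of the hbase_master.py skeleton this metainfo expects; the method bodies are placeholders, not the code from this refactoring:

    from resource_management import Script

    class HbaseMaster(Script):
      def install(self, env):
        self.install_packages(env)

      def configure(self, env):
        pass  # render hbase-site.xml, hbase-env.sh, ... from the config types above

      def start(self, env):
        pass  # e.g. hbase-daemon.sh start master

      def stop(self, env):
        pass  # e.g. hbase-daemon.sh stop master

      def status(self, env):
        pass  # e.g. check the master pid file

    if __name__ == "__main__":
      HbaseMaster().execute()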

