ranger-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From omal...@apache.org
Subject [39/44] ARGUS-1. Initial code commit (Selvamohan Neethiraj via omalley)
Date Thu, 14 Aug 2014 20:50:50 GMT
http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hbase-agent/scripts/install.sh
----------------------------------------------------------------------
diff --git a/hbase-agent/scripts/install.sh b/hbase-agent/scripts/install.sh
new file mode 100644
index 0000000..5d1a371
--- /dev/null
+++ b/hbase-agent/scripts/install.sh
@@ -0,0 +1,261 @@
+#!/bin/bash
+
+
+function create_jceks()
+{
+# Create (or recreate) a credential entry inside a JCEKS keystore file.
+#   $1 - credential alias
+#   $2 - credential value (password)
+#   $3 - absolute path of the JCEKS file (prefixed onto jceks://file)
+alias=$1
+pass=$2
+jceksFile=$3
+
+# NOTE(review): the password travels on the hadoop command line via --value,
+# so it may be visible in 'ps' output while the command runs.
+ret=$(hadoop credential create "${alias}" --value "${pass}" --provider "jceks://file${jceksFile}" 2>&1)
+
+# "already exist" in the output means the alias is present: delete and re-add.
+if echo "${ret}" | grep -q 'already exist'
+then
+   echo "Credential file already exists,recreating the file..."
+   hadoop credential delete "${alias}" --provider "jceks://file${jceksFile}"
+   hadoop credential create "${alias}" --value "${pass}" --provider "jceks://file${jceksFile}"
+fi
+}
+
+hbase_dir=/usr/lib/hbase
+hbase_lib_dir=${hbase_dir}/lib
+hbase_conf_dir=/etc/hbase/conf
+
+hdp_dir=/usr/lib/hadoop
+hdp_lib_dir=/usr/lib/hadoop/lib
+hdp_conf_dir=/etc/hadoop/conf
+
+export CONFIG_FILE_OWNER="hbase:hadoop"
+
+
+if [ ! -d "${hdp_dir}" ]
+then
+	echo "ERROR: Invalid HADOOP HOME Directory: [${hdp_dir}]. Exiting ..."
+	exit 1
+fi
+
+#echo "Hadoop Configuration Path: ${hdp_conf_dir}"
+
+if [ ! -f ${hdp_conf_dir}/hadoop-env.sh ]
+then
+	echo "ERROR: Invalid HADOOP CONF Directory: [${hdp_conf_dir}]."
+	echo "ERROR: Unable to locate: hadoop-env.sh. Exiting ..."
+	exit 1
+fi
+
+# Resolve the directory containing this script; dirname yields "." when the
+# script is run from its own directory, so fall back to pwd in that case.
+install_dir=`dirname $0`
+
+[ "${install_dir}" = "." ] && install_dir=`pwd`
+
+#echo "Current Install Directory: [${install_dir}]"
+
+#verify mysql-connector path is valid
+# MYSQL_CONNECTOR_JAR is the text after '=' on the matching properties line.
+MYSQL_CONNECTOR_JAR=`grep '^MYSQL_CONNECTOR_JAR'  ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+echo "[I] Checking MYSQL CONNECTOR FILE : $MYSQL_CONNECTOR_JAR" 
+if test -f "$MYSQL_CONNECTOR_JAR"; then
+	echo "[I] MYSQL CONNECTOR FILE : $MYSQL_CONNECTOR_JAR file found" 
+else
+	echo "[E] MYSQL CONNECTOR FILE : $MYSQL_CONNECTOR_JAR does not exists" ; exit 1;
+fi
+#copying mysql connector jar file to lib directory
+cp $MYSQL_CONNECTOR_JAR ${install_dir}/lib
+
+
+#
+# --- Backup current configuration for backup - START
+#
+
+COMPONENT_NAME=hbase
+
+XASECURE_VERSION=`cat ${install_dir}/version`
+
+CFG_DIR=${hive_conf_dir}
+XASECURE_ROOT=/etc/xasecure/${COMPONENT_NAME}
+BACKUP_TYPE=pre
+CUR_VERSION_FILE=${XASECURE_ROOT}/.current_version
+CUR_CFG_DIR_FILE=${XASECURE_ROOT}/.config_dir
+PRE_INSTALL_CONFIG=${XASECURE_ROOT}/${BACKUP_TYPE}-${XASECURE_VERSION}
+
+backup_dt=`date '+%Y%m%d%H%M%S'`
+
+if [ -d "${PRE_INSTALL_CONFIG}" ]
+then
+	PRE_INSTALL_CONFIG="${PRE_INSTALL_CONFIG}.${backup_dt}"
+fi
+
+if [ -d ${CFG_DIR} ]
+then
+	( cd ${CFG_DIR} ; find . -print | cpio -pdm ${PRE_INSTALL_CONFIG} )
+	[ -f ${CUR_VERSION_FILE} ] && mv ${CUR_VERSION_FILE} ${CUR_VERSION_FILE}-${backup_dt}
+	echo ${XASECURE_VERSION} > ${CUR_VERSION_FILE}
+	echo ${CFG_DIR} > ${CUR_CFG_DIR_FILE}
+else
+	echo "ERROR: Unable to find configuration directory: [${CFG_DIR}]"
+	exit 1
+fi
+
+cp -f ${install_dir}/uninstall.sh ${XASECURE_ROOT}/
+
+#
+# --- Backup current configuration for backup  - END
+
+dt=`date '+%Y%m%d%H%M%S'`
+for f in ${install_dir}/conf/*
+do
+	if [ -f ${f} ]
+	then
+		fn=`basename $f`
+		if [ ! -f ${hbase_conf_dir}/${fn} ]
+		then
+			echo "+cp ${f} ${hbase_conf_dir}/${fn}"
+			cp ${f} ${hbase_conf_dir}/${fn}
+		else
+			echo "WARN: ${fn} already exists in the ${hbase_conf_dir} - Using existing configuration ${fn}"
+		fi
+	fi
+done
+
+#echo "Hadoop XASecure Library Path: ${hdp_lib_dir}"
+
+if [ ! -d ${hbase_lib_dir} ]
+then
+	echo "+mkdir -p ${hbase_lib_dir}"
+	mkdir -p ${hbase_lib_dir}
+fi
+
+for f in ${install_dir}/dist/*.jar
+do
+	if [ -f ${f} ]
+	then
+		fn=`basename $f`
+		echo "+cp ${f} ${hbase_lib_dir}/${fn}"
+		cp ${f} ${hbase_lib_dir}/${fn}
+	fi
+done
+
+
+# NOTE: a second, byte-identical copy of the dist/*.jar copy loop appeared
+# here; it only re-copied the same jars over themselves and has been removed.
+
+if [ -d ${install_dir}/lib ]
+then
+	for f in ${install_dir}/lib/*.jar
+	do
+		if [ -f ${f} ]
+		then
+			fn=`basename $f`
+			if [ -f ${hbase_lib_dir}/${fn} ]
+			then
+				cdt=`date '+%s'`
+				echo "+mv ${hbase_lib_dir}/${fn} ${hbase_lib_dir}/.${fn}.${cdt}"
+				mv ${hbase_lib_dir}/${fn} ${hbase_lib_dir}/.${fn}.${cdt}
+			fi
+			echo "+cp ${f} ${hbase_lib_dir}/${fn}"
+			cp ${f} ${hbase_lib_dir}/${fn}
+		fi
+	done
+fi
+
+
+CredFile=`grep '^CREDENTIAL_PROVIDER_FILE' ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+
+if ! [ `echo ${CredFile} | grep '^/.*'` ]
+then
+  echo "Please enter the Credential File Store with proper file path"
+  exit 1
+fi
+
+#
+# Generate Credential Provider file and Credential for Audit DB access.
+#
+
+
+auditCredAlias="auditDBCred"
+
+auditdbCred=`grep '^XAAUDIT.DB.PASSWORD' ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+
+create_jceks ${auditCredAlias} ${auditdbCred} ${CredFile}
+
+
+#
+# Generate Credential Provider file and Credential for SSL KEYSTORE AND TRUSTSTORE
+#
+
+
+sslkeystoreAlias="sslKeyStore"
+
+sslkeystoreCred=`head -1 /etc/xasecure/ssl/certs/${repoName}.maze`
+
+create_jceks ${sslkeystoreAlias} ${sslkeystoreCred} ${CredFile}
+
+
+
+ssltruststoreAlias="sslTrustStore"
+
+ssltruststoreCred=`grep '^SSL_TRUSTSTORE_PASSWORD' ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+
+create_jceks ${ssltruststoreAlias} ${ssltruststoreCred} ${CredFile}
+
+chown ${CONFIG_FILE_OWNER} ${CredFile} 
+
+PROP_ARGS="-p  ${install_dir}/install.properties"
+
+for f in ${install_dir}/installer/conf/*-changes.cfg
+do
+	if [ -f ${f} ]
+	then
+		fn=`basename $f`
+		orgfn=`echo $fn | sed -e 's:-changes.cfg:.xml:'`
+		fullpathorgfn="${hbase_conf_dir}/${orgfn}"
+		if [ ! -f ${fullpathorgfn} ]
+		then
+			echo "ERROR: Unable to find ${fullpathorgfn}"
+			exit 1
+		fi
+		archivefn="${hbase_conf_dir}/.${orgfn}.${dt}"
+		newfn="${hbase_conf_dir}/.${orgfn}-new.${dt}"
+		cp ${fullpathorgfn} ${archivefn}
+		if [ $? -eq 0 ]
+		then
+			cp="${install_dir}/installer/lib/*:/usr/lib/hadoop/*:/usr/lib/hadoop/lib/*"
+			java -cp "${cp}" com.xasecure.utils.install.XmlConfigChanger -i ${archivefn} -o ${newfn} -c ${f} ${PROP_ARGS}
+			if [ $? -eq 0 ]
+			then
+				diff -w ${newfn} ${fullpathorgfn} > /dev/null 2>&1
+				if [ $? -ne 0 ]
+				then
+					#echo "Changing config file:  ${fullpathorgfn} with following changes:"
+					#echo "==============================================================="
+					#diff -w ${newfn} ${fullpathorgfn}
+					#echo "==============================================================="
+					echo "NOTE: Current config file: ${fullpathorgfn} is being saved as ${archivefn}"
+					#echo "==============================================================="
+					cp ${newfn} ${fullpathorgfn}
+				fi
+			else
+				echo "ERROR: Unable to make changes to config. file: ${fullpathorgfn}"
+				echo "exiting ...."
+				exit 1
+			fi
+			else
+			echo "ERROR: Unable to save config. file: ${fullpathorgfn}  to ${archivefn}"
+			echo "exiting ...."
+			exit 1
+		fi
+	fi
+done
+
+chmod go-rwx ${hbase_conf_dir}/xasecure-policymgr-ssl.xml
+
+chown ${CONFIG_FILE_OWNER} ${hbase_conf_dir}/xasecure-policymgr-ssl.xml
+
+exit 0

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hbase-agent/scripts/uninstall.sh
----------------------------------------------------------------------
diff --git a/hbase-agent/scripts/uninstall.sh b/hbase-agent/scripts/uninstall.sh
new file mode 100644
index 0000000..e441b9d
--- /dev/null
+++ b/hbase-agent/scripts/uninstall.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# Uninstaller for the XASecure HBase agent configuration.
+# If a pre-install backup exists (recorded by install.sh under
+# /etc/xasecure/hbase), the whole config directory is restored from it;
+# otherwise individual files saved as ".<name>.<timestamp>" are rolled back.
+COMPONENT_NAME=hbase
+CFG_DIR=/etc/${COMPONENT_NAME}/conf
+XASECURE_ROOT=/etc/xasecure/${COMPONENT_NAME}
+BACKUP_TYPE=pre
+CUR_VERSION_FILE=${XASECURE_ROOT}/.current_version
+CUR_CFG_DIR_FILE=${XASECURE_ROOT}/.config_dir
+if [ -f ${CUR_VERSION_FILE} ]
+then
+	XASECURE_VERSION=`cat ${CUR_VERSION_FILE}`
+	PRE_INSTALL_CONFIG=${XASECURE_ROOT}/${BACKUP_TYPE}-${XASECURE_VERSION}
+	dt=`date '+%Y%m%d%H%M%S'`
+	if [ -d "${PRE_INSTALL_CONFIG}" ]
+	then
+		# install.sh may have recorded a non-default config dir; prefer it.
+		if [ -f ${CUR_CFG_DIR_FILE} ] 
+		then
+			CFG_DIR=`cat ${CUR_CFG_DIR_FILE}`
+		fi 
+		# Move the current config aside, then restore the backup tree via cpio.
+		[ -d ${CFG_DIR} ] && mv ${CFG_DIR} ${CFG_DIR}-${dt}
+		( cd ${PRE_INSTALL_CONFIG} ; find . -print | cpio -pdm ${CFG_DIR} )
+		[ -f ${CUR_VERSION_FILE} ] && mv ${CUR_VERSION_FILE} ${CUR_VERSION_FILE}-uninstalled-${dt}
+		echo "XASecure version - ${XASECURE_VERSION} has been uninstalled successfully."
+	else
+		echo "ERROR: Unable to find pre-install configuration directory: [${PRE_INSTALL_CONFIG}]"
+		exit 1
+	fi
+else
+	# No recorded version: fall back to per-file recovery. Saved copies are
+	# hidden files named ".<orig>.<timestamp>"; strip the timestamp suffix and
+	# de-duplicate to get the list of originals to restore.
+	cd ${CFG_DIR}
+	saved_files=`find . -type f -name '.*' |  sort | grep -v -- '-new.' | grep '[0-9]*$' | grep -v -- '-[0-9]*$' | sed -e 's:\.[0-9]*$::' | sed -e 's:^./::' | sort -u`
+	dt=`date '+%Y%m%d%H%M%S'`
+	if [ "${saved_files}" != "" ]
+	then
+	        for f in ${saved_files}
+	        do
+	                # The oldest saved copy (lowest timestamp) is the one restored.
+	                oldf=`ls ${f}.[0-9]* | sort | head -1`
+	                if [ -f "${oldf}" ]
+	                then
+	                        nf=`echo ${f} | sed -e 's:^\.::'`
+	                        if [ -f "${nf}" ]
+	                        then
+	                                # Preserve the current file as a timestamped
+	                                # hidden copy before overwriting it.
+	                                echo "+cp -p ${nf} .${nf}-${dt}"
+	                                cp -p ${nf} .${nf}-${dt}
+	                                echo "+cp ${oldf} ${nf}"
+	                                cp ${oldf} ${nf}
+	                        else
+	                                echo "ERROR: ${nf} not found to save. However, old file is being recovered."
+	                                echo "+cp -p ${oldf} ${nf}"
+	                                cp -p ${oldf} ${nf}
+	                        fi
+	                fi
+	        done
+	        echo "XASecure configuration has been uninstalled successfully."
+	fi
+fi

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hbase-agent/src/main/java/com/xasecure/authorization/hbase/Crypt.java
----------------------------------------------------------------------
diff --git a/hbase-agent/src/main/java/com/xasecure/authorization/hbase/Crypt.java b/hbase-agent/src/main/java/com/xasecure/authorization/hbase/Crypt.java
new file mode 100644
index 0000000..84b91da
--- /dev/null
+++ b/hbase-agent/src/main/java/com/xasecure/authorization/hbase/Crypt.java
@@ -0,0 +1,84 @@
+/**************************************************************************
+ *                                                                        *
+ * The information in this document is proprietary to XASecure Inc.,      *
+ * It may not be used, reproduced or disclosed without the written        *
+ * approval from the XASecure Inc.,                                       *
+ *                                                                        *
+ * PRIVILEGED AND CONFIDENTIAL XASECURE PROPRIETARY INFORMATION           *
+ *                                                                        *
+ * Copyright (c) 2013 XASecure, Inc.  All rights reserved.                *
+ *                                                                        *
+ *************************************************************************/
+
+ /**
+  *
+  *	@version: 1.0.004
+  *
+  */
+
+package com.xasecure.authorization.hbase;
+
+import javax.crypto.Cipher;
+import javax.crypto.spec.IvParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.commons.codec.binary.Hex;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class Crypt {
+	
+	private static final Log LOG = LogFactory.getLog("SecurityLogger." + XaSecureAuthorizationCoprocessor.class.getName());
+
+	// SECURITY NOTE(review): the IV and AES key below are hard-coded in source,
+	// so anyone with access to this jar can decrypt the data; they should be
+	// loaded from a protected credential store instead.
+	private static byte[] IV = { 01, 21, 22, 86, 121, 45, 98, 28, 111, 72, 54, 39, 96, 47, 84, 13 };
+	private static final byte[] encryptionKey = "324234sdff3a7d8e".getBytes();
+	private static final String CIPHER_ALGO = "AES/CBC/PKCS5Padding";
+	private static final String CIPHER_INIT_ALGO = "AES";
+	
+	// BUGFIX: 'me' must be volatile for double-checked locking to be safe under
+	// the Java Memory Model; without it a thread may observe a partially
+	// constructed instance.
+	private static volatile Crypt me = null ;
+	
+	private Cipher encrypter = null;
+	private Cipher descrypter = null;
+
+
+	// Lazily constructed singleton (double-checked locking on Crypt.class).
+	public static Crypt getInstance() {
+		if (me == null) {
+			synchronized (Crypt.class) {
+				if (me == null) {
+					me = new Crypt() ;
+				}
+			}
+		}
+		return me ;
+	}
+	
+	// Initializes one AES/CBC cipher per direction; Cipher instances are
+	// stateful, so all use is serialized via the synchronized methods below.
+	// Any initialization failure terminates the process (the region server
+	// must not run without working encryption).
+	private Crypt() {
+		try {
+			encrypter = Cipher.getInstance(CIPHER_ALGO);
+			SecretKeySpec enckey = new SecretKeySpec(encryptionKey, CIPHER_INIT_ALGO);
+			encrypter.init(Cipher.ENCRYPT_MODE, enckey, new IvParameterSpec(IV));
+
+			descrypter = Cipher.getInstance(CIPHER_ALGO);
+			SecretKeySpec deckey = new SecretKeySpec(encryptionKey, CIPHER_INIT_ALGO);
+			descrypter.init(Cipher.DECRYPT_MODE, deckey, new IvParameterSpec(IV));
+		} catch (Throwable t) {
+			// Typo "initialzie" fixed in the log message below.
+			LOG.error("Unable to initialize Encrypt/Decrypt module - Exiting from HBase", t);
+			System.exit(1);
+		}
+	}
+	
+	// SECURITY NOTE(review): the debug statements below write plaintext to the
+	// security log; consider removing them before production use.
+	public synchronized byte[] encrypt(byte[] plainText) throws Exception {
+		byte[] ret =  encrypter.doFinal(plainText);
+		LOG.debug("Encrypted plain text: [" + new String(plainText) + "] => {" +  Hex.encodeHexString(ret)  + "}") ;
+		return ret ;
+	}
+
+	public synchronized byte[] decrypt(byte[] cipherText) throws Exception {
+		byte[] ret =  descrypter.doFinal(cipherText);
+		LOG.debug("Decrypted From text: [" + Hex.encodeHexString(cipherText)   + "] => {" +  new String(ret)   + "}") ;
+		return ret ;
+	}
+
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hbase-agent/src/main/java/com/xasecure/authorization/hbase/HBaseAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-agent/src/main/java/com/xasecure/authorization/hbase/HBaseAccessController.java b/hbase-agent/src/main/java/com/xasecure/authorization/hbase/HBaseAccessController.java
new file mode 100644
index 0000000..bf380bf
--- /dev/null
+++ b/hbase-agent/src/main/java/com/xasecure/authorization/hbase/HBaseAccessController.java
@@ -0,0 +1,39 @@
+/**************************************************************************
+ *                                                                        *
+ * The information in this document is proprietary to XASecure Inc.,      *
+ * It may not be used, reproduced or disclosed without the written        *
+ * approval from the XASecure Inc.,                                       *
+ *                                                                        *
+ * PRIVILEGED AND CONFIDENTIAL XASECURE PROPRIETARY INFORMATION           *
+ *                                                                        *
+ * Copyright (c) 2013 XASecure, Inc.  All rights reserved.                *
+ *                                                                        *
+ *************************************************************************/
+
+ /**
+  *
+  *	@version: 1.0.004
+  *
+  */
+
+package com.xasecure.authorization.hbase;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.security.access.UserPermission;
+
+// Contract for pluggable HBase access verification: permission checks at
+// global, table, and column level, plus encryption/audit metadata lookups.
+public interface HBaseAccessController {
+	// Global (cluster-wide) permission check for the given action.
+	public boolean isAccessAllowed(User user, Action accessAction) ;
+	// Table-level permission check.
+	public boolean isAccessAllowed(User user, byte[] tableName, Action accessAction) ;
+	// Column-level permission check (column family + qualifier).
+	public boolean isAccessAllowed(User user, byte[] tableName, byte[] columnFamily, byte[] qualifier, Action accessAction) ;
+	// True if the given column is configured for encryption.
+	public boolean isEncrypted(byte[] tableName, byte[] columnFamily, byte[] qualifier) ;
+	// True if access to the given table should be audited.
+	public boolean isAudited(byte[] tableName) ;
+	// True if any column of the given table is configured for encryption.
+	public boolean isTableHasEncryptedColumn(byte[] tableName) ;
+	// Effective permissions for the user: cluster-wide, and per table.
+	public List<UserPermission>  getUserPermissions(User user) ;
+	public List<UserPermission>  getUserPermissions(User user, byte[] tableName) ;
+
+
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hbase-agent/src/main/java/com/xasecure/authorization/hbase/HBaseAccessControllerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-agent/src/main/java/com/xasecure/authorization/hbase/HBaseAccessControllerFactory.java b/hbase-agent/src/main/java/com/xasecure/authorization/hbase/HBaseAccessControllerFactory.java
new file mode 100644
index 0000000..2ca2290
--- /dev/null
+++ b/hbase-agent/src/main/java/com/xasecure/authorization/hbase/HBaseAccessControllerFactory.java
@@ -0,0 +1,61 @@
+/**************************************************************************
+ *                                                                        *
+ * The information in this document is proprietary to XASecure Inc.,      *
+ * It may not be used, reproduced or disclosed without the written        *
+ * approval from the XASecure Inc.,                                       *
+ *                                                                        *
+ * PRIVILEGED AND CONFIDENTIAL XASECURE PROPRIETARY INFORMATION           *
+ *                                                                        *
+ * Copyright (c) 2013 XASecure, Inc.  All rights reserved.                *
+ *                                                                        *
+ *************************************************************************/
+
+ /**
+  *
+  *	@version: 1.0.004
+  *
+  */
+
+package com.xasecure.authorization.hbase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import com.xasecure.authorization.hadoop.config.XaSecureConfiguration;
+import com.xasecure.authorization.hadoop.constants.XaSecureHadoopConstants;
+
+public class HBaseAccessControllerFactory {
+	
+	private static final Log LOG = LogFactory.getLog(HBaseAccessControllerFactory.class) ;
+
+	// BUGFIX: the field must be volatile for double-checked locking to publish
+	// the instance safely under the Java Memory Model.
+	private static volatile HBaseAccessController hBaseAccessController = null ;
+	
+	// Lazily instantiate the access-controller implementation named by the
+	// xasecure configuration (falling back to the compiled-in default class
+	// name). Returns null when instantiation fails; callers must expect that.
+	public static HBaseAccessController getInstance() {
+		if (hBaseAccessController == null) {
+			synchronized(HBaseAccessControllerFactory.class) {
+				if (hBaseAccessController == null) {
+					
+					String hBaseAccessControllerClassName = XaSecureConfiguration.getInstance().get(XaSecureHadoopConstants.HBASE_ACCESS_VERIFIER_CLASS_NAME_PROP, XaSecureHadoopConstants.HBASE_ACCESS_VERIFIER_CLASS_NAME_DEFAULT_VALUE) ;
+					if (hBaseAccessControllerClassName != null) {
+						try {
+							hBaseAccessControllerClassName = hBaseAccessControllerClassName.trim();
+							hBaseAccessController = (HBaseAccessController) (Class.forName(hBaseAccessControllerClassName).newInstance()) ;
+							LOG.info("Created a new instance of class: [" + hBaseAccessControllerClassName + "] for HBase Access verification.");
+						} catch (InstantiationException e) {
+							LOG.error("Unable to create HBaseAccessController : [" +  hBaseAccessControllerClassName + "]", e);
+						} catch (IllegalAccessException e) {
+							LOG.error("Unable to create HBaseAccessController : [" +  hBaseAccessControllerClassName + "]", e);
+						} catch (ClassNotFoundException e) {
+							LOG.error("Unable to create HBaseAccessController : [" +  hBaseAccessControllerClassName + "]", e);
+						}
+					}
+				}
+			}
+		}
+		return hBaseAccessController ;
+		
+	}
+
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hbase-agent/src/main/java/com/xasecure/authorization/hbase/XaSecureAccessControlFilter.java
----------------------------------------------------------------------
diff --git a/hbase-agent/src/main/java/com/xasecure/authorization/hbase/XaSecureAccessControlFilter.java b/hbase-agent/src/main/java/com/xasecure/authorization/hbase/XaSecureAccessControlFilter.java
new file mode 100644
index 0000000..61699a0
--- /dev/null
+++ b/hbase-agent/src/main/java/com/xasecure/authorization/hbase/XaSecureAccessControlFilter.java
@@ -0,0 +1,50 @@
+/**************************************************************************
+ *                                                                        *
+ * The information in this document is proprietary to XASecure Inc.,      *
+ * It may not be used, reproduced or disclosed without the written        *
+ * approval from the XASecure Inc.,                                       *
+ *                                                                        *
+ * PRIVILEGED AND CONFIDENTIAL XASECURE PROPRIETARY INFORMATION           *
+ *                                                                        *
+ * Copyright (c) 2013 XASecure, Inc.  All rights reserved.                *
+ *                                                                        *
+ *************************************************************************/
+
+ /**
+  *
+  *	@version: 1.0.004
+  *
+  */
+
+package com.xasecure.authorization.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.filter.FilterBase;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.access.TablePermission;
+
+// Cell-level read filter: drops cells the given user is not authorized to
+// read, as decided by the configured HBaseAccessController.
+public class XaSecureAccessControlFilter extends FilterBase {
+
+	private byte[] table = null;
+	private User user = null;
+
+	public XaSecureAccessControlFilter(User ugi, byte[] tableName) {
+		table = tableName;
+		user = ugi;
+	}
+	
+
+	@SuppressWarnings("deprecation")
+	@Override
+	public ReturnCode filterKeyValue(Cell kv) throws IOException {
+		HBaseAccessController accessController = HBaseAccessControllerFactory.getInstance();
+		// READ is checked per column (family + qualifier); on denial the rest
+		// of the column is skipped (NEXT_COL) rather than failing the scan.
+		if (accessController.isAccessAllowed(user, table, kv.getFamily(), kv.getQualifier(), TablePermission.Action.READ)) {
+			return ReturnCode.INCLUDE;
+		} else {
+			return ReturnCode.NEXT_COL;
+		}
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hbase-agent/src/main/java/com/xasecure/authorization/hbase/XaSecureAuthorizationCoprocessor.java
----------------------------------------------------------------------
diff --git a/hbase-agent/src/main/java/com/xasecure/authorization/hbase/XaSecureAuthorizationCoprocessor.java b/hbase-agent/src/main/java/com/xasecure/authorization/hbase/XaSecureAuthorizationCoprocessor.java
new file mode 100644
index 0000000..0c85c84
--- /dev/null
+++ b/hbase-agent/src/main/java/com/xasecure/authorization/hbase/XaSecureAuthorizationCoprocessor.java
@@ -0,0 +1,951 @@
+/**************************************************************************
+ *                                                                        *
+ * The information in this document is proprietary to XASecure Inc.,      *
+ * It may not be used, reproduced or disclosed without the written        *
+ * approval from the XASecure Inc.,                                       *
+ *                                                                        *
+ * PRIVILEGED AND CONFIDENTIAL XASECURE PROPRIETARY INFORMATION           *
+ *                                                                        *
+ * Copyright (c) 2013 XASecure, Inc.  All rights reserved.                *
+ *                                                                        *
+ *************************************************************************/
+ /**
+  *
+  *	@version: 1.0.004
+  *
+  */
+package com.xasecure.authorization.hbase;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Calendar;
+import java.util.Collection;
+import java.util.Date;
+import java.util.GregorianCalendar;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.TimeZone;
+
+import org.apache.commons.codec.binary.Hex;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
+import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.ipc.RequestContext;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScanType;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.security.AccessDeniedException;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.security.access.TablePermission;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.MapMaker;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.xasecure.audit.model.EnumRepositoryType;
+import com.xasecure.audit.model.HBaseAuditEvent;
+import com.xasecure.audit.provider.AuditProviderFactory;
+import com.xasecure.authorization.hadoop.config.XaSecureConfiguration;
+import com.xasecure.authorization.hadoop.constants.XaSecureHadoopConstants;
+
+public class XaSecureAuthorizationCoprocessor extends BaseRegionObserver implements MasterObserver, RegionServerObserver {
+	private static final Log AUDIT = LogFactory.getLog("xaaudit." + XaSecureAuthorizationCoprocessor.class.getName());
+	private static final Log LOG = LogFactory.getLog(XaSecureAuthorizationCoprocessor.class.getName());
+	private static final String XaSecureModuleName = XaSecureConfiguration.getInstance().get(XaSecureHadoopConstants.AUDITLOG_XASECURE_MODULE_ACL_NAME_PROP , XaSecureHadoopConstants.DEFAULT_XASECURE_MODULE_ACL_NAME) ;
+	private static final short  accessGrantedFlag  = 1;
+	private static final short  accessDeniedFlag   = 0;
+	private static final String repositoryName          = XaSecureConfiguration.getInstance().get(XaSecureHadoopConstants.AUDITLOG_REPOSITORY_NAME_PROP);
+
+		
+	private static final String SUPERUSER_CONFIG_PROP = "hbase.superuser";
+	private static final byte[] WILDCARD_MATCH = "*".getBytes();
+	
+	private RegionCoprocessorEnvironment regionEnv;
+	private Map<InternalScanner, String> scannerOwners = new MapMaker().weakKeys().makeMap();
+	
+	private HBaseAccessController accessController = HBaseAccessControllerFactory.getInstance();
+	private List<String> superUserList = null;
+	// Utilities Methods 
+	protected byte[] getTableName(RegionCoprocessorEnvironment e) {
+		HRegion region = e.getRegion();
+		byte[] tableName = null;
+		if (region != null) {
+			HRegionInfo regionInfo = region.getRegionInfo();
+			if (regionInfo != null) {
+				tableName = regionInfo.getTable().getName() ;
+			}
+		}
+		return tableName;
+	}
+	protected void isSystemOrSuperUser(Configuration conf) throws IOException {
+		User user = User.getCurrent();
+		if (user == null) {
+			throw new IOException("Unable to obtain the current user, authorization checks for internal operations will not work correctly!");
+		}
+		String currentUser = user.getShortName();
+		List<String> superusers = Lists.asList(currentUser, conf.getStrings(SUPERUSER_CONFIG_PROP, new String[0]));
+		User activeUser = getActiveUser();
+		if (!(superusers.contains(activeUser.getShortName()))) {
+			throw new AccessDeniedException("User '" + (user != null ? user.getShortName() : "null") + "is not system or super user.");
+		}
+	}
+	// True if the user's short name appears in the configured super-user list.
+	// NOTE(review): superUserList is only read here in the visible code; if it
+	// is never populated elsewhere, every check returns false — confirm.
+	private boolean isSuperUser(User user) {
+		boolean isSuper = false;
+		isSuper = (superUserList != null && superUserList.contains(user.getShortName()));
+		if (LOG.isDebugEnabled()) {
+			LOG.debug("IsSuperCheck on [" + user.getShortName() + "] returns [" + isSuper + "]");
+		}
+		return isSuper;
+	}
+	protected boolean isSpecialTable(HRegionInfo regionInfo) {
+		return isSpecialTable(regionInfo.getTable().getName());
+	}
+	protected boolean isSpecialTable(byte[] tableName) {
+		return isSpecialTable(Bytes.toString(tableName));
+	}
+	protected boolean isSpecialTable(String tableNameStr) {
+		return tableNameStr.equals("-ROOT-") || tableNameStr.equals(".META.");
+	}
+	@SuppressWarnings("unused")
+	private String getUser() {
+		User u = getActiveUser();
+		if (u == null) {
+			return "(user:unknown)";
+		} else {
+			String groups = (u.getGroupNames() == null) ? "" : StringUtils.join(u.getGroupNames(), ",");
+			return "(user:" + u.getShortName() + ", groups: [" + groups + "])";
+		}
+	}
+	// Resolve the user this operation runs as: the RPC request user when inside
+	// a request context, otherwise the process (system) user. May return null
+	// if the system user cannot be determined.
+	private User getActiveUser() {
+		User user = RequestContext.getRequestUser();
+		if (!RequestContext.isInRequestContext()) {
+			// for non-rpc handling, fallback to system user
+			try {
+				user = User.getCurrent();
+			} catch (IOException e) {
+				LOG.error("Unable to find the current user");
+				user = null;
+			}
+		}
+		return user;
+	}
+	// Methods that are used within the CoProcessor 
+	private void requireScannerOwner(InternalScanner s) throws AccessDeniedException {
+		if (RequestContext.isInRequestContext()) {
+			String requestUserName = RequestContext.getRequestUserName();
+			String owner = scannerOwners.get(s);
+			if (owner != null && !owner.equals(requestUserName)) {
+				throw new AccessDeniedException("User '" + requestUserName + "' is not the scanner owner!");
+			}
+		}
+	}
+	// Methods that are delegated to AUTHManager 
+	public boolean isPermissionGranted(User user, Action action) {
+		if (isSuperUser(user)) {
+			return true;
+		} else {
+			return accessController.isAccessAllowed(user, action);
+		}
+	}
+	public boolean isPermissionGranted(User user, byte[] tableName, Action action) {
+		if (isSuperUser(user)) {
+			return true;
+		} else {
+			return accessController.isAccessAllowed(user, tableName, action);
+		}
+	}
+	public boolean isPermissionGranted(User user, byte[] tableName, byte[] colf, Action action) {
+		if (isSuperUser(user)) {
+			return true;
+		} else {
+			return accessController.isAccessAllowed(user, tableName, colf, WILDCARD_MATCH, action);
+		}
+	}
+	/** Column-qualifier-level check: super-users always pass; otherwise defer to the access controller. */
+	public boolean isPermissionGranted(User user, byte[] tableName, byte[] colf, byte[] col, Action action) {
+		return isSuperUser(user) || accessController.isAccessAllowed(user, tableName, colf, col, action);
+	}
+	// Methods that are internally used by co-processors 
+	/**
+	 * Require the given action on the region's table and on EVERY family (and,
+	 * where supplied, every qualifier) in the map. Order of checks:
+	 * meta-table READs are always allowed; super-users pass; table-level grant
+	 * short-circuits; otherwise each family must pass at family or qualifier
+	 * level. Denials are audited (when the table is audited) and raised as
+	 * AccessDeniedException.
+	 *
+	 * @param request  operation name, used for audit/diagnostic messages
+	 * @param action   HBase permission being demanded
+	 * @param rEnv     region environment supplying the table identity
+	 * @param families family -&gt; qualifiers map; values may be a Set of column
+	 *                 names or a List of KeyValue (both handled below)
+	 */
+	@SuppressWarnings("unchecked")
+	public void requirePermission(String request, Action action, RegionCoprocessorEnvironment rEnv, Map<byte[], ? extends Collection<?>> families) throws IOException {
+		HRegionInfo hri = rEnv.getRegion().getRegionInfo();
+		byte[] tableName = hri.getTable().getName() ;
+		String tableNameStr = Bytes.toString(tableName);
+		if (hri.isMetaTable() || hri.isMetaRegion()) {
+			if (action == TablePermission.Action.READ) {
+				return;
+			}
+		}
+		User user = getActiveUser();
+		if (user == null) {
+			// FIX: use the readable table name (the original concatenated the raw
+			// byte[], printing an array identity) and add the missing space.
+			throw new AccessDeniedException("No user associated with request (" + request + ") for action: " + action + " on table:" + tableNameStr);
+		}
+		if (isSuperUser(user)) {
+			return;
+		}
+		if (action == TablePermission.Action.WRITE && (hri.isMetaTable() || hri.isMetaRegion()) && (isPermissionGranted(user, Permission.Action.CREATE) || isPermissionGranted(user, Permission.Action.ADMIN))) {
+			return;
+		}
+		if (isPermissionGranted(user, tableName, (byte[]) null, action)) {
+			return;
+		}
+		if (families != null && families.size() > 0) {
+			// all families must pass
+			for (Map.Entry<byte[], ? extends Collection<?>> family : families.entrySet()) {
+				// a) check for family level access
+				if (isPermissionGranted(user, tableName, family.getKey(), action)) {
+					continue; // family-level permission overrides per-qualifier
+				}
+				// b) qualifier level access can still succeed
+				if ((family.getValue() != null) && (family.getValue().size() > 0)) {
+					if (family.getValue() instanceof Set) { // Set<byte[]> - Set
+															// of Columns
+						// for each qualifier of the family
+						Set<byte[]> qualifierSet = (Set<byte[]>) family.getValue();
+						for (byte[] qualifier : qualifierSet) {
+							if (!isPermissionGranted(user, tableName, family.getKey(), qualifier, action)) {
+								if (accessController.isAudited(tableName)) {
+									auditEvent(request, tableName, family.getKey(), qualifier, null, null, user, accessDeniedFlag);
+								}
+								throw new AccessDeniedException("Insufficient permissions for user '" + user + "',action: " + action + ", tableName:" + tableNameStr + ", family:" + Bytes.toString(family.getKey()) + ",column: " + Bytes.toString(qualifier));
+							}
+						}
+					} else if (family.getValue() instanceof List) { // List<KeyValue>
+																	// - List of
+																	// KeyValue
+																	// pair
+						List<KeyValue> kvList = (List<KeyValue>) family.getValue();
+						for (KeyValue kv : kvList) {
+							if (!isPermissionGranted(user, tableName, family.getKey(), kv.getQualifier(), action)) {
+								if (accessController.isAudited(tableName)) {
+									auditEvent(request, tableName, family.getKey(), kv.getQualifier(), null, null, user, accessDeniedFlag);
+								}
+								throw new AccessDeniedException("Insufficient permissions for user '" + user + "',action: " + action + ", tableName:" + tableNameStr + ", family:" + Bytes.toString(family.getKey()) + ",column: " + Bytes.toString(kv.getQualifier()));
+							}
+						}
+					}
+				} else {
+					// Family named but no qualifiers supplied and no family-level grant: deny.
+					if (accessController.isAudited(tableName)) {
+						auditEvent(request, tableName, family.getKey(), null, null, null, user, accessDeniedFlag);
+					}
+					throw new AccessDeniedException("Insufficient permissions for user '" + user + "',action: " + action + ", tableName:" + tableNameStr + ", family:" + Bytes.toString(family.getKey()) + ", no columns found.");
+				}
+			}
+			return;
+		}
+		// No family map: table-level denial.
+		if (accessController.isAudited(tableName)) {
+			auditEvent(request, tableName, null, null, null, null, user, accessDeniedFlag);
+		}
+		throw new AccessDeniedException("Insufficient permissions for user '" + user + "',action: " + action + ", tableName:" + tableNameStr);
+	}
+	// Check if the user has global permission ...
+	/**
+	 * Require table-level (or global, when tableName is null) permission for
+	 * the given action; audits and throws AccessDeniedException on denial.
+	 */
+	protected void requirePermission(String request, byte[] tableName, Permission.Action action) throws AccessDeniedException {
+		User user = getActiveUser();
+		if (!isPermissionGranted(user, tableName, action)) {
+			// Guard against the null tableName that global checks (e.g. "balance") pass in.
+			if (tableName != null && accessController.isAudited(tableName)) {
+				auditEvent(request, tableName, null, null, null, null, user, accessDeniedFlag);
+			}
+			// Reuse the already-resolved user rather than resolving it a second time.
+			throw new AccessDeniedException("Insufficient permissions for user '" + user + "' (global, action=" + action + ")");
+		}
+	}
+	/**
+	 * Require every listed action on the given table/family/qualifier,
+	 * auditing and throwing AccessDeniedException on the first denial.
+	 */
+	protected void requirePermission(String request, byte[] aTableName, byte[] aColumnFamily, byte[] aQualifier, Permission.Action... actions) throws AccessDeniedException {
+		User activeUser = getActiveUser();
+		for (Action requestedAction : actions) {
+			if (isPermissionGranted(activeUser, aTableName, aColumnFamily, aQualifier, requestedAction)) {
+				continue;
+			}
+			if (accessController.isAudited(aTableName)) {
+				auditEvent(request, aTableName, aColumnFamily, aQualifier, null, null, activeUser, accessDeniedFlag);
+			}
+			throw new AccessDeniedException("Insufficient permissions for user '" + activeUser + "',action: " + requestedAction + ", tableName:" + Bytes.toString(aTableName) + ", family:" + Bytes.toString(aColumnFamily) + ",column: " + Bytes.toString(aQualifier));
+		}
+	}
+	/** Convenience overload: treat a collection of families as a family map with no qualifiers. */
+	protected void requirePermission(String request, Permission.Action perm, RegionCoprocessorEnvironment env, Collection<byte[]> families) throws IOException {
+		HashMap<byte[], Set<byte[]>> familiesWithNoQualifiers = new HashMap<byte[], Set<byte[]>>();
+		for (byte[] columnFamily : families) {
+			familiesWithNoQualifiers.put(columnFamily, null);
+		}
+		requirePermission(request, perm, env, familiesWithNoQualifiers);
+	}
+	/** Returns true iff requirePermission(...) would pass for this request; never throws. */
+	protected boolean isPermissionGranted(String request, User requestUser, Permission.Action perm, RegionCoprocessorEnvironment env, Map<byte[], NavigableSet<byte[]>> familyMap) {
+		try {
+			requirePermission(request, perm, env, familyMap);
+			return true;
+		} catch (Throwable t) {
+			// Any failure (including denial) is reported as "not granted".
+			return false;
+		}
+	}
+	/**
+	 * Returns true if the user holds the permission on ANY family or
+	 * family:qualifier named in the map (first grant wins); false when the
+	 * map is null/empty or no entry is granted.
+	 */
+	protected boolean hasFamilyQualifierPermission(User requestUser, Permission.Action perm, RegionCoprocessorEnvironment env, Map<byte[], NavigableSet<byte[]>> familyMap) {
+		User user = requestUser;
+		byte[] tableName = getTableName(env);
+		if (familyMap != null && familyMap.size() > 0) {
+			for (Map.Entry<byte[], NavigableSet<byte[]>> family : familyMap.entrySet()) {
+				if (family.getValue() != null && !family.getValue().isEmpty()) {
+					for (byte[] qualifier : family.getValue()) {
+						boolean isGranted = isPermissionGranted(user, tableName, family.getKey(), qualifier, perm);
+						// Guard the INFO log so the message string is only built when needed.
+						if (LOG.isInfoEnabled()) {
+							LOG.info(":=> hasFamilyQualifierPermission: T(" + Bytes.toString(tableName) + "), family: (" + Bytes.toString(family.getKey() ) + "), Q(" + Bytes.toString(qualifier) + "), Permission: [" + perm + "] => [" + isGranted + "]") ;
+						}
+						if (isGranted) {
+							return true;
+						}
+					}
+				} else {
+					boolean isGranted = isPermissionGranted(user, tableName, family.getKey(), perm);
+					if (LOG.isInfoEnabled()) {
+						LOG.info(":=>  hasFamilyPermission: T(" + Bytes.toString(tableName) + "), family: (" + Bytes.toString(family.getKey() ) + ", Permission: [" + perm + "] => [" + isGranted + "]") ;
+					}
+					if (isGranted) {
+						return true;
+					}
+				}
+			}
+		} else {
+			if (LOG.isDebugEnabled()) {
+				LOG.debug("Empty family map passed for permission check");
+			}
+		}
+		return false;
+	}
+	
+	
+	/**
+	 * AccessControl endpoint-style check: verifies each supplied Permission
+	 * against the region this coprocessor is attached to. TablePermissions
+	 * must target this region's table; other permissions are checked at
+	 * table level for each of their actions.
+	 *
+	 * @throws AccessDeniedException if any permission is not granted, or a
+	 *         TablePermission names a different table than this region's
+	 */
+	public void checkPermissions(Permission[] permissions) throws IOException {
+		String tableName = regionEnv.getRegion().getTableDesc().getTableName().getNameAsString() ;
+		for (Permission permission : permissions) {
+			if (permission instanceof TablePermission) {
+				TablePermission tperm = (TablePermission) permission;
+				for (Permission.Action action : permission.getActions()) {
+					if (! tperm.getTableName().getNameAsString().equals(tableName)) {
+						throw new AccessDeniedException(String.format("This method can only execute at the table specified in TablePermission. " + "Table of the region:%s , requested table:%s", tableName, 
+																	  tperm.getTableName().getNameAsString()));
+					}
+					// Build a one-entry family map (family may carry a single qualifier).
+					HashMap<byte[], Set<byte[]>> familyMap = Maps.newHashMapWithExpectedSize(1);
+					if (tperm.getFamily() != null) {
+						if (tperm.getQualifier() != null) {
+							familyMap.put(tperm.getFamily(), Sets.newHashSet(tperm.getQualifier()));
+						} else {
+							familyMap.put(tperm.getFamily(), null);
+						}
+					}
+					requirePermission("checkPermissions", action, regionEnv, familyMap);
+				}
+			} else {
+				// Non-table permission: check each action against this region's table.
+				for (Permission.Action action : permission.getActions()) {
+					byte[] tname = regionEnv.getRegion().getTableDesc().getTableName().getName() ;
+					requirePermission("checkPermissions", tname, action);
+				}
+			}
+		}
+	}
+	
+	@Override
+	public void postAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, HColumnDescriptor column) throws IOException {
+		// Audit-only hook: record the successful addColumn operation.
+		auditEvent("addColumn", tableName.getName(), column.getName(), null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo) throws IOException {
+		// Audit-only hook: record the successful region assignment.
+		auditEvent("assign", regionInfo.getTable().getNameAsString(), null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postBalance(ObserverContext<MasterCoprocessorEnvironment> c,List<RegionPlan> aRegPlanList) throws IOException {
+		// Audit-only hook: cluster-wide operation, so no table name (String overload via cast).
+		auditEvent("balance", (String) null, null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c, boolean oldValue, boolean newValue) throws IOException {
+		// Audit-only hook: record toggling of the balancer.
+		auditEvent("balanceSwitch", (String) null, null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+		// Audit-only hook: record the clone against the destination table.
+		auditEvent("cloneSnapshot", hTableDescriptor.getNameAsString(), null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx, HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
+		// Audit-only hook: record the successful table creation.
+		auditEvent("createTable", desc.getNameAsString(), null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postDelete(ObserverContext<RegionCoprocessorEnvironment> c, Delete delete, WALEdit edit, Durability durability) throws IOException {
+		// NOTE(review): delete.toString() is passed in the table-name slot of
+		// auditEvent — looks unintended (siblings pass the table name); confirm.
+		auditEvent("delete", delete.toString(), null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, byte[] col) throws IOException {
+		// Audit-only hook: record the successful column-family deletion.
+		auditEvent("deleteColumn", tableName.getName(), col, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot) throws IOException {
+		// Audit-only hook: snapshot deletion has no table context here.
+		auditEvent("deleteSnapShot", (String) null, null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName) throws IOException {
+		// Audit-only hook: record the successful table deletion.
+		auditEvent("deleteTable", tableName.getName(), null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName) throws IOException {
+		// Audit-only hook: record the successful table disable.
+		auditEvent("disableTable", tableName.getName(), null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName) throws IOException {
+		// Audit-only hook: record the successful table enable.
+		auditEvent("enableTable", tableName.getName(), null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, HColumnDescriptor descriptor) throws IOException {
+		// Audit-only hook: record the successful column-family modification.
+		auditEvent("modifyColumn", tableName.getName(), descriptor.getName(), null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, HTableDescriptor htd) throws IOException {
+		// Audit-only hook: record the successful table modification.
+		auditEvent("modifyTable", tableName.getName(), null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region, ServerName srcServer, ServerName destServer) throws IOException {
+		// Audit-only hook: record the successful region move.
+		auditEvent("move", region.getTable().getName(), null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postOpen(ObserverContext<RegionCoprocessorEnvironment> ctx) {
+		// Audit-only hook: region open; no table name is recorded here.
+		auditEvent("open", (String) null, null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postRestoreSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+		// Audit-only hook: record the successful snapshot restore.
+		auditEvent("restoreSnapshot", (String) null, null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postScannerClose(ObserverContext<RegionCoprocessorEnvironment> c, InternalScanner s) throws IOException {
+		// Forget the scanner's recorded owner; the finally ensures the close is
+		// audited even if the removal throws.
+		try {
+			scannerOwners.remove(s);
+		} finally {
+			auditEvent("scannerClose", getTableName(c.getEnvironment()), null, null, null, null, getActiveUser(), accessGrantedFlag);
+		}
+	}
+	@Override
+	public RegionScanner postScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException {
+		// Record the opening user as the scanner's owner so later scanner calls
+		// can be restricted to the same user (see requireScannerOwner).
+		try {
+			User user = getActiveUser();
+			if (user != null && user.getShortName() != null) {
+				scannerOwners.put(s, user.getShortName());
+			}
+		} finally {
+			auditEvent("scannerOpen", getTableName(c.getEnvironment()), null, null, null, null, getActiveUser(), accessGrantedFlag);
+		}
+		return s;
+	}
+	@Override
+	public void postSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+		// Audit-only hook: record the successful snapshot.
+		auditEvent("snapshot", hTableDescriptor.getNameAsString(), null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+		// Audit-only hook: record master startup.
+		auditEvent("startMaster", (String) null, null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void postUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo, boolean force) throws IOException {
+		// Audit-only hook: record the successful region unassignment.
+		auditEvent("unassign", regionInfo.getTable().getName(), null, null, null, null, getActiveUser(), accessGrantedFlag);
+	}
+	@Override
+	public void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, HColumnDescriptor column) throws IOException {
+		// Authorization gate: ADMIN and CREATE required on the table before adding a column family.
+		requirePermission("addColumn", tableName.getName(), null, null, Action.ADMIN, Action.CREATE);
+	}
+	@Override
+	public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append) throws IOException {
+		// Authorization gate: WRITE required on every family touched by the append.
+		requirePermission("append", TablePermission.Action.WRITE, c.getEnvironment(), append.getFamilyCellMap());
+		// null lets HBase proceed with the normal append processing — presumably; confirm against the coprocessor contract.
+		return null;
+	}
+	@Override
+	public void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo) throws IOException {
+		// Authorization gate: ADMIN required to assign a region.
+		requirePermission("assign", regionInfo.getTable().getName(), null, null, Action.ADMIN);
+	}
+	@Override
+	public void preBalance(ObserverContext<MasterCoprocessorEnvironment> c) throws IOException {
+		// Authorization gate: global ADMIN required (null table = global check).
+		requirePermission("balance", null, Permission.Action.ADMIN);
+	}
+	@Override
+	public boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c, boolean newValue) throws IOException {
+		// Authorization gate: global ADMIN required; the requested value passes through unchanged.
+		requirePermission("balanceSwitch", null, Permission.Action.ADMIN);
+		return newValue;
+	}
+	/** Authorization gate: WRITE is required on every column family targeted by the bulk load. */
+	@Override
+	public void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx, List<Pair<byte[], String>> familyPaths) throws IOException {
+		List<byte[]> targetFamilies = new LinkedList<byte[]>();
+		for (Pair<byte[], String> familyPath : familyPaths) {
+			targetFamilies.add(familyPath.getFirst());
+		}
+		requirePermission("bulkLoadHFile", Permission.Action.WRITE, ctx.getEnvironment(), targetFamilies);
+	}
+	@Override
+	public boolean preCheckAndDelete(ObserverContext<RegionCoprocessorEnvironment> c, byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, ByteArrayComparable comparator, Delete delete, boolean result) throws IOException {
+		// checkAndDelete both reads (the check) and writes (the delete): require READ and WRITE on the family.
+		Collection<byte[]> familyMap = Arrays.asList(new byte[][] { family });
+		requirePermission("checkAndDelete", TablePermission.Action.READ, c.getEnvironment(), familyMap);
+		requirePermission("checkAndDelete", TablePermission.Action.WRITE, c.getEnvironment(), familyMap);
+		return result;
+	}
+	@Override
+	public boolean preCheckAndPut(ObserverContext<RegionCoprocessorEnvironment> c, byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, ByteArrayComparable comparator, Put put, boolean result) throws IOException {
+		// checkAndPut both reads (the check) and writes (the put): require READ and WRITE on the family.
+		Collection<byte[]> familyMap = Arrays.asList(new byte[][] { family });
+		requirePermission("checkAndPut", TablePermission.Action.READ, c.getEnvironment(), familyMap);
+		requirePermission("checkAndPut", TablePermission.Action.WRITE, c.getEnvironment(), familyMap);
+		return result;
+	}
+	@Override
+	public void preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+		// Authorization gate: global ADMIN required to clone a snapshot.
+		requirePermission("cloneSnapshot", null, Permission.Action.ADMIN);
+	}
+	@Override
+	public void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested) throws IOException {
+		// Authorization gate: ADMIN on the region's table required to close it.
+		requirePermission("close", getTableName(e.getEnvironment()), Permission.Action.ADMIN);
+	}
+	@Override
+	public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store, InternalScanner scanner,ScanType scanType) throws IOException {
+		// Authorization gate: ADMIN required to compact; the scanner is passed through untouched.
+		requirePermission("compact", getTableName(e.getEnvironment()), null, null, Action.ADMIN);
+		return scanner;
+	}
+	@Override
+	public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> e, Store store, List<StoreFile> candidates) throws IOException {
+		// Authorization gate: ADMIN required before compaction file selection.
+		requirePermission("compactSelection", getTableName(e.getEnvironment()), null, null, Action.ADMIN);
+	}
+	@Override
+	public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c, HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
+		// Authorization gate: CREATE required on the new table's name.
+		requirePermission("createTable", desc.getName(), Permission.Action.CREATE);
+	}
+	@Override
+	public void preDelete(ObserverContext<RegionCoprocessorEnvironment> c, Delete delete, WALEdit edit, Durability durability) throws IOException {
+		// Authorization gate: WRITE required on every family touched by the delete.
+		requirePermission("delete", TablePermission.Action.WRITE, c.getEnvironment(), delete.getFamilyCellMap());
+	}
+	@Override
+	public void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, byte[] col) throws IOException {
+		// Authorization gate: ADMIN and CREATE required to drop a column family.
+		requirePermission("deleteColumn", tableName.getName(), null, null, Action.ADMIN, Action.CREATE);
+	}
+	@Override
+	public void preDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot) throws IOException {
+		// Authorization gate: global ADMIN required to delete a snapshot.
+		requirePermission("deleteSnapshot", null, Permission.Action.ADMIN);
+	}
+	@Override
+	public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName) throws IOException {
+		// Authorization gate: ADMIN and CREATE required to delete the table.
+		requirePermission("deleteTable", tableName.getName(), null, null, Action.ADMIN, Action.CREATE);
+	}
+	@Override
+	public void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName) throws IOException {
+		// Authorization gate: ADMIN and CREATE required to disable the table.
+		requirePermission("disableTable", tableName.getName(), null, null, Action.ADMIN, Action.CREATE);
+	}
+	@Override
+	public void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName) throws IOException {
+		// Authorization gate: ADMIN and CREATE required to enable the table.
+		requirePermission("enableTable", tableName.getName(), null, null, Action.ADMIN, Action.CREATE);
+	}
+	@Override
+	public boolean preExists(ObserverContext<RegionCoprocessorEnvironment> c, Get get, boolean exists) throws IOException {
+		// Authorization gate: READ required on the families named by the Get.
+		requirePermission("exists", TablePermission.Action.READ, c.getEnvironment(), get.familySet());
+		return exists;
+	}
+	@Override
+	public void preFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
+		// Authorization gate: ADMIN required to flush the region.
+		requirePermission("flush", getTableName(e.getEnvironment()), null, null, Action.ADMIN);
+	}
+	@Override
+	public void preGetClosestRowBefore(ObserverContext<RegionCoprocessorEnvironment> c, byte[] row, byte[] family, Result result) throws IOException {
+		// Authorization gate: READ required on the family (or table-wide when family is null).
+		requirePermission("getClosestRowBefore", TablePermission.Action.READ, c.getEnvironment(), (family != null ? Lists.newArrayList(family) : null));
+	}
+	@Override
+	public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c, Increment increment) throws IOException {
+		// Authorization gate: WRITE required on every family touched by the increment.
+		requirePermission("increment", TablePermission.Action.WRITE, c.getEnvironment(), increment.getFamilyCellMap().keySet());
+		
+		// null lets HBase proceed with the normal increment processing — presumably; confirm against the coprocessor contract.
+		return null;
+	}
+	@Override
+	public long preIncrementColumnValue(ObserverContext<RegionCoprocessorEnvironment> c, byte[] row, byte[] family, byte[] qualifier, long amount, boolean writeToWAL) throws IOException {
+		// Authorization gate: WRITE required on the family being incremented.
+		requirePermission("incrementColumnValue", TablePermission.Action.WRITE, c.getEnvironment(), Arrays.asList(new byte[][] { family }));
+		return -1;
+	}
+	@Override
+	public void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, HColumnDescriptor descriptor) throws IOException {
+		// Authorization gate: ADMIN and CREATE required to modify a column family.
+		requirePermission("modifyColumn", tableName.getName(), null, null, Action.ADMIN, Action.CREATE);
+	}
+	@Override
+	public void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, HTableDescriptor htd) throws IOException {
+		// Authorization gate: ADMIN and CREATE required to modify the table.
+		requirePermission("modifyTable", tableName.getName(), null, null, Action.ADMIN, Action.CREATE);
+	}
+	@Override
+	public void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region, ServerName srcServer, ServerName destServer) throws IOException {
+		// Authorization gate: ADMIN required to move a region.
+		requirePermission("move", region.getTable().getName() , null, null, Action.ADMIN);
+	}
+	/**
+	 * Region-open hook: system/special tables are handled via a
+	 * system/super-user check; all other tables require ADMIN.
+	 */
+	@Override
+	public void preOpen(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
+		RegionCoprocessorEnvironment env = e.getEnvironment();
+		final HRegion region = env.getRegion();
+		if (region == null) {
+			LOG.error("NULL region from RegionCoprocessorEnvironment in preOpen()");
+			return;
+		} else {
+			HRegionInfo regionInfo = region.getRegionInfo();
+			if (isSpecialTable(regionInfo)) {
+				// NOTE(review): the boolean result of isSystemOrSuperUser() is discarded,
+				// and it reads the regionEnv field rather than the local env — confirm intended.
+				isSystemOrSuperUser(regionEnv.getConfiguration());
+			} else {
+				requirePermission("open", getTableName(e.getEnvironment()), Action.ADMIN);
+			}
+		}
+	}
+	@Override
+	public void preRestoreSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+		// Authorization gate: ADMIN on the destination table required to restore a snapshot.
+		requirePermission("restoreSnapshot", hTableDescriptor.getName(), Permission.Action.ADMIN);
+	}
+	@Override
+	public void preScannerClose(ObserverContext<RegionCoprocessorEnvironment> c, InternalScanner s) throws IOException {
+		// Only the user that opened the scanner may close it.
+		requireScannerOwner(s);
+	}
+	@Override
+	public boolean preScannerNext(ObserverContext<RegionCoprocessorEnvironment> c, InternalScanner s, List<Result> result, int limit, boolean hasNext) throws IOException {
+		// Only the user that opened the scanner may iterate it.
+		requireScannerOwner(s);
+		return hasNext;
+	}
+	/**
+	 * Scan authorization: if the user lacks blanket READ on everything the
+	 * scan names, but does hold READ on at least one family/qualifier, the
+	 * scan proceeds with a per-cell access-control filter stacked (via
+	 * MUST_PASS_ALL) on any user filter; otherwise the scan is denied.
+	 */
+	@Override
+	public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException {
+		RegionCoprocessorEnvironment e = c.getEnvironment();
+		User user = getActiveUser();
+		boolean isGranted = isPermissionGranted("scannerOpen", user, TablePermission.Action.READ, e, scan.getFamilyMap());
+		if (!isGranted) {
+			if (hasFamilyQualifierPermission(user, TablePermission.Action.READ, e, scan.getFamilyMap())) {
+				byte[] table = getTableName(e);
+				XaSecureAccessControlFilter filter = new XaSecureAccessControlFilter(user, table);
+				if (scan.hasFilter()) {
+					// Preserve the caller's filter: both must pass.
+					FilterList wrapper = new FilterList(FilterList.Operator.MUST_PASS_ALL, Lists.newArrayList(filter, scan.getFilter()));
+					scan.setFilter(wrapper);
+				} else {
+					scan.setFilter(filter);
+				}
+			} else {
+				throw new AccessDeniedException("Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") + "' for scanner open on table " + Bytes.toString(getTableName(e)));
+			}
+		}
+		return s;
+	}
+	@Override
+	public void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c) throws IOException {
+		// Authorization gate: global ADMIN required to shut down the cluster.
+		requirePermission("shutdown", null, Permission.Action.ADMIN);
+	}
+	@Override
+	public void preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+		// Authorization gate: ADMIN on the table required to take a snapshot.
+		requirePermission("snapshot", hTableDescriptor.getName(), Permission.Action.ADMIN);
+	}
+	@Override
+	public void preSplit(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
+		// Authorization gate: ADMIN required to split the region.
+		requirePermission("split", getTableName(e.getEnvironment()), null, null, Action.ADMIN);
+	}
+	@Override
+	public void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c) throws IOException {
+		// Authorization gate: global ADMIN required to stop the master.
+		requirePermission("stopMaster", null, Permission.Action.ADMIN);
+	}
+	@Override
+	public void preStopRegionServer(ObserverContext<RegionServerCoprocessorEnvironment> env) throws IOException {
+		// Authorization gate: global ADMIN required to stop a region server.
+		requirePermission("stop", null, Permission.Action.ADMIN);
+	}
+	@Override
+	public void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo, boolean force) throws IOException {
+		// Authorization gate: ADMIN required to unassign a region.
+		requirePermission("unassign", regionInfo.getTable().getName(), null, null, Action.ADMIN);
+	}
+	// Which coprocessor environment this instance was started in; set in start().
+	private String coprocessorType = "unknown";
+	private static final String MASTER_COPROCESSOR_TYPE = "master";
+	private static final String REGIONAL_COPROCESSOR_TYPE = "regional";
+	private static final String REGIONAL_SERVER_COPROCESSOR_TYPE = "regionalServer";
+	/**
+	 * Coprocessor lifecycle hook: records the environment type this instance
+	 * serves (master / region / region-server) and, on first start, loads the
+	 * configured super-user list.
+	 */
+	@Override
+	public void start(CoprocessorEnvironment env) throws IOException {
+		if (env instanceof MasterCoprocessorEnvironment) {
+			coprocessorType = MASTER_COPROCESSOR_TYPE;
+		} else if (env instanceof RegionServerCoprocessorEnvironment) {
+			coprocessorType = REGIONAL_SERVER_COPROCESSOR_TYPE;
+		} else if (env instanceof RegionCoprocessorEnvironment) {
+			regionEnv = (RegionCoprocessorEnvironment) env;
+			coprocessorType = REGIONAL_COPROCESSOR_TYPE;
+		}
+		// NOTE(review): lazy, unsynchronized init — assumes start() is not invoked concurrently; confirm.
+		if (superUserList == null) {
+			superUserList = new ArrayList<String>();
+			Configuration conf = env.getConfiguration();
+			String[] users = conf.getStrings(SUPERUSER_CONFIG_PROP);
+			if (users != null) {
+				for (String user : users) {
+					user = user.trim();
+					LOG.info("Start() - Adding Super User(" + user + ")");
+					superUserList.add(user);
+				}
+			}
+		}
+		if (LOG.isDebugEnabled()) {
+			LOG.debug("Start of Coprocessor: [" + coprocessorType + "] with superUserList [" + superUserList + "]");
+		}
+	}
+	@Override
+	public void stop(CoprocessorEnvironment env) {
+		// No-op: nothing to release on coprocessor shutdown.
+	}
+	@Override
+	public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit, Durability durability) throws IOException {
+		// Authorization gate: WRITE required on every family touched by the put.
+		requirePermission("put", TablePermission.Action.WRITE, c.getEnvironment(), put.getFamilyCellMap());
+	}
+	
+	/**
+	 * Audit-only hook: for audited, non-system tables, log every cell written
+	 * by the put (family, qualifier, row and value per cell).
+	 */
+	@Override
+	public void postPut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit, Durability durability) {
+		byte[] tableName = getTableName(c.getEnvironment());
+		if (!isSpecialTable(tableName)) {
+			if (accessController.isAudited(tableName)) {
+				// Iterate entries directly instead of keySet() + a get() lookup per family.
+				for (Map.Entry<byte[], List<Cell>> familyEntry : put.getFamilyCellMap().entrySet()) {
+					if (familyEntry.getKey() != null) {
+						for (Cell kv : familyEntry.getValue()) {
+							auditEvent("Put", tableName, CellUtil.cloneFamily(kv),   CellUtil.cloneQualifier(kv) , CellUtil.cloneRow(kv), CellUtil.cloneValue(kv), getActiveUser(), accessGrantedFlag);
+						}
+					}
+				}
+			}
+		}
+	}
+	/**
+	 * Get authorization: mirrors preScannerOpen — if the user lacks blanket
+	 * READ on everything the Get names, but holds READ on at least one
+	 * family/qualifier, the Get proceeds with a per-cell access-control
+	 * filter stacked on any user filter; otherwise the Get is denied.
+	 */
+	@Override
+	public void preGet(ObserverContext<RegionCoprocessorEnvironment> rEnv, Get get, List<KeyValue> keyValList) throws IOException {
+		RegionCoprocessorEnvironment e = rEnv.getEnvironment();
+		User requestUser = getActiveUser();
+
+
+		// Debug-only: render the requested family/qualifier set for the log.
+		if (LOG.isDebugEnabled())
+		{
+			StringBuilder fields = new StringBuilder() ;
+			Map<byte[], NavigableSet<byte[]>> familyMap = get.getFamilyMap() ;
+			if (familyMap != null && familyMap.size() > 0) {
+				for(byte[] columnfamily : familyMap.keySet() ) {
+					if (columnfamily != null && columnfamily.length > 0) {
+						NavigableSet<byte[]> columnNameSet = familyMap.get(columnfamily) ;
+						if (columnNameSet != null && columnNameSet.size() > 0) {
+	 						for(byte[] columnname : columnNameSet) {
+								fields.append("Field[" + Bytes.toString(columnfamily) + ":" + Bytes.toString(columnname) + "],") ;
+							}
+						}
+						else {
+							fields.append("Field[" + Bytes.toString(columnfamily) + ":null],") ;
+						}
+					}
+				}
+			}
+			else {
+				if (familyMap == null){
+					fields.append("{null}") ;
+				}
+				else {
+					fields.append("{empty}") ;
+				}
+			}
+			LOG.debug("preGet is checking permission for the following fields: {" + fields.toString() + "}");
+		}
+		
+		boolean isPermGranted = isPermissionGranted("get", requestUser, TablePermission.Action.READ, e, get.getFamilyMap());
+		
+		if (!isPermGranted) {
+			isPermGranted = hasFamilyQualifierPermission(requestUser, TablePermission.Action.READ, e, get.getFamilyMap());
+			if (isPermGranted) {
+				byte[] table = getTableName(e);
+				XaSecureAccessControlFilter filter = new XaSecureAccessControlFilter(requestUser, table);
+				if (get.getFilter() != null) {
+					// Preserve the caller's filter: both must pass.
+					FilterList wrapper = new FilterList(FilterList.Operator.MUST_PASS_ALL, Lists.newArrayList(filter, get.getFilter()));
+					get.setFilter(wrapper);
+				} else {
+					get.setFilter(filter);
+				}
+			} else {
+				throw new AccessDeniedException("Insufficient permissions (table=" + e.getRegion().getTableDesc().getNameAsString() + ", action=READ)");
+			}
+		}
+	}
+	/**
+	 * Audit-only hook: for audited, non-system tables, log every cell
+	 * returned by the Get. Auditing is best-effort and never fails the read.
+	 */
+	@Override
+	public void postGet(final ObserverContext<RegionCoprocessorEnvironment> env, final Get get, final List<KeyValue> result) throws IOException {
+		HRegionInfo hri = env.getEnvironment().getRegion().getRegionInfo();
+		
+		byte[] tableName = hri.getTable().getName() ;
+		
+		if (!isSpecialTable(tableName)) {
+			try {
+				if (accessController.isAudited(tableName)) {
+					for (KeyValue kv : result) {
+						auditEvent("Get", tableName, kv.getFamily(), kv.getQualifier(), kv.getKey(), kv.getValue(), getActiveUser(), accessGrantedFlag);
+					}
+				}
+			} catch (Throwable t) {
+				// Keep best-effort semantics, but don't lose the failure entirely.
+				LOG.debug("postGet: audit logging failed", t);
+			}
+		}
+	}
+	
+	/** Byte-array convenience overload: converts the names to Strings and delegates. */
+	private void auditEvent(String eventName, byte[] tableName, byte[] columnFamilyName, byte[] qualifierName, byte[] row, byte[] value, User user, short accessFlag) {
+		auditEvent(eventName, Bytes.toString(tableName), Bytes.toString(columnFamilyName), Bytes.toString(qualifierName), row, value, user, accessFlag);
+	}
+	
+	/**
+	 * Builds and emits an HBaseAuditEvent for the given operation, provided
+	 * the table is non-null and configured for auditing. The resource path is
+	 * table[/family[/column]] with the type narrowed accordingly. Audit
+	 * failures are logged and swallowed so they never affect the operation.
+	 */
+	private void auditEvent(String eventName, String tableName, String columnFamilyName, String qualifierName, byte[] row, byte[] value, User user, short accessFlag) {
+		
+		// NOTE(review): String.getBytes() uses the platform default charset — confirm it matches how table names were encoded.
+		if (tableName != null && accessController.isAudited(tableName.getBytes())) {
+			
+			String resourceType = "table";
+			String resourceName = tableName;
+			if (columnFamilyName != null && columnFamilyName.length() > 0) {
+				resourceName += "/" + columnFamilyName;
+				resourceType = "columnFamily";
+			}
+			if (qualifierName != null && qualifierName.length() > 0) {
+				resourceName += "/" + qualifierName;
+				resourceType = "column";
+			}
+			
+			HBaseAuditEvent auditEvent = new HBaseAuditEvent();
+
+			auditEvent.setAclEnforcer(XaSecureModuleName);
+			auditEvent.setResourceType(resourceType);
+			auditEvent.setResourcePath(resourceName);
+			auditEvent.setAction(eventName);
+			auditEvent.setAccessType(eventName);
+			auditEvent.setUser(user == null ? XaSecureHadoopConstants.AUDITLOG_EMPTY_STRING  : user.getShortName());
+			auditEvent.setAccessResult(accessFlag);
+			auditEvent.setClientIP(null); // TODO:
+			auditEvent.setEventTime(getUTCDate());
+			auditEvent.setRepositoryType(EnumRepositoryType.HBASE);
+			auditEvent.setRepositoryName(repositoryName);
+			
+			try {
+				if (LOG.isDebugEnabled()) {
+					LOG.debug("Writing audit log [" + auditEvent + "] - START.");
+				}
+				AuditProviderFactory.getAuditProvider().log(auditEvent);
+				if (LOG.isDebugEnabled()) {
+					LOG.debug("Writing audit log [" + auditEvent + "] - END.");
+				}
+			}
+			catch(Throwable t) {
+				LOG.error("ERROR during audit log [" + auditEvent + "]", t);
+			}
+			
+		}
+	}
+	// ----------------------------------------------------------------------
+	// The MasterObserver hooks below are intentionally empty: this
+	// coprocessor takes no action on DDL, namespace or region lifecycle
+	// events, but must provide bodies to satisfy the interface contract.
+	// ----------------------------------------------------------------------
+	@Override
+	public void postAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName,HColumnDescriptor aHColDesc) throws IOException {
+	}
+	@Override
+	public void postCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> aMctx, NamespaceDescriptor aNamespaceDesc) throws IOException {
+	}
+	@Override
+	public void postCreateTableHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, HTableDescriptor arg1, HRegionInfo[] aRegionInfoList) throws IOException {
+	}
+	@Override
+	public void postDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName, byte[] aColumnFamilyName) throws IOException {
+	}
+	@Override
+	public void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> aMctx, String arg1) throws IOException {
+	}
+	@Override
+	public void postDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName) throws IOException {
+	}
+	@Override
+	public void postDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName) throws IOException {
+	}
+	@Override
+	public void postEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName) throws IOException {
+	}
+	@Override
+	public void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> aMctx, List<HTableDescriptor> aHTableDescList) throws IOException {
+	}
+	@Override
+	public void postModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName,HColumnDescriptor aHColDesc) throws IOException {
+	}
+	@Override
+	public void postModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> aMctx,NamespaceDescriptor aNamespaceDesc) throws IOException {
+	}
+	@Override
+	public void postModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName,HTableDescriptor aHTableDesc) throws IOException {
+	}
+	@Override
+	public void postRegionOffline(ObserverContext<MasterCoprocessorEnvironment> aMctx, HRegionInfo aHRegInfo) throws IOException {
+	}
+	@Override
+	public void preAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName,HColumnDescriptor aHColDesc) throws IOException {
+	}
+	@Override
+	public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> aMctx,NamespaceDescriptor aNamespaceDesc) throws IOException {
+	}
+	@Override
+	public void preCreateTableHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx,HTableDescriptor aHTableDesc, HRegionInfo[] aHRegInfoList) throws IOException {
+	}
+	@Override
+	public void preDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName,byte[] aColumnFamilyName) throws IOException {
+	}
+	@Override
+	public void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> aMctx, String aNamespaceName) throws IOException {
+	}
+	@Override
+	public void preDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName) throws IOException {
+	}
+	@Override
+	public void preDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName) throws IOException {
+	}
+	@Override
+	public void preEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName) throws IOException {
+	}
+	@Override
+	public void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> aMctx,List<TableName> aTableNameList, List<HTableDescriptor> aHTableDescList) throws IOException {
+	}
+	@Override
+	public void preMasterInitialization(ObserverContext<MasterCoprocessorEnvironment> aMctx) throws IOException {
+	}
+	@Override
+	public void preModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName, HColumnDescriptor aHColDesc) throws IOException {
+	}
+	@Override
+	public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> aMctx, NamespaceDescriptor aNamespaceDesc) throws IOException {
+	}
+	@Override
+	public void preModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> aMctx, TableName aTableName, HTableDescriptor aHTableDesc) throws IOException {
+	}
+	@Override
+	public void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> aMctx, HRegionInfo aHRegInfo) throws IOException {
+	}
+	
+	/**
+	 * Returns the current time shifted backwards by the local UTC offset,
+	 * used as the event timestamp on audit records.
+	 *
+	 * NOTE(review): java.util.Date represents an absolute instant (epoch
+	 * millis), so subtracting the zone offset produces a Date whose epoch
+	 * value no longer matches "now" — plain 'new Date()' already denotes the
+	 * current instant in UTC. Confirm whether downstream audit storage or
+	 * formatting depends on this shifted value before changing it.
+	 */
+	public static Date getUTCDate() {
+		Calendar local=Calendar.getInstance();
+	    int offset = local.getTimeZone().getOffset(local.getTimeInMillis());
+	    GregorianCalendar utc = new GregorianCalendar(TimeZone.getTimeZone("GMT+0"));
+	    utc.setTimeInMillis(local.getTimeInMillis());
+	    utc.add(Calendar.MILLISECOND, -offset);
+	    return utc.getTime();
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/conf/hdfs-site-changes.cfg
----------------------------------------------------------------------
diff --git a/hdfs-agent/conf/hdfs-site-changes.cfg b/hdfs-agent/conf/hdfs-site-changes.cfg
new file mode 100644
index 0000000..26ee681
--- /dev/null
+++ b/hdfs-agent/conf/hdfs-site-changes.cfg
@@ -0,0 +1,6 @@
+#
+# This is to ensure that DFS permission checking is enabled so that the DFS permissions can be verified.
+# XAAgents will be able to look into permission checks only if these flags are set to TRUE.
+#
+dfs.permissions.enabled						true	mod	create-if-not-exists
+dfs.permissions								true	mod	create-if-not-exists

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/conf/xasecure-audit-changes.cfg
----------------------------------------------------------------------
diff --git a/hdfs-agent/conf/xasecure-audit-changes.cfg b/hdfs-agent/conf/xasecure-audit-changes.cfg
new file mode 100644
index 0000000..4b04f92
--- /dev/null
+++ b/hdfs-agent/conf/xasecure-audit-changes.cfg
@@ -0,0 +1,5 @@
+xasecure.audit.jpa.javax.persistence.jdbc.url		jdbc:mysql://%XAAUDIT.DB.HOSTNAME%/%XAAUDIT.DB.DATABASE_NAME%	mod create-if-not-exists
+xasecure.audit.jpa.javax.persistence.jdbc.user		%XAAUDIT.DB.USER_NAME% 											mod create-if-not-exists
+xasecure.audit.jpa.javax.persistence.jdbc.password	%XAAUDIT.DB.PASSWORD% 											mod create-if-not-exists
+xasecure.audit.repository.name						%REPOSITORY_NAME% 												mod create-if-not-exists
+xasecure.audit.credential.provider.file     		jceks://file%CREDENTIAL_PROVIDER_FILE% 							mod create-if-not-exists

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/conf/xasecure-audit.xml
----------------------------------------------------------------------
diff --git a/hdfs-agent/conf/xasecure-audit.xml b/hdfs-agent/conf/xasecure-audit.xml
new file mode 100644
index 0000000..2b24f33
--- /dev/null
+++ b/hdfs-agent/conf/xasecure-audit.xml
@@ -0,0 +1,91 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+	<property>
+		<name>xasecure.audit.provider.factory</name>
+		<value>com.xasecure.audit.provider.AuditProviderFactory</value>
+	</property>
+
+	<!--  Properties whose name begin with "xasecure.audit." are used to configure JPA -->
+	<property>
+		<name>xasecure.audit.jpa.javax.persistence.jdbc.url</name>
+		<value>jdbc:mysql://localhost:3306/xa_db</value>
+	</property>
+
+	<property>
+		<name>xasecure.audit.jpa.javax.persistence.jdbc.user</name>
+		<value>xaaudit</value>
+	</property>
+
+	<property>
+		<name>xasecure.audit.jpa.javax.persistence.jdbc.password</name>
+		<value>none</value>
+	</property>
+
+	<property>
+		<name>xasecure.audit.jpa.javax.persistence.jdbc.driver</name>
+		<value>com.mysql.jdbc.Driver</value>
+	</property>
+	
+	<property>
+		<name>xasecure.audit.credential.provider.file</name>
+		<value>jceks://file/etc/xasecure/conf/auditcred.jceks</value>
+	</property>
+	
+	<property>
+		<name>xasecure.audit.repository.name</name>
+		<value>hadoopdev</value>
+	</property>	
+	
+	<property>
+		<name>xasecure.audit.is.enabled</name>
+		<value>true</value>
+	</property>	
+
+	<property>
+		<name>xasecure.audit.log4j.is.enabled</name>
+		<value>false</value>
+	</property>	
+
+	<property>
+		<name>xasecure.audit.log4j.is.async</name>
+		<value>false</value>
+	</property>	
+	
+	<property>
+		<name>xasecure.audit.log4j.async.max.queue.size</name>
+		<value>10240</value>
+	</property>	
+
+	<property>
+		<name>xasecure.audit.log4j.async.max.flush.interval.ms</name>
+		<value>30000</value>
+	</property>	
+	
+	<property>
+		<name>xasecure.audit.db.is.enabled</name>
+		<value>true</value>
+	</property>	
+	
+	<property>
+		<name>xasecure.audit.db.is.async</name>
+		<value>false</value>
+	</property>	
+	
+	<property>
+		<name>xasecure.audit.db.async.max.queue.size</name>
+		<value>10240</value>
+	</property>	
+
+	<property>
+		<name>xasecure.audit.db.async.max.flush.interval.ms</name>
+		<value>30000</value>
+	</property>	
+
+	<property>
+		<name>xasecure.audit.db.batch.size</name>
+		<value>100</value>
+	</property>	
+	
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/conf/xasecure-hadoop-env.sh
----------------------------------------------------------------------
diff --git a/hdfs-agent/conf/xasecure-hadoop-env.sh b/hdfs-agent/conf/xasecure-hadoop-env.sh
new file mode 100644
index 0000000..95e00c6
--- /dev/null
+++ b/hdfs-agent/conf/xasecure-hadoop-env.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+echo "$0" | grep -q beeswax_server.sh > /dev/null 2>&1
+if [ $? -ne 0 ]
+then
+	XASECURE_AGENT_PATH="`ls -1 /usr/lib/hadoop/lib/hdfs-agent-*.jar | head -1`"
+	if [ -f "${XASECURE_AGENT_PATH}" ]
+	then
+	    if [ "${XASECURE_INIT}" != "0" ]
+	    then
+	        XASECURE_INIT="0"
+	        XASECURE_AGENT_OPTS=" -javaagent:${XASECURE_AGENT_PATH}=authagent "
+	        echo ${HADOOP_NAMENODE_OPTS} | grep -q -- "${XASECURE_AGENT_OPTS}" > /dev/null 2>&1
+	        if [ $? -ne 0 ]
+	        then
+	                export HADOOP_NAMENODE_OPTS=" ${XASECURE_AGENT_OPTS} ${HADOOP_NAMENODE_OPTS} "
+	                export HADOOP_SECONDARYNAMENODE_OPTS=" ${XASECURE_AGENT_OPTS} ${HADOOP_SECONDARYNAMENODE_OPTS}"
+	        fi
+	    fi
+	else
+	    echo "ERROR: XASecure Agent is not located at [${XASECURE_AGENT_PATH}]. Exiting ..."
+	    #exit 0
+	fi
+fi

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/conf/xasecure-hdfs-security-changes.cfg
----------------------------------------------------------------------
diff --git a/hdfs-agent/conf/xasecure-hdfs-security-changes.cfg b/hdfs-agent/conf/xasecure-hdfs-security-changes.cfg
new file mode 100644
index 0000000..ff6fdef
--- /dev/null
+++ b/hdfs-agent/conf/xasecure-hdfs-security-changes.cfg
@@ -0,0 +1,10 @@
+#
+# Change the original policy parameters to work with policy-manager-based storage.
+# 
+#
+hdfs.authorization.verifier.classname				com.xasecure.pdp.hdfs.XASecureAuthorizer								mod	create-if-not-exists
+xasecure.hdfs.policymgr.url							%POLICY_MGR_URL%/service/assets/policyList/%REPOSITORY_NAME% 			mod create-if-not-exists
+xasecure.hdfs.policymgr.url.saveAsFile				/tmp/hadoop_%REPOSITORY_NAME%_json  									mod create-if-not-exists
+xasecure.hdfs.policymgr.url.laststoredfile			%POLICY_CACHE_FILE_PATH%/hadoop_%REPOSITORY_NAME%_json 					mod create-if-not-exists
+xasecure.hdfs.policymgr.url.reloadIntervalInMillis 	30000 																	mod create-if-not-exists
+xasecure.hdfs.policymgr.ssl.config					/etc/hadoop/conf/xasecure-policymgr-ssl.xml								mod create-if-not-exists

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/conf/xasecure-hdfs-security.xml
----------------------------------------------------------------------
diff --git a/hdfs-agent/conf/xasecure-hdfs-security.xml b/hdfs-agent/conf/xasecure-hdfs-security.xml
new file mode 100644
index 0000000..d968453
--- /dev/null
+++ b/hdfs-agent/conf/xasecure-hdfs-security.xml
@@ -0,0 +1,109 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+	<!--  The following property is used to select appropriate XASecure Authorizer Module (filebased, policymanager based) -->
+	<property>
+		<name>hdfs.authorization.verifier.classname</name>
+		<value>com.xasecure.pdp.hdfs.XASecureAuthorizer</value>
+		<description>
+			Class Name of the authorization Module 
+		</description>
+	</property>
+
+	<!-- The following properties are used only when PolicyManager is used as 
+		main storage for all policy -->
+	<property>
+		<name>xasecure.hdfs.policymgr.url</name>
+		<value>http://policymanagerhost:port/service/assets/hadoopdev</value>
+		<description>
+			Location where XASecure Role Based Authorization Info is
+			located.
+		</description>
+	</property>
+	<property>
+		<name>xasecure.hdfs.policymgr.url.saveAsFile</name>
+		<value>/tmp/xasecure-hdfs-policy.json</value>
+		<description>
+			Location where XASecure Role Based Authorization Info is
+			saved after successful retrieval from policymanager
+		</description>
+	</property>
+	<property>
+		<name>xasecure.hdfs.policymgr.url.laststoredfile</name>
+		<value>/home/hdfs/last_xasecure-hdfs-policy.json</value>
+		<description>
+			Location and file where last XASecure Role Based Authorization Info
+		    is saved after successful retrieval from policymanager.
+		</description>
+	</property>
+	<property>
+		<name>xasecure.hdfs.policymgr.url.reloadIntervalInMillis</name>
+		<value>30000</value>
+		<description>
+			How often do we need to verify the changes to the
+			authorization url,
+			to reload to memory (reloaded only if there are
+			changes)
+		</description>
+	</property>
+	
+	<property>
+		<name>xasecure.add-hadoop-authorization</name>
+		<value>true</value>
+		<description>
+			Enable/Disable the default hadoop authorization (based on
+			rwxrwxrwx permission on
+			the resource) if the XASecure Authorization
+			fails.
+		</description>
+	</property>
+
+	<!--  The following field are used to customize the audit logging feature -->
+
+	<!-- 
+	<property>
+		<name>xasecure.auditlog.fieldDelimiterString</name>
+		<value>@</value>
+		<description> Audit Log field delimiters </description>
+	</property>
+	<property>
+		<name>xasecure.auditlog.xasecureAcl.name</name>
+		<value>xasecure-acl</value>
+		<description> The module name listed in the auditlog when the
+			permission
+			check is done by XASecureACL
+		</description>
+	</property>
+	<property>
+		<name>xasecure.auditlog.hadoopAcl.name</name>
+		<value>hadoop-acl</value>
+		<description> The module name listed in the auditlog
+			when the permission check is done by HadoopACL
+		</description>
+	</property>
+	<property>
+		<name>xasecure.auditlog.accessgranted.text</name>
+		<value>granted</value>
+		<description> The text to be written in audit log when access is
+			granted
+		</description>
+	</property>
+	<property>
+		<name>xasecure.auditlog.accessdenied.text</name>
+		<value>denied</value>
+		<description> The text to be written in audit log when
+			access is denied
+		</description>
+	</property>
+	<property>
+		<name>xasecure.auditlog.hdfs.excludeusers</name>
+		<value>hbase,hive</value>
+		<description> List of comma separated users for
+			whom the audit log is not written
+		</description>
+	</property>
+	-->
+	
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/conf/xasecure-policymgr-ssl-changes.cfg
----------------------------------------------------------------------
diff --git a/hdfs-agent/conf/xasecure-policymgr-ssl-changes.cfg b/hdfs-agent/conf/xasecure-policymgr-ssl-changes.cfg
new file mode 100644
index 0000000..184e334
--- /dev/null
+++ b/hdfs-agent/conf/xasecure-policymgr-ssl-changes.cfg
@@ -0,0 +1,9 @@
+#
+# SSL Params
+#
+xasecure.policymgr.clientssl.keystore					 %SSL_KEYSTORE_FILE_PATH%						mod create-if-not-exists
+xasecure.policymgr.clientssl.keystore.password			 %SSL_KEYSTORE_PASSWORD%						mod create-if-not-exists
+xasecure.policymgr.clientssl.keystore.credential.file	 jceks://file%CREDENTIAL_PROVIDER_FILE%			mod create-if-not-exists
+xasecure.policymgr.clientssl.truststore				     %SSL_TRUSTSTORE_FILE_PATH%						mod create-if-not-exists
+xasecure.policymgr.clientssl.truststore.password	     %SSL_TRUSTSTORE_PASSWORD%						mod create-if-not-exists
+xasecure.policymgr.clientssl.truststore.credential.file  jceks://file%CREDENTIAL_PROVIDER_FILE%         mod create-if-not-exists						

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/conf/xasecure-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/hdfs-agent/conf/xasecure-policymgr-ssl.xml b/hdfs-agent/conf/xasecure-policymgr-ssl.xml
new file mode 100644
index 0000000..df1cf59
--- /dev/null
+++ b/hdfs-agent/conf/xasecure-policymgr-ssl.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+	<!--  The following properties are used for 2-way SSL client server validation -->
+	<property>
+		<name>xasecure.policymgr.clientssl.keystore</name>
+		<value>hadoopdev-clientcert.jks</value>
+		<description> 
+			Java Keystore files 
+		</description>
+	</property>
+	<property>
+		<name>xasecure.policymgr.clientssl.keystore.password</name>
+		<value>none</value>
+		<description> 
+			password for keystore 
+		</description>
+	</property>
+	<property>
+		<name>xasecure.policymgr.clientssl.truststore</name>
+		<value>cacerts-xasecure.jks</value>
+		<description> 
+			java truststore file
+		</description>
+	</property>
+	<property>
+		<name>xasecure.policymgr.clientssl.truststore.password</name>
+		<value>none</value>
+		<description> 
+			java  truststore password
+		</description>
+	</property>
+	<property>
+		<name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+		<value>jceks://file/tmp/keystore-hadoopdev-ssl.jceks</value>
+		<description> 
+			java  keystore credential file
+		</description>
+	</property>
+	<property>
+		<name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+		<value>jceks://file/tmp/truststore-hadoopdev-ssl.jceks</value>
+		<description> 
+			java  truststore credential file
+		</description>
+	</property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/pom.xml
----------------------------------------------------------------------
diff --git a/hdfs-agent/pom.xml b/hdfs-agent/pom.xml
new file mode 100644
index 0000000..0770c91
--- /dev/null
+++ b/hdfs-agent/pom.xml
@@ -0,0 +1,99 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>security_agents.hdfs-agent</groupId>
+  <artifactId>hdfs-agent</artifactId>
+  <name>Hdfs Security Agent</name>
+  <description>Hdfs Security Agents</description>
+  <packaging>jar</packaging>
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+  </properties>
+  <parent>
+     <groupId>com.hortonworks.hadoop.security</groupId>
+     <artifactId>argus</artifactId>
+     <version>3.5.000</version>
+     <relativePath>..</relativePath>
+  </parent>
+  <dependencies>
+    <dependency>
+	<groupId>com.google.code.gson</groupId>
+	<artifactId>gson</artifactId>
+	<version>${gson.version}</version>
+    </dependency>
+    <dependency>
+	<groupId>commons-logging</groupId>
+	<artifactId>commons-logging</artifactId>
+	<version>${commons.logging.version}</version>
+    </dependency>
+    <dependency>
+	<groupId>org.apache.hadoop</groupId>
+	<artifactId>hadoop-common</artifactId>
+	<version>${hadoop.version}</version>
+    </dependency>
+    <dependency>
+	<groupId>org.apache.hadoop</groupId>
+	<artifactId>hadoop-hdfs</artifactId>
+	<version>${hadoop.version}</version>
+    </dependency>
+    <dependency>
+	<groupId>javassist</groupId>
+	<artifactId>javassist</artifactId>
+	<version>${javassist.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>security_agents.agents-audit</groupId>
+      <artifactId>agents-audit</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>security_agents.agents-common</groupId>
+      <artifactId>agents-common</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+  </dependencies>
+  <build>
+	<!--
+  	<pluginManagement>
+	-->
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <version>2.3.2</version>
+        <configuration>
+          <archive>
+            <index>true</index>
+	    <manifestFile>
+		src/main/resources/META-INF/MANIFEST.MF
+	    </manifestFile>
+            <manifest>
+              <addDefaultImplementationEntries/>
+            </manifest>
+          </archive>
+        </configuration>
+      </plugin>
+	  <plugin>
+		<groupId>org.apache.maven.plugins</groupId>
+		<artifactId>maven-shade-plugin</artifactId>
+		<executions>
+			<execution>
+				<phase>package</phase>
+				<goals>
+					<goal>shade</goal>
+				</goals>
+			</execution>
+		</executions>
+		<configuration>
+			<artifactSet>
+				<includes>
+					<include>javassist:javassist:jar:</include>
+				</includes>
+			</artifactSet>
+		</configuration>
+	  </plugin>
+    </plugins>
+	<!--
+    </pluginManagement>
+	-->
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/scripts/install.properties
----------------------------------------------------------------------
diff --git a/hdfs-agent/scripts/install.properties b/hdfs-agent/scripts/install.properties
new file mode 100644
index 0000000..e851391
--- /dev/null
+++ b/hdfs-agent/scripts/install.properties
@@ -0,0 +1,80 @@
+#
+# Location of Policy Manager URL  
+#
+#
+# Example:
+# POLICY_MGR_URL=http://policymanager.xasecure.net:6080
+#
+
+POLICY_MGR_URL=
+
+#
+# Location of mysql client library (please check the location of the jar file)
+#
+MYSQL_CONNECTOR_JAR=/usr/share/java/mysql-connector-java.jar
+
+#
+# This is the repository name created within policy manager
+#
+# Example:
+# REPOSITORY_NAME=hadoopdev
+#
+
+REPOSITORY_NAME=
+
+
+#
+# AUDIT DB Configuration
+# 
+#  This information should match the one you specified during the PolicyManager installation.
+# 
+# Example:
+# XAAUDIT.DB.HOSTNAME=localhost
+# XAAUDIT.DB.DATABASE_NAME=xasecure
+# XAAUDIT.DB.USER_NAME=xalogger
+# XAAUDIT.DB.PASSWORD=xalogger
+
+
+
+XAAUDIT.DB.HOSTNAME=
+XAAUDIT.DB.DATABASE_NAME=
+XAAUDIT.DB.USER_NAME=
+XAAUDIT.DB.PASSWORD=
+
+#
+# Credential Provider File Path
+#
+# CREDENTIAL_PROVIDER_FILE=/etc/xasecure/conf/{repoName}-credstore.jceks
+#
+
+CREDENTIAL_PROVIDER_FILE=
+
+
+#
+# POLICY CACHE FILE PATH
+# 
+# This information is used to configure the path where the policy cache is stored.
+# 
+# Example:
+# POLICY_CACHE_FILE_PATH=/home/hdfs
+# 
+
+POLICY_CACHE_FILE_PATH=
+
+#
+# SSL Client Certificate Information
+#
+# Example:
+# SSL_KEYSTORE_FILE_PATH=/etc/xasecure/conf/xasecure-hadoop-client.jks
+# SSL_KEYSTORE_PASSWORD=none
+# SSL_TRUSTSTORE_FILE_PATH=/etc/xasecure/conf/xasecure-truststore.jks
+# SSL_TRUSTSTORE_PASSWORD=none
+
+#
+# You do not need to use SSL between the agent and the security admin tool; please leave these sample values as they are.
+#
+
+SSL_KEYSTORE_FILE_PATH=agentKey.jks
+SSL_KEYSTORE_PASSWORD=myKeyFilePassword
+SSL_TRUSTSTORE_FILE_PATH=cacert
+SSL_TRUSTSTORE_PASSWORD=changeit


Mime
View raw message