ranger-commits mailing list archives

From omal...@apache.org
Subject [38/44] ARGUS-1. Initial code commit (Selvamohan Neethiraj via omalley)
Date Thu, 14 Aug 2014 20:50:49 GMT
http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/scripts/install.sh
----------------------------------------------------------------------
diff --git a/hdfs-agent/scripts/install.sh b/hdfs-agent/scripts/install.sh
new file mode 100644
index 0000000..59c248f
--- /dev/null
+++ b/hdfs-agent/scripts/install.sh
@@ -0,0 +1,272 @@
+#!/bin/bash
+
+function create_jceks()
+{
+
+alias=$1
+pass=$2
+jceksFile=$3
+
+ret=`hadoop credential create ${alias} --value ${pass} --provider jceks://file${jceksFile} 2>&1`
+res=`echo $ret | grep 'already exist'`
+
+if [ "${res}" != "" ]
+then
+   echo "Credential file already exists, recreating the file ..."
+   hadoop credential delete ${alias} --provider jceks://file${jceksFile}
+   hadoop credential create ${alias} --value ${pass} --provider jceks://file${jceksFile}
+fi
+}
+
+hdp_dir=/usr/lib/hadoop
+hdp_lib_dir=/usr/lib/hadoop/lib
+hdp_conf_dir=/etc/hadoop/conf
+
+if [ ! -w /etc/passwd ]
+then
+	echo "ERROR: $0 script should be run as root."
+	exit 1
+fi
+
+export CONFIG_FILE_OWNER="hdfs:hadoop"
+
+if [ ! -d "${hdp_dir}" ]
+then
+	echo "ERROR: Invalid HADOOP HOME Directory: [${hdp_dir}]. Exiting ..."
+	exit 1
+fi
+
+
+#echo "Hadoop Configuration Path: ${hdp_conf_dir}"
+
+if [ ! -f ${hdp_conf_dir}/hadoop-env.sh ]
+then
+	echo '#!/bin/bash' > ${hdp_conf_dir}/hadoop-env.sh
+	chown $CONFIG_FILE_OWNER ${hdp_conf_dir}/hadoop-env.sh
+fi
+
+install_dir=`dirname $0`
+
+[ "${install_dir}" = "." ] && install_dir=`pwd`
+
+
+#verify mysql-connector path is valid
+MYSQL_CONNECTOR_JAR=`grep '^MYSQL_CONNECTOR_JAR'  ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+echo "[I] Checking MYSQL CONNECTOR FILE : $MYSQL_CONNECTOR_JAR" 
+if test -f "$MYSQL_CONNECTOR_JAR"; then
+	echo "[I] MYSQL CONNECTOR FILE : $MYSQL_CONNECTOR_JAR file found" 
+else
+	echo "[E] MYSQL CONNECTOR FILE : $MYSQL_CONNECTOR_JAR does not exist" ; exit 1;
+fi
+#copying mysql connector jar file to lib directory
+cp $MYSQL_CONNECTOR_JAR ${install_dir}/lib
+
+#echo "Current Install Directory: [${install_dir}]"
+
+#
+# --- Backup current configuration - START
+#
+
+COMPONENT_NAME=hadoop
+
+XASECURE_VERSION=`cat ${install_dir}/version`
+
+CFG_DIR=${hdp_conf_dir}
+XASECURE_ROOT=/etc/xasecure/${COMPONENT_NAME}
+BACKUP_TYPE=pre
+CUR_VERSION_FILE=${XASECURE_ROOT}/.current_version
+PRE_INSTALL_CONFIG=${XASECURE_ROOT}/${BACKUP_TYPE}-${XASECURE_VERSION}
+
+backup_dt=`date '+%Y%m%d%H%M%S'`
+
+if [ -d "${PRE_INSTALL_CONFIG}" ]
+then
+	PRE_INSTALL_CONFIG="${PRE_INSTALL_CONFIG}.${backup_dt}"
+fi
+
+if [ -d ${CFG_DIR} ]
+then
+	( cd ${CFG_DIR} ; find . -print | cpio -pdm ${PRE_INSTALL_CONFIG} )
+	[ -f ${CUR_VERSION_FILE} ] && mv ${CUR_VERSION_FILE} ${CUR_VERSION_FILE}-${backup_dt}
+	echo ${XASECURE_VERSION} > ${CUR_VERSION_FILE}
+else
+	echo "ERROR: Unable to find configuration directory: [${CFG_DIR}]"
+	exit 1
+fi
+
+cp -f ${install_dir}/uninstall.sh ${XASECURE_ROOT}/
+
+#
+# --- Backup current configuration - END
+#
+
+
+dt=`date '+%Y%m%d%H%M%S'`
+for f in ${install_dir}/conf/*
+do
+	if [ -f ${f} ]
+	then
+		fn=`basename $f`
+		if [ ! -f ${hdp_conf_dir}/${fn} ]
+		then
+			echo "+cp ${f} ${hdp_conf_dir}/${fn}"
+			cp ${f} ${hdp_conf_dir}/${fn}
+		else
+			echo "WARN: ${fn} already exists in the ${hdp_conf_dir} - Using existing configuration ${fn}"
+		fi
+	fi
+done
+
+XASECURE_PERM_FILE="${hdp_conf_dir}/xasecure-hadoop-permissions.txt"
+
+if [ -f ${XASECURE_PERM_FILE} ]
+then
+	echo "+chown hdfs:root ${XASECURE_PERM_FILE}"
+	chown hdfs:root ${XASECURE_PERM_FILE}
+	echo "+chmod 0640      ${XASECURE_PERM_FILE}"
+	chmod 0640      ${XASECURE_PERM_FILE}
+fi
+
+if [ -f ${hdp_conf_dir}/xasecure-hadoop-env.sh ]
+then
+	echo "+chmod a+rx ${hdp_conf_dir}/xasecure-hadoop-env.sh"
+	chmod a+rx ${hdp_conf_dir}/xasecure-hadoop-env.sh
+fi
+
+
+#echo "Hadoop XASecure Library Path: ${hdp_lib_dir}"
+
+if [ ! -d ${hdp_lib_dir} ]
+then
+	echo "+mkdir -p ${hdp_lib_dir}"
+	mkdir -p ${hdp_lib_dir}
+fi
+
+for f in ${install_dir}/dist/*.jar
+do
+	if [ -f ${f} ]
+	then
+		fn=`basename $f`
+		echo "+cp ${f} ${hdp_lib_dir}/${fn}"
+		cp ${f} ${hdp_lib_dir}/${fn}
+	fi
+done
+
+for f in ${install_dir}/lib/*.jar
+do
+	if [ -f ${f} ]
+	then
+		fn=`basename $f`
+		echo "+cp ${f} ${hdp_lib_dir}/${fn}"
+		cp ${f} ${hdp_lib_dir}/${fn}
+	fi
+done
+
+
+if [ -f ${hdp_conf_dir}/hadoop-env.sh  ]
+then
+	grep -q 'xasecure-hadoop-env.sh' ${hdp_conf_dir}/hadoop-env.sh  > /dev/null 2>&1
+	if [ $? -ne 0 ]
+	then
+		echo "+Adding line: . ${hdp_conf_dir}/xasecure-hadoop-env.sh to file: ${hdp_conf_dir}/hadoop-env.sh"
+		echo ". ${hdp_conf_dir}/xasecure-hadoop-env.sh" >> ${hdp_conf_dir}/hadoop-env.sh
+	fi
+fi
+
+
+CredFile=`grep '^CREDENTIAL_PROVIDER_FILE' ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+
+if ! echo "${CredFile}" | grep -q '^/'
+then
+  echo "ERROR: Please set CREDENTIAL_PROVIDER_FILE to an absolute file path. Exiting ..."
+  exit 1
+fi
+
+#
+# Generate Credential Provider file and Credential for Audit DB access.
+#
+
+
+auditCredAlias="auditDBCred"
+
+auditdbCred=`grep '^XAAUDIT.DB.PASSWORD' ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+
+create_jceks ${auditCredAlias} ${auditdbCred} ${CredFile}
+
+
+#
+# Generate Credential Provider file and Credential for SSL KEYSTORE AND TRUSTSTORE
+#
+
+
+sslkeystoreAlias="sslKeyStore"
+
+sslkeystoreCred=`grep '^SSL_KEYSTORE_PASSWORD' ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+
+create_jceks ${sslkeystoreAlias} ${sslkeystoreCred} ${CredFile}
+
+
+ssltruststoreAlias="sslTrustStore"
+
+ssltruststoreCred=changeit
+
+create_jceks ${ssltruststoreAlias} ${ssltruststoreCred} ${CredFile}
+
+chown ${CONFIG_FILE_OWNER} ${CredFile} 
+
+
+#
+# Modify the XML configuration files (driven by the installer's *-changes.cfg files)
+#
+PROP_ARGS="-p  ${install_dir}/install.properties"
+
+for f in ${install_dir}/installer/conf/*-changes.cfg
+do
+        if [ -f ${f} ]
+        then
+                fn=`basename $f`
+                orgfn=`echo $fn | sed -e 's:-changes.cfg:.xml:'`
+                fullpathorgfn="${hdp_conf_dir}/${orgfn}"
+                if [ ! -f ${fullpathorgfn} ]
+                then
+                        echo "ERROR: Unable to find ${fullpathorgfn}"
+                        exit 1
+                fi
+                archivefn="${hdp_conf_dir}/.${orgfn}.${dt}"
+                newfn="${hdp_conf_dir}/.${orgfn}-new.${dt}"
+                cp ${fullpathorgfn} ${archivefn}
+                if [ $? -eq 0 ]
+                then
+                        cpath="${install_dir}/installer/lib/*:/usr/lib/hadoop/*:/usr/lib/hadoop/lib/*"
+                        java -cp "${cpath}" com.xasecure.utils.install.XmlConfigChanger -i ${archivefn} -o ${newfn} -c ${f} ${PROP_ARGS}
+                        if [ $? -eq 0 ]
+                        then
+                                diff -w ${newfn} ${fullpathorgfn} > /dev/null 2>&1 
+                                if [ $? -ne 0 ]
+                                then
+	                        	#echo "Changing config file:  ${fullpathorgfn} with following changes:"
+	                                #echo "==============================================================="
+	                                #diff -w ${newfn} ${fullpathorgfn}
+	                                #echo "==============================================================="
+	                                echo "NOTE: Current config file: ${fullpathorgfn} is being saved as ${archivefn}"
+	                                #echo "==============================================================="
+	                                cp ${newfn} ${fullpathorgfn}
+	                            fi
+                        else
+                                echo "ERROR: Unable to make changes to config. file: ${fullpathorgfn}"
+                                echo "exiting ...."
+                                exit 1
+                        fi
+                else
+                        echo "ERROR: Unable to save config. file: ${fullpathorgfn}  to ${archivefn}"
+                        echo "exiting ...."
+                        exit 1
+                fi
+        fi
+done
+
+chmod go-rwx ${hdp_conf_dir}/xasecure-policymgr-ssl.xml
+
+chown ${CONFIG_FILE_OWNER} ${hdp_conf_dir}/xasecure-policymgr-ssl.xml
+
+exit 0
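
A note on the credential step above: the aliases written by create_jceks are read back at runtime through Hadoop's credential-provider API. A minimal readback sketch, assuming a Hadoop release that ships Configuration.getPassword() and the provider support the script's `hadoop credential` calls rely on (the provider path below is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class CredReadbackSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Point at the same provider file the script populated (illustrative path).
            conf.set("hadoop.security.credential.provider.path",
                     "jceks://file/etc/xasecure/conf/credstore.jceks");
            // getPassword() consults the configured providers before falling
            // back to a clear-text property of the same name.
            char[] auditDbCred = conf.getPassword("auditDBCred");
            System.out.println("alias resolved: " + (auditDbCred != null));
        }
    }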

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/scripts/uninstall.sh
----------------------------------------------------------------------
diff --git a/hdfs-agent/scripts/uninstall.sh b/hdfs-agent/scripts/uninstall.sh
new file mode 100644
index 0000000..c335e27
--- /dev/null
+++ b/hdfs-agent/scripts/uninstall.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+COMPONENT_NAME=hadoop
+CFG_DIR=/etc/${COMPONENT_NAME}/conf
+XASECURE_ROOT=/etc/xasecure/${COMPONENT_NAME}
+BACKUP_TYPE=pre
+CUR_VERSION_FILE=${XASECURE_ROOT}/.current_version
+if [ -f ${CUR_VERSION_FILE} ]
+then
+	XASECURE_VERSION=`cat ${CUR_VERSION_FILE}`
+	PRE_INSTALL_CONFIG=${XASECURE_ROOT}/${BACKUP_TYPE}-${XASECURE_VERSION}
+	dt=`date '+%Y%m%d%H%M%S'`
+	if [ -d "${PRE_INSTALL_CONFIG}" ]
+	then
+		[ -d ${CFG_DIR} ] && mv ${CFG_DIR} ${CFG_DIR}-${dt}
+		( cd ${PRE_INSTALL_CONFIG} ; find . -print | cpio -pdm ${CFG_DIR} )
+		[ -f ${CUR_VERSION_FILE} ] && mv ${CUR_VERSION_FILE} ${CUR_VERSION_FILE}-uninstalled-${dt}
+		echo "XASecure version - ${XASECURE_VERSION} has been uninstalled successfully."
+	else
+		echo "ERROR: Unable to find pre-install configuration directory: [${PRE_INSTALL_CONFIG}]"
+		exit 1
+	fi
+else
+	cd ${CFG_DIR} || exit 1
+	# find hidden timestamped backups (.<name>.<timestamp>) and recover their base file names
+	saved_files=`find . -type f -name '.*' |  sort | grep -v -- '-new.' | grep '[0-9]*$' | grep -v -- '-[0-9]*$' | sed -e 's:\.[0-9]*$::' | sed -e 's:^./::' | sort -u`
+	dt=`date '+%Y%m%d%H%M%S'`
+	if [ "${saved_files}" != "" ]
+	then
+	        for f in ${saved_files}
+	        do
+	                oldf=`ls ${f}.[0-9]* | sort | head -1`
+	                if [ -f "${oldf}" ]
+	                then
+	                        nf=`echo ${f} | sed -e 's:^\.::'`
+	                        if [ -f "${nf}" ]
+	                        then
+	                                echo "+cp -p ${nf} .${nf}-${dt}"
+	                                cp -p ${nf} .${nf}-${dt}
+	                                echo "+cp ${oldf} ${nf}"
+	                                cp ${oldf} ${nf}
+	                        else
+	                                echo "ERROR: ${nf} not found to back up; restoring the old file anyway."
+	                                echo "+cp -p ${oldf} ${nf}"
+	                                cp -p ${oldf} ${nf}
+	                        fi
+	                fi
+	        done
+	        echo "XASecure configuration has been uninstalled successfully."
+	fi
+fi

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/HDFSAccessVerifier.java
----------------------------------------------------------------------
diff --git a/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/HDFSAccessVerifier.java b/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/HDFSAccessVerifier.java
new file mode 100644
index 0000000..e902452
--- /dev/null
+++ b/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/HDFSAccessVerifier.java
@@ -0,0 +1,35 @@
+/**************************************************************************
+ *                                                                        *
+ * The information in this document is proprietary to XASecure Inc.,      *
+ * It may not be used, reproduced or disclosed without the written        *
+ * approval from the XASecure Inc.,                                       *
+ *                                                                        *
+ * PRIVILEGED AND CONFIDENTIAL XASECURE PROPRIETARY INFORMATION           *
+ *                                                                        *
+ * Copyright (c) 2013 XASecure, Inc.  All rights reserved.                *
+ *                                                                        *
+ *************************************************************************/
+
+ /**
+  *
+  *	@version: 1.0.004
+  *
+  */
+
+package com.xasecure.authorization.hadoop;
+
+import java.util.Set;
+
+public interface HDFSAccessVerifier {
+	public class AccessContext {
+		String agentId;
+		int repositoryType;
+		String sessionId;
+		String clientType;
+		String clientIP;
+		String requestData;
+	}
+	
+	public boolean isAccessGranted(String aPathName, String aPathOwnerName, String access, String username, Set<String> groups);
+	public boolean isAuditLogEnabled(String aPathName) ;
+}
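
To make the interface contract concrete, a minimal sketch of an implementer follows. The class and its owner-grants-all policy are invented for illustration; the real policy-manager-backed implementation is supplied via configuration (see HDFSAccessVerifierFactory below):

    package com.xasecure.authorization.hadoop;

    import java.util.Set;

    // Illustrative only: grants access when the requesting user owns the path.
    public class OwnerOnlyAccessVerifier implements HDFSAccessVerifier {
        @Override
        public boolean isAccessGranted(String aPathName, String aPathOwnerName,
                                       String access, String username, Set<String> groups) {
            return username != null && username.equals(aPathOwnerName);
        }

        @Override
        public boolean isAuditLogEnabled(String aPathName) {
            return true; // audit every path in this sketch
        }
    }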

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/HDFSAccessVerifierFactory.java
----------------------------------------------------------------------
diff --git a/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/HDFSAccessVerifierFactory.java b/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/HDFSAccessVerifierFactory.java
new file mode 100644
index 0000000..8e3649f
--- /dev/null
+++ b/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/HDFSAccessVerifierFactory.java
@@ -0,0 +1,59 @@
+/**************************************************************************
+ *                                                                        *
+ * The information in this document is proprietary to XASecure Inc.,      *
+ * It may not be used, reproduced or disclosed without the written        *
+ * approval from the XASecure Inc.,                                       *
+ *                                                                        *
+ * PRIVILEGED AND CONFIDENTIAL XASECURE PROPRIETARY INFORMATION           *
+ *                                                                        *
+ * Copyright (c) 2013 XASecure, Inc.  All rights reserved.                *
+ *                                                                        *
+ *************************************************************************/
+
+ /**
+  *
+  *	@version: 1.0.004
+  *
+  */
+
+package com.xasecure.authorization.hadoop;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import com.xasecure.authorization.hadoop.config.XaSecureConfiguration;
+import com.xasecure.authorization.hadoop.constants.XaSecureHadoopConstants;
+
+public class HDFSAccessVerifierFactory {
+	
+	private static final Log LOG = LogFactory.getLog(HDFSAccessVerifierFactory.class) ;
+
+	private static HDFSAccessVerifier hdfsAccessVerifier = null ;
+	
+	public static HDFSAccessVerifier getInstance() {
+		if (hdfsAccessVerifier == null) {
+			synchronized(HDFSAccessVerifierFactory.class) {
+				HDFSAccessVerifier temp = hdfsAccessVerifier ;
+				if (temp == null) {
+					
+					String hdfsAccessVerifierClassName = XaSecureConfiguration.getInstance().get(XaSecureHadoopConstants.HDFS_ACCESS_VERIFIER_CLASS_NAME_PROP, XaSecureHadoopConstants.HDFS_ACCESS_VERIFIER_CLASS_NAME_DEFAULT_VALUE) ;
+					if (hdfsAccessVerifierClassName != null) {
+						try {
+							hdfsAccessVerifierClassName = hdfsAccessVerifierClassName.trim();
+							hdfsAccessVerifier = (HDFSAccessVerifier) (Class.forName(hdfsAccessVerifierClassName).newInstance()) ;
+							LOG.info("Created a new instance of class: [" + hdfsAccessVerifierClassName + "] for HDFS Access verification.");
+						} catch (InstantiationException e) {
+							LOG.error("Unable to instantiate HDFSAccessVerifier: [" +  hdfsAccessVerifierClassName + "]", e);
+						} catch (IllegalAccessException e) {
+							LOG.error("Unable to instantiate HDFSAccessVerifier: [" +  hdfsAccessVerifierClassName + "]", e);
+						} catch (ClassNotFoundException e) {
+							LOG.error("Unable to instantiate HDFSAccessVerifier: [" +  hdfsAccessVerifierClassName + "]", e);
+						}
+					}
+				}
+			}
+		}
+		return hdfsAccessVerifier ;
+		
+	}
+}
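
getInstance() above double-checks a non-volatile static field, so under the Java memory model a thread could in principle observe a partially published instance. For reference, a generic sketch of the same lazy-singleton pattern hardened with volatile (the verifier class name is illustrative, not part of this commit):

    public class LazySingletonSketch {
        // volatile guarantees a fully constructed instance is published to all threads
        private static volatile HDFSAccessVerifier instance;

        public static HDFSAccessVerifier getInstance() throws Exception {
            HDFSAccessVerifier local = instance;
            if (local == null) {
                synchronized (LazySingletonSketch.class) {
                    local = instance;
                    if (local == null) {
                        instance = local = (HDFSAccessVerifier)
                            Class.forName("com.example.SomeVerifier").newInstance(); // illustrative name
                    }
                }
            }
            return local;
        }
    }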

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/agent/AuthCodeInjectionJavaAgent.java
----------------------------------------------------------------------
diff --git a/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/agent/AuthCodeInjectionJavaAgent.java b/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/agent/AuthCodeInjectionJavaAgent.java
new file mode 100644
index 0000000..16416d8
--- /dev/null
+++ b/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/agent/AuthCodeInjectionJavaAgent.java
@@ -0,0 +1,32 @@
+/**************************************************************************
+ *                                                                        *
+ * The information in this document is proprietary to XASecure Inc.,      *
+ * It may not be used, reproduced or disclosed without the written        *
+ * approval from the XASecure Inc.,                                       *
+ *                                                                        *
+ * PRIVILEGED AND CONFIDENTIAL XASECURE PROPRIETARY INFORMATION           *
+ *                                                                        *
+ * Copyright (c) 2013 XASecure, Inc.  All rights reserved.                *
+ *                                                                        *
+ *************************************************************************/
+
+ /**
+  *
+  *	@version: 1.0.004
+  *
+  */
+
+package com.xasecure.authorization.hadoop.agent;
+
+import java.lang.instrument.Instrumentation;
+
+public class AuthCodeInjectionJavaAgent {
+	public static final String AUTHORIZATION_AGENT_PARAM = "authagent";
+
+	public static void premain(String agentArgs, Instrumentation inst) {
+		if (agentArgs != null && AUTHORIZATION_AGENT_PARAM.equalsIgnoreCase(agentArgs.trim())) {
+			inst.addTransformer(new HadoopAuthClassTransformer());
+		}
+	}
+
+}
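
For completeness, a sketch of how this agent is expected to be activated. The jar path is hypothetical; the premain-class attribute is supplied by the MANIFEST.MF added later in this commit:

    // Hypothetical NameNode JVM option enabling the agent:
    //   -javaagent:/usr/lib/hadoop/lib/hdfs-agent.jar=authagent
    // The JVM reads premain-class from the jar manifest and invokes
    //   AuthCodeInjectionJavaAgent.premain("authagent", inst)
    // before main(), so the transformer is registered ahead of class loading.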

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/agent/HadoopAuthClassTransformer.java
----------------------------------------------------------------------
diff --git a/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/agent/HadoopAuthClassTransformer.java b/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/agent/HadoopAuthClassTransformer.java
new file mode 100644
index 0000000..e0a4ba0
--- /dev/null
+++ b/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/agent/HadoopAuthClassTransformer.java
@@ -0,0 +1,196 @@
+/**************************************************************************
+ *                                                                        *
+ * The information in this document is proprietary to XASecure Inc.,      *
+ * It may not be used, reproduced or disclosed without the written        *
+ * approval from the XASecure Inc.,                                       *
+ *                                                                        *
+ * PRIVILEGED AND CONFIDENTIAL XASECURE PROPRIETARY INFORMATION           *
+ *                                                                        *
+ * Copyright (c) 2013 XASecure, Inc.  All rights reserved.                *
+ *                                                                        *
+ *************************************************************************/
+
+ /**
+  *
+  *	@version: 1.0.004
+  *
+  */
+
+package com.xasecure.authorization.hadoop.agent;
+
+import java.io.IOException;
+import java.lang.instrument.ClassFileTransformer;
+import java.lang.instrument.IllegalClassFormatException;
+import java.security.ProtectionDomain;
+
+import javassist.CannotCompileException;
+import javassist.ClassPool;
+import javassist.CtClass;
+import javassist.CtMethod;
+import javassist.NotFoundException;
+
+public class HadoopAuthClassTransformer implements ClassFileTransformer {
+
+	byte[] transformedClassByteCode = null ;
+	
+	@Override
+	public byte[] transform(ClassLoader aClassLoader, String aClassName, Class<?> aClassBeingRedefined, ProtectionDomain aProtectionDomain, byte[] aClassFileBuffer) throws IllegalClassFormatException {
+
+		byte[] byteCode = aClassFileBuffer;
+		if (aClassName.equals("org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker")) {
+			System.out.println("Injection code is invoked in JVM [" + Runtime.getRuntime() + "] for class [" + aClassBeingRedefined + "] ....");
+			try {
+				if (transformedClassByteCode == null) {
+					ClassPool cp = ClassPool.getDefault();
+					String curClassName = aClassName.replaceAll("/", ".");
+					CtClass curClass = cp.get(curClassName);
+					
+					
+					CtClass inodeClass = null, snapShotClass = null, fsActionClass = null  ;
+					String paramClassName = null ;
+					
+					try {
+						paramClassName = "org.apache.hadoop.hdfs.server.namenode.INode" ;
+						inodeClass = cp.get(paramClassName) ;
+					} catch (javassist.NotFoundException nfe) {
+						System.err.println("Unable to find Class for [" + paramClassName + "]" + nfe) ;
+						inodeClass = null ;
+					}
+
+
+					try {
+						paramClassName = "org.apache.hadoop.fs.permission.FsAction" ;
+						fsActionClass = cp.get(paramClassName) ;
+					} catch (javassist.NotFoundException nfe) {
+						System.err.println("Unable to find Class for [" + paramClassName + "]" + nfe) ;
+						fsActionClass = null ;
+					}
+					
+					try {
+						paramClassName = "org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot" ;
+						snapShotClass = cp.get(paramClassName) ;
+					} catch (javassist.NotFoundException nfe) {
+						System.err.println("Unable to find Class for [" + paramClassName + "]" + nfe) ;
+						snapShotClass = null ;
+					}
+					
+					boolean injected = false ;
+					boolean injected_cm = false ;
+					boolean withIntParamInMiddle = false ;
+
+					
+					try {
+						
+						CtClass[] paramArgs = null ;
+						
+						if (inodeClass != null && fsActionClass != null) {
+
+							CtMethod checkMethod = null ;
+							
+							if (snapShotClass != null) {
+								paramArgs = new CtClass[] { inodeClass, snapShotClass, fsActionClass } ;
+								try {
+									checkMethod = curClass.getDeclaredMethod("check", paramArgs);
+								}
+								catch(NotFoundException SSnfe) {
+									System.out.println("Unable to find check method with snapshot class. Trying to find check method without snapshot support.") ;
+									snapShotClass = null;
+									paramArgs = new CtClass[] { inodeClass, CtClass.intType,  fsActionClass } ;
+									checkMethod = curClass.getDeclaredMethod("check", paramArgs);
+									withIntParamInMiddle = true ;
+									System.out.println("Found method check() - without snapshot support") ;
+								}
+							}
+							else {
+								System.out.println("Snapshot class was already null ... Trying to find check method") ;
+								paramArgs = new CtClass[] { inodeClass, fsActionClass } ;
+								checkMethod = curClass.getDeclaredMethod("check", paramArgs);
+								System.out.println("Found method check() - without snapshot support") ;
+							}
+						
+							if (checkMethod != null) {
+								if (snapShotClass == null && (!withIntParamInMiddle)) {
+									checkMethod.insertAfter("org.apache.hadoop.hdfs.server.namenode.XaSecureFSPermissionChecker.logHadoopEvent(ugi,$1,$2,true) ;");
+									CtClass throwable = ClassPool.getDefault().get("java.lang.Throwable");
+									checkMethod.addCatch("{ org.apache.hadoop.hdfs.server.namenode.XaSecureFSPermissionChecker.logHadoopEvent(ugi,$1,$2,false) ; throw $e; }", throwable);
+									checkMethod.insertBefore("{ if ( org.apache.hadoop.hdfs.server.namenode.XaSecureFSPermissionChecker.check(ugi,$1,$2) ) { return ; } }");
+								}
+								else {
+									checkMethod.insertAfter("org.apache.hadoop.hdfs.server.namenode.XaSecureFSPermissionChecker.logHadoopEvent(ugi,$1,$3,true) ;");
+									CtClass throwable = ClassPool.getDefault().get("java.lang.Throwable");
+									checkMethod.addCatch("{ org.apache.hadoop.hdfs.server.namenode.XaSecureFSPermissionChecker.logHadoopEvent(ugi,$1,$3,false) ; throw $e; }", throwable);	
+									checkMethod.insertBefore("{ if ( org.apache.hadoop.hdfs.server.namenode.XaSecureFSPermissionChecker.check(ugi,$1,$3) ) { return ; } }");
+								}
+								System.out.println("Injection of code is successful ....");
+							}
+							else {
+								System.out.println("Injection failed. Unable to identify check() method on class: [" + curClass.getName() + "]. Continuing without injection ...") ; 
+							}
+							
+							injected = true ;
+						}
+					} catch (NotFoundException nfex) {
+						nfex.printStackTrace();
+						System.out.println("Unable to find the check() method with expected params in [" + aClassName + "] ....");
+						for (CtMethod m : curClass.getDeclaredMethods()) {
+							System.err.println("Found Method: " + m);
+						}
+					}
+					
+					
+					try {
+						
+						CtMethod checkMethod = curClass.getDeclaredMethod("checkPermission");
+						
+						if (checkMethod != null) {
+							checkMethod.insertBefore("org.apache.hadoop.hdfs.server.namenode.XaSecureFSPermissionChecker.checkPermissionPre($1) ;");
+							checkMethod.insertAfter("org.apache.hadoop.hdfs.server.namenode.XaSecureFSPermissionChecker.writeLog($1) ;");
+							CtClass throwable = ClassPool.getDefault().get("org.apache.hadoop.security.AccessControlException");
+							checkMethod.addCatch("{ org.apache.hadoop.hdfs.server.namenode.XaSecureFSPermissionChecker.writeLog($1); throw $e; }", throwable);	
+							injected_cm = true ;
+						}
+
+					} catch (NotFoundException nfe) {
+						nfe.printStackTrace();
+						System.out.println("Unable to find the checkPermission() method with expected params in [" + aClassName + "] ....");
+						for (CtMethod m : curClass.getDeclaredMethods()) {
+							System.err.println("Found Method: " + m);
+						}
+					}
+					
+					System.out.println("Injected: " + injected + ", Injected_CheckMethod: " + injected_cm ) ;
+					
+					if (injected) {
+						byteCode = curClass.toBytecode();
+						if (transformedClassByteCode == null) {
+							synchronized(HadoopAuthClassTransformer.class) {
+								byte[] temp = transformedClassByteCode ;
+								if (temp == null) {
+									transformedClassByteCode = byteCode;
+								}
+							}
+						}
+					}
+					
+				}
+				else {
+					byteCode = transformedClassByteCode;
+					System.out.println("Injection of code (using existing bytecode) is successful ....");
+				}
+			} catch (NotFoundException e) {
+				System.err.println("NotFoundException for class name: " + aClassName + " Exception: " + e);
+				e.printStackTrace();
+			} catch (CannotCompileException e) {
+				System.err.println("CannotCompileException for class name: " + aClassName + " Exception: " + e);
+				e.printStackTrace();
+			} catch (IOException e) {
+				System.err.println("IOException for class name: " + aClassName + " Exception: " + e);
+				e.printStackTrace();
+			}
+		
+		}
+		
+		return byteCode;
+	}
+
+}
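
For readers unfamiliar with javassist, a self-contained sketch of the insertBefore/insertAfter technique used above, applied to a toy class rather than FSPermissionChecker (class and method names invented; assumes a classic javassist version where CtClass.toClass() is available):

    import javassist.ClassPool;
    import javassist.CtClass;
    import javassist.CtMethod;

    class Toy {                       // never referenced statically below,
        public static void greet() {  // so it stays unloaded until toClass()
            System.out.println("hello");
        }
    }

    public class JavassistDemo {
        public static void main(String[] args) throws Exception {
            ClassPool cp = ClassPool.getDefault();
            CtClass ct = cp.get("Toy");
            CtMethod m = ct.getDeclaredMethod("greet");
            // Source fragments are compiled and spliced around the method body,
            // the same calls the transformer above applies to check().
            m.insertBefore("{ System.out.println(\"before greet\"); }");
            m.insertAfter("{ System.out.println(\"after greet\"); }");
            Class<?> patched = ct.toClass();  // defines the modified Toy
            patched.getMethod("greet").invoke(null);
        }
    }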

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/exceptions/XaSecureAccessControlException.java
----------------------------------------------------------------------
diff --git a/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/exceptions/XaSecureAccessControlException.java b/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/exceptions/XaSecureAccessControlException.java
new file mode 100644
index 0000000..f35674f
--- /dev/null
+++ b/hdfs-agent/src/main/java/com/xasecure/authorization/hadoop/exceptions/XaSecureAccessControlException.java
@@ -0,0 +1,32 @@
+/**************************************************************************
+ *                                                                        *
+ * The information in this document is proprietary to XASecure Inc.,      *
+ * It may not be used, reproduced or disclosed without the written        *
+ * approval from the XASecure Inc.,                                       *
+ *                                                                        *
+ * PRIVILEGED AND CONFIDENTIAL XASECURE PROPRIETARY INFORMATION           *
+ *                                                                        *
+ * Copyright (c) 2013 XASecure, Inc.  All rights reserved.                *
+ *                                                                        *
+ *************************************************************************/
+
+ /**
+  *
+  *	@version: 1.0.004
+  *
+  */
+
+package com.xasecure.authorization.hadoop.exceptions;
+
+import org.apache.hadoop.security.AccessControlException;
+
+
+public class XaSecureAccessControlException extends AccessControlException {
+
+	private static final long serialVersionUID = -4673975720243484927L;
+
+	public XaSecureAccessControlException(String aMsg) {
+		super(aMsg) ;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/src/main/java/org/apache/hadoop/hdfs/server/namenode/XaSecureFSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hdfs-agent/src/main/java/org/apache/hadoop/hdfs/server/namenode/XaSecureFSPermissionChecker.java b/hdfs-agent/src/main/java/org/apache/hadoop/hdfs/server/namenode/XaSecureFSPermissionChecker.java
new file mode 100644
index 0000000..fedab58
--- /dev/null
+++ b/hdfs-agent/src/main/java/org/apache/hadoop/hdfs/server/namenode/XaSecureFSPermissionChecker.java
@@ -0,0 +1,321 @@
+/**************************************************************************
+ *                                                                        *
+ * The information in this document is proprietary to XASecure Inc.,      *
+ * It may not be used, reproduced or disclosed without the written        *
+ * approval from the XASecure Inc.,                                       *
+ *                                                                        *
+ * PRIVILEGED AND CONFIDENTIAL XASECURE PROPRIETARY INFORMATION           *
+ *                                                                        *
+ * Copyright (c) 2013 XASecure, Inc.  All rights reserved.                *
+ *                                                                        *
+ *************************************************************************/
+
+ /**
+  *
+  *	@version: 1.0.004
+  *
+  */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static com.xasecure.authorization.hadoop.constants.XaSecureHadoopConstants.*;
+
+import java.net.InetAddress;
+import java.util.Arrays;
+import java.util.Calendar;
+import java.util.Collections;
+import java.util.Date;
+import java.util.GregorianCalendar;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TimeZone;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.xasecure.audit.model.EnumRepositoryType;
+import com.xasecure.audit.model.HdfsAuditEvent;
+import com.xasecure.audit.provider.AuditProviderFactory;
+import com.xasecure.authorization.hadoop.HDFSAccessVerifier;
+import com.xasecure.authorization.hadoop.HDFSAccessVerifierFactory;
+import com.xasecure.authorization.hadoop.config.XaSecureConfiguration;
+import com.xasecure.authorization.hadoop.constants.XaSecureHadoopConstants;
+import com.xasecure.authorization.hadoop.exceptions.XaSecureAccessControlException;
+
+
+public class XaSecureFSPermissionChecker {
+
+	private static Map<FsAction, String[]> access2ActionListMapper = null ;
+
+	private static HDFSAccessVerifier authorizer = null ;
+	
+	private static final String XaSecureModuleName  	= XaSecureConfiguration.getInstance().get(XaSecureHadoopConstants.AUDITLOG_XASECURE_MODULE_ACL_NAME_PROP , XaSecureHadoopConstants.DEFAULT_XASECURE_MODULE_ACL_NAME) ;
+	private static final String HadoopModuleName    	= XaSecureConfiguration.getInstance().get(XaSecureHadoopConstants.AUDITLOG_HADOOP_MODULE_ACL_NAME_PROP , XaSecureHadoopConstants.DEFAULT_HADOOP_MODULE_ACL_NAME) ;
+	private static final boolean addHadoopAuth 			= XaSecureConfiguration.getInstance().getBoolean(XaSecureHadoopConstants.XASECURE_ADD_HDFS_PERMISSION_PROP, XaSecureHadoopConstants.XASECURE_ADD_HDFS_PERMISSION_DEFAULT) ;
+	private static final String excludeUserList 		= XaSecureConfiguration.getInstance().get(XaSecureHadoopConstants.AUDITLOG_HDFS_EXCLUDE_LIST_PROP, XaSecureHadoopConstants.AUDITLOG_EMPTY_STRING) ;
+	private static final String repositoryName          = XaSecureConfiguration.getInstance().get(XaSecureHadoopConstants.AUDITLOG_REPOSITORY_NAME_PROP);
+	private static final boolean isAuditEnabled         = XaSecureConfiguration.getInstance().getBoolean(XaSecureHadoopConstants.AUDITLOG_IS_ENABLED_PROP, true);
+
+	private static final Log LOG = LogFactory.getLog(XaSecureFSPermissionChecker.class);
+
+	private static HashSet<String> excludeUsers = null ;
+	
+	private static ThreadLocal<LogEventInfo> currentValidatedLogEvent = new ThreadLocal<LogEventInfo>() ;
+	
+
+	static {
+		access2ActionListMapper = new HashMap<FsAction, String[]>();
+		access2ActionListMapper.put(FsAction.NONE, new String[] {});
+		access2ActionListMapper.put(FsAction.ALL, new String[] { READ_ACCCESS_TYPE, WRITE_ACCCESS_TYPE, EXECUTE_ACCCESS_TYPE });
+		access2ActionListMapper.put(FsAction.READ, new String[] { READ_ACCCESS_TYPE });
+		access2ActionListMapper.put(FsAction.READ_WRITE, new String[] { READ_ACCCESS_TYPE, WRITE_ACCCESS_TYPE });
+		access2ActionListMapper.put(FsAction.READ_EXECUTE, new String[] { READ_ACCCESS_TYPE, EXECUTE_ACCCESS_TYPE });
+		access2ActionListMapper.put(FsAction.WRITE, new String[] { WRITE_ACCCESS_TYPE });
+		access2ActionListMapper.put(FsAction.WRITE_EXECUTE, new String[] { WRITE_ACCCESS_TYPE, EXECUTE_ACCCESS_TYPE });
+		access2ActionListMapper.put(FsAction.EXECUTE, new String[] { EXECUTE_ACCCESS_TYPE });
+		
+		if (excludeUserList != null && excludeUserList.trim().length() > 0) {
+			excludeUsers = new HashSet<String>() ;
+			for(String excludeUser : excludeUserList.trim().split(",")) {
+				excludeUser = excludeUser.trim() ;
+				if (LOG.isDebugEnabled()) {
+					LOG.debug("Adding exclude user [" + excludeUser + "]");
+				}
+				excludeUsers.add(excludeUser) ;
+ 			}
+		}
+		
+		
+	}
+
+	public static boolean check(UserGroupInformation ugi, INode inode, FsAction access) throws XaSecureAccessControlException {
+
+		if (inode == null) {
+			return false;
+		}
+
+		String user = ugi.getShortUserName();
+
+		Set<String> groups = Collections.unmodifiableSet(new HashSet<String>(Arrays.asList(ugi.getGroupNames())));
+		
+		String pathOwnerName = inode.getUserName() ;
+
+		boolean accessGranted =  AuthorizeAccessForUser(inode.getFullPathName(), pathOwnerName, access, user, groups);
+		
+		if (!accessGranted &&  !addHadoopAuth ) {
+			String inodeInfo = (inode.isDirectory() ? "directory" : "file") +  "="  + "\"" + inode.getFullPathName() + "\""  ;
+		    throw new XaSecureAccessControlException("Permission denied: principal{user=" + user + ",groups: " + groups + "}, access=" + access + ", " + inodeInfo ) ; 
+		}
+		
+		return accessGranted ;
+
+	}
+
+	public static boolean AuthorizeAccessForUser(String aPathName, String aPathOwnerName, FsAction access, String user, Set<String> groups) throws XaSecureAccessControlException {
+		boolean accessGranted = false;
+		try {
+			if (XaSecureHadoopConstants.HDFS_ROOT_FOLDER_PATH_ALT.equals(aPathName)) {
+				aPathName = XaSecureHadoopConstants.HDFS_ROOT_FOLDER_PATH;
+			}
+			
+			String[] accessTypes = access2ActionListMapper.get(access);
+
+			if ((accessTypes == null) || (accessTypes.length == 0)) {
+				accessGranted = false;
+			} else {
+				
+				if (authorizer == null) {
+					synchronized(XaSecureFSPermissionChecker.class) {
+						HDFSAccessVerifier temp = authorizer ;
+						if (temp == null) {
+							try {
+								authorizer = HDFSAccessVerifierFactory.getInstance();
+							}
+							catch(Throwable t) {
+								LOG.error("Unable to create Authorizer", t);
+							}
+						}
+					}
+				}
+				
+				if (authorizer != null) {
+					for (String accessType : accessTypes) {
+						accessGranted = authorizer.isAccessGranted(aPathName, aPathOwnerName, accessType, user, groups);
+						if (!accessGranted) {
+							break;
+						}
+					}
+				}
+			}
+
+		} finally {
+			logEvent(XaSecureModuleName, user, aPathName, access, accessGranted);
+		}
+		return accessGranted;
+	}
+	
+	
+	public static void logHadoopEvent(UserGroupInformation ugi, INode inode, FsAction access, boolean accessGranted) {
+		String path = (inode == null) ? XaSecureHadoopConstants.AUDITLOG_EMPTY_STRING : inode.getFullPathName() ;
+		String username = (ugi == null) ? XaSecureHadoopConstants.AUDITLOG_EMPTY_STRING : ugi.getShortUserName() ;
+		logEvent(HadoopModuleName, username, path,  access, accessGranted);
+	}
+	
+	
+
+	
+	
+	private static void logEvent(String moduleName,  String username, String path, FsAction access, boolean accessGranted) {
+		LogEventInfo e = null;
+
+		if(isAuditEnabled) {
+		    e = new LogEventInfo(moduleName,  username, path, access, accessGranted) ;
+		}
+
+		currentValidatedLogEvent.set(e);
+	}
+	
+	
+	public static void checkPermissionPre(String pathToBeValidated) {
+		// TODO: save the path in a thread-local
+	}
+	
+	public static void checkPermissionPost(String pathToBeValidated) {
+		writeLog(pathToBeValidated);
+	}
+
+	public static void writeLog(String pathValidated) {
+		
+		LogEventInfo e = currentValidatedLogEvent.get();
+		
+		if (e == null) {
+			return ;
+		}
+		
+		String username = e.getUserName() ;
+		
+		boolean skipLog = (username != null && excludeUsers != null && excludeUsers.contains(username)) ;
+		
+		if (skipLog) {
+			return ;
+		}
+
+		String requestedPath = e.getPath() ;
+		
+		if (requestedPath == null) {
+			requestedPath = XaSecureHadoopConstants.AUDITLOG_EMPTY_STRING ;
+		}
+
+		if (authorizer == null || ! authorizer.isAuditLogEnabled(requestedPath)) {
+			return ;
+		}
+		
+		
+		String accessType = ( (e.getAccess() == null) ? XaSecureHadoopConstants.AUDITLOG_EMPTY_STRING : e.getAccess().toString() ) ;
+		
+		HdfsAuditEvent auditEvent = new HdfsAuditEvent();
+
+		auditEvent.setUser(username);
+		auditEvent.setResourcePath(requestedPath);
+		auditEvent.setResourceType("HDFSPath") ;
+		auditEvent.setAccessType(accessType);
+		auditEvent.setAccessResult((short)(e.isAccessGranted() ? 1 : 0));
+		auditEvent.setClientIP(getRemoteIp());
+		auditEvent.setEventTime(getUTCDate());
+		auditEvent.setAclEnforcer(e.getModuleName());
+		auditEvent.setRepositoryType(EnumRepositoryType.HDFS);
+		auditEvent.setRepositoryName(repositoryName);
+		auditEvent.setResultReason(pathValidated);
+
+		/*
+		 * Review following audit fields for appropriate values
+		 *
+		auditEvent.setAgentId();
+		auditEvent.setPolicyId();
+		auditEvent.setSessionId();
+		auditEvent.setClientType();
+		 *
+		 */
+
+		try {
+			if (LOG.isDebugEnabled()) {
+				LOG.debug("Audit log of auditEvent: [" + auditEvent.toString() + "] - START.");
+			}
+			AuditProviderFactory.getAuditProvider().log(auditEvent);
+			if (LOG.isDebugEnabled()) {
+				LOG.debug("Audit log of auditEvent: [" + auditEvent.toString() + "] - END.");
+			}
+		}
+		catch(Throwable t) {
+			LOG.error("ERROR during audit log of auditEvent: [" + auditEvent.toString() + "]", t);
+		}
+	}
+	
+	
+	private static String getRemoteIp() {
+		String ret = null ;
+		InetAddress ip = Server.getRemoteIp() ;
+		if (ip != null) {
+			ret = ip.toString() ;
+		}
+		else {
+			ret = "" ;
+		}
+		return ret ;
+	}
+	
+	
+	// Note: shifts the epoch millis by the local UTC offset so the stored
+	// timestamp renders as UTC wall-clock time in downstream consumers.
+	public static Date getUTCDate() {
+		Calendar local=Calendar.getInstance();
+	    int offset = local.getTimeZone().getOffset(local.getTimeInMillis());
+	    GregorianCalendar utc = new GregorianCalendar(TimeZone.getTimeZone("GMT+0"));
+	    utc.setTimeInMillis(local.getTimeInMillis());
+	    utc.add(Calendar.MILLISECOND, -offset);
+	    return utc.getTime();
+	}
+
+}
+
+class LogEventInfo {
+	String moduleName ;
+	String userName ;
+	String path ;
+	FsAction access ;
+	boolean accessGranted ;
+	
+	LogEventInfo(String moduleName,  String username, String path, FsAction access, boolean accessGranted) {
+		this.moduleName = moduleName ;
+		this.userName = username ;
+		this.path = path ;
+		this.access = access ;
+		this.accessGranted = accessGranted;
+	}
+
+	public String getModuleName() {
+		return moduleName;
+	}
+
+	public String getUserName() {
+		return userName;
+	}
+
+	public String getPath() {
+		return path;
+	}
+
+	public FsAction getAccess() {
+		return access;
+	}
+
+	public boolean isAccessGranted() {
+		return accessGranted;
+	}
+	
+	
+	
+}
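
The audit flow above hinges on a per-thread handoff: logEvent() parks the outcome in a ThreadLocal during check(), and writeLog() picks it up when the injected checkPermission() hook fires on the same RPC handler thread. A stripped-down sketch of the idiom (names invented):

    public class PerThreadHandoff {
        private static final ThreadLocal<String> lastOutcome = new ThreadLocal<String>();

        static void record(String outcome) {   // analogous to logEvent()
            lastOutcome.set(outcome);
        }

        static void flush() {                  // analogous to writeLog()
            String outcome = lastOutcome.get();
            if (outcome != null) {
                System.out.println("audit: " + outcome);
                lastOutcome.remove();          // avoid leaking across pooled threads
            }
        }

        public static void main(String[] args) {
            record("user=alice path=/tmp access=READ granted=true");
            flush();
        }
    }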

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hdfs-agent/src/main/resources/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/hdfs-agent/src/main/resources/META-INF/MANIFEST.MF b/hdfs-agent/src/main/resources/META-INF/MANIFEST.MF
new file mode 100644
index 0000000..0d9d2b4
--- /dev/null
+++ b/hdfs-agent/src/main/resources/META-INF/MANIFEST.MF
@@ -0,0 +1 @@
+premain-class: com.xasecure.authorization.hadoop.agent.AuthCodeInjectionJavaAgent

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/conf/hive-site-changes.cfg
----------------------------------------------------------------------
diff --git a/hive-agent/conf/hive-site-changes.cfg b/hive-agent/conf/hive-site-changes.cfg
new file mode 100644
index 0000000..eaa98f4
--- /dev/null
+++ b/hive-agent/conf/hive-site-changes.cfg
@@ -0,0 +1,9 @@
+#hive.server2.authentication			KERBEROS															mod		create-if-not-exists
+hive.security.authorization.enabled	true																mod		create-if-not-exists
+hive.security.authorization.manager	com.xasecure.authorization.hive.authorizer.XaSecureHiveAuthorizerFactory		mod		create-if-not-exists
+hive.security.authenticator.manager	org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator		mod	create-if-not-exists
+
+hive.conf.restricted.list			hive.server2.authentication											append	create-if-not-exists	,
+hive.conf.restricted.list			hive.security.authorization.enabled									append	create-if-not-exists	,
+hive.conf.restricted.list			hive.security.authorization.manager									append	create-if-not-exists	,
+hive.conf.restricted.list			hive.security.authenticator.manager									append	create-if-not-exists	,
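
Each row in these *-changes.cfg files appears to carry: property name, new value, an action (mod replaces, append joins with the existing value), a create-if-not-exists flag, and, for append, a delimiter (the trailing commas above). The parsing itself is done by the XmlConfigChanger utility invoked from install.sh; the append semantics amount to a duplicate-safe string join, sketched here purely for illustration:

    public class AppendSemanticsSketch {
        // Join addition onto existing using delim, skipping duplicates.
        static String append(String existing, String addition, String delim) {
            if (existing == null || existing.isEmpty()) return addition;
            for (String part : existing.split(java.util.regex.Pattern.quote(delim))) {
                if (part.trim().equals(addition)) return existing; // already present
            }
            return existing + delim + addition;
        }

        public static void main(String[] args) {
            String restricted = "hive.server2.authentication";
            restricted = append(restricted, "hive.security.authorization.enabled", ",");
            System.out.println(restricted); // both names, comma separated
        }
    }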

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/conf/xasecure-audit-changes.cfg
----------------------------------------------------------------------
diff --git a/hive-agent/conf/xasecure-audit-changes.cfg b/hive-agent/conf/xasecure-audit-changes.cfg
new file mode 100644
index 0000000..4b04f92
--- /dev/null
+++ b/hive-agent/conf/xasecure-audit-changes.cfg
@@ -0,0 +1,5 @@
+xasecure.audit.jpa.javax.persistence.jdbc.url		jdbc:mysql://%XAAUDIT.DB.HOSTNAME%/%XAAUDIT.DB.DATABASE_NAME%	mod create-if-not-exists
+xasecure.audit.jpa.javax.persistence.jdbc.user		%XAAUDIT.DB.USER_NAME% 											mod create-if-not-exists
+xasecure.audit.jpa.javax.persistence.jdbc.password	%XAAUDIT.DB.PASSWORD% 											mod create-if-not-exists
+xasecure.audit.repository.name						%REPOSITORY_NAME% 												mod create-if-not-exists
+xasecure.audit.credential.provider.file     		jceks://file%CREDENTIAL_PROVIDER_FILE% 							mod create-if-not-exists

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/conf/xasecure-audit.xml
----------------------------------------------------------------------
diff --git a/hive-agent/conf/xasecure-audit.xml b/hive-agent/conf/xasecure-audit.xml
new file mode 100644
index 0000000..4014546
--- /dev/null
+++ b/hive-agent/conf/xasecure-audit.xml
@@ -0,0 +1,90 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+	<property>
+		<name>xasecure.audit.provider.factory</name>
+		<value>com.xasecure.audit.provider.AuditProviderFactory</value>
+	</property>
+
+	<!--  Properties whose names begin with "xasecure.audit." are used to configure JPA -->
+	<property>
+		<name>xasecure.audit.jpa.javax.persistence.jdbc.url</name>
+		<value>jdbc:mysql://localhost:3306/xa_db</value>
+	</property>
+
+	<property>
+		<name>xasecure.audit.jpa.javax.persistence.jdbc.user</name>
+		<value>xaaudit</value>
+	</property>
+
+	<property>
+		<name>xasecure.audit.jpa.javax.persistence.jdbc.password</name>
+		<value>none</value>
+	</property>
+
+	<property>
+		<name>xasecure.audit.jpa.javax.persistence.jdbc.driver</name>
+		<value>com.mysql.jdbc.Driver</value>
+	</property>
+
+    <property>
+		<name>xasecure.audit.credential.provider.file</name>
+		<value>jceks://file/etc/xasecure/conf/auditcred.jceks</value>
+	</property>
+	
+	<property>
+		<name>xasecure.audit.repository.name</name>
+		<value>hadoopdev</value>
+	</property>	
+	
+	<property>
+		<name>xasecure.audit.is.enabled</name>
+		<value>true</value>
+	</property>	
+
+	<property>
+		<name>xasecure.audit.log4j.is.enabled</name>
+		<value>false</value>
+	</property>	
+
+	<property>
+		<name>xasecure.audit.log4j.is.async</name>
+		<value>false</value>
+	</property>	
+	
+	<property>
+		<name>xasecure.audit.log4j.async.max.queue.size</name>
+		<value>10240</value>
+	</property>	
+
+	<property>
+		<name>xasecure.audit.log4j.async.max.flush.interval.ms</name>
+		<value>30000</value>
+	</property>	
+	
+	<property>
+		<name>xasecure.audit.db.is.enabled</name>
+		<value>true</value>
+	</property>	
+	
+	<property>
+		<name>xasecure.audit.db.is.async</name>
+		<value>false</value>
+	</property>	
+	
+	<property>
+		<name>xasecure.audit.db.async.max.queue.size</name>
+		<value>10240</value>
+	</property>	
+
+	<property>
+		<name>xasecure.audit.db.async.max.flush.interval.ms</name>
+		<value>30000</value>
+	</property>	
+
+	<property>
+		<name>xasecure.audit.db.batch.size</name>
+		<value>100</value>
+	</property>	
+</configuration>
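
The *.async.* and batch properties above name a familiar shape: a bounded in-memory queue drained in batches on a flush interval. A generic sketch of that pattern, purely to illustrate what the knobs govern (this is not the provider's actual implementation):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class AsyncAuditSketch {
        private final BlockingQueue<String> queue;  // cf. async.max.queue.size
        private final long flushIntervalMs;         // cf. async.max.flush.interval.ms
        private final int batchSize;                // cf. db.batch.size

        AsyncAuditSketch(int maxQueue, long flushIntervalMs, int batchSize) {
            this.queue = new ArrayBlockingQueue<String>(maxQueue);
            this.flushIntervalMs = flushIntervalMs;
            this.batchSize = batchSize;
        }

        boolean log(String event) {
            return queue.offer(event);  // returns false (drops) when the queue is full
        }

        void flushLoop() throws InterruptedException {
            List<String> batch = new ArrayList<String>(batchSize);
            while (true) {
                String first = queue.poll(flushIntervalMs, TimeUnit.MILLISECONDS);
                if (first != null) {
                    batch.add(first);
                    queue.drainTo(batch, batchSize - batch.size());
                }
                if (!batch.isEmpty()) {
                    System.out.println("writing batch of " + batch.size()); // stand-in for the DB write
                    batch.clear();
                }
            }
        }
    }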

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/conf/xasecure-hive-security-changes.cfg
----------------------------------------------------------------------
diff --git a/hive-agent/conf/xasecure-hive-security-changes.cfg b/hive-agent/conf/xasecure-hive-security-changes.cfg
new file mode 100644
index 0000000..a57e0e9
--- /dev/null
+++ b/hive-agent/conf/xasecure-hive-security-changes.cfg
@@ -0,0 +1,10 @@
+#
+# Change the original policy parameters to work with the policy-manager-based authorizer.
+# 
+#
+hive.authorization.verifier.classname				com.xasecure.pdp.hive.XASecureAuthorizer								mod	create-if-not-exists
+xasecure.hive.policymgr.url							%POLICY_MGR_URL%/service/assets/policyList/%REPOSITORY_NAME% 			mod create-if-not-exists
+xasecure.hive.policymgr.url.saveAsFile				/tmp/hive_%REPOSITORY_NAME%_json  									    mod create-if-not-exists
+xasecure.hive.policymgr.url.laststoredfile			%POLICY_CACHE_FILE_PATH%/hive_%REPOSITORY_NAME%_json 					mod create-if-not-exists
+xasecure.hive.policymgr.url.reloadIntervalInMillis 	30000 																	mod create-if-not-exists
+xasecure.hive.policymgr.ssl.config					/etc/hive/conf/xasecure-policymgr-ssl.xml								mod create-if-not-exists
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/conf/xasecure-hive-security.xml
----------------------------------------------------------------------
diff --git a/hive-agent/conf/xasecure-hive-security.xml b/hive-agent/conf/xasecure-hive-security.xml
new file mode 100644
index 0000000..714589c
--- /dev/null
+++ b/hive-agent/conf/xasecure-hive-security.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+
+	<!--  The following property is used to select appropriate XASecure Authorizer Module (filebased, policymanager based) -->
+	<property>
+		<name>hive.authorization.verifier.classname</name>
+		<value>com.xasecure.pdp.hive.XASecureAuthorizer</value>
+		<description>
+			Class Name of the authorization Module 
+		</description>
+	</property>
+
+
+	<!-- The following properties are used only when PolicyManager is used as 
+		main storage for all policy -->
+	<property>
+		<name>xasecure.hive.policymgr.url</name>
+		<value>http://policymanagerhost:port/service/assets/dev-hive</value>
+		<description>
+			Location from which XASecure Role Based Authorization Info
+			is retrieved.
+	</property>
+	<property>
+		<name>xasecure.hive.policymgr.url.saveAsFile</name>
+		<value>/tmp/xasecure-hive-policy.json</value>
+		<description>
+			Location where XASecure Role Based Authorization Info is
+			saved after successful retrieval from policymanager
+		</description>
+	</property>
+	<property>
+		<name>xasecure.hive.policymgr.url.laststoredfile</name>
+		<value>/home/hive/last_xasecure-hive-policy.json</value>
+		<description>
+			Location and file where last XASecure Role Based Authorization Info
+		    is saved after successful retrieval from policymanager.
+		</description>
+	</property>
+	<property>
+		<name>xasecure.hive.policymgr.url.reloadIntervalInMillis</name>
+		<value>30000</value>
+		<description>
+			How often to check the authorization URL for changes and
+			reload it into memory (reloaded only if there are changes)
+		</description>
+	</property>
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/conf/xasecure-policymgr-ssl-changes.cfg
----------------------------------------------------------------------
diff --git a/hive-agent/conf/xasecure-policymgr-ssl-changes.cfg b/hive-agent/conf/xasecure-policymgr-ssl-changes.cfg
new file mode 100644
index 0000000..5490c76
--- /dev/null
+++ b/hive-agent/conf/xasecure-policymgr-ssl-changes.cfg
@@ -0,0 +1,9 @@
+#
+# SSL Params
+#
+xasecure.policymgr.clientssl.keystore					 %SSL_KEYSTORE_FILE_PATH%						mod create-if-not-exists
+xasecure.policymgr.clientssl.keystore.password			 %SSL_KEYSTORE_PASSWORD%						mod create-if-not-exists
+xasecure.policymgr.clientssl.keystore.credential.file	 jceks://file%CREDENTIAL_PROVIDER_FILE%			mod create-if-not-exists
+xasecure.policymgr.clientssl.truststore				     %SSL_TRUSTSTORE_FILE_PATH%						mod create-if-not-exists
+xasecure.policymgr.clientssl.truststore.password	     %SSL_TRUSTSTORE_PASSWORD%						mod create-if-not-exists
+xasecure.policymgr.clientssl.truststore.credential.file  jceks://file%CREDENTIAL_PROVIDER_FILE%         mod create-if-not-exists	
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/conf/xasecure-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/hive-agent/conf/xasecure-policymgr-ssl.xml b/hive-agent/conf/xasecure-policymgr-ssl.xml
new file mode 100644
index 0000000..00133f9
--- /dev/null
+++ b/hive-agent/conf/xasecure-policymgr-ssl.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+	<!--  The following properties are used for 2-way SSL client server validation -->
+	<property>
+		<name>xasecure.policymgr.clientssl.keystore</name>
+		<value>hadoopdev-clientcert.jks</value>
+		<description> 
+			Java Keystore files 
+		</description>
+	</property>
+	<property>
+		<name>xasecure.policymgr.clientssl.keystore.password</name>
+		<value>none</value>
+		<description> 
+			password for keystore 
+		</description>
+	</property>
+	<property>
+		<name>xasecure.policymgr.clientssl.truststore</name>
+		<value>cacerts-xasecure.jks</value>
+		<description> 
+			java truststore file
+		</description>
+	</property>
+	<property>
+		<name>xasecure.policymgr.clientssl.truststore.password</name>
+		<value>none</value>
+		<description> 
+			java  truststore password
+		</description>
+	</property>
+    <property>
+		<name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+		<value>jceks://file/tmp/keystore-hadoopdev-ssl.jceks</value>
+		<description> 
+			java  keystore credential file
+		</description>
+	</property>
+	<property>
+		<name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+		<value>jceks://file/tmp/truststore-hadoopdev-ssl.jceks</value>
+		<description> 
+			java  truststore credential file
+		</description>
+	</property>
+</configuration>
\ No newline at end of file
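
The four values in this file are the standard inputs for a mutually authenticated TLS client. A minimal sketch of assembling them into an SSLContext with stock JSSE APIs, using the placeholder values from the file (real deployments resolve the passwords from the credential files rather than hard-coding them):

    import java.io.FileInputStream;
    import java.security.KeyStore;
    import javax.net.ssl.KeyManagerFactory;
    import javax.net.ssl.SSLContext;
    import javax.net.ssl.TrustManagerFactory;

    public class ClientSslSketch {
        public static SSLContext build() throws Exception {
            KeyStore ks = KeyStore.getInstance("JKS");
            ks.load(new FileInputStream("hadoopdev-clientcert.jks"), "none".toCharArray());
            KeyManagerFactory kmf =
                KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
            kmf.init(ks, "none".toCharArray());

            KeyStore ts = KeyStore.getInstance("JKS");
            ts.load(new FileInputStream("cacerts-xasecure.jks"), "none".toCharArray());
            TrustManagerFactory tmf =
                TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
            tmf.init(ts);

            SSLContext ctx = SSLContext.getInstance("TLS");
            ctx.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
            return ctx;
        }
    }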

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/pom.xml
----------------------------------------------------------------------
diff --git a/hive-agent/pom.xml b/hive-agent/pom.xml
new file mode 100644
index 0000000..b4e3b2a
--- /dev/null
+++ b/hive-agent/pom.xml
@@ -0,0 +1,62 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>security_agents.hive-agent</groupId>
+  <artifactId>hive-agent</artifactId>
+  <name>Hive Security Agent</name>
+  <description>Hive Security Agents</description>
+  <packaging>jar</packaging>
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+  </properties>
+  <parent>
+     <groupId>com.hortonworks.hadoop.security</groupId>
+     <artifactId>argus</artifactId>
+     <version>3.5.000</version>
+     <relativePath>..</relativePath>
+  </parent>
+  <dependencies>
+    <dependency>
+	  <groupId>org.apache.hive</groupId>
+	  <artifactId>hive-common</artifactId>
+	  <version>${hive.version}</version>
+      <scope>system</scope>
+      <systemPath>${local.lib.dir}/hive-0.14.0-SNAPSHOT/hive-common-0.14.0-SNAPSHOT.jar</systemPath>
+    </dependency>
+    <dependency>
+	  <groupId>org.apache.hive</groupId>
+	  <artifactId>hive-service</artifactId>
+	  <version>${hive.version}</version>
+        <scope>system</scope>
+      <systemPath>${local.lib.dir}/hive-0.14.0-SNAPSHOT/hive-service-0.14.0-SNAPSHOT.jar</systemPath>
+    </dependency>
+    <dependency>
+	  <groupId>org.apache.hive</groupId>
+	  <artifactId>hive-exec</artifactId>
+	  <version>${hive.version}</version>
+        <scope>system</scope>
+      <systemPath>${local.lib.dir}/hive-0.14.0-SNAPSHOT/hive-exec-0.14.0-SNAPSHOT.jar</systemPath>
+    </dependency>
+    <dependency>
+	  <groupId>org.apache.hive</groupId>
+	  <artifactId>hive-metastore</artifactId>
+	  <version>${hive.version}</version>
+        <scope>system</scope>
+      <systemPath>${local.lib.dir}/hive-0.14.0-SNAPSHOT/hive-metastore-0.14.0-SNAPSHOT.jar</systemPath>
+    </dependency>
+    <dependency>
+	  <groupId>org.apache.hadoop</groupId>
+	  <artifactId>hadoop-hdfs</artifactId>
+	  <version>${hadoop.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>security_agents.agents-common</groupId>
+      <artifactId>agents-common</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>security_agents.agents-audit</groupId>
+      <artifactId>agents-audit</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+  </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/scripts/install.properties
----------------------------------------------------------------------
diff --git a/hive-agent/scripts/install.properties b/hive-agent/scripts/install.properties
new file mode 100644
index 0000000..9ce44be
--- /dev/null
+++ b/hive-agent/scripts/install.properties
@@ -0,0 +1,73 @@
+#
+# Location of Policy Manager URL  
+#
+#
+# Example:
+# POLICY_MGR_URL=http://policymanager.xasecure.net:6080
+#
+
+POLICY_MGR_URL=
+
+#
+# This is the repository name created within policy manager
+#
+# Example:
+# REPOSITORY_NAME=hivedev
+#
+
+REPOSITORY_NAME=
+
+#
+# AUDIT DB Configuration
+# 
+#  This information should match what you specified during the PolicyManager installation
+# 
+# Example:
+# XAAUDIT.DB.HOSTNAME=localhost
+# XAAUDIT.DB.DATABASE_NAME=xasecure
+# XAAUDIT.DB.USER_NAME=xalogger
+# XAAUDIT.DB.PASSWORD=none
+
+
+XAAUDIT.DB.HOSTNAME=
+XAAUDIT.DB.DATABASE_NAME=
+XAAUDIT.DB.USER_NAME=
+XAAUDIT.DB.PASSWORD=
+
+
+#
+# POLICY CACHE FILE PATH
+# 
+# This information is used to configure the path where the policy cache is stored.
+# 
+# Example:
+# POLICY_CACHE_FILE_PATH=/home/hive
+# 
+
+POLICY_CACHE_FILE_PATH=
+
+#
+# Credential Provider File Path
+#
+# Example:
+# CREDENTIAL_PROVIDER_FILE=/etc/xasecure/conf/{repoName}-credstore.jceks
+#
+
+CREDENTIAL_PROVIDER_FILE=
+
+#
+# SSL Client Certificate Information
+#
+# Example:
+# SSL_KEYSTORE_FILE_PATH=/etc/xasecure/conf/xasecure-hive-client.jks
+# SSL_KEYSTORE_PASSWORD=none
+# SSL_TRUSTSTORE_FILE_PATH=/etc/xasecure/conf/xasecure-truststore.jks
+# SSL_TRUSTSTORE_PASSWORD=none
+
+#
+# If you do not use SSL between the agent and the security admin tool, leave these sample values as they are.
+#
+
+SSL_KEYSTORE_FILE_PATH=agentKey.jks
+SSL_KEYSTORE_PASSWORD=myKeyFilePassword
+SSL_TRUSTSTORE_FILE_PATH=cacert
+SSL_TRUSTSTORE_PASSWORD=changeit
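+
+#
+# For reference, a filled-in non-SSL configuration might look like the following
+# sketch (values taken from the examples above; hostnames are illustrative):
+#
+# POLICY_MGR_URL=http://policymanager.xasecure.net:6080
+# REPOSITORY_NAME=hivedev
+# XAAUDIT.DB.HOSTNAME=localhost
+# XAAUDIT.DB.DATABASE_NAME=xasecure
+# XAAUDIT.DB.USER_NAME=xalogger
+# XAAUDIT.DB.PASSWORD=none
+# POLICY_CACHE_FILE_PATH=/home/hive
+# CREDENTIAL_PROVIDER_FILE=/etc/xasecure/conf/hivedev-credstore.jceks
+#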

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/scripts/install.sh
----------------------------------------------------------------------
diff --git a/hive-agent/scripts/install.sh b/hive-agent/scripts/install.sh
new file mode 100644
index 0000000..51e49a0
--- /dev/null
+++ b/hive-agent/scripts/install.sh
@@ -0,0 +1,235 @@
+#!/bin/bash
+
+function create_jceks()
+{
+
+alias=$1
+pass=$2
+jceksFile=$3
+
+ret=`hadoop credential create ${alias} --value ${pass} --provider jceks://file${jceksFile} 2>&1`
+res=`echo $ret | grep 'already exist'`
+
+if ! [ "${res}" == "" ]
+then
+   echo "Credential file already exists,recreating the file..."
+   hadoop credential delete ${alias} --provider jceks://file${jceksFile}
+   hadoop credential create ${alias} --value ${pass} --provider jceks://file${jceksFile}
+fi
+}
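+# Example invocation (a sketch; the alias matches this script's usage below, the
+# password and store path are illustrative):
+#   create_jceks auditDBCred myAuditDbPassword /etc/xasecure/conf/hivedev-credstore.jceks
+# which addresses the store as jceks://file/etc/xasecure/conf/hivedev-credstore.jceks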
+
+hive_dir=/usr/lib/hive
+hive_lib_dir=${hive_dir}/lib
+hive_conf_dir=/etc/hive/conf
+hive_srv_conf_dir=/etc/hive/conf.server
+hive_bin_dir=${hive_dir}/bin
+
+
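+# Prefer the HiveServer2 config dir (/etc/hive/conf.server) when it exists;
+# otherwise both the server and client config dirs fall back to /etc/hive/conf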
+if [ -d ${hive_srv_conf_dir} -o -L ${hive_srv_conf_dir} ]
+then
+	hive_cli_conf_dir=${hive_conf_dir}
+	hive_conf_dir=${hive_srv_conf_dir}
+else
+	hive_srv_conf_dir=${hive_conf_dir}
+	hive_cli_conf_dir="${hive_conf_dir}"
+fi
+
+hdp_dir=/usr/lib/hadoop
+hdp_lib_dir=/usr/lib/hadoop/lib
+hdp_conf_dir=/etc/hadoop/conf
+
+export CONFIG_FILE_OWNER="hive:hadoop"
+
+
+if [ ! -d "${hdp_dir}" ]
+then
+	echo "ERROR: Invalid HADOOP HOME Directory: [${hdp_dir}]. Exiting ..."
+	exit 1
+fi
+
+#echo "Hadoop Configuration Path: ${hdp_conf_dir}"
+
+if [ ! -f ${hdp_conf_dir}/hadoop-env.sh ]
+then
+	echo "ERROR: Invalid HADOOP CONF Directory: [${hdp_conf_dir}]."
+	echo "ERROR: Unable to locate: hadoop-env.sh. Exiting ..."
+	exit 1
+fi
+
+install_dir=`dirname $0`
+
+[ "${install_dir}" = "." ] && install_dir=`pwd`
+
+#echo "Current Install Directory: [${install_dir}]"
+
+
+#
+# --- Backup current configuration for backup - START
+#
+
+COMPONENT_NAME=hive
+
+XASECURE_VERSION=`cat ${install_dir}/version`
+
+CFG_DIR=${hive_conf_dir}
+XASECURE_ROOT=/etc/xasecure/${COMPONENT_NAME}
+BACKUP_TYPE=pre
+CUR_VERSION_FILE=${XASECURE_ROOT}/.current_version
+CUR_CFG_DIR_FILE=${XASECURE_ROOT}/.config_dir
+PRE_INSTALL_CONFIG=${XASECURE_ROOT}/${BACKUP_TYPE}-${XASECURE_VERSION}
+
+backup_dt=`date '+%Y%m%d%H%M%S'`
+
+if [ -d "${PRE_INSTALL_CONFIG}" ]
+then
+	PRE_INSTALL_CONFIG="${PRE_INSTALL_CONFIG}.${backup_dt}"
+fi
+
+if [ -d ${CFG_DIR} ]
+then
+	( cd ${CFG_DIR} ; find . -print | cpio -pdm ${PRE_INSTALL_CONFIG} )
+	[ -f ${CUR_VERSION_FILE} ] && mv ${CUR_VERSION_FILE} ${CUR_VERSION_FILE}-${backup_dt}
+	echo ${XASECURE_VERSION} > ${CUR_VERSION_FILE}
+	echo ${CFG_DIR} > ${CUR_CFG_DIR_FILE}
+else
+	echo "ERROR: Unable to find configuration directory: [${CFG_DIR}]"
+	exit 1
+fi
+
+cp -f ${install_dir}/uninstall.sh ${XASECURE_ROOT}/
+
+#
+# --- Backup current configuration for backup  - END
+#
+
+
+dt=`date '+%Y%m%d%H%M%S'`
+for f in ${install_dir}/conf/*
+do
+	if [ -f ${f} ]
+	then
+		fn=`basename $f`
+		if [ ! -f ${hive_conf_dir}/${fn} ]
+		then
+			echo "+cp ${f} ${hive_conf_dir}/${fn}"
+			cp ${f} ${hive_conf_dir}/${fn}
+		else
+			echo "WARN: ${fn} already exists in ${hive_conf_dir}; using existing configuration ${fn}"
+		fi
+	fi
+done
+
+
+#echo "Hadoop XASecure Library Path: ${hdp_lib_dir}"
+
+if [ ! -d ${hive_lib_dir} ]
+then
+	echo "+mkdir -p ${hive_lib_dir}"
+	mkdir -p ${hive_lib_dir}
+fi
+
+for f in ${install_dir}/dist/*.jar ${install_dir}/lib/*.jar
+do
+	if [ -f ${f} ]
+	then
+		fn=`basename $f`
+		echo "+cp ${f} ${hive_lib_dir}/${fn}"
+		cp ${f} ${hive_lib_dir}/${fn}
+	fi
+done
+
+#
+# Copy the SSL parameters
+#
+
+CredFile=`grep '^CREDENTIAL_PROVIDER_FILE' ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+
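+# CREDENTIAL_PROVIDER_FILE must be an absolute path: it is appended to the
+# jceks://file provider URI by create_jceks()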
+if ! echo "${CredFile}" | grep -q '^/'
+then
+  echo "ERROR: Please set CREDENTIAL_PROVIDER_FILE to an absolute file path"
+  exit 1
+fi
+
+#
+# Generate Credential Provider file and Credential for Audit DB access.
+#
+
+
+auditCredAlias="auditDBCred"
+
+auditdbCred=`grep '^XAAUDIT.DB.PASSWORD' ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+
+create_jceks ${auditCredAlias} ${auditdbCred} ${CredFile}
+
+
+#
+# Generate Credential Provider file and Credential for SSL KEYSTORE AND TRUSTSTORE
+#
+
+
+sslkeystoreAlias="sslKeyStore"
+
+sslkeystoreCred=`grep '^SSL_KEYSTORE_PASSWORD' ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+
+create_jceks ${sslkeystoreAlias} ${sslkeystoreCred} ${CredFile}
+
+
+ssltruststoreAlias="sslTrustStore"
+
+ssltruststoreCred=`grep '^SSL_TRUSTSTORE_PASSWORD' ${install_dir}/install.properties | awk -F= '{ print $2 }'`
+
+create_jceks ${ssltruststoreAlias} ${ssltruststoreCred} ${CredFile}
+
+chown ${CONFIG_FILE_OWNER} ${CredFile} 
+
+PROP_ARGS="-p  ${install_dir}/install.properties"
+
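+# For each installer/conf/<name>-changes.cfg, merge the listed property changes into
+# the matching <name>.xml under ${hive_conf_dir} via XmlConfigChanger, keeping a
+# timestamped archive of the original file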
+for f in ${install_dir}/installer/conf/*-changes.cfg
+do
+        if [ -f ${f} ]
+        then
+                fn=`basename $f`
+                orgfn=`echo $fn | sed -e 's:-changes.cfg:.xml:'`
+                fullpathorgfn="${hive_conf_dir}/${orgfn}"
+                if [ ! -f ${fullpathorgfn} ]
+                then
+                        echo "ERROR: Unable to find ${fullpathorgfn}"
+                        exit 1
+                fi
+                archivefn="${hive_conf_dir}/.${orgfn}.${dt}"
+                newfn="${hive_conf_dir}/.${orgfn}-new.${dt}"
+                cp ${fullpathorgfn} ${archivefn}
+                if [ $? -eq 0 ]
+                then
+                        classpath="${install_dir}/installer/lib/*:/usr/lib/hadoop/*:/usr/lib/hadoop/lib/*"
+                        java -cp "${classpath}" com.xasecure.utils.install.XmlConfigChanger -i ${archivefn} -o ${newfn} -c ${f} ${PROP_ARGS}
+                        if [ $? -eq 0 ]
+                        then
+                                diff -w ${newfn} ${fullpathorgfn} > /dev/null 2>&1 
+                                if [ $? -ne 0 ]
+                                then
+	                        		#echo "Changing config file:  ${fullpathorgfn} with following changes:"
+	                                #echo "==============================================================="
+	                                #diff -w ${newfn} ${fullpathorgfn}
+	                                #echo "==============================================================="
+	                                echo "NOTE: Current config file: ${fullpathorgfn} is being saved as ${archivefn}"
+	                                #echo "==============================================================="
+	                                cp ${newfn} ${fullpathorgfn}
+	                            fi
+                        else
+                                echo "ERROR: Unable to make changes to config file: ${fullpathorgfn}"
+                                echo "Exiting ..."
+                                exit 1
+                        fi
+                else
+                        echo "ERROR: Unable to save config file: ${fullpathorgfn} to ${archivefn}"
+                        echo "Exiting ..."
+                        exit 1
+                fi
+        fi
+done
+
+chmod go-rwx ${hive_conf_dir}/xasecure-policymgr-ssl.xml
+chown ${CONFIG_FILE_OWNER} ${hive_conf_dir}/xasecure-policymgr-ssl.xml
+
+exit 0

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/scripts/uninstall.sh
----------------------------------------------------------------------
diff --git a/hive-agent/scripts/uninstall.sh b/hive-agent/scripts/uninstall.sh
new file mode 100644
index 0000000..e27c510
--- /dev/null
+++ b/hive-agent/scripts/uninstall.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+COMPONENT_NAME=hive
+CFG_DIR=/etc/${COMPONENT_NAME}/conf
+XASECURE_ROOT=/etc/xasecure/${COMPONENT_NAME}
+BACKUP_TYPE=pre
+CUR_VERSION_FILE=${XASECURE_ROOT}/.current_version
+CUR_CFG_DIR_FILE=${XASECURE_ROOT}/.config_dir
+if [ -f ${CUR_VERSION_FILE} ]
+then
+	XASECURE_VERSION=`cat ${CUR_VERSION_FILE}`
+	PRE_INSTALL_CONFIG=${XASECURE_ROOT}/${BACKUP_TYPE}-${XASECURE_VERSION}
+	dt=`date '+%Y%m%d%H%M%S'`
+	if [ -d "${PRE_INSTALL_CONFIG}" ]
+	then
+		if [ -f ${CUR_CFG_DIR_FILE} ] 
+		then
+			CFG_DIR=`cat ${CUR_CFG_DIR_FILE}`
+		fi 
+		[ -d ${CFG_DIR} ] && mv ${CFG_DIR} ${CFG_DIR}-${dt}
+		( cd ${PRE_INSTALL_CONFIG} ; find . -print | cpio -pdm ${CFG_DIR} )
+		[ -f ${CUR_VERSION_FILE} ] && mv ${CUR_VERSION_FILE} ${CUR_VERSION_FILE}-uninstalled-${dt}
+		echo "XASecure version - ${XASECURE_VERSION} has been uninstalled successfully."
+	else
+		echo "ERROR: Unable to find pre-install configuration directory: [${PRE_INSTALL_CONFIG}]"
+		exit 1
+	fi
+else
+	cd ${CFG_DIR}
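+	# Collect the base names of the archived configs (".<name>.<timestamp>") saved by
+	# install.sh, skipping the ".<name>-new.<timestamp>" intermediates and prior
+	# uninstall backups, so each original file can be restored below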
+	saved_files=`find . -type f -name '.*' |  sort | grep -v -- '-new.' | grep '[0-9]*$' | grep -v -- '-[0-9]*$' | sed -e 's:\.[0-9]*$::' | sed -e 's:^./::' | sort -u`
+	dt=`date '+%Y%m%d%H%M%S'`
+	if [ "${saved_files}" != "" ]
+	then
+	        for f in ${saved_files}
+	        do
+	                oldf=`ls ${f}.[0-9]* | sort | head -1`
+	                if [ -f "${oldf}" ]
+	                then
+	                        nf=`echo ${f} | sed -e 's:^\.::'`
+	                        if [ -f "${nf}" ]
+	                        then
+	                                echo "+cp -p ${nf} .${nf}-${dt}"
+	                                cp -p ${nf} .${nf}-${dt}
+	                                echo "+cp ${oldf} ${nf}"
+	                                cp ${oldf} ${nf}
+	                        else
+	                                echo "ERROR: ${nf} not found to back up; restoring the old file anyway."
+	                                echo "+cp -p ${oldf} ${nf}"
+	                                cp -p ${oldf} ${nf}
+	                        fi
+	                fi
+	        done
+	        echo "XASecure configuration has been uninstalled successfully."
+	fi
+fi
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/hive-agent/src/main/java/com/xasecure/authorization/hive/authorizer/XaSecureHiveAuthorizer.java
----------------------------------------------------------------------
diff --git a/hive-agent/src/main/java/com/xasecure/authorization/hive/authorizer/XaSecureHiveAuthorizer.java b/hive-agent/src/main/java/com/xasecure/authorization/hive/authorizer/XaSecureHiveAuthorizer.java
new file mode 100644
index 0000000..eb8bd62
--- /dev/null
+++ b/hive-agent/src/main/java/com/xasecure/authorization/hive/authorizer/XaSecureHiveAuthorizer.java
@@ -0,0 +1,466 @@
+package com.xasecure.authorization.hive.authorizer;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactory;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivObjectActionType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.xasecure.audit.model.EnumRepositoryType;
+import com.xasecure.audit.model.HiveAuditEvent;
+import com.xasecure.audit.provider.AuditProviderFactory;
+import com.xasecure.authorization.hadoop.config.XaSecureConfiguration;
+import com.xasecure.authorization.hadoop.constants.XaSecureHadoopConstants;
+import com.xasecure.authorization.hive.XaHiveAccessContext;
+import com.xasecure.authorization.hive.XaHiveAccessVerifier;
+import com.xasecure.authorization.hive.XaHiveAccessVerifierFactory;
+import com.xasecure.authorization.hive.XaHiveObjectAccessInfo;
+import com.xasecure.authorization.hive.XaHiveObjectAccessInfo.HiveAccessType;
+import com.xasecure.authorization.hive.XaHiveObjectAccessInfo.HiveObjectType;
+import com.xasecure.authorization.utils.StringUtil;
+
+public class XaSecureHiveAuthorizer extends XaSecureHiveAuthorizerBase {
+	private static final Log LOG = LogFactory.getLog(XaSecureHiveAuthorizer.class) ; 
+
+	private static final String XaSecureModuleName =  XaSecureConfiguration.getInstance().get(XaSecureHadoopConstants.AUDITLOG_XASECURE_MODULE_ACL_NAME_PROP , XaSecureHadoopConstants.DEFAULT_XASECURE_MODULE_ACL_NAME) ;
+	private static final String repositoryName     = XaSecureConfiguration.getInstance().get(XaSecureHadoopConstants.AUDITLOG_REPOSITORY_NAME_PROP);
+
+	private XaHiveAccessVerifier mHiveAccessVerifier = null ;
+
+
+	public XaSecureHiveAuthorizer(HiveMetastoreClientFactory metastoreClientFactory,
+								  HiveConf                   hiveConf,
+								  HiveAuthenticationProvider hiveAuthenticator) {
+		super(metastoreClientFactory, hiveConf, hiveAuthenticator);
+
+		LOG.debug("XaSecureHiveAuthorizer.XaSecureHiveAuthorizer()");
+
+		mHiveAccessVerifier = XaHiveAccessVerifierFactory.getInstance() ;
+	}
+
+
+	@Override
+	public void checkPrivileges(HiveOperationType         hiveOpType,
+								List<HivePrivilegeObject> inputHObjs,
+							    List<HivePrivilegeObject> outputHObjs,
+							    HiveAuthzContext          context)
+		      throws HiveAuthzPluginException, HiveAccessControlException {
+
+		if(LOG.isDebugEnabled()) {
+			LOG.debug(toString(hiveOpType, inputHObjs, outputHObjs, context));
+		}
+
+		UserGroupInformation ugi =  this.getCurrentUserGroupInfo();
+
+		List<XaHiveObjectAccessInfo> objAccessList = getObjectAccessInfo(hiveOpType, inputHObjs, outputHObjs, context);
+
+		for(XaHiveObjectAccessInfo objAccessInfo : objAccessList) {
+			boolean ret = mHiveAccessVerifier.isAccessAllowed(ugi, objAccessInfo);
+
+			if(! ret) {
+				if(mHiveAccessVerifier.isAudited(objAccessInfo)) {
+					logAuditEvent(ugi, objAccessInfo, false);
+				}
+				
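+				// "Deined" is the accessor's actual (misspelled) name in XaHiveObjectAccessInfo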
+				String deniedObjectName = objAccessInfo.getDeinedObjectName();
+				
+				if(StringUtil.isEmpty(deniedObjectName)) {
+					deniedObjectName = objAccessInfo.getObjectName();
+				}
+
+				throw new HiveAccessControlException(String.format("Permission denied: user [%s] does not have [%s] privilege on [%s]",
+													 ugi.getShortUserName(), objAccessInfo.getAccessType().name(), deniedObjectName));
+			}
+		}
+
+		// access is allowed; audit all accesses
+		for(XaHiveObjectAccessInfo objAccessInfo : objAccessList) {
+			if(mHiveAccessVerifier.isAudited(objAccessInfo)) {
+				logAuditEvent(ugi, objAccessInfo, true);
+			}
+		}
+	}
+	
+	private List<XaHiveObjectAccessInfo> getObjectAccessInfo(HiveOperationType         hiveOpType,
+														   List<HivePrivilegeObject> inputsHObjs,
+														   List<HivePrivilegeObject> outputHObjs,
+														   HiveAuthzContext          context) {
+		List<XaHiveObjectAccessInfo> ret = new ArrayList<XaHiveObjectAccessInfo>();
+
+		if(inputsHObjs != null) {
+			for(HivePrivilegeObject hiveObj : inputsHObjs) {
+				XaHiveObjectAccessInfo hiveAccessObj = getObjectAccessInfo(hiveOpType, hiveObj, context, true);
+				
+				if(hiveAccessObj != null && !ret.contains(hiveAccessObj)) {
+					ret.add(hiveAccessObj);
+				}
+			}
+		}
+
+		if(outputHObjs != null) {
+			for(HivePrivilegeObject hiveObj : outputHObjs) {
+				XaHiveObjectAccessInfo hiveAccessObj = getObjectAccessInfo(hiveOpType, hiveObj, context, false);
+				
+				if(hiveAccessObj != null && !ret.contains(hiveAccessObj)) {
+					ret.add(hiveAccessObj);
+				}
+			}
+		}
+
+		if(ret.size() == 0 && LOG.isDebugEnabled()) {
+			LOG.debug("getObjectAccessInfo(): no objects found for access check! " + toString(hiveOpType, inputsHObjs, outputHObjs, context));
+		}
+		
+		return ret;
+	}
+
+	private XaHiveObjectAccessInfo getObjectAccessInfo(HiveOperationType hiveOpType, HivePrivilegeObject hiveObj, HiveAuthzContext context, boolean isInput) {
+		XaHiveObjectAccessInfo ret = null;
+
+		HiveObjectType objectType = getObjectType(hiveObj, hiveOpType);
+		HiveAccessType accessType = getAccessType(hiveObj, hiveOpType, isInput);
+		String         operType   = hiveOpType.name();
+		
+		XaHiveAccessContext hiveContext = new XaHiveAccessContext(context.getIpAddress(), context.getClientType().name(), context.getCommandString(), context.getSessionString());
+
+		switch(objectType) {
+			case DATABASE:
+				ret = new XaHiveObjectAccessInfo(operType, hiveContext, accessType, hiveObj.getDbname());
+			break;
+	
+			case TABLE:
+				ret = new XaHiveObjectAccessInfo(operType, hiveContext, accessType, hiveObj.getDbname(), HiveObjectType.TABLE, hiveObj.getObjectName());
+			break;
+	
+			case VIEW:
+				ret = new XaHiveObjectAccessInfo(operType, hiveContext, accessType, hiveObj.getDbname(), HiveObjectType.VIEW, hiveObj.getObjectName());
+			break;
+	
+			case PARTITION:
+				ret = new XaHiveObjectAccessInfo(operType, hiveContext, accessType, hiveObj.getDbname(), HiveObjectType.PARTITION, hiveObj.getObjectName());
+			break;
+	
+			case INDEX:
+				String indexName = "?"; // TODO:
+				ret = new XaHiveObjectAccessInfo(operType, hiveContext, accessType, hiveObj.getDbname(), hiveObj.getObjectName(), HiveObjectType.INDEX, indexName);
+			break;
+	
+			case COLUMN:
+				ret = new XaHiveObjectAccessInfo(operType, hiveContext, accessType, hiveObj.getDbname(), hiveObj.getObjectName(), hiveObj.getColumns());
+			break;
+
+			case FUNCTION:
+				ret = new XaHiveObjectAccessInfo(operType, hiveContext, accessType, hiveObj.getDbname(), HiveObjectType.FUNCTION, hiveObj.getObjectName());
+			break;
+	
+			case NONE:
+			break;
+		}
+
+		return ret;
+	}
+
+	private HiveObjectType getObjectType(HivePrivilegeObject hiveObj, HiveOperationType hiveOpType) {
+		HiveObjectType objType = HiveObjectType.NONE;
+
+		switch(hiveObj.getType()) {
+			case DATABASE:
+				objType = HiveObjectType.DATABASE;
+			break;
+
+			case PARTITION:
+				objType = HiveObjectType.PARTITION;
+			break;
+
+			case TABLE_OR_VIEW:
+				String hiveOpTypeName = hiveOpType.name().toLowerCase();
+				if(hiveOpTypeName.contains("index")) {
+					objType = HiveObjectType.INDEX;
+				} else if(! StringUtil.isEmpty(hiveObj.getColumns())) {
+					objType = HiveObjectType.COLUMN;
+				} else if(hiveOpTypeName.contains("view")) {
+					objType = HiveObjectType.VIEW;
+				} else {
+					objType = HiveObjectType.TABLE;
+				}
+			break;
+
+			case FUNCTION:
+				objType = HiveObjectType.FUNCTION;
+			break;
+
+			case DFS_URI:
+			case LOCAL_URI:
+			case COMMAND_PARAMS:
+			case GLOBAL:
+			break;
+
+			case COLUMN:
+				// Thejas: this value is unused in Hive; the case should not be hit.
+			break;
+		}
+
+		return objType;
+	}
+	
+	private HiveAccessType getAccessType(HivePrivilegeObject hiveObj, HiveOperationType hiveOpType, boolean isInput) {
+		HiveAccessType           accessType       = HiveAccessType.NONE;
+		HivePrivObjectActionType objectActionType = hiveObj.getActionType();
+		
+		if(objectActionType == HivePrivObjectActionType.INSERT ||
+		   objectActionType == HivePrivObjectActionType.INSERT_OVERWRITE) {
+			accessType = HiveAccessType.INSERT;
+		} else {
+			switch(hiveOpType) {
+				case CREATEDATABASE:
+					if(hiveObj.getType() == HivePrivilegeObjectType.DATABASE) {
+						accessType = HiveAccessType.CREATE;
+					}
+				break;
+
+				case CREATEFUNCTION:
+					if(hiveObj.getType() == HivePrivilegeObjectType.FUNCTION) {
+						accessType = HiveAccessType.CREATE;
+					}
+				break;
+
+				case CREATETABLE:
+				case CREATEVIEW:
+				case CREATETABLE_AS_SELECT:
+					if(hiveObj.getType() == HivePrivilegeObjectType.TABLE_OR_VIEW) {
+						accessType = isInput ? HiveAccessType.SELECT : HiveAccessType.CREATE;
+					}
+				break;
+
+				case ALTERDATABASE:
+				case ALTERDATABASE_OWNER:
+				case ALTERINDEX_PROPS:
+				case ALTERINDEX_REBUILD:
+				case ALTERPARTITION_BUCKETNUM:
+				case ALTERPARTITION_FILEFORMAT:
+				case ALTERPARTITION_LOCATION:
+				case ALTERPARTITION_MERGEFILES:
+				case ALTERPARTITION_PROTECTMODE:
+				case ALTERPARTITION_SERDEPROPERTIES:
+				case ALTERPARTITION_SERIALIZER:
+				case ALTERTABLE_ADDCOLS:
+				case ALTERTABLE_ADDPARTS:
+				case ALTERTABLE_ARCHIVE:
+				case ALTERTABLE_BUCKETNUM:
+				case ALTERTABLE_CLUSTER_SORT:
+				case ALTERTABLE_COMPACT:
+				case ALTERTABLE_DROPPARTS:
+				case ALTERTABLE_FILEFORMAT:
+				case ALTERTABLE_LOCATION:
+				case ALTERTABLE_MERGEFILES:
+				case ALTERTABLE_PARTCOLTYPE:
+				case ALTERTABLE_PROPERTIES:
+				case ALTERTABLE_PROTECTMODE:
+				case ALTERTABLE_RENAME:
+				case ALTERTABLE_RENAMECOL:
+				case ALTERTABLE_RENAMEPART:
+				case ALTERTABLE_REPLACECOLS:
+				case ALTERTABLE_SERDEPROPERTIES:
+				case ALTERTABLE_SERIALIZER:
+				case ALTERTABLE_SKEWED:
+				case ALTERTABLE_TOUCH:
+				case ALTERTABLE_UNARCHIVE:
+				case ALTERTBLPART_SKEWED_LOCATION:
+				case ALTERVIEW_PROPERTIES:
+				case ALTERVIEW_RENAME:
+				case DROPVIEW_PROPERTIES:
+					accessType = HiveAccessType.ALTER;
+				break;
+
+				case DELETE:
+				case DROPFUNCTION:
+				case DROPINDEX:
+				case DROPTABLE:
+				case DROPVIEW:
+				case DROPDATABASE:
+					accessType = HiveAccessType.DROP;
+				break;
+
+				case CREATEINDEX:
+					accessType = HiveAccessType.INDEX;
+				break;
+
+				case IMPORT:
+				case LOAD:
+					accessType = HiveAccessType.INSERT;
+				break;
+
+				case LOCKDB:
+				case LOCKTABLE:
+				case UNLOCKDB:
+				case UNLOCKTABLE:
+					accessType = HiveAccessType.LOCK;
+				break;
+
+				case EXPORT:
+				case QUERY:
+					accessType = HiveAccessType.SELECT;
+				break;
+
+				case SWITCHDATABASE:
+					accessType = HiveAccessType.USE;
+				break;
+
+				case TRUNCATETABLE:
+					accessType = HiveAccessType.UPDATE;
+				break;
+
+				case ADD:
+				case ANALYZE_TABLE:
+				case COMPILE:
+				case CREATEMACRO:
+				case CREATEROLE:
+				case DESCDATABASE:
+				case DESCFUNCTION:
+				case DESCTABLE:
+				case DFS:
+				case DROPMACRO:
+				case DROPROLE:
+				case EXPLAIN:
+				case GRANT_PRIVILEGE:
+				case GRANT_ROLE:
+				case MSCK:
+				case REVOKE_PRIVILEGE:
+				case REVOKE_ROLE:
+				case RESET:
+				case SET:
+				case SHOWCOLUMNS:
+				case SHOWCONF:
+				case SHOWDATABASES:
+				case SHOWFUNCTIONS:
+				case SHOWINDEXES:
+				case SHOWLOCKS:
+				case SHOWPARTITIONS:
+				case SHOWTABLES:
+				case SHOW_COMPACTIONS:
+				case SHOW_CREATETABLE:
+				case SHOW_GRANT:
+				case SHOW_ROLES:
+				case SHOW_ROLE_GRANT:
+				case SHOW_ROLE_PRINCIPALS:
+				case SHOW_TABLESTATUS:
+				case SHOW_TBLPROPERTIES:
+				case SHOW_TRANSACTIONS:
+				break;
+			}
+		}
+		
+		return accessType;
+	}
+
+	private void logAuditEvent(UserGroupInformation ugi, XaHiveObjectAccessInfo objAccessInfo, boolean accessGranted) {
+		
+		HiveAuditEvent auditEvent = new HiveAuditEvent();
+
+		try {
+			auditEvent.setAclEnforcer(XaSecureModuleName);
+			auditEvent.setSessionId(objAccessInfo.getContext().getSessionString());
+			auditEvent.setResourceType("@" + StringUtil.toLower(objAccessInfo.getObjectType().name())); // to be consistent with earlier release
+			auditEvent.setAccessType(objAccessInfo.getAccessType().toString());
+			auditEvent.setAction(objAccessInfo.getOperType());
+			auditEvent.setUser(ugi.getShortUserName());
+			auditEvent.setAccessResult((short)(accessGranted ? 1 : 0));
+			auditEvent.setClientIP(objAccessInfo.getContext().getClientIpAddress());
+			auditEvent.setClientType(objAccessInfo.getContext().getClientType());
+			auditEvent.setEventTime(StringUtil.getUTCDate());
+			auditEvent.setRepositoryType(EnumRepositoryType.HIVE);
+			auditEvent.setRepositoryName(repositoryName) ;
+			auditEvent.setRequestData(objAccessInfo.getContext().getCommandString());
+
+			if(! accessGranted && !StringUtil.isEmpty(objAccessInfo.getDeinedObjectName())) {
+				auditEvent.setResourcePath(objAccessInfo.getDeinedObjectName());
+			} else {
+				auditEvent.setResourcePath(objAccessInfo.getObjectName());
+			}
+		
+			if(LOG.isDebugEnabled()) {
+				LOG.debug("logAuditEvent [" + auditEvent + "] - START");
+			}
+
+			AuditProviderFactory.getAuditProvider().log(auditEvent);
+
+			if(LOG.isDebugEnabled()) {
+				LOG.debug("logAuditEvent [" + auditEvent + "] - END");
+			}
+		}
+		catch(Throwable t) {
+			LOG.error("ERROR logEvent [" + auditEvent + "]", t);
+		}
+		
+	}
+	
+	private String toString(HiveOperationType         hiveOpType,
+							List<HivePrivilegeObject> inputHObjs,
+							List<HivePrivilegeObject> outputHObjs,
+							HiveAuthzContext          context) {
+		StringBuilder sb = new StringBuilder();
+		
+		sb.append("'checkPrivileges':{");
+		sb.append("'hiveOpType':").append(hiveOpType);
+
+		sb.append(", 'inputHObjs':[");
+		toString(inputHObjs, sb);
+		sb.append("]");
+
+		sb.append(", 'outputHObjs':[");
+		toString(outputHObjs, sb);
+		sb.append("]");
+
+		sb.append(", 'context':{");
+		sb.append("'clientType':").append(context.getClientType());
+		sb.append(", 'commandString':").append(context.getCommandString());
+		sb.append(", 'ipAddress':").append(context.getIpAddress());
+		sb.append(", 'sessionString':").append(context.getSessionString());
+		sb.append("}");
+
+		sb.append(", 'user':").append(this.getCurrentUserGroupInfo().getUserName());
+		sb.append(", 'groups':[").append(StringUtil.toString(this.getCurrentUserGroupInfo().getGroupNames())).append("]");
+
+		sb.append("}");
+
+		return sb.toString();
+	}
+
+	private StringBuilder toString(List<HivePrivilegeObject> privObjs, StringBuilder sb) {
+		if(privObjs != null && privObjs.size() > 0) {
+			toString(privObjs.get(0), sb);
+			for(int i = 1; i < privObjs.size(); i++) {
+				sb.append(",");
+				toString(privObjs.get(i), sb);
+			}
+		}
+		
+		return sb;
+	}
+
+	private StringBuilder toString(HivePrivilegeObject privObj, StringBuilder sb) {
+		sb.append("'HivePrivilegeObject':{");
+		sb.append("'type':").append(privObj.getType().toString());
+		sb.append(", 'dbName':").append(privObj.getDbname());
+		sb.append(", 'objectType':").append(privObj.getType());
+		sb.append(", 'objectName':").append(privObj.getObjectName());
+		sb.append(", 'columns':[").append(StringUtil.toString(privObj.getColumns())).append("]");
+		sb.append(", 'partKeys':[").append(StringUtil.toString(privObj.getPartKeys())).append("]");
+		sb.append(", 'commandParams':[").append(StringUtil.toString(privObj.getCommandParams())).append("]");
+		sb.append(", 'actionType':").append(privObj.getActionType().toString());
+		sb.append("}");
+
+		return sb;
+	}
+}
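
For context, a usage sketch that is not part of this diff: Hive's authorization V2
loads an authorizer through a HiveAuthorizerFactory, so enabling this class in
HiveServer2 would presumably mean setting hive.security.authorization.enabled=true
and pointing hive.security.authorization.manager at the corresponding factory class
in this package in hive-site.xml; the XmlConfigChanger step in install.sh above is
what applies such property changes to the Hive configuration.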

