ranger-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From omal...@apache.org
Subject [36/44] ARGUS-1. Initial code commit (Selvamohan Neethiraj via omalley)
Date Thu, 14 Aug 2014 20:50:47 GMT
http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/lookup-client/src/main/java/com/xasecure/knox/client/KnoxClient.java
----------------------------------------------------------------------
diff --git a/lookup-client/src/main/java/com/xasecure/knox/client/KnoxClient.java b/lookup-client/src/main/java/com/xasecure/knox/client/KnoxClient.java
new file mode 100644
index 0000000..d9dea71
--- /dev/null
+++ b/lookup-client/src/main/java/com/xasecure/knox/client/KnoxClient.java
@@ -0,0 +1,302 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.xasecure.knox.client;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.codehaus.jackson.JsonNode;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.filter.HTTPBasicAuthFilter;
+
+/**
+ * Thin REST client for the Knox admin "topologies" API: lists topology names
+ * and the service roles deployed in a topology, authenticating with HTTP
+ * basic auth. All lookup failures are logged and surface as empty lists.
+ */
+public class KnoxClient {
+
+	private static final String EXPECTED_MIME_TYPE = "application/json";
+	private static final Log LOG = LogFactory.getLog(KnoxClient.class);
+
+	private String knoxUrl;   // base URL, e.g. https://host:8443/gateway/admin/api/v1/topologies
+	private String userName;  // basic-auth user
+	private String password;  // basic-auth password
+	
+	/*
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/admin
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/sandbox
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/hdp
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/hdp1
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/hdp2
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/xa
+	*/
+	
+	public KnoxClient(String knoxUrl, String userName, String password) {
+		if (LOG.isDebugEnabled()) {
+			LOG.debug("Constructed KnoxClient with knoxUrl: " + knoxUrl +
+					", userName: " + userName);
+		}
+		this.knoxUrl = knoxUrl;
+		this.userName = userName;
+		this.password = password;
+	}
+
+	/**
+	 * Returns the topology names whose name starts with topologyNameMatching
+	 * (null/blank matches all). Never throws; errors are logged and an empty
+	 * list is returned.
+	 */
+	public List<String> getTopologyList(String topologyNameMatching) {
+		// sample URI: https://hdp.example.com:8443/gateway/admin/api/v1/topologies
+		if (LOG.isDebugEnabled()) {
+			LOG.debug("Getting Knox topology list for topologyNameMatching : " +
+					topologyNameMatching);
+		}
+		List<String> topologyList = new ArrayList<String>();
+		if (topologyNameMatching == null || topologyNameMatching.trim().isEmpty()) {
+			topologyNameMatching = "";
+		}
+		try {
+			String jsonString = getJsonResponse(knoxUrl);
+			if (jsonString != null) {
+				// Response is a JSON array of topology objects; collect "name".
+				JsonNode rootNode = new ObjectMapper().readTree(jsonString);
+				Iterator<JsonNode> elements = rootNode.getElements();
+				while (elements.hasNext()) {
+					JsonNode nameNode = elements.next().get("name");
+					String topologyName = (nameNode == null) ? null : nameNode.getValueAsText();
+					if (LOG.isDebugEnabled()) {
+						LOG.debug("Found Knox topologyName: " + topologyName);
+					}
+					if (topologyName != null && topologyName.startsWith(topologyNameMatching)) {
+						topologyList.add(topologyName);
+					}
+				}
+			}
+		} catch (Throwable t) {
+			// Catch Throwable so a lookup failure never propagates to callers.
+			LOG.error("Exception on REST call to: " + knoxUrl, t);
+		}
+		return topologyList;
+	}
+
+	/**
+	 * Returns the service roles of the given topology whose role starts with
+	 * serviceNameMatching (null/blank matches all). Never throws; errors are
+	 * logged and an empty list is returned.
+	 */
+	public List<String> getServiceList(String topologyName, String serviceNameMatching) {
+		// sample URI: .../admin/api/v1/topologies/<topologyName>
+		List<String> serviceList = new ArrayList<String>();
+		if (serviceNameMatching == null || serviceNameMatching.trim().isEmpty()) {
+			serviceNameMatching = "";
+		}
+		try {
+			String jsonString = getJsonResponse(knoxUrl + "/" + topologyName);
+			if (jsonString != null) {
+				JsonNode rootNode = new ObjectMapper().readTree(jsonString);
+				// Guard against a topology document without a "services" array.
+				JsonNode servicesNode = rootNode.get("services");
+				if (servicesNode != null) {
+					Iterator<JsonNode> services = servicesNode.getElements();
+					while (services.hasNext()) {
+						JsonNode roleNode = services.next().get("role");
+						String serviceName = (roleNode == null) ? null : roleNode.getValueAsText();
+						if (LOG.isDebugEnabled()) {
+							LOG.debug("Knox serviceName: " + serviceName);
+						}
+						if (serviceName != null && serviceName.startsWith(serviceNameMatching)) {
+							serviceList.add(serviceName);
+						}
+					}
+				}
+			}
+		} catch (Throwable t) {
+			LOG.error("Exception on REST call to: " + knoxUrl, t);
+		}
+		return serviceList;
+	}
+
+	/**
+	 * Issues an authenticated GET against the given URL and returns the entity
+	 * body on HTTP 200; logs an error and returns null otherwise. Always
+	 * releases the Jersey response and client.
+	 */
+	private String getJsonResponse(String url) {
+		Client client = null;
+		ClientResponse response = null;
+		try {
+			client = Client.create();
+			client.addFilter(new HTTPBasicAuthFilter(userName, password));
+			WebResource webResource = client.resource(url);
+			response = webResource.accept(EXPECTED_MIME_TYPE)
+					.get(ClientResponse.class);
+			if (LOG.isDebugEnabled()) {
+				LOG.debug("Knox REST response: " + response);
+			}
+			if (response == null) {
+				LOG.error("Unable to get a valid response from [" + url + "] - got null response.");
+				return null;
+			}
+			if (response.getStatus() != 200) {
+				LOG.error("Got invalid REST response from: " + url + ", responseStatus: " + response.getStatus());
+				return null;
+			}
+			return response.getEntity(String.class);
+		} finally {
+			if (response != null) {
+				response.close();
+			}
+			if (client != null) {
+				client.destroy();
+			}
+		}
+	}
+
+	/**
+	 * Command-line smoke test: lists every topology and the services in each.
+	 */
+	public static void main(String[] args) {
+
+		if (args.length != 3) {
+			System.err.println("USAGE: java " + KnoxClient.class.getName()
+					+ " knoxUrl userName password");
+			System.exit(1);
+		}
+
+		KnoxClient knoxClient = new KnoxClient(args[0], args[1], args[2]);
+		List<String> topologyList = knoxClient.getTopologyList("");
+		if ((topologyList == null) || topologyList.isEmpty()) {
+			System.out.println("No knox topologies found");
+		} else {
+			for (String topology : topologyList) {
+				System.out.println("Found Topology: " + topology);
+				List<String> serviceList = knoxClient.getServiceList(topology, "");
+				if ((serviceList == null) || serviceList.isEmpty()) {
+					System.out.println("No services found for knox topology: " + topology);
+				} else {
+					for (String service : serviceList) {
+						System.out.println("	Found service for topology: " + service + ", " + topology);
+					}
+				}
+			}
+		}
+	}
+
+	/** Canned sample of the topologies-list JSON, kept for offline testing. */
+	String getKnoxMockResponseTopologies() {
+		
+		// See https://docs.google.com/a/hortonworks.com/document/d/1fSs1xAMP2IeE24TOtywRrFMl81BHG72LeWu-O6WTq78/edit
+		return 		
+				"[" +
+		 		    "{" +
+		 		    	"  \"name\": \"hdp1\", " +
+		 		    	"  \"timestamp\": 1405540981000, " +
+		 		    	" \"href\": \"https://hdp.example.com:8443/gateway/admin/api/v1/topologies/hdp1\",  " +
+		 		    	" \"url\": \"https://hdp.example.com:8443/gateway/hdp1\" " +
+		 		    	"}, " +
+				    "{ " +
+				    	"  \"name\": \"hdp2\", " +
+				    	"  \"timestamp\": 1405540981000, " +
+				    	" \"href\": \"https://hdp.example.com:8443/gateway/admin/api/v1/topologies/hdp2\",  " +
+				    	" \"url\": \"https://hdp.example.com:8443/gateway/hdp2\" " +
+				    	"}" + 
+				"]";
+	}
+	
+	/** Canned sample of a single-topology JSON document, kept for offline testing. */
+	String getKnoxMockResponseTopology() {
+		
+		// See https://docs.google.com/a/hortonworks.com/document/d/1fSs1xAMP2IeE24TOtywRrFMl81BHG72LeWu-O6WTq78/edit
+		return  
+		"{" +
+			"\"name\": \"hdp1\"," + 
+			"\"providers\": [" +
+				"{" + 
+					"\"enabled\": true, " +
+					"\"name\": null, " +
+					"\"params\": {" +
+						"\"main.ldapRealm\": \"org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm\", " +
+						"\"main.ldapRealm.userDnTemplate\": \"uid={0},ou=people,dc=hadoop,dc=apache,dc=org\"," +
+         				"\"main.ldapRealm.contextFactory.url\": \"ldap://hdp.example.com:33389\"," +
+         				"\"main.ldapRealm.contextFactory.authenticationMechanism\": \"simple\"," +
+         				"\"urls./**\": \"authcBasic\"" +
+						"}, " +
+					"\"role\": \"authentication\"" +
+				"}, " +
+				 "{" +
+	                "\"enabled\": true," +
+	                "\"name\": \"Pseudo\"," +
+	               "\"params\": {" + "}," +
+	                "\"role\": \"identity-assertion\"" +
+	            "}, " +
+	            "{" +
+	                    "\"enabled\": false," +
+	                    "\"name\": null," +
+	                    "\"params\": {" + "}," +
+	                    "\"role\": null" +
+	              "}" +
+			"], " +
+			"\"services\": [" +
+				"{" +
+                 	"\"params\": {" + "}," +
+                 	"\"role\": \"KNOXADMIN\"," +
+                 	"\"url\": null" +
+                 	"}," +
+                 "{" +
+                 	"\"params\": {" + "}, " +
+                 	"\"role\": \"WEBHDFS\"," +
+                 	"\"url\": \"http://hdp.example.com:50070/webhdfs\"" +
+                 	"}" +
+                 	"]," +
+             "\"timestamp\": 1405541437000" +
+		"}";
+	}
+	
+}

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/lookup-client/src/main/java/com/xasecure/knox/client/KnoxClientTest.java
----------------------------------------------------------------------
diff --git a/lookup-client/src/main/java/com/xasecure/knox/client/KnoxClientTest.java b/lookup-client/src/main/java/com/xasecure/knox/client/KnoxClientTest.java
new file mode 100644
index 0000000..cd13f29
--- /dev/null
+++ b/lookup-client/src/main/java/com/xasecure/knox/client/KnoxClientTest.java
@@ -0,0 +1,34 @@
+package com.xasecure.knox.client;
+
+
+/**
+ * Ad-hoc manual test driver for {@link KnoxClient}: prints the runtime
+ * classpath, then invokes KnoxClient.main() against a local Knox gateway
+ * using the guest account.
+ */
+public class KnoxClientTest  {
+	
+	/*
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/admin
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/sandbox
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/hdp
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/hdp1
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/hdp2
+	 curl -ivk -u admin:admin-password https://localhost:8443/gateway/admin/api/v1/topologies/xa
+	*/
+	
+	public static void main(String[] args) {
+		System.out.println(System.getProperty("java.class.path"));
+		// Fixed arguments: gateway URL, user, password (see curl examples above).
+		KnoxClient.main(new String[] {
+				"https://localhost:8443/gateway/admin/api/v1/topologies",
+				"guest",
+				"guest-password"
+		});
+	}
+}
+
+// http://hdp.example.com:6080/service/assets/hdfs/resources?dataSourceName=nn1&baseDirectory=%2F
+// http://hdp.example.com:6080/service/assets/knox/resources?dataSourceName=knox1&topology=%2F
+
+// com.xasecure.rest. AssetREST
+// com.xasecure.biz.AssetMgr
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/lookup-client/src/main/java/com/xasecure/knox/client/XaSecureConstants.java
----------------------------------------------------------------------
diff --git a/lookup-client/src/main/java/com/xasecure/knox/client/XaSecureConstants.java b/lookup-client/src/main/java/com/xasecure/knox/client/XaSecureConstants.java
new file mode 100644
index 0000000..191bebb
--- /dev/null
+++ b/lookup-client/src/main/java/com/xasecure/knox/client/XaSecureConstants.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.xasecure.knox.client;
+
+/**
+ * Property-name constants for configuring 2-way SSL on the XaSecure Knox
+ * lookup client (keystore/truststore locations, passwords, types) plus the
+ * JSSE algorithm and context names used to build the SSL context.
+ */
+public class XaSecureConstants {
+	
+	// xasecure 2-way ssl configuration 
+
+	public static final String XASECURE_KNOX_CLIENT_KEY_FILE 						  = "xasecure.knoxclient.ssl.keystore";	
+	// NOTE(review): value "xasecure.knoxclien.tssl..." looks like a typo for
+	// "xasecure.knoxclient.ssl..." (compare the sibling keys). Confirm before
+	// fixing -- existing config files may already rely on the misspelled key.
+	public static final String XASECURE_KNOX_CLIENT_KEY_FILE_PASSWORD				  = "xasecure.knoxclien.tssl.keystore.password";	
+	public static final String XASECURE_KNOX_CLIENT_KEY_FILE_TYPE 					  = "xasecure.knoxclient.ssl.keystore.type";	
+
+	public static final String XASECURE_KNOX_CLIENT_KEY_FILE_TYPE_DEFAULT 			  = "jks";	
+
+	public static final String XASECURE_KNOX_CLIENT_TRUSTSTORE_FILE					  = "xasecure.knoxclient.ssl.truststore";	
+	public static final String XASECURE_KNOX_CLIENT_TRUSTSTORE_FILE_PASSWORD		  = "xasecure.knoxclient.ssl.truststore.password";	
+	public static final String XASECURE_KNOX_CLIENT_TRUSTSTORE_FILE_TYPE			  = "xasecure.knoxclient.ssl.truststore.type";	
+
+	public static final String XASECURE_KNOX_CLIENT_TRUSTSTORE_FILE_TYPE_DEFAULT	  = "jks";	
+	
+	// JSSE KeyManager/TrustManager/SSLContext algorithm names.
+	public static final String XASECURE_SSL_KEYMANAGER_ALGO_TYPE					  = "SunX509" ;
+	public static final String XASECURE_SSL_TRUSTMANAGER_ALGO_TYPE					  = "SunX509" ;
+	public static final String XASECURE_SSL_CONTEXT_ALGO_TYPE						  = "SSL" ;
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/lookup-client/src/scripts/log4j.xml
----------------------------------------------------------------------
diff --git a/lookup-client/src/scripts/log4j.xml b/lookup-client/src/scripts/log4j.xml
new file mode 100644
index 0000000..20bb2db
--- /dev/null
+++ b/lookup-client/src/scripts/log4j.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
+
+<!-- log4j configuration for the lookup-client test scripts: writes INFO and
+     above to a daily-rolling file (audit-test.log). -->
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="true">
+
+   <appender name="logFile" class="org.apache.log4j.DailyRollingFileAppender">
+        <param name="file" value="audit-test.log" />
+        <param name="DatePattern" value="'.'yyyy-MM-dd" />
+        <layout class="org.apache.log4j.PatternLayout">
+		<param name="ConversionPattern" value="%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"/>
+        </layout>
+   </appender>
+
+  <!-- NOTE(review): this console appender is defined but never referenced by
+       <root>; add <appender-ref ref="console"/> below if console output is
+       actually wanted. -->
+  <appender name="console" class="org.apache.log4j.ConsoleAppender"> 
+    <param name="Target" value="System.out"/> 
+    <layout class="org.apache.log4j.PatternLayout"> 
+	<param name="ConversionPattern" value="%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"/>
+    </layout> 
+  </appender> 
+
+  <root> 
+    <priority value ="info" /> 
+    <appender-ref ref="logFile" /> 
+  </root>
+  
+</log4j:configuration>

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/lookup-client/src/scripts/run-audit-test.sh
----------------------------------------------------------------------
diff --git a/lookup-client/src/scripts/run-audit-test.sh b/lookup-client/src/scripts/run-audit-test.sh
new file mode 100755
index 0000000..6725328
--- /dev/null
+++ b/lookup-client/src/scripts/run-audit-test.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+XA_AGENTS_DIR=$HOME/Hortonworks/git/xa-agents-2.1
+
+CONF_DIR=$XA_AGENTS_DIR/conf/hadoop
+HADOOP_LIB_DIR=$XA_AGENTS_DIR/lib/hadoop-hdp-2.0
+JPA_LIB_DIR=$XA_AGENTS_DIR/lib/jpa
+MYSQL_LIB_DIR=$XA_AGENTS_DIR/lib/mysql
+
+cp="$CONF_DIR:$HADOOP_LIB_DIR/*:$JPA_LIB_DIR/*:$MYSQL_LIB_DIR/*:$XA_AGENTS_DIR/dist/xasecure-audit.jar"
+export cp
+
+java -Xmx1024M -Xms1024M -cp "${cp}" com.xasecure.audit.test.TestEvents $*

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/lookup-client/src/scripts/run-hadoop-client.sh
----------------------------------------------------------------------
diff --git a/lookup-client/src/scripts/run-hadoop-client.sh b/lookup-client/src/scripts/run-hadoop-client.sh
new file mode 100644
index 0000000..8ecc36b
--- /dev/null
+++ b/lookup-client/src/scripts/run-hadoop-client.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+case $# in
+4 )
+    java -cp "./dist/*:./lib/hadoop/*:./conf:." com.xasecure.hadoop.client.HadoopFSTester  "${1}" "${2}" "${3}" "${4}" ;;
+* )
+    java -cp "./dist/*:./lib/hadoop/*:./conf:." com.xasecure.hadoop.client.HadoopFSTester   ;;
+esac
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/lookup-client/src/scripts/run-hbase-client.sh
----------------------------------------------------------------------
diff --git a/lookup-client/src/scripts/run-hbase-client.sh b/lookup-client/src/scripts/run-hbase-client.sh
new file mode 100644
index 0000000..97a339e
--- /dev/null
+++ b/lookup-client/src/scripts/run-hbase-client.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+cp="./dist/*:./conf/:.:./lib/hadoop/*:./lib/hive/*:./lib/hbase/*"
+
+case $# in
+2 )
+java ${JOPTS} -cp "${cp}" com.xasecure.hbase.client.HBaseClientTester  "${1}" "${2}" ;;
+3 )
+java  ${JOPTS} -cp "${cp}" com.xasecure.hbase.client.HBaseClientTester  "${1}" "${2}" "${3}" ;;
+4 )
+java ${JOPTS} -cp "${cp}" com.xasecure.hbase.client.HBaseClientTester  "${1}" "${2}" "${3}" "${4}" ;;
+* )
+java ${JOPTS} -cp "${cp}" com.xasecure.hbase.client.HBaseClientTester;;
+esac
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/lookup-client/src/scripts/run-hive-client.sh
----------------------------------------------------------------------
diff --git a/lookup-client/src/scripts/run-hive-client.sh b/lookup-client/src/scripts/run-hive-client.sh
new file mode 100644
index 0000000..c653481
--- /dev/null
+++ b/lookup-client/src/scripts/run-hive-client.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+case $# in
+2 )
+	java -cp "./dist/*:./lib/hadoop/*:./lib/hive/*:./conf:."  com.xasecure.hive.client.HiveClientTester "$1" "${2}"  ;;
+3 )
+	java -cp "./dist/*:./lib/hadoop/*:./lib/hive/*:./conf:."  com.xasecure.hive.client.HiveClientTester "$1" "${2}" "${3}" ;;
+4 )
+	java -cp "./dist/*:./lib/hadoop/*:./lib/hive/*:./conf:."  com.xasecure.hive.client.HiveClientTester "$1" "${2}" "${3}" "${4}" ;;
+5 )
+	java -cp "./dist/*:./lib/hadoop/*:./lib/hive/*:./conf:."  com.xasecure.hive.client.HiveClientTester "$1" "${2}" "${3}" "${4}" "${5}" ;;
+* )
+	java -cp "./dist/*:./lib/hadoop/*:./lib/hive/*:./conf:."  com.xasecure.hive.client.HiveClientTester  ;;
+esac

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/lookup-client/src/scripts/xasecure-audit.properties
----------------------------------------------------------------------
diff --git a/lookup-client/src/scripts/xasecure-audit.properties b/lookup-client/src/scripts/xasecure-audit.properties
new file mode 100644
index 0000000..9989c00
--- /dev/null
+++ b/lookup-client/src/scripts/xasecure-audit.properties
@@ -0,0 +1,16 @@
+# JDBC connection used by the audit DB sink.
+xasecure.audit.jpa.javax.persistence.jdbc.url=jdbc:mysql://localhost:3306/xasecure
+xasecure.audit.jpa.javax.persistence.jdbc.user=xalogger
+# NOTE(review): plaintext DB password in a properties file -- acceptable only
+# for this local test setup; do not ship with real credentials.
+xasecure.audit.jpa.javax.persistence.jdbc.password=xalogger
+xasecure.audit.jpa.javax.persistence.jdbc.driver=com.mysql.jdbc.Driver
+	
+# Master audit switch plus per-sink (log4j, db) enable/async/queue tuning.
+xasecure.audit.is.enabled=true
+xasecure.audit.log4j.is.enabled=false
+xasecure.audit.log4j.is.async=false
+xasecure.audit.log4j.async.max.queue.size=100000
+xasecure.audit.log4j.async.max.flush.interval.ms=30000
+xasecure.audit.db.is.enabled=true
+xasecure.audit.db.is.async=true
+xasecure.audit.db.async.max.queue.size=102400
+xasecure.audit.db.async.resume.queue.size=92400
+xasecure.audit.db.async.max.flush.interval.ms=30000
+xasecure.audit.db.batch.size=100

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 0000000..1f93d7f
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,201 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>com.hortonworks.hadoop.security</groupId>
+  <artifactId>argus</artifactId>
+  <version>3.5.000</version>
+  <description>Security for Enforcing Enterprise Policies</description>
+  <packaging>pom</packaging>
+  <name>Project Argus</name>
+  <prerequisites>
+	<maven>2.2.1</maven>
+  </prerequisites>
+  <modules>
+  <module>agents-audit</module>
+  <module>agents-common</module>
+  <module>agents-cred</module>
+  <module>agents-impl</module>
+  <module>agents-installer</module>
+  <module>credentialbuilder</module>
+  <module>embededwebserver</module>
+  <module>hbase-agent</module>
+  <module>hdfs-agent</module>
+  <module>hive-agent</module>
+  <module>knox-agent</module>
+  <module>lookup-client</module>
+  <module>security-admin</module>
+  <module>ugsync</module>
+  <module>unixauthclient</module>
+  <!--
+  <module>unixauthnative</module>
+  -->
+  <module>unixauthservice</module>
+  <!--
+  <module>winpkg</module>
+  -->
+  </modules>
+  <properties>
+<antlr.version>3.5.2</antlr.version>
+<aopalliance.version>1.0</aopalliance.version>
+<asm.version>3.1</asm.version>
+<c3p0.version>0.9.1.2</c3p0.version>
+<cglib.version>2.2.0-b23</cglib.version>
+<codehaus.jackson.version>1.8.1</codehaus.jackson.version>
+<commons.beanutils.collections.version>1.8.3</commons.beanutils.collections.version>
+<commons.beanutils.core.version>1.8.3</commons.beanutils.core.version>
+<commons.cli.version>1.2</commons.cli.version>
+<commons.codec.version>1.9</commons.codec.version>
+<commons.collections.version>3.2.1</commons.collections.version>
+<commons.compress.version>1.8.1</commons.compress.version>
+<commons.configuration.version>1.10</commons.configuration.version>
+<commons.dbcp.version>1.4</commons.dbcp.version>
+<commons.digester.version>2.1</commons.digester.version>
+<commons.httpclient.version>3.1</commons.httpclient.version>
+<commons.io.version>2.4</commons.io.version>
+<commons.lang.version>2.6</commons.lang.version>
+<commons.logging.version>1.2</commons.logging.version>
+<commons.math.version>2.2</commons.math.version>
+<commons.net.version>3.3</commons.net.version>
+<commons.pool.version>1.6</commons.pool.version>
+<eclipse.jpa.version>2.5.2-M1</eclipse.jpa.version>
+<googlecode.log4jdbc.version>1.2</googlecode.log4jdbc.version>
+<google.guava.version>17.0</google.guava.version>
+<gson.version>2.2.4</gson.version>
+<guava.version>11.0.2</guava.version>
+<hadoop-auth.version>2.2.0</hadoop-auth.version>
+<hadoop-common.version>3.0.0-SNAPSHOT</hadoop-common.version>
+<hadoop.version>2.4.0</hadoop.version>
+<hamcrest.all.version>1.3</hamcrest.all.version>
+<hbase.version>0.96.2-hadoop2</hbase.version>
+<hive.version>0.13.0</hive.version>
+<javassist.version>3.12.1.GA</javassist.version>
+<javax.persistence.version>2.1.0</javax.persistence.version>
+<javax.servlet.version>3.1.0</javax.servlet.version>
+<jericho.html.version>3.3</jericho.html.version>
+<jersey-bundle.version>1.17.1</jersey-bundle.version>
+<junit.version>4.11</junit.version>
+<knox.gateway.version>0.5.0-SNAPSHOT</knox.gateway.version>
+<local.lib.dir>${project.basedir}/../lib/local</local.lib.dir>
+<log4j.version>1.2.17</log4j.version>
+<mysql-connector-java.version>5.1.31</mysql-connector-java.version>
+<owasp-java-html-sanitizer.version>r239</owasp-java-html-sanitizer.version>
+<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+<security-agent-install-dir>hadoop-security/agents</security-agent-install-dir>
+<slf4j-api.version>1.7.5</slf4j-api.version>
+<springframework.security.version>3.1.0.RELEASE</springframework.security.version>
+<springframework.test.version>3.1.1.RELEASE</springframework.test.version>
+<springframework.version>3.1.3.RELEASE</springframework.version>
+<spring-ldap-core.version>1.3.1.RELEASE</spring-ldap-core.version>
+<sun.jersey.bundle.version>1.4</sun.jersey.bundle.version>
+<sun.jersey.core.version>1.4</sun.jersey.core.version>
+<sun.jersey.spring.version>1.4</sun.jersey.spring.version>
+<tomcat.commons.el.version>5.5.23</tomcat.commons.el.version>
+<tomcat.embed.version>7.0.55</tomcat.embed.version>
+<velocity.version>1.7</velocity.version>
+  </properties>
+  <repositories>
+          <repository>
+            <id>apache.snapshots.https</id>
+            <name>Apache Development Snapshot Repository</name>
+            <url>https://repository.apache.org/content/repositories/snapshots</url>
+            <snapshots>
+                <enabled>true</enabled>
+            </snapshots>
+          </repository>
+  </repositories>
+  <build>
+  	<pluginManagement>
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.1</version>
+        <configuration>
+          <source>1.6</source>
+          <target>1.6</target>
+        </configuration>
+      </plugin>
+      <plugin>
+		    <groupId>org.apache.maven.plugins</groupId>
+		    <artifactId>maven-resources-plugin</artifactId>
+		    <version>2.4.3</version>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <version>2.17</version>
+        <configuration>
+          <skipTests>true</skipTests>
+        </configuration>
+      </plugin>
+      <plugin>
+         <artifactId>maven-antrun-plugin</artifactId>
+         <version>1.7</version>
+         <executions>
+           <execution>
+             <phase>process-resources</phase>
+             <configuration>
+               <tasks>
+                  <echo message="${project.version}" file="${project.build.directory}/version" />
+               </tasks>
+             </configuration>
+             <goals>
+               <goal>run</goal>
+             </goals>
+           </execution>
+         </executions>
+      </plugin>
+<!--
+      <plugin>
+        <groupId>com.google.code.maven-replacer-plugin</groupId>
+            <artifactId>maven-replacer-plugin</artifactId>
+            <version>1.4.0</version>
+            <executions>
+                <execution>
+                    <phase>process-sources</phase>
+                    <goals>
+                        <goal>replace</goal>
+                    </goals>
+                </execution>
+            </executions>
+            <configuration>
+                <file>${project.basedir}/templates/version</file>
+                <ignoreMissingFile>true</ignoreMissingFile>
+                <outputFile>${project.build.outputDirectory}/version</outputFile>
+                <replacements>
+                    <replacement>
+                        <token>@build.version@</token>
+                        <value>${project.version}</value>
+                    </replacement>
+                </replacements>
+            </configuration>
+      </plugin>
+-->
+      <plugin>
+         <artifactId>maven-assembly-plugin</artifactId>
+         <version>2.2-beta-5</version>
+         <configuration>
+           <descriptors>
+             <descriptor>src/main/assembly/hdfs-agent.xml</descriptor>
+             <descriptor>src/main/assembly/hive-agent.xml</descriptor>
+             <descriptor>src/main/assembly/hbase-agent.xml</descriptor>
+             <descriptor>src/main/assembly/knox-agent.xml</descriptor>
+             <descriptor>src/main/assembly/admin-web.xml</descriptor>
+             <descriptor>src/main/assembly/usersync.xml</descriptor>
+           </descriptors>
+         </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-release-plugin</artifactId>
+        <version>2.5</version>
+      </plugin>
+	  <!--
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>rpm-maven-plugin</artifactId>
+        <version>2.1-alpha-4</version>
+      </plugin>
+	-->
+    </plugins>
+    </pluginManagement>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/create_dbversion_catalog.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/create_dbversion_catalog.sql b/security-admin/db/create_dbversion_catalog.sql
new file mode 100644
index 0000000..1475da6
--- /dev/null
+++ b/security-admin/db/create_dbversion_catalog.sql
@@ -0,0 +1,9 @@
+-- Catalog of schema versions applied to the xasecure database: one row per
+-- install/patch, with who applied it and when; active='Y' marks live entries.
+create table if not exists x_db_version_h  (
+	id				bigint not null auto_increment primary key,
+	version   		varchar(64) not null,
+	inst_at 	    timestamp not null default current_timestamp,
+	inst_by 	    varchar(256) not null,
+	updated_at      timestamp not null,
+    updated_by      varchar(256) not null,
+	active          ENUM('Y', 'N') default 'Y'
+) ;

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/create_dev_user.sh
----------------------------------------------------------------------
diff --git a/security-admin/db/create_dev_user.sh b/security-admin/db/create_dev_user.sh
new file mode 100755
index 0000000..17543e3
--- /dev/null
+++ b/security-admin/db/create_dev_user.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# 
+# Script to reset mysql database
+#
+
+if [ $# -lt 1 ]; then
+	echo "Usage: $0 <db_root_password> [db_host]"
+	exit 1
+fi
+
+db_root_password=$1
+db_host="localhost"
+if [ "$2" != "" ]; then
+    db_host="$2"
+fi
+
+echo "Creating user  ...  "
+set -x
+mysql -u root  --password=$db_root_password < create_dev_user.sql

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/create_dev_user.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/create_dev_user.sql b/security-admin/db/create_dev_user.sql
new file mode 100644
index 0000000..7bb51d5
--- /dev/null
+++ b/security-admin/db/create_dev_user.sql
@@ -0,0 +1,6 @@
+# DEV ONLY: creates the 'xaadmin' account with a hardcoded weak password and
+# full privileges on all databases WITH GRANT OPTION, reachable from any host.
+# Never run this against a production server.
+create user 'xaadmin'@'%' identified by 'xaadmin';
+GRANT ALL ON *.* TO 'xaadmin'@'localhost' IDENTIFIED BY 'xaadmin';
+grant all privileges on *.* to 'xaadmin'@'%' with grant option;
+grant all privileges on *.* to 'xaadmin'@'localhost' with grant option;
+FLUSH PRIVILEGES;
+

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/create_repo_hbase.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/create_repo_hbase.sql b/security-admin/db/create_repo_hbase.sql
new file mode 100644
index 0000000..d586ff2
--- /dev/null
+++ b/security-admin/db/create_repo_hbase.sql
@@ -0,0 +1,72 @@
+# Replace the following:
+#  %REPOSITORY_NAME%
+#  %REPOSITORY_DESC%
+#  %USERNAME%
+#  %PASSWORD%
+#  %FS_DEFAULT_NAME%
+#  %HADOOP_SECURITY_AUTHORIZATION%
+#  %HADOOP_SECURITY_AUTHENTICATION%
+#  %HADOOP_SECURITY_AUTH_TO_LOCAL%
+#  %DFS_DATANODE_KERBEROS_PRINCIPAL%
+#  %DFS_NAMENODE_KERBEROS_PRINCIPAL%
+#  %DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL%
+#  %HBASE_MASTER_KERBEROS_PRINCIPAL%
+#  %HBASE_RPC_ENGINE%
+#  %HBASE_RPC_PROTECTION%
+#  %HBASE_SECURITY_AUTHENTICATION%
+#  %HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT%
+#  %HBASE_ZOOKEEPER_QUORUM%
+#  %ZOOKEEPER_ZNODE_PARENT%
+#  %COMMON_NAME_FOR_CERTIFICATE%
+#
+# Example:
+#  INSERT INTO x_asset (asset_name, descr, act_status, asset_type, config, create_time, update_time, added_by_id, upd_by_id)
+#   VALUES ('hbasetest', 'hbase test repository', 1, 2, '{\"username\":\"policymgr\",\"password\":\"policymgr\",\"fs.default.name\":\"hdfs://sandbox.hortonworks.com:8020\",\"hadoop.security.authorization\":\"true\",\"hadoop.security.authentication\":\"simple\",\"hadoop.security.auth_to_local\":\"\",\"dfs.datanode.kerberos.principal\":\"\",\"dfs.namenode.kerberos.principal\":\"\",\"dfs.secondary.namenode.kerberos.principal\":\"\",\"hbase.master.kerberos.principal\":\"\",\"hbase.rpc.engine\":\"org.apache.hadoop.hbase.ipc.SecureRpcEngine%\",\"hbase.rpc.protection\":\"PRIVACY\",\"hbase.security.authentication\":\"simple\",\"hbase.zookeeper.property.clientPort\":\"2181\",\"hbase.zookeeper.quorum\":\"sandbox.hortonworks.com\",\"zookeeper.znode.parent\":\"/hbase-unsecure\",\"commonNameForCertificate\":\"\"}', now(), now(), 1, 1);
+#
+
+# create the repository
+INSERT INTO x_asset (asset_name, descr, act_status, asset_type, config, create_time, update_time, added_by_id, upd_by_id)
+ VALUES ('%REPOSITORY_NAME%', '%REPOSITORY_DESC%', 1, 2, '{\"username\":\"%USERNAME%\",\"password\":\"%PASSWORD%\",\"fs.default.name\":\"%FS_DEFAULT_NAME%\",\"hadoop.security.authorization\":\"%HADOOP_SECURITY_AUTHORIZATION%\",\"hadoop.security.authentication\":\"%HADOOP_SECURITY_AUTHENTICATION%\",\"hadoop.security.auth_to_local\":\"%HADOOP_SECURITY_AUTH_TO_LOCAL%\",\"dfs.datanode.kerberos.principal\":\"%DFS_DATANODE_KERBEROS_PRINCIPAL%\",\"dfs.namenode.kerberos.principal\":\"%DFS_NAMENODE_KERBEROS_PRINCIPAL%\",\"dfs.secondary.namenode.kerberos.principal\":\"%DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL%\",\"hbase.master.kerberos.principal\":\"%HBASE_MASTER_KERBEROS_PRINCIPAL%\",\"hbase.rpc.engine\":\"%HBASE_RPC_ENGINE%\",\"hbase.rpc.protection\":\"%HBASE_RPC_PROTECTION%\",\"hbase.security.authentication\":\"%HBASE_SECURITY_AUTHENTICATION%\",\"hbase.zookeeper.property.clientPort\":\"%HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT%\",\"hbase.zookeeper.quorum\":\"%HBASE_ZOOKEEPER_QUORUM%\",\"zookee
 per.znode.parent\":\"%ZOOKEEPER_ZNODE_PARENT%\",\"commonNameForCertificate\":\"%COMMON_NAME_FOR_CERTIFICATE%\"}', now(), now(), 1, 1);
+SELECT @asset_id := id FROM x_asset WHERE asset_name='%REPOSITORY_NAME%' and act_status = 1;
+
+# create default policy to allow access to public
+INSERT INTO x_resource (policy_name, res_name, descr, res_type, asset_id, is_encrypt, is_recursive, res_tables, res_col_fams, res_cols, res_status, table_type, col_type, create_time, update_time, added_by_id, upd_by_id) 
+ VALUES ('default-hbase', '/*/*/*', 'Default policy', 1, @asset_id, 2, 0, '*', '*', '*', 1, 0, 0, now(), now(), 1, 1);
+SELECT @resource_id := id FROM x_resource WHERE policy_name='default-hbase';
+
+DELIMITER //
+DROP PROCEDURE IF EXISTS CreateXAGroup;
+CREATE PROCEDURE CreateXAGroup(in groupName varchar(1024))
+BEGIN
+  DECLARE groupId bigint(20);
+
+  SELECT g.id INTO groupId FROM x_group g WHERE g.group_name = groupName;
+
+  IF groupId IS NULL THEN
+	SELECT CONCAT('Creating group ', groupName);
+    INSERT INTO x_group (group_name, descr, status, group_type, create_time, update_time, added_by_id, upd_by_id) VALUES (groupName, groupName, 0, 1, now(), now(), 1, 1);
+  ELSE
+    SELECT CONCAT('Group ', groupName, ' already exists');
+  END IF;
+END //
+DELIMITER ;
+CALL CreateXAGroup('public');
+DROP PROCEDURE IF EXISTS CreateXAGroup;
+
+SELECT @group_public := id FROM x_group WHERE group_name='public';
+
+SELECT @perm_read   := 2;
+SELECT @perm_write  := 3;
+SELECT @perm_create := 4;
+SELECT @perm_admin  := 6;
+
+# Grant the 'public' group read/write/create/admin on the default policy.
+# NOTE(review): in each INSERT below the 5th value, now(), lines up with the
+# perm_group column in the column list — that looks unintended (perm_group is
+# described elsewhere in this commit as "not used"); confirm against the
+# x_perm_map schema.
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_read, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_write, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_create, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_admin, now(), 0, 1, 1, now(), now(), 1, 1);
+
+# Enable auditing
+INSERT INTO x_audit_map (res_id, audit_type, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, 1, now(), now(), 1, 1);

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/create_repo_hdfs.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/create_repo_hdfs.sql b/security-admin/db/create_repo_hdfs.sql
new file mode 100644
index 0000000..3e5d060
--- /dev/null
+++ b/security-admin/db/create_repo_hdfs.sql
@@ -0,0 +1,64 @@
+# Replace the following:
+#  %REPOSITORY_NAME%
+#  %REPOSITORY_DESC%
+#  %USERNAME%
+#  %PASSWORD%
+#  %FS_DEFAULT_NAME%
+#  %HADOOP_SECURITY_AUTHORIZATION%
+#  %HADOOP_SECURITY_AUTHENTICATION%
+#  %HADOOP_SECURITY_AUTH_TO_LOCAL%
+#  %DFS_DATANODE_KERBEROS_PRINCIPAL%
+#  %DFS_NAMENODE_KERBEROS_PRINCIPAL%
+#  %DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL%
+#  %COMMON_NAME_FOR_CERTIFICATE%
+#
+# Example:
+#  INSERT INTO `x_asset` (asset_name, descr, act_status, asset_type, config, create_time, update_time, added_by_id, upd_by_id)
+#   VALUES ('hdfstest', 'hdfs test repository', 1, 1, '{\"username\":\"policymgr\",\"password\":\"policymgr\",\"fs.default.name\":\"hdfs://sandbox.hortonworks.com:8020\",\"hadoop.security.authorization\":\"true\",\"hadoop.security.authentication\":\"simple\",\"hadoop.security.auth_to_local\":\"\",\"dfs.datanode.kerberos.principal\":\"\",\"dfs.namenode.kerberos.principal\":\"\",\"dfs.secondary.namenode.kerberos.principal\":\"\",\"commonNameForCertificate\":\"\"}', now(), now(), 1, 1);
+#
+
+INSERT INTO `x_asset` (asset_name, descr, act_status, asset_type, config, create_time, update_time, added_by_id, upd_by_id)
+  VALUES ('%REPOSITORY_NAME%', '%REPOSITORY_DESC%', 1 ,1, '{\"username\":\"%USERNAME%\",\"password\":\"%PASSWORD%\",\"fs.default.name\":\"%FS_DEFAULT_NAME%\",\"hadoop.security.authorization\":\"%HADOOP_SECURITY_AUTHORIZATION%\",\"hadoop.security.authentication\":\"%HADOOP_SECURITY_AUTHENTICATION%\",\"hadoop.security.auth_to_local\":\"%HADOOP_SECURITY_AUTH_TO_LOCAL%\",\"dfs.datanode.kerberos.principal\":\"%DFS_DATANODE_KERBEROS_PRINCIPAL%\",\"dfs.namenode.kerberos.principal\":\"%DFS_NAMENODE_KERBEROS_PRINCIPAL%\",\"dfs.secondary.namenode.kerberos.principal\":\"%DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL%\",\"commonNameForCertificate\":\"%COMMON_NAME_FOR_CERTIFICATE%\"}', now(), now(), 1, 1);
+SELECT @asset_id := id FROM x_asset WHERE asset_name='%REPOSITORY_NAME%' and act_status = 1;
+
+# create default policy to allow access to public
+INSERT INTO x_resource (policy_name, res_name, descr, res_type, asset_id, is_encrypt, is_recursive, res_status, table_type, col_type, create_time, update_time, added_by_id, upd_by_id) 
+ VALUES ('default-hdfs', '/', 'Default policy', 1, @asset_id, 2, 1, 1, 0, 0, now(), now(), 1, 1);
+SELECT @resource_id := id FROM x_resource WHERE policy_name='default-hdfs';
+
+DELIMITER //
+DROP PROCEDURE IF EXISTS CreateXAGroup;
+CREATE PROCEDURE CreateXAGroup(in groupName varchar(1024))
+BEGIN
+  DECLARE groupId bigint(20);
+
+  SELECT g.id INTO groupId FROM x_group g WHERE g.group_name = groupName;
+
+  IF groupId IS NULL THEN
+	SELECT CONCAT('Creating group ', groupName);
+    INSERT INTO x_group (group_name, descr, status, group_type, create_time, update_time, added_by_id, upd_by_id) VALUES (groupName, groupName, 0, 1, now(), now(), 1, 1);
+  ELSE
+    SELECT CONCAT('Group ', groupName, ' already exists');
+  END IF;
+END //
+DELIMITER ;
+CALL CreateXAGroup('public');
+DROP PROCEDURE IF EXISTS CreateXAGroup;
+
+SELECT @group_public := id FROM x_group WHERE group_name='public';
+
+SELECT @perm_read    := 2;
+SELECT @perm_write   := 3;
+SELECT @perm_execute := 9;
+SELECT @perm_admin   := 6;
+
+# Grant the 'public' group read/write/execute/admin on the default policy.
+# NOTE(review): the 5th value, now(), lines up with the perm_group column in
+# the column list of each INSERT below — that looks unintended; confirm
+# against the x_perm_map schema.
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_read, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_write, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_execute, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_admin, now(), 0, 1, 1, now(), now(), 1, 1);
+
+# Enable auditing
+INSERT INTO x_audit_map (res_id, audit_type, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, 1, now(), now(), 1, 1);

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/create_repo_hive.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/create_repo_hive.sql b/security-admin/db/create_repo_hive.sql
new file mode 100644
index 0000000..9743512
--- /dev/null
+++ b/security-admin/db/create_repo_hive.sql
@@ -0,0 +1,75 @@
+# Replace the following:
+#  %REPOSITORY_NAME%
+#  %REPOSITORY_DESC%
+#  %USERNAME%
+#  %PASSWORD%
+#  %JDBC_DRIVERCLASSNAME%
+#  %JDBC_URL%
+#  %COMMON_NAME_FOR_CERTIFICATE%
+#
+# Example:
+#  INSERT INTO `x_asset` (asset_name, descr, act_status, asset_type, config, create_time, update_time, added_by_id, upd_by_id)
+#   VALUES ('hivetest', 'hive test repo', 1, 3, '{\"username\":\"policymgr\",\"password\":\"policymgr\",\"jdbc.driverClassName\":\"org.apache.hive.jdbc.HiveDriver\",\"jdbc.url\":\"jdbc:hive2://sandbox.hortonworks.com:10000/default\",\"commonNameForCertificate\":\"\"}', now(), now(), 1, 1);
+#
+
+INSERT INTO `x_asset` (asset_name, descr, act_status, asset_type, config, create_time, update_time, added_by_id, upd_by_id)
+ VALUES ('%REPOSITORY_NAME%', '%REPOSITORY_DESC%', 1, 3, '{\"username\":\"%USERNAME%\",\"password\":\"%PASSWORD%\",\"jdbc.driverClassName\":\"%JDBC_DRIVERCLASSNAME%\",\"jdbc.url\":\"%JDBC_URL%\",\"commonNameForCertificate\":\"%COMMON_NAME_FOR_CERTIFICATE%\"}', now(), now(), 1, 1);
+SELECT @asset_id := id FROM x_asset WHERE asset_name='%REPOSITORY_NAME%' and act_status = 1;
+
+# create default policy to allow access to public
+INSERT INTO x_resource (policy_name, res_name, descr, res_type, asset_id, is_encrypt, is_recursive, res_dbs, res_tables, res_cols, res_status, table_type, col_type, create_time, update_time, added_by_id, upd_by_id) 
+ VALUES ('default-hive', '/*/*/*', 'Default policy', 1, @asset_id, 2, 0, '*', '*', '*', 1, 0, 0, now(), now(), 1, 1);
+SELECT @resource_id := id FROM x_resource WHERE policy_name='default-hive';
+
+DELIMITER //
+DROP PROCEDURE IF EXISTS CreateXAGroup;
+CREATE PROCEDURE CreateXAGroup(in groupName varchar(1024))
+BEGIN
+  DECLARE groupId bigint(20);
+
+  SELECT g.id INTO groupId FROM x_group g WHERE g.group_name = groupName;
+
+  IF groupId IS NULL THEN
+	SELECT CONCAT('Creating group ', groupName);
+    INSERT INTO x_group (group_name, descr, status, group_type, create_time, update_time, added_by_id, upd_by_id) VALUES (groupName, groupName, 0, 1, now(), now(), 1, 1);
+  ELSE
+    SELECT CONCAT('Group ', groupName, ' already exists');
+  END IF;
+END //
+DELIMITER ;
+CALL CreateXAGroup('public');
+DROP PROCEDURE IF EXISTS CreateXAGroup;
+
+SELECT @group_public := id FROM x_group WHERE group_name='public';
+
+SELECT @perm_create := 4;
+SELECT @perm_select := 10;
+SELECT @perm_update := 11;
+SELECT @perm_drop   := 12;
+SELECT @perm_alter  := 13;
+SELECT @perm_index  := 14;
+SELECT @perm_lock   := 15;
+SELECT @perm_all    := 16;
+SELECT @perm_admin  := 6;
+
+
+# Grant the 'public' group all hive permission types on the default policy.
+# NOTE(review): the 5th value, now(), lines up with the perm_group column in
+# the column list of each INSERT below — that looks unintended; confirm
+# against the x_perm_map schema.
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_create, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_select, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_update, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_drop, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_alter, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_index, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_lock, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_all, now(), 0, 1, 1, now(), now(), 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, perm_group, is_recursive, is_wild_card, grant_revoke, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, @group_public, 2, @perm_admin, now(), 0, 1, 1, now(), now(), 1, 1);
+
+# Enable auditing
+INSERT INTO x_audit_map (res_id, audit_type, create_time, update_time, added_by_id, upd_by_id) VALUES (@resource_id, 1, now(), now(), 1, 1);

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/create_repo_knox.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/create_repo_knox.sql b/security-admin/db/create_repo_knox.sql
new file mode 100644
index 0000000..d778d88
--- /dev/null
+++ b/security-admin/db/create_repo_knox.sql
@@ -0,0 +1,245 @@
+#
+# create a demo repository, policy for Knox agent
+#
+# repository              -> x_asset
+# policy                  -> x_resource
+# users, groups in policy -> x_perm_map
+#
+# Replace the following:
+#  %REPOSITORY_NAME% 
+#  %REPOSITORY_DESC%
+#  %USERNAME%
+#  %PASSWORD%
+#  %JDBC_DRIVERCLASSNAME%
+#  %JDBC_URL%
+#  %COMMON_NAME_FOR_CERTIFICATE%
+
+# Create Repository
+# asset_name: repository name
+# descr: repository description
+# act_status: active status: 1 -> active
+# asset_type: asset type: 1 ->hdfs, 2 ->hbase, 3->hive, 4-> knox
+# config: config parameters for repository in json format
+INSERT INTO `x_asset` (
+     asset_name, 
+     descr, 
+     act_status, 
+     asset_type, 
+     config, 
+     create_time, 
+     update_time,
+     added_by_id, 
+     upd_by_id)
+   VALUES (
+    'knoxtest', 
+    'knox test repo', 
+    1, 
+    4, 
+    '{\"knox.admin.user\":\"guest\",\"knox.admin.password\":\"guest-password\",\"knox.url\":\"https://hdp.example.com:8443/gateway/hdp/webhdfs/v1?op=LISTSTATUS\",\"knox.cert.cn\":\"cn=knox\"}', 
+    now(), 
+    now(), 
+    1, 
+    1);
+
+# Create repository
+# asset_name: repository name
+# descr: repository description
+# act_status: active status: 1 -> active
+# asset_type: asset type: 1 ->hdfs, 2 ->hbase, 3->hive, 4-> knox
+# config: config parameters for repository in json format
+# INSERT INTO `x_asset` (
+#   asset_name, 
+#  descr, 
+#  act_status, 
+#  asset_type, 
+#  config, 
+#  create_time, 
+#  update_time, 
+#  added_by_id, 
+#  upd_by_id)
+#VALUES (
+#  '%REPOSITORY_NAME%', 
+#  '%REPOSITORY_DESC%', 
+#  1, 
+#  3, 
+#  '{\"username\":\"%USERNAME%\",\"password\":\"%PASSWORD%\",\"jdbc.driverClassName\":\"%JDBC_DRIVERCLASSNAME%\",\"jdbc.url\":\"%JDBC_URL%\",\"commonNameForCertificate\":\"%COMMON_NAME_FOR_CERTIFICATE%\"}', 
+#  now(), 
+#  now(), 
+#  1, 
+#  1);
+
+SELECT @asset_id := id FROM x_asset WHERE asset_name='%REPOSITORY_NAME%' and act_status = 1;
+
+# create policy example
+# INSERT INTO x_resource (
+#   res_name, 
+#   descr, 
+#   res_type, 
+#   asset_id, 
+#   is_encrypt, 
+#   is_recursive, 
+#   res_dbs, 
+#   res_tables, 
+#   res_cols, 
+#   res_status, 
+#   table_type, 
+#   col_type, 
+#   create_time, 
+#   update_time, 
+#   added_by_id, 
+#   upd_by_id) 
+ #  VALUES ('/*/*/*', 'Default policy', 1, @asset_id, 2, 0, '*', '*', '*', 1, 0, 0, now(), now(), 1, 1);
+
+# create policy to allow access to public
+INSERT INTO x_resource (
+    policy_name,
+	res_name, 
+    descr, 
+    res_type, 
+    asset_id, 
+    is_encrypt, 
+    is_recursive, 
+    res_dbs, 
+    res_tables, 
+    res_cols, 
+    res_status, 
+    table_type, 
+    col_type, 
+    create_time, 
+    update_time, 
+    added_by_id, 
+    upd_by_id) 
+VALUES (
+    'default-knox', 
+    '/*/*/*', 
+    'Default policy', 
+    1, 
+    @asset_id, 
+    2, 
+    0, 
+    '*', 
+    '*', 
+    '*', 
+    1, 
+    0, 
+    0, 
+    now(), 
+    now(), 
+    1, 
+    1);
+
+SELECT @resource_id := id FROM x_resource WHERE policy_name='default-knox';
+
+
+DELIMITER //
+# Use IF EXISTS so the drop does not error on a fresh database where the
+# procedure is absent; this also matches the hbase/hdfs/hive repo scripts.
+DROP PROCEDURE IF EXISTS CreateXAGroup;
+# Create the named group in x_group if it does not already exist.
+CREATE PROCEDURE CreateXAGroup(in groupName varchar(1024))
+BEGIN
+   DECLARE groupId bigint(20);
+
+   SELECT g.id INTO groupId FROM x_group g WHERE g.group_name = groupName;
+
+   IF groupId IS NULL THEN
+      INSERT INTO x_group (
+          group_name, 
+          descr, 
+          status, 
+          group_type, 
+          group_src, 
+          create_time, 
+          update_time, 
+          added_by_id, 
+          upd_by_id) 
+      VALUES (
+          groupName, 
+          groupName, 
+          0, 
+          1, 
+          0, 
+          now(), 
+          now(), 
+          1, 
+          1);
+   END IF;
+END //
+
+
+DELIMITER ;
+CALL CreateXAGroup('public');
+# Drop the helper after use, as the other create_repo_* scripts do.
+DROP PROCEDURE IF EXISTS CreateXAGroup;
+
+SELECT @group_public := id FROM x_group WHERE group_name='public';
+
+SELECT @perm_create := 4;
+SELECT @perm_select := 10;
+SELECT @perm_update := 11;
+SELECT @perm_drop   := 12;
+SELECT @perm_alter  := 13;
+SELECT @perm_index  := 14;
+SELECT @perm_lock   := 15;
+SELECT @perm_all    := 16;
+SELECT @perm_admin  := 6;
+
+
+# add permitted users, groups to policy
+# res_id: policy id
+# perm_type: read | write | execute | admin etc
+# perm_for: user | group
+# user_id: user id
+# group_id: group id
+# perm_group: not used
+INSERT INTO x_perm_map (
+    res_id, 
+    group_id, 
+    perm_for, 
+    perm_type, 
+    is_recursive, 
+    is_wild_card, 
+    grant_revoke) 
+  VALUES (
+    @resource_id, 
+    @group_public, 
+    2, 
+    @perm_create, 
+    0, 
+    1, 
+    1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, is_recursive, is_wild_card, grant_revoke) 
+                VALUES (@resource_id, @group_public, 2, @perm_select, 0, 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, is_recursive, is_wild_card, grant_revoke) 
+                VALUES (@resource_id, @group_public, 2, @perm_update, 0, 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, is_recursive, is_wild_card, grant_revoke) 
+                VALUES (@resource_id, @group_public, 2, @perm_drop, 0, 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, is_recursive, is_wild_card, grant_revoke) 
+                VALUES (@resource_id, @group_public, 2, @perm_alter, 0, 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, is_recursive, is_wild_card, grant_revoke) 
+                VALUES (@resource_id, @group_public, 2, @perm_index, 0, 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, is_recursive, is_wild_card, grant_revoke) 
+                VALUES (@resource_id, @group_public, 2, @perm_lock, 0, 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, is_recursive, is_wild_card, grant_revoke) 
+                VALUES (@resource_id, @group_public, 2, @perm_all, 0, 1, 1);
+
+INSERT INTO x_perm_map (res_id, group_id, perm_for, perm_type, is_recursive, is_wild_card, grant_revoke) 
+                VALUES (@resource_id, @group_public, 2, @perm_admin, 0, 1, 1);
+
+# Enable auditing
+INSERT INTO x_audit_map (
+    res_id, 
+    audit_type, 
+    create_time, 
+    update_time, 
+    added_by_id, 
+    upd_by_id) 
+  VALUES (
+    @resource_id, 
+    1, 
+    now(), 
+    now(), 
+    1, 
+    1);

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/init/backup_mysql_db.sh
----------------------------------------------------------------------
diff --git a/security-admin/db/init/backup_mysql_db.sh b/security-admin/db/init/backup_mysql_db.sh
new file mode 100755
index 0000000..48b8812
--- /dev/null
+++ b/security-admin/db/init/backup_mysql_db.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# 
+# Script to reset mysql database
+#
+
+if [ $# -lt 3 ]; then
+	echo "Usage: $0 <db_user> <db_password> <db_database> <output file>"
+	exit 1
+fi
+
+db_user=$1
+db_password=$2
+db_database=$3
+outfile=$4
+
+echo "Exporting $db_database ...  "
+mysqldump -u $db_user  --password=$db_password --add-drop-database --database $db_database > $outfile
+echo "Check output file $outfile"

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/init/create_dev_backup_mysql.sh
----------------------------------------------------------------------
diff --git a/security-admin/db/init/create_dev_backup_mysql.sh b/security-admin/db/init/create_dev_backup_mysql.sh
new file mode 100755
index 0000000..149df8c
--- /dev/null
+++ b/security-admin/db/init/create_dev_backup_mysql.sh
@@ -0,0 +1 @@
+./backup_mysql_db.sh xaadmin xaadmin  xa_db ../xa_db_bare.sql

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/init/create_dev_backup_mysql_loaded.sh
----------------------------------------------------------------------
diff --git a/security-admin/db/init/create_dev_backup_mysql_loaded.sh b/security-admin/db/init/create_dev_backup_mysql_loaded.sh
new file mode 100755
index 0000000..5545a9b
--- /dev/null
+++ b/security-admin/db/init/create_dev_backup_mysql_loaded.sh
@@ -0,0 +1 @@
+./backup_mysql_db.sh xaadmin xaadmin  xa_db ../xa_db.sql

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/init/create_xa_core_db.sh
----------------------------------------------------------------------
diff --git a/security-admin/db/init/create_xa_core_db.sh b/security-admin/db/init/create_xa_core_db.sh
new file mode 100755
index 0000000..55e9cbe
--- /dev/null
+++ b/security-admin/db/init/create_xa_core_db.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+# 
+# Export (dump) the xa_db core database to ../xa_core_db.sql using the
+# hardcoded dev credentials below. Note: despite the old header comment,
+# this script does not reset anything — it only runs mysqldump.
+#
+
+
+db_user=xaadmin
+db_password=xaadmin
+db_database=xa_db
+outfile=../xa_core_db.sql
+
+echo "Exporting $db_database ...  "
+mysqldump -u $db_user  --password=$db_password  $db_database > $outfile
+echo "Check output file $outfile"

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/init/mysql_seed_data.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/init/mysql_seed_data.sql b/security-admin/db/init/mysql_seed_data.sql
new file mode 100644
index 0000000..1a52ded
--- /dev/null
+++ b/security-admin/db/init/mysql_seed_data.sql
@@ -0,0 +1,25 @@
+insert into x_portal_user (
+       CREATE_TIME, UPDATE_TIME, 
+       FIRST_NAME, LAST_NAME, PUB_SCR_NAME, 
+       LOGIN_ID, PASSWORD, EMAIL, STATUS
+) values (
+  	 now(), now(), 
+	 'Admin', '', 'Admin', 
+	 'admin', 'ceb4f32325eda6142bd65215f4c0f371', '', 1
+);
+SET @user_id:= last_insert_id();
+
+insert into x_portal_user_role (
+       CREATE_TIME, UPDATE_TIME, 
+       USER_ID, USER_ROLE, STATUS
+) values (
+  	 now(), now(), 
+	 @user_id, 'ROLE_SYS_ADMIN', 1
+);
+SET @user_role_id:= last_insert_id();
+
+
+
+DROP TABLE IF EXISTS `vx_trx_log`;
+DROP VIEW IF EXISTS `vx_trx_log`;
+CREATE VIEW `vx_trx_log` AS select `x_trx_log`.`id` AS `id`,`x_trx_log`.`create_time` AS `create_time`,`x_trx_log`.`update_time` AS `update_time`,`x_trx_log`.`added_by_id` AS `added_by_id`,`x_trx_log`.`upd_by_id` AS `upd_by_id`,`x_trx_log`.`class_type` AS `class_type`,`x_trx_log`.`object_id` AS `object_id`,`x_trx_log`.`parent_object_id` AS `parent_object_id`,`x_trx_log`.`parent_object_class_type` AS `parent_object_class_type`,`x_trx_log`.`attr_name` AS `attr_name`,`x_trx_log`.`parent_object_name` AS `parent_object_name`,`x_trx_log`.`object_name` AS `object_name`,`x_trx_log`.`prev_val` AS `prev_val`,`x_trx_log`.`new_val` AS `new_val`,`x_trx_log`.`trx_id` AS `trx_id`,`x_trx_log`.`action` AS `action`,`x_trx_log`.`sess_id` AS `sess_id`,`x_trx_log`.`req_id` AS `req_id`,`x_trx_log`.`sess_type` AS `sess_type` from `x_trx_log` group by `x_trx_log`.`trx_id`

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/init/reset_db.sh
----------------------------------------------------------------------
diff --git a/security-admin/db/init/reset_db.sh b/security-admin/db/init/reset_db.sh
new file mode 100755
index 0000000..df02857
--- /dev/null
+++ b/security-admin/db/init/reset_db.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Drop, recreate, and re-initialize a mysql database from schema_mysql.sql.
+#
+
+# Require the three mandatory arguments. The optional fourth [db_host] is
+# advertised in usage but was previously rejected by an exact "-ne 3" check.
+if [ $# -lt 3 ]; then
+	echo "Usage: $0 <db_user> <db_password> <db_database> [db_host]"
+	exit 1
+fi
+
+db_user=$1
+db_password=$2
+db_database=$3
+# Honor the optional [db_host] argument (default: localhost).
+db_host=${4:-localhost}
+
+set -x
+# First drop the database and recreate it
+echo "y" | mysqladmin -u "$db_user" -p"$db_password" -h "$db_host" drop "$db_database"
+mysqladmin -u "$db_user" -p"$db_password" -h "$db_host" create "$db_database"
+
+# Create the schema
+mysql -u "$db_user" -p"$db_password" -h "$db_host" "$db_database" < schema_mysql.sql
+

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/init/reset_db_with_seed.sh
----------------------------------------------------------------------
diff --git a/security-admin/db/init/reset_db_with_seed.sh b/security-admin/db/init/reset_db_with_seed.sh
new file mode 100755
index 0000000..8730693
--- /dev/null
+++ b/security-admin/db/init/reset_db_with_seed.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Drop, recreate, and re-initialize a mysql database from schema_mysql.sql,
+# then load the seed users from mysql_seed_data.sql.
+#
+
+# Require the three mandatory arguments. The optional fourth [db_host] is
+# advertised in usage but was previously rejected by an exact "-ne 3" check.
+if [ $# -lt 3 ]; then
+	echo "Usage: $0 <db_user> <db_password> <db_database> [db_host]"
+	exit 1
+fi
+
+db_user=$1
+db_password=$2
+db_database=$3
+# Honor the optional [db_host] argument (default: localhost).
+db_host=${4:-localhost}
+
+set -x
+# First drop the database and recreate it
+echo "y" | mysqladmin -u "$db_user" -p"$db_password" -h "$db_host" drop "$db_database"
+mysqladmin -u "$db_user" -p"$db_password" -h "$db_host" create "$db_database"
+
+# Create the schema
+mysql -u "$db_user" -p"$db_password" -h "$db_host" "$db_database" < schema_mysql.sql
+
+# Add seed users
+mysql -u "$db_user" -p"$db_password" -h "$db_host" "$db_database" < mysql_seed_data.sql
+

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/init/schema_mysql.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/init/schema_mysql.sql b/security-admin/db/init/schema_mysql.sql
new file mode 100644
index 0000000..2ad328f
--- /dev/null
+++ b/security-admin/db/init/schema_mysql.sql
@@ -0,0 +1,475 @@
+
+-- MySQL schema for the XA Secure / Argus security-admin database (fresh install).
+-- FK and unique checks are disabled while the tables are (re)created and are
+-- restored at the end of the script.
+SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;
+SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
+SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='TRADITIONAL';
+
+-- Every table repeats the same audit columns: create_time/update_time plus
+-- added_by_id/upd_by_id, both referencing x_portal_user(id) (FKs added below).
+drop table if exists x_db_base;
+create table x_db_base (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_auth_sess;
+create table x_auth_sess (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	login_id VARCHAR  (767)  NOT NULL,
+	user_id BIGINT  ,
+	ext_sess_id VARCHAR  (512) ,
+	auth_time DATETIME   NOT NULL,
+	auth_status INT   NOT NULL DEFAULT 0,
+	auth_type INT   NOT NULL DEFAULT 0,
+	auth_provider INT   NOT NULL DEFAULT 0,
+	device_type INT   NOT NULL DEFAULT 0,
+	req_ip VARCHAR  (48)  NOT NULL,
+	req_ua VARCHAR  (1024) ,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_portal_user;
+create table x_portal_user (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	first_name VARCHAR  (1022) ,
+	last_name VARCHAR  (1022) ,
+	pub_scr_name VARCHAR  (2048) ,
+	login_id VARCHAR  (767) ,
+	password VARCHAR  (512)  NOT NULL,
+	email VARCHAR  (512) ,
+	status INT   NOT NULL DEFAULT 0,
+	user_src INT   NOT NULL DEFAULT 0,
+	notes VARCHAR  (4000) ,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_portal_user_role;
+create table x_portal_user_role (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	user_id BIGINT   NOT NULL,
+	user_role VARCHAR  (128) ,
+	status INT   NOT NULL DEFAULT 0,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_asset;
+create table x_asset (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	asset_name VARCHAR  (1024)  NOT NULL,
+	descr VARCHAR  (4000)  NOT NULL,
+	act_status INT   NOT NULL DEFAULT 0,
+	asset_type INT   NOT NULL DEFAULT 0,
+	config TEXT  ,
+	sup_native TINYINT  (1)  NOT NULL DEFAULT 0,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_resource;
+create table x_resource (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	res_name VARCHAR  (4000) ,
+	descr VARCHAR  (4000) ,
+	res_type INT   NOT NULL DEFAULT 0,
+	asset_id BIGINT   NOT NULL,
+	parent_id BIGINT  ,
+	parent_path VARCHAR  (4000) ,
+	is_encrypt INT   NOT NULL DEFAULT 0,
+	is_recursive INT   NOT NULL DEFAULT 0,
+	res_group VARCHAR  (1024) ,
+	res_dbs TEXT  ,
+	res_tables TEXT  ,
+	res_col_fams TEXT  ,
+	res_cols TEXT  ,
+	res_udfs TEXT  ,
+	res_status INT   NOT NULL DEFAULT 1,
+	table_type INT   NOT NULL DEFAULT 0,
+	col_type INT   NOT NULL DEFAULT 0,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_cred_store;
+create table x_cred_store (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	store_name VARCHAR  (1024)  NOT NULL,
+	descr VARCHAR  (4000)  NOT NULL,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_group;
+create table x_group (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	group_name VARCHAR  (1024)  NOT NULL,
+	descr VARCHAR  (4000)  NOT NULL,
+	status INT   NOT NULL DEFAULT 0,
+	group_type INT   NOT NULL DEFAULT 0,
+	cred_store_id BIGINT  ,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_user;
+create table x_user (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	user_name VARCHAR  (1024)  NOT NULL,
+	descr VARCHAR  (4000)  NOT NULL,
+	status INT   NOT NULL DEFAULT 0,
+	cred_store_id BIGINT  ,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Membership tables: x_group_users maps users into groups, x_group_groups
+-- nests groups inside groups.
+drop table if exists x_group_users;
+create table x_group_users (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	group_name VARCHAR  (1024)  NOT NULL,
+	p_group_id BIGINT  ,
+	user_id BIGINT  ,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_group_groups;
+create table x_group_groups (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	group_name VARCHAR  (1024)  NOT NULL,
+	p_group_id BIGINT  ,
+	group_id BIGINT  ,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_perm_map;
+create table x_perm_map (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	perm_group VARCHAR  (1024) ,
+	res_id BIGINT  ,
+	group_id BIGINT  ,
+	user_id BIGINT  ,
+	perm_for INT   NOT NULL DEFAULT 0,
+	perm_type INT   NOT NULL DEFAULT 0,
+	is_recursive INT   NOT NULL DEFAULT 0,
+	is_wild_card TINYINT  (1)  NOT NULL DEFAULT 1,
+	grant_revoke TINYINT  (1)  NOT NULL DEFAULT 1,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_audit_map;
+create table x_audit_map (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	res_id BIGINT  ,
+	group_id BIGINT  ,
+	user_id BIGINT  ,
+	audit_type INT   NOT NULL DEFAULT 0,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_policy_export_audit;
+create table x_policy_export_audit (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	client_ip VARCHAR  (255)  NOT NULL,
+	agent_id VARCHAR  (255) ,
+	req_epoch BIGINT   NOT NULL,
+	last_updated DATETIME  ,
+	repository_name VARCHAR  (1024) ,
+	exported_json TEXT  ,
+	http_ret_code INT   NOT NULL DEFAULT 0,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists x_trx_log;
+create table x_trx_log (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	class_type INT   NOT NULL DEFAULT 0,
+	object_id BIGINT  ,
+	parent_object_id BIGINT  ,
+	parent_object_class_type INT   NOT NULL DEFAULT 0,
+	parent_object_name VARCHAR  (1024) ,
+	object_name VARCHAR  (1024) ,
+	attr_name VARCHAR  (255) ,
+	prev_val VARCHAR  (1024) ,
+	new_val VARCHAR  (1024) ,
+	trx_id VARCHAR  (1024) ,
+	action VARCHAR  (255) ,
+	sess_id VARCHAR  (512) ,
+	req_id VARCHAR  (30) ,
+	sess_type VARCHAR  (30) ,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+drop table if exists xa_access_audit;
+create table xa_access_audit (
+	id BIGINT   NOT NULL AUTO_INCREMENT,
+	create_time DATETIME  ,
+	update_time DATETIME  ,
+	added_by_id BIGINT  ,
+	upd_by_id BIGINT  ,
+	audit_type INT   NOT NULL DEFAULT 0,
+	access_result INT   DEFAULT 0,
+	access_type VARCHAR  (255) ,
+	acl_enforcer VARCHAR  (255) ,
+	agent_id VARCHAR  (255) ,
+	client_ip VARCHAR  (255) ,
+	client_type VARCHAR  (255) ,
+	policy_id BIGINT   DEFAULT 0,
+	repo_name VARCHAR  (255) ,
+	repo_type INT   DEFAULT 0,
+	result_reason VARCHAR  (255) ,
+	session_id VARCHAR  (255) ,
+	event_time DATETIME  ,
+	request_user VARCHAR  (255) ,
+	action VARCHAR  (2000) ,
+	request_data VARCHAR  (2000) ,
+	resource_path VARCHAR  (2000) ,
+	resource_type VARCHAR  (255) ,
+	PRIMARY KEY(id)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+
+-- Foreign-key constraints, kept separate from CREATE TABLE so the creation
+-- order above does not matter while FOREIGN_KEY_CHECKS is off.
+ALTER TABLE x_db_base ADD (
+  CONSTRAINT x_db_base_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_db_base ADD (
+  CONSTRAINT x_db_base_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_auth_sess ADD (
+  CONSTRAINT x_auth_sess_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_auth_sess ADD (
+  CONSTRAINT x_auth_sess_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_auth_sess ADD (
+  CONSTRAINT x_auth_sess_FK_user_id FOREIGN KEY (user_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_portal_user ADD (
+  CONSTRAINT x_portal_user_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_portal_user ADD (
+  CONSTRAINT x_portal_user_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_portal_user_role ADD (
+  CONSTRAINT x_portal_user_role_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_portal_user_role ADD (
+  CONSTRAINT x_portal_user_role_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_portal_user_role ADD (
+  CONSTRAINT x_portal_user_role_FK_user_id FOREIGN KEY (user_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_asset ADD (
+  CONSTRAINT x_asset_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_asset ADD (
+  CONSTRAINT x_asset_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_resource ADD (
+  CONSTRAINT x_resource_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_resource ADD (
+  CONSTRAINT x_resource_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_resource ADD (
+  CONSTRAINT x_resource_FK_asset_id FOREIGN KEY (asset_id)
+    REFERENCES x_asset (id));
+ALTER TABLE x_resource ADD (
+  CONSTRAINT x_resource_FK_parent_id FOREIGN KEY (parent_id)
+    REFERENCES x_resource (id));
+ALTER TABLE x_cred_store ADD (
+  CONSTRAINT x_cred_store_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_cred_store ADD (
+  CONSTRAINT x_cred_store_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_group ADD (
+  CONSTRAINT x_group_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_group ADD (
+  CONSTRAINT x_group_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_group ADD (
+  CONSTRAINT x_group_FK_cred_store_id FOREIGN KEY (cred_store_id)
+    REFERENCES x_cred_store (id));
+ALTER TABLE x_user ADD (
+  CONSTRAINT x_user_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_user ADD (
+  CONSTRAINT x_user_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_user ADD (
+  CONSTRAINT x_user_FK_cred_store_id FOREIGN KEY (cred_store_id)
+    REFERENCES x_cred_store (id));
+ALTER TABLE x_group_users ADD (
+  CONSTRAINT x_group_users_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_group_users ADD (
+  CONSTRAINT x_group_users_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_group_users ADD (
+  CONSTRAINT x_group_users_FK_p_group_id FOREIGN KEY (p_group_id)
+    REFERENCES x_group (id));
+ALTER TABLE x_group_users ADD (
+  CONSTRAINT x_group_users_FK_user_id FOREIGN KEY (user_id)
+    REFERENCES x_user (id));
+ALTER TABLE x_group_groups ADD (
+  CONSTRAINT x_group_groups_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_group_groups ADD (
+  CONSTRAINT x_group_groups_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_group_groups ADD (
+  CONSTRAINT x_group_groups_FK_p_group_id FOREIGN KEY (p_group_id)
+    REFERENCES x_group (id));
+ALTER TABLE x_group_groups ADD (
+  CONSTRAINT x_group_groups_FK_group_id FOREIGN KEY (group_id)
+    REFERENCES x_group (id));
+ALTER TABLE x_perm_map ADD (
+  CONSTRAINT x_perm_map_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_perm_map ADD (
+  CONSTRAINT x_perm_map_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_perm_map ADD (
+  CONSTRAINT x_perm_map_FK_res_id FOREIGN KEY (res_id)
+    REFERENCES x_resource (id));
+ALTER TABLE x_perm_map ADD (
+  CONSTRAINT x_perm_map_FK_group_id FOREIGN KEY (group_id)
+    REFERENCES x_group (id));
+ALTER TABLE x_perm_map ADD (
+  CONSTRAINT x_perm_map_FK_user_id FOREIGN KEY (user_id)
+    REFERENCES x_user (id));
+ALTER TABLE x_audit_map ADD (
+  CONSTRAINT x_audit_map_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_audit_map ADD (
+  CONSTRAINT x_audit_map_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_audit_map ADD (
+  CONSTRAINT x_audit_map_FK_res_id FOREIGN KEY (res_id)
+    REFERENCES x_resource (id));
+ALTER TABLE x_audit_map ADD (
+  CONSTRAINT x_audit_map_FK_group_id FOREIGN KEY (group_id)
+    REFERENCES x_group (id));
+ALTER TABLE x_audit_map ADD (
+  CONSTRAINT x_audit_map_FK_user_id FOREIGN KEY (user_id)
+    REFERENCES x_user (id));
+ALTER TABLE x_policy_export_audit ADD (
+  CONSTRAINT x_policy_export_audit_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_policy_export_audit ADD (
+  CONSTRAINT x_policy_export_audit_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_trx_log ADD (
+  CONSTRAINT x_trx_log_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE x_trx_log ADD (
+  CONSTRAINT x_trx_log_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE xa_access_audit ADD (
+  CONSTRAINT xa_access_audit_FK_added_by_id FOREIGN KEY (added_by_id)
+    REFERENCES x_portal_user (id));
+ALTER TABLE xa_access_audit ADD (
+  CONSTRAINT xa_access_audit_FK_upd_by_id FOREIGN KEY (upd_by_id)
+    REFERENCES x_portal_user (id));
+
+-- Uniqueness of portal-user login id and email (prefix-length index keys).
+ALTER TABLE x_portal_user ADD (
+  CONSTRAINT x_portal_user_UK_login_id UNIQUE (login_id(767)) );
+ALTER TABLE x_portal_user ADD (
+  CONSTRAINT x_portal_user_UK_email UNIQUE (email(512)) );
+
+-- Secondary indexes, mostly on create_time/update_time for time-range scans.
+ALTER TABLE x_db_base ADD (INDEX x_db_base_cr_time (create_time));
+ALTER TABLE x_db_base ADD (INDEX x_db_base_up_time (update_time));
+ALTER TABLE x_auth_sess ADD (INDEX x_auth_sess_cr_time (create_time));
+ALTER TABLE x_auth_sess ADD (INDEX x_auth_sess_up_time (update_time));
+ALTER TABLE x_portal_user ADD (INDEX x_portal_user_cr_time (create_time));
+ALTER TABLE x_portal_user ADD (INDEX x_portal_user_up_time (update_time));
+ALTER TABLE x_portal_user ADD (INDEX x_portal_user_name (first_name(767)));
+ALTER TABLE x_portal_user ADD (INDEX x_portal_user_email (email(512)));
+ALTER TABLE x_portal_user_role ADD (INDEX x_portal_user_role_cr_time (create_time));
+ALTER TABLE x_portal_user_role ADD (INDEX x_portal_user_role_up_time (update_time));
+ALTER TABLE x_asset ADD (INDEX x_asset_cr_time (create_time));
+ALTER TABLE x_asset ADD (INDEX x_asset_up_time (update_time));
+ALTER TABLE x_resource ADD (INDEX x_resource_cr_time (create_time));
+ALTER TABLE x_resource ADD (INDEX x_resource_up_time (update_time));
+ALTER TABLE x_cred_store ADD (INDEX x_cred_store_cr_time (create_time));
+ALTER TABLE x_cred_store ADD (INDEX x_cred_store_up_time (update_time));
+ALTER TABLE x_group ADD (INDEX x_group_cr_time (create_time));
+ALTER TABLE x_group ADD (INDEX x_group_up_time (update_time));
+ALTER TABLE x_user ADD (INDEX x_user_cr_time (create_time));
+ALTER TABLE x_user ADD (INDEX x_user_up_time (update_time));
+ALTER TABLE x_group_users ADD (INDEX x_group_users_cr_time (create_time));
+ALTER TABLE x_group_users ADD (INDEX x_group_users_up_time (update_time));
+ALTER TABLE x_group_groups ADD (INDEX x_group_groups_cr_time (create_time));
+ALTER TABLE x_group_groups ADD (INDEX x_group_groups_up_time (update_time));
+ALTER TABLE x_perm_map ADD (INDEX x_perm_map_cr_time (create_time));
+ALTER TABLE x_perm_map ADD (INDEX x_perm_map_up_time (update_time));
+ALTER TABLE x_audit_map ADD (INDEX x_audit_map_cr_time (create_time));
+ALTER TABLE x_audit_map ADD (INDEX x_audit_map_up_time (update_time));
+ALTER TABLE x_policy_export_audit ADD (INDEX x_policy_export_audit_cr_time (create_time));
+ALTER TABLE x_policy_export_audit ADD (INDEX x_policy_export_audit_up_time (update_time));
+ALTER TABLE x_trx_log ADD (INDEX x_trx_log_cr_time (create_time));
+ALTER TABLE x_trx_log ADD (INDEX x_trx_log_up_time (update_time));
+ALTER TABLE xa_access_audit ADD (INDEX xa_access_audit_cr_time (create_time));
+ALTER TABLE xa_access_audit ADD (INDEX xa_access_audit_up_time (update_time));
+SET SQL_MODE=@OLD_SQL_MODE;
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
+SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS;
+-- NOTE(review): the "champlain changes" below duplicate what db/patches 001-003
+-- apply to upgraded databases, for the fresh-install path. Patch 004 widens
+-- x_asset.config to MEDIUMTEXT while this script uses LONGTEXT -- confirm which
+-- type is intended so fresh and patched databases end up identical.
+##champlain changes
+ALTER TABLE  `x_group` ADD  `group_src` INT NOT NULL DEFAULT 0;
+ALTER TABLE  `x_resource` ADD  `policy_name` VARCHAR( 500 ) NULL DEFAULT NULL;
+ALTER TABLE  `x_resource` ADD UNIQUE  `x_resource_UK_policy_name` (  `policy_name` );
+ALTER TABLE  `x_resource` ADD  `res_topologies` TEXT NULL DEFAULT NULL ;
+ALTER TABLE  `x_resource` ADD  `res_services` TEXT NULL DEFAULT NULL;
+ALTER TABLE  `x_perm_map` ADD  `ip_address` TEXT NULL DEFAULT NULL;
+ALTER TABLE  `x_asset` CHANGE  `config`  `config` LONGTEXT NULL DEFAULT NULL ;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/patches/001-groupsource.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/patches/001-groupsource.sql b/security-admin/db/patches/001-groupsource.sql
new file mode 100644
index 0000000..a31cceb
--- /dev/null
+++ b/security-admin/db/patches/001-groupsource.sql
@@ -0,0 +1,19 @@
+-- Idempotent migration: add the group_src column to x_group if it is missing.
+-- MySQL has no ADD COLUMN IF NOT EXISTS, so a throwaway stored procedure checks
+-- information_schema first; the procedure is dropped again afterwards.
+ drop procedure if exists add_group_source_column_to_x_group_table;
+
+delimiter ;;
+ create procedure add_group_source_column_to_x_group_table() begin
+
+ /* add group source column if it does not already exist */
+ if exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_group') then
+	if not exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_group' and column_name = 'group_src') then
+		ALTER TABLE  `x_group` ADD  `group_src` INT NOT NULL DEFAULT 0;
+ 	end if;
+ end if;
+  
+end;;
+
+delimiter ;
+
+ call add_group_source_column_to_x_group_table();
+
+ drop procedure if exists add_group_source_column_to_x_group_table;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/patches/002-policyname.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/patches/002-policyname.sql b/security-admin/db/patches/002-policyname.sql
new file mode 100644
index 0000000..8cbd479
--- /dev/null
+++ b/security-admin/db/patches/002-policyname.sql
@@ -0,0 +1,22 @@
+-- Idempotent migration: add x_resource.policy_name and its unique index.
+-- MySQL has no ADD COLUMN IF NOT EXISTS, so a throwaway stored procedure checks
+-- information_schema first; the procedure is dropped again afterwards.
+drop procedure if exists add_policy_name_column_to_x_resource_table;
+
+delimiter ;;
+create procedure add_policy_name_column_to_x_resource_table() begin
+
+ /* add policy name column if it does not already exist */
+ if exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_resource') then
+  	if not exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_resource' and column_name = 'policy_name') then
+  		ALTER TABLE  `x_resource` ADD  `policy_name` VARCHAR( 500 ) NULL DEFAULT NULL;
+ 	end if;
+ 	/* add the unique index independently of the column add, so a re-run can
+ 	   repair a database where the column exists but the index is missing */
+  	if exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_resource' and column_name = 'policy_name') then
+  		if not exists (select * from information_schema.statistics where table_schema=database() and table_name = 'x_resource' and index_name = 'x_resource_UK_policy_name') then
+  			ALTER TABLE  `x_resource` ADD UNIQUE  `x_resource_UK_policy_name` (  `policy_name` );
+  		end if;
+  	end if;
+ end if;
+
+  
+end;;
+
+delimiter ;
+call add_policy_name_column_to_x_resource_table();
+
+drop procedure if exists add_policy_name_column_to_x_resource_table;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/patches/003-knoxrepo.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/patches/003-knoxrepo.sql b/security-admin/db/patches/003-knoxrepo.sql
new file mode 100644
index 0000000..31fa276
--- /dev/null
+++ b/security-admin/db/patches/003-knoxrepo.sql
@@ -0,0 +1,33 @@
+-- Idempotent migration for Knox repository support: adds x_resource.res_topologies,
+-- x_resource.res_services and x_perm_map.ip_address when they are missing.
+-- MySQL has no ADD COLUMN IF NOT EXISTS, so a throwaway stored procedure checks
+-- information_schema first; the procedure is dropped again afterwards.
+drop procedure if exists add_columns_for_knox_repository;
+
+delimiter ;;
+create procedure add_columns_for_knox_repository() begin
+
+ /* add res_topologies if it does not already exist */
+ if exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_resource') then
+ 	if not exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_resource' and column_name = 'res_topologies') then
+ 		ALTER TABLE  `x_resource` ADD  `res_topologies` TEXT NULL DEFAULT NULL ;
+ 	end if;
+ end if;
+ 
+  /* add res_services if it does not already exist */
+ if exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_resource') then
+ 	if not exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_resource' and column_name = 'res_services') then
+ 		ALTER TABLE  `x_resource` ADD  `res_services` TEXT NULL DEFAULT NULL;
+ 	end if;
+ end if;
+ 
+  /* add ip_address if it does not already exist */
+ if exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_perm_map') then
+ 	if not exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_perm_map' and column_name = 'ip_address') then
+ 		ALTER TABLE  `x_perm_map` ADD  `ip_address` TEXT NULL DEFAULT NULL;
+ 	end if;
+ end if;
+
+  
+end;;
+
+delimiter ;
+call add_columns_for_knox_repository();
+
+drop procedure if exists add_columns_for_knox_repository;

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/patches/004-assetconfigsize.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/patches/004-assetconfigsize.sql b/security-admin/db/patches/004-assetconfigsize.sql
new file mode 100644
index 0000000..c3133c2
--- /dev/null
+++ b/security-admin/db/patches/004-assetconfigsize.sql
@@ -0,0 +1,16 @@
+-- Idempotent migration: widen x_asset.config to LONGTEXT, matching the type used
+-- by the fresh-install schema (schema_mysql.sql). The guard also upgrades
+-- databases left at MEDIUMTEXT by an earlier version of this patch, so fresh and
+-- patched databases end up identical.
+drop procedure if exists change_config_column_datatype_of_x_asset_table;
+
+delimiter ;;
+create procedure change_config_column_datatype_of_x_asset_table() begin
+
+ /* change config data type to longtext if it is still text or mediumtext */
+ if exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_asset' and column_name = 'config' and data_type in ('text','mediumtext')) then
+ 	ALTER TABLE  `x_asset` CHANGE  `config`  `config` LONGTEXT NULL DEFAULT NULL ;
+ end if;
+  
+end;;
+
+delimiter ;
+call change_config_column_datatype_of_x_asset_table();
+
+drop procedure if exists change_config_column_datatype_of_x_asset_table;

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/patches/005-xtrxlogcolumnsize.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/patches/005-xtrxlogcolumnsize.sql b/security-admin/db/patches/005-xtrxlogcolumnsize.sql
new file mode 100644
index 0000000..4fe6e92
--- /dev/null
+++ b/security-admin/db/patches/005-xtrxlogcolumnsize.sql
@@ -0,0 +1,19 @@
+-- Idempotent migration: widen x_trx_log.prev_val and new_val from VARCHAR(1024)
+-- to MEDIUMTEXT. Wrapped in a throwaway stored procedure so re-running is harmless.
+drop procedure if exists change_values_columns_datatype_of_x_trx_log_table;
+
+delimiter ;;
+create procedure change_values_columns_datatype_of_x_trx_log_table() begin
+
+ /* widen prev_val to mediumtext if it is still a varchar */
+ if exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_trx_log' and column_name = 'prev_val' and data_type='varchar') then
+ 	ALTER TABLE  `x_trx_log` CHANGE  `prev_val`  `prev_val` MEDIUMTEXT NULL DEFAULT NULL ;
+ end if;
+ /* widen new_val likewise */
+ if exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_trx_log' and column_name = 'new_val'  and data_type='varchar') then
+ 	ALTER TABLE  `x_trx_log` CHANGE  `new_val`  `new_val` MEDIUMTEXT NULL DEFAULT NULL ;
+ end if;
+  
+end;;
+
+delimiter ;
+call change_values_columns_datatype_of_x_trx_log_table();
+
+drop procedure if exists change_values_columns_datatype_of_x_trx_log_table;

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/patches/006-createdefaultpublicgroup.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/patches/006-createdefaultpublicgroup.sql b/security-admin/db/patches/006-createdefaultpublicgroup.sql
new file mode 100644
index 0000000..b71a1c7
--- /dev/null
+++ b/security-admin/db/patches/006-createdefaultpublicgroup.sql
@@ -0,0 +1,19 @@
+-- Idempotent migration: seed the built-in 'public' group if no row with
+-- group_name = 'public' exists yet; added_by/upd_by are set to user id 1.
+-- NOTE(review): the INSERT references GROUP_SRC, which only exists after
+-- schema_mysql.sql or patch 001-groupsource.sql has run -- patches must be
+-- applied in numeric order.
+drop procedure if exists insert_public_group_in_x_group_table;
+
+delimiter ;;
+create procedure insert_public_group_in_x_group_table() begin
+
+ /* check table x_group exist or not */
+ if exists (select * from information_schema.columns where table_schema=database() and table_name = 'x_group') then
+ 	/* check record for group name public exist or not */
+ 	if not exists (select * from x_group where group_name = 'public') then
+ 		INSERT INTO x_group (ADDED_BY_ID, CREATE_TIME, DESCR, GROUP_SRC, GROUP_TYPE, GROUP_NAME, STATUS, UPDATE_TIME, UPD_BY_ID) VALUES (1, UTC_TIMESTAMP(), 'public group', 0, 0, 'public', 0, UTC_TIMESTAMP(), 1);
+ 	end if;
+ end if;
+  
+end;;
+
+delimiter ;
+call insert_public_group_in_x_group_table();
+
+drop procedure if exists insert_public_group_in_x_group_table;

http://git-wip-us.apache.org/repos/asf/incubator-argus/blob/7defc061/security-admin/db/reset_asset.sql
----------------------------------------------------------------------
diff --git a/security-admin/db/reset_asset.sql b/security-admin/db/reset_asset.sql
new file mode 100644
index 0000000..a0c5a70
--- /dev/null
+++ b/security-admin/db/reset_asset.sql
@@ -0,0 +1,4 @@
+-- Dev-only seed: rewrites the stored repository configs (hadoopdev, hbase,
+-- dev-hive) to point at the Hortonworks sandbox host with hardcoded plaintext
+-- credentials. Never run this against a production policy database.
+update x_asset set config = '{"username":"policymgr","password":"policymgr","fs.default.name":"hdfs://sandbox.hortonworks.com:8020","hadoop.security.authorization":"false","hadoop.security.authentication":"simple","hadoop.security.auth_to_local":"RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/         RULE:[2:$1@$0](jhs@.*)s/.*/mapred/         RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/         RULE:[2:$1@$0](hm@.*)s/.*/hbase/         RULE:[2:$1@$0](rs@.*)s/.*/hbase/         DEFAULT","dfs.datanode.kerberos.principal":"","dfs.namenode.kerberos.principal":"","dfs.secondary.namenode.kerberos.principal":"","commonNameForCertificate":""}' where asset_name = 'hadoopdev';
+update x_asset set config = '{"username":"policymgr","password":"policymgr","fs.default.name":"hdfs://sandbox.hortonworks.com:8020","hadoop.security.authorization":"false","hadoop.security.authentication":"simple","hadoop.security.auth_to_local":"RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/         RULE:[2:$1@$0](jhs@.*)s/.*/mapred/         RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/         RULE:[2:$1@$0](hm@.*)s/.*/hbase/         RULE:[2:$1@$0](rs@.*)s/.*/hbase/         DEFAULT","dfs.datanode.kerberos.principal":"","dfs.namenode.kerberos.principal":"","dfs.secondary.namenode.kerberos.principal":"","hbase.master.kerberos.principal":"","hbase.rpc.engine":"org.apache.hadoop.hbase.ipc.SecureRpcEngine","hbase.rpc.protection":"privacy","hbase.security.authentication":"simple","hbase.zookeeper.property.clientPort":"2181","hbase.zookeeper.quorum":"sandbox.hortonworks.com","commonNameForCertificate":""}' where asset_name = 'hbase' ;
+update x_asset set config = '{"username":"policymgr","password":"","jdbc.driverClassName":"org.apache.hive.jdbc.HiveDriver","jdbc.url":"jdbc:hive2://sandbox.hortonworks.com:10000/default","commonNameForCertificate":""}'  where asset_name = 'dev-hive' ;
+commit ;


Mime
View raw message