carbondata-commits mailing list archives

From chenliang...@apache.org
Subject [38/50] [abbrv] incubator-carbondata git commit: [Bug] add more info to audit log (#781)
Date Thu, 30 Jun 2016 17:42:25 GMT
[Bug] add more info to audit log (#781)

Added user name, thread ID, database name, and table name to the audit log.
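
For illustration, with the StandardLogService change below (the new hadoop-common dependency in common/pom.xml supplies UserGroupInformation, which resolves the current user's short name), an audit line would now look roughly like this, where the host, user, thread id, and table names are hypothetical:

    [host01][carbon_user][Thread-42] Data load is successful for default.sales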

Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/fe7acdce
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/fe7acdce
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/fe7acdce

Branch: refs/heads/master
Commit: fe7acdce243cb0e5d705d07a3627f4aa79338230
Parents: 5fdadba
Author: Venkata Ramana G <g.ramana.v@gmail.com>
Authored: Thu Jun 30 04:52:15 2016 +0530
Committer: GitHub <noreply@github.com>
Committed: Thu Jun 30 04:52:15 2016 +0530

----------------------------------------------------------------------
 common/pom.xml                                  |  8 ++-
 .../common/logging/impl/StandardLogService.java | 15 ++++-
 .../execution/command/carbonTableSchema.scala   | 58 ++++++++++----------
 .../spark/sql/hive/CarbonMetastoreCatalog.scala |  6 +-
 .../spark/rdd/CarbonDataRDDFactory.scala        | 19 ++++---
 5 files changed, 64 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/fe7acdce/common/pom.xml
----------------------------------------------------------------------
diff --git a/common/pom.xml b/common/pom.xml
index fe4560e..d7b6ad3 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -55,6 +55,12 @@
         </exclusion>
       </exclusions>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>${hadoop.deps.scope}</scope>
+    </dependency>
   </dependencies>
   <build>
     <sourceDirectory>src/main/java</sourceDirectory>
@@ -69,4 +75,4 @@
       </plugin>
     </plugins>
   </build>
-</project>
\ No newline at end of file
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/fe7acdce/common/src/main/java/org/carbondata/common/logging/impl/StandardLogService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/carbondata/common/logging/impl/StandardLogService.java b/common/src/main/java/org/carbondata/common/logging/impl/StandardLogService.java
index c4c86bf..07cd7bb 100644
--- a/common/src/main/java/org/carbondata/common/logging/impl/StandardLogService.java
+++ b/common/src/main/java/org/carbondata/common/logging/impl/StandardLogService.java
@@ -19,12 +19,14 @@
 
 package org.carbondata.common.logging.impl;
 
+import java.io.IOException;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.Properties;
 
 import org.carbondata.common.logging.LogService;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Logger;
 import org.apache.log4j.MDC;
 
@@ -273,8 +275,17 @@ public final class StandardLogService implements LogService {
     } catch (UnknownHostException e) {
       hostName = "localhost";
     }
-
-    logger.log(AuditLevel.AUDIT, "[" + hostName + "]" + msg);
+    String username = "unknown";
+    String threadid = "unknown";
+    try {
+      threadid = Thread.currentThread().getId() + "";
+      username = UserGroupInformation.getCurrentUser().getShortUserName();
+    } catch (IOException e) {
+      username = "unknown";
+    }
+    logger.log(AuditLevel.AUDIT, "[" + hostName + "]"
+        + "[" + username + "]"
+        + "[Thread-" +  threadid + "]" + msg);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/fe7acdce/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index bcd6dd3..75abe0e 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -270,7 +270,7 @@ class TableNewProcessor(cm: tableModel, sqlContext: SQLContext) {
       val name = f._1
       LOGGER.error(s"Duplicate column found with name : $name")
       LOGGER.audit(
-        s"Validation failed for Create/Alter Table Operation - " +
+        s"Validation failed for Create/Alter Table Operation for ${cm.schemaName}.${cm.cubeName}"
+
         s"Duplicate column found with name : $name")
       sys.error(s"Duplicate dimensions found with name : $name")
     })
@@ -351,7 +351,8 @@ class TableNewProcessor(cm: tableModel, sqlContext: SQLContext) {
           val msg = definedpartCols.mkString(", ")
           LOGGER.error(s"partition columns specified are not part of Dimension columns :
$msg")
           LOGGER.audit(
-            s"Validation failed for Create/Alter Table Operation - " +
+            s"Validation failed for Create/Alter Table Operation for " +
+              s"${cm.schemaName}.${cm.cubeName} " +
             s"partition columns specified are not part of Dimension columns : $msg")
           sys.error(s"partition columns specified are not part of Dimension columns : $msg")
         }
@@ -363,7 +364,8 @@ class TableNewProcessor(cm: tableModel, sqlContext: SQLContext) {
             case e: Exception =>
               val cl = part.partitionClass
               LOGGER.audit(
-                s"Validation failed for Create/Alter Table Operation - " +
+                s"Validation failed for Create/Alter Table Operation for " +
+                  s"${cm.schemaName}.${cm.cubeName} " +
                 s"partition class specified can not be found or loaded : $cl")
               sys.error(s"partition class specified can not be found or loaded : $cl")
           }
@@ -549,7 +551,7 @@ class TableProcessor(cm: tableModel, sqlContext: SQLContext) {
       val name = f._1
       LOGGER.error(s"Duplicate dimensions found with name : $name")
       LOGGER.audit(
-        s"Validation failed for Create/Alter Table Operation - " +
+        s"Validation failed for Create/Alter Table Operation for ${cm.schemaName}.${cm.cubeName}
" +
         s"Duplicate dimensions found with name : $name")
       sys.error(s"Duplicate dimensions found with name : $name")
     })
@@ -558,7 +560,7 @@ class TableProcessor(cm: tableModel, sqlContext: SQLContext) {
       val name = f._1
       LOGGER.error(s"Duplicate dimensions found with column name : $name")
       LOGGER.audit(
-        s"Validation failed for Create/Alter Table Operation - " +
+        s"Validation failed for Create/Alter Table Operation for ${cm.schemaName}.${cm.cubeName}
" +
         s"Duplicate dimensions found with column name : $name")
       sys.error(s"Duplicate dimensions found with column name : $name")
     })
@@ -567,7 +569,7 @@ class TableProcessor(cm: tableModel, sqlContext: SQLContext) {
       val name = f._1
       LOGGER.error(s"Duplicate measures found with name : $name")
       LOGGER.audit(
-        s"Validation failed for Create/Alter Table Operation - " +
+        s"Validation failed for Create/Alter Table Operation for ${cm.schemaName}.${cm.cubeName}
" +
         s"Duplicate measures found with name : $name")
       sys.error(s"Duplicate measures found with name : $name")
     })
@@ -576,7 +578,7 @@ class TableProcessor(cm: tableModel, sqlContext: SQLContext) {
       val name = f._1
       LOGGER.error(s"Duplicate measures found with column name : $name")
       LOGGER.audit(
-        s"Validation failed for Create/Alter Table Operation - " +
+        s"Validation failed for Create/Alter Table Operation for ${cm.schemaName}.${cm.cubeName}
" +
         s"Duplicate measures found with column name : $name")
       sys.error(s"Duplicate measures found with column name : $name")
     })
@@ -589,7 +591,8 @@ class TableProcessor(cm: tableModel, sqlContext: SQLContext) {
         val fault = a.msrName
         LOGGER.error(s"Aggregator should not be defined for dimension fields [$fault]")
         LOGGER.audit(
-          s"Validation failed for Create/Alter Table Operation - " +
+          s"Validation failed for Create/Alter Table Operation for " +
+            s"${cm.schemaName}.${cm.cubeName} " +
           s"Aggregator should not be defined for dimension fields [$fault]")
         sys.error(s"Aggregator should not be defined for dimension fields [$fault]")
       }
@@ -599,7 +602,7 @@ class TableProcessor(cm: tableModel, sqlContext: SQLContext) {
       val name = f._1
       LOGGER.error(s"Dimension and Measure defined with same name : $name")
       LOGGER.audit(
-        s"Validation failed for Create/Alter Table Operation - " +
+        s"Validation failed for Create/Alter Table Operation for ${cm.schemaName}.${cm.cubeName}
" +
         s"Dimension and Measure defined with same name : $name")
       sys.error(s"Dimension and Measure defined with same name : $name")
     })
@@ -688,7 +691,8 @@ class TableProcessor(cm: tableModel, sqlContext: SQLContext) {
             case e: Exception =>
               val cl = part.partitionClass
               LOGGER.audit(
-                s"Validation failed for Create/Alter Table Operation - " +
+                s"Validation failed for Create/Alter Table Operation for " +
+                  s"${cm.schemaName}.${cm.cubeName} " +
                 s"partition class specified can not be found or loaded : $cl")
               sys.error(s"partition class specified can not be found or loaded : $cl")
           }
@@ -1308,11 +1312,11 @@ private[sql] case class DeleteLoadsById(
 
   def run(sqlContext: SQLContext): Seq[Row] = {
 
-    LOGGER.audit("Delete load by Id request has been received.")
+    val schemaName = getDB.getDatabaseName(schemaNameOp, sqlContext)
+    LOGGER.audit(s"Delete load by Id request has been received for $schemaName.$tableName")
 
     // validate load ids first
     validateLoadIds
-    val schemaName = getDB.getDatabaseName(schemaNameOp, sqlContext)
 
     val relation = CarbonEnv.getInstance(sqlContext).carbonCatalog.lookupRelation1(
       Option(schemaName),
@@ -1340,7 +1344,7 @@ private[sql] case class DeleteLoadsById(
 
     if (invalidLoadIds.isEmpty) {
 
-      LOGGER.audit("Delete load by Id is successfull.")
+      LOGGER.audit(s"Delete load by Id is successfull for $schemaName.$tableName.")
     }
     else {
       sys.error("Delete load by Id is failed. No matching load id found. SegmentSeqId(s)
- "
@@ -1371,8 +1375,8 @@ private[sql] case class DeleteLoadsByLoadDate(
 
   def run(sqlContext: SQLContext): Seq[Row] = {
 
-    LOGGER.audit("Delete load by load date request has been received.")
     val schemaName = getDB.getDatabaseName(schemaNameOp, sqlContext)
+    LOGGER.audit(s"Delete load by load date request has been received for $schemaName.$tableName")
 
     val relation = CarbonEnv.getInstance(sqlContext).carbonCatalog.lookupRelation1(
       Option(schemaName),
@@ -1408,7 +1412,7 @@ private[sql] case class DeleteLoadsByLoadDate(
     var invalidLoadTimestamps = segmentStatusManager
       .updateDeletionStatus(loadDate, path, timeObj.asInstanceOf[java.lang.Long]).asScala
     if(invalidLoadTimestamps.isEmpty) {
-      LOGGER.audit("Delete load by load date is successfull.")
+      LOGGER.audit(s"Delete load by load date is successfull for $schemaName.$tableName.")
     }
     else {
       sys.error("Delete load by load date is failed. No matching load found.")
@@ -1440,7 +1444,7 @@ private[sql] case class LoadCube(
     if (null == org.carbondata.core.carbon.metadata.CarbonMetadata.getInstance
       .getCarbonTable(schemaName + "_" + tableName)) {
       logError("Data loading failed. table not found: " + schemaName + "." + tableName)
-      LOGGER.audit("Data loading failed. table not found: " + schemaName + "." + tableName)
+      LOGGER.audit(s"Data loading failed. table not found: $schemaName.$tableName")
       sys.error("Data loading failed. table not found: " + schemaName + "." + tableName)
     }
     CarbonProperties.getInstance().addProperty("zookeeper.enable.lock", "false")
@@ -1576,7 +1580,7 @@ private[sql] case class LoadCube(
       catch {
         case ex: Exception =>
           LOGGER.error(ex)
-          LOGGER.audit("Dataload failure. Please check the logs")
+          LOGGER.audit(s"Dataload failure for $schemaName.$tableName. Please check the logs")
           throw ex
       }
       finally {
@@ -1591,7 +1595,8 @@ private[sql] case class LoadCube(
         } catch {
           case ex: Exception =>
             LOGGER.error(ex)
-            LOGGER.audit("Dataload failure. Problem deleting the partition folder")
+            LOGGER.audit(s"Dataload failure for $schemaName.$tableName. " +
+              "Problem deleting the partition folder")
             throw ex
         }
 
@@ -1788,8 +1793,7 @@ private[sql] case class DropCubeCommand(ifExistsSet: Boolean, schemaNameOp: Opti
     if (null == tmpTable) {
       if (!ifExistsSet) {
         LOGGER
-          .audit(s"Dropping carbon table with Database name [$schemaName] and Table name"
+
-                 "[$cubeName] failed")
+          .audit(s"Dropping carbon table $schemaName.$cubeName failed")
         LOGGER.error(s"Carbon Table $schemaName.$cubeName metadata does not exist")
       }
       if (sqlContext.tableNames(schemaName).map(x => x.toLowerCase())
@@ -1814,9 +1818,7 @@ private[sql] case class DropCubeCommand(ifExistsSet: Boolean, schemaNameOp: Opti
         if (carbonLock.lockWithRetries()) {
           logInfo("Successfully able to get the table metadata file lock")
         } else {
-          LOGGER.audit(
-            s"Dropping table with Database name [$schemaName] and Table name [$cubeName]
" +
-            s"failed as the Table is locked")
+          LOGGER.audit(s"Dropping table $schemaName.$cubeName failed as the Table is locked")
           sys.error("Table is locked for updation. Please try after some time")
         }
 
@@ -1840,7 +1842,7 @@ private[sql] case class DropCubeCommand(ifExistsSet: Boolean, schemaNameOp: Opti
               relation.cubeMeta.partitioner)
           QueryPartitionHelper.getInstance().removePartition(schemaName, cubeName)
 
-          LOGGER.audit(s"Deleted table [$cubeName] under database [$schemaName]")
+          LOGGER.audit(s"Deleted table $schemaName.$cubeName")
         }
       }
       finally {
@@ -2083,8 +2085,8 @@ private[sql] case class DeleteLoadByDate(
 
   def run(sqlContext: SQLContext): Seq[Row] = {
 
-    LOGGER.audit("The delete load by date request has been received.")
     val schemaName = getDB.getDatabaseName(schemaNameOp, sqlContext)
+    LOGGER.audit(s"The delete load by date request has been received for $schemaName.$cubeName")
     val relation = CarbonEnv.getInstance(sqlContext).carbonCatalog
       .lookupRelation1(Some(schemaName), cubeName, None)(sqlContext).asInstanceOf[CarbonRelation]
     var level: String = ""
@@ -2102,7 +2104,7 @@ private[sql] case class DeleteLoadByDate(
     if (matches.isEmpty) {
       LOGGER.audit(
         "The delete load by date is failed. " +
-        "Table $schemaName.$cubeName does not contain date field " + dateField)
+        s"Table $schemaName.$cubeName does not contain date field :" + dateField)
       sys.error(s"Table $schemaName.$cubeName does not contain date field " + dateField)
     }
     else {
@@ -2123,7 +2125,7 @@ private[sql] case class DeleteLoadByDate(
       actualColName,
       dateValue,
       relation.cubeMeta.partitioner)
-    LOGGER.audit("The delete load by date is successfull.")
+    LOGGER.audit(s"The delete load by date $dateValue is successful for $schemaName.$cubeName.")
     Seq.empty
   }
 }
@@ -2136,8 +2138,8 @@ private[sql] case class CleanFiles(
   val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
 
   def run(sqlContext: SQLContext): Seq[Row] = {
-    LOGGER.audit("Clean files request has been received.")
     val schemaName = getDB.getDatabaseName(schemaNameOp, sqlContext)
+    LOGGER.audit(s"Clean files request has been received for $schemaName.$cubeName")
     val relation = CarbonEnv.getInstance(sqlContext).carbonCatalog
       .lookupRelation1(Some(schemaName), cubeName, None)(sqlContext).
       asInstanceOf[CarbonRelation]

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/fe7acdce/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
index f1c8721..f88b74e 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
@@ -158,7 +158,7 @@ class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: C
           CarbonRelation(schemaName, cubeName,
             CarbonSparkUtil.createSparkMeta(cubes.head.carbonTable), cubes.head, alias)(sqlContext)
         } else {
-          LOGGER.audit(s"Table Not Found: $schemaName $cubeName")
+          LOGGER.audit(s"Table Not Found: $schemaName.$cubeName")
           throw new NoSuchTableException
         }
       case Seq(cubeName) =>
@@ -170,7 +170,7 @@ class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: C
           CarbonRelation(currentDatabase, cubeName,
             CarbonSparkUtil.createSparkMeta(cubes.head.carbonTable), cubes.head, alias)(sqlContext)
         } else {
-          LOGGER.audit(s"Table Not Found: $cubeName")
+          LOGGER.audit(s"Table Not Found: $currentDatabase.$cubeName")
           throw new NoSuchTableException
         }
       case _ =>
@@ -580,7 +580,7 @@ class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: C
     val (timestampFile, timestampFileType) = getTimestampFileAndType(schemaName, cubeName)
 
     if (!FileFactory.isFileExist(timestampFile, timestampFileType)) {
-      LOGGER.audit("Creating timestamp file")
+      LOGGER.audit(s"Creating timestamp file for $schemaName.$cubeName")
       FileFactory.createNewFile(timestampFile, timestampFileType)
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/fe7acdce/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 6f44aef..1494282 100644
--- a/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -213,7 +213,7 @@ object CarbonDataRDDFactory extends Logging {
       }
     } else {
       logError("Delete by Date request is failed")
-      logger.audit("The delete load by date is failed.")
+      logger.audit(s"The delete load by date is failed for $schemaName.$cubeName")
       sys.error("Delete by Date request is failed, potential causes " +
                 "Empty store or Invalid column type, For more details please refer logs.")
     }
@@ -305,8 +305,8 @@ object CarbonDataRDDFactory extends Logging {
     }
 
     logger
-      .audit("Compaction request received for table " + carbonLoadModel
-        .getDatabaseName + "." + carbonLoadModel.getTableName
+      .audit(s"Compaction request received for table " +
+        s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}"
       )
     val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
     val cubeCreationTime = CarbonEnv.getInstance(sqlContext).carbonCatalog
@@ -345,8 +345,8 @@ object CarbonDataRDDFactory extends Logging {
     }
     else {
       logger
-        .audit("Not able to acquire the compaction lock for table " + carbonLoadModel
-          .getDatabaseName + "." + carbonLoadModel.getTableName
+        .audit("Not able to acquire the compaction lock for table " +
+          s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}"
         )
       logger
         .error("Not able to acquire the compaction lock for table " + carbonLoadModel
@@ -827,7 +827,8 @@ object CarbonDataRDDFactory extends Logging {
           message = "Dataload failure"
         }
         logInfo("********clean up done**********")
-        logger.audit("Data load is failed.")
+        logger.audit(s"Data load is failed for " +
+          s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}")
         logWarning("Unable to write load metadata file")
         throw new Exception(message)
       } else {
@@ -842,7 +843,8 @@ object CarbonDataRDDFactory extends Logging {
             )
           if (!status) {
             val message = "Dataload failed due to failure in table status updation."
-            logger.audit("Data load is failed.")
+            logger.audit("Data load is failed for " +
+              s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}")
             logger.error("Dataload failed due to failure in table status updation.")
             throw new Exception(message)
           }
@@ -850,7 +852,8 @@ object CarbonDataRDDFactory extends Logging {
           // TODO : Handle it
           logInfo("********Database updated**********")
         }
-        logger.audit("Data load is successful.")
+        logger.audit("Data load is successful for " +
+          s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}")
       }
     }
 

