From: arp@apache.org
To: common-commits@hadoop.apache.org
Date: Mon, 29 Oct 2018 16:35:39 -0000
Message-Id: <6b3b1edb009143bbb6318c6bbf63d6b0@git.apache.org>
Subject: [1/2] hadoop git commit: HDDS-727. ozone.log is not getting created in logs directory. Contributed by Nilotpal Nandi.

Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 031cdf5d5 -> 9854eed1a
  refs/heads/trunk     902345de6 -> a58048e8d


HDDS-727. ozone.log is not getting created in logs directory. Contributed by Nilotpal Nandi.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a58048e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a58048e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a58048e8

Branch: refs/heads/trunk
Commit: a58048e8d545534e5b7f32e747df5a5f598889e2
Parents: 902345d
Author: Arpit Agarwal
Authored: Mon Oct 29 09:35:18 2018 -0700
Committer: Arpit Agarwal
Committed: Mon Oct 29 09:35:18 2018 -0700

----------------------------------------------------------------------
 .../common/src/main/conf/log4j.properties      | 157 -------------------
 .../src/main/conf/om-audit-log4j2.properties   |  90 -----------
 .../common/src/main/conf/ozone-site.xml        |  24 ---
 .../dist/dev-support/bin/dist-layout-stitching |   4 +-
 .../dist/src/main/conf/log4j.properties        | 157 +++++++++++++++++++
 .../src/main/conf/om-audit-log4j2.properties   |  90 +++++++++++
 hadoop-ozone/dist/src/main/conf/ozone-site.xml |  24 +++
 7 files changed, 274 insertions(+), 272 deletions(-)
----------------------------------------------------------------------
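The commit moves the Ozone configuration templates (log4j.properties, om-audit-log4j2.properties, ozone-site.xml) into hadoop-ozone/dist and has dist-layout-stitching copy them into etc/hadoop of the assembled distribution, so the log4j appender that writes ozone.log actually ships with the tarball. A minimal shell sketch for checking the stitched layout after a build follows; the target directory pattern (hadoop-ozone/dist/target/ozone-*) is an assumption and may differ between releases:

  # Assumption: the stitched distribution lands under hadoop-ozone/dist/target/ozone-*
  cd hadoop-ozone/dist/target/ozone-*/ || exit 1

  # The moved templates should now be present in the packaged conf directory.
  ls etc/hadoop/log4j.properties etc/hadoop/om-audit-log4j2.properties etc/hadoop/ozone-site.xml

  # log4j.properties routes org.apache.hadoop.ozone to ${hadoop.log.dir}/ozone.log via the FILE appender.
  grep -n "ozone.log" etc/hadoop/log4j.properties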
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-hdds/common/src/main/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/conf/log4j.properties b/hadoop-hdds/common/src/main/conf/log4j.properties
deleted file mode 100644
index 663e254..0000000
--- a/hadoop-hdds/common/src/main/conf/log4j.properties
+++ /dev/null
@@ -1,157 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-# Null Appender
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Rolling File Appender - cap space usage at 5gb.
-#
-hadoop.log.maxfilesize=256MB
-hadoop.log.maxbackupindex=20
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
-log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# HDFS block state change log from block manager
-#
-# Uncomment the following to log normal block state change
-# messages from BlockManager in NameNode.
-#log4j.logger.BlockStateChange=DEBUG
-
-#
-#Security appender
-#
-hadoop.security.logger=INFO,NullAppender
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth-${user.name}.audit
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# Daily Rolling Security appender
-#
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-
-# Custom Logging levels
-# AWS SDK & S3A FileSystem
-#log4j.logger.com.amazonaws=ERROR
-log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
-#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-
-log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
-
-# Do not log into datanode logs. Remove this line to have single log.
-log4j.additivity.org.apache.hadoop.ozone=false
-
-# For development purposes, log both to console and log file.
-log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
-log4j.appender.OZONE.Threshold=info
-log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
-log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
-  %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
-
-# Real ozone logger that writes to ozone.log
-log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
-log4j.appender.FILE.Threshold=debug
-log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
-(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
-%m%n
-
-# Log levels of third-party libraries
-log4j.logger.org.apache.commons.beanutils=WARN
-
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
-log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
-log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties b/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
deleted file mode 100644
index 7be51ac..0000000
--- a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with this
-# work for additional information regarding copyright ownership. The ASF
-# licenses this file to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-name=PropertiesConfig
-
-# Checks for config change periodically and reloads
-monitorInterval=30
-
-filter=read,write
-# filter.read.onMatch=DENY avoids logging all READ events
-# filter.read.onMatch=ACCEPT permits logging all READ events
-# The above two settings ignore the log levels in configuration
-# filter.read.onMatch=NEUTRAL permits logging of only those READ events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.read.type=MarkerFilter
-filter.read.marker=READ
-filter.read.onMatch=DENY
-filter.read.onMismatch=NEUTRAL
-
-# filter.write.onMatch=DENY avoids logging all WRITE events
-# filter.write.onMatch=ACCEPT permits logging all WRITE events
-# The above two settings ignore the log levels in configuration
-# filter.write.onMatch=NEUTRAL permits logging of only those WRITE events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.write.type=MarkerFilter
-filter.write.marker=WRITE
-filter.write.onMatch=NEUTRAL
-filter.write.onMismatch=NEUTRAL
-
-# Log Levels are organized from most specific to least:
-# OFF (most specific, no logging)
-# FATAL (most specific, little data)
-# ERROR
-# WARN
-# INFO
-# DEBUG
-# TRACE (least specific, a lot of data)
-# ALL (least specific, all data)
-
-# Uncomment following section to enable logging to console appender also
-#appenders=console, rolling
-#appender.console.type=Console
-#appender.console.name=STDOUT
-#appender.console.layout.type=PatternLayout
-#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-
-# Comment this line when using both console and rolling appenders
-appenders=rolling
-
-#Rolling File Appender with size & time thresholds.
-#Rolling is triggered when either threshold is breached.
-#The rolled over file is compressed by default
-#Time interval is specified in seconds 86400s=1 day
-appender.rolling.type=RollingFile
-appender.rolling.name=RollingFile
-appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log
-appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
-appender.rolling.layout.type=PatternLayout
-appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-appender.rolling.policies.type=Policies
-appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
-appender.rolling.policies.time.interval=86400
-appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
-appender.rolling.policies.size.size=64MB
-
-loggers=audit
-logger.audit.type=AsyncLogger
-logger.audit.name=OMAudit
-logger.audit.level=INFO
-logger.audit.appenderRefs=rolling
-logger.audit.appenderRef.file.ref=RollingFile
-
-rootLogger.level=INFO
-rootLogger.appenderRefs=stdout
-rootLogger.appenderRef.stdout.ref=STDOUT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/common/src/main/conf/ozone-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/conf/ozone-site.xml b/hadoop-ozone/common/src/main/conf/ozone-site.xml
deleted file mode 100644
index 77dd7ef..0000000
--- a/hadoop-ozone/common/src/main/conf/ozone-site.xml
+++ /dev/null
@@ -1,24 +0,0 @@
[24 removed XML lines; the markup was stripped from the archived message text]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
index cb6312d..612ee54 100755
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
@@ -78,7 +78,9 @@ run mkdir -p ./etc
 run mkdir -p ./libexec
 run cp -r "${ROOT}/hadoop-common-project/hadoop-common/src/main/conf" "etc/hadoop"
-run cp "${ROOT}/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties" "etc/hadoop"
+run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties" "etc/hadoop"
+run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-site.xml" "etc/hadoop"
+run cp -f "${ROOT}/hadoop-ozone/dist/src/main/conf/log4j.properties" "etc/hadoop"
 run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop" "bin/"
 run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd" "bin/"
 run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone" "bin/"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/dist/src/main/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/conf/log4j.properties b/hadoop-ozone/dist/src/main/conf/log4j.properties
new file mode 100644
index 0000000..663e254
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/conf/log4j.properties
@@ -0,0 +1,157 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+# Null Appender
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to log normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=DEBUG
+
+#
+#Security appender
+#
+hadoop.security.logger=INFO,NullAppender
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+
+# Custom Logging levels
+# AWS SDK & S3A FileSystem
+#log4j.logger.com.amazonaws=ERROR
+log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
+#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+
+log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
+
+# Do not log into datanode logs. Remove this line to have single log.
+log4j.additivity.org.apache.hadoop.ozone=false
+
+# For development purposes, log both to console and log file.
+log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
+log4j.appender.OZONE.Threshold=info
+log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
+log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+  %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
+
+# Real ozone logger that writes to ozone.log
+log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
+log4j.appender.FILE.Threshold=debug
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
+%m%n
+
+# Log levels of third-party libraries
+log4j.logger.org.apache.commons.beanutils=WARN
+
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
+log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties b/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
new file mode 100644
index 0000000..7be51ac
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
@@ -0,0 +1,90 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+name=PropertiesConfig
+
+# Checks for config change periodically and reloads
+monitorInterval=30
+
+filter=read,write
+# filter.read.onMatch=DENY avoids logging all READ events
+# filter.read.onMatch=ACCEPT permits logging all READ events
+# The above two settings ignore the log levels in configuration
+# filter.read.onMatch=NEUTRAL permits logging of only those READ events
+# which are attempted at log level equal or greater than log level specified
+# in the configuration
+filter.read.type=MarkerFilter
+filter.read.marker=READ
+filter.read.onMatch=DENY
+filter.read.onMismatch=NEUTRAL
+
+# filter.write.onMatch=DENY avoids logging all WRITE events
+# filter.write.onMatch=ACCEPT permits logging all WRITE events
+# The above two settings ignore the log levels in configuration
+# filter.write.onMatch=NEUTRAL permits logging of only those WRITE events
+# which are attempted at log level equal or greater than log level specified
+# in the configuration
+filter.write.type=MarkerFilter
+filter.write.marker=WRITE
+filter.write.onMatch=NEUTRAL
+filter.write.onMismatch=NEUTRAL
+
+# Log Levels are organized from most specific to least:
+# OFF (most specific, no logging)
+# FATAL (most specific, little data)
+# ERROR
+# WARN
+# INFO
+# DEBUG
+# TRACE (least specific, a lot of data)
+# ALL (least specific, all data)
+
+# Uncomment following section to enable logging to console appender also
+#appenders=console, rolling
+#appender.console.type=Console
+#appender.console.name=STDOUT
+#appender.console.layout.type=PatternLayout
+#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+
+# Comment this line when using both console and rolling appenders
+appenders=rolling
+
+#Rolling File Appender with size & time thresholds.
+#Rolling is triggered when either threshold is breached.
+#The rolled over file is compressed by default
+#Time interval is specified in seconds 86400s=1 day
+appender.rolling.type=RollingFile
+appender.rolling.name=RollingFile
+appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log
+appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
+appender.rolling.layout.type=PatternLayout
+appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+appender.rolling.policies.type=Policies
+appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval=86400
+appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size=64MB
+
+loggers=audit
+logger.audit.type=AsyncLogger
+logger.audit.name=OMAudit
+logger.audit.level=INFO
+logger.audit.appenderRefs=rolling
+logger.audit.appenderRef.file.ref=RollingFile
+
+rootLogger.level=INFO
+rootLogger.appenderRefs=stdout
+rootLogger.appenderRef.stdout.ref=STDOUT
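The audit configuration above suppresses READ events by default (filter.read.onMatch=DENY). Following the file's own comments, switching that value to NEUTRAL lets READ events through at the configured level; a small shell sketch against the stitched conf directory, where the in-place sed edit is illustrative and not part of the commit:

  # Illustrative only: enable READ audit events in the packaged template.
  sed -i 's/^filter.read.onMatch=DENY/filter.read.onMatch=NEUTRAL/' etc/hadoop/om-audit-log4j2.properties
  grep -n "filter.read.onMatch" etc/hadoop/om-audit-log4j2.properties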
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/dist/src/main/conf/ozone-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/conf/ozone-site.xml b/hadoop-ozone/dist/src/main/conf/ozone-site.xml
new file mode 100644
index 0000000..77dd7ef
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/conf/ozone-site.xml
@@ -0,0 +1,24 @@
[24 added XML lines; the markup was stripped from the archived message text]
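With the templates packaged, the missing-ozone.log symptom from the JIRA can be checked at runtime. A rough sketch, assuming an Ozone service has already been started from the stitched distribution, that its log directory is exported as HADOOP_LOG_DIR (the log4j template falls back to the current directory when hadoop.log.dir is unset), and that the log4j2 ${hostName} lookup matches the output of hostname:

  # ozone.log comes from the log4j FILE appender; the OM audit log comes from om-audit-log4j2.properties.
  ls "${HADOOP_LOG_DIR:-.}"/ozone.log
  ls "${HADOOP_LOG_DIR:-.}"/om-audit-"$(hostname)".log
  tail -n 20 "${HADOOP_LOG_DIR:-.}"/ozone.log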