hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From aw@apache.org
Subject hadoop git commit: HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang via aw)
Date Tue, 27 Jan 2015 22:32:31 GMT
Repository: hadoop
Updated Branches:
  refs/heads/trunk a8ad1e808 -> 0a05ae178


HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a05ae17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a05ae17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a05ae17

Branch: refs/heads/trunk
Commit: 0a05ae1782488597cbf8667866f98f0df341abc0
Parents: a8ad1e8
Author: Allen Wittenauer <aw@apache.org>
Authored: Tue Jan 27 14:31:27 2015 -0800
Committer: Allen Wittenauer <aw@apache.org>
Committed: Tue Jan 27 14:31:27 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../src/main/resources/hdfs-default.xml         | 26 --------------------
 .../resources/job_1329348432655_0001_conf.xml   |  2 --
 .../src/main/data/2jobs2min-rumen-jh.json       |  6 -----
 4 files changed, 3 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a05ae17/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b867a70..beea13b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -798,6 +798,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-49. MiniDFSCluster.stopDataNode will always shut down a node in
     the cluster if a matching name is not found. (stevel)
 
+    HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang
+    via aw)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a05ae17/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index c24f7be..966f5f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -31,16 +31,6 @@
 </property>
 
 <property>
-  <name>dfs.namenode.logging.level</name>
-  <value>info</value>
-  <description>
-    The logging level for dfs namenode. Other values are "dir" (trace
-    namespace mutations), "block" (trace block under/over replications
-    and block creations/deletions), or "all".
-  </description>
-</property>
-
-<property>
   <name>dfs.namenode.rpc-address</name>
   <value></value>
   <description>
@@ -155,14 +145,6 @@
 </property>
 
 <property>
-  <name>dfs.https.enable</name>
-  <value>false</value>
-  <description>
-    Deprecated. Use "dfs.http.policy" instead.
-  </description>
-</property>
-
-<property>
   <name>dfs.http.policy</name>
   <value>HTTP_ONLY</value>
   <description>Decide if HTTPS(SSL) is supported on HDFS
@@ -1245,14 +1227,6 @@
 </property>
 
 <property>
-  <name>dfs.support.append</name>
-  <value>true</value>
-  <description>
-    Does HDFS allow appends to files?
-  </description>
-</property>
-
-<property>
   <name>dfs.client.use.datanode.hostname</name>
   <value>false</value>
   <description>Whether clients should use datanode hostnames when

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a05ae17/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
index d886e89..608b8ab 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
@@ -113,7 +113,6 @@
 <property><!--Loaded from job.xml--><name>hadoop.proxyuser.user.groups</name><value>users</value></property>
 <property><!--Loaded from job.xml--><name>dfs.namenode.name.dir.restore</name><value>false</value></property>
 <property><!--Loaded from job.xml--><name>io.seqfile.lazydecompress</name><value>true</value></property>
-<property><!--Loaded from job.xml--><name>dfs.https.enable</name><value>false</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.reduce.merge.inmem.threshold</name><value>1000</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.input.fileinputformat.split.minsize</name><value>0</value></property>
 <property><!--Loaded from job.xml--><name>dfs.replication</name><value>3</value></property>
@@ -209,7 +208,6 @@
 <property><!--Loaded from job.xml--><name>mapreduce.job.dir</name><value>/tmp/hadoop-yarn/staging/user/.staging/job_1329348432655_0001</value></property>
 <property><!--Loaded from job.xml--><name>io.map.index.skip</name><value>0</value></property>
 <property><!--Loaded from job.xml--><name>net.topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.ScriptBasedMapping</value></property>
-<property><!--Loaded from job.xml--><name>dfs.namenode.logging.level</name><value>info</value></property>
 <property><!--Loaded from job.xml--><name>fs.s3.maxRetries</name><value>4</value></property>
 <property><!--Loaded from job.xml--><name>s3native.client-write-packet-size</name><value>65536</value></property>
 <property><!--Loaded from job.xml--><name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name><value>1000</value></property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a05ae17/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
index 59ae8d7..70ff8af 100644
--- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
+++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
@@ -4657,7 +4657,6 @@
     "mapreduce.tasktracker.taskcontroller" : "org.apache.hadoop.mapred.DefaultTaskController",
     "yarn.scheduler.fair.preemption" : "true",
     "mapreduce.reduce.shuffle.parallelcopies" : "5",
-    "dfs.support.append" : "true",
     "yarn.nodemanager.env-whitelist" : "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME",
     "mapreduce.jobtracker.heartbeats.in.second" : "100",
     "mapreduce.job.maxtaskfailures.per.tracker" : "3",
@@ -4674,7 +4673,6 @@
     "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
     "ha.zookeeper.parent-znode" : "/hadoop-ha",
     "io.seqfile.lazydecompress" : "true",
-    "dfs.https.enable" : "false",
     "mapreduce.reduce.merge.inmem.threshold" : "1000",
     "mapreduce.input.fileinputformat.split.minsize" : "0",
     "dfs.replication" : "3",
@@ -4783,7 +4781,6 @@
     "io.map.index.skip" : "0",
     "net.topology.node.switch.mapping.impl" : "org.apache.hadoop.net.ScriptBasedMapping",
     "fs.s3.maxRetries" : "4",
-    "dfs.namenode.logging.level" : "info",
     "ha.failover-controller.new-active.rpc-timeout.ms" : "60000",
     "s3native.client-write-packet-size" : "65536",
     "yarn.resourcemanager.amliveliness-monitor.interval-ms" : "1000",
@@ -9770,7 +9767,6 @@
     "mapreduce.tasktracker.taskcontroller" : "org.apache.hadoop.mapred.DefaultTaskController",
     "yarn.scheduler.fair.preemption" : "true",
     "mapreduce.reduce.shuffle.parallelcopies" : "5",
-    "dfs.support.append" : "true",
     "yarn.nodemanager.env-whitelist" : "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME",
     "mapreduce.jobtracker.heartbeats.in.second" : "100",
     "mapreduce.job.maxtaskfailures.per.tracker" : "3",
@@ -9787,7 +9783,6 @@
     "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
     "ha.zookeeper.parent-znode" : "/hadoop-ha",
     "io.seqfile.lazydecompress" : "true",
-    "dfs.https.enable" : "false",
     "mapreduce.reduce.merge.inmem.threshold" : "1000",
     "mapreduce.input.fileinputformat.split.minsize" : "0",
     "dfs.replication" : "3",
@@ -9896,7 +9891,6 @@
     "io.map.index.skip" : "0",
     "net.topology.node.switch.mapping.impl" : "org.apache.hadoop.net.ScriptBasedMapping",
     "fs.s3.maxRetries" : "4",
-    "dfs.namenode.logging.level" : "info",
     "ha.failover-controller.new-active.rpc-timeout.ms" : "60000",
     "s3native.client-write-packet-size" : "65536",
     "yarn.resourcemanager.amliveliness-monitor.interval-ms" : "1000",


Mime
View raw message