hadoop-hdfs-commits mailing list archives

From cnaur...@apache.org
Subject svn commit: r1562670 [1/2] - in /hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/qjournal/serv...
Date Thu, 30 Jan 2014 01:55:19 GMT
Author: cnauroth
Date: Thu Jan 30 01:55:14 2014
New Revision: 1562670

URL: http://svn.apache.org/r1562670
Log:
Merge trunk to HDFS-4685.

Modified:
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsEditsViewer.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsImageViewer.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Hftp.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ShortCircuitLocalReads.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1561770-1562668

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jan 30 01:55:14 2014
@@ -292,6 +292,21 @@ Release 2.4.0 - UNRELEASED
 
   NEW FEATURES
 
+  IMPROVEMENTS
+
+    HDFS-5781. Use an array to record the mapping between FSEditLogOpCode and 
+    the corresponding byte value. (jing9)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.3.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
     HDFS-5122. Support failover and retry in WebHdfsFileSystem for NN HA.
     (Haohui Mai via jing9)
 
@@ -329,6 +344,43 @@ Release 2.4.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    HDFS-5360. Improvement of usage message of renameSnapshot and
+    deleteSnapshot. (Shinichi Yamashita via wang)
+
+    HDFS-5331. make SnapshotDiff.java to a o.a.h.util.Tool interface implementation. 
+    (Vinayakumar B via umamahesh)
+
+    HDFS-4657.  Limit the number of blocks logged by the NN after a block
+    report to a configurable value.  (Aaron T. Myers via Colin Patrick
+    McCabe)
+
+    HDFS-5344. Make LsSnapshottableDir as Tool interface implementation. (Sathish via umamahesh)
+
+    HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value. (Sathish via umamahesh)
+
+    HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh)
+
+    HDFS-4983. Numeric usernames do not work with WebHDFS FS. (Yongjun Zhang via
+    jing9)
+
+    HDFS-5592. statechangeLog of completeFile should be logged only in case of success. 
+    (Vinayakumar via umamahesh)
+
+    HDFS-5662. Can't decommission a DataNode due to file's replication factor
+    larger than the rest of the cluster size. (brandonli)
+
+    HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options.
+    (shv)
+
+    HDFS-5675. Add Mkdirs operation to NNThroughputBenchmark.
+    (Plamen Jeliazkov via shv)
+
+    HDFS-5677. Need error checking for HA cluster configuration.
+    (Vincent Sheffer via cos)
+
+    HDFS-5825. Use FileUtils.copyFile() to implement DFSTestUtils.copyFile().
+    (Haohui Mai via Arpit Agarwal)
+
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
 
     HDFS-4278. Log an ERROR when DFS_BLOCK_ACCESS_TOKEN_ENABLE config is
@@ -504,6 +556,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-5788. listLocatedStatus response can be very large. (Nathan Roberts
     via kihwal)
 
+    HDFS-5841. Update HDFS caching documentation with new changes. (wang)
+
   OPTIMIZATIONS
 
     HDFS-5239.  Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -518,6 +572,177 @@ Release 2.4.0 - UNRELEASED
 
   BUG FIXES
 
+    HDFS-5307. Support both HTTP and HTTPS in jsp pages (Haohui Mai via
+    brandonli)
+
+    HDFS-5291. Standby namenode after transition to active goes into safemode.
+    (jing9)
+
+    HDFS-5317. Go back to DFS Home link does not work on datanode webUI
+    (Haohui Mai via brandonli)
+
+    HDFS-5316. Namenode ignores the default https port (Haohui Mai via
+    brandonli)
+
+    HDFS-5281. COMMIT request should not block. (brandonli)
+
+    HDFS-5337. should do hsync for a commit request even there is no pending
+    writes (brandonli)
+
+    HDFS-5335. Hive query failed with possible race in dfs output stream.
+    (Haohui Mai via suresh)
+
+    HDFS-5322. HDFS delegation token not found in cache errors seen on secure HA 
+    clusters. (jing9)
+
+    HDFS-5329. Update FSNamesystem#getListing() to handle inode path in startAfter
+    token. (brandonli)
+
+    HDFS-5330. fix readdir and readdirplus for large directories (brandonli)
+
+    HDFS-5370. Typo in Error Message: different between range in condition
+    and range in error message. (Kousuke Saruta via suresh)
+
+    HDFS-5365. Fix libhdfs compile error on FreeBSD9. (Radim Kolar via cnauroth)
+    
+    HDFS-5347. Add HDFS NFS user guide. (brandonli)
+
+    HDFS-5403. WebHdfs client cannot communicate with older WebHdfs servers
+    post HDFS-5306. (atm)
+
+    HDFS-5171. NFS should create input stream for a file and try to share it
+    with multiple read requests. (Haohui Mai via brandonli)
+
+    HDFS-5413. hdfs.cmd does not support passthrough to any arbitrary class.
+    (cnauroth)
+
+    HDFS-5433. When reloading fsimage during checkpointing, we should clear
+    existing snapshottable directories. (Aaron T. Myers via wang)
+
+    HDFS-5432. TestDatanodeJsp fails on Windows due to assumption that loopback
+    address resolves to host name localhost. (cnauroth)
+
+    HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
+
+    HDFS-4633 TestDFSClientExcludedNodes fails sporadically if excluded nodes
+    cache expires too quickly  (Chris Nauroth via Sanjay)
+
+    HDFS-5037. Active NN should trigger its own edit log rolls (wang)
+
+    HDFS-5035.  getFileLinkStatus and rename do not correctly check permissions
+    of symlinks.  (Andrew Wang via Colin Patrick McCabe)
+
+    HDFS-5456. NameNode startup progress creates new steps if caller attempts to
+    create a counter for a step that doesn't already exist.  (cnauroth)
+
+    HDFS-5458. Datanode failed volume threshold ignored if exception is thrown
+    in getDataDirsFromURIs. (Mike Mellenthin via wang)
+
+    HDFS-5252. Stable write is not handled correctly in someplace. (brandonli)
+
+    HDFS-5364. Add OpenFileCtx cache. (brandonli)
+
+    HDFS-5469. Add configuration property for the sub-directroy export path
+    (brandonli)
+
+    HDFS-5519. COMMIT handler should update the commit status after sync
+    (brandonli)
+
+    HDFS-5372. In FSNamesystem, hasReadLock() returns false if the current thread 
+    holds the write lock (VinayaKumar B via umamahesh)
+
+    HDFS-4516. Client crash after block allocation and NN switch before lease recovery for 
+    the same file can cause readers to fail forever (VinaayKumar B via umamahesh)
+
+    HDFS-5014. Process register commands with out holding BPOfferService lock. 
+    (Vinaykumar B via umamahesh)
+
+    HDFS-5288. Close idle connections in portmap (Haohui Mai via brandonli)
+
+    HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli)
+
+    HDFS-5548. Use ConcurrentHashMap in portmap (Haohui Mai via brandonli)
+
+    HDFS-5577. NFS user guide update (brandonli)
+
+    HDFS-5563. NFS gateway should commit the buffered data when read request comes
+    after write to the same file (brandonli)
+
+    HDFS-4997. libhdfs doesn't return correct error codes in most cases (cmccabe)
+
+    HDFS-5587. add debug information when NFS fails to start with duplicate user
+    or group names (brandonli)
+
+    HDFS-5590. Block ID and generation stamp may be reused when persistBlocks is 
+    set to false. (jing9)
+
+    HDFS-5353. Short circuit reads fail when dfs.encrypt.data.transfer is 
+    enabled. (Colin Patrick McCabe via jing9)
+
+    HDFS-5283. Under construction blocks only inside snapshots should not be
+    counted in safemode threshhold.  (Vinay via szetszwo)
+
+    HDFS-5257. addBlock() retry should return LocatedBlock with locations else client 
+    will get AIOBE. (Vinay via jing9)
+
+    HDFS-5427. Not able to read deleted files from snapshot directly under 
+    snapshottable dir after checkpoint and NN restart. (Vinay via jing9)
+
+    HDFS-5443. Delete 0-sized block when deleting an under-construction file that 
+    is included in snapshot. (jing9)
+
+    HDFS-5476. Snapshot: clean the blocks/files/directories under a renamed 
+    file/directory while deletion. (jing9)
+
+    HDFS-5425. Renaming underconstruction file with snapshots can make NN failure on 
+    restart. (jing9 and Vinay)
+
+    HDFS-5474. Deletesnapshot can make Namenode in safemode on NN restarts. 
+    (Sathish via jing9)
+
+    HDFS-5504. In HA mode, OP_DELETE_SNAPSHOT is not decrementing the safemode threshold, 
+    leads to NN safemode. (Vinay via jing9)
+
+    HDFS-5428. Under construction files deletion after snapshot+checkpoint+nn restart 
+    leads nn safemode. (jing9)
+
+    HDFS-5074. Allow starting up from an fsimage checkpoint in the middle of a
+    segment. (Todd Lipcon via atm)
+
+    HDFS-4201. NPE in BPServiceActor#sendHeartBeat. (jxiang via cmccabe)
+
+    HDFS-5666. Fix inconsistent synchronization in BPOfferService (jxiang via cmccabe)
+    
+    HDFS-5657. race condition causes writeback state error in NFS gateway (brandonli)
+
+    HDFS-5661. Browsing FileSystem via web ui, should use datanode's fqdn instead of ip 
+    address. (Benoy Antony via jing9)
+
+    HDFS-5582. hdfs getconf -excludeFile or -includeFile always failed (sathish
+    via cmccabe)
+
+    HDFS-5671. Fix socket leak in DFSInputStream#getBlockReader. (JamesLi via umamahesh) 
+
+    HDFS-5649. Unregister NFS and Mount service when NFS gateway is shutting down.
+    (brandonli)
+
+    HDFS-5789. Some of snapshot APIs missing checkOperation double check in fsn. (umamahesh)
+
+    HDFS-5343. When cat command is issued on snapshot files getting unexpected result.
+    (Sathish via umamahesh)
+
+    HDFS-5297. Fix dead links in HDFS site documents. (Akira Ajisaka via
+    Arpit Agarwal)
+
+    HDFS-5830. WebHdfsFileSystem.getFileBlockLocations throws
+    IllegalArgumentException when accessing another cluster. (Yongjun Zhang via
+    Colin Patrick McCabe)
+
+    HDFS-5833. Fix SecondaryNameNode javadoc. (Bangtao Zhou via Arpit Agarwal)
+
+    HDFS-5844. Fix broken link in WebHDFS.apt.vm. (Akira Ajisaka via
+    Arpit Agarwal)
+
     HDFS-5034.  Remove debug prints from GetFileLinkInfo (Andrew Wang via Colin
     Patrick McCabe)
 
@@ -599,6 +824,12 @@ Release 2.4.0 - UNRELEASED
     HDFS-5728. Block recovery will fail if the metafile does not have crc 
     for all chunks of the block (Vinay via kihwal)
 
+    HDFS-5845. SecondaryNameNode dies when checkpointing with cache pools.
+    (wang)
+
+    HDFS-5842. Cannot create hftp filesystem when using a proxy user ugi and a doAs 
+    on a secure cluster. (jing9)
+
   BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
 
     HDFS-4985. Add storage type to the protocol and expose it in block report
@@ -936,212 +1167,6 @@ Release 2.4.0 - UNRELEASED
     HDFS-5724. modifyCacheDirective logging audit log command wrongly as
     addCacheDirective (Uma Maheswara Rao G via Colin Patrick McCabe)
 
-
-Release 2.3.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-    HDFS-5360. Improvement of usage message of renameSnapshot and
-    deleteSnapshot. (Shinichi Yamashita via wang)
-
-    HDFS-5331. make SnapshotDiff.java to a o.a.h.util.Tool interface implementation. 
-    (Vinayakumar B via umamahesh)
-
-    HDFS-4657.  Limit the number of blocks logged by the NN after a block
-    report to a configurable value.  (Aaron T. Myers via Colin Patrick
-    McCabe)
-
-    HDFS-5344. Make LsSnapshottableDir as Tool interface implementation. (Sathish via umamahesh)
-
-    HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value. (Sathish via umamahesh)
-
-    HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh)
-
-    HDFS-4983. Numeric usernames do not work with WebHDFS FS. (Yongjun Zhang via
-    jing9)
-
-    HDFS-5592. statechangeLog of completeFile should be logged only in case of success. 
-    (Vinayakumar via umamahesh)
-
-    HDFS-5662. Can't decommission a DataNode due to file's replication factor
-    larger than the rest of the cluster size. (brandonli)
-
-    HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options.
-    (shv)
-
-    HDFS-5675. Add Mkdirs operation to NNThroughputBenchmark.
-    (Plamen Jeliazkov via shv)
-
-    HDFS-5677. Need error checking for HA cluster configuration.
-    (Vincent Sheffer via cos)
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
-    HDFS-5307. Support both HTTP and HTTPS in jsp pages (Haohui Mai via
-    brandonli)
-
-    HDFS-5291. Standby namenode after transition to active goes into safemode.
-    (jing9)
-
-    HDFS-5317. Go back to DFS Home link does not work on datanode webUI
-    (Haohui Mai via brandonli)
-
-    HDFS-5316. Namenode ignores the default https port (Haohui Mai via
-    brandonli)
-
-    HDFS-5281. COMMIT request should not block. (brandonli)
-
-    HDFS-5337. should do hsync for a commit request even there is no pending
-    writes (brandonli)
-
-    HDFS-5335. Hive query failed with possible race in dfs output stream.
-    (Haohui Mai via suresh)
-
-    HDFS-5322. HDFS delegation token not found in cache errors seen on secure HA 
-    clusters. (jing9)
-
-    HDFS-5329. Update FSNamesystem#getListing() to handle inode path in startAfter
-    token. (brandonli)
-
-    HDFS-5330. fix readdir and readdirplus for large directories (brandonli)
-
-    HDFS-5370. Typo in Error Message: different between range in condition
-    and range in error message. (Kousuke Saruta via suresh)
-
-    HDFS-5365. Fix libhdfs compile error on FreeBSD9. (Radim Kolar via cnauroth)
-    
-    HDFS-5347. Add HDFS NFS user guide. (brandonli)
-
-    HDFS-5403. WebHdfs client cannot communicate with older WebHdfs servers
-    post HDFS-5306. (atm)
-
-    HDFS-5171. NFS should create input stream for a file and try to share it
-    with multiple read requests. (Haohui Mai via brandonli)
-
-    HDFS-5413. hdfs.cmd does not support passthrough to any arbitrary class.
-    (cnauroth)
-
-    HDFS-5433. When reloading fsimage during checkpointing, we should clear
-    existing snapshottable directories. (Aaron T. Myers via wang)
-
-    HDFS-5432. TestDatanodeJsp fails on Windows due to assumption that loopback
-    address resolves to host name localhost. (cnauroth)
-
-    HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
-
-    HDFS-4633 TestDFSClientExcludedNodes fails sporadically if excluded nodes
-    cache expires too quickly  (Chris Nauroth via Sanjay)
-
-    HDFS-5037. Active NN should trigger its own edit log rolls (wang)
-
-    HDFS-5035.  getFileLinkStatus and rename do not correctly check permissions
-    of symlinks.  (Andrew Wang via Colin Patrick McCabe)
-
-    HDFS-5456. NameNode startup progress creates new steps if caller attempts to
-    create a counter for a step that doesn't already exist.  (cnauroth)
-
-    HDFS-5458. Datanode failed volume threshold ignored if exception is thrown
-    in getDataDirsFromURIs. (Mike Mellenthin via wang)
-
-    HDFS-5252. Stable write is not handled correctly in someplace. (brandonli)
-
-    HDFS-5364. Add OpenFileCtx cache. (brandonli)
-
-    HDFS-5469. Add configuration property for the sub-directroy export path
-    (brandonli)
-
-    HDFS-5519. COMMIT handler should update the commit status after sync
-    (brandonli)
-
-    HDFS-5372. In FSNamesystem, hasReadLock() returns false if the current thread 
-    holds the write lock (VinayaKumar B via umamahesh)
-
-    HDFS-4516. Client crash after block allocation and NN switch before lease recovery for 
-    the same file can cause readers to fail forever (VinaayKumar B via umamahesh)
-
-    HDFS-5014. Process register commands with out holding BPOfferService lock. 
-    (Vinaykumar B via umamahesh)
-
-    HDFS-5288. Close idle connections in portmap (Haohui Mai via brandonli)
-
-    HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli)
-
-    HDFS-5548. Use ConcurrentHashMap in portmap (Haohui Mai via brandonli)
-
-    HDFS-5577. NFS user guide update (brandonli)
-
-    HDFS-5563. NFS gateway should commit the buffered data when read request comes
-    after write to the same file (brandonli)
-
-    HDFS-4997. libhdfs doesn't return correct error codes in most cases (cmccabe)
-
-    HDFS-5587. add debug information when NFS fails to start with duplicate user
-    or group names (brandonli)
-
-    HDFS-5590. Block ID and generation stamp may be reused when persistBlocks is 
-    set to false. (jing9)
-
-    HDFS-5353. Short circuit reads fail when dfs.encrypt.data.transfer is 
-    enabled. (Colin Patrick McCabe via jing9)
-
-    HDFS-5283. Under construction blocks only inside snapshots should not be
-    counted in safemode threshhold.  (Vinay via szetszwo)
-
-    HDFS-5257. addBlock() retry should return LocatedBlock with locations else client 
-    will get AIOBE. (Vinay via jing9)
-
-    HDFS-5427. Not able to read deleted files from snapshot directly under 
-    snapshottable dir after checkpoint and NN restart. (Vinay via jing9)
-
-    HDFS-5443. Delete 0-sized block when deleting an under-construction file that 
-    is included in snapshot. (jing9)
-
-    HDFS-5476. Snapshot: clean the blocks/files/directories under a renamed 
-    file/directory while deletion. (jing9)
-
-    HDFS-5425. Renaming underconstruction file with snapshots can make NN failure on 
-    restart. (jing9 and Vinay)
-
-    HDFS-5474. Deletesnapshot can make Namenode in safemode on NN restarts. 
-    (Sathish via jing9)
-
-    HDFS-5504. In HA mode, OP_DELETE_SNAPSHOT is not decrementing the safemode threshold, 
-    leads to NN safemode. (Vinay via jing9)
-
-    HDFS-5428. Under construction files deletion after snapshot+checkpoint+nn restart 
-    leads nn safemode. (jing9)
-
-    HDFS-5074. Allow starting up from an fsimage checkpoint in the middle of a
-    segment. (Todd Lipcon via atm)
-
-    HDFS-4201. NPE in BPServiceActor#sendHeartBeat. (jxiang via cmccabe)
-
-    HDFS-5666. Fix inconsistent synchronization in BPOfferService (jxiang via cmccabe)
-    
-    HDFS-5657. race condition causes writeback state error in NFS gateway (brandonli)
-
-    HDFS-5661. Browsing FileSystem via web ui, should use datanode's fqdn instead of ip 
-    address. (Benoy Antony via jing9)
-
-    HDFS-5582. hdfs getconf -excludeFile or -includeFile always failed (sathish
-    via cmccabe)
-
-    HDFS-5671. Fix socket leak in DFSInputStream#getBlockReader. (JamesLi via umamahesh) 
-
-    HDFS-5649. Unregister NFS and Mount service when NFS gateway is shutting down.
-    (brandonli)
-
-    HDFS-5789. Some of snapshot APIs missing checkOperation double check in fsn. (umamahesh)
-
-    HDFS-5343. When cat command is issued on snapshot files getting unexpected result.
-    (Sathish via umamahesh)
-
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1561770-1562668

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Thu Jan 30 01:55:14 2014
@@ -84,7 +84,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -1539,7 +1539,7 @@ public class DFSUtil {
     return policy;
   }
 
-  public static HttpServer.Builder loadSslConfToHttpServerBuilder(HttpServer.Builder builder,
+  public static HttpServer2.Builder loadSslConfToHttpServerBuilder(HttpServer2.Builder builder,
       Configuration sslConf) {
     return builder
         .needsClientAuth(
@@ -1644,13 +1644,13 @@ public class DFSUtil {
    * namenode can use to initialize their HTTP / HTTPS server.
    *
    */
-  public static HttpServer.Builder httpServerTemplateForNNAndJN(
+  public static HttpServer2.Builder httpServerTemplateForNNAndJN(
       Configuration conf, final InetSocketAddress httpAddr,
       final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
       String spnegoKeytabFileKey) throws IOException {
     HttpConfig.Policy policy = getHttpPolicy(conf);
 
-    HttpServer.Builder builder = new HttpServer.Builder().setName(name)
+    HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
         .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
         .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
         .setUsernameConfKey(spnegoUserNameKey)

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java Thu Jan 30 01:55:14 2014
@@ -98,9 +98,8 @@ public class LocatedBlock {
     }
     this.storageIDs = storageIDs;
     this.storageTypes = storageTypes;
-    Preconditions.checkArgument(cachedLocs != null,
-        "cachedLocs should not be null, use a different constructor");
-    if (cachedLocs.length == 0) {
+
+    if (cachedLocs == null || cachedLocs.length == 0) {
       this.cachedLocs = EMPTY_LOCS;
     } else {
       this.cachedLocs = cachedLocs;

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java Thu Jan 30 01:55:14 2014
@@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 
 /**
@@ -38,7 +38,7 @@ import org.apache.hadoop.net.NetUtils;
 public class JournalNodeHttpServer {
   public static final String JN_ATTRIBUTE_KEY = "localjournal";
 
-  private HttpServer httpServer;
+  private HttpServer2 httpServer;
   private JournalNode localJournalNode;
 
   private final Configuration conf;
@@ -56,7 +56,7 @@ public class JournalNodeHttpServer {
         DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
-    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "journal",
         DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Thu Jan 30 01:55:14 2014
@@ -120,7 +120,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.ReadaheadPool;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -235,7 +235,7 @@ public class DataNode extends Configured
   private volatile boolean heartbeatsDisabledForTests = false;
   private DataStorage storage = null;
 
-  private HttpServer infoServer = null;
+  private HttpServer2 infoServer = null;
   private int infoPort;
   private int infoSecurePort;
 
@@ -358,7 +358,7 @@ public class DataNode extends Configured
    * Http Policy is decided.
    */
   private void startInfoServer(Configuration conf) throws IOException {
-    HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
+    HttpServer2.Builder builder = new HttpServer2.Builder().setName("datanode")
         .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
 
     HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java Thu Jan 30 01:55:14 2014
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.mortbay.jetty.Connector;
 
@@ -119,7 +119,7 @@ public class SecureDataNodeStarter imple
     // certificates if they are communicating through SSL.
     Connector listener = null;
     if (policy.isHttpEnabled()) {
-      listener = HttpServer.createDefaultChannelConnector();
+      listener = HttpServer2.createDefaultChannelConnector();
       InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
       listener.setHost(infoSocAddr.getHostName());
       listener.setPort(infoSocAddr.getPort());

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java Thu Jan 30 01:55:14 2014
@@ -195,6 +195,17 @@ public final class CacheManager {
 
   }
 
+  /**
+   * Resets all tracked directives and pools. Called during 2NN checkpointing to
+   * reset FSNamesystem state. See {@link FSNamesystem#clear()}.
+   */
+  void clear() {
+    directivesById.clear();
+    directivesByPath.clear();
+    cachePools.clear();
+    nextDirectiveId = 1;
+  }
+
   public void startMonitorThread() {
     crmLock.lock();
     try {

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java Thu Jan 30 01:55:14 2014
@@ -69,7 +69,7 @@ public enum FSEditLogOpCodes {
   OP_MODIFY_CACHE_DIRECTIVE     ((byte) 39),
   OP_SET_ACL                    ((byte) 40),
 
-  // Note that fromByte(..) depends on OP_INVALID being at the last position.  
+  // Note that the current range of the valid OP code is 0~127
   OP_INVALID                    ((byte) -1);
 
   private final byte opCode;
@@ -92,7 +92,22 @@ public enum FSEditLogOpCodes {
     return opCode;
   }
 
-  private static final FSEditLogOpCodes[] VALUES = FSEditLogOpCodes.values();
+  private static FSEditLogOpCodes[] VALUES;
+  
+  static {
+    byte max = 0;
+    for (FSEditLogOpCodes code : FSEditLogOpCodes.values()) {
+      if (code.getOpCode() > max) {
+        max = code.getOpCode();
+      }
+    }
+    VALUES = new FSEditLogOpCodes[max + 1];
+    for (FSEditLogOpCodes code : FSEditLogOpCodes.values()) {
+      if (code.getOpCode() >= 0) {
+        VALUES[code.getOpCode()] = code;
+      }
+    }
+  }
 
   /**
    * Converts byte to FSEditLogOpCodes enum value
@@ -101,12 +116,9 @@ public enum FSEditLogOpCodes {
    * @return enum with byte value of opCode
    */
   public static FSEditLogOpCodes fromByte(byte opCode) {
-    if (opCode == -1) {
-      return OP_INVALID;
-    }
-    if (opCode >= 0 && opCode < OP_INVALID.ordinal()) {
+    if (opCode >= 0 && opCode < VALUES.length) {
       return VALUES[opCode];
     }
-    return null;
+    return opCode == -1 ? OP_INVALID : null;
   }
 }
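
The rewritten fromByte(..) replaces the old ordinal-based bound check with a sparse lookup array sized by the largest assigned opcode, so the mapping stays correct even when opcode bytes are non-contiguous (hence the new 0~127 comment). A self-contained sketch of the same pattern, using hypothetical opcodes rather than the real HDFS set:

    // Illustrative enum; the real class is FSEditLogOpCodes.
    public enum OpCode {
      OP_ADD((byte) 0),
      OP_DELETE((byte) 2),   // gaps in the byte range are permitted
      OP_INVALID((byte) -1); // sentinel kept outside the lookup array

      private final byte opCode;

      OpCode(byte op) {
        this.opCode = op;
      }

      public byte getOpCode() {
        return opCode;
      }

      // Sparse table indexed by opcode byte; unassigned slots stay null.
      private static final OpCode[] VALUES;

      static {
        byte max = 0;
        for (OpCode code : values()) {
          if (code.getOpCode() > max) {
            max = code.getOpCode();
          }
        }
        VALUES = new OpCode[max + 1];
        for (OpCode code : values()) {
          if (code.getOpCode() >= 0) {
            VALUES[code.getOpCode()] = code;
          }
        }
      }

      public static OpCode fromByte(byte opCode) {
        if (opCode >= 0 && opCode < VALUES.length) {
          return VALUES[opCode]; // null for an unassigned opcode
        }
        return opCode == -1 ? OP_INVALID : null;
      }
    }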

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Jan 30 01:55:14 2014
@@ -544,6 +544,7 @@ public class FSNamesystem implements Nam
     leaseManager.removeAllLeases();
     inodeId.setCurrentValue(INodeId.LAST_RESERVED_ID);
     snapshotManager.clearSnapshottableDirs();
+    cacheManager.clear();
   }
 
   @VisibleForTesting

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Thu Jan 30 01:55:14 2014
@@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -287,7 +287,7 @@ public class GetImageServlet extends Htt
       }
     }
     
-    if (HttpServer.userHasAdministratorAccess(context, remoteUser)) {
+    if (HttpServer2.userHasAdministratorAccess(context, remoteUser)) {
       LOG.info("GetImageServlet allowing administrator: " + remoteUser);
       return true;
     }

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java Thu Jan 30 01:55:14 2014
@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.web.WebHdf
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -47,7 +47,7 @@ import org.apache.hadoop.security.UserGr
  */
 @InterfaceAudience.Private
 public class NameNodeHttpServer {
-  private HttpServer httpServer;
+  private HttpServer2 httpServer;
   private final Configuration conf;
   private final NameNode nn;
   
@@ -68,7 +68,7 @@ public class NameNodeHttpServer {
   }
 
   private void initWebHdfs(Configuration conf) throws IOException {
-    if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
+    if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
       // set user pattern based on configuration file
       UserParam.setUserPattern(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
 
@@ -77,9 +77,9 @@ public class NameNodeHttpServer {
       final String classname = AuthFilter.class.getName();
       final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
       Map<String, String> params = getAuthFilterParams(conf);
-      HttpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
+      HttpServer2.defineFilter(httpServer.getWebAppContext(), name, classname, params,
           new String[]{pathSpec});
-      HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
+      HttpServer2.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
 
       // add webhdfs packages
       httpServer.addJerseyResourcePackage(
@@ -103,7 +103,7 @@ public class NameNodeHttpServer {
         DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
-    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "hdfs",
         DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
         DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
@@ -152,7 +152,7 @@ public class NameNodeHttpServer {
               SecurityUtil.getServerPrincipal(principalInConf,
                                               bindAddress.getHostName()));
     } else if (UserGroupInformation.isSecurityEnabled()) {
-      HttpServer.LOG.error(
+      HttpServer2.LOG.error(
           "WebHDFS and security are enabled, but configuration property '" +
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
           "' is not set.");
@@ -164,7 +164,7 @@ public class NameNodeHttpServer {
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
           httpKeytab);
     } else if (UserGroupInformation.isSecurityEnabled()) {
-      HttpServer.LOG.error(
+      HttpServer2.LOG.error(
           "WebHDFS and security are enabled, but configuration property '" +
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
           "' is not set.");
@@ -214,7 +214,7 @@ public class NameNodeHttpServer {
     httpServer.setAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY, prog);
   }
 
-  private static void setupServlets(HttpServer httpServer, Configuration conf) {
+  private static void setupServlets(HttpServer2 httpServer, Configuration conf) {
     httpServer.addInternalServlet("startupProgress",
         StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);
     httpServer.addInternalServlet("getDelegationToken",

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Thu Jan 30 01:55:14 2014
@@ -65,7 +65,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -90,7 +90,7 @@ import com.google.common.collect.Immutab
  * The Secondary NameNode is a daemon that periodically wakes
  * up (determined by the schedule specified in the configuration),
  * triggers a periodic checkpoint and then goes back to sleep.
- * The Secondary NameNode uses the ClientProtocol to talk to the
+ * The Secondary NameNode uses the NamenodeProtocol to talk to the
  * primary NameNode.
  *
  **********************************************************/
@@ -113,7 +113,7 @@ public class SecondaryNameNode implement
   private Configuration conf;
   private InetSocketAddress nameNodeAddr;
   private volatile boolean shouldRun;
-  private HttpServer infoServer;
+  private HttpServer2 infoServer;
   private URL imageListenURL;
 
   private Collection<URI> checkpointDirs;
@@ -257,7 +257,7 @@ public class SecondaryNameNode implement
         DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
-    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "secondary",
         DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
         DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
@@ -1001,7 +1001,12 @@ public class SecondaryNameNode implement
             sig.mostRecentCheckpointTxId + " even though it should have " +
             "just been downloaded");
       }
-      dstImage.reloadFromImageFile(file, dstNamesystem);
+      dstNamesystem.writeLock();
+      try {
+        dstImage.reloadFromImageFile(file, dstNamesystem);
+      } finally {
+        dstNamesystem.writeUnlock();
+      }
       dstNamesystem.dir.imageLoadComplete();
     }
     // error simulation code for junit test

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java Thu Jan 30 01:55:14 2014
@@ -620,7 +620,7 @@ public class CacheAdmin extends Configur
           "directives being added to the pool. This can be specified in " +
           "seconds, minutes, hours, and days, e.g. 120s, 30m, 4h, 2d. " +
           "Valid units are [smhd]. By default, no maximum is set. " +
-          "This can also be manually specified by \"never\".");
+          "A value of \"never\" specifies that there is no limit.");
       return getShortUsage() + "\n" +
           "Add a new cache pool.\n\n" + 
           listing.toString();

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Thu Jan 30 01:55:14 2014
@@ -185,8 +185,8 @@ public class DelegationTokenFetcher {
             } else {
               // otherwise we are fetching
               if (webUrl != null) {
-                Credentials creds = getDTfromRemote(connectionFactory, new URI(webUrl),
-                    renewer);
+                Credentials creds = getDTfromRemote(connectionFactory, new URI(
+                    webUrl), renewer, null);
                 creds.writeTokenStorageFile(tokenFile, conf);
                 for (Token<?> token : creds.getAllTokens()) {
                   if(LOG.isDebugEnabled()) {	
@@ -213,12 +213,17 @@ public class DelegationTokenFetcher {
   }
   
   static public Credentials getDTfromRemote(URLConnectionFactory factory,
-      URI nnUri, String renewer) throws IOException {
+      URI nnUri, String renewer, String proxyUser) throws IOException {
     StringBuilder buf = new StringBuilder(nnUri.toString())
         .append(GetDelegationTokenServlet.PATH_SPEC);
+    String separator = "?";
     if (renewer != null) {
       buf.append("?").append(GetDelegationTokenServlet.RENEWER).append("=")
           .append(renewer);
+      separator = "&";
+    }
+    if (proxyUser != null) {
+      buf.append(separator).append("doas=").append(proxyUser);
     }
 
     boolean isHttps = nnUri.getScheme().equals("https");
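
getDTfromRemote(..) now takes an optional proxy user and appends it as a doas parameter, flipping the query separator from "?" to "&" once a first parameter has been written. A minimal standalone sketch of that separator idiom (method and parameter names are illustrative):

    // Append optional query parameters: '?' before the first one,
    // '&' before every subsequent one.
    static String withOptionalParams(String base, String renewer,
        String proxyUser) {
      StringBuilder buf = new StringBuilder(base);
      String separator = "?";
      if (renewer != null) {
        buf.append(separator).append("renewer=").append(renewer);
        separator = "&";
      }
      if (proxyUser != null) {
        buf.append(separator).append("doas=").append(proxyUser);
      }
      return buf.toString();
    }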

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java Thu Jan 30 01:55:14 2014
@@ -57,7 +57,6 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Progressable;
@@ -234,17 +233,23 @@ public class HftpFileSystem extends File
   }
 
   @Override
-  public synchronized Token<?> getDelegationToken(final String renewer
-                                                  ) throws IOException {
+  public synchronized Token<?> getDelegationToken(final String renewer)
+      throws IOException {
     try {
-      //Renew TGT if needed
-      ugi.checkTGTAndReloginFromKeytab();
-      return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() {
+      // Renew TGT if needed
+      UserGroupInformation connectUgi = ugi.getRealUser();
+      final String proxyUser = connectUgi == null ? null : ugi
+          .getShortUserName();
+      if (connectUgi == null) {
+        connectUgi = ugi;
+      }
+      return connectUgi.doAs(new PrivilegedExceptionAction<Token<?>>() {
         @Override
         public Token<?> run() throws IOException {
           Credentials c;
           try {
-            c = DelegationTokenFetcher.getDTfromRemote(connectionFactory, nnUri, renewer);
+            c = DelegationTokenFetcher.getDTfromRemote(connectionFactory,
+                nnUri, renewer, proxyUser);
           } catch (IOException e) {
             if (e.getCause() instanceof ConnectException) {
               LOG.warn("Couldn't connect to " + nnUri +
@@ -299,13 +304,13 @@ public class HftpFileSystem extends File
    * @return user_shortname,group1,group2...
    */
   private String getEncodedUgiParameter() {
-    StringBuilder ugiParamenter = new StringBuilder(
+    StringBuilder ugiParameter = new StringBuilder(
         ServletUtil.encodeQueryValue(ugi.getShortUserName()));
     for(String g: ugi.getGroupNames()) {
-      ugiParamenter.append(",");
-      ugiParamenter.append(ServletUtil.encodeQueryValue(g));
+      ugiParameter.append(",");
+      ugiParameter.append(ServletUtil.encodeQueryValue(g));
     }
-    return ugiParamenter.toString();
+    return ugiParameter.toString();
   }
 
   /**
@@ -675,30 +680,48 @@ public class HftpFileSystem extends File
 
   @SuppressWarnings("unchecked")
   @Override
-  public long renewDelegationToken(Token<?> token) throws IOException {
+  public long renewDelegationToken(final Token<?> token) throws IOException {
     // update the kerberos credentials, if they are coming from a keytab
-    UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
-    InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
+    UserGroupInformation connectUgi = ugi.getRealUser();
+    if (connectUgi == null) {
+      connectUgi = ugi;
+    }
     try {
-      return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
-          DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
-          (Token<DelegationTokenIdentifier>) token);
-    } catch (AuthenticationException e) {
+      return connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
+        @Override
+        public Long run() throws Exception {
+          InetSocketAddress serviceAddr = SecurityUtil
+              .getTokenServiceAddr(token);
+          return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
+              DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
+              (Token<DelegationTokenIdentifier>) token);
+        }
+      });
+    } catch (InterruptedException e) {
       throw new IOException(e);
     }
   }
 
   @SuppressWarnings("unchecked")
   @Override
-  public void cancelDelegationToken(Token<?> token) throws IOException {
-    // update the kerberos credentials, if they are coming from a keytab
-    UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
-    InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
+  public void cancelDelegationToken(final Token<?> token) throws IOException {
+    UserGroupInformation connectUgi = ugi.getRealUser();
+    if (connectUgi == null) {
+      connectUgi = ugi;
+    }
     try {
-      DelegationTokenFetcher.cancelDelegationToken(connectionFactory, DFSUtil
-          .createUri(getUnderlyingProtocol(), serviceAddr),
-          (Token<DelegationTokenIdentifier>) token);
-    } catch (AuthenticationException e) {
+      connectUgi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          InetSocketAddress serviceAddr = SecurityUtil
+              .getTokenServiceAddr(token);
+          DelegationTokenFetcher.cancelDelegationToken(connectionFactory,
+              DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
+              (Token<DelegationTokenIdentifier>) token);
+          return null;
+        }
+      });
+    } catch (InterruptedException e) {
       throw new IOException(e);
     }
   }
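
All three methods above (getDelegationToken, renewDelegationToken, cancelDelegationToken) now share one shape: when the current UGI is a proxy user, the remote call runs as the real authenticating user while the proxied short name is forwarded as a doas parameter; otherwise the call runs as the UGI itself. A compact sketch of that shape (the printed placeholder stands in for the real HTTP call):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class ProxyUserCallSketch {
      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        UserGroupInformation realUser = ugi.getRealUser();
        // For a proxy-user UGI, getRealUser() holds the authenticating
        // identity; the proxied short name travels as a doas parameter.
        final String proxyUser =
            realUser == null ? null : ugi.getShortUserName();
        UserGroupInformation connectUgi = realUser == null ? ugi : realUser;
        connectUgi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() {
            // Placeholder for the token fetch/renew/cancel HTTP call,
            // which appends "doas=" + proxyUser when proxyUser != null.
            System.out.println("doas=" + proxyUser);
            return null;
          }
        });
      }
    }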

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1561770-1562668

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1561770-1562668

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1561770-1562668

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1561770-1562668

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm Thu Jan 30 01:55:14 2014
@@ -22,110 +22,140 @@ Centralized Cache Management in HDFS
 
 %{toc|section=1|fromDepth=2|toDepth=4}
 
-* {Background}
+* {Overview}
 
-  Normally, HDFS relies on the operating system to cache data it reads from disk.
-  However, HDFS can also be configured to use centralized cache management. Under
-  centralized cache management, the HDFS NameNode itself decides which blocks
-  should be cached, and where they should be cached.
-
-  Centralized cache management has several advantages. First of all, it
-  prevents frequently used block files from being evicted from memory. This is
-  particularly important when the size of the working set exceeds the size of
-  main memory, which is true for many big data applications. Secondly, when
-  HDFS decides what should be cached, it can let clients know about this
-  information through the getFileBlockLocations API. Finally, when the DataNode
-  knows a block is locked into memory, it can provide access to that block via
-  mmap.
+  <Centralized cache management> in HDFS is an explicit caching mechanism that
+  allows users to specify <paths> to be cached by HDFS. The NameNode will
+  communicate with DataNodes that have the desired blocks on disk, and instruct
+  them to cache the blocks in off-heap caches. 
+
+  Centralized cache management in HDFS has many significant advantages.
+
+  [[1]] Explicit pinning prevents frequently used data from being evicted from
+  memory. This is particularly important when the size of the working set
+  exceeds the size of main memory, which is common for many HDFS workloads.
+
+  [[1]] Because DataNode caches are managed by the NameNode, applications can
+  query the set of cached block locations when making task placement decisions.
+  Co-locating a task with a cached block replica improves read performance.
+
+  [[1]] When a block has been cached by a DataNode, clients can use a new,
+  more-efficient, zero-copy read API. Since checksum verification of cached
+  data is done once by the DataNode, clients incur essentially zero
+  overhead when using this new API.
+
+  [[1]] Centralized caching can improve overall cluster memory utilization.
+  When relying on the OS buffer cache at each DataNode, repeated reads of
+  a block will result in all <n> replicas of the block being pulled into
+  buffer cache. With centralized cache management, a user can explicitly pin
+  only <m> of the <n> replicas, saving <n-m> replicas' worth of memory.
 
 * {Use Cases}
 
-  Centralized cache management is most useful for files which are accessed very
-  often. For example, a "fact table" in Hive which is often used in joins is a
-  good candidate for caching. On the other hand, when running a classic
-  "word count" MapReduce job which counts the number of words in each
-  document, there may not be any good candidates for caching, since all the
-  files may be accessed exactly once.
+  Centralized cache management is useful for files that are accessed
+  repeatedly. For example, a small <fact table> in Hive which is often used
+  for joins is a good candidate for caching. On the other hand, caching the
+  input of a <one-year reporting query> is probably less useful, since the
+  historical data might only be read once.
+
+  Centralized cache management is also useful for mixed workloads with
+  performance SLAs. Caching the working set of a high-priority workload
+  ensures that it does not contend for disk I/O with a low-priority workload.
 
 * {Architecture}
 
 [images/caching.png] Caching Architecture
 
-  With centralized cache management, the NameNode coordinates all caching
-  across the cluster. It receives cache information from each DataNode via the
-  cache report, a periodic message that describes all the blocks IDs cached on
-  a given DataNode. The NameNode will reply to DataNode heartbeat messages
-  with commands telling it which blocks to cache and which to uncache.
-
-  The NameNode stores a set of path cache directives, which tell it which files
-  to cache. The NameNode also stores a set of cache pools, which are groups of
-  cache directives.  These directives and pools are persisted to the edit log
-  and fsimage, and will be loaded if the cluster is restarted.
-
-  Periodically, the NameNode rescans the namespace, to see which blocks need to
-  be cached based on the current set of path cache directives. Rescans are also
-  triggered by relevant user actions, such as adding or removing a cache
-  directive or removing a cache pool.
-
-  Cache directives also may specific a numeric cache replication, which is the
-  number of replicas to cache.  This number may be equal to or smaller than the
-  file's block replication.  If multiple cache directives cover the same file
-  with different cache replication settings, then the highest cache replication
-  setting is applied.
+  In this architecture, the NameNode is responsible for coordinating all the
+  DataNode off-heap caches in the cluster. The NameNode periodically receives
+  a <cache report> from each DataNode which describes all the blocks cached
+  on a given DN. The NameNode manages DataNode caches by piggybacking cache and
+  uncache commands on the DataNode heartbeat.
+
+  The NameNode queries its set of <cache directives> to determine
+  which paths should be cached. Cache directives are persistently stored in the
+  fsimage and edit log, and can be added, removed, and modified via Java and
+  command-line APIs. The NameNode also stores a set of <cache pools>,
+  which are administrative entities used to group cache directives together for
+  resource management and enforcing permissions.
+
+  The NameNode periodically rescans the namespace and active cache directives
+  to determine which blocks need to be cached or uncached, and assigns
+  caching work to DataNodes. Rescans can also be triggered by user actions
+  like adding or removing a cache directive or removing a cache pool.
 
   We do not currently cache blocks which are under construction, corrupt, or
   otherwise incomplete.  If a cache directive covers a symlink, the symlink
   target is not cached.
 
-  Caching is currently done on a per-file basis, although we would like to add
-  block-level granularity in the future.
+  Caching is currently done at the file or directory level. Block and
+  sub-block caching is an item of future work.
 
-* {Interface}
+* {Concepts}
 
-  The NameNode stores a list of "cache directives."  These directives contain a
-  path as well as the number of times blocks in that path should be replicated.
+** {Cache directive}
 
-  Paths can be either directories or files. If the path specifies a file, that
-  file is cached. If the path specifies a directory, all the files in the
-  directory will be cached. However, this process is not recursive-- only the
-  direct children of the directory will be cached.
-
-** {hdfs cacheadmin Shell}
-
-  Path cache directives can be created by the <<<hdfs cacheadmin
-  -addDirective>>> command and removed via the <<<hdfs cacheadmin
-  -removeDirective>>> command. To list the current path cache directives, use
-  <<<hdfs cacheadmin -listDirectives>>>. Each path cache directive has a
-  unique 64-bit ID number which will not be reused if it is deleted.  To remove
-  all path cache directives with a specified path, use <<<hdfs cacheadmin
-  -removeDirectives>>>.
-
-  Directives are grouped into "cache pools."  Each cache pool gets a share of
-  the cluster's resources. Additionally, cache pools are used for
-  authentication. Cache pools have a mode, user, and group, similar to regular
-  files. The same authentication rules are applied as for normal files. So, for
-  example, if the mode is 0777, any user can add or remove directives from the
-  cache pool. If the mode is 0644, only the owner can write to the cache pool,
-  but anyone can read from it. And so forth.
-
-  Cache pools are identified by name. They can be created by the <<<hdfs
-  cacheAdmin -addPool>>> command, modified by the <<<hdfs cacheadmin
-  -modifyPool>>> command, and removed via the <<<hdfs cacheadmin
-  -removePool>>> command. To list the current cache pools, use <<<hdfs
-  cacheAdmin -listPools>>>
+  A <cache directive> defines a path that should be cached. Paths can be
+  either directories or files. Directories are cached non-recursively,
+  meaning only files in the first-level listing of the directory are cached.
+
+  Directives also specify additional parameters, such as the cache replication
+  factor and expiration time. The replication factor specifies the number of
+  block replicas to cache. If multiple cache directives refer to the same file,
+  the maximum cache replication factor is applied.
+
+  The expiration time is specified on the command line as a <time-to-live
+  (TTL)>, a relative expiration time in the future. After a cache directive
+  expires, it is no longer considered by the NameNode when making caching
+  decisions.
+
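+  As a rough sketch of the corresponding Java client API (class and builder
+  names as in the HDFS client library; the path, pool name, and values below
+  are hypothetical, and the usual org.apache.hadoop.fs/hdfs imports are
+  assumed):
+
+----
+// Assumes fs.defaultFS points at an HDFS cluster.
+DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
+long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
+    .setPath(new Path("/user/hive/warehouse/fact"))
+    .setReplication((short) 2)                       // cache 2 replicas
+    .setPool("hive-pool")
+    .setExpiration(CacheDirectiveInfo.Expiration
+        .newRelative(7L * 24 * 60 * 60 * 1000))      // 7-day TTL in ms
+    .build());
+----
+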
+** {Cache pool}
+
+  A <cache pool> is an administrative entity used to manage groups of cache
+  directives. Cache pools have UNIX-like <permissions>, which restrict which
+  users and groups have access to the pool. Write permissions allow users to
+  add cache directives to the pool and remove them. Read permissions allow
+  users to list the cache directives in a pool, as well as additional
+  metadata. Execute permissions are unused.
+
+  Cache pools are also used for resource management. Pools can enforce a
+  maximum <limit>, which restricts the number of bytes that can be cached in
+  aggregate by directives in the pool. Normally, the sum of the pool limits
+  will approximately equal the amount of aggregate memory reserved for
+  HDFS caching on the cluster. Cache pools also track a number of statistics
+  to help cluster users determine what is and should be cached.
+
+  Pools can also enforce a maximum time-to-live. This restricts the maximum
+  expiration time of directives being added to the pool.
+
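+  A sketch of creating such a pool through the Java client API, reusing the
+  <<<dfs>>> handle from the sketch above (the pool name, owner, group, and
+  limit are examples only):
+
+----
+CachePoolInfo pool = new CachePoolInfo("hive-pool")
+    .setOwnerName("hive")
+    .setGroupName("analysts")
+    .setMode(new FsPermission((short) 0770))
+    .setLimit(64L * 1024 * 1024 * 1024);  // 64 GB aggregate limit, in bytes
+dfs.addCachePool(pool);
+----
+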
+* {<<<cacheadmin>>> command-line interface}
+
+  On the command-line, administrators and users can interact with cache pools
+  and directives via the <<<hdfs cacheadmin>>> subcommand.
+
+  Cache directives are identified by a unique, non-repeating 64-bit integer ID.
+  IDs will not be reused even if a cache directive is later removed.
+
+  Cache pools are identified by a unique string name.
+
+** {Cache directive commands}
 
 *** {addDirective}
 
-  Usage: <<<hdfs cacheadmin -addDirective -path <path> -replication <replication> -pool <pool-name> >>>
+  Usage: <<<hdfs cacheadmin -addDirective -path <path> -pool <pool-name> [-force] [-replication <replication>] [-ttl <time-to-live>]>>>
 
   Add a new cache directive.
 
 *--+--+
 \<path\> | A path to cache. The path can be a directory or a file.
 *--+--+
+\<pool-name\> | The pool to which the directive will be added. You must have write permission on the cache pool in order to add new directives.
+*--+--+
+-force | Skips checking of cache pool resource limits.
+*--+--+
 \<replication\> | The cache replication factor to use. Defaults to 1.
 *--+--+
-\<pool-name\> | The pool to which the directive will be added. You must have write permission on the cache pool in order to add new directives.
+\<time-to-live\> | How long the directive is valid. Can be specified in minutes, hours, and days, e.g. 30m, 4h, 2d. Valid units are [smhd]. "never" indicates a directive that never expires. If unspecified, the directive never expires.
 *--+--+
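+
+  For example, to cache a (hypothetical) Hive table directory with two
+  cached replicas for one week:
+
+----
+hdfs cacheadmin -addDirective -path /user/hive/warehouse/fact \
+  -pool hive-pool -replication 2 -ttl 7d
+----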
 
 *** {removeDirective}
@@ -150,7 +180,7 @@ Centralized Cache Management in HDFS
 
 *** {listDirectives}
 
-  Usage: <<<hdfs cacheadmin -listDirectives [-path <path>] [-pool <pool>] >>>
+  Usage: <<<hdfs cacheadmin -listDirectives [-stats] [-path <path>] [-pool <pool>]>>>
 
   List cache directives.
 
@@ -159,10 +189,14 @@ Centralized Cache Management in HDFS
 *--+--+
 \<pool\> | List only path cache directives in that pool.
 *--+--+
+-stats | List path-based cache directive statistics.
+*--+--+
+
+** {Cache pool commands}
 
 *** {addPool}
 
-  Usage: <<<hdfs cacheadmin -addPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-weight <weight>] >>>
+  Usage: <<<hdfs cacheadmin -addPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-limit <limit>] [-maxTtl <maxTtl>]>>>
 
   Add a new cache pool.
 
@@ -175,12 +209,14 @@ Centralized Cache Management in HDFS
 *--+--+
 \<mode\> | UNIX-style permissions for the pool. Permissions are specified in octal, e.g. 0755. By default, this is set to 0755.
 *--+--+
-\<weight\> | Weight of the pool. This is a relative measure of the importance of the pool used during cache resource management. By default, it is set to 100.
+\<limit\> | The maximum number of bytes that can be cached by directives in this pool, in aggregate. By default, no limit is set.
+*--+--+
+\<maxTtl\> | The maximum allowed time-to-live for directives being added to the pool. This can be specified in seconds, minutes, hours, and days, e.g. 120s, 30m, 4h, 2d. Valid units are [smhd]. By default, no maximum is set. A value of \"never\" specifies that there is no limit.
 *--+--+
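+
+  For example, to create a pool with a 64 GB aggregate limit and a 30-day
+  maximum TTL (the pool name and values are illustrative):
+
+----
+hdfs cacheadmin -addPool hive-pool -mode 0770 -limit 68719476736 -maxTtl 30d
+----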
 
 *** {modifyPool}
 
-  Usage: <<<hdfs cacheadmin -modifyPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-weight <weight>] >>>
+  Usage: <<<hdfs cacheadmin -modifyPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-limit <limit>] [-maxTtl <maxTtl>]>>>
 
   Modifies the metadata of an existing cache pool.
 
@@ -193,7 +229,9 @@ Centralized Cache Management in HDFS
 *--+--+
 \<mode\> | Unix-style permissions of the pool in octal.
 *--+--+
-\<weight\> | Weight of the pool.
+\<limit\> | Maximum number of bytes that can be cached by this pool.
+*--+--+
+\<maxTtl\> | The maximum allowed time-to-live for directives being added to the pool.
 *--+--+
 
 *** {removePool}
@@ -208,12 +246,14 @@ Centralized Cache Management in HDFS
 
 *** {listPools}
 
-  Usage: <<<hdfs cacheadmin -listPools [name] >>>
+  Usage: <<<hdfs cacheadmin -listPools [-stats] [<name>]>>>
 
   Display information about one or more cache pools, e.g. name, owner, group,
   permissions, etc.
 
 *--+--+
+-stats | Display additional cache pool statistics.
+*--+--+
 \<name\> | If specified, list only the named cache pool.
 *--+--+
 
@@ -244,10 +284,12 @@ Centralized Cache Management in HDFS
 
   * dfs.datanode.max.locked.memory
 
-    The DataNode will treat this as the maximum amount of memory it can use for
-    its cache. When setting this value, please remember that you will need space
-    in memory for other things, such as the Java virtual machine (JVM) itself
-    and the operating system's page cache.
+    This determines the maximum amount of memory a DataNode will use for caching.
+    The "locked-in-memory size" ulimit (<<<ulimit -l>>>) of the DataNode user
+    also needs to be increased to match this parameter (see the section on
+    {{OS Limits}} below). When setting this value, please remember that you
+    will need space in memory for other things as well, such as the DataNode
+    and application JVM heaps and the operating system page cache.
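+
+    For example, to let each DataNode lock up to 256 MB for caching, a
+    setting along these lines (value in bytes) would go in hdfs-site.xml,
+    paired with a matching <<<ulimit -l>>> for the DataNode user:
+
+----
+<property>
+  <name>dfs.datanode.max.locked.memory</name>
+  <value>268435456</value>
+</property>
+----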
 
 *** Optional
 

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm Thu Jan 30 01:55:14 2014
@@ -19,8 +19,6 @@
 
 HDFS Federation
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
   This guide provides an overview of the HDFS Federation feature and

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm Thu Jan 30 01:55:14 2014
@@ -18,8 +18,6 @@
 
 HDFS High Availability
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * {Purpose}

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm Thu Jan 30 01:55:14 2014
@@ -18,8 +18,6 @@
 
 HDFS High Availability Using the Quorum Journal Manager
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * {Purpose}

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsEditsViewer.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsEditsViewer.apt.vm?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsEditsViewer.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsEditsViewer.apt.vm Thu Jan 30 01:55:14 2014
@@ -20,8 +20,6 @@
 
 Offline Edits Viewer Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Overview

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsImageViewer.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsImageViewer.apt.vm?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsImageViewer.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsImageViewer.apt.vm Thu Jan 30 01:55:14 2014
@@ -18,8 +18,6 @@
 
 Offline Image Viewer Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Overview
@@ -64,9 +62,9 @@ Offline Image Viewer Guide
       but no data recorded. The default record delimiter is a tab, but
       this may be changed via the -delimiter command line argument. This
       processor is designed to create output that is easily analyzed by
-      other tools, such as [36]Apache Pig. See the [37]Analyzing Results
-      section for further information on using this processor to analyze
-      the contents of fsimage files.
+      other tools, such as {{{http://pig.apache.org}Apache Pig}}. See
+      the {{Analyzing Results}} section for further information on using
+      this processor to analyze the contents of fsimage files.
 
    [[4]] XML creates an XML document of the fsimage and includes all of the
       information within the fsimage, similar to the lsr processor. The

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm Thu Jan 30 01:55:14 2014
@@ -18,8 +18,6 @@
 
 HDFS Permissions Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Overview
@@ -55,8 +53,10 @@ HDFS Permissions Guide
 
      * If the user name matches the owner of foo, then the owner
        permissions are tested;
+
      * Else if the group of foo matches any of member of the groups list,
        then the group permissions are tested;
+
      * Otherwise the other permissions of foo are tested.
 
    If a permissions check fails, the client operation fails.

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm Thu Jan 30 01:55:14 2014
@@ -18,8 +18,6 @@
 
 HDFS Quotas Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Overview

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm Thu Jan 30 01:55:14 2014
@@ -108,9 +108,11 @@ HDFS Users Guide
    The following documents describe how to install and set up a Hadoop
    cluster:
 
-     * {{Single Node Setup}} for first-time users.
+     * {{{../hadoop-common/SingleCluster.html}Single Node Setup}}
+       for first-time users.
 
-     * {{Cluster Setup}} for large, distributed clusters.
+     * {{{../hadoop-common/ClusterSetup.html}Cluster Setup}}
+       for large, distributed clusters.
 
   The rest of this document assumes the user is able to set up and run an
   HDFS with at least one DataNode. For the purpose of this document, both
@@ -136,7 +138,8 @@ HDFS Users Guide
   for a command. These commands support most of the normal file system
    operations like copying files, changing file permissions, etc. It also
    supports a few HDFS specific operations like changing replication of
-   files. For more information see {{{File System Shell Guide}}}.
+   files. For more information see {{{../hadoop-common/FileSystemShell.html}
+   File System Shell Guide}}.
 
 **  DFSAdmin Command
 
@@ -169,7 +172,7 @@ HDFS Users Guide
       of racks and datanodes attached to the racks as viewed by the
        NameNode.
 
-   For command usage, see {{{dfsadmin}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#dfsadmin}dfsadmin}}.
 
 * Secondary NameNode
 
@@ -203,7 +206,8 @@ HDFS Users Guide
   So that the checkpointed image is always ready to be read by the
    primary NameNode if necessary.
 
-   For command usage, see {{{secondarynamenode}}}.
+   For command usage,
+   see {{{../hadoop-common/CommandsManual.html#secondarynamenode}secondarynamenode}}.
 
 * Checkpoint Node
 
@@ -245,7 +249,7 @@ HDFS Users Guide
    Multiple checkpoint nodes may be specified in the cluster configuration
    file.
 
-   For command usage, see {{{namenode}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
 
 * Backup Node
 
@@ -287,7 +291,7 @@ HDFS Users Guide
 
    For a complete discussion of the motivation behind the creation of the
    Backup node and Checkpoint node, see {{{https://issues.apache.org/jira/browse/HADOOP-4539}HADOOP-4539}}.
-   For command usage, see {{{namenode}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
 
 * Import Checkpoint
 
@@ -310,7 +314,7 @@ HDFS Users Guide
    verifies that the image in <<<dfs.namenode.checkpoint.dir>>> is consistent,
    but does not modify it in any way.
 
-   For command usage, see {{{namenode}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
 
 * Rebalancer
 
@@ -337,7 +341,7 @@ HDFS Users Guide
    A brief administrator's guide for rebalancer as a PDF is attached to
    {{{https://issues.apache.org/jira/browse/HADOOP-1652}HADOOP-1652}}.
 
-   For command usage, see {{{balancer}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.
 
 * Rack Awareness
 
@@ -379,8 +383,9 @@ HDFS Users Guide
    most of the recoverable failures. By default fsck ignores open files
    but provides an option to select all files during reporting. The HDFS
    fsck command is not a Hadoop shell command. It can be run as
-   <<<bin/hadoop fsck>>>. For command usage, see {{{fsck}}}. fsck can be run on the
-   whole file system or on a subset of files.
+   <<<bin/hadoop fsck>>>. For command usage, see 
+   {{{../hadoop-common/CommandsManual.html#fsck}fsck}}. fsck can be run on
+   the whole file system or on a subset of files.
 
 * fetchdt
 
@@ -393,7 +398,8 @@ HDFS Users Guide
   command. It can be run as <<<bin/hadoop fetchdt DTfile>>>. After you get
   the token you can run an HDFS command without having Kerberos tickets,
   by pointing the <<<HADOOP_TOKEN_FILE_LOCATION>>> environment variable to the
-   delegation token file. For command usage, see {{{fetchdt}}} command.
+   delegation token file. For command usage, see
+   {{{../hadoop-common/CommandsManual.html#fetchdt}fetchdt}} command.
 
 * Recovery Mode
 
@@ -427,10 +433,11 @@ HDFS Users Guide
    let alone to restart HDFS from scratch. HDFS allows administrators to
   go back to an earlier version of Hadoop and roll back the cluster to
   the state it was in before the upgrade. HDFS upgrade is described in more
-   detail in {{{Hadoop Upgrade}}} Wiki page. HDFS can have one such backup at a
-   time. Before upgrading, administrators need to remove existing backup
-   using bin/hadoop dfsadmin <<<-finalizeUpgrade>>> command. The following
-   briefly describes the typical upgrade procedure:
+   detail in the {{{http://wiki.apache.org/hadoop/Hadoop_Upgrade}Hadoop Upgrade}}
+   Wiki page. HDFS can have one such backup at a time. Before upgrading,
+   administrators need to remove the existing backup using the
+   <<<bin/hadoop dfsadmin -finalizeUpgrade>>> command. The following
+   briefly describes the typical upgrade procedure:
 
     * Before upgrading Hadoop software, finalize if there is an existing
       backup. <<<dfsadmin -upgradeProgress>>> status can tell if the cluster
@@ -450,7 +457,7 @@ HDFS Users Guide
 
           * stop the cluster and distribute earlier version of Hadoop.
 
-          * start the cluster with rollback option. (<<<bin/start-dfs.h -rollback>>>).
+          * start the cluster with rollback option. (<<<bin/start-dfs.sh -rollback>>>).
 
 * File Permissions and Security
 
@@ -465,14 +472,15 @@ HDFS Users Guide
 * Scalability
 
    Hadoop currently runs on clusters with thousands of nodes. The
-   {{{PoweredBy}}} Wiki page lists some of the organizations that deploy Hadoop
-   on large clusters. HDFS has one NameNode for each cluster. Currently
-   the total memory available on NameNode is the primary scalability
-   limitation. On very large clusters, increasing average size of files
-   stored in HDFS helps with increasing cluster size without increasing
-   memory requirements on NameNode. The default configuration may not
-   suite very large clustes. The {{{FAQ}}} Wiki page lists suggested
-   configuration improvements for large Hadoop clusters.
+   {{{http://wiki.apache.org/hadoop/PoweredBy}PoweredBy}} Wiki page lists
+   some of the organizations that deploy Hadoop on large clusters.
+   HDFS has one NameNode for each cluster. Currently the total memory
+   available on NameNode is the primary scalability limitation.
+   On very large clusters, increasing average size of files stored in
+   HDFS helps with increasing cluster size without increasing memory
+   requirements on NameNode. The default configuration may not suit
+   very large clusters. The {{{http://wiki.apache.org/hadoop/FAQ}FAQ}}
+   Wiki page lists suggested configuration improvements for large Hadoop clusters.
 
 * Related Documentation
 
@@ -481,19 +489,22 @@ HDFS Users Guide
    documentation about Hadoop and HDFS. The following list is a starting
    point for further exploration:
 
-     * {{{Hadoop Site}}}: The home page for the Apache Hadoop site.
+     * {{{http://hadoop.apache.org}Hadoop Site}}: The home page for
+       the Apache Hadoop site.
 
-     * {{{Hadoop Wiki}}}: The home page (FrontPage) for the Hadoop Wiki. Unlike
+     * {{{http://wiki.apache.org/hadoop/FrontPage}Hadoop Wiki}}:
+       The home page (FrontPage) for the Hadoop Wiki. Unlike
        the released documentation, which is part of Hadoop source tree,
        Hadoop Wiki is regularly edited by Hadoop Community.
 
-     * {{{FAQ}}}: The FAQ Wiki page.
+     * {{{http://wiki.apache.org/hadoop/FAQ}FAQ}}: The FAQ Wiki page.
 
-     * {{{Hadoop JavaDoc API}}}.
+     * {{{../../api/index.html}Hadoop JavaDoc API}}.
 
-     * {{{Hadoop User Mailing List}}}: core-user[at]hadoop.apache.org.
+     * Hadoop User Mailing List: user[at]hadoop.apache.org.
 
-     * Explore {{{src/hdfs/hdfs-default.xml}}}. It includes brief description of
-       most of the configuration variables available.
+     * Explore {{{./hdfs-default.xml}hdfs-default.xml}}. It includes a
+       brief description of most of the configuration variables available.
 
-     * {{{Hadoop Commands Guide}}}: Hadoop commands usage.
+     * {{{../hadoop-common/CommandsManual.html}Hadoop Commands Guide}}:
+       Hadoop commands usage.

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Hftp.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Hftp.apt.vm?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Hftp.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Hftp.apt.vm Thu Jan 30 01:55:14 2014
@@ -18,8 +18,6 @@
 
 HFTP Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Introduction

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ShortCircuitLocalReads.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ShortCircuitLocalReads.apt.vm?rev=1562670&r1=1562669&r2=1562670&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ShortCircuitLocalReads.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ShortCircuitLocalReads.apt.vm Thu Jan 30 01:55:14 2014
@@ -19,8 +19,6 @@
 
 HDFS Short-Circuit Local Reads
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * {Background}


