hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1399950 [5/27] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apach...
Date: Fri, 19 Oct 2012 02:28:07 GMT
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Oct 19 02:25:55 2012
@@ -1,6 +1,6 @@
 Hadoop HDFS Change Log
 
-Trunk (unreleased changes)
+Trunk (Unreleased)
 
   INCOMPATIBLE CHANGES
 
@@ -8,14 +8,15 @@ Trunk (unreleased changes)
 
   NEW FEATURES
 
-    HDFS-234. Integration with BookKeeper logging system. (Ivan Kelly 
-    via jitendra)
-
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
 
     HDFS-3601. Add BlockPlacementPolicyWithNodeGroup to support block placement
     with 4-layer network topology.  (Junping Du via szetszwo)
 
+    HDFS-3077. Implement QuorumJournalManager, a distributed mechanism for
+    reliably storing HDFS edit logs. See the dedicated section below for a
+    breakdown of subtasks.
+
   IMPROVEMENTS
 
     HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
@@ -104,8 +105,50 @@ Trunk (unreleased changes)
 
     HDFS-3573. Supply NamespaceInfo when instantiating JournalManagers (todd)
 
-    HDFS-3190. Simple refactors in existing NN code to assist
-    QuorumJournalManager extension. (todd)
+    HDFS-3630. Modify TestPersistBlocks to use both flush and hflush. (sanjay)
+
+    HDFS-3768. Exception in TestJettyHelper is incorrect. 
+    (Eli Reisman via jghoman)
+
+    HDFS-3695. Genericize format() to non-file JournalManagers. (todd)
+
+    HDFS-3789. JournalManager#format() should be able to throw IOException
+    (Ivan Kelly via todd)
+
+    HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
+    suresh)
+
+    HDFS-3803. Change BlockPoolSliceScanner chatty INFO log to DEBUG.
+    (Andrew Purtell via suresh)
+
+    HDFS-3817. Avoid printing SafeModeException stack trace.
+    (Brandon Li via suresh)
+
+    HDFS-3819. Should check whether invalidate work percentage default value is 
+    not greater than 1.0f. (Jing Zhao via jitendra)
+
+    HDFS-3844. Add @Override and remove {@inheritdoc} and unnecessary
+    imports. (Jing Zhao via suresh)
+
+    HDFS-3851. DFSOutputStream class code cleanup. (Jing Zhao via suresh)
+
+    HDFS-2580. NameNode#main(...) can make use of GenericOptionsParser. (harsh)
+
+    HDFS-3880. Use Builder to build RPC server in HDFS.
+    (Brandon Li via suresh)
+
+    HDFS-2127. Add a test that ensure AccessControlExceptions contain
+    a full path. (Stephen Chu via eli)
+
+    HDFS-3995. Use DFSTestUtil.createFile() for file creation and 
+    writing in test cases. (Jing Zhao via suresh)
+
+    HDFS-3735. NameNode WebUI should allow sorting live datanode list by fields
+    Block Pool Used, Block Pool Used(%) and Failed Volumes.
+    (Brahma Reddy Battula via suresh)
+
+    HDFS-4052. BlockManager#invalidateWork should print log outside the lock.
+    (Jing Zhao via suresh)
 
   OPTIMIZATIONS
 
@@ -120,10 +163,6 @@ Trunk (unreleased changes)
 
     HDFS-2314. MRV1 test compilation broken after HDFS-2197 (todd)
 
-    HDFS-2330. In NNStorage and FSImagePreTransactionalStorageInspector,
-    IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
-    Rao G via szetszwo)
-
     HDFS-46.   Change default namespace quota of root directory from
     Integer.MAX_VALUE to Long.MAX_VALUE. (Uma Maheswara Rao G via szetszwo)
 
@@ -177,13 +216,274 @@ Trunk (unreleased changes)
     HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049.
     (acmurthy via eli)
 
-Branch-2 ( Unreleased changes )
+    HDFS-3625. Fix TestBackupNode by properly initializing edit log during
+    startup. (Junping Du via todd)
+
+    HDFS-3792. Fix two findbugs introduced by HDFS-3695 (todd)
+
+    HDFS-3827. TestHASafeMode#assertSafemode method should be made static.
+    (Jing Zhao via suresh)
+
+    HDFS-3834. Remove unused static fields NAME, DESCRIPTION and Usage from
+    Command. (Jing Zhao via suresh)
+
+    HADOOP-8158. Interrupting hadoop fs -put from the command line
+    causes a LeaseExpiredException. (daryn via harsh)
+
+  BREAKDOWN OF HDFS-3077 SUBTASKS
+
+    HDFS-3077. Quorum-based protocol for reading and writing edit logs.
+    (todd, Brandon Li, and Hari Mankude via todd)
+    
+    HDFS-3694. Fix getEditLogManifest to fetch httpPort if necessary (todd)
+    
+    HDFS-3692. Support purgeEditLogs() call to remotely purge logs on JNs
+    (todd)
+    
+    HDFS-3693. JNStorage should read its storage info even before a writer
+    becomes active (todd)
+    
+    HDFS-3725. Fix QJM startup when individual JNs have gaps (todd)
+    
+    HDFS-3741. Exhaustive failure injection test for skipped RPCs (todd)
+    
+    HDFS-3773. TestNNWithQJM fails after HDFS-3741. (atm)
+    
+    HDFS-3793. Implement genericized format() in QJM (todd)
+    
+    HDFS-3795. QJM: validate journal dir at startup (todd)
+    
+    HDFS-3798. Avoid throwing NPE when finalizeSegment() is called on invalid
+    segment (todd)
+    
+    HDFS-3799. QJM: handle empty log segments during recovery (todd)
+    
+    HDFS-3797. QJM: add segment txid as a parameter to journal() RPC (todd)
+    
+    HDFS-3800. improvements to QJM fault testing (todd)
+    
+    HDFS-3823. QJM: TestQJMWithFaults fails occasionally because of missed
+    setting of HTTP port. (todd and atm)
+    
+    HDFS-3826. QJM: Some trivial logging / exception text improvements. (todd
+    and atm)
+    
+    HDFS-3839. QJM: hadoop-daemon.sh should be updated to accept "journalnode"
+    (eli)
+    
+    HDFS-3845. Fixes for edge cases in QJM recovery protocol (todd)
+    
+    HDFS-3877. QJM: Provide defaults for dfs.journalnode.*address (eli)
+    
+    HDFS-3863. Track last "committed" txid in QJM (todd)
+    
+    HDFS-3869. Expose non-file journal manager details in web UI (todd)
+    
+    HDFS-3884. Journal format() should reset cached values (todd)
+    
+    HDFS-3870. Add metrics to JournalNode (todd)
+    
+    HDFS-3891. Make selectInputStreams throw IOE instead of RTE (todd)
+    
+    HDFS-3726. If a logger misses an RPC, don't retry that logger until next
+    segment (todd)
+    
+    HDFS-3893. QJM: Make QJM work with security enabled. (atm)
+    
+    HDFS-3897. QJM: TestBlockToken fails after HDFS-3893. (atm)
+    
+    HDFS-3898. QJM: enable TCP_NODELAY for IPC (todd)
+    
+    HDFS-3885. QJM: optimize log sync when JN is lagging behind (todd)
+    
+    HDFS-3900. QJM: avoid validating log segments on log rolls (todd)
+    
+    HDFS-3901. QJM: send 'heartbeat' messages to JNs even when they are
+    out-of-sync (todd)
+    
+    HDFS-3899. QJM: Add client-side metrics (todd)
+    
+    HDFS-3914. QJM: acceptRecovery should abort current segment (todd)
+    
+    HDFS-3915. QJM: Failover fails with auth error in secure cluster (todd)
+    
+    HDFS-3906. QJM: quorum timeout on failover with large log segment (todd)
+    
+    HDFS-3840. JournalNodes log JournalNotFormattedException backtrace error
+    before being formatted (todd)
+    
+    HDFS-3894. QJM: testRecoverAfterDoubleFailures can be flaky due to IPC
+    client caching (todd)
+    
+    HDFS-3926. QJM: Add user documentation for QJM. (atm)
+    
+    HDFS-3943. QJM: remove currently-unused md5sum field (todd)
+    
+    HDFS-3950. QJM: misc TODO cleanup, improved log messages, etc. (todd)
+    
+    HDFS-3955. QJM: Make acceptRecovery() atomic. (todd)
+    
+    HDFS-3956. QJM: purge temporary files when no longer within retention
+    period (todd)
+    
+    HDFS-4004. TestJournalNode#testJournal fails because of test case execution
+    order (Chao Shi via todd)
+    
+    HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
+    (Chao Shi via todd)
+
+Release 2.0.3-alpha - Unreleased 
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+    HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS.
+    (Jaimin D Jetly and Jing Zhao via szetszwo)
+
+    HDFS-3912. Detect and avoid stale datanodes for writes.
+    (Jing Zhao via suresh)
+
+    HDFS-4059. Add number of stale DataNodes to metrics. (Jing Zhao via suresh)
+
+  IMPROVEMENTS
+  
+    HDFS-3925. Prettify PipelineAck#toString() for printing to a log
+    (Andrew Wang via todd)
+
+    HDFS-3939. NN RPC address cleanup. (eli)
+
+    HDFS-3373. Change DFSClient input stream socket cache to global static and
+    add a thread to cleanup expired cache entries. (John George via szetszwo)
+
+    HDFS-3896. Add descriptions for dfs.namenode.rpc-address and
+    dfs.namenode.servicerpc-address to hdfs-default.xml. (Jeff Lord via atm)
+
+    HDFS-3996. Add debug log removed in HDFS-3873 back. (eli)
+
+    HDFS-3916. libwebhdfs (C client) code cleanups.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3813. Log error message if security and WebHDFS are enabled but
+    principal/keytab are not configured. (Stephen Chu via atm)
+
+    HDFS-3483. Better error message when hdfs fsck is run against a ViewFS
+    config. (Stephen Fritz via atm)
+
+    HDFS-3682. MiniDFSCluster#init should provide more info when it fails.
+    (todd via eli)
+
+    HDFS-4008. TestBalancerWithEncryptedTransfer needs a timeout. (eli)
+
+    HDFS-4007. Rehabilitate bit-rotted unit tests under
+    hadoop-hdfs-project/hadoop-hdfs/src/test/unit/ 
+    (Colin Patrick McCabe via todd)
+
+    HDFS-4041. Hadoop HDFS Maven protoc calls must not depend on external
+    sh script. (Chris Nauroth via suresh)
+
+    HADOOP-8911. CRLF characters in source and text files.
+    (Raja Aluri via suresh)
+
+    HDFS-4037. Rename the getReplication() method in BlockCollection to
+    getBlockReplication(). (szetszwo)
+
+    HDFS-4036. Remove "throw UnresolvedLinkException" from
+    FSDirectory.unprotectedAddFile(..). (Jing Zhao via szetszwo)
+
+    HDFS-2946. HA: Put a cap on the number of completed edits files retained
+    by the NN. (atm)
+
+    HDFS-4029. GenerationStamp should use an AtomicLong. (eli)
+
+    HDFS-4068. DatanodeID and DatanodeInfo member should be private. (eli)
+
+    HDFS-4073. Two minor improvements to FSDirectory.  (Jing Zhao via szetszwo)
+
+    HDFS-4074. Remove the unused default constructor from INode.  (Brandon Li
+    via szetszwo)
+
+    HDFS-4053. Increase the default block size. (eli)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever.
+    (Andy Isaacson via eli)
+
+    HDFS-3924. Multi-byte id in HdfsVolumeId. (Andrew Wang via atm)
+
+    HDFS-3936. MiniDFSCluster shutdown races with BlocksMap usage. (eli)
+
+    HDFS-3951. datanode web ui does not work over HTTPS when datanode is
+    started in secure mode. (tucu)
+
+    HDFS-3949. NameNodeRpcServer#join should join on both client and
+    server RPC servers. (eli)
+
+    HDFS-3932. NameNode Web UI broken if the rpc-address is set to the wildcard.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3931. TestDatanodeBlockScanner#testBlockCorruptionPolicy2 is broken.
+    (Andy Isaacson via eli)
+
+    HDFS-3964. Make NN log of fs.defaultFS debug rather than info. (eli)
+
+    HDFS-3992. Method org.apache.hadoop.hdfs.TestHftpFileSystem.tearDown()
+    sometimes throws NPEs. (Ivan A. Veselovsky via atm)
+
+    HDFS-3753. Tests don't run with native libraries.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-4000. TestParallelLocalRead fails with "input ByteBuffers
+    must be direct buffers". (Colin Patrick McCabe via eli)
+
+    HDFS-3999. HttpFS OPEN operation expects len parameter, it should be
+    length. (tucu)
+
+    HDFS-4006. TestCheckpoint#testSecondaryHasVeryOutOfDateImage
+    occasionally fails due to unexpected exit. (todd via eli)
+
+    HDFS-4003. test-patch should build the common native libs before
+    running hdfs tests. (Colin Patrick McCabe via eli)
+
+    HDFS-4018. testMiniDFSClusterWithMultipleNN is missing some
+    cluster cleanup. (eli)
+
+    HDFS-4020. TestRBWBlockInvalidation may time out. (eli)
+
+    HDFS-4021. Misleading error message when resources are low on the NameNode.
+    (Christopher Conner via atm)
+
+    HDFS-4044. Duplicate ChecksumType definition in HDFS .proto files.
+    (Binglin Chang via suresh)
+
+    HDFS-4049. Fix hflush performance regression due to nagling delays
+    (todd)
+
+    HDFS-3678. Edit log files are never being purged from 2NN. (atm)
+
+    HDFS-4058. DirectoryScanner may fail with IOOB if the directory
+    scanning threads return out of volume order. (eli)
+
+    HDFS-3985. Add timeouts to TestMulitipleNNDataBlockScanner. (todd via eli)
+
+    HDFS-4061. TestBalancer and TestUnderReplicatedBlocks need timeouts. (eli)
+
+    HDFS-3997. OfflineImageViewer incorrectly passes value of imageVersion when
+    visiting IS_COMPRESSED element. (Mithun Radhakrishnan via atm)
+
+    HDFS-4055. TestAuditLogs is flaky. (Binglin Chang via eli)
+
+Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES
 
     HDFS-3446. HostsFileReader silently ignores bad includes/excludes
     (Matthew Jacobs via todd)
 
+    HDFS-3755. Creating an already-open-for-write file with overwrite=true fails
+    (todd)
+
   NEW FEATURES
 
     HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)
@@ -194,6 +494,21 @@ Branch-2 ( Unreleased changes )
     HDFS-3518. Add a utility method HdfsUtils.isHealthy(uri) for checking if
     the given HDFS is healthy. (szetszwo)
 
+    HDFS-3113. httpfs does not support delegation tokens. (tucu)
+
+    HDFS-3513. HttpFS should cache filesystems. (tucu)
+
+    HDFS-3637. Add support for encrypting the DataTransferProtocol. (atm)
+
+    HDFS-3150. Add option for clients to contact DNs via hostname. (eli)
+
+    HDFS-2793. Add an admin command to trigger an edit log roll. (todd)
+
+    HDFS-3703. Datanodes are marked stale if heartbeat is not received in
+    configured timeout and are selected as the last location to read from.
+    (Jing Zhao via suresh)
+
   IMPROVEMENTS
 
     HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
@@ -298,10 +613,110 @@ Branch-2 ( Unreleased changes )
     HDFS-3613. GSet prints some INFO level values, which aren't
     really very useful to all (Andrew Wang via harsh)
 
-    HDFS-3611. NameNode prints unnecessary WARNs about edit log normally skipping a few bytes. (Colin Patrick McCabe via harsh)
+    HDFS-3611. NameNode prints unnecessary WARNs about edit log normally skipping 
+    a few bytes. (Colin Patrick McCabe via harsh)
 
     HDFS-3582. Hook daemon process exit for testing. (eli)
 
+    HDFS-3641. Move server Util time methods to common and use now
+    instead of System#currentTimeMillis. (eli)
+
+    HDFS-3633. libhdfs: hdfsDelete should pass JNI_FALSE or JNI_TRUE.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-799. libhdfs must call DetachCurrentThread when a thread is destroyed.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3306. fuse_dfs: don't lock release operations.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3612. Single namenode image directory config warning can
+    be improved. (Andy Isaacson via harsh)
+
+    HDFS-3606. libhdfs: create self-contained unit test.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3539. libhdfs code cleanups. (Colin Patrick McCabe via eli)
+
+    HDFS-3610. fuse_dfs: Provide a way to use the default (configured) NN URI.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3663. MiniDFSCluster should capture the code path that led to
+    the first ExitException. (eli)
+
+    HDFS-3659. Add missing @Override to methods across the hadoop-hdfs
+    project. (Brandon Li via harsh)
+
+    HDFS-3537. Move libhdfs and fuse-dfs source to native subdirectories.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3665. Add a test for renaming across file systems via a symlink. (eli)
+
+    HDFS-3666. Plumb more exception messages to terminate. (eli)
+
+    HDFS-3673. libhdfs: fix some compiler warnings. (Colin Patrick McCabe via eli)
+
+    HDFS-3675. libhdfs: follow documented return codes. (Colin Patrick McCabe via eli)
+
+    HDFS-1249. With fuse-dfs, chown which only has owner (or only group)
+    argument fails with Input/output error. (Colin Patrick McCabe via eli)
+
+    HDFS-3583. Convert remaining tests to Junit4. (Andrew Wang via atm)
+
+    HDFS-3711. Manually convert remaining tests to JUnit4. (Andrew Wang via atm)
+
+    HDFS-3650. Use MutableQuantiles to provide latency histograms for various
+    operations. (Andrew Wang via atm)
+
+    HDFS-3667.  Add retry support to WebHdfsFileSystem.  (szetszwo)
+
+    HDFS-3291. add test that covers HttpFS working w/ a non-HDFS Hadoop
+    filesystem (tucu)
+
+    HDFS-3634. Add self-contained, mavenized fuse_dfs test. (Colin Patrick
+    McCabe via atm)
+
+    HDFS-3190. Simple refactors in existing NN code to assist
+    QuorumJournalManager extension. (todd)
+
+    HDFS-3276. initializeSharedEdits should have a -nonInteractive flag (todd)
+
+    HDFS-3765. namenode -initializeSharedEdits should be able to initialize
+    all shared storages. (Vinay and todd via todd)
+
+    HDFS-3802. StartupOption.name in HdfsServerConstants should be final.
+    (Jing Zhao via szetszwo)
+
+    HDFS-3796. Speed up edit log tests by avoiding fsync() (todd)
+
+    HDFS-2963. Console Output is confusing while executing metasave
+    (dfsadmin command). (Andrew Wang via eli)
+
+    HDFS-3672. Expose disk-location information for blocks to enable better
+    scheduling. (Andrew Wang via atm)
+
+    HDFS-2727. libhdfs should get the default block size from the server.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-2686. Remove DistributedUpgrade related code. (suresh)
+
+    HDFS-3832. Remove protocol methods related to DistributedUpgrade. (suresh)
+
+    HDFS-3177. Update DFSClient and DataXceiver to handle different checksum
+    types in file checksum computation.  (Kihwal Lee via szetszwo)
+
+    HDFS-3871. Change NameNodeProxies to use RetryUtils.  (Arun C Murthy
+    via szetszwo)
+
+    HDFS-3887. Remove redundant chooseTarget methods in BlockPlacementPolicy.
+    (Jing Zhao via szetszwo)
+
+    HDFS-3888. Clean up BlockPlacementPolicyDefault.  (Jing Zhao via szetszwo)
+
+    HDFS-3907. Allow multiple users for local block readers. (eli)
+
+    HDFS-3910. DFSTestUtil#waitReplication should timeout. (eli)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
@@ -313,6 +728,13 @@ Branch-2 ( Unreleased changes )
     HDFS-3110. Use directRead API to reduce the number of buffer copies in
     libhdfs (Henry Robinson via todd)
 
+    HDFS-3697. Enable fadvise readahead by default. (todd)
+
+    HDFS-2421. Improve the concurrency of SerialNumberMap in NameNode.
+    (Jing Zhao and Weiyan Wang via szetszwo)
+
+    HDFS-3866. HttpFS POM should have property where to download tomcat
+    from. (zero45 via tucu)
+
   BUG FIXES
 
     HDFS-3385. The last block of INodeFileUnderConstruction is not
@@ -454,6 +876,145 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3615. Two BlockTokenSecretManager findbugs warnings. (atm)
 
+    HDFS-470. libhdfs should handle 0-length reads from FSInputStream
+    correctly. (Colin Patrick McCabe via eli)
+
+    HDFS-3492. fix some misuses of InputStream#skip.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3609. libhdfs: don't force the URI to look like hdfs://hostname:port.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3605. Block mistakenly marked corrupt during edit log catchup
+    phase of failover. (todd and Brahma Reddy Battula via todd)
+
+    HDFS-3690. BlockPlacementPolicyDefault incorrectly casts LOG. (eli)
+
+    HDFS-3597. SNN fails to start after DFS upgrade. (Andy Isaacson via todd)
+
+    HDFS-3608. fuse_dfs: detect changes in UID ticket cache. (Colin Patrick
+    McCabe via atm)
+
+    HDFS-3709. TestStartup tests still binding to the ephemeral port. (eli)
+
+    HDFS-3720. hdfs.h must get packaged. (Colin Patrick McCabe via atm)
+
+    HDFS-3626. Creating file with invalid path can corrupt edit log (todd)
+
+    HDFS-3679. fuse_dfs notrash option sets usetrash. (Conrad Meyer via suresh)
+
+    HDFS-3732. fuse_dfs: incorrect configuration value checked for connection
+    expiry timer period. (Colin Patrick McCabe via atm)
+
+    HDFS-3738. TestDFSClientRetries#testFailuresArePerOperation sets incorrect
+    timeout config. (atm)
+
+    HDFS-3756. DelegationTokenFetcher creates 2 HTTP connections, the second 
+    one not properly configured. (tucu)
+
+    HDFS-3579. libhdfs: fix exception handling. (Colin Patrick McCabe via atm)
+
+    HDFS-3754. BlockSender doesn't shutdown ReadaheadPool threads. (eli)
+
+    HDFS-3760. primitiveCreate is a write, not a read. (Andy Isaacson via atm)
+
+    HDFS-3710. libhdfs misuses O_RDONLY/WRONLY/RDWR. (Andy Isaacson via atm)
+
+    HDFS-3721. hsync support broke wire compatibility. (todd and atm)
+
+    HDFS-3758. TestFuseDFS test failing. (Colin Patrick McCabe via eli)
+
+    HDFS-2330. In NNStorage and FSImagePreTransactionalStorageInspector,
+    IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
+    Rao G via szetszwo)
+
+    HDFS-3790. test_fuse_dfs.c doesn't compile on centos 5. (Colin Patrick
+    McCabe via atm)
+
+    HDFS-3658. Fix bugs in TestDFSClientRetries and add more tests.  (szetszwo)
+
+    HDFS-3794. WebHDFS OPEN returns the incorrect Content-Length in the HTTP
+    header when offset is specified and length is omitted.
+    (Ravi Prakash via szetszwo)
+
+    HDFS-3048. Small race in BlockManager#close. (Andy Isaacson via eli)
+
+    HDFS-3194. DataNode block scanner is running too frequently.
+    (Andy Isaacson via eli)
+
+    HDFS-3808. fuse_dfs: postpone libhdfs initialization until after fork.
+    (Colin Patrick McCabe via atm)
+
+    HDFS-3788. ByteRangeInputStream should not expect HTTP Content-Length header
+    when chunked transfer-encoding is used.  (szetszwo)
+
+    HDFS-3816. Invalidate work percentage default value should be 0.32f
+    instead of 32. (Jing Zhao via suresh)
+
+    HDFS-3707. TestFSInputChecker: improper use of skip.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3830. test_libhdfs_threaded: use forceNewInstance.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3835. Long-lived 2NN cannot perform a checkpoint if security is
+    enabled and the NN restarts with outstanding delegation tokens. (atm)
+
+    HDFS-3715. Fix TestFileCreation#testFileCreationNamenodeRestart.
+    (Andrew Whang via eli)
+
+    HDFS-3731. 2.0 release upgrade must handle blocks being written from 1.0.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3683. Edit log replay progress indicator shows >100% complete. (Plamen
+    Jeliazkov via atm)
+
+    HDFS-3856. TestHDFSServerPorts failure is causing surefire fork failure.
+    (eli)
+
+    HDFS-3860. HeartbeatManager#Monitor may wrongly hold the writelock of
+    namesystem. (Jing Zhao via atm)
+
+    HDFS-3849. When re-loading the FSImage, we should clear the existing
+    genStamp and leases. (Colin Patrick McCabe via atm)
+
+    HDFS-3864. NN does not update internal file mtime for OP_CLOSE when reading
+    from the edit log. (atm)
+
+    HDFS-3837. Fix DataNode.recoverBlock findbugs warning. (eli)
+
+    HDFS-3733. Audit logs should include WebHDFS access. (Andy Isaacson via 
+    eli)
+
+    HDFS-3466. Get HTTP kerberos principal from the web authentication keytab.
+    (omalley)
+
+    HDFS-3469. start-dfs.sh will start zkfc, but stop-dfs.sh will not stop
+    zkfc similarly. (Vinay via umamahesh)
+
+    HDFS-1490. TransferFSImage should timeout (Dmytro Molkov and Vinay via todd)
+
+    HDFS-3828. Block Scanner rescans blocks too frequently.
+    (Andy Isaacson via eli)
+
+    HDFS-3809. Make BKJM use protobufs for all serialization with ZK.
+    (Ivan Kelly via umamahesh)
+
+    HDFS-3895. hadoop-client must include commons-cli (tucu)
+
+    HDFS-2757. Cannot read a local block that's being written to when
+    using the local read short circuit. (Jean-Daniel Cryans via eli)
+
+    HDFS-3664. BlockManager race when stopping active services.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3928. MiniDFSCluster should reset the first ExitException on
+    shutdown. (eli)
+
+    HDFS-3938. remove current limitations from HttpFS docs. (tucu)
+
+    HDFS-3944. Httpfs resolveAuthority() is not resolving host correctly. (tucu)
+
+    HDFS-3972. Trash emptier fails in secure HA cluster. (todd via eli)
+ 
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
@@ -470,6 +1031,12 @@ Branch-2 ( Unreleased changes )
     
     HDFS-3432. TestDFSZKFailoverController tries to fail over too early (todd)
 
+    HDFS-3833. TestDFSShell fails on windows due to concurrent file 
+    read/write. (Brandon Li via suresh)
+
+    HDFS-3902. TestDatanodeBlockScanner#testBlockCorruptionPolicy is broken.
+    (Andy Isaacson via eli)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES
@@ -553,6 +1120,9 @@ Release 2.0.0-alpha - 05-23-2012
 
     HDFS-3298. Add HdfsDataOutputStream as a public API.  (szetszwo)
 
+    HDFS-234. Integration with BookKeeper logging system. (Ivan Kelly 
+    via jitendra)
+
   IMPROVEMENTS
 
     HDFS-2018. Move all journal stream management code into one place.
@@ -748,7 +1318,7 @@ Release 2.0.0-alpha - 05-23-2012
     (Brandon Li via szetszwo)
 
     HDFS-2617. Replaced Kerberized SSL for image transfer and fsck
-    with SPNEGO-based solution. (jghoman, tucu, and atm via eli)
+    with SPNEGO-based solution. (jghoman, omalley, tucu, and atm via eli)
 
     HDFS-3365. Enable users to disable socket caching in DFS client
     configuration (todd)
@@ -1280,7 +1850,43 @@ Release 2.0.0-alpha - 05-23-2012
     
     HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)
 
-Release 0.23.3 - UNRELEASED
+Release 0.23.5 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HDFS-3829. TestHftpURLTimeouts fails intermittently with JDK7  (Trevor
+    Robinson via tgraves)
+
+    HDFS-3824. TestHftpDelegationToken fails intermittently with JDK7 (Trevor
+    Robinson via tgraves)
+
+    HDFS-3224. Bug in check for DN re-registration with different storage ID
+    (jlowe)
+
+Release 0.23.4 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HDFS-3831. Failure to renew tokens due to test-sources left in classpath
+    (jlowe via bobby)
+
+Release 0.23.3
 
   INCOMPATIBLE CHANGES
 
@@ -1317,6 +1923,30 @@ Release 0.23.3 - UNRELEASED
     HDFS-3331. In namenode, check superuser privilege for setBalancerBandwidth
     and acquire the write lock for finalizeUpgrade.  (szetszwo)
 
+    HDFS-3577. In DatanodeWebHdfsMethods, use MessageBodyWriter instead of
+    StreamingOutput, otherwise, it will fail to transfer large files.
+    (szetszwo)
+
+    HDFS-3646. LeaseRenewer can hold reference to inactive DFSClient
+    instances forever. (Kihwal Lee via daryn)
+
+    HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations
+    to get around a Java library bug causing OutOfMemoryError.  (szetszwo)
+
+    HDFS-3553. Hftp proxy tokens are broken (daryn)
+
+    HDFS-3718. Datanode won't shutdown because of runaway DataBlockScanner
+    thread (Kihwal Lee via daryn)
+
+    HDFS-3861. Deadlock in DFSClient (Kihwal Lee via daryn)
+
+    HDFS-3873. Hftp assumes security is disabled if token fetch fails (daryn)
+
+    HDFS-3852. TestHftpDelegationToken is broken after HADOOP-8225 (daryn)
+
+    HDFS-3890. filecontext mkdirs doesn't apply umask as expected
+    (Tom Graves via daryn)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
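
The headline item in this CHANGES.txt update is HDFS-3077, the QuorumJournalManager: the NameNode sends each edit-log transaction to a set of JournalNodes and treats it as committed once a majority acknowledge it, which is why the breakdown above tracks the last "committed" txid (HDFS-3863) and a quorum timeout on failover (HDFS-3906). A minimal sketch of that majority-ack rule, in plain Java with hypothetical names rather than the actual QuorumJournalManager API:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    public class QuorumAckSketch {
      // A write is committed once a strict majority of journals ack it, and
      // fails fast once a majority can no longer be reached.
      static boolean waitForMajority(List<Future<Boolean>> acks, long timeoutMs)
          throws InterruptedException {
        int majority = acks.size() / 2 + 1;        // e.g. 2 of 3 JournalNodes
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        while (System.nanoTime() < deadline) {
          int ok = 0, failed = 0;
          for (Future<Boolean> f : acks) {
            if (!f.isDone()) continue;
            try {
              if (f.get()) ok++; else failed++;
            } catch (ExecutionException e) {
              failed++;                            // the RPC to this journal failed
            }
          }
          if (ok >= majority) return true;         // quorum reached: committed
          if (failed > acks.size() - majority) return false; // quorum impossible
          Thread.sleep(10);
        }
        return false;                              // timed out (cf. HDFS-3906)
      }

      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(3);
        List<Future<Boolean>> acks = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
          final int id = i;
          acks.add(pool.submit(() -> { Thread.sleep(id * 50L); return id < 2; }));
        }
        System.out.println("committed: " + waitForMajority(acks, 1000));
        pool.shutdown();
      }
    }

The same rule is what the fault-injection tests in the breakdown (HDFS-3741, HDFS-3800) exercise: any minority of JournalNodes may fail or lag without blocking the writer.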

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml Fri Oct 19 02:25:55 2012
@@ -9,6 +9,9 @@
        <Package name="org.apache.hadoop.hdfs.server.namenode.ha.proto" />
      </Match>
      <Match>
+       <Package name="org.apache.hadoop.hdfs.qjournal.protocol" />
+     </Match>
+     <Match>
        <Bug pattern="EI_EXPOSE_REP" />
      </Match>
      <Match>
@@ -273,4 +276,18 @@
        <Method name="quit" />
        <Bug pattern="DM_EXIT" />
      </Match>
+
+     <!-- Don't complain about recoverBlock equality check -->
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.server.datanode.DataNode" />
+       <Method name="recoverBlock" />
+       <Bug pattern="EC_UNRELATED_TYPES" />
+     </Match>
+
+     <!-- More complex cleanup logic confuses findbugs -->
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.qjournal.server.Journal" />
+       <Method name="persistPaxosData" />
+       <Bug pattern="OS_OPEN_STREAM" />
+     </Match>
  </FindBugsFilter>
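
For context on the blanket EI_EXPOSE_REP exclusion above: that FindBugs pattern fires when a method returns a reference to an object's internal mutable state. A minimal Java illustration, using a hypothetical class rather than HDFS code:

    class MetricsSample {
      private final long[] samples = new long[16];

      // FindBugs EI_EXPOSE_REP: hands out the internal array, so any caller
      // can mutate this object's state.
      long[] getSamplesUnsafe() {
        return samples;
      }

      // The usual remedy when the warning is not excluded: a defensive copy.
      long[] getSamples() {
        return samples.clone();
      }
    }

Excluding the pattern project-wide is a deliberate trade-off, presumably because defensive copies on hot paths would be wasteful; the two matches added here instead silence specific false positives (recoverBlock's equality check and Journal#persistPaxosData's stream cleanup).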

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/pom.xml Fri Oct 19 02:25:55 2012
@@ -34,22 +34,19 @@ http://maven.apache.org/xsd/maven-4.0.0.
     <hadoop.component>hdfs</hadoop.component>
     <kdc.resource.dir>../../hadoop-common-project/hadoop-common/src/test/resources/kdc</kdc.resource.dir>
     <is.hadoop.component>true</is.hadoop.component>
+    <require.fuse>false</require.fuse>
+    <require.libwebhdfs>false</require.libwebhdfs>
   </properties>
 
   <dependencies>
     <dependency>
-      <groupId>org.aspectj</groupId>
-      <artifactId>aspectjtools</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.aspectj</groupId>
-      <artifactId>aspectjrt</artifactId>
-      <scope>test</scope>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
+      <artifactId>hadoop-auth</artifactId>
       <scope>provided</scope>
     </dependency>
     <dependency>
@@ -64,6 +61,58 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <type>test-jar</type>
     </dependency>
     <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <version>3.4.2</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty-util</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-core</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-server</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
       <groupId>commons-logging</groupId>
       <artifactId>commons-logging</artifactId>
       <scope>compile</scope>
@@ -74,6 +123,11 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>javax.servlet.jsp</groupId>
+      <artifactId>jsp-api</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
       <scope>compile</scope>
@@ -84,8 +138,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.avro</groupId>
-      <artifactId>avro</artifactId>
+      <groupId>javax.servlet</groupId>
+      <artifactId>servlet-api</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>
@@ -99,27 +153,29 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.ant</groupId>
-      <artifactId>ant</artifactId>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
       <scope>provided</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.zookeeper</groupId>
-      <artifactId>zookeeper</artifactId>
-      <version>3.4.2</version>
-      <scope>provided</scope>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
+      <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-      <scope>provided</scope>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+      <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.zookeeper</groupId>
-      <artifactId>zookeeper</artifactId>
-      <version>3.4.2</version>
-      <type>test-jar</type>
-      <scope>test</scope>
+      <groupId>tomcat</groupId>
+      <artifactId>jasper-runtime</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>xmlenc</groupId>
+      <artifactId>xmlenc</artifactId>
+      <scope>compile</scope>
     </dependency>
   </dependencies>
 
@@ -133,6 +189,12 @@ http://maven.apache.org/xsd/maven-4.0.0.
             <startKdc>${startKdc}</startKdc>
             <kdc.resource.dir>${kdc.resource.dir}</kdc.resource.dir>
           </systemPropertyVariables>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
         </configuration>
       </plugin>
       <plugin>
@@ -178,6 +240,25 @@ http://maven.apache.org/xsd/maven-4.0.0.
             </configuration>
           </execution>
           <execution>
+            <id>journal</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>compile</goal>
+            </goals>
+            <configuration>
+              <compile>false</compile>
+              <workingDirectory>${project.build.directory}/generated-src/main/jsp</workingDirectory>
+              <webFragmentFile>${project.build.directory}/journal-jsp-servlet-definitions.xml</webFragmentFile>
+              <packageName>org.apache.hadoop.hdfs.server.journalservice</packageName>
+              <sources>
+                <directory>${basedir}/src/main/webapps/journal</directory>
+                <includes>
+                  <include>*.jsp</include>
+                </includes>
+              </sources>
+            </configuration>
+          </execution>
+          <execution>
             <id>datanode</id>
             <phase>generate-sources</phase>
             <goals>
@@ -239,37 +320,14 @@ http://maven.apache.org/xsd/maven-4.0.0.
         <artifactId>maven-antrun-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-proto</id>
-            <phase>generate-sources</phase>
+            <id>create-protobuf-generated-sources-directory</id>
+            <phase>initialize</phase>
             <goals>
               <goal>run</goal>
             </goals>
             <configuration>
               <target>
-                <echo file="target/compile-proto.sh">
-                    PROTO_DIR=src/main/proto
-                    JAVA_DIR=target/generated-sources/java
-                    which cygpath 2&gt; /dev/null
-                    if [ $? = 1 ]; then
-                      IS_WIN=false
-                    else
-                      IS_WIN=true
-                      WIN_PROTO_DIR=`cygpath --windows $PROTO_DIR`
-                      WIN_JAVA_DIR=`cygpath --windows $JAVA_DIR`
-                    fi
-                    mkdir -p $JAVA_DIR 2&gt; /dev/null
-                    for PROTO_FILE in `ls $PROTO_DIR/*.proto 2&gt; /dev/null`
-                    do
-                        if [ "$IS_WIN" = "true" ]; then
-                          protoc -I$WIN_PROTO_DIR --java_out=$WIN_JAVA_DIR $PROTO_FILE
-                        else
-                          protoc -I$PROTO_DIR --java_out=$JAVA_DIR $PROTO_FILE
-                        fi
-                    done
-                </echo>
-                <exec executable="sh" dir="${basedir}" failonerror="true">
-                  <arg line="target/compile-proto.sh"/>
-                </exec>
+                <mkdir dir="target/generated-sources/java" />
               </target>
             </configuration>
           </execution>
@@ -284,6 +342,7 @@ http://maven.apache.org/xsd/maven-4.0.0.
                 <loadfile property="hdfs.servlet.definitions" srcFile="${project.build.directory}/hdfs-jsp-servlet-definitions.xml"/>
                 <loadfile property="secondary.servlet.definitions" srcFile="${project.build.directory}/secondary-jsp-servlet-definitions.xml"/>
                 <loadfile property="datanode.servlet.definitions" srcFile="${project.build.directory}/datanode-jsp-servlet-definitions.xml"/>
+                <loadfile property="journal.servlet.definitions" srcFile="${project.build.directory}/journal-jsp-servlet-definitions.xml"/>
                 <echoproperties destfile="${project.build.directory}/webxml.properties">
                   <propertyset>
                     <propertyref regex=".*.servlet.definitions"/>
@@ -299,6 +358,9 @@ http://maven.apache.org/xsd/maven-4.0.0.
                 <copy file="${basedir}/src/main/webapps/proto-datanode-web.xml"
                       tofile="${project.build.directory}/webapps/datanode/WEB-INF/web.xml"
                       filtering="true"/>
+                <copy file="${basedir}/src/main/webapps/proto-journal-web.xml"
+                      tofile="${project.build.directory}/webapps/journal/WEB-INF/web.xml"
+                      filtering="true"/>
                 <copy toDir="${project.build.directory}/webapps">
                   <fileset dir="${basedir}/src/main/webapps">
                     <exclude name="**/*.jsp"/>
@@ -343,6 +405,81 @@ http://maven.apache.org/xsd/maven-4.0.0.
         </executions>
       </plugin>
       <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>compile-proto</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <executable>protoc</executable>
+              <arguments>
+                <argument>-Isrc/main/proto/</argument>
+                <argument>--java_out=target/generated-sources/java</argument>
+                <argument>src/main/proto/GetUserMappingsProtocol.proto</argument>
+                <argument>src/main/proto/HAZKInfo.proto</argument>
+                <argument>src/main/proto/InterDatanodeProtocol.proto</argument>
+                <argument>src/main/proto/JournalProtocol.proto</argument>
+                <argument>src/main/proto/RefreshAuthorizationPolicyProtocol.proto</argument>
+                <argument>src/main/proto/RefreshUserMappingsProtocol.proto</argument>
+                <argument>src/main/proto/datatransfer.proto</argument>
+                <argument>src/main/proto/hdfs.proto</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>compile-proto-datanode</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <executable>protoc</executable>
+              <arguments>
+                <argument>-Isrc/main/proto/</argument>
+                <argument>--java_out=target/generated-sources/java</argument>
+                <argument>src/main/proto/ClientDatanodeProtocol.proto</argument>
+                <argument>src/main/proto/DatanodeProtocol.proto</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>compile-proto-namenode</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <executable>protoc</executable>
+              <arguments>
+                <argument>-Isrc/main/proto/</argument>
+                <argument>--java_out=target/generated-sources/java</argument>
+                <argument>src/main/proto/ClientNamenodeProtocol.proto</argument>
+                <argument>src/main/proto/NamenodeProtocol.proto</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>compile-proto-qjournal</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <executable>protoc</executable>
+              <arguments>
+                <argument>-Isrc/main/proto/</argument>
+                <argument>--java_out=target/generated-sources/java</argument>
+                <argument>src/main/proto/QJournalProtocol.proto</argument>
+              </arguments>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-javadoc-plugin</artifactId>
         <configuration>
@@ -402,38 +539,60 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <build>
         <plugins>
           <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
             <artifactId>maven-antrun-plugin</artifactId>
+            <version>1.7</version>
             <executions>
               <execution>
-                <id>make</id>
-                <phase>compile</phase>
+                <id>define-classpath</id>
+                <phase>process-resources</phase>
                 <goals><goal>run</goal></goals>
                 <configuration>
-                  <target>
-                    <mkdir dir="${project.build.directory}/native"/>
-                    <exec executable="cmake" dir="${project.build.directory}/native" 
-                        failonerror="true">
-                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}"/>
-                    </exec>
-                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
-                      <arg line="VERBOSE=1"/>
-                    </exec>
-                  </target>
+                  <exportAntProperties>true</exportAntProperties>
+                  <tests>
+                    <property name="test.classpath" refid="maven.test.classpath"/>
+                  </tests>
                 </configuration>
               </execution>
-              <!-- TODO wire here native testcases
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.hadoop.cmake.maven.ng</groupId>
+            <artifactId>cmake-ng</artifactId>
+            <executions>
               <execution>
-                <id>test</id>
-                <phase>test</phase>
-                <goals>
-                  <goal>test</goal>
-                </goals>
+                <id>cmake-compile</id>
+                <goals><goal>compile</goal></goals>
+                <configuration>
+                  <target>all</target>
+                  <source>${basedir}/src</source>
+                  <vars>
+                    <GENERATED_JAVAH>${project.build.directory}/native/javah</GENERATED_JAVAH>
+                    <JVM_ARCH_DATA_MODEL>${sun.arch.data.model}</JVM_ARCH_DATA_MODEL>
+                    <REQUIRE_FUSE>${require.fuse}</REQUIRE_FUSE>
+                    <REQUIRE_LIBWEBHDFS>${require.libwebhdfs}</REQUIRE_LIBWEBHDFS>
+                  </vars>
+                </configuration>
+              </execution>
+              <execution>
+                <id>test_libhdfs_threaded</id>
+                <goals><goal>test</goal></goals>
+                <configuration>
+                  <binary>${project.build.directory}/native/test_libhdfs_threaded</binary>
+                  <env><CLASSPATH>${test.classpath}</CLASSPATH></env>
+                  <timeout>300</timeout>
+                  <results>${project.build.directory}/results</results>
+                </configuration>
+              </execution>
+              <execution>
+                <id>test_native_mini_dfs</id>
+                <goals><goal>test</goal></goals>
                 <configuration>
-                  <destDir>${project.build.directory}/native/target</destDir>
+                  <binary>${project.build.directory}/native/test_native_mini_dfs</binary>
+                  <env><CLASSPATH>${test.classpath}</CLASSPATH></env>
+                  <timeout>300</timeout>
+                  <results>${project.build.directory}/results</results>
                 </configuration>
               </execution>
-              -->
             </executions>
           </plugin>
         </plugins>
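
Two build changes above are worth spelling out. First, the inline antrun shell script (which shelled out to sh and cygpath) is replaced by exec-maven-plugin executions that invoke protoc directly, one execution per group of .proto files; this is the portability fix tracked as HDFS-4041. Each execution amounts to running protoc from the module root with an include path and a Java output directory. A small Java sketch of the equivalent invocation, assuming protoc is on the PATH and that target/generated-sources/java already exists (the antrun mkdir in the initialize phase guarantees the latter):

    import java.io.IOException;

    public class ProtocInvocation {
      // Mirrors one <execution> above: protoc -Isrc/main/proto/
      //   --java_out=target/generated-sources/java <proto file>
      static void compile(String protoFile) throws IOException, InterruptedException {
        Process p = new ProcessBuilder(
            "protoc",
            "-Isrc/main/proto/",
            "--java_out=target/generated-sources/java",
            protoFile)
            .inheritIO()   // stream protoc output to this process's stdout/stderr
            .start();
        if (p.waitFor() != 0) {
          throw new IOException("protoc failed for " + protoFile);
        }
      }

      public static void main(String[] args) throws Exception {
        compile("src/main/proto/QJournalProtocol.proto");
      }
    }

Second, the native build moves from hand-rolled cmake/make exec calls to the cmake-ng plugin, which also wires the native test binaries (test_libhdfs_threaded, test_native_mini_dfs) to the Maven test classpath exported via exportAntProperties.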

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt Fri Oct 19 02:25:55 2012
@@ -21,18 +21,7 @@ cmake_minimum_required(VERSION 2.6 FATAL
 # Default to release builds
 set(CMAKE_BUILD_TYPE, Release)
 
-# If JVM_ARCH_DATA_MODEL is 32, compile all binaries as 32-bit.
-# This variable is set by maven.
-if (JVM_ARCH_DATA_MODEL EQUAL 32)
-    # force 32-bit code generation on amd64/x86_64, ppc64, sparc64
-    if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_SYSTEM_PROCESSOR MATCHES ".*64")
-        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32")
-        set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -m32")
-    endif ()
-    if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
-        set(CMAKE_SYSTEM_PROCESSOR "i686")
-    endif ()
-endif (JVM_ARCH_DATA_MODEL EQUAL 32)
+include(../../../hadoop-common-project/hadoop-common/src/JNIFlags.cmake NO_POLICY_SCOPE)
 
 # Compile a library with both shared and static variants
 function(add_dual_library LIBNAME)
@@ -67,6 +56,12 @@ function(FLATTEN_LIST INPUT SEPARATOR OU
   set (${OUTPUT} "${_TMPS}" PARENT_SCOPE)
 endfunction()
 
+# Check to see if our compiler and linker support the __thread attribute.
+# On Linux and some other operating systems, this is a more efficient 
+# alternative to POSIX thread local storage. 
+INCLUDE(CheckCSourceCompiles)
+CHECK_C_SOURCE_COMPILES("int main(void) { static __thread int i = 0; return 0; }" HAVE_BETTER_TLS)
+
 find_package(JNI REQUIRED)
 if (NOT GENERATED_JAVAH)
     # Must identify where the generated headers have been placed
@@ -81,15 +76,17 @@ include_directories(
     ${CMAKE_CURRENT_SOURCE_DIR}
     ${CMAKE_BINARY_DIR}
     ${JNI_INCLUDE_DIRS}
-    main/native/
+    main/native
+    main/native/libhdfs
 )
 
 set(_FUSE_DFS_VERSION 0.1.0)
 CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
 
 add_dual_library(hdfs
-    main/native/hdfs.c
-    main/native/hdfsJniHelper.c
+    main/native/libhdfs/exception.c
+    main/native/libhdfs/jni_helper.c
+    main/native/libhdfs/hdfs.c
 )
 target_link_dual_libraries(hdfs
     ${JAVA_JVM_LIBRARY}
@@ -99,31 +96,58 @@ set(LIBHDFS_VERSION "0.0.0")
 set_target_properties(hdfs PROPERTIES
     SOVERSION ${LIBHDFS_VERSION})
 
-add_executable(hdfs_test
-    main/native/hdfs_test.c
+add_library(posix_util
+    main/native/util/posix_util.c
+)
+
+add_executable(test_libhdfs_ops
+    main/native/libhdfs/test/test_libhdfs_ops.c
 )
-target_link_libraries(hdfs_test
+target_link_libraries(test_libhdfs_ops
     hdfs
     ${JAVA_JVM_LIBRARY}
 )
-output_directory(hdfs_test target/usr/local/bin)
 
-add_executable(hdfs_read
-    main/native/hdfs_read.c
+add_executable(test_libhdfs_read
+    main/native/libhdfs/test/test_libhdfs_read.c
 )
-target_link_libraries(hdfs_read
+target_link_libraries(test_libhdfs_read
     hdfs
     ${JAVA_JVM_LIBRARY}
 )
-output_directory(hdfs_read target/usr/local/bin)
 
-add_executable(hdfs_write
-    main/native/hdfs_write.c
+add_executable(test_libhdfs_write
+    main/native/libhdfs/test/test_libhdfs_write.c
 )
-target_link_libraries(hdfs_write
+target_link_libraries(test_libhdfs_write
     hdfs
     ${JAVA_JVM_LIBRARY}
 )
-output_directory(hdfs_write target/usr/local/bin)
 
-add_subdirectory(contrib/fuse-dfs/src)
+add_library(native_mini_dfs
+    main/native/libhdfs/native_mini_dfs.c
+)
+target_link_libraries(native_mini_dfs
+    hdfs
+)
+
+add_executable(test_native_mini_dfs
+    main/native/libhdfs/test_native_mini_dfs.c
+)
+target_link_libraries(test_native_mini_dfs
+    native_mini_dfs
+)
+
+add_executable(test_libhdfs_threaded
+    main/native/libhdfs/test_libhdfs_threaded.c
+)
+target_link_libraries(test_libhdfs_threaded
+    hdfs
+    native_mini_dfs
+    pthread
+)
+
+IF(REQUIRE_LIBWEBHDFS)
+    add_subdirectory(contrib/libwebhdfs)
+ENDIF(REQUIRE_LIBWEBHDFS)
+add_subdirectory(main/native/fuse-dfs)
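
A note on the HAVE_BETTER_TLS probe added above: CHECK_C_SOURCE_COMPILES tests whether the compiler accepts the __thread storage class, which, as the comment says, is on Linux and some other systems a cheaper alternative to POSIX thread-local storage; config.h.cmake exports the result as HAVE_BETTER_TLS. There is no direct Java equivalent, but for comparison the analogous per-thread-storage idiom on the Java side is ThreadLocal; a brief, purely illustrative sketch:

    public class ThreadLocalSketch {
      // Java's analogue of per-thread storage: each thread sees its own value.
      private static final ThreadLocal<StringBuilder> BUFFER =
          ThreadLocal.withInitial(StringBuilder::new);

      public static void main(String[] args) throws Exception {
        Runnable task = () -> {
          StringBuilder sb = BUFFER.get();   // this thread's private instance
          sb.append(Thread.currentThread().getName());
          System.out.println(sb);
        };
        Thread t1 = new Thread(task, "t1");
        Thread t2 = new Thread(task, "t2");
        t1.start(); t2.start();
        t1.join(); t2.join();
      }
    }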

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake Fri Oct 19 02:25:55 2012
@@ -3,4 +3,6 @@
 
 #cmakedefine _FUSE_DFS_VERSION "@_FUSE_DFS_VERSION@"
 
+#cmakedefine HAVE_BETTER_TLS
+
 #endif

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml Fri Oct 19 02:25:55 2012
@@ -37,9 +37,9 @@ http://maven.apache.org/xsd/maven-4.0.0.
 
   <dependencies>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
-      <scope>provided</scope>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+      <scope>compile</scope>
     </dependency>
     <dependency> 
       <groupId>org.apache.hadoop</groupId>
@@ -69,6 +69,16 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <scope>test</scope>
@@ -79,6 +89,87 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <scope>test</scope>
     </dependency>
   </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>add-source</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>${project.build.directory}/generated-sources/java</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <configuration>
+          <skipTests>false</skipTests>
+        </configuration>
+        <executions>
+          <execution>
+            <id>create-protobuf-generated-sources-directory</id>
+            <phase>initialize</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <mkdir dir="target/generated-sources/java" />
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>compile-proto</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <executable>protoc</executable>
+              <arguments>
+                <argument>-Isrc/main/proto/</argument>
+                <argument>-I../../main/proto</argument>
+                <argument>--java_out=target/generated-sources/java</argument>
+                <argument>src/main/proto/bkjournal.proto</argument>
+              </arguments>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>dev-support/findbugsExcludeFile.xml</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
   <profiles>
     <profile>
       <id>dist</id>

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java Fri Oct 19 02:25:55 2012
@@ -70,7 +70,7 @@ class BookKeeperEditLogInputStream exten
     this.lh = lh;
     this.firstTxId = metadata.getFirstTxId();
     this.lastTxId = metadata.getLastTxId();
-    this.logVersion = metadata.getVersion();
+    this.logVersion = metadata.getDataLayoutVersion();
     this.inProgress = metadata.isInProgress();
 
     if (firstBookKeeperEntry < 0
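
(This rename tracks the getVersion() -> getDataLayoutVersion() change in EditLogLedgerMetadata further down in this commit.)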

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java Fri Oct 19 02:25:55 2012
@@ -84,7 +84,7 @@ class BookKeeperEditLogOutputStream
   @Override
   public void close() throws IOException {
     setReadyToFlush();
-    flushAndSync();
+    flushAndSync(true);
     try {
       lh.close();
     } catch (InterruptedException ie) {
@@ -130,7 +130,7 @@ class BookKeeperEditLogOutputStream
   }
 
   @Override
-  public void flushAndSync() throws IOException {
+  public void flushAndSync(boolean durable) throws IOException {
     assert(syncLatch != null);
     try {
       syncLatch.await();
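
The flushAndSync(boolean durable) signature follows the corresponding change to the EditLogOutputStream base class; the flag presumably tells the stream whether the flushed edits must be durable before the call returns. The BKJM implementation above waits on the BookKeeper acknowledgement either way. A hypothetical file-backed implementation might honor the flag along these lines (illustrative names only, not code from this patch):

    @Override
    public void flushAndSync(boolean durable) throws IOException {
      buffer.flushTo(out);       // push buffered edits out (illustrative field)
      if (durable) {
        channel.force(false);    // fsync only when durability was requested
      }
    }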

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java Fri Oct 19 02:25:55 2012
@@ -50,6 +50,11 @@ import java.io.IOException;
 
 import java.net.URI;
 
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.contrib.bkjournal.BKJournalProtos.VersionProto;
+import com.google.protobuf.TextFormat;
+import static com.google.common.base.Charsets.UTF_8;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import com.google.common.annotations.VisibleForTesting;
@@ -143,36 +148,17 @@ public class BookKeeperJournalManager im
   private final int quorumSize;
   private final String digestpw;
   private final CountDownLatch zkConnectLatch;
-
+  private final NamespaceInfo nsInfo;
   private LedgerHandle currentLedger = null;
 
-  private int bytesToInt(byte[] b) {
-    assert b.length >= 4;
-    return b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3];
-  }
-
-  private byte[] intToBytes(int i) {
-    return new byte[] {
-      (byte)(i >> 24),
-      (byte)(i >> 16),
-      (byte)(i >> 8),
-      (byte)(i) };
-  }
-
-  BookKeeperJournalManager(Configuration conf, URI uri) throws IOException {
-    this(conf, uri, null);
-    // TODO(ivank): update BookKeeperJournalManager to do something
-    // with the NamespaceInfo. This constructor has been added
-    // for compatibility with the old tests, and may be removed
-    // when the tests are updated.
-  }
-
   /**
    * Construct a Bookkeeper journal manager.
    */
   public BookKeeperJournalManager(Configuration conf, URI uri,
       NamespaceInfo nsInfo) throws IOException {
     this.conf = conf;
+    this.nsInfo = nsInfo;
+
     String zkConnect = uri.getAuthority().replace(";", ",");
     String zkPath = uri.getPath();
     ensembleSize = conf.getInt(BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
@@ -202,10 +188,32 @@ public class BookKeeperJournalManager im
       Stat versionStat = zkc.exists(versionPath, false);
       if (versionStat != null) {
         byte[] d = zkc.getData(versionPath, false, versionStat);
+        VersionProto.Builder builder = VersionProto.newBuilder();
+        TextFormat.merge(new String(d, UTF_8), builder);
+        if (!builder.isInitialized()) {
+          throw new IOException("Invalid/Incomplete data in znode");
+        }
+        VersionProto vp = builder.build();
+
         // There's only one version at the moment
-        assert bytesToInt(d) == BKJM_LAYOUT_VERSION;
-      } else {
-        zkc.create(versionPath, intToBytes(BKJM_LAYOUT_VERSION),
+        assert vp.getLayoutVersion() == BKJM_LAYOUT_VERSION;
+
+        NamespaceInfo readns = PBHelper.convert(vp.getNamespaceInfo());
+
+        if (nsInfo.getNamespaceID() != readns.getNamespaceID() ||
+            !nsInfo.clusterID.equals(readns.getClusterID()) ||
+            !nsInfo.getBlockPoolID().equals(readns.getBlockPoolID())) {
+          String err = String.format("Environment mismatch. Running process %s"
+                                     +", stored in ZK %s", nsInfo, readns);
+          LOG.error(err);
+          throw new IOException(err);
+        }
+      } else if (nsInfo.getNamespaceID() > 0) {
+        VersionProto.Builder builder = VersionProto.newBuilder();
+        builder.setNamespaceInfo(PBHelper.convert(nsInfo))
+          .setLayoutVersion(BKJM_LAYOUT_VERSION);
+        byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8);
+        zkc.create(versionPath, data,
                    Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
       }
 
@@ -214,11 +222,11 @@ public class BookKeeperJournalManager im
             Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
       }
       prepareBookKeeperEnv();
-      bkc = new BookKeeper(new ClientConfiguration(),
-                           zkc);
+      bkc = new BookKeeper(new ClientConfiguration(), zkc);
     } catch (KeeperException e) {
       throw new IOException("Error initializing zk", e);
     } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
       throw new IOException("Interrupted while initializing bk journal manager",
                             ie);
     }
@@ -271,6 +279,23 @@ public class BookKeeperJournalManager im
     }
   }
 
+  @Override
+  public void format(NamespaceInfo ns) throws IOException {
+    // Currently, BKJM automatically formats itself when first accessed.
+    // TODO: change over to explicit formatting so that the admin can
+    // clear out the BK storage when reformatting a cluster.
+    LOG.info("Not formatting " + this + " - BKJM does not currently " +
+        "support reformatting. If it has not been used before, it will" +
+        "be formatted automatically upon first use.");
+  }
+  
+  @Override
+  public boolean hasSomeData() throws IOException {
+    // Don't confirm format on BKJM, since format() is currently a
+    // no-op anyway
+    return false;
+  }
+
   /**
    * Start a new log segment in a BookKeeper ledger.
    * First ensure that we have the write lock for this journal.
@@ -305,13 +330,14 @@ public class BookKeeperJournalManager im
     } catch (KeeperException ke) {
       throw new IOException("Error in zookeeper while creating ledger", ke);
     } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
       throw new IOException("Interrupted creating ledger", ie);
     }
 
     try {
       String znodePath = inprogressZNode(txId);
       EditLogLedgerMetadata l = new EditLogLedgerMetadata(znodePath,
-          HdfsConstants.LAYOUT_VERSION,  currentLedger.getId(), txId);
+          HdfsConstants.LAYOUT_VERSION, currentLedger.getId(), txId);
       /* Write the ledger metadata out to the inprogress ledger znode
        * This can fail if for some reason our write lock has
        * expired (@see WriteLock) and another process has managed to
@@ -339,6 +365,7 @@ public class BookKeeperJournalManager im
       //log & ignore, an IOException will be thrown soon
       LOG.error("Error closing ledger", bke);
     } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
       LOG.warn("Interrupted while closing ledger", ie);
     }
   }
@@ -408,6 +435,7 @@ public class BookKeeperJournalManager im
     } catch (KeeperException e) {
       throw new IOException("Error finalising ledger", e);
     } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
       throw new IOException("Error finalising ledger", ie);
     } 
   }
@@ -437,6 +465,7 @@ public class BookKeeperJournalManager im
         } catch (BKException e) {
           throw new IOException("Could not open ledger for " + fromTxId, e);
         } catch (InterruptedException ie) {
+          Thread.currentThread().interrupt();
           throw new IOException("Interrupted opening ledger for "
                                          + fromTxId, ie);
         }
@@ -550,6 +579,7 @@ public class BookKeeperJournalManager im
       } catch (KeeperException ke) {
         throw new IOException("Couldn't get list of inprogress segments", ke);
       } catch (InterruptedException ie) {
+        Thread.currentThread().interrupt();
         throw new IOException("Interrupted getting list of inprogress segments",
                               ie);
       }
@@ -566,6 +596,7 @@ public class BookKeeperJournalManager im
           zkc.delete(l.getZkPath(), stat.getVersion());
           bkc.deleteLedger(l.getLedgerId());
         } catch (InterruptedException ie) {
+          Thread.currentThread().interrupt();
           LOG.error("Interrupted while purging " + l, ie);
         } catch (BKException bke) {
           LOG.error("Couldn't delete ledger from bookkeeper", bke);
@@ -584,6 +615,7 @@ public class BookKeeperJournalManager im
     } catch (BKException bke) {
       throw new IOException("Couldn't close bookkeeper client", bke);
     } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
       throw new IOException("Interrupted while closing journal manager", ie);
     }
   }
@@ -618,6 +650,7 @@ public class BookKeeperJournalManager im
     } catch (BKException bke) {
       throw new IOException("Exception opening ledger for " + l, bke);
     } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
       throw new IOException("Interrupted opening ledger for " + l, ie);
     }
 
@@ -675,6 +708,7 @@ public class BookKeeperJournalManager im
     } catch (KeeperException e) {
       throw new IOException("Exception reading ledger list from zk", e);
     } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
       throw new IOException("Interrupted getting list of ledgers from zk", ie);
     }
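
One pattern recurs in nearly every hunk of this file: each InterruptedException handler now calls Thread.currentThread().interrupt() before wrapping the exception in an IOException, so the thread's interrupt status survives for callers that only see the IOException. In isolation:

    try {
      latch.await();                       // any interruptible blocking call
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();  // restore the interrupt flag
      throw new IOException("Interrupted while waiting", ie);
    }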
 

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java Fri Oct 19 02:25:55 2012
@@ -29,6 +29,10 @@ import org.apache.zookeeper.KeeperExcept
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.data.Stat;
 
+import org.apache.hadoop.contrib.bkjournal.BKJournalProtos.CurrentInprogressProto;
+import com.google.protobuf.TextFormat;
+import static com.google.common.base.Charsets.UTF_8;
+
 /**
  * Distributed write permission lock, using ZooKeeper. Read the version number
  * and return the current inprogress node path available in CurrentInprogress
@@ -42,29 +46,28 @@ import org.apache.zookeeper.data.Stat;
  */
 
 class CurrentInprogress {
-  private static final String CONTENT_DELIMITER = ",";
-
   static final Log LOG = LogFactory.getLog(CurrentInprogress.class);
 
   private final ZooKeeper zkc;
   private final String currentInprogressNode;
   private volatile int versionNumberForPermission = -1;
-  private static final int CURRENT_INPROGRESS_LAYOUT_VERSION = -1; 
   private final String hostName = InetAddress.getLocalHost().toString();
 
   CurrentInprogress(ZooKeeper zkc, String lockpath) throws IOException {
     this.currentInprogressNode = lockpath;
     this.zkc = zkc;
     try {
-      Stat isCurrentInprogressNodeExists = zkc.exists(lockpath, false);
+      Stat isCurrentInprogressNodeExists = zkc.exists(currentInprogressNode,
+                                                      false);
       if (isCurrentInprogressNodeExists == null) {
         try {
-          zkc.create(lockpath, null, Ids.OPEN_ACL_UNSAFE,
-                  CreateMode.PERSISTENT);
+          zkc.create(currentInprogressNode, null, Ids.OPEN_ACL_UNSAFE,
+                     CreateMode.PERSISTENT);
         } catch (NodeExistsException e) {
          // Node might be created by another process at the same time. Ignore it.
           if (LOG.isDebugEnabled()) {
-            LOG.debug(lockpath + " already created by other process.", e);
+            LOG.debug(currentInprogressNode + " already created by other process.",
+                      e);
           }
         }
       }
@@ -83,10 +86,13 @@ class CurrentInprogress {
    * @throws IOException
    */
   void update(String path) throws IOException {
-    String content = CURRENT_INPROGRESS_LAYOUT_VERSION
-        + CONTENT_DELIMITER + hostName + CONTENT_DELIMITER + path;
+    CurrentInprogressProto.Builder builder = CurrentInprogressProto.newBuilder();
+    builder.setPath(path).setHostname(hostName);
+
+    String content = TextFormat.printToString(builder.build());
+
     try {
-      zkc.setData(this.currentInprogressNode, content.getBytes(),
+      zkc.setData(this.currentInprogressNode, content.getBytes(UTF_8),
           this.versionNumberForPermission);
     } catch (KeeperException e) {
       throw new IOException("Exception when setting the data "
@@ -123,23 +129,12 @@ class CurrentInprogress {
     }
     this.versionNumberForPermission = stat.getVersion();
     if (data != null) {
-      String stringData = new String(data);
-      LOG.info("Read data[layout version number,hostname,inprogressNode path]"
-          + "= [" + stringData + "] from CurrentInprogress");
-      String[] contents = stringData.split(CONTENT_DELIMITER);
-      assert contents.length == 3 : "As per the current data format, "
-          + "CurrentInprogress node data should contain 3 fields. "
-          + "i.e layout version number,hostname,inprogressNode path";
-      String layoutVersion = contents[0];
-      if (Long.valueOf(layoutVersion) > CURRENT_INPROGRESS_LAYOUT_VERSION) {
-        throw new IOException(
-            "Supported layout version of CurrentInprogress node is : "
-                + CURRENT_INPROGRESS_LAYOUT_VERSION
-                + " . Layout version of CurrentInprogress node in ZK is : "
-                + layoutVersion);
+      CurrentInprogressProto.Builder builder = CurrentInprogressProto.newBuilder();
+      TextFormat.merge(new String(data, UTF_8), builder);
+      if (!builder.isInitialized()) {
+        throw new IOException("Invalid/Incomplete data in znode");
       }
-      String inprogressNodePath = contents[2];
-      return inprogressNodePath;
+      return builder.build().getPath();
     } else {
       LOG.info("No data available in CurrentInprogress");
     }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java Fri Oct 19 02:25:55 2012
@@ -29,6 +29,10 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import org.apache.hadoop.contrib.bkjournal.BKJournalProtos.EditLogLedgerProto;
+import com.google.protobuf.TextFormat;
+import static com.google.common.base.Charsets.UTF_8;
+
 /**
  * Utility class for storing the metadata associated 
  * with a single edit log segment, stored in a single ledger
@@ -37,8 +41,8 @@ public class EditLogLedgerMetadata {
   static final Log LOG = LogFactory.getLog(EditLogLedgerMetadata.class);
 
   private String zkPath;
+  private final int dataLayoutVersion;
   private final long ledgerId;
-  private final int version;
   private final long firstTxId;
   private long lastTxId;
   private boolean inprogress;
@@ -57,21 +61,22 @@ public class EditLogLedgerMetadata {
     }
   };
 
-  EditLogLedgerMetadata(String zkPath, int version, 
+  EditLogLedgerMetadata(String zkPath, int dataLayoutVersion,
                         long ledgerId, long firstTxId) {
     this.zkPath = zkPath;
+    this.dataLayoutVersion = dataLayoutVersion;
     this.ledgerId = ledgerId;
-    this.version = version;
     this.firstTxId = firstTxId;
     this.lastTxId = HdfsConstants.INVALID_TXID;
     this.inprogress = true;
   }
   
-  EditLogLedgerMetadata(String zkPath, int version, long ledgerId, 
-                        long firstTxId, long lastTxId) {
+  EditLogLedgerMetadata(String zkPath, int dataLayoutVersion,
+                        long ledgerId, long firstTxId,
+                        long lastTxId) {
     this.zkPath = zkPath;
+    this.dataLayoutVersion = dataLayoutVersion;
     this.ledgerId = ledgerId;
-    this.version = version;
     this.firstTxId = firstTxId;
     this.lastTxId = lastTxId;
     this.inprogress = false;
@@ -93,14 +98,14 @@ public class EditLogLedgerMetadata {
     return ledgerId;
   }
   
-  int getVersion() {
-    return version;
-  }
-
   boolean isInProgress() {
     return this.inprogress;
   }
 
+  int getDataLayoutVersion() {
+    return this.dataLayoutVersion;
+  }
+
   void finalizeLedger(long newLastTxId) {
     assert this.lastTxId == HdfsConstants.INVALID_TXID;
     this.lastTxId = newLastTxId;
@@ -111,22 +116,27 @@ public class EditLogLedgerMetadata {
       throws IOException, KeeperException.NoNodeException  {
     try {
       byte[] data = zkc.getData(path, false, null);
-      String[] parts = new String(data).split(";");
-      if (parts.length == 3) {
-        int version = Integer.valueOf(parts[0]);
-        long ledgerId = Long.valueOf(parts[1]);
-        long txId = Long.valueOf(parts[2]);
-        return new EditLogLedgerMetadata(path, version, ledgerId, txId);
-      } else if (parts.length == 4) {
-        int version = Integer.valueOf(parts[0]);
-        long ledgerId = Long.valueOf(parts[1]);
-        long firstTxId = Long.valueOf(parts[2]);
-        long lastTxId = Long.valueOf(parts[3]);
-        return new EditLogLedgerMetadata(path, version, ledgerId,
-                                         firstTxId, lastTxId);
+
+      EditLogLedgerProto.Builder builder = EditLogLedgerProto.newBuilder();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Reading " + path + " data: " + new String(data, UTF_8));
+      }
+      TextFormat.merge(new String(data, UTF_8), builder);
+      if (!builder.isInitialized()) {
+        throw new IOException("Invalid/Incomplete data in znode");
+      }
+      EditLogLedgerProto ledger = builder.build();
+
+      int dataLayoutVersion = ledger.getDataLayoutVersion();
+      long ledgerId = ledger.getLedgerId();
+      long firstTxId = ledger.getFirstTxId();
+      if (ledger.hasLastTxId()) {
+        long lastTxId = ledger.getLastTxId();
+        return new EditLogLedgerMetadata(path, dataLayoutVersion,
+                                         ledgerId, firstTxId, lastTxId);
       } else {
-        throw new IOException("Invalid ledger entry, "
-                              + new String(data));
+        return new EditLogLedgerMetadata(path, dataLayoutVersion,
+                                         ledgerId, firstTxId);
       }
     } catch(KeeperException.NoNodeException nne) {
       throw nne;
@@ -140,17 +150,17 @@ public class EditLogLedgerMetadata {
   void write(ZooKeeper zkc, String path)
       throws IOException, KeeperException.NodeExistsException {
     this.zkPath = path;
-    String finalisedData;
-    if (inprogress) {
-      finalisedData = String.format("%d;%d;%d",
-          version, ledgerId, firstTxId);
-    } else {
-      finalisedData = String.format("%d;%d;%d;%d",
-          version, ledgerId, firstTxId, lastTxId);
+
+    EditLogLedgerProto.Builder builder = EditLogLedgerProto.newBuilder();
+    builder.setDataLayoutVersion(dataLayoutVersion)
+      .setLedgerId(ledgerId).setFirstTxId(firstTxId);
+
+    if (!inprogress) {
+      builder.setLastTxId(lastTxId);
     }
     try {
-      zkc.create(path, finalisedData.getBytes(), Ids.OPEN_ACL_UNSAFE,
-          CreateMode.PERSISTENT);
+      zkc.create(path, TextFormat.printToString(builder.build()).getBytes(UTF_8),
+                 Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
     } catch (KeeperException.NodeExistsException nee) {
       throw nee;
     } catch (KeeperException e) {
@@ -183,9 +193,9 @@ public class EditLogLedgerMetadata {
     }
     EditLogLedgerMetadata ol = (EditLogLedgerMetadata)o;
     return ledgerId == ol.ledgerId
+      && dataLayoutVersion == ol.dataLayoutVersion
       && firstTxId == ol.firstTxId
-      && lastTxId == ol.lastTxId
-      && version == ol.version;
+      && lastTxId == ol.lastTxId;
   }
 
   public int hashCode() {
@@ -193,15 +203,15 @@ public class EditLogLedgerMetadata {
     hash = hash * 31 + (int) ledgerId;
     hash = hash * 31 + (int) firstTxId;
     hash = hash * 31 + (int) lastTxId;
-    hash = hash * 31 + (int) version;
+    hash = hash * 31 + (int) dataLayoutVersion;
     return hash;
   }
     
   public String toString() {
     return "[LedgerId:"+ledgerId +
       ", firstTxId:" + firstTxId +
-      ", lastTxId:" + lastTxId + 
-      ", version:" + version + "]";
+      ", lastTxId:" + lastTxId +
+      ", dataLayoutVersion:" + dataLayoutVersion + "]";
   }
 
 }
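
With EditLogLedgerProto, the optional lastTxId field carries the in-progress versus finalized distinction that the old three- versus four-field ';'-delimited string encoded positionally. The write-side logic, reduced to its essentials:

    // Sketch of the encoding choice above; field values are illustrative.
    EditLogLedgerProto.Builder b = EditLogLedgerProto.newBuilder()
        .setDataLayoutVersion(dataLayoutVersion)
        .setLedgerId(ledgerId)
        .setFirstTxId(firstTxId);
    if (!inprogress) {
      b.setLastTxId(lastTxId);  // only finalized segments record a last txid
    }
    // Readers call hasLastTxId() to pick the matching constructor.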

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java Fri Oct 19 02:25:55 2012
@@ -27,6 +27,10 @@ import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.data.Stat;
 
+import org.apache.hadoop.contrib.bkjournal.BKJournalProtos.MaxTxIdProto;
+import com.google.protobuf.TextFormat;
+import static com.google.common.base.Charsets.UTF_8;
+
 /**
  * Utility class for storing and reading
  * the max seen txid in zookeeper
@@ -55,14 +59,16 @@ class MaxTxId {
   }
 
   synchronized void reset(long maxTxId) throws IOException {
-    String txidStr = Long.toString(maxTxId);
     try {
+      MaxTxIdProto.Builder builder = MaxTxIdProto.newBuilder().setTxId(maxTxId);
+
+      byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8);
       if (currentStat != null) {
-        currentStat = zkc.setData(path, txidStr.getBytes("UTF-8"), currentStat
+        currentStat = zkc.setData(path, data, currentStat
             .getVersion());
       } else {
-        zkc.create(path, txidStr.getBytes("UTF-8"), Ids.OPEN_ACL_UNSAFE,
-            CreateMode.PERSISTENT);
+        zkc.create(path, data, Ids.OPEN_ACL_UNSAFE,
+                   CreateMode.PERSISTENT);
       }
     } catch (KeeperException e) {
       throw new IOException("Error writing max tx id", e);
@@ -77,9 +83,16 @@ class MaxTxId {
       if (currentStat == null) {
         return 0;
       } else {
+
         byte[] bytes = zkc.getData(path, false, currentStat);
-        String txidString = new String(bytes, "UTF-8");
-        return Long.valueOf(txidString);
+
+        MaxTxIdProto.Builder builder = MaxTxIdProto.newBuilder();
+        TextFormat.merge(new String(bytes, UTF_8), builder);
+        if (!builder.isInitialized()) {
+          throw new IOException("Invalid/Incomplete data in znode");
+        }
+
+        return builder.build().getTxId();
       }
     } catch (KeeperException e) {
       throw new IOException("Error reading the max tx id from zk", e);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java Fri Oct 19 02:25:55 2012
@@ -18,6 +18,7 @@
 package org.apache.hadoop.contrib.bkjournal;
 
 import static org.junit.Assert.*;
+
 import org.junit.Test;
 import org.junit.Before;
 import org.junit.After;
@@ -25,6 +26,9 @@ import org.junit.BeforeClass;
 import org.junit.AfterClass;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 import org.apache.hadoop.hdfs.HAUtil;
@@ -35,12 +39,16 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 
 import org.apache.hadoop.ipc.RemoteException;
 
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 
 import org.apache.bookkeeper.proto.BookieServer;
@@ -48,7 +56,9 @@ import org.apache.bookkeeper.proto.Booki
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import java.io.File;
 import java.io.IOException;
+import java.net.URISyntaxException;
 
 /**
  * Integration test to ensure that the BookKeeper JournalManager
@@ -67,6 +77,11 @@ public class TestBookKeeperAsHASharedDir
     bkutil = new BKJMUtil(numBookies);
     bkutil.start();
   }
+  
+  @Before
+  public void clearExitStatus() {
+    ExitUtil.resetFirstExitException();
+  }
 
   @AfterClass
   public static void teardownBookkeeper() throws Exception {
@@ -244,4 +259,97 @@ public class TestBookKeeperAsHASharedDir
       }
     }
   }
+  
+  /**
+   * Use NameNode INITIALIZESHAREDEDITS to initialize the shared edits,
+   * i.e. copy the edit log segments to the new bkjm shared edits.
+   * 
+   * @throws Exception
+   */
+  @Test
+  public void testInitializeBKSharedEdits() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      HAUtil.setAllowStandbyReads(conf, true);
+      conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+
+      MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
+      cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
+          .numDataNodes(0).build();
+      cluster.waitActive();
+      // Shutdown and clear the current filebased shared dir.
+      cluster.shutdownNameNodes();
+      File shareddir = new File(cluster.getSharedEditsDir(0, 1));
+      assertTrue("Initial Shared edits dir not fully deleted",
+          FileUtil.fullyDelete(shareddir));
+
+      // Check that the namenodes cannot start without the shared dir.
+      assertCanNotStartNamenode(cluster, 0);
+      assertCanNotStartNamenode(cluster, 1);
+
+      // Configure bkjm as new shared edits dir in both namenodes
+      Configuration nn1Conf = cluster.getConfiguration(0);
+      Configuration nn2Conf = cluster.getConfiguration(1);
+      nn1Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
+          .createJournalURI("/initializeSharedEdits").toString());
+      nn2Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
+          .createJournalURI("/initializeSharedEdits").toString());
+      BKJMUtil.addJournalManagerDefinition(nn1Conf);
+      BKJMUtil.addJournalManagerDefinition(nn2Conf);
+
+      // Initialize the BKJM shared edits.
+      assertFalse(NameNode.initializeSharedEdits(nn1Conf));
+
+      // NameNode should be able to start and should be in sync with BKJM as
+      // shared dir
+      assertCanStartHANameNodes(cluster, conf, "/testBKJMInitialize");
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  private void assertCanNotStartNamenode(MiniDFSCluster cluster, int nnIndex) {
+    try {
+      cluster.restartNameNode(nnIndex, false);
+      fail("Should not have been able to start NN" + (nnIndex)
+          + " without shared dir");
+    } catch (IOException ioe) {
+      LOG.info("Got expected exception", ioe);
+      GenericTestUtils.assertExceptionContains(
+          "Cannot start an HA namenode with name dirs that need recovery", ioe);
+    }
+  }
+
+  private void assertCanStartHANameNodes(MiniDFSCluster cluster,
+      Configuration conf, String path) throws ServiceFailedException,
+      IOException, URISyntaxException, InterruptedException {
+    // Now should be able to start both NNs. Pass "false" here so that we don't
+    // try to waitActive on all NNs, since the second NN doesn't exist yet.
+    cluster.restartNameNode(0, false);
+    cluster.restartNameNode(1, true);
+
+    // Make sure HA is working.
+    cluster
+        .getNameNode(0)
+        .getRpcServer()
+        .transitionToActive(
+            new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
+    FileSystem fs = null;
+    try {
+      Path newPath = new Path(path);
+      fs = HATestUtil.configureFailoverFs(cluster, conf);
+      assertTrue(fs.mkdirs(newPath));
+      HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
+          cluster.getNameNode(1));
+      assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
+          newPath.toString(), false).isDir());
+    } finally {
+      if (fs != null) {
+        fs.close();
+      }
+    }
+  }
 }
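
(NameNode.initializeSharedEdits(conf), exercised by the new test, is the programmatic counterpart of running "hdfs namenode -initializeSharedEdits" when repointing an HA pair at a new shared edits directory such as a BKJM URI.)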


