zookeeper-commits mailing list archives

From an...@apache.org
Subject [03/22] zookeeper git commit: ZOOKEEPER-3031: 3.5: MAVEN MIGRATION - move client dir
Date Wed, 22 Aug 2018 06:18:42 GMT
http://git-wip-us.apache.org/repos/asf/zookeeper/blob/8c87cc49/zookeeper-client/zookeeper-client-c/tests/TestOperations.cc
----------------------------------------------------------------------
diff --git a/zookeeper-client/zookeeper-client-c/tests/TestOperations.cc b/zookeeper-client/zookeeper-client-c/tests/TestOperations.cc
new file mode 100644
index 0000000..b8a4b3f
--- /dev/null
+++ b/zookeeper-client/zookeeper-client-c/tests/TestOperations.cc
@@ -0,0 +1,710 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cppunit/extensions/HelperMacros.h>
+#include "CppAssertHelper.h"
+
+#include "ZKMocks.h"
+#include <proto.h>
+
+using namespace std;
+
+class Zookeeper_operations : public CPPUNIT_NS::TestFixture
+{
+    CPPUNIT_TEST_SUITE(Zookeeper_operations);
+#ifndef THREADED
+    CPPUNIT_TEST(testPing);
+    CPPUNIT_TEST(testUnsolicitedPing);
+    CPPUNIT_TEST(testTimeoutCausedByWatches1);
+    CPPUNIT_TEST(testTimeoutCausedByWatches2);
+#else    
+    CPPUNIT_TEST(testAsyncWatcher1);
+    CPPUNIT_TEST(testAsyncGetOperation);
+#endif
+    CPPUNIT_TEST(testOperationsAndDisconnectConcurrently1);
+    CPPUNIT_TEST(testOperationsAndDisconnectConcurrently2);
+    CPPUNIT_TEST(testConcurrentOperations1);
+    CPPUNIT_TEST_SUITE_END();
+    zhandle_t *zh;
+    FILE *logfile;
+
+    static void watcher(zhandle_t *, int, int, const char *,void*){}
+public: 
+    Zookeeper_operations() {
+      logfile = openlogfile("Zookeeper_operations");
+    }
+
+    ~Zookeeper_operations() {
+      if (logfile) {
+        fflush(logfile);
+        fclose(logfile);
+        logfile = 0;
+      }
+    }
+
+    void setUp()
+    {
+        zoo_set_log_stream(logfile);
+
+        zoo_deterministic_conn_order(0);
+        zh=0;
+    }
+    
+    void tearDown()
+    {
+        zookeeper_close(zh);
+    }
+
+    class AsyncGetOperationCompletion: public AsyncCompletion{
+    public:
+        AsyncGetOperationCompletion():called_(false),rc_(ZAPIERROR){}
+        virtual void dataCompl(int rc, const char *value, int len, const Stat *stat){
+            synchronized(mx_);
+            called_=true;
+            rc_=rc;
+            value_.erase();
+            if(rc!=ZOK) return;
+            value_.assign(value,len);
+            if(stat)
+                stat_=*stat;
+        }
+        bool operator()()const{
+            synchronized(mx_);
+            return called_;
+        }
+        mutable Mutex mx_;
+        bool called_;
+        int rc_;
+        string value_;
+        NodeStat stat_;
+    };
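+
+    // This suite is compiled in two flavors. Without THREADED, the tests
+    // drive the single-threaded API by hand: zookeeper_interest() reports
+    // the socket, the events to wait for, and the poll timeout, and
+    // zookeeper_process() then performs the actual I/O and fires the
+    // completions. With THREADED, the library runs its own IO/completion
+    // threads and the tests use the synchronous API instead.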
+#ifndef THREADED
+    // send two get data requests; verify that the corresponding completions are called
+    void testConcurrentOperations1()
+    {
+        Mock_gettimeofday timeMock;
+        ZookeeperServer zkServer;
+        // must call zookeeper_close() while all the mocks are in scope
+        CloseFinally guard(&zh);
+        
+        zh=zookeeper_init("localhost:2121",watcher,10000,TEST_CLIENT_ID,0,0);
+        CPPUNIT_ASSERT(zh!=0);
+        // simulate connected state
+        forceConnected(zh);
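+        // (forceConnected() is a ZKMocks helper that puts the handle straight
+        // into the connected state, so no real handshake is performed)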
+        
+        int fd=0;
+        int interest=0;
+        timeval tv;
+        // first operation
+        AsyncGetOperationCompletion res1;
+        zkServer.addOperationResponse(new ZooGetResponse("1",1));
+        int rc=zoo_aget(zh,"/x/y/1",0,asyncCompletion,&res1);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // second operation
+        AsyncGetOperationCompletion res2;
+        zkServer.addOperationResponse(new ZooGetResponse("2",1));
+        rc=zoo_aget(zh,"/x/y/2",0,asyncCompletion,&res2);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // process the send queue
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
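+        // drain the pipe: zookeeper_process() returns ZOK for every queued
+        // response it consumes and ZNOTHING once the mock server has no
+        // more data to deliver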
+        while((rc=zookeeper_process(zh,interest))==ZOK) {
+          millisleep(100);
+          //printf("%d\n", rc);
+        }
+        //printf("RC = %d", rc);
+        CPPUNIT_ASSERT_EQUAL((int)ZNOTHING,rc);
+
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,res1.rc_);
+        CPPUNIT_ASSERT_EQUAL(string("1"),res1.value_);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,res2.rc_);
+        CPPUNIT_ASSERT_EQUAL(string("2"),res2.value_);
+    }
+    // send two getData requests and disconnect while the second request is
+    // outstanding;
+    // verify the first completion gets ZOK and the second ZCONNECTIONLOSS
+    void testOperationsAndDisconnectConcurrently1()
+    {
+        Mock_gettimeofday timeMock;
+        ZookeeperServer zkServer;
+        // must call zookeeper_close() while all the mocks are in scope
+        CloseFinally guard(&zh);
+        
+        zh=zookeeper_init("localhost:2121",watcher,10000,TEST_CLIENT_ID,0,0);
+        CPPUNIT_ASSERT(zh!=0);
+        // simulate connected state
+        forceConnected(zh);
+        
+        int fd=0;
+        int interest=0;
+        timeval tv;
+        // first operation
+        AsyncGetOperationCompletion res1;
+        zkServer.addOperationResponse(new ZooGetResponse("1",1));
+        int rc=zoo_aget(zh,"/x/y/1",0,asyncCompletion,&res1);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // second operation
+        AsyncGetOperationCompletion res2;
+        zkServer.addOperationResponse(new ZooGetResponse("2",1));
+        rc=zoo_aget(zh,"/x/y/2",0,asyncCompletion,&res2);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // process the send queue
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // simulate a disconnect
+        zkServer.setConnectionLost();
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZCONNECTIONLOSS,rc);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,res1.rc_);
+        CPPUNIT_ASSERT_EQUAL(string("1"),res1.value_);
+        CPPUNIT_ASSERT_EQUAL((int)ZCONNECTIONLOSS,res2.rc_);
+        CPPUNIT_ASSERT_EQUAL(string(""),res2.value_);
+    }
+    // send two getData requests and simulate a timeout while both requests
+    // are pending;
+    // verify the completions are called
+    void testOperationsAndDisconnectConcurrently2()
+    {
+        Mock_gettimeofday timeMock;
+        ZookeeperServer zkServer;
+        // must call zookeeper_close() while all the mocks are in scope
+        CloseFinally guard(&zh);
+        
+        zh=zookeeper_init("localhost:2121",watcher,10000,TEST_CLIENT_ID,0,0);
+        CPPUNIT_ASSERT(zh!=0);
+        // simulate connected state
+        forceConnected(zh);
+        
+        int fd=0;
+        int interest=0;
+        timeval tv;
+        // first operation
+        AsyncGetOperationCompletion res1;
+        zkServer.addOperationResponse(new ZooGetResponse("1",1));
+        int rc=zoo_aget(zh,"/x/y/1",0,asyncCompletion,&res1);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // second operation
+        AsyncGetOperationCompletion res2;
+        zkServer.addOperationResponse(new ZooGetResponse("2",1));
+        rc=zoo_aget(zh,"/x/y/2",0,asyncCompletion,&res2);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // simulate timeout
+        timeMock.tick(+10); // advance system time by 10 secs
+        // the next call to zookeeper_interest should return ZOPERATIONTIMEOUT
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOPERATIONTIMEOUT,rc);
+        // make sure the completions have been called
+        CPPUNIT_ASSERT_EQUAL((int)ZOPERATIONTIMEOUT,res1.rc_);
+        CPPUNIT_ASSERT_EQUAL((int)ZOPERATIONTIMEOUT,res2.rc_);
+    }
+
+    class PingCountingServer: public ZookeeperServer{
+    public:
+        PingCountingServer():pingCount_(0){}
+        // called when a client request is received
+        virtual void onMessageReceived(const RequestHeader& rh, iarchive* ia){
+           if(rh.type==ZOO_PING_OP){
+               pingCount_++;
+           }
+        }
+        int pingCount_;
+    };
+
+    // establish a connection; idle for a while
+    // verify ping was sent at least once
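+    // (the client is expected to schedule a ping once the connection has
+    // been idle for about a third of the session timeout; see the TIMEOUT/3
+    // assertion in testTimeoutCausedByWatches2)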
+    void testPing()
+    {
+        const int TIMEOUT=9; // timeout in secs
+        Mock_gettimeofday timeMock;
+        PingCountingServer zkServer;
+        // must call zookeeper_close() while all the mocks are in scope
+        CloseFinally guard(&zh);
+        
+        // receive timeout is in milliseconds
+        zh=zookeeper_init("localhost:1234",watcher,TIMEOUT*1000,TEST_CLIENT_ID,0,0);
+        CPPUNIT_ASSERT(zh!=0);
+        // simulate connected state
+        forceConnected(zh);
+        
+        int fd=0;
+        int interest=0;
+        timeval tv;
+        // Round 1.
+        int rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // simulate waiting for the select() call to timeout; 
+        // advance the system clock accordingly
+        timeMock.tick(tv);  
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZNOTHING,rc);
+        // verify no ping sent
+        CPPUNIT_ASSERT(zkServer.pingCount_==0);
+        
+        // Round 2.
+        // the client should have exceeded the idle threshold by now
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // assume the socket is writable, so no idling here; move on to 
+        // zookeeper_process immediately
+        rc=zookeeper_process(zh,interest);
+        // ZNOTHING means the client hasn't received a ping response yet
+        CPPUNIT_ASSERT_EQUAL((int)ZNOTHING,rc);
+        // verify a ping is sent
+        CPPUNIT_ASSERT_EQUAL(1,zkServer.pingCount_);
+        
+        // Round 3.
+        // we're going to receive a server PING response and make sure
+        // that the client has updated its last_recv timestamp 
+        zkServer.addRecvResponse(new PingResponse);
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // pseudo-sleep for a short while (10 ms)
+        timeMock.millitick(10);
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // only one ping so far?
+        CPPUNIT_ASSERT_EQUAL(1,zkServer.pingCount_);
+        CPPUNIT_ASSERT(timeMock==zh->last_recv);
+
+        // Round 4
+        // make sure that a ping is not sent if something is outstanding
+        AsyncGetOperationCompletion res1;
+        rc=zoo_aget(zh,"/x/y/1",0,asyncCompletion,&res1);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        timeMock.tick(tv);  
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZNOTHING,rc);
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // pseudo-sleep for a short while (10 ms)
+        timeMock.millitick(10);
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZNOTHING,rc);
+        // only one ping so far?
+        CPPUNIT_ASSERT_EQUAL(1,zkServer.pingCount_);
+    }
+
+    // ZOOKEEPER-2253: Permit unsolicited pings
+    void testUnsolicitedPing()
+    {
+        const int TIMEOUT=9; // timeout in secs
+        Mock_gettimeofday timeMock;
+        PingCountingServer zkServer;
+        // must call zookeeper_close() while all the mocks are in scope
+        CloseFinally guard(&zh);
+
+        // receive timeout is in milliseconds
+        zh=zookeeper_init("localhost:1234",watcher,TIMEOUT*1000,TEST_CLIENT_ID,0,0);
+        CPPUNIT_ASSERT(zh!=0);
+        // simulate connected state
+        forceConnected(zh);
+
+        int fd=0;
+        int interest=0;
+        timeval tv;
+
+        int rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+
+        // verify no ping sent
+        CPPUNIT_ASSERT(zkServer.pingCount_==0);
+
+        // we're going to receive an unsolicited PING response; ensure
+        // that the client has updated its last_recv timestamp
+        timeMock.tick(tv);
+        zkServer.addRecvResponse(new PingResponse);
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        CPPUNIT_ASSERT(timeMock==zh->last_recv);
+    }
+
+    // simulate a watch arriving right before a ping is due
+    // assert the ping is sent nevertheless
+    void testTimeoutCausedByWatches1()
+    {
+        const int TIMEOUT=9; // timeout in secs
+        Mock_gettimeofday timeMock;
+        PingCountingServer zkServer;
+        // must call zookeeper_close() while all the mocks are in scope
+        CloseFinally guard(&zh);
+        
+        // receive timeout is in milliseconds
+        zh=zookeeper_init("localhost:1234",watcher,TIMEOUT*1000,TEST_CLIENT_ID,0,0);
+        CPPUNIT_ASSERT(zh!=0);
+        // simulate connected state
+        forceConnected(zh);
+        
+        int fd=0;
+        int interest=0;
+        timeval tv;
+        // Round 1.
+        int rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // simulate waiting for the select() call to timeout; 
+        // advance the system clock accordingly
+        timeMock.tick(tv);
+        timeMock.tick(-1); // set the clock to a millisecond before a ping is due
+        // trigger a watch now
+        zkServer.addRecvResponse(new ZNodeEvent(ZOO_CHANGED_EVENT,"/x/y/z"));
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // arrival of a watch sets the last_recv to the current time
+        CPPUNIT_ASSERT(timeMock==zh->last_recv);
+        // spend 1 millisecond by processing the watch
+        timeMock.tick(1);
+        
+        // Round 2.
+        // a ping is due; zookeeper_interest() must send it now
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // no delay here -- as if the socket is immediately writable
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZNOTHING,rc);
+        // verify a ping is sent
+        CPPUNIT_ASSERT_EQUAL(1,zkServer.pingCount_);        
+    }
+
+    // similar to testTimeoutCausedByWatches1, but this time the watch is
+    // triggered while the client has an outstanding request;
+    // assert the ping is still sent on time
+    void testTimeoutCausedByWatches2()
+    {
+        const int TIMEOUT=9; // timeout in secs
+        Mock_gettimeofday now;
+        PingCountingServer zkServer;
+        // must call zookeeper_close() while all the mocks are in scope
+        CloseFinally guard(&zh);
+        
+        // receive timeout is in milliseconds
+        zh=zookeeper_init("localhost:1234",watcher,TIMEOUT*1000,TEST_CLIENT_ID,0,0);
+        CPPUNIT_ASSERT(zh!=0);
+        // simulate connected state
+        forceConnected(zh);
+        
+        // queue up a request; keep it pending (as if the server is busy or has died)
+        AsyncGetOperationCompletion res1;
+        zkServer.addOperationResponse(new ZooGetResponse("2",1));
+        int rc=zoo_aget(zh,"/x/y/1",0,asyncCompletion,&res1);
+
+        int fd=0;
+        int interest=0;
+        timeval tv;
+        // Round 1.
+        // send the queued up zoo_aget() request
+        Mock_gettimeofday beginningOfTimes(now); // remember when we started
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // no delay -- the socket is writable
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc); 
+        
+        // Round 2.
+        // what's next?
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        // no response from the server yet -- waiting in the select() call
+        now.tick(tv);
+        // a watch has arrived, thus preventing the connection from timing out 
+        zkServer.addRecvResponse(new ZNodeEvent(ZOO_CHANGED_EVENT,"/x/y/z"));        
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc); // read the watch message
+        CPPUNIT_ASSERT_EQUAL(0,zkServer.pingCount_); // not yet!
+        
+        //Round 3.
+        // now is the time to send a ping; make sure it's actually sent
+        rc=zookeeper_interest(zh,&fd,&interest,&tv);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        rc=zookeeper_process(zh,interest);
+        CPPUNIT_ASSERT_EQUAL((int)ZNOTHING,rc);
+        // verify a ping is sent
+        CPPUNIT_ASSERT_EQUAL(1,zkServer.pingCount_);
+        // make sure only 1/3 of the timeout has passed
+        CPPUNIT_ASSERT_EQUAL((int32_t)TIMEOUT/3*1000,toMilliseconds(now-beginningOfTimes));
+    }
+
+#else   
+    class TestGetDataJob: public TestJob{
+    public:
+        TestGetDataJob(ZookeeperServer* svr,zhandle_t* zh, int reps=500)
+            :svr_(svr),zh_(zh),rc_(ZAPIERROR),reps_(reps){}
+        virtual void run(){
+            int i;
+            for(i=0;i<reps_;i++){
+                char buf;
+                int size=sizeof(buf);
+
+                if (i % 10 == 0) {
+                    // We need to pause every once in a while so we don't
+                    // get too far ahead and finish before the disconnect
+                    millisleep(1);
+                }
+                svr_->addOperationResponse(new ZooGetResponse("1",1));
+                rc_=zoo_get(zh_,"/x/y/z",0,&buf,&size,0);
+                if(rc_!=ZOK){
+                    break;
+                }
+            }
+        }
+        ZookeeperServer* svr_;
+        zhandle_t* zh_;
+        int rc_;
+        int reps_;
+    };
+    class TestConcurrentOpJob: public TestGetDataJob{
+    public:
+        static const int REPS=500;
+        TestConcurrentOpJob(ZookeeperServer* svr,zhandle_t* zh):
+            TestGetDataJob(svr,zh,REPS){}
+        virtual TestJob* clone() const {
+            return new TestConcurrentOpJob(svr_,zh_);
+        }
+        virtual void validate(const char* file, int line) const{
+            CPPUNIT_ASSERT_EQUAL_MESSAGE_LOC("ZOK != rc",(int)ZOK,rc_,file,line);
+        }
+    };
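+    // TestJobManager runs one clone of the prototype job per worker thread
+    // (10 below); VALIDATE_JOBS() then calls validate() on every clone to
+    // surface per-thread failures with file/line information.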
+    void testConcurrentOperations1()
+    {
+        for(int counter=0; counter<50; counter++){
+            // frozen time -- no timeouts and no pings
+            Mock_gettimeofday timeMock;
+            
+            ZookeeperServer zkServer;
+            Mock_poll pollMock(&zkServer,ZookeeperServer::FD);
+            // must call zookeeper_close() while all the mocks are in the scope!
+            CloseFinally guard(&zh);
+            
+            zh=zookeeper_init("localhost:2121",watcher,10000,TEST_CLIENT_ID,0,0);
+            CPPUNIT_ASSERT(zh!=0);
+            // make sure the client has connected
+            CPPUNIT_ASSERT(ensureCondition(ClientConnected(zh),1000)<1000);
+            
+            TestJobManager jmgr(TestConcurrentOpJob(&zkServer,zh),10);
+            jmgr.startAllJobs();
+            jmgr.wait();
+            // validate test results
+            VALIDATE_JOBS(jmgr);
+        }
+    }
+    class ZKGetJob: public TestJob{
+    public:
+        static const int REPS=1000;
+        ZKGetJob(zhandle_t* zh)
+            :zh_(zh),rc_(ZAPIERROR){}
+        virtual TestJob* clone() const {
+            return new ZKGetJob(zh_);
+        }
+        virtual void run(){
+            int i;
+            for(i=0;i<REPS;i++){
+                char buf;
+                int size=sizeof(buf);                
+                rc_=zoo_get(zh_,"/xyz",0,&buf,&size,0);
+                if(rc_!=ZOK){
+                    break;
+                }
+            }
+            //TEST_TRACE("Finished %d iterations",i);
+        }
+        virtual void validate(const char* file, int line) const{
+            CPPUNIT_ASSERT_EQUAL_MESSAGE_LOC("ZOK != rc",(int)ZOK,rc_,file,line);
+        }
+        zhandle_t* zh_;
+        int rc_;
+    };
+
+    // this test connects to a real ZK server, creates the /xyz node and sends
+    // lots of zoo_get requests.
+    // To run this test, use the following command:
+    // zktest-mt Zookeeper_operations::testOperationsAndDisconnectConcurrently2 localhost:3181
+    // where the second parameter is the server host and port
+    void testOperationsAndDisconnectConcurrently2()
+    {
+        if(globalTestConfig.getTestName().find(__func__)==string::npos || 
+                globalTestConfig.getExtraOptCount()==0)
+        {
+            // only run this test when specifically asked so
+            return;
+        }
+        string host(*(globalTestConfig.getExtraOptBegin()));
+        zhandle_t* lzh=zookeeper_init(host.c_str(),watcher,10000,0,0,0);
+        CPPUNIT_ASSERT(lzh!=0);
+        // make sure the client has connected
+        CPPUNIT_ASSERT_MESSAGE("Unable to connect to the host",
+                ensureCondition(ClientConnected(lzh),5000)<5000);
+        
+        char realpath[1024];
+        int rc=zoo_create(lzh,"/xyz","1",1,&ZOO_OPEN_ACL_UNSAFE,0,realpath,sizeof(realpath)-1);
+        CPPUNIT_ASSERT(rc==ZOK || rc==ZNODEEXISTS);
+        zookeeper_close(lzh); 
+  
+        for(int counter=0; counter<200; counter++){
+            TEST_TRACE("Loop count %d",counter);
+            
+            CloseFinally guard(&zh);
+
+            zh=zookeeper_init(host.c_str(),watcher,10000,0,0,0);
+            CPPUNIT_ASSERT(zh!=0);
+            // make sure the client has connected
+            CPPUNIT_ASSERT_MESSAGE("Unable to connect to the host",
+                    ensureCondition(ClientConnected(zh),5000)<5000);
+            
+            TestJobManager jmgr(ZKGetJob(zh),10);
+            jmgr.startJobsImmediately();
+            jmgr.wait();
+            VALIDATE_JOBS(jmgr);
+            TEST_TRACE("run %d finished",counter);
+        }
+
+    }
+
+    class TestConcurrentOpWithDisconnectJob: public TestGetDataJob{
+    public:
+        static const int REPS=1000;
+        TestConcurrentOpWithDisconnectJob(ZookeeperServer* svr,zhandle_t* zh):
+            TestGetDataJob(svr,zh,REPS){}
+        virtual TestJob* clone() const {
+            return new TestConcurrentOpWithDisconnectJob(svr_,zh_);
+        }
+        virtual void validate(const char* file, int line) const{
+            CPPUNIT_ASSERT_EQUAL_MESSAGE_LOC("ZCONNECTIONLOSS != rc",(int)ZCONNECTIONLOSS,rc_,file,line);
+        }
+    };
+
+    // this test is not 100% accurate, in the sense that it may not detect all error cases.
+    // TODO: I can't think of a test that is 100% accurate and doesn't interfere
+    //       with the code being tested (in terms of introducing additional 
+    //       implicit synchronization points)
+    void testOperationsAndDisconnectConcurrently1()
+    {
+        for(int counter=0; counter<50; counter++){
+            //TEST_TRACE("Loop count %d",counter);
+            // frozen time -- no timeouts and no pings
+            Mock_gettimeofday timeMock;
+            
+            ZookeeperServer zkServer;
+            Mock_poll pollMock(&zkServer,ZookeeperServer::FD);
+            // must call zookeeper_close() while all the mocks are in the scope!
+            CloseFinally guard(&zh);
+            
+            zh=zookeeper_init("localhost:2121",watcher,10000,TEST_CLIENT_ID,0,0);
+            CPPUNIT_ASSERT(zh!=0);
+            // make sure the client has connected
+            CPPUNIT_ASSERT(ensureCondition(ClientConnected(zh),1000)<1000);
+            
+            TestJobManager jmgr(TestConcurrentOpWithDisconnectJob(&zkServer,zh),10);
+            jmgr.startJobsImmediately();
+            // let everything start up before we shut down the server
+            millisleep(4);
+            // reconnect attempts will start failing immediately 
+            zkServer.setServerDown(0);
+            // next recv call will return 0
+            zkServer.setConnectionLost();
+            jmgr.wait();
+            VALIDATE_JOBS(jmgr);
+        }
+        
+    }
+    // call zoo_aget() in the multithreaded mode
+    void testAsyncGetOperation()
+    {
+        Mock_gettimeofday timeMock;
+        
+        ZookeeperServer zkServer;
+        Mock_poll pollMock(&zkServer,ZookeeperServer::FD);
+        // must call zookeeper_close() while all the mocks are in the scope!
+        CloseFinally guard(&zh);
+        
+        zh=zookeeper_init("localhost:2121",watcher,10000,TEST_CLIENT_ID,0,0);
+        CPPUNIT_ASSERT(zh!=0);
+        // make sure the client has connected
+        CPPUNIT_ASSERT(ensureCondition(ClientConnected(zh),1000)<1000);
+
+        AsyncGetOperationCompletion res1;
+        zkServer.addOperationResponse(new ZooGetResponse("1",1));
+        int rc=zoo_aget(zh,"/x/y/1",0,asyncCompletion,&res1);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        
+        CPPUNIT_ASSERT(ensureCondition(res1,1000)<1000);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,res1.rc_);
+        CPPUNIT_ASSERT_EQUAL(string("1"),res1.value_);        
+    }
+    class ChangeNodeWatcher: public WatcherAction{
+    public:
+        ChangeNodeWatcher():changed_(false){}
+        virtual void onNodeValueChanged(zhandle_t*,const char* path){
+            synchronized(mx_);
+            changed_=true;
+            if(path!=0) path_=path;
+        }
+        // this predicate checks whether a CHANGED event was triggered, unlike
+        // isWatcherTriggered(), which returns true whenever a watcher is
+        // triggered regardless of the event type
+        SyncedBoolCondition isNodeChangedTriggered() const{
+            return SyncedBoolCondition(changed_,mx_);
+        }
+        bool changed_;
+        string path_;
+    };
+    
+    class AsyncWatcherCompletion: public AsyncCompletion{
+    public:
+        AsyncWatcherCompletion(ZookeeperServer& zkServer):zkServer_(zkServer){}
+        virtual void statCompl(int rc, const Stat *stat){
+            // we received a server response, now enqueue a watcher event
+            // to trigger the watcher
+            zkServer_.addRecvResponse(new ZNodeEvent(ZOO_CHANGED_EVENT,"/x/y/z"));
+        }
+        ZookeeperServer& zkServer_;
+    };
+    // verify that the async watcher is called for znode events (CREATED, DELETED, etc.)
+    void testAsyncWatcher1(){
+        Mock_gettimeofday timeMock;
+        
+        ZookeeperServer zkServer;
+        Mock_poll pollMock(&zkServer,ZookeeperServer::FD);
+        // must call zookeeper_close() while all the mocks are in the scope!
+        CloseFinally guard(&zh);
+        
+        ChangeNodeWatcher action;        
+        zh=zookeeper_init("localhost:2121",activeWatcher,10000,
+                TEST_CLIENT_ID,&action,0);
+        CPPUNIT_ASSERT(zh!=0);
+        // make sure the client has connected
+        CPPUNIT_ASSERT(ensureCondition(ClientConnected(zh),1000)<1000);
+        
+        // set the watcher
+        AsyncWatcherCompletion completion(zkServer);
+        // prepare a response for the zoo_aexists() request
+        zkServer.addOperationResponse(new ZooStatResponse);
+        int rc=zoo_aexists(zh,"/x/y/z",1,asyncCompletion,&completion);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK,rc);
+        
+        CPPUNIT_ASSERT(ensureCondition(action.isNodeChangedTriggered(),1000)<1000);
+        CPPUNIT_ASSERT_EQUAL(string("/x/y/z"),action.path_);                
+    }
+#endif
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(Zookeeper_operations);

http://git-wip-us.apache.org/repos/asf/zookeeper/blob/8c87cc49/zookeeper-client/zookeeper-client-c/tests/TestReadOnlyClient.cc
----------------------------------------------------------------------
diff --git a/zookeeper-client/zookeeper-client-c/tests/TestReadOnlyClient.cc b/zookeeper-client/zookeeper-client-c/tests/TestReadOnlyClient.cc
new file mode 100644
index 0000000..d73f189
--- /dev/null
+++ b/zookeeper-client/zookeeper-client-c/tests/TestReadOnlyClient.cc
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cppunit/extensions/HelperMacros.h>
+#include "CppAssertHelper.h"
+
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <zookeeper.h>
+
+#include "Util.h"
+#include "WatchUtil.h"
+
+#ifdef THREADED
+class Zookeeper_readOnly : public CPPUNIT_NS::TestFixture {
+    CPPUNIT_TEST_SUITE(Zookeeper_readOnly);
+    CPPUNIT_TEST(testReadOnly);
+    CPPUNIT_TEST_SUITE_END();
+
+    static void watcher(zhandle_t* zh, int type, int state,
+                        const char* path, void* v) {
+        watchctx_t *ctx = (watchctx_t*)v;
+
+        if (state==ZOO_CONNECTED_STATE || state==ZOO_READONLY_STATE) {
+            ctx->connected = true;
+        } else {
+            ctx->connected = false;
+        }
+        if (type != ZOO_SESSION_EVENT) {
+            evt_t evt;
+            evt.path = path;
+            evt.type = type;
+            ctx->putEvent(evt);
+        }
+    }
+
+    FILE *logfile;
+public:
+
+    Zookeeper_readOnly() {
+      logfile = openlogfile("Zookeeper_readOnly");
+    }
+
+    ~Zookeeper_readOnly() {
+      if (logfile) {
+        fflush(logfile);
+        fclose(logfile);
+        logfile = 0;
+      }
+    }
+
+    void setUp() {
+        zoo_set_log_stream(logfile);
+    }
+
+    void startReadOnly() {
+        char cmd[1024];
+        sprintf(cmd, "%s startReadOnly", ZKSERVER_CMD);
+        CPPUNIT_ASSERT(system(cmd) == 0);
+    }
+
+    void stopPeer() {
+        char cmd[1024];
+        sprintf(cmd, "%s stop", ZKSERVER_CMD);
+        CPPUNIT_ASSERT(system(cmd) == 0);
+    }
+
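+    // Connect with the ZOO_READONLY flag against a read-only peer: reads
+    // should succeed while writes are rejected with ZNOTREADONLY.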
+    void testReadOnly() {
+        startReadOnly();
+        watchctx_t watch;
+        zhandle_t* zh = zookeeper_init("localhost:22181",
+                                       watcher,
+                                       10000,
+                                       NULL,
+                                       &watch,
+                                       ZOO_READONLY);
+        watch.zh = zh;
+        CPPUNIT_ASSERT(zh != 0);
+        sleep(1);
+        int len = 1024;
+        char buf[len];
+        int res = zoo_get(zh, "/", 0, buf, &len, 0);
+        CPPUNIT_ASSERT_EQUAL((int)ZOK, res);
+
+        char path[1024];
+        res = zoo_create(zh, "/test", buf, 10, &ZOO_OPEN_ACL_UNSAFE, 0, path,
+                         512);
+        CPPUNIT_ASSERT_EQUAL((int)ZNOTREADONLY, res);
+        stopPeer();
+    }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(Zookeeper_readOnly);
+#endif

http://git-wip-us.apache.org/repos/asf/zookeeper/blob/8c87cc49/zookeeper-client/zookeeper-client-c/tests/TestReconfig.cc
----------------------------------------------------------------------
diff --git a/zookeeper-client/zookeeper-client-c/tests/TestReconfig.cc b/zookeeper-client/zookeeper-client-c/tests/TestReconfig.cc
new file mode 100644
index 0000000..ee030d5
--- /dev/null
+++ b/zookeeper-client/zookeeper-client-c/tests/TestReconfig.cc
@@ -0,0 +1,605 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cppunit/extensions/HelperMacros.h>
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <errno.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <exception>
+#include <stdlib.h>
+
+#include "Util.h"
+#include "LibCMocks.h"
+#include "ZKMocks.h"
+
+using namespace std;
+
+static const int portOffset = 2000;
+
+class Client
+{
+
+private:    
+    // Member variables
+    zhandle_t *zh;
+    unsigned int seed;
+
+public:
+    /**
+     * Create a client for the given connection host string. Reconnection is
+     * disabled so the tests can shuffle servers explicitly through
+     * zoo_cycle_next_server.
+     */
+    Client(const string hosts, unsigned int seed) :
+        seed((seed * seed) + 0xAFAFAFAF)
+    {
+        reSeed();
+
+        zh = zookeeper_init(hosts.c_str(),0,1000,0,0,0);
+        CPPUNIT_ASSERT(zh);
+
+        // Set the flag to disable ZK from reconnecting to a different server.
+        // Our reconfig test case will do explicit server shuffling through
+        // zoo_cycle_next_server, and the reconnection attempts would interfere
+        // with the server states the test cases assume.
+        zh->disable_reconnection_attempt = 1;
+        reSeed();
+
+        cycleNextServer();
+    }
+
+    void close()
+    {
+        zookeeper_close(zh);
+        zh = NULL;
+    }
+
+    bool isReconfig()
+    {
+        return zh->reconfig != 0;
+    }
+
+    /**
+     * re-seed this client with its own previously generated seed so its
+     * random choices are unique and separate from the other clients
+     */
+    void reSeed()
+    {
+        srandom(seed);
+        srand48(seed);
+    }
+
+    /**
+     * Get the server that this client is currently connected to.
+     */
+    string getServer()
+    {
+        const char* addrstring = zoo_get_current_server(zh);
+        return string(addrstring);
+    }
+
+    /**
+     * Get the server this client is currently connected to with no port
+     * specification.
+     */
+    string getServerNoPort()
+    {
+        string addrstring = getServer();
+
+        size_t found = addrstring.find(":");
+        CPPUNIT_ASSERT(found != string::npos);
+
+        return addrstring.substr(0, found);
+    }
+
+    /**
+     * Get the port of the server this client is currently connected to.
+     */
+    uint32_t getServerPort()
+    {
+        string addrstring = getServer();
+
+        size_t found = addrstring.find(":");
+        CPPUNIT_ASSERT(found != string::npos);
+
+        string portStr = addrstring.substr(found+1);
+
+        stringstream ss(portStr);
+        uint32_t port;
+        ss >> port;
+
+        CPPUNIT_ASSERT(port >= portOffset);
+
+        return port;
+    }
+
+    /**
+     * Cycle to the next available server for the next connect attempt, then
+     * return the server now connected to (via getServer above).
+     */ 
+    string cycleNextServer()
+    {
+        zoo_cycle_next_server(zh);
+        return getServer();
+    }
+
+    void cycleUntilServer(const string requested)
+    {
+        // Call cycleNextServer until the one it's connected to is the one
+        // specified (disregarding port).
+        string first;
+
+        while(true)
+        {
+            string next = cycleNextServer();
+            if (first.empty())
+            {
+                first = next;
+            } 
+            // Else we've looped around!
+            else if (first == next)
+            {
+                CPPUNIT_ASSERT(false);
+            }
+
+            // Strip port off
+            string server = getServerNoPort();
+
+            // If it matches the requested host, we're now 'connected' to the right host
+            if (server == requested)
+            {
+                break;
+            }
+        }
+    }
+
+    /**
+     * Set servers for this client.
+     */
+    void setServers(const string new_hosts)
+    {
+        int rc = zoo_set_servers(zh, new_hosts.c_str());
+        CPPUNIT_ASSERT_EQUAL((int)ZOK, rc);
+    }
+
+    /**
+     * Set servers for this client and validate reconfig value matches expected.
+     */
+    void setServersAndVerifyReconfig(const string new_hosts, bool is_reconfig)
+    {
+        setServers(new_hosts);
+        CPPUNIT_ASSERT_EQUAL(is_reconfig, isReconfig());
+    }
+
+    /**
+     * Sets the server list this client is connecting to AND if this requires
+     * the client to be reconfigured (as dictated by internal client policy)
+     * then it will trigger a call to cycleNextServer.
+     */
+    void setServersAndCycleIfNeeded(const string new_hosts)
+    {
+        setServers(new_hosts);
+        if (isReconfig())
+        {
+            cycleNextServer();
+        }
+    }
+};
+
+class Zookeeper_reconfig : public CPPUNIT_NS::TestFixture
+{
+    CPPUNIT_TEST_SUITE(Zookeeper_reconfig);
+
+    // Test cases
+    CPPUNIT_TEST(testcycleNextServer);
+    CPPUNIT_TEST(testMigrateOrNot);
+    CPPUNIT_TEST(testMigrationCycle);
+
+    // In threaded mode each client spawns its own threads -- it's not
+    // practical to create 10,000 clients' worth of threads to test load
+    // balancing. The load balancing code can easily be tested in single
+    // threaded mode since concurrency doesn't affect the algorithm.
+#ifndef THREADED
+    CPPUNIT_TEST(testMigrateProbability);
+    CPPUNIT_TEST(testLoadBalancing);
+#endif
+
+    CPPUNIT_TEST_SUITE_END();
+
+    FILE *logfile;
+
+    double slackPercent;
+    static const int numClients = 10000;
+    static const int portOffset = 2000;
+
+    vector<Client> clients;
+    vector<uint32_t> numClientsPerHost;
+
+public:
+    Zookeeper_reconfig() :
+        slackPercent(10.0)
+    {
+      logfile = openlogfile("Zookeeper_reconfig");
+    }
+
+    ~Zookeeper_reconfig() 
+    {
+      if (logfile) 
+      {
+        fflush(logfile);
+        fclose(logfile);
+        logfile = 0;
+      }
+    }
+
+    void setUp()
+    {
+        zoo_set_log_stream(logfile);
+        zoo_deterministic_conn_order(1);
+
+        numClientsPerHost.resize(numClients);
+    }
+
+    void tearDown()
+    {
+        for (int i = 0; i < clients.size(); i++)
+        {
+            clients.at(i).close();
+        }
+    }
+
+    /**
+     * Create a client with given connection host string and add to our internal
+     * vector of clients. These are disconnected and cleaned up in tearDown().
+     */
+    Client& createClient(const string hosts)
+    {
+        Client client(hosts, clients.size());
+        clients.push_back(client);
+
+        return clients.back();
+    }
+
+    /**
+     * Same as createClient(hosts) only it takes a specific host that this client
+     * should simulate being connected to.
+     */
+    Client& createClient(const string hosts, const string host)
+    {
+        // Ensure requested host is in the list
+        size_t found = hosts.find(host);
+        CPPUNIT_ASSERT(found != hosts.npos);
+
+        Client client(hosts, clients.size());
+        client.cycleUntilServer(host);
+        clients.push_back(client);
+
+        return clients.back();
+    }
+
+    /**
+     * Create a connection host list starting at 'start' and stopping at 'stop'
+     * where start >= stop. This creates a connection string with host:port pairs
+     * separated by commas. The given 'octet' is the starting octet that is used
+     * as the last octet in the host's IP. This is decremented on each iteration. 
+     * Each port will be portOffset + octet.
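+     *
+     * For example, createHostList(3) returns
+     * "10.10.10.3:2003, 10.10.10.2:2002, 10.10.10.1:2001".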
+     */
+    string createHostList(uint32_t start, uint32_t stop = 1, uint32_t octet = 0)
+    {
+        if (octet == 0)
+        {
+            octet = start;
+        }
+
+        stringstream ss;
+
+        for (int i = start; i >= stop; i--, octet--)
+        {
+            ss << "10.10.10." << octet << ":" << portOffset + octet;
+
+            if (i > stop)
+            {
+                ss << ", ";
+            }
+        }
+
+        return ss.str();
+    }
+
+    /**
+     * Gets the lower bound of the number of clients per server that we expect
+     * based on the probabilistic load balancing algorithm implemented by the
+     * client code.
+     */
+    double lowerboundClientsPerServer(int numClients, int numServers)
+    {
+        return (1 - slackPercent/100.0) * numClients / numServers;
+    }
+
+    /**
+     * Gets the upper bound of the number of clients per server that we expect
+     * based on the probabilistic load balancing algorithm implemented by the
+     * client code.
+     */
+    double upperboundClientsPerServer(int numClients, int numServers)
+    {
+        return (1 + slackPercent/100.0) * numClients / numServers;
+    }
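+
+    // e.g. with numClients=10000, numServers=9 and slackPercent=10 the
+    // accepted range works out to roughly 1000..1222 clients per server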
+
+    /**
+     * Update all the clients to use a new list of servers. This will also cause
+     * the client to cycle to the next server as needed (e.g. due to a reconfig).
+     * It then updates the number of clients connected to the server based on
+     * this change.
+     * 
+     * Afterwards it validates that all of the servers have the correct amount of
+     * clients based on the probabilistic load balancing algorithm.
+     */
+    void updateAllClientsAndServers(int start, int stop = 1)
+    {
+        string newServers = createHostList(start, stop);
+        int numServers = start - stop + 1;
+
+        for (int i = 0; i < numClients; i++) {
+
+            Client &client = clients.at(i);
+            client.reSeed();
+
+            client.setServersAndCycleIfNeeded(newServers);
+            numClientsPerHost.at(client.getServerPort() - portOffset - 1)++;
+        }
+
+        int offset = stop - 1;
+        for (int index = offset; index < numServers; index++) {
+
+            if (numClientsPerHost.at(index) > upperboundClientsPerServer(numClients, numServers))
+            {
+                cout << "INDEX=" << index << " too many -- actual=" << numClientsPerHost.at(index) 
+                     << " expected=" << upperboundClientsPerServer(numClients, numServers) << endl;
+            }
+
+
+            CPPUNIT_ASSERT(numClientsPerHost.at(index) <= upperboundClientsPerServer(numClients, numServers));
+
+            if (numClientsPerHost.at(index) < lowerboundClientsPerServer(numClients, numServers))
+            {
+                cout << "INDEX=" << index << " too few -- actual=" << numClientsPerHost.at(index) 
+                     << " expected=" << lowerboundClientsPerServer(numClients, numServers) << endl;
+            }
+
+            CPPUNIT_ASSERT(numClientsPerHost.at(index) >= lowerboundClientsPerServer(numClients, numServers));
+            numClientsPerHost.at(index) = 0; // prepare for next test
+        }
+    }
+
+    /*-------------------------------------------------------------------------*
+     * TESTCASES
+     *------------------------------------------------------------------------*/
+
+    /**
+     * Very basic sunny day test to ensure basic functionality of zoo_set_servers
+     * and zoo_cycle_next_server.
+     */
+    void testcycleNextServer()
+    {
+        const string initial_hosts = createHostList(10); // 2010..2001
+        const string new_hosts = createHostList(4);      // 2004..2001
+
+        Client &client = createClient(initial_hosts);
+
+        client.setServersAndVerifyReconfig(new_hosts, true);
+
+        for (int i = 0; i < 10; i++)
+        {
+            string next = client.cycleNextServer();
+        }
+    }
+
+    /**
+     * Test the migration policy implicit within the probabilistic load balancing
+     * algorithm the Client implements. Tests all the corner cases whereby the
+     * list of servers is decreased, increased, and stays the same. Also combines
+     * various combinations of the currently connected server being in the new
+     * configuration and not.
+     */
+    void testMigrateOrNot()
+    {
+        const string initial_hosts = createHostList(4); // 2004..2001
+
+        Client &client = createClient(initial_hosts, "10.10.10.3");
+
+        // Ensemble size decreasing, my server is in the new list
+        client.setServersAndVerifyReconfig(createHostList(3), false);
+
+        // Ensemble size decreasing, my server is NOT in the new list
+        client.setServersAndVerifyReconfig(createHostList(2), true);
+
+        // Ensemble size stayed the same, my server is NOT in the new list
+        client.setServersAndVerifyReconfig(createHostList(2), true);
+
+        // Ensemble size increased, my server is not in the new ensemble
+        client.setServers(createHostList(4));
+        client.cycleUntilServer("10.10.10.1");
+        client.setServersAndVerifyReconfig(createHostList(7,2), true);
+    }
+
+    /**
+     * This tests that as a client is in reconfig mode it will properly try to
+     * connect to all the new servers first. Then it will try to connect to all
+     * the 'old' servers that are staying in the new configuration. Finally it
+     * will fallback to the normal behavior of trying servers in round-robin.
+     */
+    void testMigrationCycle()
+    {
+        int num_initial = 4;
+        const string initial_hosts = createHostList(num_initial); // {2004..2001}
+
+        int num_new = 10;
+        string new_hosts = createHostList(12, 3);      // {2012..2003}
+
+        // servers from the old list that appear in the new list {2004..2003}
+        int num_staying = 2;
+        string oldStaying = createHostList(4, 3);
+
+        // servers in the new list that are not in the old list  {2012..2005}
+        int num_coming = 8;
+        string newComing = createHostList(12, 5);
+
+        // Ensemble is increasing in size and my server is not in the new
+        // ensemble: load on the old servers must be decreased, so the client
+        // must connect to one of the new servers (pNew = 1)
+        Client &client = createClient(initial_hosts, "10.10.10.1");
+        client.setServersAndVerifyReconfig(new_hosts, true);
+
+        // Since we're in reconfig mode, next connect should be from new list
+        // We should try all the new servers *BEFORE* trying any old servers
+        string seen;
+        for (int i = 0; i < num_coming; i++) {
+            string next = client.cycleNextServer();
+
+            // Assert next server is in the 'new' list
+            size_t found = newComing.find(next);
+            CPPUNIT_ASSERT_MESSAGE(next + " not in newComing list",
+                                   found != string::npos);
+
+            // Assert not in seen list then append
+            found = seen.find(next);
+            CPPUNIT_ASSERT_MESSAGE(next + " in seen list",
+                                   found == string::npos);
+            seen += next + ", ";
+        }
+
+        // Now it should start connecting to the old servers
+        seen.clear();
+        for (int i = 0; i < num_staying; i++) {
+            string next = client.cycleNextServer();
+
+            // Assert it's in the old list
+            size_t found = oldStaying.find(next);
+            CPPUNIT_ASSERT(found != string::npos);
+
+            // Assert not in seen list then append
+            found = seen.find(next);
+            CPPUNIT_ASSERT(found == string::npos);
+            seen += next + ", ";
+        }
+
+        // NOW it goes back to normal as we've tried all the new and old
+        string first = client.cycleNextServer();
+        for (int i = 0; i < num_new - 1; i++) {
+            client.cycleNextServer();
+        }
+
+        CPPUNIT_ASSERT_EQUAL(first, client.cycleNextServer());
+    }
+
+    /**
+     * Test the migration probability to ensure that it conforms to our expected
+     * lower and upper bounds of the number of clients per server as we are 
+     * reconfigured.
+     * 
+     * In this case, the list of servers is increased and the client's server is
+     * in the new list. Whether to move or not depends on the difference of
+     * server sizes with probability 1 - |old|/|new| the client disconnects.
+     * 
+     * In the test below 1-9/10 = 1/10 chance of disconnecting
+     */
+    void testMigrateProbability()
+    {
+        const string initial_hosts = createHostList(9); // 10.10.10.9:2009...10.10.10.1:2001
+        string new_hosts = createHostList(10); // 10.10.10.10:2010...10.10.10.1:2001
+
+        uint32_t numDisconnects = 0;
+        for (int i = 0; i < numClients; i++) {
+            Client &client = createClient(initial_hosts, "10.10.10.3");
+            client.setServers(new_hosts);
+            if (client.isReconfig())
+            {
+                numDisconnects++;
+            }
+        }
+
+        // should be numClients/10 in expectation; we test that it's numClients/10 +- slackPercent
+        CPPUNIT_ASSERT(numDisconnects < upperboundClientsPerServer(numClients, 10));
+    }
+
+    /**
+     * Tests the probabilistic load balancing algorithm implemented by the Client
+     * code. 
+     * 
+     * Test strategy:
+     * 
+     * (1) Start with 9 servers and 10,000 clients. Remove a server, update
+     *     everything, and ensure that the clients are redistributed properly.
+     * 
+     * (2) Remove two more nodes and repeat the same validations of proper client
+     *     redistribution. Ensure no clients are connected to the two removed
+     *     nodes.
+     * 
+     * (3) Remove the first server in the list and simultaneously add the three
+     *     previously removed servers. Ensure everything is redistributed and
+     *     no clients are connected to the one missing node.
+     * 
+     * (4) Add the one missing server back into the mix and validate.
+     */
+    void testLoadBalancing()
+    {
+        zoo_deterministic_conn_order(0);
+
+        int rc = ZOK;
+
+        uint32_t numServers = 9;
+        const string initial_hosts = createHostList(numServers); // 10.10.10.9:2009...10.10.10.1:2001
+
+        // Create connections to servers
+        for (int i = 0; i < numClients; i++) {
+            Client &client = createClient(initial_hosts);
+            numClientsPerHost.at(client.getServerPort() - portOffset - 1)++;
+        }
+
+        for (int i = 0; i < numServers; i++) {
+            CPPUNIT_ASSERT(numClientsPerHost.at(i) <= upperboundClientsPerServer(numClients, numServers));
+            CPPUNIT_ASSERT(numClientsPerHost.at(i) >= lowerboundClientsPerServer(numClients, numServers));
+            numClientsPerHost.at(i) = 0; // prepare for next test
+        }
+
+        // remove last server
+        numServers = 8;
+        updateAllClientsAndServers(numServers);
+        CPPUNIT_ASSERT_EQUAL((uint32_t)0, numClientsPerHost.at(numServers));
+
+        // Remove two more nodes
+        numServers = 6;
+        updateAllClientsAndServers(numServers);
+        CPPUNIT_ASSERT_EQUAL((uint32_t)0, numClientsPerHost.at(numServers));
+        CPPUNIT_ASSERT_EQUAL((uint32_t)0, numClientsPerHost.at(numServers+1));
+        CPPUNIT_ASSERT_EQUAL((uint32_t)0, numClientsPerHost.at(numServers+2));
+
+        // remove host 0 (first one in list) and add back 6, 7, and 8
+        numServers = 8;
+        updateAllClientsAndServers(numServers, 1);
+        CPPUNIT_ASSERT_EQUAL((uint32_t)0, numClientsPerHost.at(0));
+
+        // add back host number 0
+        numServers = 9;
+        updateAllClientsAndServers(numServers);
+    }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(Zookeeper_reconfig);

http://git-wip-us.apache.org/repos/asf/zookeeper/blob/8c87cc49/zookeeper-client/zookeeper-client-c/tests/TestReconfigServer.cc
----------------------------------------------------------------------
diff --git a/zookeeper-client/zookeeper-client-c/tests/TestReconfigServer.cc b/zookeeper-client/zookeeper-client-c/tests/TestReconfigServer.cc
new file mode 100644
index 0000000..c15774e
--- /dev/null
+++ b/zookeeper-client/zookeeper-client-c/tests/TestReconfigServer.cc
@@ -0,0 +1,420 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+#include <algorithm>
+#include <sstream>
+#include <vector>
+#include <utility>
+#include <cppunit/extensions/HelperMacros.h>
+#include <unistd.h>
+#include "zookeeper.h"
+
+#include "Util.h"
+#include "ZooKeeperQuorumServer.h"
+
+#ifdef THREADED
+class TestReconfigServer : public CPPUNIT_NS::TestFixture {
+    CPPUNIT_TEST_SUITE(TestReconfigServer);
+    CPPUNIT_TEST(testNonIncremental);
+    CPPUNIT_TEST(testRemoveConnectedFollower);
+    CPPUNIT_TEST(testRemoveFollower);
+    CPPUNIT_TEST(testReconfigFailureWithoutAuth);
+    CPPUNIT_TEST(testReconfigFailureWithoutServerSuperuserPasswordConfigured);
+    CPPUNIT_TEST_SUITE_END();
+
+  public:
+    TestReconfigServer();
+    virtual ~TestReconfigServer();
+    void setUp();
+    void tearDown();
+    void testNonIncremental();
+    void testRemoveConnectedFollower();
+    void testRemoveFollower();
+    void testReconfigFailureWithoutAuth();
+    void testReconfigFailureWithoutServerSuperuserPasswordConfigured();
+  private:
+    static const uint32_t NUM_SERVERS;
+    FILE* logfile_;
+    std::vector<ZooKeeperQuorumServer*> cluster_;
+    int32_t getLeader();
+    std::vector<int32_t> getFollowers();
+    void parseConfig(char* buf, int len, std::vector<std::string>& servers,
+                     std::string& version);
+    bool waitForConnected(zhandle_t* zh, uint32_t timeout_sec);
+    zhandle_t* connectFollowers(std::vector<int32_t> &followers);
+};
+
+const uint32_t TestReconfigServer::NUM_SERVERS = 3;
+
+TestReconfigServer::
+TestReconfigServer() :
+    logfile_(openlogfile("TestReconfigServer")) {
+    zoo_set_log_stream(logfile_);
+}
+
+TestReconfigServer::
+~TestReconfigServer() {
+    if (logfile_) {
+        fflush(logfile_);
+        fclose(logfile_);
+        logfile_ = NULL;
+    }
+}
+
+void TestReconfigServer::
+setUp() {
+    ZooKeeperQuorumServer::tConfigPairs configs;
+    configs.push_back(std::make_pair("reconfigEnabled", "true"));
+    cluster_ = ZooKeeperQuorumServer::getCluster(NUM_SERVERS, configs,
+        "SERVER_JVMFLAGS=-Dzookeeper.DigestAuthenticationProvider.superDigest=super:D/InIHSb7yEEbrWz8b9l71RjZJU="/* password is test */);
+}
+
+void TestReconfigServer::
+tearDown() {
+    for (int i = 0; i < cluster_.size(); i++) {
+        delete cluster_[i];
+    }
+    cluster_.clear();
+}
+
+int32_t TestReconfigServer::
+getLeader() {
+    for (int32_t i = 0; i < cluster_.size(); i++) {
+        if (cluster_[i]->isLeader()) {
+            return i;
+        }
+    }
+    return -1;
+}
+
+std::vector<int32_t> TestReconfigServer::
+getFollowers() {
+    std::vector<int32_t> followers;
+    for (int32_t i = 0; i < cluster_.size(); i++) {
+        if (cluster_[i]->isFollower()) {
+            followers.push_back(i);
+        }
+    }
+    return followers;
+}
+
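+/**
+ * Splits the config blob returned by zoo_getconfig()/zoo_reconfig() into its
+ * "server.N=..." lines and the value of its "version=" line.
+ */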
+void TestReconfigServer::
+parseConfig(char* buf, int len, std::vector<std::string>& servers,
+            std::string& version) {
+    std::string config(buf, len);
+    std::stringstream ss(config);
+    std::string line;
+    std::string serverPrefix("server.");
+    std::string versionPrefix("version=");
+    servers.clear();
+    while(std::getline(ss, line, '\n')) {
+        if (line.compare(0, serverPrefix.size(), serverPrefix) == 0) {
+            servers.push_back(line);
+        } else if (line.compare(0, versionPrefix.size(), versionPrefix) == 0) {
+            version = line.substr(versionPrefix.size());
+        }
+    }
+}
+
+bool TestReconfigServer::
+waitForConnected(zhandle_t* zh, uint32_t timeout_sec) {
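+    // Poll the connection state once per second until the handle reports
+    // ZOO_CONNECTED_STATE or the timeout elapses.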
+    for (uint32_t i = 0; i < timeout_sec; i++) {
+        if (zoo_state(zh) == ZOO_CONNECTED_STATE) {
+            return true;
+        }
+        sleep(1);
+    }
+    return false;
+}
+
+/**
+ * 1. Connect to the leader.
+ * 2. Remove a follower using incremental reconfig.
+ * 3. Add the follower back using incremental reconfig.
+ */
+void TestReconfigServer::
+testRemoveFollower() {
+    std::vector<std::string> servers;
+    std::string version;
+    struct Stat stat;
+    char buf[1024];
+    int len = sizeof(buf);
+
+    // get config from leader.
+    int32_t leader = getLeader();
+    CPPUNIT_ASSERT(leader >= 0);
+    std::string host = cluster_[leader]->getHostPort();
+    zhandle_t* zk = zookeeper_init(host.c_str(), NULL, 10000, NULL, NULL, 0);
+    CPPUNIT_ASSERT_EQUAL(true, waitForConnected(zk, 10));
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_getconfig(zk, 0, buf, &len, &stat));
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_add_auth(zk, "digest", "super:test", 10, NULL,(void*)ZOK));
+    // check that all the servers are listed in the config.
+    parseConfig(buf, len, servers, version);
+    // The initial config version is 1<<32 (0x100000000): the zxid of the
+    // first NEWLEADER message is used as the starting version.
+    CPPUNIT_ASSERT_EQUAL(std::string("100000000"), version);
+    CPPUNIT_ASSERT_EQUAL(NUM_SERVERS, (uint32_t)(servers.size()));
+    for (int i = 0; i < cluster_.size(); i++) {
+        CPPUNIT_ASSERT(std::find(servers.begin(), servers.end(),
+                       cluster_[i]->getServerString()) != servers.end());
+    }
+
+    // remove a follower.
+    std::vector<int32_t> followers = getFollowers();
+    len = 1024;
+    CPPUNIT_ASSERT_EQUAL(NUM_SERVERS - 1,
+                         (uint32_t)(followers.size()));
+    std::stringstream ss;
+    ss << followers[0];
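+    // Incremental reconfig: only the departing server's id is passed, via the
+    // "leaving" argument; version -1 skips the expected-version check.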
+    int rc = zoo_reconfig(zk, NULL, ss.str().c_str(), NULL, -1, buf, &len,
+                          &stat);
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, rc);
+    parseConfig(buf, len, servers, version);
+    CPPUNIT_ASSERT_EQUAL(std::string("100000002"), version);
+    CPPUNIT_ASSERT_EQUAL(NUM_SERVERS - 1, (uint32_t)(servers.size()));
+    for (int i = 0; i < cluster_.size(); i++) {
+        if (i == followers[0]) {
+            continue;
+        }
+        CPPUNIT_ASSERT(std::find(servers.begin(), servers.end(),
+                       cluster_[i]->getServerString()) != servers.end());
+    }
+
+    // add the follower back.
+    len = 1024;
+    std::string serverString = cluster_[followers[0]]->getServerString();
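+    // Incremental reconfig in the other direction: the rejoining server's
+    // full spec is passed via the "joining" argument.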
+    rc = zoo_reconfig(zk, serverString.c_str(), NULL, NULL, -1, buf, &len,
+                          &stat);
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, rc);
+    parseConfig(buf, len, servers, version);
+    CPPUNIT_ASSERT_EQUAL(NUM_SERVERS, (uint32_t)(servers.size()));
+    for (int i = 0; i < cluster_.size(); i++) {
+        CPPUNIT_ASSERT(std::find(servers.begin(), servers.end(),
+                       cluster_[i]->getServerString()) != servers.end());
+    }
+    zookeeper_close(zk);
+}
+
+/**
+ * 1. Connect to the leader.
+ * 2. Remove a follower using non-incremental reconfig.
+ * 3. Add the follower back using non-incremental reconfig.
+ */
+void TestReconfigServer::
+testNonIncremental() {
+    std::vector<std::string> servers;
+    std::string version;
+    struct Stat stat;
+    char buf[1024];
+    int len = sizeof(buf);
+
+    // get config from leader.
+    int32_t leader = getLeader();
+    CPPUNIT_ASSERT(leader >= 0);
+    std::string host = cluster_[leader]->getHostPort();
+    zhandle_t* zk = zookeeper_init(host.c_str(), NULL, 10000, NULL, NULL, 0);
+    CPPUNIT_ASSERT_EQUAL(true, waitForConnected(zk, 10));
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_getconfig(zk, 0, buf, &len, &stat));
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_add_auth(zk, "digest", "super:test", 10, NULL,(void*)ZOK));
+
+    // check that all the servers are listed in the config.
+    parseConfig(buf, len, servers, version);
+    // The initial config version is 1<<32 (0x100000000): the zxid of the
+    // first NEWLEADER message is used as the starting version.
+    CPPUNIT_ASSERT_EQUAL(std::string("100000000"), version);
+    CPPUNIT_ASSERT_EQUAL(NUM_SERVERS, (uint32_t)(servers.size()));
+    for (int i = 0; i < cluster_.size(); i++) {
+        CPPUNIT_ASSERT(std::find(servers.begin(), servers.end(),
+                       cluster_[i]->getServerString()) != servers.end());
+    }
+
+    // remove a follower.
+    std::vector<int32_t> followers = getFollowers();
+    len = 1024;
+    CPPUNIT_ASSERT_EQUAL(NUM_SERVERS - 1,
+                         (uint32_t)(followers.size()));
+    std::stringstream ss;
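+    // Build the new membership from the leader plus every follower except
+    // followers[0], which is the one being removed.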
+    for (int i = 1; i < followers.size(); i++) {
+      ss << cluster_[followers[i]]->getServerString() << ",";
+    }
+    ss << cluster_[leader]->getServerString();
+
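+    // Non-incremental reconfig: the complete new membership is passed via the
+    // "members" argument, with "joining" and "leaving" NULL.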
+    int rc = zoo_reconfig(zk, NULL, NULL, ss.str().c_str(), -1, buf, &len,
+                          &stat);
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, rc);
+    parseConfig(buf, len, servers, version);
+    CPPUNIT_ASSERT_EQUAL(std::string("100000002"), version);
+    CPPUNIT_ASSERT_EQUAL(NUM_SERVERS - 1, (uint32_t)(servers.size()));
+    for (int i = 0; i < cluster_.size(); i++) {
+        if (i == followers[0]) {
+            continue;
+        }
+        CPPUNIT_ASSERT(std::find(servers.begin(), servers.end(),
+                       cluster_[i]->getServerString()) != servers.end());
+    }
+
+    // add the follower back.
+    len = 1024;
+    ss.str("");
+    for (int i = 0; i < cluster_.size(); i++) {
+      ss << cluster_[i]->getServerString() << ",";
+    }
+    rc = zoo_reconfig(zk, NULL, NULL, ss.str().c_str(), -1, buf, &len,
+                          &stat);
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, rc);
+    parseConfig(buf, len, servers, version);
+    CPPUNIT_ASSERT_EQUAL(NUM_SERVERS, (uint32_t)(servers.size()));
+    for (int i = 0; i < cluster_.size(); i++) {
+        CPPUNIT_ASSERT(std::find(servers.begin(), servers.end(),
+                       cluster_[i]->getServerString()) != servers.end());
+    }
+    zookeeper_close(zk);
+}
+
+zhandle_t* TestReconfigServer::
+connectFollowers(std::vector<int32_t> &followers) {
+    std::stringstream ss;
+    int32_t leader = getLeader();
+    CPPUNIT_ASSERT(leader >= 0);
+    CPPUNIT_ASSERT_EQUAL(NUM_SERVERS - 1, (uint32_t)(followers.size()));
+    for (int i = 0; i < followers.size(); i++) {
+        ss << cluster_[followers[i]]->getHostPort() << ",";
+    }
+    ss << cluster_[leader]->getHostPort();
+    std::string hosts = ss.str();
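+    // With deterministic connection order enabled, the client tries the hosts
+    // in the order listed, so it should connect to followers[0] first.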
+    zoo_deterministic_conn_order(1);
+    zhandle_t* zk = zookeeper_init(hosts.c_str(), NULL, 10000, NULL, NULL, 0);
+    CPPUNIT_ASSERT_EQUAL(true, waitForConnected(zk, 10));
+
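+    // Confirm the client really is connected to the first follower by
+    // comparing client ports.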
+    std::string connectedHost(zoo_get_current_server(zk));
+    std::string portString = connectedHost.substr(connectedHost.find(":") + 1);
+    uint32_t port;
+    std::istringstream(portString) >> port;
+    CPPUNIT_ASSERT_EQUAL(cluster_[followers[0]]->getClientPort(), port);
+    return zk;
+}
+
+/**
+ * 1. Connect to a follower.
+ * 2. Remove the follower the client is connected to.
+ */
+void TestReconfigServer::
+testRemoveConnectedFollower() {
+    std::vector<std::string> servers;
+    std::string version;
+    struct Stat stat;
+    char buf[1024];
+    int len = sizeof(buf);
+
+    // connect to a follower.
+    std::stringstream ss;
+    std::vector<int32_t> followers = getFollowers();
+    zhandle_t* zk = connectFollowers(followers);
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_add_auth(zk, "digest", "super:test", 10, NULL,(void*)ZOK));
+
+    // remove the follower.
+    len = 1024;
+    ss.str("");
+    ss << followers[0];
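+    // The reconfig removes the server this client is connected to, which may
+    // drop the connection before a response arrives, so the return code is
+    // not asserted; the zoo_getconfig below (after the client reconnects)
+    // verifies the new membership instead.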
+    zoo_reconfig(zk, NULL, ss.str().c_str(), NULL, -1, buf, &len, &stat);
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_getconfig(zk, 0, buf, &len, &stat));
+    parseConfig(buf, len, servers, version);
+    CPPUNIT_ASSERT_EQUAL(NUM_SERVERS - 1, (uint32_t)(servers.size()));
+    for (int i = 0; i < cluster_.size(); i++) {
+        if (i == followers[0]) {
+            continue;
+        }
+        CPPUNIT_ASSERT(std::find(servers.begin(), servers.end(),
+                       cluster_[i]->getServerString()) != servers.end());
+    }
+    zookeeper_close(zk);
+}
+
+/**
+ * ZOOKEEPER-2014: only an admin or users explicitly granted permission may perform a reconfig.
+ */
+void TestReconfigServer::
+testReconfigFailureWithoutAuth() {
+    std::vector<std::string> servers;
+    std::string version;
+    struct Stat stat;
+    char buf[1024];
+    int len = sizeof(buf);
+
+    // connect to a follower.
+    std::stringstream ss;
+    std::vector<int32_t> followers = getFollowers();
+    zhandle_t* zk = connectFollowers(followers);
+
+    // remove the follower.
+    len = 1024;
+    ss.str("");
+    ss << followers[0];
+    // No auth, should fail.
+    CPPUNIT_ASSERT_EQUAL((int)ZNOAUTH, zoo_reconfig(zk, NULL, ss.str().c_str(), NULL, -1, buf, &len, &stat));
+    // Wrong auth, should fail.
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_add_auth(zk, "digest", "super:wrong", 11, NULL,(void*)ZOK));
+    CPPUNIT_ASSERT_EQUAL((int)ZNOAUTH, zoo_reconfig(zk, NULL, ss.str().c_str(), NULL, -1, buf, &len, &stat));
+    // Right auth, should pass.
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_add_auth(zk, "digest", "super:test", 10, NULL,(void*)ZOK));
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_reconfig(zk, NULL, ss.str().c_str(), NULL, -1, buf, &len, &stat));
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_getconfig(zk, 0, buf, &len, &stat));
+    parseConfig(buf, len, servers, version);
+    CPPUNIT_ASSERT_EQUAL(NUM_SERVERS - 1, (uint32_t)(servers.size()));
+    for (int i = 0; i < cluster_.size(); i++) {
+        if (i == followers[0]) {
+            continue;
+        }
+        CPPUNIT_ASSERT(std::find(servers.begin(), servers.end(),
+                       cluster_[i]->getServerString()) != servers.end());
+    }
+    zookeeper_close(zk);
+}
+
+void TestReconfigServer::
+testReconfigFailureWithoutServerSuperuserPasswordConfigured() {
+    std::vector<std::string> servers;
+    std::string version;
+    struct Stat stat;
+    char buf[1024];
+    int len = sizeof(buf);
+
+    // Create a new quorum with the super user's password not configured.
+    tearDown();
+    ZooKeeperQuorumServer::tConfigPairs configs;
+    configs.push_back(std::make_pair("reconfigEnabled", "true"));
+    cluster_ = ZooKeeperQuorumServer::getCluster(NUM_SERVERS, configs, "");
+
+    // connect to a follower.
+    std::stringstream ss;
+    std::vector<int32_t> followers = getFollowers();
+    zhandle_t* zk = connectFollowers(followers);
+
+    // remove the follower.
+    len = 1024;
+    ss.str("");
+    ss << followers[0];
+    // All cases should fail as server ensemble was not configured with the super user's password.
+    CPPUNIT_ASSERT_EQUAL((int)ZNOAUTH, zoo_reconfig(zk, NULL, ss.str().c_str(), NULL, -1, buf, &len, &stat));
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_add_auth(zk, "digest", "super:", 11, NULL,(void*)ZOK));
+    CPPUNIT_ASSERT_EQUAL((int)ZNOAUTH, zoo_reconfig(zk, NULL, ss.str().c_str(), NULL, -1, buf, &len, &stat));
+    CPPUNIT_ASSERT_EQUAL((int)ZOK, zoo_add_auth(zk, "digest", "super:test", 10, NULL,(void*)ZOK));
+    CPPUNIT_ASSERT_EQUAL((int)ZNOAUTH, zoo_reconfig(zk, NULL, ss.str().c_str(), NULL, -1, buf, &len, &stat));
+    zookeeper_close(zk);
+}
+
+CPPUNIT_TEST_SUITE_REGISTRATION(TestReconfigServer);
+#endif

