hawq-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From r...@apache.org
Subject incubator-hawq git commit: HAWQ-1502. Add verification to support TDE write function.
Date Wed, 02 Aug 2017 04:38:14 GMT
Repository: incubator-hawq
Updated Branches:
  refs/heads/master 2662bebd1 -> 71bb3676a


HAWQ-1502. Add verification to support TDE write function.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/71bb3676
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/71bb3676
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/71bb3676

Branch: refs/heads/master
Commit: 71bb3676a7e34e80e4a34e57265ea133b61551d9
Parents: 2662beb
Author: amyrazz44 <abai@pivotal.io>
Authored: Mon Jul 31 17:45:30 2017 +0800
Committer: amyrazz44 <abai@pivotal.io>
Committed: Wed Aug 2 11:41:57 2017 +0800

----------------------------------------------------------------------
 depends/libhdfs3/src/client/CryptoCodec.cpp     |   4 +-
 depends/libhdfs3/src/client/CryptoCodec.h       |  14 +-
 .../libhdfs3/src/client/KmsClientProvider.cpp   |   8 +-
 .../libhdfs3/test/function/TestCInterface.cpp   | 318 +++++++++++++++----
 .../libhdfs3/test/function/TestKmsClient.cpp    |  40 +--
 5 files changed, 279 insertions(+), 105 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/71bb3676/depends/libhdfs3/src/client/CryptoCodec.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/CryptoCodec.cpp b/depends/libhdfs3/src/client/CryptoCodec.cpp
index 0ca2d16..77ccf09 100644
--- a/depends/libhdfs3/src/client/CryptoCodec.cpp
+++ b/depends/libhdfs3/src/client/CryptoCodec.cpp
@@ -94,7 +94,7 @@ namespace Hdfs {
 		std::replace(key.begin(), key.end(), '-', '+');
 		std::replace(key.begin(), key.end(), '_', '/');
 
-		LOG(INFO, "CryptoCodec : getDecryptedKeyFromKms material is :%s", key.c_str());
+		LOG(DEBUG3, "CryptoCodec : getDecryptedKeyFromKms material is :%s", key.c_str());
 
 		key = KmsClientProvider::base64Decode(key);
 		return key;
@@ -144,7 +144,7 @@ namespace Hdfs {
 		//AES/CTR/NoPadding 
 		EVP_CIPHER_CTX_set_padding(cipherCtx, 0);
 
-		LOG(INFO, "CryptoCodec init success, key_length:%llu, is_encode:%d", AlgorithmBlockSize,
enc);
+		LOG(DEBUG3, "CryptoCodec init success, key_length:%llu, is_encode:%d", AlgorithmBlockSize,
enc);
 		is_init = true;
 		return 1;
 	}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/71bb3676/depends/libhdfs3/src/client/CryptoCodec.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/CryptoCodec.h b/depends/libhdfs3/src/client/CryptoCodec.h
index cae7d3b..45b1088 100644
--- a/depends/libhdfs3/src/client/CryptoCodec.h
+++ b/depends/libhdfs3/src/client/CryptoCodec.h
@@ -86,16 +86,16 @@ namespace Hdfs {
 		 */
 		std::string calculateIV(const std::string& initIV, unsigned long counter);
 
-		shared_ptr<KmsClientProvider> kcp;
-		FileEncryptionInfo* encryptionInfo;
-		EVP_CIPHER_CTX*     cipherCtx;
-		const EVP_CIPHER*   cipher;
+		shared_ptr<KmsClientProvider>	kcp;
+		FileEncryptionInfo*	encryptionInfo;
+		EVP_CIPHER_CTX*	cipherCtx;
+		const EVP_CIPHER*	cipher;
 		CryptoMethod	method;
 
 		bool	is_init;
-		int32_t		bufSize;
-		int64_t		padding;
-		int64_t		counter;
+		int32_t	bufSize;
+		int64_t	padding;
+		int64_t	counter;
 	};
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/71bb3676/depends/libhdfs3/src/client/KmsClientProvider.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/KmsClientProvider.cpp b/depends/libhdfs3/src/client/KmsClientProvider.cpp
index f1b4628..ac59570 100644
--- a/depends/libhdfs3/src/client/KmsClientProvider.cpp
+++ b/depends/libhdfs3/src/client/KmsClientProvider.cpp
@@ -229,7 +229,7 @@ void KmsClientProvider::createKey(const std::string &keyName, const
std::string
     hc->setExpectedResponseCode(201);
     std::string response = hc->post();
 
-    LOG(INFO,
+    LOG(DEBUG3,
             "KmsClientProvider::createKey : The key name, key cipher, key length, key material,
description are : %s, %s, %d, %s, %s. The kms url is : %s . The kms body is : %s. The response
of kms server is : %s .",
             keyName.c_str(), cipher.c_str(), length, material.c_str(),
             description.c_str(), url.c_str(), body.c_str(), response.c_str());
@@ -254,7 +254,7 @@ ptree KmsClientProvider::getKeyMetadata(const FileEncryptionInfo &encryptionInfo
     hc->setRequestTimeout(conf->getCurlTimeOut());
     std::string response = hc->get();
 
-    LOG(INFO,
+    LOG(DEBUG3,
             "KmsClientProvider::getKeyMetadata : The kms url is : %s. The response of kms
server is : %s .",
             url.c_str(), response.c_str());
 
@@ -279,7 +279,7 @@ void KmsClientProvider::deleteKey(const FileEncryptionInfo &encryptionInfo)
     hc->setRequestTimeout(conf->getCurlTimeOut());
     std::string response = hc->del();
 
-    LOG(INFO,
+    LOG(DEBUG3,
             "KmsClientProvider::deleteKey : The kms url is : %s. The response of kms server
is : %s .",
             url.c_str(), response.c_str());
 }
@@ -315,7 +315,7 @@ ptree KmsClientProvider::decryptEncryptedKey(const FileEncryptionInfo
&encryptio
     hc->setRequestTimeout(conf->getCurlTimeOut());
     std::string response = hc->post();
 
-    LOG(INFO,
+    LOG(DEBUG3,
             "KmsClientProvider::decryptEncryptedKey : The kms url is : %s . The kms body
is : %s. The response of kms server is : %s .",
             url.c_str(), body.c_str(), response.c_str());
     return fromJson(response);

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/71bb3676/depends/libhdfs3/test/function/TestCInterface.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/test/function/TestCInterface.cpp b/depends/libhdfs3/test/function/TestCInterface.cpp
index 40f6a1b..ce0731b 100644
--- a/depends/libhdfs3/test/function/TestCInterface.cpp
+++ b/depends/libhdfs3/test/function/TestCInterface.cpp
@@ -47,7 +47,7 @@ using namespace Hdfs::Internal;
 
 #define BASE_DIR TEST_HDFS_PREFIX"/testCInterface/"
 #define MAXDATABUFF 1024
-#define MD5LENTH 16
+#define MD5LENTH 33
 
 using namespace std;
 using Hdfs::CheckBuffer;
@@ -136,8 +136,8 @@ static void bufferMD5(const char* strFilePath, int size, char* result)
{
 
 static void diff_file2buffer(const char *file_path, const char *buf) {
     std::cout << "diff file: " << file_path << std::endl;
-    char resultFile[33] = { 0 };
-    char resultBuffer[33] = { 0 };
+    char resultFile[MD5LENTH] = { 0 };
+    char resultBuffer[MD5LENTH] = { 0 };
 
     fileMD5(file_path, resultFile);
     std::cout << "resultFile is " << resultFile << std::endl;
@@ -266,23 +266,25 @@ TEST(TestCInterfaceTDE, DISABLED_TestCreateEnRPC_Success) {
     hdfsBuilderSetNameNode(bld, "default");
     fs = hdfsBuilderConnect(bld);
     ASSERT_TRUE(fs != NULL);
-    system("hadoop fs -rmr /TDE");
-    system("hadoop key create keytde");
-    system("hadoop fs -mkdir /TDE");
-    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDE", "keytde"));
-    enInfo = hdfsGetEZForPath(fs, "/TDE");
+    //Test TDE API.
+    system("hadoop fs -rmr /TDEEnRPC");
+    system("hadoop key create keytderpc");
+    system("hadoop fs -mkdir /TDEEnRPC");
+    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDEEnRPC", "keytderpc"));
+    enInfo = hdfsGetEZForPath(fs, "/TDEEnRPC");
     ASSERT_TRUE(enInfo != NULL);
-    EXPECT_TRUE(enInfo->mKeyName != NULL);
+    ASSERT_STREQ("keytderpc", enInfo->mKeyName);
     std::cout << "----hdfsEncryptionZoneInfo----:" << " KeyName : " <<
enInfo->mKeyName << " Suite : " << enInfo->mSuite << " CryptoProtocolVersion
: " << enInfo->mCryptoProtocolVersion << " Id : " << enInfo->mId <<
" Path : " << enInfo->mPath << std::endl;
     hdfsFreeEncryptionZoneInfo(enInfo, 1);
-    for (int i = 0; i <= 10; i++){
+    //Test create multiple encryption zones.
+    for (int i = 0; i < 10; i++){
         std::stringstream newstr;
         newstr << i;
-        std::string tde = "/TDE" + newstr.str();
-        std::string key = "keytde" + newstr.str();
-        std::string rmTde = "hadoop fs -rmr /TDE" + newstr.str();
-        std::string tdeKey = "hadoop key create keytde" + newstr.str();
-        std::string mkTde = "hadoop fs -mkdir /TDE" + newstr.str();
+        std::string tde = "/TDEEnRPC" + newstr.str();
+        std::string key = "keytderpc" + newstr.str();
+        std::string rmTde = "hadoop fs -rmr /TDEEnRPC" + newstr.str();
+        std::string tdeKey = "hadoop key create keytderpc" + newstr.str();
+        std::string mkTde = "hadoop fs -mkdir /TDEEnRPC" + newstr.str();
         system(rmTde.c_str());
         system(tdeKey.c_str());
         system(mkTde.c_str());
@@ -295,9 +297,8 @@ TEST(TestCInterfaceTDE, DISABLED_TestCreateEnRPC_Success) {
     hdfsFreeBuilder(bld);
 }
 
-TEST(TestCInterfaceTDE, TestAppendWithTDE_Success) {
+TEST(TestCInterfaceTDE, TestOpenCreateWithTDE_Success) {
     hdfsFS fs = NULL;
-    hdfsEncryptionZoneInfo * enInfo = NULL;
     setenv("LIBHDFS3_CONF", "function-test.xml", 1);
     struct hdfsBuilder * bld = hdfsNewBuilder();
     assert(bld != NULL);
@@ -305,61 +306,139 @@ TEST(TestCInterfaceTDE, TestAppendWithTDE_Success) {
     fs = hdfsBuilderConnect(bld);
     hdfsBuilderSetUserName(bld, HDFS_SUPERUSER);
     ASSERT_TRUE(fs != NULL);
-    system("hadoop fs -rmr /TDE");
-    system("hadoop key delete keytde4append -f");
-    system("hadoop key create keytde4append");
-    system("hadoop fs -mkdir /TDE");
-    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDE", "keytde4append"));
-    enInfo = hdfsGetEZForPath(fs, "/TDE");
-    ASSERT_TRUE(enInfo != NULL);
-    EXPECT_TRUE(enInfo->mKeyName != NULL);
-    hdfsFreeEncryptionZoneInfo(enInfo, 1);
-    const char *tdefile = "/TDE/testfile";
-    ASSERT_TRUE(CreateFile(fs, tdefile, 0, 0));
+    //Create encryption zone for test.
+    system("hadoop fs -rmr /TDEOpen");
+    system("hadoop key create keytde4open");
+    system("hadoop fs -mkdir /TDEOpen");
+    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDEOpen", "keytde4open"));
+    //Create tdefile under the encryption zone for TDE to write.
+    const char *tdefile = "/TDEOpen/testfile";
+    //Write buffer to tdefile.
+    const char *buffer = "test tde open file with create flag success";
+    hdfsFile out = hdfsOpenFile(fs, tdefile, O_WRONLY | O_CREAT, 0, 0, 0);
+    ASSERT_TRUE(out != NULL)<< hdfsGetLastError();
+    EXPECT_EQ(strlen(buffer), hdfsWrite(fs, out, (const void *)buffer, strlen(buffer)))
+            << hdfsGetLastError();
+    hdfsCloseFile(fs, out);
+    //Read buffer from tdefile with hadoop API.
+    FILE *file = popen("hadoop fs -cat /TDEOpen/testfile", "r");
+    char bufGets[128];
+    while (fgets(bufGets, sizeof(bufGets), file)) {
+    }
+    pclose(file);
+    //Check the buffer is equal to the data read from tdefile.
+    ASSERT_STREQ(bufGets, buffer);
+    system("hadoop fs -rmr /TDEOpen");
+    system("hadoop key delete keytde4open -f");
+    ASSERT_EQ(hdfsDisconnect(fs), 0);
+    hdfsFreeBuilder(bld);
+}
 
-    const char *buffer = "hello world";
+TEST(TestCInterfaceTDE, TestAppendOnceWithTDE_Success) {
+    hdfsFS fs = NULL;
+    setenv("LIBHDFS3_CONF", "function-test.xml", 1);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+    assert(bld != NULL);
+    hdfsBuilderSetNameNode(bld, "default");
+    fs = hdfsBuilderConnect(bld);
+    hdfsBuilderSetUserName(bld, HDFS_SUPERUSER);
+    ASSERT_TRUE(fs != NULL);
+    //Create encryption zone for test.
+    system("hadoop fs -rmr /TDEAppend1");
+    //system("hadoop key delete keytde4append1 -f");
+    system("hadoop key create keytde4append1");
+    system("hadoop fs -mkdir /TDEAppend1");
+    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDEAppend1", "keytde4append1"));
+    //Create tdefile under the encryption zone for TDE to write.
+    const char *tdefile = "/TDEAppend1/testfile";
+    ASSERT_TRUE(CreateFile(fs, tdefile, 0, 0));
+    //Write buffer to tdefile.
+    const char *buffer = "test tde append once success";
     hdfsFile out = hdfsOpenFile(fs, tdefile, O_WRONLY | O_APPEND, 0, 0, 0);
     ASSERT_TRUE(out != NULL)<< hdfsGetLastError();
     EXPECT_EQ(strlen(buffer), hdfsWrite(fs, out, (const void *)buffer, strlen(buffer)))
             << hdfsGetLastError();
     hdfsCloseFile(fs, out);
-    FILE *file = popen("hadoop fs -cat /TDE/testfile", "r");
+    //Read buffer from tdefile with hadoop API.
+    FILE *file = popen("hadoop fs -cat /TDEAppend1/testfile", "r");
     char bufGets[128];
     while (fgets(bufGets, sizeof(bufGets), file)) {
     }
     pclose(file);
+    //Check the buffer is equal to the data read from tdefile.
     ASSERT_STREQ(bufGets, buffer);
-    system("hadoop fs -rmr /TDE");
-    system("hadoop key delete keytde4append -f");
+    system("hadoop fs -rmr /TDEAppend1");
+    system("hadoop key delete keytde4append1 -f");
     ASSERT_EQ(hdfsDisconnect(fs), 0);
     hdfsFreeBuilder(bld);
 }
 
-TEST(TestCInterfaceTDE, TestAppendWithTDELargeFiles_Success) {
+TEST(TestCInterfaceTDE, TestMultipleAppendReopenfileWithTDE_Success) {
     hdfsFS fs = NULL;
-    hdfsEncryptionZoneInfo * enInfo = NULL;
     setenv("LIBHDFS3_CONF", "function-test.xml", 1);
     struct hdfsBuilder * bld = hdfsNewBuilder();
     assert(bld != NULL);
     hdfsBuilderSetNameNode(bld, "default");
     fs = hdfsBuilderConnect(bld);
+    hdfsBuilderSetUserName(bld, HDFS_SUPERUSER);
     ASSERT_TRUE(fs != NULL);
-
-    //creake key and encryption zone
-    system("hadoop fs -rmr /TDE");
-    system("hadoop key delete keytde4append -f");
-    system("hadoop key create keytde4append");
-    system("hadoop fs -mkdir /TDE");
-    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDE", "keytde4append"));
-    enInfo = hdfsGetEZForPath(fs, "/TDE");
-    ASSERT_TRUE(enInfo != NULL);
-    EXPECT_TRUE(enInfo->mKeyName != NULL);
-    hdfsFreeEncryptionZoneInfo(enInfo, 1);
-    const char *tdefile = "/TDE/testfile";
+    //Create encryption zone for test.
+    system("hadoop fs -rmr /TDEAppend2");
+    system("hadoop key delete keytde4append2 -f");
+    system("hadoop key create keytde4append2");
+    system("hadoop fs -mkdir /TDEAppend2");
+    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDEAppend2", "keytde4append2"));
+    //Create tdefile under the encryption zone for TDE to write.
+    const char *tdefile = "/TDEAppend2/testfile";
     ASSERT_TRUE(CreateFile(fs, tdefile, 0, 0));
+    //Write buffer to tdefile.
+    std::string buffer1 = "test tde multiple append";
+    std::string buffer2 = "with reopen file success";
+    std::string buffer = buffer1 + buffer2;
+    hdfsFile out = hdfsOpenFile(fs, tdefile, O_WRONLY | O_APPEND, 0, 0, 0);
+    ASSERT_TRUE(out != NULL)<< hdfsGetLastError();
+    EXPECT_EQ(buffer1.length(), hdfsWrite(fs, out, (const void *)buffer1.c_str(), buffer1.length()))
+            << hdfsGetLastError();
+    hdfsCloseFile(fs, out);
+    //Reopen tdefile to append buffer.
+    out = hdfsOpenFile(fs, tdefile, O_WRONLY | O_APPEND, 0, 0, 0);
+    EXPECT_EQ(buffer2.length(), hdfsWrite(fs, out, (const void *)buffer2.c_str(), buffer2.length()))
<< hdfsGetLastError();
+    hdfsCloseFile(fs, out);
+    //Read buffer from tdefile with hadoop API.
+    FILE *file = popen("hadoop fs -cat /TDEAppend2/testfile", "r");
+    char bufGets[128];
+    while (fgets(bufGets, sizeof(bufGets), file)) {
+    }
+    pclose(file);
+    //Check the buffer is equal to the data read from tdefile.
+    ASSERT_STREQ(bufGets, buffer.c_str());
+    system("hadoop fs -rmr /TDEAppend2");
+    system("hadoop key delete keytde4append2 -f");
+    ASSERT_EQ(hdfsDisconnect(fs), 0);
+    hdfsFreeBuilder(bld);
+}
 
-    //case1: append
-    int size = 1024 * 32;
+
+TEST(TestCInterfaceTDE, TestMultipleAppendfileWithTDE_Success) {
+    hdfsFS fs = NULL;
+    setenv("LIBHDFS3_CONF", "function-test.xml", 1);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+    assert(bld != NULL);
+    hdfsBuilderSetNameNode(bld, "default");
+    fs = hdfsBuilderConnect(bld);
+    hdfsBuilderSetUserName(bld, HDFS_SUPERUSER);
+    ASSERT_TRUE(fs != NULL);
+    //Create encryption zone for test.
+    system("hadoop fs -rmr /TDEAppend3");
+    system("hadoop key delete keytde4append3 -f");
+    system("hadoop key create keytde4append3");
+    system("hadoop fs -mkdir /TDEAppend3");
+    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDEAppend3", "keytde4append3"));
+    //Create tdefile under the encryption zone for TDE to write.
+    const char *tdefile = "/TDEAppend3/testfile";
+    ASSERT_TRUE(CreateFile(fs, tdefile, 0, 0));
+    //Write buffer to tdefile with multiple append.
+    int size = 3 * 128;
     size_t offset = 0;
     hdfsFile out;
     int64_t todo = size;
@@ -369,10 +448,9 @@ TEST(TestCInterfaceTDE, TestAppendWithTDELargeFiles_Success) {
         if (NULL == (out = hdfsOpenFile(fs, tdefile, O_WRONLY | O_APPEND, 0, 0, 1024))) {
             break;
         }
-        Hdfs::FillBuffer(&buffer[0], buffer.size(), 1024);
-        buffer.push_back(0);
+        Hdfs::FillBuffer(&buffer[0], 128 * 3, 1024);
         while (todo > 0) {
-            if (0 > (rc = hdfsWrite(fs, out, &buffer[offset], todo))) {
+            if (0 > (rc = hdfsWrite(fs, out, &buffer[offset], 128))) {
                 break;
             }
             todo -= rc;
@@ -380,18 +458,144 @@ TEST(TestCInterfaceTDE, TestAppendWithTDELargeFiles_Success) {
         }
         rc = hdfsCloseFile(fs, out);
     } while (0);
+
+    //Read buffer from tdefile with hadoop API.
+    FILE *file = popen("hadoop fs -cat /TDEAppend3/testfile", "r");
+    char bufGets[128];
+    while (fgets(bufGets, sizeof(bufGets), file)) {
+    }
+    pclose(file);
+    //Check the buffer's md5 value is equal to the tdefile's md5 value.
     system("rm -rf ./testfile");
-    system("hadoop fs -get /TDE/testfile ./");
-    diff_file2buffer("testfile", &buffer[0]);
+    system("hadoop fs -get /TDEAppend3/testfile ./");
+    char resultFile[MD5LENTH] = { 0 };
+    fileMD5("./testfile", resultFile);
+    char resultBuffer[MD5LENTH] = { 0 };
+    bufferMD5(&buffer[0], size, resultBuffer);
+    ASSERT_STREQ(resultFile, resultBuffer);
     system("rm ./testfile");
+    system("hadoop fs -rmr /TDEAppend3");
+    system("hadoop key delete keytde4append3 -f");
+    ASSERT_EQ(hdfsDisconnect(fs), 0);
+    hdfsFreeBuilder(bld);
+}
 
-    //case5: a large file (> 64M) TODO
-    system("hadoop fs -rmr /TDE");
-    system("hadoop key delete keytde4append -f");
+
+TEST(TestCInterfaceTDE, TestAppendWithTDEMultipleChunks_Success) {
+    hdfsFS fs = NULL;
+    setenv("LIBHDFS3_CONF", "function-test.xml", 1);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+    assert(bld != NULL);
+    hdfsBuilderSetNameNode(bld, "default");
+    fs = hdfsBuilderConnect(bld);
+    ASSERT_TRUE(fs != NULL);
+    //Create key and encryption zone.
+    system("hadoop fs -rmr /TDEAppend4");
+    system("hadoop key delete keytde4append4 -f");
+    system("hadoop key create keytde4append4");
+    system("hadoop fs -mkdir /TDEAppend4");
+    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDEAppend4", "keytde4append4"));
+    const char *tdefile = "/TDEAppend4/testfile";
+    ASSERT_TRUE(CreateFile(fs, tdefile, 0, 0));
+    //Write buffer to tdefile.
+    int size = 1024;
+    size_t offset = 0;
+    hdfsFile out;
+    int64_t todo = size;
+	int64_t batch;
+    std::vector<char> buffer(size);
+    int rc = -1;
+    do {
+        if (NULL == (out = hdfsOpenFile(fs, tdefile, O_WRONLY | O_APPEND, 0, 0, 1024))) {
+            break;
+        }
+        while (todo > 0) {
+            batch = todo < static_cast<int32_t>(buffer.size()) ?
+                    todo : buffer.size();
+
+            Hdfs::FillBuffer(&buffer[0], batch, offset);
+
+            if (0 > (rc = hdfsWrite(fs, out, &buffer[offset], batch))) {
+                break;
+            }
+            LOG(INFO, "todo is %d. offset is %d", todo, offset);
+            todo -= rc;
+            offset += rc;
+        }
+        rc = hdfsCloseFile(fs, out);
+    } while (0);
+    //Check the testfile's md5 value is equal to buffer's md5 value.
+    system("rm -rf ./testfile");
+    system("hadoop fs -get /TDEAppend4/testfile ./");
+    char resultFile[MD5LENTH] = { 0 };
+    fileMD5("./testfile", resultFile);
+    char resultBuffer[MD5LENTH] = { 0 };
+    bufferMD5(&buffer[0], size, resultBuffer);
+    ASSERT_STREQ(resultFile, resultBuffer);
+    system("rm ./testfile");
+    system("hadoop fs -rmr /TDEAppend4");
+    system("hadoop key delete keytde4append4 -f");
     ASSERT_EQ(hdfsDisconnect(fs), 0);
     hdfsFreeBuilder(bld);
 }
 
+TEST(TestCInterfaceTDE, TestAppendWithTDEMultipleBlocks_Success) {
+    hdfsFS fs = NULL;
+    setenv("LIBHDFS3_CONF", "function-test.xml", 1);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+    assert(bld != NULL);
+    hdfsBuilderSetNameNode(bld, "default");
+    fs = hdfsBuilderConnect(bld);
+    ASSERT_TRUE(fs != NULL);
+    //Create key and encryption zone.
+    system("hadoop fs -rmr /TDEAppend5");
+    system("hadoop key delete keytde4append5 -f");
+    system("hadoop key create keytde4append5");
+    system("hadoop fs -mkdir /TDEAppend5");
+    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDEAppend5", "keytde4append5"));
+    const char *tdefile = "/TDEAppend5/testfile";
+    ASSERT_TRUE(CreateFile(fs, tdefile, 0, 0));
+    //Write buffer to tdefile.
+    int size = 256 * 1024 * 1024;
+    size_t offset = 0;
+    hdfsFile out;
+    int64_t todo = size;
+    int64_t batch;
+    std::vector<char> buffer(size);
+    int rc = -1;
+    do {
+        if (NULL == (out = hdfsOpenFile(fs, tdefile, O_WRONLY | O_APPEND, 0, 0, 1024))) {
+            break;
+        }
+        while (todo > 0) {
+            batch = todo < static_cast<int32_t>(buffer.size()) ?
+                    todo : buffer.size();
+
+            Hdfs::FillBuffer(&buffer[0], batch, offset);
+
+            if (0 > (rc = hdfsWrite(fs, out, &buffer[offset], batch))) {
+                break;
+            }
+            LOG(INFO, "todo is %d. offset is %d", todo, offset);
+            todo -= rc;
+            offset += rc;
+        }
+        rc = hdfsCloseFile(fs, out);
+    } while (0);
+    //Check the testfile's md5 value is equal to buffer's md5 value.
+    system("rm -rf ./testfile");
+    system("hadoop fs -get /TDEAppend5/testfile ./");
+    char resultFile[MD5LENTH] = { 0 };
+    fileMD5("./testfile", resultFile);
+    char resultBuffer[MD5LENTH] = { 0 };
+    bufferMD5(&buffer[0], size, resultBuffer);
+    ASSERT_STREQ(resultFile, resultBuffer);
+    system("rm ./testfile");
+    system("hadoop fs -rmr /TDEAppend5");
+    system("hadoop key delete keytde4append5 -f");
+    ASSERT_EQ(hdfsDisconnect(fs), 0);
+    hdfsFreeBuilder(bld);
+}
 
 TEST(TestCInterfaceTDE, TestAppendMultiTimes_Success) {
     hdfsFS fs = NULL;
@@ -442,6 +646,7 @@ TEST(TestCInterfaceTDE, TestAppendMultiTimes_Success) {
     hdfsCloseFile(fs, out);
     system("rm ./testfile3");
     system("hadoop fs -get /TDE/testfile3 ./");
+
     diff_file2buffer("testfile3", out_data3);
 
 
@@ -476,7 +681,6 @@ TEST(TestCInterfaceTDE, TestAppendMultiTimes_Success) {
         offset += rc;
     }
 
-
     ASSERT_EQ(data_size-1, offset);
 
     hdfsCloseFile(fs, out);

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/71bb3676/depends/libhdfs3/test/function/TestKmsClient.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/test/function/TestKmsClient.cpp b/depends/libhdfs3/test/function/TestKmsClient.cpp
index 21280de..0295866 100644
--- a/depends/libhdfs3/test/function/TestKmsClient.cpp
+++ b/depends/libhdfs3/test/function/TestKmsClient.cpp
@@ -76,38 +76,6 @@ protected:
     shared_ptr<FileSystem> fs;
 };
 
-static bool CreateFile(hdfsFS fs, const char * path, int64_t blockSize, int64_t fileSize)
{
-    hdfsFile out;
-    size_t offset = 0;
-    int64_t todo = fileSize, batch;
-    std::vector<char> buffer(32 * 1024);
-    int rc = -1;
-
-    do {
-        if (NULL == (out = hdfsOpenFile(fs, path, O_WRONLY, 0, 0, blockSize))) {
-            break;
-        }
-
-        while (todo > 0) {
-            batch = todo < static_cast<int32_t>(buffer.size()) ?
-                    todo : buffer.size();
-            Hdfs::FillBuffer(&buffer[0], batch, offset);
-
-            if (0 > (rc = hdfsWrite(fs, out, &buffer[0], batch))) {
-                break;
-            }
-
-            todo -= rc;
-            offset += rc;
-        }
-
-        rc = hdfsCloseFile(fs, out);
-    } while (0);
-
-    return rc >= 0;
-}
-
-
 TEST_F(TestKmsClient, CreateKeySuccess) {
     std::string keyName = "testcreatekeyname";
     std::string cipher = "AES/CTR/NoPadding";
@@ -160,14 +128,16 @@ TEST_F(TestKmsClient, DecryptEncryptedKeySuccess) {
     //create encryption zone and encrypted file
     ASSERT_EQ(0,
             hdfsCreateEncryptionZone(hfs, BASE_DIR"/testDEKey", "testdekeyname"));
-    const char * tdeFile = BASE_DIR"/testDEKey/tdefile";
-    ASSERT_TRUE(CreateFile(hfs, tdeFile, 0, 0));
+    std::string hadoop_command = "hadoop fs -touchz ";
+    std::string tdeFile = BASE_DIR"/testDEKey/tdefile";
+    std::string createFile = hadoop_command + tdeFile;
+    std::system(createFile.c_str());
 
     //decrypt encrypted key
     hc.reset(new HttpClient());
     kcp.reset(new KmsClientProvider(auth, sconf));
     kcp->setHttpClient(hc);
-    FileStatus fileStatus = fs->getFileStatus(tdeFile);
+    FileStatus fileStatus = fs->getFileStatus(tdeFile.c_str());
     FileEncryptionInfo *enInfo = fileStatus.getFileEncryption();
     ptree map = kcp->decryptEncryptedKey(*enInfo);
     std::string versionName = map.get < std::string > ("versionName");


Mime
View raw message