hawq-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From w...@apache.org
Subject [2/2] incubator-hawq git commit: HAWQ-1509. Add TDE read test cases.
Date Tue, 08 Aug 2017 03:54:32 GMT
HAWQ-1509. Add TDE read test cases.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/3bdc1946
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/3bdc1946
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/3bdc1946

Branch: refs/heads/master
Commit: 3bdc1946408c4d3ddd5176273698c466436a8add
Parents: 90f1c4d
Author: interma <interma@outlook.com>
Authored: Mon Aug 7 13:44:45 2017 +0800
Committer: Wen Lin <wlin@pivotal.io>
Committed: Tue Aug 8 11:52:53 2017 +0800

----------------------------------------------------------------------
 depends/libhdfs3/src/client/CryptoCodec.cpp     |  10 +-
 depends/libhdfs3/src/client/CryptoCodec.h       |   2 +-
 depends/libhdfs3/src/client/InputStreamImpl.cpp |   6 +-
 .../libhdfs3/test/function/TestCInterface.cpp   | 223 +++++++++++++++++++
 4 files changed, 235 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/3bdc1946/depends/libhdfs3/src/client/CryptoCodec.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/CryptoCodec.cpp b/depends/libhdfs3/src/client/CryptoCodec.cpp
index 417f51e..bd4443f 100644
--- a/depends/libhdfs3/src/client/CryptoCodec.cpp
+++ b/depends/libhdfs3/src/client/CryptoCodec.cpp
@@ -119,16 +119,22 @@ namespace Hdfs {
 			return -1;
 		}
 
+		is_init = true;
 		// Calculate iv and counter in order to init cipher context with cipher method. Default value is 0.
-		resetStreamOffset(crypto_method, stream_offset);
+		if ((resetStreamOffset(crypto_method, stream_offset)) < 0) {
+			is_init = false;
+			return -1;
+		}
 
 		LOG(DEBUG3, "CryptoCodec init success, length of the decrypted key is : %llu, crypto method is : %d", AlgorithmBlockSize, crypto_method);
-		is_init = true;
 		return 1;
 
 	}
 
 	int CryptoCodec::resetStreamOffset(CryptoMethod crypto_method, int64_t stream_offset) {
+		// Check CryptoCodec init or not.
+		if (is_init == false)
+			return -1;
 		// Calculate new IV when appending an existed file.
 		std::string iv = encryptionInfo->getIv();
 		if (stream_offset > 0) {

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/3bdc1946/depends/libhdfs3/src/client/CryptoCodec.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/CryptoCodec.h b/depends/libhdfs3/src/client/CryptoCodec.h
index ee68637..f5070fe 100644
--- a/depends/libhdfs3/src/client/CryptoCodec.h
+++ b/depends/libhdfs3/src/client/CryptoCodec.h
@@ -75,7 +75,7 @@ namespace Hdfs {
 		 * Reset iv and padding value when seek file.
 		 * @param crypto_method do encrypt/decrypt work according to crypto_method.
 		 * @param stream_offset the offset of the current file.
-		 * @return 1 sucess; 0 already initialized; -1 failed.
+		 * @return 1 sucess; -1 failed.
 		 */
 		virtual int resetStreamOffset(CryptoMethod crypto_method, int64_t stream_offset);
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/3bdc1946/depends/libhdfs3/src/client/InputStreamImpl.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/InputStreamImpl.cpp b/depends/libhdfs3/src/client/InputStreamImpl.cpp
index c899a83..f2ed5de 100644
--- a/depends/libhdfs3/src/client/InputStreamImpl.cpp
+++ b/depends/libhdfs3/src/client/InputStreamImpl.cpp
@@ -648,8 +648,8 @@ int32_t InputStreamImpl::readInternal(char * buf, int32_t size) {
             std::string bufDecode;
             if (fileStatus.isFileEncrypted()) {
                 /* Decrypt buffer if the file is encrypted. */
-                bufDecode = cryptoCodec->cipher_wrap(buf, size);
-                memcpy(buf, bufDecode.c_str(), size);
+                bufDecode = cryptoCodec->cipher_wrap(buf, retval);
+                memcpy(buf, bufDecode.c_str(), retval);
             }
 
             return retval;
@@ -766,7 +766,7 @@ void InputStreamImpl::seekInternal(int64_t pos) {
                 int ret = cryptoCodec->resetStreamOffset(CryptoMethod::DECRYPT,
                         cursor);
                 if (ret < 0) {
-                    THROW(HdfsIOException, "init CryptoCodec failed, file:%s",
+                    THROW(HdfsIOException, "Reset offset failed, file:%s",
                             this->path.c_str());
                 }
             }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/3bdc1946/depends/libhdfs3/test/function/TestCInterface.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/test/function/TestCInterface.cpp b/depends/libhdfs3/test/function/TestCInterface.cpp
index ce0731b..bca7884 100644
--- a/depends/libhdfs3/test/function/TestCInterface.cpp
+++ b/depends/libhdfs3/test/function/TestCInterface.cpp
@@ -334,6 +334,7 @@ TEST(TestCInterfaceTDE, TestOpenCreateWithTDE_Success) {
     hdfsFreeBuilder(bld);
 }
 
+
 TEST(TestCInterfaceTDE, TestAppendOnceWithTDE_Success) {
     hdfsFS fs = NULL;
     setenv("LIBHDFS3_CONF", "function-test.xml", 1);
@@ -2107,3 +2108,225 @@ TEST_F(TestCInterface, TestConcurrentWrite_Failure) {
     int retval = hdfsCloseFile(fs, fout1);
     ASSERT_TRUE(retval == 0);
 }
+
+/*all TDE read cases*/
+
+//helper function
+static void generate_file(const char *file_path, int file_size) {
+    char buffer[1024];
+    Hdfs::FillBuffer(buffer, sizeof(buffer), 0);
+
+    int todo = file_size;
+    FILE *f = fopen(file_path, "w");
+    assert(f != NULL);
+    while (todo > 0) {
+        int batch = file_size;
+        if (batch > sizeof(buffer))
+            batch = sizeof(buffer);
+        int rc = fwrite(buffer, 1, batch, f);
+        //assert(rc == batch);
+        todo -= rc;
+    }
+    fclose(f);
+}
+
+int diff_buf2filecontents(const char *file_path, const char *buf, int offset,
+        int len) {
+    char *local_buf = (char *) malloc(len);
+
+    FILE *f = fopen(file_path, "r");
+    assert(f != NULL);
+    fseek(f, offset, SEEK_SET);
+
+    int todo = len;
+    int off = 0;
+    while (todo > 0) {
+        int rc = fread(local_buf + off, 1, todo, f);
+        todo -= rc;
+        off += rc;
+    }
+    fclose(f);
+
+    int ret = strncmp(buf, local_buf, len);
+    free(local_buf);
+    return ret;
+}
+
+TEST(TestCInterfaceTDE, TestReadWithTDE_Basic_Success) {
+    hdfsFS fs = NULL;
+    setenv("LIBHDFS3_CONF", "function-test.xml", 1);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+    assert(bld != NULL);
+    hdfsBuilderSetNameNode(bld, "default");
+    fs = hdfsBuilderConnect(bld);
+    ASSERT_TRUE(fs != NULL);
+
+    //create a normal file
+    char cmd[128];
+    const char *file_name = "tde_read_file";
+    int file_size = 1024;
+    generate_file(file_name, file_size);
+
+    //put file to TDE encryption zone
+    system("hadoop fs -rmr /TDEBasicRead");
+    system("hadoop key create keytde4basicread");
+    system("hadoop fs -mkdir /TDEBasicRead");
+    ASSERT_EQ(0,
+            hdfsCreateEncryptionZone(fs, "/TDEBasicRead", "keytde4basicread"));
+    sprintf(cmd, "hdfs dfs -put `pwd`/%s /TDEBasicRead/", file_name);
+    system(cmd);
+
+    int offset = 0;
+    int rc = 0;
+    char buf[1024];
+    int to_read = 5;
+    char file_path[128];
+    sprintf(file_path, "/TDEBasicRead/%s", file_name);
+    hdfsFile fin = hdfsOpenFile(fs, file_path, O_RDONLY, 0, 0, 0);
+
+    //case1: read from beginning
+    offset = 0;
+    rc = hdfsRead(fs, fin, buf, to_read);
+    ASSERT_GT(rc, 0);
+    ASSERT_TRUE(diff_buf2filecontents(file_name, buf, offset, rc) == 0);
+
+    //case2: read after seek
+    offset = 123;
+    hdfsSeek(fs, fin, offset);
+    rc = hdfsRead(fs, fin, buf, to_read);
+    ASSERT_GT(rc, 0);
+    ASSERT_TRUE(diff_buf2filecontents(file_name, buf, offset, rc) == 0);
+
+    //case3: multi read
+    offset = 456;
+    hdfsSeek(fs, fin, offset);
+    rc = hdfsRead(fs, fin, buf, to_read);
+    ASSERT_GT(rc, 0);
+    int rc2 = hdfsRead(fs, fin, buf + rc, to_read);
+    ASSERT_GT(rc2, 0);
+    ASSERT_TRUE(diff_buf2filecontents(file_name, buf, offset, rc + rc2) == 0);
+    //clean up
+    int retval = hdfsCloseFile(fs, fin);
+    ASSERT_TRUE(retval == 0);
+    system("hadoop fs -rmr /TDEBasicRead");
+    system("hadoop key delete keytde4basicread -f");
+}
+
+TEST(TestCInterfaceTDE, TestReadWithTDE_Advanced_Success) {
+    hdfsFS fs = NULL;
+    setenv("LIBHDFS3_CONF", "function-test.xml", 1);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+    assert(bld != NULL);
+    hdfsBuilderSetNameNode(bld, "default");
+    fs = hdfsBuilderConnect(bld);
+    ASSERT_TRUE(fs != NULL);
+
+    //create a big file
+    char cmd[128];
+    const char *file_name = "tde_read_bigfile";
+    int file_size = 150 * 1024 * 1024; //150M
+    generate_file(file_name, file_size);
+
+    //put file to TDE encryption zone
+    system("hadoop fs -rmr /TDEAdvancedRead");
+    system("hadoop key create keytde4advancedread");
+    system("hadoop fs -mkdir /TDEAdvancedRead");
+    ASSERT_EQ(0,
+            hdfsCreateEncryptionZone(fs, "/TDEAdvancedRead",
+                    "keytde4advancedread"));
+    sprintf(cmd, "hdfs dfs -put `pwd`/%s /TDEAdvancedRead/", file_name);
+    system(cmd);
+
+    int offset = 0;
+    int rc = 0;
+    char *buf = (char *) malloc(8 * 1024 * 1024); //8M
+    int to_read = 5;
+    char file_path[128];
+    sprintf(file_path, "/TDEAdvancedRead/%s", file_name);
+    hdfsFile fin = hdfsOpenFile(fs, file_path, O_RDONLY, 0, 0, 0);
+    //case4: skip block size(128M) read
+    offset = 128 * 1024 * 1024 + 12345;
+    hdfsSeek(fs, fin, offset);
+    rc = hdfsRead(fs, fin, buf, to_read);
+
+    ASSERT_GT(rc, 0);
+    ASSERT_TRUE(diff_buf2filecontents(file_name, buf, offset, rc) == 0);
+
+    //case5: skip package size(64k) read
+    offset = 64 * 1024 * 2 + 1234;
+    hdfsSeek(fs, fin, offset);
+    rc = hdfsRead(fs, fin, buf, to_read);
+    ASSERT_GT(rc, 0);
+    ASSERT_TRUE(diff_buf2filecontents(file_name, buf, offset, rc) == 0);
+
+    //case6: read block intervals
+    offset = 128 * 1024 * 1024 - 123;
+    to_read = 128;
+    hdfsSeek(fs, fin, offset);
+    rc = hdfsRead(fs, fin, buf, to_read);
+    ASSERT_TRUE(rc == 123); //only in remote read
+    ASSERT_TRUE(diff_buf2filecontents(file_name, buf, offset, rc) == 0);
+
+    //case7: read more bytes
+    offset = 5678;
+    to_read = 5 * 1024 * 1024 + 4567; //5M
+    int off = 0;
+    hdfsSeek(fs, fin, offset);
+    while (to_read > 0) {
+        rc = hdfsRead(fs, fin, buf + off, to_read);
+        ASSERT_GT(rc, 0);
+        std::cout << "loop read bytes:" << rc << std::endl;
+        to_read -= rc;
+        off += rc;
+    }
+    ASSERT_TRUE(diff_buf2filecontents(file_name, buf, offset, rc) == 0);
+
+    //clean up
+    int retval = hdfsCloseFile(fs, fin);
+    ASSERT_TRUE(retval == 0);
+    system("hadoop fs -rmr /TDEAdvancedRead");
+    system("hadoop key delete keytde4advancedread -f");
+    free(buf);
+}
+
+TEST(TestCInterfaceTDE, TestWriteReadWithTDE_Success) {
+    hdfsFS fs = NULL;
+    setenv("LIBHDFS3_CONF", "function-test.xml", 1);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+    assert(bld != NULL);
+    hdfsBuilderSetNameNode(bld, "default");
+    fs = hdfsBuilderConnect(bld);
+    hdfsBuilderSetUserName(bld, HDFS_SUPERUSER);
+    ASSERT_TRUE(fs != NULL);
+    //Create encryption zone for test.
+    system("hadoop fs -rmr /TDE");
+    system("hadoop key create keytde");
+    system("hadoop fs -mkdir /TDE");
+    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDE", "keytde"));
+    //Create tdefile under the encryption zone for TDE to write.
+    const char *tdefile = "/TDE/testfile";
+    //Write buffer to tdefile.
+    const char *buffer = "test tde write and read function success";
+    hdfsFile out = hdfsOpenFile(fs, tdefile, O_WRONLY | O_CREAT, 0, 0, 0);
+    ASSERT_TRUE(out != NULL)<< hdfsGetLastError();
+    EXPECT_EQ(strlen(buffer), hdfsWrite(fs, out, (const void *)buffer, strlen(buffer)))
+            << hdfsGetLastError();
+    hdfsCloseFile(fs, out);
+    //Read buffer from tdefile with TDE read function.
+    int offset = 0;
+    int rc = 0;
+    char buf[1024];
+    hdfsFile fin = hdfsOpenFile(fs, tdefile, O_RDONLY, 0, 0, 0);
+    rc = hdfsRead(fs, fin, buf, strlen(buffer));
+    buf[strlen(buffer)] = '\0';
+    ASSERT_GT(rc, 0);
+    //Check the buffer is eaqual to the data reading from tdefile.
+    ASSERT_STREQ(buffer, buf);
+    int retval = hdfsCloseFile(fs, fin);
+    ASSERT_TRUE(retval == 0);
+    system("hadoop fs -rmr /TDE");
+    system("hadoop key delete keytde -f");
+    ASSERT_EQ(hdfsDisconnect(fs), 0);
+    hdfsFreeBuilder(bld);
+}
+


Mime
View raw message