hadoop-common-commits mailing list archives

From: whe...@apache.org
Subject: [3/4] hadoop git commit: HDFS-9253. Refactor tests of libhdfs into a directory. Contributed by Haohui Mai.
Date: Fri, 16 Oct 2015 18:22:15 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c
new file mode 100644
index 0000000..032acbf
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c
@@ -0,0 +1,360 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "expect.h"
+#include "hdfs/hdfs.h"
+#include "native_mini_dfs.h"
+#include "os/thread.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
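+/* Two-level expansion so that macro arguments are fully expanded before
+ * being stringified. */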
+#define TO_STR_HELPER(X) #X
+#define TO_STR(X) TO_STR_HELPER(X)
+
+#define TLH_MAX_THREADS 100
+
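+/* 128 MB, matching the stock HDFS default block size. */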
+#define TLH_DEFAULT_BLOCK_SIZE 134217728
+
+static struct NativeMiniDfsCluster* tlhCluster;
+
+struct tlhThreadInfo {
+    /** Thread index */
+    int threadIdx;
+    /** 0 = thread was successful; error code otherwise */
+    int success;
+    /** thread identifier */
+    thread theThread;
+};
+
+static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
+                                     const char *username)
+{
+    int ret, nnPort;
+    tPort port;
+    hdfsFS hdfs;
+    struct hdfsBuilder *bld;
+
+    /* Check the int return value for errors before narrowing it to the
+     * unsigned tPort type, where a negative value would be lost. */
+    nnPort = nmdGetNameNodePort(cl);
+    if (nnPort < 0) {
+        fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
+                "returned error %d\n", nnPort);
+        return nnPort;
+    }
+    port = (tPort)nnPort;
+    bld = hdfsNewBuilder();
+    if (!bld)
+        return -ENOMEM;
+    hdfsBuilderSetForceNewInstance(bld);
+    hdfsBuilderSetNameNode(bld, "localhost");
+    hdfsBuilderSetNameNodePort(bld, port);
+    hdfsBuilderConfSetStr(bld, "dfs.block.size",
+                          TO_STR(TLH_DEFAULT_BLOCK_SIZE));
+    hdfsBuilderConfSetStr(bld, "dfs.blocksize",
+                          TO_STR(TLH_DEFAULT_BLOCK_SIZE));
+    if (username) {
+        hdfsBuilderSetUserName(bld, username);
+    }
+    hdfs = hdfsBuilderConnect(bld);
+    if (!hdfs) {
+        ret = -errno;
+        return ret;
+    }
+    *fs = hdfs;
+    return 0;
+}
+
+static int doTestGetDefaultBlockSize(hdfsFS fs, const char *path)
+{
+    int64_t blockSize;
+    int ret;
+
+    blockSize = hdfsGetDefaultBlockSize(fs);
+    if (blockSize < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsGetDefaultBlockSize failed with error %d\n", ret);
+        return ret;
+    } else if (blockSize != TLH_DEFAULT_BLOCK_SIZE) {
+        fprintf(stderr, "hdfsGetDefaultBlockSize got %"PRId64", but we "
+                "expected %d\n", blockSize, TLH_DEFAULT_BLOCK_SIZE);
+        return EIO;
+    }
+
+    blockSize = hdfsGetDefaultBlockSizeAtPath(fs, path);
+    if (blockSize < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsGetDefaultBlockSizeAtPath(%s) failed with "
+                "error %d\n", path, ret);
+        return ret;
+    } else if (blockSize != TLH_DEFAULT_BLOCK_SIZE) {
+        fprintf(stderr, "hdfsGetDefaultBlockSizeAtPath(%s) got "
+                "%"PRId64", but we expected %d\n", 
+                path, blockSize, TLH_DEFAULT_BLOCK_SIZE);
+        return EIO;
+    }
+    return 0;
+}
+
+struct tlhPaths {
+    char prefix[256];
+    char file1[256];
+    char file2[256];
+};
+
+static int setupPaths(const struct tlhThreadInfo *ti, struct tlhPaths *paths)
+{
+    memset(paths, 0, sizeof(*paths));
+    if (snprintf(paths->prefix, sizeof(paths->prefix), "/tlhData%04d",
+                 ti->threadIdx) >= sizeof(paths->prefix)) {
+        return ENAMETOOLONG;
+    }
+    if (snprintf(paths->file1, sizeof(paths->file1), "%s/file1",
+                 paths->prefix) >= sizeof(paths->file1)) {
+        return ENAMETOOLONG;
+    }
+    if (snprintf(paths->file2, sizeof(paths->file2), "%s/file2",
+                 paths->prefix) >= sizeof(paths->file2)) {
+        return ENAMETOOLONG;
+    }
+    return 0;
+}
+
+static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
+                                const struct tlhPaths *paths)
+{
+    char tmp[4096];
+    hdfsFile file;
+    int ret, expected, numEntries;
+    hdfsFileInfo *fileInfo;
+    struct hdfsReadStatistics *readStats = NULL;
+
+    if (hdfsExists(fs, paths->prefix) == 0) {
+        EXPECT_ZERO(hdfsDelete(fs, paths->prefix, 1));
+    }
+    EXPECT_ZERO(hdfsCreateDirectory(fs, paths->prefix));
+
+    EXPECT_ZERO(doTestGetDefaultBlockSize(fs, paths->prefix));
+
+    /* There should be no entry in the directory. */
+    errno = EACCES; // see if errno is set to 0 on success
+    EXPECT_NULL_WITH_ERRNO(hdfsListDirectory(fs, paths->prefix, &numEntries), 0);
+    if (numEntries != 0) {
+        fprintf(stderr, "hdfsListDirectory set numEntries to "
+                "%d on empty directory.", numEntries);
+    }
+
+    /* There should not be any file to open for reading. */
+    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0));
+
+    /* hdfsOpenFile should not accept mode = 3 (O_WRONLY | O_RDWR is not a
+     * valid access mode) */
+    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, 3, 0, 0, 0));
+
+    file = hdfsOpenFile(fs, paths->file1, O_WRONLY, 0, 0, 0);
+    EXPECT_NONNULL(file);
+
+    /* TODO: implement writeFully and use it here */
+    expected = (int)strlen(paths->prefix);
+    ret = hdfsWrite(fs, file, paths->prefix, expected);
+    if (ret < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsWrite failed and set errno %d\n", ret);
+        return ret;
+    }
+    if (ret != expected) {
+        fprintf(stderr, "hdfsWrite was supposed to write %d bytes, but "
+                "it wrote %d\n", ret, expected);
+        return EIO;
+    }
+    EXPECT_ZERO(hdfsFlush(fs, file));
+    EXPECT_ZERO(hdfsHSync(fs, file));
+    EXPECT_ZERO(hdfsCloseFile(fs, file));
+
+    /* There should be 1 entry in the directory. */
+    EXPECT_NONNULL(hdfsListDirectory(fs, paths->prefix, &numEntries));
+    if (numEntries != 1) {
+        fprintf(stderr, "hdfsListDirectory set numEntries to "
+                "%d on directory containing 1 file.", numEntries);
+    }
+
+    /* Let's re-open the file for reading */
+    file = hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0);
+    EXPECT_NONNULL(file);
+
+    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
+    errno = 0;
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalLocalBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalShortCircuitBytesRead);
+    hdfsFileFreeReadStatistics(readStats);
+    /* TODO: implement readFully and use it here */
+    ret = hdfsRead(fs, file, tmp, sizeof(tmp));
+    if (ret < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsRead failed and set errno %d\n", ret);
+        return ret;
+    }
+    if (ret != expected) {
+        fprintf(stderr, "hdfsRead was supposed to read %d bytes, but "
+                "it read %d\n", ret, expected);
+        return EIO;
+    }
+    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
+    errno = 0;
+    EXPECT_UINT64_EQ((uint64_t)expected, readStats->totalBytesRead);
+    hdfsFileFreeReadStatistics(readStats);
+    EXPECT_ZERO(hdfsFileClearReadStatistics(file));
+    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
+    EXPECT_UINT64_EQ((uint64_t)0, readStats->totalBytesRead);
+    hdfsFileFreeReadStatistics(readStats);
+    EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
+    EXPECT_ZERO(hdfsCloseFile(fs, file));
+
+    // TODO: Non-recursive delete should fail?
+    //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
+    EXPECT_ZERO(hdfsCopy(fs, paths->file1, fs, paths->file2));
+
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, NULL));
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, "doop"));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
+    EXPECT_ZERO(hdfsFileIsEncrypted(fileInfo));
+    hdfsFreeFileInfo(fileInfo, 1);
+
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha", "doop2"));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
+    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha2", NULL));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
+    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+
+    snprintf(tmp, sizeof(tmp), "%s/nonexistent-file-name", paths->prefix);
+    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, tmp, "ha3", NULL), ENOENT);
+    return 0;
+}
+
+static int testHdfsOperationsImpl(struct tlhThreadInfo *ti)
+{
+    hdfsFS fs = NULL;
+    struct tlhPaths paths;
+
+    fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
+        ti->threadIdx);
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
+    EXPECT_ZERO(setupPaths(ti, &paths));
+    // test some operations
+    EXPECT_ZERO(doTestHdfsOperations(ti, fs, &paths));
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    // reconnect as user "foo" and verify that we get permission errors
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, "foo"));
+    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, paths.file1, "ha3", NULL), EACCES);
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    // reconnect to do the final delete.
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
+    EXPECT_ZERO(hdfsDelete(fs, paths.prefix, 1));
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    return 0;
+}
+
+static void testHdfsOperations(void *v)
+{
+    struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
+    int ret = testHdfsOperationsImpl(ti);
+    ti->success = ret;
+}
+
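+/* Summarize the per-thread results: succeed only if every thread did. */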
+static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
+{
+    int i, threadsFailed = 0;
+    const char *sep = "";
+
+    for (i = 0; i < tlhNumThreads; i++) {
+        if (ti[i].success != 0) {
+            threadsFailed = 1;
+        }
+    }
+    if (!threadsFailed) {
+        fprintf(stderr, "testLibHdfs: all threads succeeded.  SUCCESS.\n");
+        return EXIT_SUCCESS;
+    }
+    fprintf(stderr, "testLibHdfs: some threads failed: [");
+    for (i = 0; i < tlhNumThreads; i++) {
+        if (ti[i].success != 0) {
+            fprintf(stderr, "%s%d", sep, i);
+            sep = ", "; 
+        }
+    }
+    fprintf(stderr, "].  FAILURE.\n");
+    return EXIT_FAILURE;
+}
+
+/**
+ * Test that we can perform basic libhdfs operations (writing files and
+ * reading them back) concurrently from multiple threads
+ */
+int main(void)
+{
+    int i, tlhNumThreads;
+    const char *tlhNumThreadsStr;
+    struct tlhThreadInfo ti[TLH_MAX_THREADS];
+    struct NativeMiniDfsConf conf = {
+        1, /* doFormat */
+    };
+
+    tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
+    if (!tlhNumThreadsStr) {
+        tlhNumThreadsStr = "3";
+    }
+    tlhNumThreads = atoi(tlhNumThreadsStr);
+    if ((tlhNumThreads <= 0) || (tlhNumThreads > TLH_MAX_THREADS)) {
+        fprintf(stderr, "testLibHdfs: must have a number of threads "
+                "between 1 and %d inclusive, not %d\n",
+                TLH_MAX_THREADS, tlhNumThreads);
+        return EXIT_FAILURE;
+    }
+    memset(&ti[0], 0, sizeof(ti));
+    for (i = 0; i < tlhNumThreads; i++) {
+        ti[i].threadIdx = i;
+    }
+
+    tlhCluster = nmdCreate(&conf);
+    EXPECT_NONNULL(tlhCluster);
+    EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));
+
+    for (i = 0; i < tlhNumThreads; i++) {
+        ti[i].theThread.start = testHdfsOperations;
+        ti[i].theThread.arg = &ti[i];
+        EXPECT_ZERO(threadCreate(&ti[i].theThread));
+    }
+    for (i = 0; i < tlhNumThreads; i++) {
+        EXPECT_ZERO(threadJoin(&ti[i].theThread));
+    }
+
+    EXPECT_ZERO(nmdShutdown(tlhCluster));
+    nmdFree(tlhCluster);
+    return checkFailures(ti, tlhNumThreads);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c
new file mode 100644
index 0000000..c55c8e3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfs/hdfs.h" 
+
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char *writeFileName;
+    off_t fileTotalSize;
+    long long tmpBufferSize;
+    tSize bufferSize;
+    hdfsFile writeFile;
+    char* buffer;
+    int i;
+    off_t nrRemaining;
+    tSize curSize;
+    tSize written;
+
+    if (argc != 4) {
+        fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize>\n");
+        exit(-1);
+    }
+
+    /* Parse the arguments only after the argument count has been validated. */
+    writeFileName = argv[1];
+    errno = 0;
+    fileTotalSize = strtoul(argv[2], NULL, 10);
+
+    /* Sanity check, while errno still reflects the strtoul call above. */
+    if (fileTotalSize == ULONG_MAX && errno == ERANGE) {
+        fprintf(stderr, "invalid file size %s - must be <= %lu\n", argv[2], ULONG_MAX);
+        exit(-3);
+    }
+
+    tmpBufferSize = strtoul(argv[3], NULL, 10);
+
+    /* Currently libhdfs writes take a tSize, which is an int32. */
+    if (tmpBufferSize > INT_MAX) {
+        fprintf(stderr, "invalid buffer size - libhdfs API write chunks must be <= %d\n", INT_MAX);
+        exit(-3);
+    }
+
+    fs = hdfsConnect("default", 0);
+    if (!fs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+        exit(-1);
+    }
+
+    bufferSize = (tSize)tmpBufferSize;
+
+    writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
+    if (!writeFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
+        exit(-2);
+    }
+
+    // data to be written to the file
+    buffer = malloc(sizeof(char) * bufferSize);
+    if(buffer == NULL) {
+        fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
+        return -2;
+    }
+    for (i=0; i < bufferSize; ++i) {
+        buffer[i] = 'a' + (i%26);
+    }
+
+    // write to the file
+    for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
+      curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining; 
+      if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
+        fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
+        exit(-3);
+      }
+    }
+
+    free(buffer);
+    hdfsCloseFile(fs, writeFile);
+    hdfsDisconnect(fs);
+
+    return 0;
+}
+
+/**
+ * vim: ts=4: sw=4: et:
+ */
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_zerocopy.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_zerocopy.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_zerocopy.c
new file mode 100644
index 0000000..bf529b4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_zerocopy.c
@@ -0,0 +1,280 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "expect.h"
+#include "hdfs/hdfs.h"
+#include "native_mini_dfs.h"
+#include "platform.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#define TO_STR_HELPER(X) #X
+#define TO_STR(X) TO_STR_HELPER(X)
+
+#define TEST_FILE_NAME_LENGTH 128
+#define TEST_ZEROCOPY_FULL_BLOCK_SIZE 4096
+#define TEST_ZEROCOPY_LAST_BLOCK_SIZE 3215
+#define TEST_ZEROCOPY_NUM_BLOCKS 6
+#define SMALL_READ_LEN 16
+#define TEST_ZEROCOPY_FILE_LEN \
+  (((TEST_ZEROCOPY_NUM_BLOCKS - 1) * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + \
+    TEST_ZEROCOPY_LAST_BLOCK_SIZE)
+
+#define ZC_BUF_LEN 32768
+
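+/* Fill a test block with a deterministic pattern derived from its index. */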
+static uint8_t *getZeroCopyBlockData(int blockIdx)
+{
+    uint8_t *buf = malloc(TEST_ZEROCOPY_FULL_BLOCK_SIZE);
+    int i;
+    if (!buf) {
+        fprintf(stderr, "malloc(%d) failed\n", TEST_ZEROCOPY_FULL_BLOCK_SIZE);
+        exit(1);
+    }
+    for (i = 0; i < TEST_ZEROCOPY_FULL_BLOCK_SIZE; i++) {
+      buf[i] = (uint8_t)(blockIdx + (i % 17));
+    }
+    return buf;
+}
+
+static int getZeroCopyBlockLen(int blockIdx)
+{
+    if (blockIdx >= TEST_ZEROCOPY_NUM_BLOCKS) {
+        return 0;
+    } else if (blockIdx == (TEST_ZEROCOPY_NUM_BLOCKS - 1)) {
+        return TEST_ZEROCOPY_LAST_BLOCK_SIZE;
+    } else {
+        return TEST_ZEROCOPY_FULL_BLOCK_SIZE;
+    }
+}
+
+static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
+{
+    hdfsFile file = NULL;
+    struct hadoopRzOptions *opts = NULL;
+    struct hadoopRzBuffer *buffer = NULL;
+    uint8_t *block;
+
+    file = hdfsOpenFile(fs, fileName, O_RDONLY, 0, 0, 0);
+    EXPECT_NONNULL(file);
+    opts = hadoopRzOptionsAlloc();
+    EXPECT_NONNULL(opts);
+    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 1));
+    /* haven't read anything yet */
+    EXPECT_ZERO(expectFileStats(file, 0LL, 0LL, 0LL, 0LL));
+    block = getZeroCopyBlockData(0);
+    EXPECT_NONNULL(block);
+    /* first read is half of a block. */
+    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2);
+    EXPECT_NONNULL(buffer);
+    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2,
+          hadoopRzBufferLength(buffer));
+    EXPECT_ZERO(memcmp(hadoopRzBufferGet(buffer), block,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2));
+    hadoopRzBufferFree(file, buffer);
+    /* read the next half of the block */
+    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2);
+    EXPECT_NONNULL(buffer);
+    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2,
+          hadoopRzBufferLength(buffer));
+    EXPECT_ZERO(memcmp(hadoopRzBufferGet(buffer),
+          block + (TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2),
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2));
+    hadoopRzBufferFree(file, buffer);
+    free(block);
+    EXPECT_ZERO(expectFileStats(file, TEST_ZEROCOPY_FULL_BLOCK_SIZE, 
+              TEST_ZEROCOPY_FULL_BLOCK_SIZE,
+              TEST_ZEROCOPY_FULL_BLOCK_SIZE,
+              TEST_ZEROCOPY_FULL_BLOCK_SIZE));
+    /* Now let's read just a few bytes. */
+    buffer = hadoopReadZero(file, opts, SMALL_READ_LEN);
+    EXPECT_NONNULL(buffer);
+    EXPECT_INT_EQ(SMALL_READ_LEN, hadoopRzBufferLength(buffer));
+    block = getZeroCopyBlockData(1);
+    EXPECT_NONNULL(block);
+    EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer), SMALL_READ_LEN));
+    hadoopRzBufferFree(file, buffer);
+    EXPECT_INT64_EQ(
+          (int64_t)TEST_ZEROCOPY_FULL_BLOCK_SIZE + (int64_t)SMALL_READ_LEN,
+          hdfsTell(fs, file));
+    EXPECT_ZERO(expectFileStats(file,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN));
+
+    /* Clear 'skip checksums' and test that we can't do zero-copy reads any
+     * more.  Since there is no ByteBufferPool set, we should fail with
+     * EPROTONOSUPPORT.
+     */
+    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 0));
+    EXPECT_NULL(hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE));
+    EXPECT_INT_EQ(EPROTONOSUPPORT, errno);
+
+    /* Verify that setting a NULL ByteBufferPool class works. */
+    EXPECT_ZERO(hadoopRzOptionsSetByteBufferPool(opts, NULL));
+    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 0));
+    EXPECT_NULL(hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE));
+    EXPECT_INT_EQ(EPROTONOSUPPORT, errno);
+
+    /* Now set a ByteBufferPool and try again.  It should succeed this time. */
+    EXPECT_ZERO(hadoopRzOptionsSetByteBufferPool(opts,
+          ELASTIC_BYTE_BUFFER_POOL_CLASS));
+    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE);
+    EXPECT_NONNULL(buffer);
+    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE, hadoopRzBufferLength(buffer));
+    EXPECT_ZERO(expectFileStats(file,
+          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
+          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
+          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN));
+    EXPECT_ZERO(memcmp(block + SMALL_READ_LEN, hadoopRzBufferGet(buffer),
+        TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN));
+    free(block);
+    block = getZeroCopyBlockData(2);
+    EXPECT_NONNULL(block);
+    EXPECT_ZERO(memcmp(block, (uint8_t*)hadoopRzBufferGet(buffer) +
+        (TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN));
+    hadoopRzBufferFree(file, buffer);
+
+    /* Check the result of a zero-length read. */
+    buffer = hadoopReadZero(file, opts, 0);
+    EXPECT_NONNULL(buffer);
+    EXPECT_NONNULL(hadoopRzBufferGet(buffer));
+    EXPECT_INT_EQ(0, hadoopRzBufferLength(buffer));
+    hadoopRzBufferFree(file, buffer);
+
+    /* Check the result of reading past EOF */
+    EXPECT_INT_EQ(0, hdfsSeek(fs, file, TEST_ZEROCOPY_FILE_LEN));
+    buffer = hadoopReadZero(file, opts, 1);
+    EXPECT_NONNULL(buffer);
+    EXPECT_NULL(hadoopRzBufferGet(buffer));
+    hadoopRzBufferFree(file, buffer);
+
+    /* Cleanup */
+    free(block);
+    hadoopRzOptionsFree(opts);
+    EXPECT_ZERO(hdfsCloseFile(fs, file));
+    return 0;
+}
+
+static int createZeroCopyTestFile(hdfsFS fs, char *testFileName,
+                                  size_t testFileNameLen)
+{
+    int blockIdx, blockLen;
+    hdfsFile file;
+    uint8_t *data;
+
+    snprintf(testFileName, testFileNameLen, "/zeroCopyTestFile.%d.%d",
+             getpid(), rand());
+    file = hdfsOpenFile(fs, testFileName, O_WRONLY, 0, 1,
+                        TEST_ZEROCOPY_FULL_BLOCK_SIZE);
+    EXPECT_NONNULL(file);
+    for (blockIdx = 0; blockIdx < TEST_ZEROCOPY_NUM_BLOCKS; blockIdx++) {
+        blockLen = getZeroCopyBlockLen(blockIdx);
+        data = getZeroCopyBlockData(blockIdx);
+        EXPECT_NONNULL(data);
+        EXPECT_INT_EQ(blockLen, hdfsWrite(fs, file, data, blockLen));
+    }
+    EXPECT_ZERO(hdfsCloseFile(fs, file));
+    return 0;
+}
+
+static int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl,
+                            struct hdfsBuilder *bld) {
+    int ret;
+    int nnPort;
+    const char *domainSocket;
+
+    hdfsBuilderSetNameNode(bld, "localhost");
+    /* Check the int return value before narrowing it to the unsigned tPort. */
+    nnPort = nmdGetNameNodePort(cl);
+    if (nnPort < 0) {
+      fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -nnPort);
+      return EIO;
+    }
+    hdfsBuilderSetNameNodePort(bld, (tPort)nnPort);
+
+    domainSocket = hdfsGetDomainSocketPath(cl);
+
+    if (domainSocket) {
+      ret = hdfsBuilderConfSetStr(bld, "dfs.client.read.shortcircuit", "true");
+      if (ret) {
+        return ret;
+      }
+      ret = hdfsBuilderConfSetStr(bld, "dfs.domain.socket.path",
+                                  domainSocket);
+      if (ret) {
+        return ret;
+      }
+    }
+    return 0;
+}
+
+
+/**
+ * Test that we can write a file with libhdfs and then read it back through
+ * the zero-copy read API
+ */
+int main(void)
+{
+    int port;
+    struct NativeMiniDfsConf conf = {
+        1, /* doFormat */
+        0, /* webhdfsEnabled */
+        0, /* namenodeHttpPort */
+        1, /* configureShortCircuit */
+    };
+    char testFileName[TEST_FILE_NAME_LENGTH];
+    hdfsFS fs;
+    struct NativeMiniDfsCluster* cl;
+    struct hdfsBuilder *bld;
+
+    cl = nmdCreate(&conf);
+    EXPECT_NONNULL(cl);
+    EXPECT_ZERO(nmdWaitClusterUp(cl));
+    port = nmdGetNameNodePort(cl);
+    if (port < 0) {
+        fprintf(stderr, "TEST_ERROR: test_zerocopy: "
+                "nmdGetNameNodePort returned error %d\n", port);
+        return EXIT_FAILURE;
+    }
+    bld = hdfsNewBuilder();
+    EXPECT_NONNULL(bld);
+    EXPECT_ZERO(nmdConfigureHdfsBuilder(cl, bld));
+    hdfsBuilderSetForceNewInstance(bld);
+    hdfsBuilderConfSetStr(bld, "dfs.block.size",
+                          TO_STR(TEST_ZEROCOPY_FULL_BLOCK_SIZE));
+    /* ensure that we'll always get our mmaps */
+    hdfsBuilderConfSetStr(bld, "dfs.client.read.shortcircuit.skip.checksum",
+                          "true");
+    fs = hdfsBuilderConnect(bld);
+    EXPECT_NONNULL(fs);
+    EXPECT_ZERO(createZeroCopyTestFile(fs, testFileName,
+          TEST_FILE_NAME_LENGTH));
+    EXPECT_ZERO(doTestZeroCopyReads(fs, testFileName));
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    EXPECT_ZERO(nmdShutdown(cl));
+    nmdFree(cl);
+    fprintf(stderr, "TEST_SUCCESS\n"); 
+    return EXIT_SUCCESS;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_native_mini_dfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_native_mini_dfs.c
new file mode 100644
index 0000000..850b0fc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_native_mini_dfs.c
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "expect.h"
+#include "native_mini_dfs.h"
+
+#include <errno.h>
+
+static struct NativeMiniDfsConf conf = {
+    1, /* doFormat */
+};
+
+/**
+ * Test that we can create a MiniDFSCluster and shut it down.
+ */
+int main(void) {
+    struct NativeMiniDfsCluster* cl;
+    
+    cl = nmdCreate(&conf);
+    EXPECT_NONNULL(cl);
+    EXPECT_ZERO(nmdWaitClusterUp(cl));
+    EXPECT_ZERO(nmdShutdown(cl));
+    nmdFree(cl);
+
+    return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/vecsum.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/vecsum.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/vecsum.c
new file mode 100644
index 0000000..74d8f1f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/vecsum.c
@@ -0,0 +1,825 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h> /* strcasecmp */
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#ifdef __MACH__ // OS X does not have clock_gettime
+#include <mach/clock.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#endif
+
+#include "config.h"
+#include "hdfs/hdfs.h"
+
+#define VECSUM_CHUNK_SIZE (8 * 1024 * 1024)
+#define ZCR_READ_CHUNK_SIZE (1024 * 1024 * 8)
+#define NORMAL_READ_CHUNK_SIZE (8 * 1024 * 1024)
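+/* Unroll granularity of vecsum: the SSE path keeps 8 __m128d accumulators,
+ * each holding 2 doubles, so it consumes 16 doubles per iteration. */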
+#define DOUBLES_PER_LOOP_ITER 16
+
+static double timespec_to_double(const struct timespec *ts)
+{
+    double sec = ts->tv_sec;
+    double nsec = ts->tv_nsec;
+    return sec + (nsec / 1000000000L);
+}
+
+struct stopwatch {
+    struct timespec start;
+    struct timespec stop;
+};
+
+
+#ifdef __MACH__
+static int clock_gettime_mono(struct timespec * ts) {
+    static mach_timebase_info_data_t tb;
+    static uint64_t timestart = 0;
+    uint64_t t = 0;
+    if (timestart == 0) {
+        mach_timebase_info(&tb);
+        timestart = mach_absolute_time();
+    }
+    t = mach_absolute_time() - timestart;
+    t *= tb.numer;
+    t /= tb.denom;
+    ts->tv_sec = t / 1000000000ULL;
+    ts->tv_nsec = t - (ts->tv_sec * 1000000000ULL);
+    return 0;
+}
+#else
+static int clock_gettime_mono(struct timespec * ts) {
+    return clock_gettime(CLOCK_MONOTONIC, ts);
+}
+#endif
+
+static struct stopwatch *stopwatch_create(void)
+{
+    struct stopwatch *watch;
+
+    watch = calloc(1, sizeof(struct stopwatch));
+    if (!watch) {
+        fprintf(stderr, "failed to allocate memory for stopwatch\n");
+        goto error;
+    }
+    if (clock_gettime_mono(&watch->start)) {
+        int err = errno;
+        fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
+            "error %d (%s)\n", err, strerror(err));
+        goto error;
+    }
+    return watch;
+
+error:
+    free(watch);
+    return NULL;
+}
+
+static void stopwatch_stop(struct stopwatch *watch,
+        long long bytes_read)
+{
+    double elapsed, rate;
+
+    if (clock_gettime_mono(&watch->stop)) {
+        int err = errno;
+        fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
+            "error %d (%s)\n", err, strerror(err));
+        goto done;
+    }
+    elapsed = timespec_to_double(&watch->stop) -
+        timespec_to_double(&watch->start);
+    rate = (bytes_read / elapsed) / (1024 * 1024 * 1024);
+    printf("stopwatch: took %.5g seconds to read %lld bytes, "
+        "for %.5g GB/s\n", elapsed, bytes_read, rate);
+    printf("stopwatch:  %.5g seconds\n", elapsed);
+done:
+    free(watch);
+}
+
+enum vecsum_type {
+    VECSUM_LOCAL = 0,
+    VECSUM_LIBHDFS,
+    VECSUM_ZCR,
+};
+
+#define VECSUM_TYPE_VALID_VALUES "libhdfs, zcr, or local"
+
+int parse_vecsum_type(const char *str)
+{
+    if (strcasecmp(str, "local") == 0)
+        return VECSUM_LOCAL;
+    else if (strcasecmp(str, "libhdfs") == 0)
+        return VECSUM_LIBHDFS;
+    else if (strcasecmp(str, "zcr") == 0)
+        return VECSUM_ZCR;
+    else
+        return -1;
+}
+
+struct options {
+    // The path to read.
+    const char *path;
+
+    // Length of the file.
+    long long length;
+
+    // The number of times to read the path.
+    int passes;
+
+    // Type of vecsum to do
+    enum vecsum_type ty;
+
+    // RPC address to use for HDFS
+    const char *rpc_address;
+};
+
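+/* Populate the options from the VECSUM_PATH, VECSUM_LENGTH, VECSUM_PASSES,
+ * VECSUM_TYPE, and VECSUM_RPC_ADDRESS environment variables. */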
+static struct options *options_create(void)
+{
+    struct options *opts = NULL;
+    const char *pass_str;
+    const char *ty_str;
+    const char *length_str;
+    int ty;
+
+    opts = calloc(1, sizeof(struct options));
+    if (!opts) {
+        fprintf(stderr, "failed to calloc options\n");
+        goto error;
+    }
+    opts->path = getenv("VECSUM_PATH");
+    if (!opts->path) {
+        fprintf(stderr, "You must set the VECSUM_PATH environment "
+            "variable to the path of the file to read.\n");
+        goto error;
+    }
+    length_str = getenv("VECSUM_LENGTH");
+    if (!length_str) {
+        length_str = "2147483648";
+    }
+    opts->length = atoll(length_str);
+    if (!opts->length) {
+        fprintf(stderr, "Can't parse VECSUM_LENGTH of '%s'.\n",
+                length_str);
+        goto error;
+    }
+    if (opts->length % VECSUM_CHUNK_SIZE) {
+        fprintf(stderr, "VECSUM_LENGTH must be a multiple of '%lld'.  The "
+                "currently specified length of '%lld' is not.\n",
+                (long long)VECSUM_CHUNK_SIZE, (long long)opts->length);
+        goto error;
+    }
+    pass_str = getenv("VECSUM_PASSES");
+    if (!pass_str) {
+        fprintf(stderr, "You must set the VECSUM_PASSES environment "
+            "variable to the number of passes to make.\n");
+        goto error;
+    }
+    opts->passes = atoi(pass_str);
+    if (opts->passes <= 0) {
+        fprintf(stderr, "Invalid value for the VECSUM_PASSES "
+            "environment variable.  You must set this to a "
+            "number greater than 0.\n");
+        goto error;
+    }
+    ty_str = getenv("VECSUM_TYPE");
+    if (!ty_str) {
+        fprintf(stderr, "You must set the VECSUM_TYPE environment "
+            "variable to " VECSUM_TYPE_VALID_VALUES "\n");
+        goto error;
+    }
+    ty = parse_vecsum_type(ty_str);
+    if (ty < 0) {
+        fprintf(stderr, "Invalid VECSUM_TYPE environment variable.  "
+            "Valid values are " VECSUM_TYPE_VALID_VALUES "\n");
+        goto error;
+    }
+    opts->ty = ty;
+    opts->rpc_address = getenv("VECSUM_RPC_ADDRESS");
+    if (!opts->rpc_address) {
+        opts->rpc_address = "default";
+    }
+    return opts;
+error:
+    free(opts);
+    return NULL;
+}
+
+static int test_file_chunk_setup(double **chunk)
+{
+    int i;
+    double *c, val;
+
+    c = malloc(VECSUM_CHUNK_SIZE);
+    if (!c) {
+        fprintf(stderr, "test_file_create: failed to malloc "
+                "a buffer of size '%lld'\n",
+                (long long) VECSUM_CHUNK_SIZE);
+        return EIO;
+    }
+    val = 0.0;
+    for (i = 0; i < VECSUM_CHUNK_SIZE / sizeof(double); i++) {
+        c[i] = val;
+        val += 0.5;
+    }
+    *chunk = c;
+    return 0;
+}
+
+static void options_free(struct options *opts)
+{
+    free(opts);
+}
+
+struct local_data {
+    int fd;
+    double *mmap;
+    long long length;
+};
+
+static int local_data_create_file(struct local_data *cdata,
+                                  const struct options *opts)
+{
+    int ret = EIO;
+    int dup_fd = -1;
+    FILE *fp = NULL;
+    double *chunk = NULL;
+    long long offset = 0;
+
+    dup_fd = dup(cdata->fd);
+    if (dup_fd < 0) {
+        ret = errno;
+        fprintf(stderr, "local_data_create_file: dup failed: %s (%d)\n",
+                strerror(ret), ret);
+        goto done;
+    }
+    fp = fdopen(dup_fd, "w");
+    if (!fp) {
+        ret = errno;
+        fprintf(stderr, "local_data_create_file: fdopen failed: %s (%d)\n",
+                strerror(ret), ret);
+        goto done;
+    }
+    ret = test_file_chunk_setup(&chunk);
+    if (ret)
+        goto done;
+    while (offset < opts->length) {
+        if (fwrite(chunk, VECSUM_CHUNK_SIZE, 1, fp) != 1) {
+            fprintf(stderr, "local_data_create_file: failed to write to "
+                    "the local file '%s' at offset %lld\n",
+                    opts->path, offset);
+            ret = EIO;
+            goto done;
+        }
+        offset += VECSUM_CHUNK_SIZE;
+    }
+    fprintf(stderr, "local_data_create_file: successfully re-wrote %s as "
+            "a file of length %lld\n", opts->path, opts->length);
+    ret = 0;
+
+done:
+    if (fp) {
+        /* fclose also closes dup_fd, which fdopen took ownership of. */
+        fclose(fp);
+    } else if (dup_fd >= 0) {
+        close(dup_fd);
+    }
+    free(chunk);
+    return ret;
+}
+
+static struct local_data *local_data_create(const struct options *opts)
+{
+    struct local_data *cdata = NULL;
+    struct stat st_buf;
+
+    cdata = malloc(sizeof(*cdata));
+    if (!cdata) {
+        fprintf(stderr, "Failed to allocate local test data.\n");
+        goto error;
+    }
+    cdata->fd = -1;
+    cdata->mmap = MAP_FAILED;
+    cdata->length = opts->length;
+
+    cdata->fd = open(opts->path, O_RDWR | O_CREAT, 0777);
+    if (cdata->fd < 0) {
+        int err = errno;
+        fprintf(stderr, "local_data_create: failed to open %s "
+            "for read/write: error %d (%s)\n", opts->path, err, strerror(err));
+        goto error;
+    }
+    if (fstat(cdata->fd, &st_buf)) {
+        int err = errno;
+        fprintf(stderr, "local_data_create: fstat(%s) failed: "
+            "error %d (%s)\n", opts->path, err, strerror(err));
+        goto error;
+    }
+    if (st_buf.st_size != opts->length) {
+        int err;
+        fprintf(stderr, "local_data_create: current size of %s is %lld, but "
+                "we want %lld.  Re-writing the file.\n",
+                opts->path, (long long)st_buf.st_size,
+                (long long)opts->length);
+        err = local_data_create_file(cdata, opts);
+        if (err)
+            goto error;
+    }
+    cdata->mmap = mmap(NULL, cdata->length, PROT_READ,
+                       MAP_PRIVATE, cdata->fd, 0);
+    if (cdata->mmap == MAP_FAILED) {
+        int err = errno;
+        fprintf(stderr, "local_data_create: mmap(%s) failed: "
+            "error %d (%s)\n", opts->path, err, strerror(err));
+        goto error;
+    }
+    return cdata;
+
+error:
+    if (cdata) {
+        if (cdata->fd >= 0) {
+            close(cdata->fd);
+        }
+        free(cdata);
+    }
+    return NULL;
+}
+
+static void local_data_free(struct local_data *cdata)
+{
+    close(cdata->fd);
+    munmap(cdata->mmap, cdata->length);
+    free(cdata); /* cdata itself is heap-allocated in local_data_create */
+}
+
+struct libhdfs_data {
+    hdfsFS fs;
+    hdfsFile file;
+    long long length;
+    double *buf;
+};
+
+static void libhdfs_data_free(struct libhdfs_data *ldata)
+{
+    if (ldata->fs) {
+        free(ldata->buf);
+        if (ldata->file) {
+            hdfsCloseFile(ldata->fs, ldata->file);
+        }
+        hdfsDisconnect(ldata->fs);
+    }
+    free(ldata);
+}
+
+static int libhdfs_data_create_file(struct libhdfs_data *ldata,
+                                    const struct options *opts)
+{
+    int ret;
+    double *chunk = NULL;
+    long long offset = 0;
+
+    ldata->file = hdfsOpenFile(ldata->fs, opts->path, O_WRONLY, 0, 1, 0);
+    if (!ldata->file) {
+        ret = errno;
+        fprintf(stderr, "libhdfs_data_create_file: hdfsOpenFile(%s, "
+            "O_WRONLY) failed: error %d (%s)\n", opts->path, ret,
+            strerror(ret));
+        goto done;
+    }
+    ret = test_file_chunk_setup(&chunk);
+    if (ret)
+        goto done;
+    while (offset < opts->length) {
+        ret = hdfsWrite(ldata->fs, ldata->file, chunk, VECSUM_CHUNK_SIZE);
+        if (ret < 0) {
+            ret = errno;
+            fprintf(stderr, "libhdfs_data_create_file: got error %d (%s) at "
+                    "offset %lld of %s\n", ret, strerror(ret),
+                    offset, opts->path);
+            goto done;
+        } else if (ret < VECSUM_CHUNK_SIZE) {
+            fprintf(stderr, "libhdfs_data_create_file: got short write "
+                    "of %d at offset %lld of %s\n", ret, offset, opts->path);
+            goto done;
+        }
+        offset += VECSUM_CHUNK_SIZE;
+    }
+    ret = 0;
+done:
+    free(chunk);
+    if (ldata->file) {
+        if (hdfsCloseFile(ldata->fs, ldata->file)) {
+            fprintf(stderr, "libhdfs_data_create_file: hdfsCloseFile error.");
+            ret = EIO;
+        }
+        ldata->file = NULL;
+    }
+    return ret;
+}
+
+static struct libhdfs_data *libhdfs_data_create(const struct options *opts)
+{
+    struct libhdfs_data *ldata = NULL;
+    struct hdfsBuilder *builder = NULL;
+    hdfsFileInfo *pinfo = NULL;
+
+    ldata = calloc(1, sizeof(struct libhdfs_data));
+    if (!ldata) {
+        fprintf(stderr, "Failed to allocate libhdfs test data.\n");
+        goto error;
+    }
+    builder = hdfsNewBuilder();
+    if (!builder) {
+        fprintf(stderr, "Failed to create builder.\n");
+        goto error;
+    }
+    hdfsBuilderSetNameNode(builder, opts->rpc_address);
+    hdfsBuilderConfSetStr(builder,
+        "dfs.client.read.shortcircuit.skip.checksum", "true");
+    ldata->fs = hdfsBuilderConnect(builder);
+    if (!ldata->fs) {
+        fprintf(stderr, "Could not connect to default namenode!\n");
+        goto error;
+    }
+    pinfo = hdfsGetPathInfo(ldata->fs, opts->path);
+    if (!pinfo) {
+        int err = errno;
+        fprintf(stderr, "hdfsGetPathInfo(%s) failed: error %d (%s).  "
+                "Attempting to re-create file.\n",
+            opts->path, err, strerror(err));
+        if (libhdfs_data_create_file(ldata, opts))
+            goto error;
+    } else if (pinfo->mSize != opts->length) {
+        fprintf(stderr, "hdfsGetPathInfo(%s) failed: length was %lld, "
+                "but we want length %lld.  Attempting to re-create file.\n",
+                opts->path, (long long)pinfo->mSize, (long long)opts->length);
+        if (libhdfs_data_create_file(ldata, opts))
+            goto error;
+    }
+    ldata->file = hdfsOpenFile(ldata->fs, opts->path, O_RDONLY, 0, 0, 0);
+    if (!ldata->file) {
+        int err = errno;
+        fprintf(stderr, "hdfsOpenFile(%s) failed: error %d (%s)\n",
+            opts->path, err, strerror(err));
+        goto error;
+    }
+    ldata->length = opts->length;
+    return ldata;
+
+error:
+    if (pinfo)
+        hdfsFreeFileInfo(pinfo, 1);
+    if (ldata)
+        libhdfs_data_free(ldata);
+    return NULL;
+}
+
+static int check_byte_size(int byte_size, const char *const str)
+{
+    if (byte_size % sizeof(double)) {
+        fprintf(stderr, "%s is not a multiple "
+            "of sizeof(double)\n", str);
+        return EINVAL;
+    }
+    if ((byte_size / sizeof(double)) % DOUBLES_PER_LOOP_ITER) {
+        fprintf(stderr, "The number of doubles contained in "
+            "%s is not a multiple of DOUBLES_PER_LOOP_ITER\n",
+            str);
+        return EINVAL;
+    }
+    return 0;
+}
+
+#ifdef HAVE_INTEL_SSE_INTRINSICS
+
+#include <emmintrin.h>
+
+static double vecsum(const double *buf, int num_doubles)
+{
+    int i;
+    double hi, lo;
+    __m128d x0, x1, x2, x3, x4, x5, x6, x7;
+    __m128d sum0 = _mm_set_pd(0.0,0.0);
+    __m128d sum1 = _mm_set_pd(0.0,0.0);
+    __m128d sum2 = _mm_set_pd(0.0,0.0);
+    __m128d sum3 = _mm_set_pd(0.0,0.0);
+    __m128d sum4 = _mm_set_pd(0.0,0.0);
+    __m128d sum5 = _mm_set_pd(0.0,0.0);
+    __m128d sum6 = _mm_set_pd(0.0,0.0);
+    __m128d sum7 = _mm_set_pd(0.0,0.0);
+    for (i = 0; i < num_doubles; i+=DOUBLES_PER_LOOP_ITER) {
+        x0 = _mm_load_pd(buf + i + 0);
+        x1 = _mm_load_pd(buf + i + 2);
+        x2 = _mm_load_pd(buf + i + 4);
+        x3 = _mm_load_pd(buf + i + 6);
+        x4 = _mm_load_pd(buf + i + 8);
+        x5 = _mm_load_pd(buf + i + 10);
+        x6 = _mm_load_pd(buf + i + 12);
+        x7 = _mm_load_pd(buf + i + 14);
+        sum0 = _mm_add_pd(sum0, x0);
+        sum1 = _mm_add_pd(sum1, x1);
+        sum2 = _mm_add_pd(sum2, x2);
+        sum3 = _mm_add_pd(sum3, x3);
+        sum4 = _mm_add_pd(sum4, x4);
+        sum5 = _mm_add_pd(sum5, x5);
+        sum6 = _mm_add_pd(sum6, x6);
+        sum7 = _mm_add_pd(sum7, x7);
+    }
+    x0 = _mm_add_pd(sum0, sum1);
+    x1 = _mm_add_pd(sum2, sum3);
+    x2 = _mm_add_pd(sum4, sum5);
+    x3 = _mm_add_pd(sum6, sum7);
+    x4 = _mm_add_pd(x0, x1);
+    x5 = _mm_add_pd(x2, x3);
+    x6 = _mm_add_pd(x4, x5);
+    _mm_storeh_pd(&hi, x6);
+    _mm_storel_pd(&lo, x6);
+    return hi + lo;
+}
+
+#else
+
+static double vecsum(const double *buf, int num_doubles)
+{
+    int i;
+    double sum = 0.0;
+    for (i = 0; i < num_doubles; i++) {
+        sum += buf[i];
+    }
+    return sum;
+}
+
+#endif
+
+static int vecsum_zcr_loop(int pass, struct libhdfs_data *ldata,
+        struct hadoopRzOptions *zopts,
+        const struct options *opts)
+{
+    int32_t len;
+    double sum = 0.0;
+    const double *buf;
+    struct hadoopRzBuffer *rzbuf = NULL;
+    int ret;
+
+    while (1) {
+        rzbuf = hadoopReadZero(ldata->file, zopts, ZCR_READ_CHUNK_SIZE);
+        if (!rzbuf) {
+            ret = errno;
+            fprintf(stderr, "hadoopReadZero failed with error "
+                "code %d (%s)\n", ret, strerror(ret));
+            goto done;
+        }
+        buf = hadoopRzBufferGet(rzbuf);
+        if (!buf) break;
+        len = hadoopRzBufferLength(rzbuf);
+        if (len < ZCR_READ_CHUNK_SIZE) {
+            fprintf(stderr, "hadoopReadZero got a partial read "
+                "of length %d\n", len);
+            ret = EINVAL;
+            goto done;
+        }
+        sum += vecsum(buf,
+            ZCR_READ_CHUNK_SIZE / sizeof(double));
+        hadoopRzBufferFree(ldata->file, rzbuf);
+    }
+    printf("finished zcr pass %d.  sum = %g\n", pass, sum);
+    ret = 0;
+
+done:
+    if (rzbuf)
+        hadoopRzBufferFree(ldata->file, rzbuf);
+    return ret;
+}
+
+static int vecsum_zcr(struct libhdfs_data *ldata,
+        const struct options *opts)
+{
+    int ret, pass;
+    struct hadoopRzOptions *zopts = NULL;
+
+    zopts = hadoopRzOptionsAlloc();
+    if (!zopts) {
+        fprintf(stderr, "hadoopRzOptionsAlloc failed.\n");
+        ret = ENOMEM;
+        goto done;
+    }
+    if (hadoopRzOptionsSetSkipChecksum(zopts, 1)) {
+        ret = errno;
+        perror("hadoopRzOptionsSetSkipChecksum failed: ");
+        goto done;
+    }
+    if (hadoopRzOptionsSetByteBufferPool(zopts, NULL)) {
+        ret = errno;
+        perror("hadoopRzOptionsSetByteBufferPool failed: ");
+        goto done;
+    }
+    for (pass = 0; pass < opts->passes; ++pass) {
+        ret = vecsum_zcr_loop(pass, ldata, zopts, opts);
+        if (ret) {
+            fprintf(stderr, "vecsum_zcr_loop pass %d failed "
+                "with error %d\n", pass, ret);
+            goto done;
+        }
+        hdfsSeek(ldata->fs, ldata->file, 0);
+    }
+    ret = 0;
+done:
+    if (zopts)
+        hadoopRzOptionsFree(zopts);
+    return ret;
+}
+
+tSize hdfsReadFully(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
+{
+    uint8_t *buf = buffer;
+    tSize ret, nread = 0;
+
+    while (length > 0) {
+        ret = hdfsRead(fs, f, buf, length);
+        if (ret < 0) {
+            if (errno != EINTR) {
+                return -1;
+            }
+            /* Interrupted: retry instead of falling through and adding a
+             * negative byte count to the totals. */
+            continue;
+        }
+        if (ret == 0) {
+            break;
+        }
+        nread += ret;
+        length -= ret;
+        buf += ret;
+    }
+    return nread;
+}
+
+static int vecsum_normal_loop(int pass, const struct libhdfs_data *ldata,
+            const struct options *opts)
+{
+    double sum = 0.0;
+
+    while (1) {
+        int res = hdfsReadFully(ldata->fs, ldata->file, ldata->buf,
+                NORMAL_READ_CHUNK_SIZE);
+        if (res == 0) // EOF
+            break;
+        if (res < 0) {
+            int err = errno;
+            fprintf(stderr, "hdfsRead failed with error %d (%s)\n",
+                err, strerror(err));
+            return err;
+        }
+        if (res < NORMAL_READ_CHUNK_SIZE) {
+            fprintf(stderr, "hdfsRead got a partial read of "
+                "length %d\n", res);
+            return EINVAL;
+        }
+        sum += vecsum(ldata->buf,
+                  NORMAL_READ_CHUNK_SIZE / sizeof(double));
+    }
+    printf("finished normal pass %d.  sum = %g\n", pass, sum);
+    return 0;
+}
+
+static int vecsum_libhdfs(struct libhdfs_data *ldata,
+            const struct options *opts)
+{
+    int pass;
+
+    ldata->buf = malloc(NORMAL_READ_CHUNK_SIZE);
+    if (!ldata->buf) {
+        fprintf(stderr, "failed to malloc buffer of size %d\n",
+            NORMAL_READ_CHUNK_SIZE);
+        return ENOMEM;
+    }
+    for (pass = 0; pass < opts->passes; ++pass) {
+        int ret = vecsum_normal_loop(pass, ldata, opts);
+        if (ret) {
+            fprintf(stderr, "vecsum_normal_loop pass %d failed "
+                "with error %d\n", pass, ret);
+            return ret;
+        }
+        hdfsSeek(ldata->fs, ldata->file, 0);
+    }
+    return 0;
+}
+
+static void vecsum_local(struct local_data *cdata, const struct options *opts)
+{
+    int pass;
+
+    for (pass = 0; pass < opts->passes; pass++) {
+        double sum = vecsum(cdata->mmap, cdata->length / sizeof(double));
+        printf("finished vecsum_local pass %d.  sum = %g\n", pass, sum);
+    }
+}
+
+static long long vecsum_length(const struct options *opts,
+                const struct libhdfs_data *ldata)
+{
+    if (opts->ty == VECSUM_LOCAL) {
+        struct stat st_buf = { 0 };
+        if (stat(opts->path, &st_buf)) {
+            int err = errno;
+            fprintf(stderr, "vecsum_length: stat(%s) failed: "
+                "error %d (%s)\n", opts->path, err, strerror(err));
+            return -EIO;
+        }
+        return st_buf.st_size;
+    } else {
+        return ldata->length;
+    }
+}
+
+/*
+ * vecsum is a microbenchmark which measures the speed of various ways of
+ * reading from HDFS.  It creates a file containing floating-point 'doubles',
+ * and computes the sum of all the doubles several times.  For some CPUs,
+ * assembly optimizations are used for the summation (SSE, etc).
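+ *
+ * Configuration comes from environment variables; an illustrative invocation
+ * (values here are examples only):
+ *   VECSUM_PATH=/vecsum.dat VECSUM_LENGTH=2147483648 VECSUM_PASSES=10 \
+ *     VECSUM_TYPE=zcr VECSUM_RPC_ADDRESS=default ./test_libhdfs_vecsum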
+ */
+int main(void)
+{
+    int ret = 1;
+    struct options *opts = NULL;
+    struct local_data *cdata = NULL;
+    struct libhdfs_data *ldata = NULL;
+    struct stopwatch *watch = NULL;
+
+    if (check_byte_size(VECSUM_CHUNK_SIZE, "VECSUM_CHUNK_SIZE") ||
+        check_byte_size(ZCR_READ_CHUNK_SIZE,
+                "ZCR_READ_CHUNK_SIZE") ||
+        check_byte_size(NORMAL_READ_CHUNK_SIZE,
+                "NORMAL_READ_CHUNK_SIZE")) {
+        goto done;
+    }
+    opts = options_create();
+    if (!opts)
+        goto done;
+    if (opts->ty == VECSUM_LOCAL) {
+        cdata = local_data_create(opts);
+        if (!cdata)
+            goto done;
+    } else {
+        ldata = libhdfs_data_create(opts);
+        if (!ldata)
+            goto done;
+    }
+    watch = stopwatch_create();
+    if (!watch)
+        goto done;
+    switch (opts->ty) {
+    case VECSUM_LOCAL:
+        vecsum_local(cdata, opts);
+        ret = 0;
+        break;
+    case VECSUM_LIBHDFS:
+        ret = vecsum_libhdfs(ldata, opts);
+        break;
+    case VECSUM_ZCR:
+        ret = vecsum_zcr(ldata, opts);
+        break;
+    }
+    if (ret) {
+        fprintf(stderr, "vecsum failed with error %d\n", ret);
+        goto done;
+    }
+    ret = 0;
+done:
+    fprintf(stderr, "cleaning up...\n");
+    if (watch && (ret == 0)) {
+        long long length = vecsum_length(opts, ldata);
+        if (length >= 0) {
+            stopwatch_stop(watch, length * opts->passes);
+        }
+    }
+    if (cdata)
+        local_data_free(cdata);
+    if (ldata)
+        libhdfs_data_free(ldata);
+    if (opts)
+        options_free(opts);
+    return ret;
+}
+
+// vim: ts=4:sw=4:tw=79:et

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
index 772a864..2535cdd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
@@ -18,6 +18,7 @@
 add_definitions(-DLIBHDFS_DLL_EXPORT)
 
 include_directories(
+    include
     ${GENERATED_JAVAH}
     ${CMAKE_CURRENT_SOURCE_DIR}
     ${CMAKE_BINARY_DIR}
@@ -50,92 +51,24 @@ set(LIBHDFS_VERSION "0.0.0")
 set_target_properties(hdfs PROPERTIES
     SOVERSION ${LIBHDFS_VERSION})
 
-add_executable(test_libhdfs_ops
-    test/test_libhdfs_ops.c
-)
-target_link_libraries(test_libhdfs_ops
-    hdfs_static
-    ${JAVA_JVM_LIBRARY}
-)
-
-add_executable(test_libhdfs_read
-    test/test_libhdfs_read.c
-)
-target_link_libraries(test_libhdfs_read
-    hdfs_static
-    ${JAVA_JVM_LIBRARY}
-)
-
-add_executable(test_libhdfs_write
-    test/test_libhdfs_write.c
-)
-target_link_libraries(test_libhdfs_write
-    hdfs_static
-    ${JAVA_JVM_LIBRARY}
-)
-
-add_library(native_mini_dfs
-    native_mini_dfs.c
-    common/htable.c
-    exception.c
-    jni_helper.c
-    ${OS_DIR}/mutexes.c
-    ${OS_DIR}/thread_local_storage.c
-)
-target_link_libraries(native_mini_dfs
-    ${JAVA_JVM_LIBRARY}
-    ${LIB_DL}
-    ${OS_LINK_LIBRARIES}
-)
-
-add_executable(test_native_mini_dfs
-    test_native_mini_dfs.c
-)
-target_link_libraries(test_native_mini_dfs
-    native_mini_dfs
-)
-
-add_executable(test_libhdfs_threaded
-    expect.c
-    test_libhdfs_threaded.c
-    ${OS_DIR}/thread.c
-)
-target_link_libraries(test_libhdfs_threaded
-    hdfs_static
-    native_mini_dfs
-    ${OS_LINK_LIBRARIES}
-)
-
-add_executable(test_libhdfs_zerocopy
-    expect.c
-    test/test_libhdfs_zerocopy.c
-)
-target_link_libraries(test_libhdfs_zerocopy
-    hdfs_static
-    native_mini_dfs
-    ${OS_LINK_LIBRARIES}
-)
-
-add_executable(test_htable
-    common/htable.c
-    test/test_htable.c
-)
-target_link_libraries(test_htable
-    ${OS_LINK_LIBRARIES}
-)
+add_libhdfs_test(test_libhdfs_ops hdfs_static test_libhdfs_ops.c)
+link_libhdfs_test(test_libhdfs_ops hdfs_static ${JAVA_JVM_LIBRARY})
+add_libhdfs_test(test_libhdfs_reads hdfs_static test_libhdfs_read.c)
+link_libhdfs_test(test_libhdfs_reads hdfs_static ${JAVA_JVM_LIBRARY})
+add_libhdfs_test(test_libhdfs_write hdfs_static test_libhdfs_write.c)
+link_libhdfs_test(test_libhdfs_write hdfs_static ${JAVA_JVM_LIBRARY})
+add_libhdfs_test(test_libhdfs_threaded hdfs_static expect.c test_libhdfs_threaded.c ${OS_DIR}/thread.c)
+link_libhdfs_test(test_libhdfs_threaded hdfs_static native_mini_dfs)
+add_libhdfs_test(test_libhdfs_zerocopy hdfs_static expect.c test_libhdfs_zerocopy.c)
+link_libhdfs_test(test_libhdfs_zerocopy hdfs_static native_mini_dfs ${OS_LINK_LIBRARIES})
 
 # Skip vecsum on Windows.  This could be made to work in the future by
 # introducing an abstraction layer over the sys/mman.h functions.
 if(NOT WIN32)
-    add_executable(test_libhdfs_vecsum test/vecsum.c)
+    add_libhdfs_test(test_libhdfs_vecsum hdfs vecsum.c)
     if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
-        target_link_libraries(test_libhdfs_vecsum
-            hdfs
-            pthread)
+        link_libhdfs_test(test_libhdfs_vecsum hdfs pthread)
     else()
-        target_link_libraries(test_libhdfs_vecsum
-            hdfs
-            pthread
-            rt)
+        link_libhdfs_test(test_libhdfs_vecsum hdfs pthread rt)
     endif()
 endif()
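
[Editor's note] This hunk replaces the per-test add_executable/target_link_libraries boilerplate with the add_libhdfs_test and link_libhdfs_test helpers. Their definitions are not part of this hunk (they live elsewhere in the patch, alongside the new libhdfs-tests directory), so the following is only a rough sketch of what such helpers could look like, with LIBHDFS_TESTS_DIR standing in for whatever variable the real functions use:

    # Hedged sketch only -- the real definitions elsewhere in this
    # patch may differ.  NAME is the test, LIBRARY is the libhdfs
    # flavor to build against, and the trailing arguments are sources
    # resolved against the shared tests directory (LIBHDFS_TESTS_DIR
    # is an assumed name).
    function(add_libhdfs_test NAME LIBRARY)
        set(FILES)
        foreach(FIL ${ARGN})
            if(IS_ABSOLUTE ${FIL})
                list(APPEND FILES ${FIL})
            else()
                list(APPEND FILES ${LIBHDFS_TESTS_DIR}/${FIL})
            endif()
        endforeach()
        add_executable("${NAME}_${LIBRARY}" ${FILES})
    endfunction()

    function(link_libhdfs_test NAME LIBRARY)
        target_link_libraries("${NAME}_${LIBRARY}" ${LIBRARY} ${ARGN})
    endfunction()

Suffixing the target with the library name would let the same test sources be built once per libhdfs implementation, which is presumably the point of parameterizing the helpers on LIBRARY.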

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
index eb7115c..35e9d2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
@@ -17,7 +17,7 @@
  */
 
 #include "exception.h"
-#include "hdfs.h"
+#include "hdfs/hdfs.h"
 #include "jni_helper.h"
 #include "platform.h"
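
[Editor's note] The only change here is the include path: the public header is now reached as hdfs/hdfs.h rather than hdfs.h, matching the `include` directory added to include_directories in the CMakeLists.txt hunk above (presumably the header now lives under an include/hdfs/ subdirectory). Client code uses the same spelling; a minimal consumer, as a sketch:

    #include "hdfs/hdfs.h"      /* was: #include "hdfs.h" */

    /* Minimal smoke test against the relocated header; hdfsConnect
     * with "default" uses the configuration's default filesystem. */
    int main(void)
    {
        hdfsFS fs = hdfsConnect("default", 0);
        if (!fs)
            return 1;
        return hdfsDisconnect(fs);
    }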
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.c
deleted file mode 100644
index 576e9ef..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "expect.h"
-#include "hdfs.h"
-
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-int expectFileStats(hdfsFile file,
-      uint64_t expectedTotalBytesRead,
-      uint64_t expectedTotalLocalBytesRead,
-      uint64_t expectedTotalShortCircuitBytesRead,
-      uint64_t expectedTotalZeroCopyBytesRead)
-{
-    struct hdfsReadStatistics *stats = NULL;
-    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &stats));
-    fprintf(stderr, "expectFileStats(expectedTotalBytesRead=%"PRId64", "
-            "expectedTotalLocalBytesRead=%"PRId64", "
-            "expectedTotalShortCircuitBytesRead=%"PRId64", "
-            "expectedTotalZeroCopyBytesRead=%"PRId64", "
-            "totalBytesRead=%"PRId64", "
-            "totalLocalBytesRead=%"PRId64", "
-            "totalShortCircuitBytesRead=%"PRId64", "
-            "totalZeroCopyBytesRead=%"PRId64")\n",
-            expectedTotalBytesRead,
-            expectedTotalLocalBytesRead,
-            expectedTotalShortCircuitBytesRead,
-            expectedTotalZeroCopyBytesRead,
-            stats->totalBytesRead,
-            stats->totalLocalBytesRead,
-            stats->totalShortCircuitBytesRead,
-            stats->totalZeroCopyBytesRead);
-    if (expectedTotalBytesRead != UINT64_MAX) {
-        EXPECT_UINT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
-    }
-    if (expectedTotalLocalBytesRead != UINT64_MAX) {
-        EXPECT_UINT64_EQ(expectedTotalLocalBytesRead,
-                      stats->totalLocalBytesRead);
-    }
-    if (expectedTotalShortCircuitBytesRead != UINT64_MAX) {
-        EXPECT_UINT64_EQ(expectedTotalShortCircuitBytesRead,
-                      stats->totalShortCircuitBytesRead);
-    }
-    if (expectedTotalZeroCopyBytesRead != UINT64_MAX) {
-        EXPECT_UINT64_EQ(expectedTotalZeroCopyBytesRead,
-                      stats->totalZeroCopyBytesRead);
-    }
-    hdfsFileFreeReadStatistics(stats);
-    return 0;
-}
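
[Editor's note] The deletion above is a move, not a removal: per the commit summary, expect.c is relocated into the new libhdfs-tests directory. As the code shows, expectFileStats logs all eight values and treats UINT64_MAX in any expected parameter as "don't check". A hedged usage sketch (the wrapper name and byte count are made up):

    #include <stdint.h>
    #include "expect.h"
    #include "hdfs/hdfs.h"

    /* After reading nRead bytes from a local replica, expect the total
     * and local counters to match exactly, and leave the short-circuit
     * and zero-copy counters unchecked via the UINT64_MAX wildcard. */
    static int checkLocalRead(hdfsFile file, uint64_t nRead)
    {
        return expectFileStats(file, nRead, nRead, UINT64_MAX, UINT64_MAX);
    }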

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.h
deleted file mode 100644
index 49aa285..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_NATIVE_TESTS_EXPECT_H
-#define LIBHDFS_NATIVE_TESTS_EXPECT_H
-
-#include <inttypes.h>
-#include <stdio.h>
-
-struct hdfsFile_internal;
-
-#define EXPECT_ZERO(x) \
-    do { \
-        int __my_ret__ = x; \
-        if (__my_ret__) { \
-            int __my_errno__ = errno; \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
-		    "code %d (errno: %d): got nonzero from %s\n", \
-		    __FILE__, __LINE__, __my_ret__, __my_errno__, #x); \
-            return __my_ret__; \
-        } \
-    } while (0);
-
-#define EXPECT_NULL(x) \
-    do { \
-        const void* __my_ret__ = x; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ != NULL) { \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
-		    "got non-NULL value %p from %s\n", \
-		    __FILE__, __LINE__, __my_errno__, __my_ret__, #x); \
-            return -1; \
-        } \
-    } while (0);
-
-#define EXPECT_NULL_WITH_ERRNO(x, e) \
-    do { \
-        const void* __my_ret__ = x; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ != NULL) { \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
-		    "got non-NULL value %p from %s\n", \
-		    __FILE__, __LINE__, __my_errno__, __my_ret__, #x); \
-            return -1; \
-        } \
-        if (__my_errno__ != e) { \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
-		    "got expected NULL without expected errno %d from %s\n", \
-		    __FILE__, __LINE__, __my_errno__, e, #x); \
-            return -1; \
-        } \
-    } while (0);
-
-#define EXPECT_NONNULL(x) \
-    do { \
-        const void* __my_ret__ = x; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ == NULL) { \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
-		    "got NULL from %s\n", __FILE__, __LINE__, __my_errno__, #x); \
-            return -1; \
-        } \
-    } while (0);
-
-#define EXPECT_NEGATIVE_ONE_WITH_ERRNO(x, e) \
-    do { \
-        int __my_ret__ = x; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ != -1) { \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
-                "code %d (errno: %d): expected -1 from %s\n", \
-                    __FILE__, __LINE__, \
-                __my_ret__, __my_errno__, #x); \
-            return -1; \
-        } \
-        if (__my_errno__ != e) { \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
-                "code %d (errno: %d): expected errno = %d from %s\n", \
-                __FILE__, __LINE__, __my_ret__, __my_errno__, e, #x); \
-            return -1; \
-	} \
-    } while (0);
-
-#define EXPECT_NONZERO(x) \
-    do { \
-        int __my_ret__ = x; \
-        int __my_errno__ = errno; \
-        if (!__my_ret__) { \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
-              "code %d (errno: %d): got zero from %s\n", __FILE__, __LINE__, \
-              __my_ret__, __my_errno__, #x); \
-            return -1; \
-        } \
-    } while (0);
-
-#define EXPECT_NONNEGATIVE(x) \
-    do { \
-        int __my_ret__ = x; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ < 0) { \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
-                "code %d (errno: %d): got negative return from %s\n", \
-                __FILE__, __LINE__, __my_ret__, __my_errno__, #x); \
-            return __my_ret__; \
-        } \
-    } while (0);
-
-#define EXPECT_INT_EQ(x, y) \
-    do { \
-        int __my_ret__ = y; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ != (x)) { \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
-              "code %d (errno: %d): expected %d\n", \
-               __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
-            return -1; \
-        } \
-    } while (0);
-
-#define EXPECT_INT64_EQ(x, y) \
-    do { \
-        int64_t __my_ret__ = y; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ != (x)) { \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
-              "value %"PRId64" (errno: %d): expected %"PRId64"\n", \
-               __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
-            return -1; \
-        } \
-    } while (0);
-
-#define EXPECT_UINT64_EQ(x, y) \
-    do { \
-        uint64_t __my_ret__ = y; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ != (x)) { \
-            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
-              "value %"PRIu64" (errno: %d): expected %"PRIu64"\n", \
-               __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
-            return -1; \
-        } \
-    } while (0);
-
-#define RETRY_ON_EINTR_GET_ERRNO(ret, expr) do { \
-    ret = expr; \
-    if (!ret) \
-        break; \
-    ret = -errno; \
-    } while (ret == -EINTR);
-
-/**
- * Test that an HDFS file has the given statistics.
- *
- * Any parameter can be set to UINT64_MAX to avoid checking it.
- *
- * @return 0 on success; error code otherwise
- */
-int expectFileStats(struct hdfsFile_internal *file,
-      uint64_t expectedTotalBytesRead,
-      uint64_t expectedTotalLocalBytesRead,
-      uint64_t expectedTotalShortCircuitBytesRead,
-      uint64_t expectedTotalZeroCopyBytesRead);
-
-#endif
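
[Editor's note] expect.h moves to the tests directory along with expect.c. Two things are worth noting about these macros: each one `return`s from the enclosing function on failure, so they can only be used inside functions returning int, and each expansion already ends in `} while (0);` with a trailing semicolon, which can break an if/else chain that treats the macro as a single statement. A hedged usage sketch (the path is invented, and the ENOENT mapping for a missing file is an assumption about libhdfs's exception translation):

    #include <errno.h>
    #include <fcntl.h>
    #include "expect.h"
    #include "hdfs/hdfs.h"

    static int testMissingFile(hdfsFS fs)
    {
        /* The root directory should always exist... */
        EXPECT_ZERO(hdfsExists(fs, "/"));
        /* ...and opening a nonexistent path for read should fail,
         * returning NULL with errno set to ENOENT. */
        EXPECT_NULL_WITH_ERRNO(
            hdfsOpenFile(fs, "/no/such/file", O_RDONLY, 0, 0, 0),
            ENOENT);
        return 0;
    }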

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index a3769fc..c5aad1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -17,7 +17,7 @@
  */
 
 #include "exception.h"
-#include "hdfs.h"
+#include "hdfs/hdfs.h"
 #include "jni_helper.h"
 #include "platform.h"
 

