hadoop-common-commits mailing list archives

From whe...@apache.org
Subject [10/19] hadoop git commit: HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. Contributed by Haohui Mai.
Date Wed, 07 Oct 2015 07:16:12 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b19ed/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/vecsum.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/vecsum.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/vecsum.c
new file mode 100644
index 0000000..80a64b4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/vecsum.c
@@ -0,0 +1,825 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#ifdef __MACH__ // OS X does not have clock_gettime
+#include <mach/clock.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#endif
+
+#include "config.h"
+#include "hdfs.h"
+
+#define VECSUM_CHUNK_SIZE (8 * 1024 * 1024)
+#define ZCR_READ_CHUNK_SIZE (1024 * 1024 * 8)
+#define NORMAL_READ_CHUNK_SIZE (8 * 1024 * 1024)
+#define DOUBLES_PER_LOOP_ITER 16
+
+static double timespec_to_double(const struct timespec *ts)
+{
+    double sec = ts->tv_sec;
+    double nsec = ts->tv_nsec;
+    return sec + (nsec / 1000000000L);
+}
+
+struct stopwatch {
+    struct timespec start;
+    struct timespec stop;
+};
+
+
+#ifdef __MACH__
+static int clock_gettime_mono(struct timespec * ts) {
+    static mach_timebase_info_data_t tb;
+    static uint64_t timestart = 0;
+    uint64_t t = 0;
+    if (timestart == 0) {
+        mach_timebase_info(&tb);
+        timestart = mach_absolute_time();
+    }
+    t = mach_absolute_time() - timestart;
+    t *= tb.numer;
+    t /= tb.denom;
+    ts->tv_sec = t / 1000000000ULL;
+    ts->tv_nsec = t - (ts->tv_sec * 1000000000ULL);
+    return 0;
+}
+#else
+static int clock_gettime_mono(struct timespec * ts) {
+    return clock_gettime(CLOCK_MONOTONIC, ts);
+}
+#endif
+
+static struct stopwatch *stopwatch_create(void)
+{
+    struct stopwatch *watch;
+
+    watch = calloc(1, sizeof(struct stopwatch));
+    if (!watch) {
+        fprintf(stderr, "failed to allocate memory for stopwatch\n");
+        goto error;
+    }
+    if (clock_gettime_mono(&watch->start)) {
+        int err = errno;
+        fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
+            "error %d (%s)\n", err, strerror(err));
+        goto error;
+    }
+    return watch;
+
+error:
+    free(watch);
+    return NULL;
+}
+
+static void stopwatch_stop(struct stopwatch *watch,
+        long long bytes_read)
+{
+    double elapsed, rate;
+
+    if (clock_gettime_mono(&watch->stop)) {
+        int err = errno;
+        fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
+            "error %d (%s)\n", err, strerror(err));
+        goto done;
+    }
+    elapsed = timespec_to_double(&watch->stop) -
+        timespec_to_double(&watch->start);
+    rate = (bytes_read / elapsed) / (1024 * 1024 * 1024);
+    printf("stopwatch: took %.5g seconds to read %lld bytes, "
+        "for %.5g GB/s\n", elapsed, bytes_read, rate);
+    printf("stopwatch:  %.5g seconds\n", elapsed);
+done:
+    free(watch);
+}
+
+enum vecsum_type {
+    VECSUM_LOCAL = 0,
+    VECSUM_LIBHDFS,
+    VECSUM_ZCR,
+};
+
+#define VECSUM_TYPE_VALID_VALUES "libhdfs, zcr, or local"
+
+int parse_vecsum_type(const char *str)
+{
+    if (strcasecmp(str, "local") == 0)
+        return VECSUM_LOCAL;
+    else if (strcasecmp(str, "libhdfs") == 0)
+        return VECSUM_LIBHDFS;
+    else if (strcasecmp(str, "zcr") == 0)
+        return VECSUM_ZCR;
+    else
+        return -1;
+}
+
+struct options {
+    // The path to read.
+    const char *path;
+
+    // Length of the file.
+    long long length;
+
+    // The number of times to read the path.
+    int passes;
+
+    // Type of vecsum to do
+    enum vecsum_type ty;
+
+    // RPC address to use for HDFS
+    const char *rpc_address;
+};
+
+static struct options *options_create(void)
+{
+    struct options *opts = NULL;
+    const char *pass_str;
+    const char *ty_str;
+    const char *length_str;
+    int ty;
+
+    opts = calloc(1, sizeof(struct options));
+    if (!opts) {
+        fprintf(stderr, "failed to calloc options\n");
+        goto error;
+    }
+    opts->path = getenv("VECSUM_PATH");
+    if (!opts->path) {
+        fprintf(stderr, "You must set the VECSUM_PATH environment "
+            "variable to the path of the file to read.\n");
+        goto error;
+    }
+    length_str = getenv("VECSUM_LENGTH");
+    if (!length_str) {
+        length_str = "2147483648";
+    }
+    opts->length = atoll(length_str);
+    if (!opts->length) {
+        fprintf(stderr, "Can't parse VECSUM_LENGTH of '%s'.\n",
+                length_str);
+        goto error;
+    }
+    if (opts->length % VECSUM_CHUNK_SIZE) {
+        fprintf(stderr, "VECSUM_LENGTH must be a multiple of '%lld'.  The "
+                "currently specified length of '%lld' is not.\n",
+                (long long)VECSUM_CHUNK_SIZE, (long long)opts->length);
+        goto error;
+    }
+    pass_str = getenv("VECSUM_PASSES");
+    if (!pass_str) {
+        fprintf(stderr, "You must set the VECSUM_PASSES environment "
+            "variable to the number of passes to make.\n");
+        goto error;
+    }
+    opts->passes = atoi(pass_str);
+    if (opts->passes <= 0) {
+        fprintf(stderr, "Invalid value for the VECSUM_PASSES "
+            "environment variable.  You must set this to a "
+            "number greater than 0.\n");
+        goto error;
+    }
+    ty_str = getenv("VECSUM_TYPE");
+    if (!ty_str) {
+        fprintf(stderr, "You must set the VECSUM_TYPE environment "
+            "variable to " VECSUM_TYPE_VALID_VALUES "\n");
+        goto error;
+    }
+    ty = parse_vecsum_type(ty_str);
+    if (ty < 0) {
+        fprintf(stderr, "Invalid VECSUM_TYPE environment variable.  "
+            "Valid values are " VECSUM_TYPE_VALID_VALUES "\n");
+        goto error;
+    }
+    opts->ty = ty;
+    opts->rpc_address = getenv("VECSUM_RPC_ADDRESS");
+    if (!opts->rpc_address) {
+        opts->rpc_address = "default";
+    }
+    return opts;
+error:
+    free(opts);
+    return NULL;
+}
+
+static int test_file_chunk_setup(double **chunk)
+{
+    int i;
+    double *c, val;
+
+    c = malloc(VECSUM_CHUNK_SIZE);
+    if (!c) {
+        fprintf(stderr, "test_file_create: failed to malloc "
+                "a buffer of size '%lld'\n",
+                (long long) VECSUM_CHUNK_SIZE);
+        return EIO;
+    }
+    val = 0.0;
+    for (i = 0; i < VECSUM_CHUNK_SIZE / sizeof(double); i++) {
+        c[i] = val;
+        val += 0.5;
+    }
+    *chunk = c;
+    return 0;
+}
+
+static void options_free(struct options *opts)
+{
+    free(opts);
+}
+
+struct local_data {
+    int fd;
+    double *mmap;
+    long long length;
+};
+
+static int local_data_create_file(struct local_data *cdata,
+                                  const struct options *opts)
+{
+    int ret = EIO;
+    int dup_fd = -1;
+    FILE *fp = NULL;
+    double *chunk = NULL;
+    long long offset = 0;
+
+    dup_fd = dup(cdata->fd);
+    if (dup_fd < 0) {
+        ret = errno;
+        fprintf(stderr, "local_data_create_file: dup failed: %s (%d)\n",
+                strerror(ret), ret);
+        goto done;
+    }
+    fp = fdopen(dup_fd, "w");
+    if (!fp) {
+        ret = errno;
+        fprintf(stderr, "local_data_create_file: fdopen failed: %s (%d)\n",
+                strerror(ret), ret);
+        goto done;
+    }
+    ret = test_file_chunk_setup(&chunk);
+    if (ret)
+        goto done;
+    while (offset < opts->length) {
+        if (fwrite(chunk, VECSUM_CHUNK_SIZE, 1, fp) != 1) {
+            fprintf(stderr, "local_data_create_file: failed to write to "
+                    "the local file '%s' at offset %lld\n",
+                    opts->path, offset);
+            ret = EIO;
+            goto done;
+        }
+        offset += VECSUM_CHUNK_SIZE;
+    }
+    fprintf(stderr, "local_data_create_file: successfully re-wrote %s as "
+            "a file of length %lld\n", opts->path, opts->length);
+    ret = 0;
+
+done:
+    if (fp) {
+        /* fclose() also closes dup_fd, which fdopen() took ownership of */
+        fclose(fp);
+    } else if (dup_fd >= 0) {
+        close(dup_fd);
+    }
+    free(chunk);
+    return ret;
+}
+
+static struct local_data *local_data_create(const struct options *opts)
+{
+    struct local_data *cdata = NULL;
+    struct stat st_buf;
+
+    cdata = malloc(sizeof(*cdata));
+    if (!cdata) {
+        fprintf(stderr, "Failed to allocate local test data.\n");
+        goto error;
+    }
+    cdata->fd = -1;
+    cdata->mmap = MAP_FAILED;
+    cdata->length = opts->length;
+
+    cdata->fd = open(opts->path, O_RDWR | O_CREAT, 0777);
+    if (cdata->fd < 0) {
+        int err = errno;
+        fprintf(stderr, "local_data_create: failed to open %s "
+            "for read/write: error %d (%s)\n", opts->path, err, strerror(err));
+        goto error;
+    }
+    if (fstat(cdata->fd, &st_buf)) {
+        int err = errno;
+        fprintf(stderr, "local_data_create: fstat(%s) failed: "
+            "error %d (%s)\n", opts->path, err, strerror(err));
+        goto error;
+    }
+    if (st_buf.st_size != opts->length) {
+        int err;
+        fprintf(stderr, "local_data_create: current size of %s is %lld, but "
+                "we want %lld.  Re-writing the file.\n",
+                opts->path, (long long)st_buf.st_size,
+                (long long)opts->length);
+        err = local_data_create_file(cdata, opts);
+        if (err)
+            goto error;
+    }
+    cdata->mmap = mmap(NULL, cdata->length, PROT_READ,
+                       MAP_PRIVATE, cdata->fd, 0);
+    if (cdata->mmap == MAP_FAILED) {
+        int err = errno;
+        fprintf(stderr, "local_data_create: mmap(%s) failed: "
+            "error %d (%s)\n", opts->path, err, strerror(err));
+        goto error;
+    }
+    return cdata;
+
+error:
+    if (cdata) {
+        if (cdata->fd >= 0) {
+            close(cdata->fd);
+        }
+        free(cdata);
+    }
+    return NULL;
+}
+
+static void local_data_free(struct local_data *cdata)
+{
+    close(cdata->fd);
+    munmap(cdata->mmap, cdata->length);
+}
+
+struct libhdfs_data {
+    hdfsFS fs;
+    hdfsFile file;
+    long long length;
+    double *buf;
+};
+
+static void libhdfs_data_free(struct libhdfs_data *ldata)
+{
+    if (ldata->fs) {
+        free(ldata->buf);
+        if (ldata->file) {
+            hdfsCloseFile(ldata->fs, ldata->file);
+        }
+        hdfsDisconnect(ldata->fs);
+    }
+    free(ldata);
+}
+
+static int libhdfs_data_create_file(struct libhdfs_data *ldata,
+                                    const struct options *opts)
+{
+    int ret;
+    double *chunk = NULL;
+    long long offset = 0;
+
+    ldata->file = hdfsOpenFile(ldata->fs, opts->path, O_WRONLY, 0, 1, 0);
+    if (!ldata->file) {
+        ret = errno;
+        fprintf(stderr, "libhdfs_data_create_file: hdfsOpenFile(%s, "
+            "O_WRONLY) failed: error %d (%s)\n", opts->path, ret,
+            strerror(ret));
+        goto done;
+    }
+    ret = test_file_chunk_setup(&chunk);
+    if (ret)
+        goto done;
+    while (offset < opts->length) {
+        ret = hdfsWrite(ldata->fs, ldata->file, chunk, VECSUM_CHUNK_SIZE);
+        if (ret < 0) {
+            ret = errno;
+            fprintf(stderr, "libhdfs_data_create_file: got error %d (%s) at "
+                    "offset %lld of %s\n", ret, strerror(ret),
+                    offset, opts->path);
+            goto done;
+        } else if (ret < VECSUM_CHUNK_SIZE) {
+            fprintf(stderr, "libhdfs_data_create_file: got short write "
+                    "of %d at offset %lld of %s\n", ret, offset, opts->path);
+            goto done;
+        }
+        offset += VECSUM_CHUNK_SIZE;
+    }
+    ret = 0;
+done:
+    free(chunk);
+    if (ldata->file) {
+        if (hdfsCloseFile(ldata->fs, ldata->file)) {
+            fprintf(stderr, "libhdfs_data_create_file: hdfsCloseFile error.\n");
+            ret = EIO;
+        }
+        ldata->file = NULL;
+    }
+    return ret;
+}
+
+static struct libhdfs_data *libhdfs_data_create(const struct options *opts)
+{
+    struct libhdfs_data *ldata = NULL;
+    struct hdfsBuilder *builder = NULL;
+    hdfsFileInfo *pinfo = NULL;
+
+    ldata = calloc(1, sizeof(struct libhdfs_data));
+    if (!ldata) {
+        fprintf(stderr, "Failed to allocate libhdfs test data.\n");
+        goto error;
+    }
+    builder = hdfsNewBuilder();
+    if (!builder) {
+        fprintf(stderr, "Failed to create builder.\n");
+        goto error;
+    }
+    hdfsBuilderSetNameNode(builder, opts->rpc_address);
+    hdfsBuilderConfSetStr(builder,
+        "dfs.client.read.shortcircuit.skip.checksum", "true");
+    ldata->fs = hdfsBuilderConnect(builder);
+    if (!ldata->fs) {
+        fprintf(stderr, "Could not connect to default namenode!\n");
+        goto error;
+    }
+    pinfo = hdfsGetPathInfo(ldata->fs, opts->path);
+    if (!pinfo) {
+        int err = errno;
+        fprintf(stderr, "hdfsGetPathInfo(%s) failed: error %d (%s).  "
+                "Attempting to re-create file.\n",
+            opts->path, err, strerror(err));
+        if (libhdfs_data_create_file(ldata, opts))
+            goto error;
+    } else if (pinfo->mSize != opts->length) {
+        fprintf(stderr, "hdfsGetPathInfo(%s) failed: length was %lld, "
+                "but we want length %lld.  Attempting to re-create file.\n",
+                opts->path, (long long)pinfo->mSize, (long long)opts->length);
+        if (libhdfs_data_create_file(ldata, opts))
+            goto error;
+    }
+    ldata->file = hdfsOpenFile(ldata->fs, opts->path, O_RDONLY, 0, 0, 0);
+    if (!ldata->file) {
+        int err = errno;
+        fprintf(stderr, "hdfsOpenFile(%s) failed: error %d (%s)\n",
+            opts->path, err, strerror(err));
+        goto error;
+    }
+    ldata->length = opts->length;
+    return ldata;
+
+error:
+    if (pinfo)
+        hdfsFreeFileInfo(pinfo, 1);
+    if (ldata)
+        libhdfs_data_free(ldata);
+    return NULL;
+}
+
+static int check_byte_size(int byte_size, const char *const str)
+{
+    if (byte_size % sizeof(double)) {
+        fprintf(stderr, "%s is not a multiple "
+            "of sizeof(double)\n", str);
+        return EINVAL;
+    }
+    if ((byte_size / sizeof(double)) % DOUBLES_PER_LOOP_ITER) {
+        fprintf(stderr, "The number of doubles contained in "
+            "%s is not a multiple of DOUBLES_PER_LOOP_ITER\n",
+            str);
+        return EINVAL;
+    }
+    return 0;
+}
+
+#ifdef HAVE_INTEL_SSE_INTRINSICS
+
+#include <emmintrin.h>
+
+static double vecsum(const double *buf, int num_doubles)
+{
+    int i;
+    double hi, lo;
+    __m128d x0, x1, x2, x3, x4, x5, x6, x7;
+    __m128d sum0 = _mm_set_pd(0.0,0.0);
+    __m128d sum1 = _mm_set_pd(0.0,0.0);
+    __m128d sum2 = _mm_set_pd(0.0,0.0);
+    __m128d sum3 = _mm_set_pd(0.0,0.0);
+    __m128d sum4 = _mm_set_pd(0.0,0.0);
+    __m128d sum5 = _mm_set_pd(0.0,0.0);
+    __m128d sum6 = _mm_set_pd(0.0,0.0);
+    __m128d sum7 = _mm_set_pd(0.0,0.0);
+    for (i = 0; i < num_doubles; i+=DOUBLES_PER_LOOP_ITER) {
+        x0 = _mm_load_pd(buf + i + 0);
+        x1 = _mm_load_pd(buf + i + 2);
+        x2 = _mm_load_pd(buf + i + 4);
+        x3 = _mm_load_pd(buf + i + 6);
+        x4 = _mm_load_pd(buf + i + 8);
+        x5 = _mm_load_pd(buf + i + 10);
+        x6 = _mm_load_pd(buf + i + 12);
+        x7 = _mm_load_pd(buf + i + 14);
+        sum0 = _mm_add_pd(sum0, x0);
+        sum1 = _mm_add_pd(sum1, x1);
+        sum2 = _mm_add_pd(sum2, x2);
+        sum3 = _mm_add_pd(sum3, x3);
+        sum4 = _mm_add_pd(sum4, x4);
+        sum5 = _mm_add_pd(sum5, x5);
+        sum6 = _mm_add_pd(sum6, x6);
+        sum7 = _mm_add_pd(sum7, x7);
+    }
+    x0 = _mm_add_pd(sum0, sum1);
+    x1 = _mm_add_pd(sum2, sum3);
+    x2 = _mm_add_pd(sum4, sum5);
+    x3 = _mm_add_pd(sum6, sum7);
+    x4 = _mm_add_pd(x0, x1);
+    x5 = _mm_add_pd(x2, x3);
+    x6 = _mm_add_pd(x4, x5);
+    _mm_storeh_pd(&hi, x6);
+    _mm_storel_pd(&lo, x6);
+    return hi + lo;
+}
+
+#else
+
+static double vecsum(const double *buf, int num_doubles)
+{
+    int i;
+    double sum = 0.0;
+    for (i = 0; i < num_doubles; i++) {
+        sum += buf[i];
+    }
+    return sum;
+}
+
+#endif
+
+static int vecsum_zcr_loop(int pass, struct libhdfs_data *ldata,
+        struct hadoopRzOptions *zopts,
+        const struct options *opts)
+{
+    int32_t len;
+    double sum = 0.0;
+    const double *buf;
+    struct hadoopRzBuffer *rzbuf = NULL;
+    int ret;
+
+    while (1) {
+        rzbuf = hadoopReadZero(ldata->file, zopts, ZCR_READ_CHUNK_SIZE);
+        if (!rzbuf) {
+            ret = errno;
+            fprintf(stderr, "hadoopReadZero failed with error "
+                "code %d (%s)\n", ret, strerror(ret));
+            goto done;
+        }
+        buf = hadoopRzBufferGet(rzbuf);
+        if (!buf) break;
+        len = hadoopRzBufferLength(rzbuf);
+        if (len < ZCR_READ_CHUNK_SIZE) {
+            fprintf(stderr, "hadoopReadZero got a partial read "
+                "of length %d\n", len);
+            ret = EINVAL;
+            goto done;
+        }
+        sum += vecsum(buf,
+            ZCR_READ_CHUNK_SIZE / sizeof(double));
+        hadoopRzBufferFree(ldata->file, rzbuf);
+    }
+    printf("finished zcr pass %d.  sum = %g\n", pass, sum);
+    ret = 0;
+
+done:
+    if (rzbuf)
+        hadoopRzBufferFree(ldata->file, rzbuf);
+    return ret;
+}
+
+static int vecsum_zcr(struct libhdfs_data *ldata,
+        const struct options *opts)
+{
+    int ret, pass;
+    struct hadoopRzOptions *zopts = NULL;
+
+    zopts = hadoopRzOptionsAlloc();
+    if (!zopts) {
+        fprintf(stderr, "hadoopRzOptionsAlloc failed.\n");
+        ret = ENOMEM;
+        goto done;
+    }
+    if (hadoopRzOptionsSetSkipChecksum(zopts, 1)) {
+        ret = errno;
+        perror("hadoopRzOptionsSetSkipChecksum failed: ");
+        goto done;
+    }
+    if (hadoopRzOptionsSetByteBufferPool(zopts, NULL)) {
+        ret = errno;
+        perror("hadoopRzOptionsSetByteBufferPool failed: ");
+        goto done;
+    }
+    for (pass = 0; pass < opts->passes; ++pass) {
+        ret = vecsum_zcr_loop(pass, ldata, zopts, opts);
+        if (ret) {
+            fprintf(stderr, "vecsum_zcr_loop pass %d failed "
+                "with error %d\n", pass, ret);
+            goto done;
+        }
+        hdfsSeek(ldata->fs, ldata->file, 0);
+    }
+    ret = 0;
+done:
+    if (zopts)
+        hadoopRzOptionsFree(zopts);
+    return ret;
+}
+
+tSize hdfsReadFully(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
+{
+    uint8_t *buf = buffer;
+    tSize ret, nread = 0;
+
+    while (length > 0) {
+        ret = hdfsRead(fs, f, buf, length);
+        if (ret < 0) {
+            if (errno != EINTR)
+                return -1;
+            continue;    /* interrupted before reading any data; retry */
+        }
+        if (ret == 0) {
+            break;
+        }
+        nread += ret;
+        length -= ret;
+        buf += ret;
+    }
+    return nread;
+}
+
+static int vecsum_normal_loop(int pass, const struct libhdfs_data *ldata,
+            const struct options *opts)
+{
+    double sum = 0.0;
+
+    while (1) {
+        int res = hdfsReadFully(ldata->fs, ldata->file, ldata->buf,
+                NORMAL_READ_CHUNK_SIZE);
+        if (res == 0) // EOF
+            break;
+        if (res < 0) {
+            int err = errno;
+            fprintf(stderr, "hdfsRead failed with error %d (%s)\n",
+                err, strerror(err));
+            return err;
+        }
+        if (res < NORMAL_READ_CHUNK_SIZE) {
+            fprintf(stderr, "hdfsRead got a partial read of "
+                "length %d\n", res);
+            return EINVAL;
+        }
+        sum += vecsum(ldata->buf,
+                  NORMAL_READ_CHUNK_SIZE / sizeof(double));
+    }
+    printf("finished normal pass %d.  sum = %g\n", pass, sum);
+    return 0;
+}
+
+static int vecsum_libhdfs(struct libhdfs_data *ldata,
+            const struct options *opts)
+{
+    int pass;
+
+    ldata->buf = malloc(NORMAL_READ_CHUNK_SIZE);
+    if (!ldata->buf) {
+        fprintf(stderr, "failed to malloc buffer of size %d\n",
+            NORMAL_READ_CHUNK_SIZE);
+        return ENOMEM;
+    }
+    for (pass = 0; pass < opts->passes; ++pass) {
+        int ret = vecsum_normal_loop(pass, ldata, opts);
+        if (ret) {
+            fprintf(stderr, "vecsum_normal_loop pass %d failed "
+                "with error %d\n", pass, ret);
+            return ret;
+        }
+        hdfsSeek(ldata->fs, ldata->file, 0);
+    }
+    return 0;
+}
+
+static void vecsum_local(struct local_data *cdata, const struct options *opts)
+{
+    int pass;
+
+    for (pass = 0; pass < opts->passes; pass++) {
+        double sum = vecsum(cdata->mmap, cdata->length / sizeof(double));
+        printf("finished vecsum_local pass %d.  sum = %g\n", pass, sum);
+    }
+}
+
+static long long vecsum_length(const struct options *opts,
+                const struct libhdfs_data *ldata)
+{
+    if (opts->ty == VECSUM_LOCAL) {
+        struct stat st_buf = { 0 };
+        if (stat(opts->path, &st_buf)) {
+            int err = errno;
+            fprintf(stderr, "vecsum_length: stat(%s) failed: "
+                "error %d (%s)\n", opts->path, err, strerror(err));
+            return -EIO;
+        }
+        return st_buf.st_size;
+    } else {
+        return ldata->length;
+    }
+}
+
+/*
+ * vecsum is a microbenchmark which measures the speed of various ways of
+ * reading from HDFS.  It creates a file containing floating-point 'doubles',
+ * and computes the sum of all the doubles several times.  For some CPUs,
+ * assembly optimizations are used for the summation (SSE, etc).
+ */
+int main(void)
+{
+    int ret = 1;
+    struct options *opts = NULL;
+    struct local_data *cdata = NULL;
+    struct libhdfs_data *ldata = NULL;
+    struct stopwatch *watch = NULL;
+
+    if (check_byte_size(VECSUM_CHUNK_SIZE, "VECSUM_CHUNK_SIZE") ||
+        check_byte_size(ZCR_READ_CHUNK_SIZE,
+                "ZCR_READ_CHUNK_SIZE") ||
+        check_byte_size(NORMAL_READ_CHUNK_SIZE,
+                "NORMAL_READ_CHUNK_SIZE")) {
+        goto done;
+    }
+    opts = options_create();
+    if (!opts)
+        goto done;
+    if (opts->ty == VECSUM_LOCAL) {
+        cdata = local_data_create(opts);
+        if (!cdata)
+            goto done;
+    } else {
+        ldata = libhdfs_data_create(opts);
+        if (!ldata)
+            goto done;
+    }
+    watch = stopwatch_create();
+    if (!watch)
+        goto done;
+    switch (opts->ty) {
+    case VECSUM_LOCAL:
+        vecsum_local(cdata, opts);
+        ret = 0;
+        break;
+    case VECSUM_LIBHDFS:
+        ret = vecsum_libhdfs(ldata, opts);
+        break;
+    case VECSUM_ZCR:
+        ret = vecsum_zcr(ldata, opts);
+        break;
+    }
+    if (ret) {
+        fprintf(stderr, "vecsum failed with error %d\n", ret);
+        goto done;
+    }
+    ret = 0;
+done:
+    fprintf(stderr, "cleaning up...\n");
+    if (watch && (ret == 0)) {
+        long long length = vecsum_length(opts, ldata);
+        if (length >= 0) {
+            stopwatch_stop(watch, length * opts->passes);
+        }
+    }
+    if (cdata)
+        local_data_free(cdata);
+    if (ldata)
+        libhdfs_data_free(ldata);
+    if (opts)
+        options_free(opts);
+    return ret;
+}
+
+// vim: ts=4:sw=4:tw=79:et
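
A note on running the benchmark: vecsum is configured entirely through
environment variables read in options_create(): VECSUM_PATH (the file to
read; required), VECSUM_LENGTH (bytes, default 2147483648, which must be a
multiple of the 8 MB VECSUM_CHUNK_SIZE), VECSUM_PASSES (required, greater
than 0), VECSUM_TYPE (local, libhdfs, or zcr), and VECSUM_RPC_ADDRESS
(defaults to "default"). Assuming the binary is built as test_libhdfs_vecsum,
as in the CMake target further down in this commit, an illustrative zero-copy
run (the path and pass count are placeholders) could look like:

    VECSUM_PATH=/user/foo/vecsum.dat VECSUM_LENGTH=2147483648 \
    VECSUM_PASSES=10 VECSUM_TYPE=zcr VECSUM_RPC_ADDRESS=default \
    ./test_libhdfs_vecsum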

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b19ed/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_libhdfs_threaded.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_libhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_libhdfs_threaded.c
new file mode 100644
index 0000000..702430c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_libhdfs_threaded.c
@@ -0,0 +1,360 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "expect.h"
+#include "hdfs.h"
+#include "native_mini_dfs.h"
+#include "os/thread.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define TO_STR_HELPER(X) #X
+#define TO_STR(X) TO_STR_HELPER(X)
+
+#define TLH_MAX_THREADS 100
+
+#define TLH_DEFAULT_BLOCK_SIZE 134217728
+
+static struct NativeMiniDfsCluster* tlhCluster;
+
+struct tlhThreadInfo {
+    /** Thread index */
+    int threadIdx;
+    /** 0 = thread was successful; error code otherwise */
+    int success;
+    /** thread identifier */
+    thread theThread;
+};
+
+static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
+                                     const char *username)
+{
+    int ret;
+    int port;
+    hdfsFS hdfs;
+    struct hdfsBuilder *bld;
+    
+    port = nmdGetNameNodePort(cl); /* keep as an int so a negative error return is detectable */
+    if (port < 0) {
+        fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
+                "returned error %d\n", port);
+        return port;
+    }
+    bld = hdfsNewBuilder();
+    if (!bld)
+        return -ENOMEM;
+    hdfsBuilderSetForceNewInstance(bld);
+    hdfsBuilderSetNameNode(bld, "localhost");
+    hdfsBuilderSetNameNodePort(bld, (tPort)port);
+    hdfsBuilderConfSetStr(bld, "dfs.block.size",
+                          TO_STR(TLH_DEFAULT_BLOCK_SIZE));
+    hdfsBuilderConfSetStr(bld, "dfs.blocksize",
+                          TO_STR(TLH_DEFAULT_BLOCK_SIZE));
+    if (username) {
+        hdfsBuilderSetUserName(bld, username);
+    }
+    hdfs = hdfsBuilderConnect(bld);
+    if (!hdfs) {
+        ret = -errno;
+        return ret;
+    }
+    *fs = hdfs;
+    return 0;
+}
+
+static int doTestGetDefaultBlockSize(hdfsFS fs, const char *path)
+{
+    int64_t blockSize;
+    int ret;
+
+    blockSize = hdfsGetDefaultBlockSize(fs);
+    if (blockSize < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsGetDefaultBlockSize failed with error %d\n", ret);
+        return ret;
+    } else if (blockSize != TLH_DEFAULT_BLOCK_SIZE) {
+        fprintf(stderr, "hdfsGetDefaultBlockSize got %"PRId64", but we "
+                "expected %d\n", blockSize, TLH_DEFAULT_BLOCK_SIZE);
+        return EIO;
+    }
+
+    blockSize = hdfsGetDefaultBlockSizeAtPath(fs, path);
+    if (blockSize < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsGetDefaultBlockSizeAtPath(%s) failed with "
+                "error %d\n", path, ret);
+        return ret;
+    } else if (blockSize != TLH_DEFAULT_BLOCK_SIZE) {
+        fprintf(stderr, "hdfsGetDefaultBlockSizeAtPath(%s) got "
+                "%"PRId64", but we expected %d\n", 
+                path, blockSize, TLH_DEFAULT_BLOCK_SIZE);
+        return EIO;
+    }
+    return 0;
+}
+
+struct tlhPaths {
+    char prefix[256];
+    char file1[256];
+    char file2[256];
+};
+
+static int setupPaths(const struct tlhThreadInfo *ti, struct tlhPaths *paths)
+{
+    memset(paths, 0, sizeof(*paths));
+    if (snprintf(paths->prefix, sizeof(paths->prefix), "/tlhData%04d",
+                 ti->threadIdx) >= sizeof(paths->prefix)) {
+        return ENAMETOOLONG;
+    }
+    if (snprintf(paths->file1, sizeof(paths->file1), "%s/file1",
+                 paths->prefix) >= sizeof(paths->file1)) {
+        return ENAMETOOLONG;
+    }
+    if (snprintf(paths->file2, sizeof(paths->file2), "%s/file2",
+                 paths->prefix) >= sizeof(paths->file2)) {
+        return ENAMETOOLONG;
+    }
+    return 0;
+}
+
+static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
+                                const struct tlhPaths *paths)
+{
+    char tmp[4096];
+    hdfsFile file;
+    int ret, expected, numEntries;
+    hdfsFileInfo *fileInfo;
+    struct hdfsReadStatistics *readStats = NULL;
+
+    if (hdfsExists(fs, paths->prefix) == 0) {
+        EXPECT_ZERO(hdfsDelete(fs, paths->prefix, 1));
+    }
+    EXPECT_ZERO(hdfsCreateDirectory(fs, paths->prefix));
+
+    EXPECT_ZERO(doTestGetDefaultBlockSize(fs, paths->prefix));
+
+    /* There should be no entry in the directory. */
+    errno = EACCES; // see if errno is set to 0 on success
+    EXPECT_NULL_WITH_ERRNO(hdfsListDirectory(fs, paths->prefix, &numEntries), 0);
+    if (numEntries != 0) {
+        fprintf(stderr, "hdfsListDirectory set numEntries to "
+                "%d on empty directory.\n", numEntries);
+    }
+
+    /* There should not be any file to open for reading. */
+    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0));
+
+    /* hdfsOpenFile should not accept mode = 3 */
+    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, 3, 0, 0, 0));
+
+    file = hdfsOpenFile(fs, paths->file1, O_WRONLY, 0, 0, 0);
+    EXPECT_NONNULL(file);
+
+    /* TODO: implement writeFully and use it here */
+    expected = (int)strlen(paths->prefix);
+    ret = hdfsWrite(fs, file, paths->prefix, expected);
+    if (ret < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsWrite failed and set errno %d\n", ret);
+        return ret;
+    }
+    if (ret != expected) {
+        fprintf(stderr, "hdfsWrite was supposed to write %d bytes, but "
+                "it wrote %d\n", expected, ret);
+        return EIO;
+    }
+    EXPECT_ZERO(hdfsFlush(fs, file));
+    EXPECT_ZERO(hdfsHSync(fs, file));
+    EXPECT_ZERO(hdfsCloseFile(fs, file));
+
+    /* There should be 1 entry in the directory. */
+    EXPECT_NONNULL(hdfsListDirectory(fs, paths->prefix, &numEntries));
+    if (numEntries != 1) {
+        fprintf(stderr, "hdfsListDirectory set numEntries to "
+                "%d on directory containing 1 file.\n", numEntries);
+    }
+
+    /* Let's re-open the file for reading */
+    file = hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0);
+    EXPECT_NONNULL(file);
+
+    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
+    errno = 0;
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalLocalBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalShortCircuitBytesRead);
+    hdfsFileFreeReadStatistics(readStats);
+    /* TODO: implement readFully and use it here */
+    ret = hdfsRead(fs, file, tmp, sizeof(tmp));
+    if (ret < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsRead failed and set errno %d\n", ret);
+        return ret;
+    }
+    if (ret != expected) {
+        fprintf(stderr, "hdfsRead was supposed to read %d bytes, but "
+                "it read %d\n", expected, ret);
+        return EIO;
+    }
+    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
+    errno = 0;
+    EXPECT_UINT64_EQ((uint64_t)expected, readStats->totalBytesRead);
+    hdfsFileFreeReadStatistics(readStats);
+    EXPECT_ZERO(hdfsFileClearReadStatistics(file));
+    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
+    EXPECT_UINT64_EQ((uint64_t)0, readStats->totalBytesRead);
+    hdfsFileFreeReadStatistics(readStats);
+    EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
+    EXPECT_ZERO(hdfsCloseFile(fs, file));
+
+    // TODO: Non-recursive delete should fail?
+    //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
+    EXPECT_ZERO(hdfsCopy(fs, paths->file1, fs, paths->file2));
+
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, NULL));
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, "doop"));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
+    EXPECT_ZERO(hdfsFileIsEncrypted(fileInfo));
+    hdfsFreeFileInfo(fileInfo, 1);
+
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha", "doop2"));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
+    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha2", NULL));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
+    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+
+    snprintf(tmp, sizeof(tmp), "%s/nonexistent-file-name", paths->prefix);
+    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, tmp, "ha3", NULL), ENOENT);
+    return 0;
+}
+
+static int testHdfsOperationsImpl(struct tlhThreadInfo *ti)
+{
+    hdfsFS fs = NULL;
+    struct tlhPaths paths;
+
+    fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
+        ti->threadIdx);
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
+    EXPECT_ZERO(setupPaths(ti, &paths));
+    // test some operations
+    EXPECT_ZERO(doTestHdfsOperations(ti, fs, &paths));
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    // reconnect as user "foo" and verify that we get permission errors
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, "foo"));
+    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, paths.file1, "ha3", NULL), EACCES);
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    // reconnect to do the final delete.
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
+    EXPECT_ZERO(hdfsDelete(fs, paths.prefix, 1));
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    return 0;
+}
+
+static void testHdfsOperations(void *v)
+{
+    struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
+    int ret = testHdfsOperationsImpl(ti);
+    ti->success = ret;
+}
+
+static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
+{
+    int i, threadsFailed = 0;
+    const char *sep = "";
+
+    for (i = 0; i < tlhNumThreads; i++) {
+        if (ti[i].success != 0) {
+            threadsFailed = 1;
+        }
+    }
+    if (!threadsFailed) {
+        fprintf(stderr, "testLibHdfs: all threads succeeded.  SUCCESS.\n");
+        return EXIT_SUCCESS;
+    }
+    fprintf(stderr, "testLibHdfs: some threads failed: [");
+    for (i = 0; i < tlhNumThreads; i++) {
+        if (ti[i].success != 0) {
+            fprintf(stderr, "%s%d", sep, i);
+            sep = ", "; 
+        }
+    }
+    fprintf(stderr, "].  FAILURE.\n");
+    return EXIT_FAILURE;
+}
+
+/**
+ * Test that we can write a file with libhdfs and then read it back
+ */
+int main(void)
+{
+    int i, tlhNumThreads;
+    const char *tlhNumThreadsStr;
+    struct tlhThreadInfo ti[TLH_MAX_THREADS];
+    struct NativeMiniDfsConf conf = {
+        1, /* doFormat */
+    };
+
+    tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
+    if (!tlhNumThreadsStr) {
+        tlhNumThreadsStr = "3";
+    }
+    tlhNumThreads = atoi(tlhNumThreadsStr);
+    if ((tlhNumThreads <= 0) || (tlhNumThreads > TLH_MAX_THREADS)) {
+        fprintf(stderr, "testLibHdfs: must have a number of threads "
+                "between 1 and %d inclusive, not %d\n",
+                TLH_MAX_THREADS, tlhNumThreads);
+        return EXIT_FAILURE;
+    }
+    memset(&ti[0], 0, sizeof(ti));
+    for (i = 0; i < tlhNumThreads; i++) {
+        ti[i].threadIdx = i;
+    }
+
+    tlhCluster = nmdCreate(&conf);
+    EXPECT_NONNULL(tlhCluster);
+    EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));
+
+    for (i = 0; i < tlhNumThreads; i++) {
+        ti[i].theThread.start = testHdfsOperations;
+        ti[i].theThread.arg = &ti[i];
+        EXPECT_ZERO(threadCreate(&ti[i].theThread));
+    }
+    for (i = 0; i < tlhNumThreads; i++) {
+        EXPECT_ZERO(threadJoin(&ti[i].theThread));
+    }
+
+    EXPECT_ZERO(nmdShutdown(tlhCluster));
+    nmdFree(tlhCluster);
+    return checkFailures(ti, tlhNumThreads);
+}
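
A note on running this test: the worker-thread count comes from the
TLH_NUM_THREADS environment variable (default 3, at most TLH_MAX_THREADS =
100). Each thread connects to the shared NativeMiniDfsCluster and exercises
the write/read/chown sequence under its own /tlhDataNNNN prefix, so the
threads do not interfere with each other. An illustrative invocation,
assuming the test_libhdfs_threaded binary from the CMake target further down:

    TLH_NUM_THREADS=8 ./test_libhdfs_threaded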

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b19ed/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_native_mini_dfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_native_mini_dfs.c
new file mode 100644
index 0000000..850b0fc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_native_mini_dfs.c
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "expect.h"
+#include "native_mini_dfs.h"
+
+#include <errno.h>
+
+static struct NativeMiniDfsConf conf = {
+    1, /* doFormat */
+};
+
+/**
+ * Test that we can create a MiniDFSCluster and shut it down.
+ */
+int main(void) {
+    struct NativeMiniDfsCluster* cl;
+    
+    cl = nmdCreate(&conf);
+    EXPECT_NONNULL(cl);
+    EXPECT_ZERO(nmdWaitClusterUp(cl));
+    EXPECT_ZERO(nmdShutdown(cl));
+    nmdFree(cl);
+
+    return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b19ed/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8a3bec8..0c1f935 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -671,6 +671,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9155. OEV should treat .XML files as XML even when the file name
     extension is uppercase (nijel via cmccabe)
 
+    HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b19ed/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 0ca878c..9da6cce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -33,8 +33,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <properties>
     <hadoop.component>hdfs</hadoop.component>
     <is.hadoop.component>true</is.hadoop.component>
-    <require.fuse>false</require.fuse>
-    <require.libwebhdfs>false</require.libwebhdfs>
   </properties>
 
   <dependencies>
@@ -423,171 +421,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <profiles>
     <profile>
-      <id>native-win</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-        <os>
-          <family>windows</family>
-        </os>
-      </activation>
-      <properties>
-        <runningWithNative>true</runningWithNative>
-      </properties>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-enforcer-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>enforce-os</id>
-                <goals>
-                  <goal>enforce</goal>
-                </goals>
-                <configuration>
-                  <rules>
-                    <requireOS>
-                      <family>windows</family>
-                      <message>native-win build only supported on Windows</message>
-                    </requireOS>
-                  </rules>
-                  <fail>true</fail>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>make</id>
-                <phase>compile</phase>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-                <configuration>
-                  <target>
-                    <condition property="generator" value="Visual Studio 10" else="Visual Studio 10 Win64">
-                      <equals arg1="Win32" arg2="${env.PLATFORM}" />
-                    </condition>
-                    <mkdir dir="${project.build.directory}/native"/>
-                    <exec executable="cmake" dir="${project.build.directory}/native"
-                        failonerror="true">
-                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -G '${generator}'"/>
-                    </exec>
-                    <exec executable="msbuild" dir="${project.build.directory}/native"
-                        failonerror="true">
-                      <arg line="ALL_BUILD.vcxproj /nologo /p:Configuration=RelWithDebInfo /p:LinkIncremental=false"/>
-                    </exec>
-                    <!-- Copy for inclusion in distribution. -->
-                    <copy todir="${project.build.directory}/bin">
-                      <fileset dir="${project.build.directory}/native/target/bin/RelWithDebInfo"/>
-                    </copy>
-                  </target>
-                </configuration>
-              </execution>
-              <execution>
-                <id>native_tests</id>
-                <phase>test</phase>
-                <goals><goal>run</goal></goals>
-                <configuration>
-                  <skip>${skipTests}</skip>
-                  <target>
-                    <property name="compile_classpath" refid="maven.compile.classpath"/>
-                    <property name="test_classpath" refid="maven.test.classpath"/>
-                    <macrodef name="run-test">
-                      <attribute name="test"/>
-                      <sequential>
-                        <echo message="Running @{test}"/>
-                        <exec executable="${project.build.directory}/native/RelWithDebInfo/@{test}" failonerror="true" dir="${project.build.directory}/native/">
-                          <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
-                          <!-- HADOOP_HOME required to find winutils. -->
-                          <env key="HADOOP_HOME" value="${hadoop.common.build.dir}"/>
-                          <!-- Make sure hadoop.dll and jvm.dll are on PATH. -->
-                          <env key="PATH" value="${env.PATH};${hadoop.common.build.dir}/bin;${java.home}/jre/bin/server;${java.home}/bin/server"/>
-                        </exec>
-                        <echo message="Finished @{test}"/>
-                      </sequential>
-                    </macrodef>
-                    <run-test test="test_libhdfs_threaded"/>
-                    <echo message="Skipping test_libhdfs_zerocopy"/>
-                    <run-test test="test_native_mini_dfs"/>
-                  </target>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-    <profile>
-      <id>native</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-      </activation>
-      <properties>
-        <runningWithNative>true</runningWithNative>
-      </properties>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>make</id>
-                <phase>compile</phase>
-                <goals><goal>run</goal></goals>
-                <configuration>
-                  <target>
-                    <mkdir dir="${project.build.directory}/native"/>
-                    <exec executable="cmake" dir="${project.build.directory}/native" 
-                        failonerror="true">
-                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse}"/>
-                    </exec>
-                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
-                      <arg line="VERBOSE=1"/>
-                    </exec>
-                    <!-- The second make is a workaround for HADOOP-9215.  It can
-                         be removed when version 2.6 of cmake is no longer supported . -->
-                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true"></exec>
-                  </target>
-                </configuration>
-              </execution>
-              <execution>
-                <id>native_tests</id>
-                <phase>test</phase>
-                <goals><goal>run</goal></goals>
-                <configuration>
-                  <skip>${skipTests}</skip>
-                  <target>
-                    <property name="compile_classpath" refid="maven.compile.classpath"/>
-                    <property name="test_classpath" refid="maven.test.classpath"/>
-                    <macrodef name="run-test">
-                      <attribute name="test"/>
-                      <sequential>
-                        <echo message="Running @{test}"/>
-                        <exec executable="${project.build.directory}/native/@{test}" failonerror="true" dir="${project.build.directory}/native/">
-                          <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
-                          <!-- Make sure libhadoop.so is on LD_LIBRARY_PATH. -->
-                          <env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
-                        </exec>
-                        <echo message="Finished @{test}"/>
-                      </sequential>
-                    </macrodef>
-                    <run-test test="test_libhdfs_threaded"/>
-                    <run-test test="test_libhdfs_zerocopy"/>
-                    <run-test test="test_native_mini_dfs"/>
-                  </target>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-    <profile>
       <id>parallel-tests</id>
       <build>
         <plugins>

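For context, the two profiles removed above ("native-win" and "native") were
what wired the libhdfs CMake build and its tests into hadoop-hdfs; they were
typically activated with something along the lines of
"mvn compile -Pnative -Drequire.fuse=true -Drequire.libwebhdfs=true" (the
exact goals vary by build). With this change the native sources and their
CMake build move to the hadoop-hdfs-native-client module, as the new file
paths in this commit show.
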
http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b19ed/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
deleted file mode 100644
index 2f8620b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ /dev/null
@@ -1,206 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
-
-list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../hadoop-common-project/hadoop-common)
-include(HadoopCommon)
-
-#
-# Main configuration
-#
-
-# The caller must specify where the generated headers have been placed.
-if(NOT GENERATED_JAVAH)
-    message(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH")
-endif()
-
-# Check to see if our compiler and linker support the __thread attribute.
-# On Linux and some other operating systems, this is a more efficient
-# alternative to POSIX thread local storage.
-include(CheckCSourceCompiles)
-check_c_source_compiles("int main(void) { static __thread int i = 0; return 0; }" HAVE_BETTER_TLS)
-
-# Check to see if we have Intel SSE intrinsics.
-check_c_source_compiles("#include <emmintrin.h>\nint main(void) { __m128d sum0 = _mm_set_pd(0.0,0.0); return 0; }" HAVE_INTEL_SSE_INTRINSICS)
-
-# Check if we need to link dl library to get dlopen.
-# dlopen on Linux is in separate library but on FreeBSD its in libc
-include(CheckLibraryExists)
-check_library_exists(dl dlopen "" NEED_LINK_DL)
-
-if(WIN32)
-    # Set the optimizer level.
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /O2")
-    # Set warning level 4.
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4")
-    # Skip "unreferenced formal parameter".
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4100")
-    # Skip "conditional expression is constant".
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127")
-    # Skip deprecated POSIX function warnings.
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_NONSTDC_NO_DEPRECATE")
-    # Skip CRT non-secure function warnings.  If we can convert usage of
-    # strerror, getenv and ctime to their secure CRT equivalents, then we can
-    # re-enable the CRT non-secure function warnings.
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_SECURE_NO_WARNINGS")
-    # Omit unneeded headers.
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWIN32_LEAN_AND_MEAN")
-    set(OS_DIR main/native/libhdfs/os/windows)
-    set(OUT_DIR target/bin)
-else()
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
-    set(OS_DIR main/native/libhdfs/os/posix)
-    set(OUT_DIR target/usr/local/lib)
-endif()
-
-# Configure JNI.
-include(HadoopJNI)
-
-add_definitions(-DLIBHDFS_DLL_EXPORT)
-
-include_directories(
-    ${GENERATED_JAVAH}
-    ${CMAKE_CURRENT_SOURCE_DIR}
-    ${CMAKE_BINARY_DIR}
-    ${JNI_INCLUDE_DIRS}
-    main/native
-    main/native/libhdfs
-    ${OS_DIR}
-)
-
-set(_FUSE_DFS_VERSION 0.1.0)
-configure_file(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
-
-hadoop_add_dual_library(hdfs
-    main/native/libhdfs/exception.c
-    main/native/libhdfs/jni_helper.c
-    main/native/libhdfs/hdfs.c
-    main/native/libhdfs/common/htable.c
-    ${OS_DIR}/mutexes.c
-    ${OS_DIR}/thread_local_storage.c
-)
-if(NEED_LINK_DL)
-   set(LIB_DL dl)
-endif()
-
-hadoop_target_link_dual_libraries(hdfs
-    ${JAVA_JVM_LIBRARY}
-    ${LIB_DL}
-    ${OS_LINK_LIBRARIES}
-)
-
-hadoop_dual_output_directory(hdfs ${OUT_DIR})
-set(LIBHDFS_VERSION "0.0.0")
-set_target_properties(hdfs PROPERTIES
-    SOVERSION ${LIBHDFS_VERSION})
-
-add_executable(test_libhdfs_ops
-    main/native/libhdfs/test/test_libhdfs_ops.c
-)
-target_link_libraries(test_libhdfs_ops
-    hdfs_static
-    ${JAVA_JVM_LIBRARY}
-)
-
-add_executable(test_libhdfs_read
-    main/native/libhdfs/test/test_libhdfs_read.c
-)
-target_link_libraries(test_libhdfs_read
-    hdfs_static
-    ${JAVA_JVM_LIBRARY}
-)
-
-add_executable(test_libhdfs_write
-    main/native/libhdfs/test/test_libhdfs_write.c
-)
-target_link_libraries(test_libhdfs_write
-    hdfs_static
-    ${JAVA_JVM_LIBRARY}
-)
-
-add_library(native_mini_dfs
-    main/native/libhdfs/native_mini_dfs.c
-    main/native/libhdfs/common/htable.c
-    main/native/libhdfs/exception.c
-    main/native/libhdfs/jni_helper.c
-    ${OS_DIR}/mutexes.c
-    ${OS_DIR}/thread_local_storage.c
-)
-target_link_libraries(native_mini_dfs
-    ${JAVA_JVM_LIBRARY}
-    ${LIB_DL}
-    ${OS_LINK_LIBRARIES}
-)
-
-add_executable(test_native_mini_dfs
-    main/native/libhdfs/test_native_mini_dfs.c
-)
-target_link_libraries(test_native_mini_dfs
-    native_mini_dfs
-)
-
-add_executable(test_libhdfs_threaded
-    main/native/libhdfs/expect.c
-    main/native/libhdfs/test_libhdfs_threaded.c
-    ${OS_DIR}/thread.c
-)
-target_link_libraries(test_libhdfs_threaded
-    hdfs_static
-    native_mini_dfs
-    ${OS_LINK_LIBRARIES}
-)
-
-add_executable(test_libhdfs_zerocopy
-    main/native/libhdfs/expect.c
-    main/native/libhdfs/test/test_libhdfs_zerocopy.c
-)
-target_link_libraries(test_libhdfs_zerocopy
-    hdfs_static
-    native_mini_dfs
-    ${OS_LINK_LIBRARIES}
-)
-
-add_executable(test_htable
-    main/native/libhdfs/common/htable.c
-    main/native/libhdfs/test/test_htable.c
-)
-target_link_libraries(test_htable
-    ${OS_LINK_LIBRARIES}
-)
-
-# Skip vecsum on Windows.  This could be made to work in the future by
-# introducing an abstraction layer over the sys/mman.h functions.
-if(NOT WIN32)
-    add_executable(test_libhdfs_vecsum main/native/libhdfs/test/vecsum.c)
-    if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
-        target_link_libraries(test_libhdfs_vecsum
-            hdfs
-            pthread)
-    else()
-        target_link_libraries(test_libhdfs_vecsum
-            hdfs
-            pthread
-            rt)
-    endif()
-endif()
-
-if(REQUIRE_LIBWEBHDFS)
-    add_subdirectory(contrib/libwebhdfs)
-endif()
-add_subdirectory(main/native/fuse-dfs)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b19ed/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake b/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
deleted file mode 100644
index 0d11fc4..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-#ifndef CONFIG_H
-#define CONFIG_H
-
-#cmakedefine _FUSE_DFS_VERSION "@_FUSE_DFS_VERSION@"
-
-#cmakedefine HAVE_BETTER_TLS
-
-#cmakedefine HAVE_INTEL_SSE_INTRINSICS
-
-#endif
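The #cmakedefine entries above become plain #define or commented-out lines in the generated config.h, and the C sources then guard optional features with #ifdef. A rough sketch of the consumer side (the printed strings are illustrative, not taken from the tree):

    #include <stdio.h>
    #include "config.h"

    int main(void)
    {
    #ifdef HAVE_BETTER_TLS
        printf("thread-local storage: __thread supported\n");
    #else
        printf("thread-local storage: falling back to pthread keys\n");
    #endif
    #ifdef HAVE_INTEL_SSE_INTRINSICS
        printf("SSE intrinsics: available\n");
    #endif
        return 0;
    }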

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b19ed/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt
deleted file mode 100644
index dc74feb..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-find_package(CURL REQUIRED)
-
-set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
-    "${CMAKE_SOURCE_DIR}/contrib/libwebhdfs/resources/")
-
-find_package(Jansson REQUIRED)
-include_directories(${JANSSON_INCLUDE_DIR})
-
-hadoop_add_dual_library(webhdfs
-    src/hdfs_web.c
-    src/hdfs_http_client.c
-    src/hdfs_http_query.c
-    src/hdfs_json_parser.c
-    ../../main/native/libhdfs/exception.c
-    ../../main/native/libhdfs/jni_helper.c
-    ../../${OS_DIR}/mutexes.c
-    ../../${OS_DIR}/thread_local_storage.c
-    ../../main/native/libhdfs/common/htable.c
-)
-hadoop_target_link_dual_libraries(webhdfs
-    ${JAVA_JVM_LIBRARY}
-    ${CURL_LIBRARY}
-    ${JANSSON_LIBRARY}
-    pthread
-)
-hadoop_dual_output_directory(webhdfs target)
-set(LIBWEBHDFS_VERSION "0.0.0")
-set_target_properties(webhdfs PROPERTIES
-    SOVERSION ${LIBWEBHDFS_VERSION})
-
-add_executable(test_libwebhdfs_ops
-    src/test_libwebhdfs_ops.c
-)
-target_link_libraries(test_libwebhdfs_ops
-    webhdfs
-    native_mini_dfs
-)
-
-add_executable(test_libwebhdfs_read
-    src/test_libwebhdfs_read.c
-)
-target_link_libraries(test_libwebhdfs_read
-    webhdfs
-)
-
-add_executable(test_libwebhdfs_write
-    src/test_libwebhdfs_write.c
-)
-target_link_libraries(test_libwebhdfs_write
-    webhdfs
-)
-
-add_executable(test_libwebhdfs_threaded
-    src/test_libwebhdfs_threaded.c
-)
-target_link_libraries(test_libwebhdfs_threaded
-    webhdfs
-    native_mini_dfs
-    pthread
-)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b19ed/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/resources/FindJansson.cmake
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/resources/FindJansson.cmake b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/resources/FindJansson.cmake
deleted file mode 100644
index b8c67ea..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/resources/FindJansson.cmake
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-# - Try to find Jansson
-# Once done this will define
-#  JANSSON_FOUND - System has Jansson
-#  JANSSON_INCLUDE_DIRS - The Jansson include directories
-#  JANSSON_LIBRARIES - The libraries needed to use Jansson
-#  JANSSON_DEFINITIONS - Compiler switches required for using Jansson
-
-find_path(JANSSON_INCLUDE_DIR jansson.h
-          /usr/include
-          /usr/include/jansson
-          /usr/local/include )
-
-find_library(JANSSON_LIBRARY NAMES jansson
-             PATHS /usr/lib /usr/local/lib )
-
-set(JANSSON_LIBRARIES ${JANSSON_LIBRARY} )
-set(JANSSON_INCLUDE_DIRS ${JANSSON_INCLUDE_DIR} )
-
-include(FindPackageHandleStandardArgs)
-# handle the QUIETLY and REQUIRED arguments and set JANSSON_FOUND to TRUE
-# if all listed variables are TRUE
-find_package_handle_standard_args(Jansson  DEFAULT_MSG
-                                  JANSSON_LIBRARY JANSSON_INCLUDE_DIR)
-
-mark_as_advanced(JANSSON_INCLUDE_DIR JANSSON_LIBRARY )
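FindJansson.cmake only locates the Jansson headers and library; the webhdfs client itself decodes NameNode JSON replies with the Jansson C API (in hdfs_json_parser.c). A minimal sketch of that style of parsing, with an invented reply string:

    #include <stdio.h>
    #include <jansson.h>

    int main(void)
    {
        /* A WebHDFS-style boolean reply; the payload is made up for illustration. */
        const char *reply = "{\"boolean\": true}";
        json_error_t err;
        json_t *root = json_loads(reply, 0, &err);
        if (!root) {
            fprintf(stderr, "JSON parse error: %s\n", err.text);
            return 1;
        }
        json_t *val = json_object_get(root, "boolean");
        printf("boolean = %s\n", json_is_true(val) ? "true" : "false");
        json_decref(root);
        return 0;
    }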

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b19ed/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.c
deleted file mode 100644
index dc5ca41..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.c
+++ /dev/null
@@ -1,490 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdlib.h>
-#include <string.h>
-#include <curl/curl.h>
-
-#include "hdfs_http_client.h"
-#include "exception.h"
-
-static pthread_mutex_t curlInitMutex = PTHREAD_MUTEX_INITIALIZER;
-static volatile int curlGlobalInited = 0;
-
-const char *hdfs_strerror(int errnoval)
-{
-#if defined(__sun)
-// MT-Safe under Solaris which doesn't support sys_errlist/sys_nerr
-  return strerror(errnoval);
-#else
-  if ((errnoval < 0) || (errnoval >= sys_nerr)) {
-    return "unknown error.";
-  }
-  return sys_errlist[errnoval];
-#endif
-}
-
-int initResponseBuffer(struct ResponseBuffer **buffer)
-{
-    struct ResponseBuffer *info = NULL;
-    int ret = 0;
-    info = calloc(1, sizeof(struct ResponseBuffer));
-    if (!info) {
-        ret = ENOMEM;
-    }
-    *buffer = info;
-    return ret;
-}
-
-void freeResponseBuffer(struct ResponseBuffer *buffer)
-{
-    if (buffer) {
-        if (buffer->content) {
-            free(buffer->content);
-        }
-        free(buffer);
-        buffer = NULL;
-    }
-}
-
-void freeResponse(struct Response *resp)
-{
-    if (resp) {
-        freeResponseBuffer(resp->body);
-        freeResponseBuffer(resp->header);
-        free(resp);
-        resp = NULL;
-    }
-}
-
-/** 
- * Callback used by libcurl for allocating a local buffer and
- * copying received data into it
- */
-static size_t writefunc(void *ptr, size_t size,
-                        size_t nmemb, struct ResponseBuffer *rbuffer)
-{
-    void *temp = NULL;
-    if (size * nmemb < 1) {
-        return 0;
-    }
-    if (!rbuffer) {
-        fprintf(stderr,
-                "ERROR: ResponseBuffer is NULL for the callback writefunc.\n");
-        return 0;
-    }
-    
-    if (rbuffer->remaining < size * nmemb) {
-        temp = realloc(rbuffer->content, rbuffer->offset + size * nmemb + 1);
-        if (temp == NULL) {
-            fprintf(stderr, "ERROR: fail to realloc in callback writefunc.\n");
-            return 0;
-        }
-        rbuffer->content = temp;
-        rbuffer->remaining = size * nmemb;
-    }
-    memcpy(rbuffer->content + rbuffer->offset, ptr, size * nmemb);
-    rbuffer->offset += size * nmemb;
-    (rbuffer->content)[rbuffer->offset] = '\0';
-    rbuffer->remaining -= size * nmemb;
-    return size * nmemb;
-}
-
-/**
- * Callback used by libcurl for reading data into the buffer provided by the
- * user, so the buffer does not need to be reallocated.
- */
-static size_t writeFuncWithUserBuffer(void *ptr, size_t size,
-                                   size_t nmemb, struct ResponseBuffer *rbuffer)
-{
-    size_t toCopy = 0;
-    if (size * nmemb < 1) {
-        return 0;
-    }
-    if (!rbuffer || !rbuffer->content) {
-        fprintf(stderr,
-                "ERROR: buffer to read is NULL for the "
-                "callback writeFuncWithUserBuffer.\n");
-        return 0;
-    }
-    
-    toCopy = rbuffer->remaining < (size * nmemb) ?
-                            rbuffer->remaining : (size * nmemb);
-    memcpy(rbuffer->content + rbuffer->offset, ptr, toCopy);
-    rbuffer->offset += toCopy;
-    rbuffer->remaining -= toCopy;
-    return toCopy;
-}
-
-/**
- * Callback used by libcurl for writing data to remote peer
- */
-static size_t readfunc(void *ptr, size_t size, size_t nmemb, void *stream)
-{
-    struct webhdfsBuffer *wbuffer = NULL;
-    if (size * nmemb < 1) {
-        return 0;
-    }
-    
-    wbuffer = stream;
-    pthread_mutex_lock(&wbuffer->writeMutex);
-    while (wbuffer->remaining == 0) {
-        /*
-         * The current number of remaining bytes to write is 0;
-         * check closeFlag to see whether the transfer should finish.
-         * If yes, return 0; otherwise, wait.
-         */
-        if (wbuffer->closeFlag) { // We can close the transfer now
-            //For debug
-            fprintf(stderr, "CloseFlag is set, ready to close the transfer\n");
-            pthread_mutex_unlock(&wbuffer->writeMutex);
-            return 0;
-        } else {
-            // remaining == 0 but closeFlag is not set
-            // indicates that user's buffer has been transferred
-            pthread_cond_signal(&wbuffer->transfer_finish);
-            pthread_cond_wait(&wbuffer->newwrite_or_close,
-                                    &wbuffer->writeMutex);
-        }
-    }
-    
-    if (wbuffer->remaining > 0 && !wbuffer->closeFlag) {
-        size_t copySize = wbuffer->remaining < size * nmemb ?
-                                wbuffer->remaining : size * nmemb;
-        memcpy(ptr, wbuffer->wbuffer + wbuffer->offset, copySize);
-        wbuffer->offset += copySize;
-        wbuffer->remaining -= copySize;
-        pthread_mutex_unlock(&wbuffer->writeMutex);
-        return copySize;
-    } else {
-        fprintf(stderr, "ERROR: webhdfsBuffer's remaining is %ld, "
-                "it should be a positive value!\n", wbuffer->remaining);
-        pthread_mutex_unlock(&wbuffer->writeMutex);
-        return 0;
-    }
-}
-
-/**
- * Initialize the global libcurl environment
- */
-static void initCurlGlobal()
-{
-    if (!curlGlobalInited) {
-        pthread_mutex_lock(&curlInitMutex);
-        if (!curlGlobalInited) {
-            curl_global_init(CURL_GLOBAL_ALL);
-            curlGlobalInited = 1;
-        }
-        pthread_mutex_unlock(&curlInitMutex);
-    }
-}
-
-/**
- * Launch simple commands (commands without file I/O) and return response
- *
- * @param url       Target URL
- * @param method    HTTP method (GET/PUT/POST/DELETE)
- * @param followloc Whether to set CURLOPT_FOLLOWLOCATION
- * @param response  Response from remote service
- * @return 0 for success and non-zero value to indicate error
- */
-static int launchCmd(const char *url, enum HttpHeader method,
-                     enum Redirect followloc, struct Response **response)
-{
-    CURL *curl = NULL;
-    CURLcode curlCode;
-    int ret = 0;
-    struct Response *resp = NULL;
-    
-    resp = calloc(1, sizeof(struct Response));
-    if (!resp) {
-        return ENOMEM;
-    }
-    ret = initResponseBuffer(&(resp->body));
-    if (ret) {
-        goto done;
-    }
-    ret = initResponseBuffer(&(resp->header));
-    if (ret) {
-        goto done;
-    }
-    initCurlGlobal();
-    curl = curl_easy_init();
-    if (!curl) {
-        ret = ENOMEM;       // curl_easy_init does not return error code,
-                            // and most of its errors are caused by malloc()
-        fprintf(stderr, "ERROR in curl_easy_init.\n");
-        goto done;
-    }
-    /* Set callback function for reading data from remote service */
-    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writefunc);
-    curl_easy_setopt(curl, CURLOPT_WRITEDATA, resp->body);
-    curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
-    curl_easy_setopt(curl, CURLOPT_WRITEHEADER, resp->header);
-    curl_easy_setopt(curl, CURLOPT_URL, url);
-    switch(method) {
-        case GET:
-            break;
-        case PUT:
-            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PUT");
-            break;
-        case POST:
-            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "POST");
-            break;
-        case DELETE:
-            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "DELETE");
-            break;
-        default:
-            ret = EINVAL;
-            fprintf(stderr, "ERROR: Invalid HTTP method\n");
-            goto done;
-    }
-    if (followloc == YES) {
-        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
-    }
-    /* Now run the curl handler */
-    curlCode = curl_easy_perform(curl);
-    if (curlCode != CURLE_OK) {
-        ret = EIO;
-        fprintf(stderr, "ERROR: preform the URL %s failed, <%d>: %s\n",
-                url, curlCode, curl_easy_strerror(curlCode));
-    }
-done:
-    if (curl != NULL) {
-        curl_easy_cleanup(curl);
-    }
-    if (ret) {
-        free(resp);
-        resp = NULL;
-    }
-    *response = resp;
-    return ret;
-}
-
-/**
- * Launch the read request. The request is sent to the NameNode and then 
- * redirected to the corresponding DataNode
- *
- * @param url   The URL for the read request
- * @param resp  The response containing the buffer provided by user
- * @return 0 for success and non-zero value to indicate error
- */
-static int launchReadInternal(const char *url, struct Response* resp)
-{
-    CURL *curl;
-    CURLcode curlCode;
-    int ret = 0;
-    
-    if (!resp || !resp->body || !resp->body->content) {
-        fprintf(stderr,
-                "ERROR: invalid user-provided buffer!\n");
-        return EINVAL;
-    }
-    
-    initCurlGlobal();
-    /* get a curl handle */
-    curl = curl_easy_init();
-    if (!curl) {
-        fprintf(stderr, "ERROR in curl_easy_init.\n");
-        return ENOMEM;
-    }
-    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writeFuncWithUserBuffer);
-    curl_easy_setopt(curl, CURLOPT_WRITEDATA, resp->body);
-    curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
-    curl_easy_setopt(curl, CURLOPT_WRITEHEADER, resp->header);
-    curl_easy_setopt(curl, CURLOPT_URL, url);
-    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
-    
-    curlCode = curl_easy_perform(curl);
-    if (curlCode != CURLE_OK && curlCode != CURLE_PARTIAL_FILE) {
-        ret = EIO;
-        fprintf(stderr, "ERROR: preform the URL %s failed, <%d>: %s\n",
-                url, curlCode, curl_easy_strerror(curlCode));
-    }
-    
-    curl_easy_cleanup(curl);
-    return ret;
-}
-
-/**
- * The function does the write operation by connecting to a DataNode. 
- * The function keeps the connection with the DataNode until 
- * the closeFlag is set. Whenever the current data has been sent out, 
- * the function blocks waiting for further input from user or close.
- *
- * @param url           URL of the remote DataNode
- * @param method        PUT for create and POST for append
- * @param uploadBuffer  Buffer storing user's data to write
- * @param response      Response from remote service
- * @return 0 for success and non-zero value to indicate error
- */
-static int launchWrite(const char *url, enum HttpHeader method,
-                       struct webhdfsBuffer *uploadBuffer,
-                       struct Response **response)
-{
-    CURLcode curlCode;
-    struct Response* resp = NULL;
-    struct curl_slist *chunk = NULL;
-    CURL *curl = NULL;
-    int ret = 0;
-    
-    if (!uploadBuffer) {
-        fprintf(stderr, "ERROR: upload buffer is NULL!\n");
-        return EINVAL;
-    }
-    
-    initCurlGlobal();
-    resp = calloc(1, sizeof(struct Response));
-    if (!resp) {
-        return ENOMEM;
-    }
-    ret = initResponseBuffer(&(resp->body));
-    if (ret) {
-        goto done;
-    }
-    ret = initResponseBuffer(&(resp->header));
-    if (ret) {
-        goto done;
-    }
-    
-    // Connect to the datanode in order to create the lease in the namenode
-    curl = curl_easy_init();
-    if (!curl) {
-        fprintf(stderr, "ERROR: failed to initialize the curl handle.\n");
-        ret = ENOMEM;
-        goto done;
-    }
-    curl_easy_setopt(curl, CURLOPT_URL, url);
-    
-    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writefunc);
-    curl_easy_setopt(curl, CURLOPT_WRITEDATA, resp->body);
-    curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
-    curl_easy_setopt(curl, CURLOPT_WRITEHEADER, resp->header);
-    curl_easy_setopt(curl, CURLOPT_READFUNCTION, readfunc);
-    curl_easy_setopt(curl, CURLOPT_READDATA, uploadBuffer);
-    curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
-    
-    chunk = curl_slist_append(chunk, "Transfer-Encoding: chunked");
-    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
-    chunk = curl_slist_append(chunk, "Expect:");
-    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
-    
-    switch(method) {
-        case PUT:
-            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PUT");
-            break;
-        case POST:
-            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "POST");
-            break;
-        default:
-            ret = EINVAL;
-            fprintf(stderr, "ERROR: Invalid HTTP method\n");
-            goto done;
-    }
-    curlCode = curl_easy_perform(curl);
-    if (curlCode != CURLE_OK) {
-        ret = EIO;
-        fprintf(stderr, "ERROR: preform the URL %s failed, <%d>: %s\n",
-                url, curlCode, curl_easy_strerror(curlCode));
-    }
-    
-done:
-    if (chunk != NULL) {
-        curl_slist_free_all(chunk);
-    }
-    if (curl != NULL) {
-        curl_easy_cleanup(curl);
-    }
-    if (ret) {
-        free(resp);
-        resp = NULL;
-    }
-    *response = resp;
-    return ret;
-}
-
-int launchMKDIR(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchRENAME(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchGFS(const char *url, struct Response **resp)
-{
-    return launchCmd(url, GET, NO, resp);
-}
-
-int launchLS(const char *url, struct Response **resp)
-{
-    return launchCmd(url, GET, NO, resp);
-}
-
-int launchCHMOD(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchCHOWN(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchDELETE(const char *url, struct Response **resp)
-{
-    return launchCmd(url, DELETE, NO, resp);
-}
-
-int launchOPEN(const char *url, struct Response* resp)
-{
-    return launchReadInternal(url, resp);
-}
-
-int launchUTIMES(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchNnWRITE(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchNnAPPEND(const char *url, struct Response **resp)
-{
-    return launchCmd(url, POST, NO, resp);
-}
-
-int launchDnWRITE(const char *url, struct webhdfsBuffer *buffer,
-                               struct Response **resp)
-{
-    return launchWrite(url, PUT, buffer, resp);
-}
-
-int launchDnAPPEND(const char *url, struct webhdfsBuffer *buffer,
-                                struct Response **resp)
-{
-    return launchWrite(url, POST, buffer, resp);
-}
-
-int launchSETREPLICATION(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
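The callbacks in this file follow libcurl's standard pattern: a CURLOPT_WRITEFUNCTION drains the response into a caller-owned buffer, while a CURLOPT_READFUNCTION feeds chunked upload data. A stripped-down sketch of the same wiring against a placeholder URL, with none of the WebHDFS specifics:

    #include <stdio.h>
    #include <curl/curl.h>

    /* Forward each chunk the server sends to stdout; same shape as writefunc above. */
    static size_t sink(void *ptr, size_t size, size_t nmemb, void *userdata)
    {
        (void)userdata;
        fwrite(ptr, size, nmemb, stdout);
        return size * nmemb;
    }

    int main(void)
    {
        curl_global_init(CURL_GLOBAL_ALL);
        CURL *curl = curl_easy_init();
        if (!curl) {
            return 1;
        }
        curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");  /* placeholder URL */
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, sink);
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
        CURLcode rc = curl_easy_perform(curl);
        if (rc != CURLE_OK) {
            fprintf(stderr, "curl: %s\n", curl_easy_strerror(rc));
        }
        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return 0;
    }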

http://git-wip-us.apache.org/repos/asf/hadoop/blob/960b19ed/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h
deleted file mode 100644
index 8d1c3db..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h
+++ /dev/null
@@ -1,294 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-#ifndef _HDFS_HTTP_CLIENT_H_
-#define _HDFS_HTTP_CLIENT_H_
-
-#include "hdfs.h" /* for tSize */
-
-#include <pthread.h> /* for pthread_t */
-#include <unistd.h> /* for size_t */
-
-/** enum indicating the type of hdfs stream */
-enum hdfsStreamType
-{
-    UNINITIALIZED = 0,
-    INPUT = 1,
-    OUTPUT = 2,
-};
-
-/**
- * webhdfsBuffer - used to hold the data for reads/writes from/to the http connection
- */
-struct webhdfsBuffer {
-    const char *wbuffer;  /* The user's buffer for uploading */
-    size_t remaining;     /* Length of content */
-    size_t offset;        /* offset for reading */
-    /* Check whether the hdfsOpenFile has been called before */
-    int openFlag;
-    /* Whether to close the http connection for writing */
-    int closeFlag;
-    /* Synchronization between the curl and hdfsWrite threads */
-    pthread_mutex_t writeMutex;
-    /* 
-     * Transferring thread waits for this condition
-     * when there is no more content for transferring in the buffer
-     */
-    pthread_cond_t newwrite_or_close;
-    /* Condition used to indicate finishing transferring (one buffer) */
-    pthread_cond_t transfer_finish;
-};
-
-/** File handle for webhdfs */
-struct webhdfsFileHandle {
-    char *absPath;        /* Absolute path of file */
-    int bufferSize;       /* Size of buffer */
-    short replication;    /* Number of replicas */
-    tSize blockSize;      /* Block size */
-    char *datanode;       /* URL of the DataNode */
-    /* webhdfsBuffer handle used to store the upload data */
-    struct webhdfsBuffer *uploadBuffer;
-    /* The thread used for data transferring */
-    pthread_t connThread;
-};
-
-/** Type of http header */
-enum HttpHeader {
-    GET,
-    PUT,
-    POST,
-    DELETE
-};
-
-/** Whether to redirect */
-enum Redirect {
-    YES,
-    NO
-};
-
-/** Buffer used for holding response */
-struct ResponseBuffer {
-    char *content;
-    size_t remaining;
-    size_t offset;
-};
-
-/**
- * The response received through webhdfs
- */
-struct Response {
-    struct ResponseBuffer *body;
-    struct ResponseBuffer *header;
-};
-
-/**
- * Create and initialize a ResponseBuffer
- *
- * @param buffer Pointer pointing to new created ResponseBuffer handle
- * @return 0 for success, non-zero value to indicate error
- */
-int initResponseBuffer(struct ResponseBuffer **buffer) __attribute__ ((warn_unused_result));
-
-/**
- * Free the given ResponseBuffer
- *
- * @param buffer The ResponseBuffer to free
- */
-void freeResponseBuffer(struct ResponseBuffer *buffer);
-
-/**
- * Free the given Response
- *
- * @param resp The Response to free
- */
-void freeResponse(struct Response *resp);
-
-/**
- * Send the MKDIR request to NameNode using the given URL. 
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for MKDIR operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchMKDIR(const char *url,
-                struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the RENAME request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for RENAME operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchRENAME(const char *url,
-                 struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the CHMOD request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for CHMOD operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchCHMOD(const char *url,
-                struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the GetFileStatus request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for GetFileStatus operation
- * @param response Response handle to store response returned from the NameNode,
- *                 containing either file status or exception information
- * @return 0 for success, non-zero value to indicate error
- */
-int launchGFS(const char *url,
-              struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the LS (LISTSTATUS) request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for LISTSTATUS operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchLS(const char *url,
-             struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the DELETE request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for DELETE operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchDELETE(const char *url,
-                 struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the CHOWN request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for CHOWN operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchCHOWN(const char *url,
-                struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the OPEN request to NameNode using the given URL, 
- * asking for reading a file (within a range). 
- * The NameNode first redirects the request to the datanode
- * that holds the corresponding first block of the file (within a range),
- * and the datanode returns the content of the file through the HTTP connection.
- *
- * @param url The URL for OPEN operation
- * @param resp The response holding the user's buffer.
- *             The file content will be written into the buffer.
- * @return 0 for success, non-zero value to indicate error
- */
-int launchOPEN(const char *url,
-               struct Response* resp) __attribute__ ((warn_unused_result));
-
-/**
- * Send the SETTIMES request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for SETTIMES operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchUTIMES(const char *url,
-                 struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the WRITE/CREATE request to NameNode using the given URL.
- * The NameNode will choose the writing target datanodes 
- * and return the first datanode in the pipeline as response
- *
- * @param url The URL for WRITE/CREATE operation connecting to NameNode
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchNnWRITE(const char *url,
-                  struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the WRITE request along with the content to be written to
- * the corresponding DataNode using the given URL. 
- * The DataNode will write the data and return the response.
- *
- * @param url The URL for WRITE operation connecting to DataNode
- * @param buffer The webhdfsBuffer containing data to be written to hdfs
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchDnWRITE(const char *url, struct webhdfsBuffer *buffer,
-                  struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the WRITE (APPEND) request to NameNode using the given URL. 
- * The NameNode determines the DataNode for appending and 
- * sends its URL back as response.
- *
- * @param url The URL for APPEND operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchNnAPPEND(const char *url, struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the SETREPLICATION request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for SETREPLICATION operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchSETREPLICATION(const char *url,
-                         struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the APPEND request along with the content to DataNode.
- * The DataNode will do the appending and return the result as response.
- *
- * @param url The URL for APPEND operation connecting to DataNode
- * @param buffer The webhdfsBuffer containing data to be appended
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchDnAPPEND(const char *url, struct webhdfsBuffer *buffer,
-                   struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Thread-safe strerror alternative.
- *
- * @param errnoval  The error code value
- * @return          The error message string mapped to the given error code
- */
-const char *hdfs_strerror(int errnoval);
-
-#endif //_HDFS_HTTP_CLIENT_H_
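Taken together, the declarations above form a small request/response API: build a WebHDFS URL, call the matching launch* function, inspect the response buffers, and release them with freeResponse. A minimal sketch of that calling convention, assuming a hypothetical NameNode host, port, and path:

    #include <stdio.h>
    #include "hdfs_http_client.h"

    static int make_directory(void)
    {
        struct Response *resp = NULL;
        /* Hypothetical WebHDFS MKDIRS URL; host, port, path, and user are examples only. */
        const char *url =
            "http://namenode.example.com:50070/webhdfs/v1/tmp/demo?op=MKDIRS&user.name=hdfs";
        int ret = launchMKDIR(url, &resp);
        if (ret) {
            fprintf(stderr, "MKDIR request failed: %s\n", hdfs_strerror(ret));
            return ret;
        }
        /* The NameNode's JSON reply (and headers) land in the response buffers. */
        if (resp && resp->body && resp->body->content) {
            printf("reply: %s\n", resp->body->content);
        }
        freeResponse(resp);
        return 0;
    }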

