hadoop-common-commits mailing list archives

From whe...@apache.org
Subject [14/19] hadoop git commit: HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. Contributed by Haohui Mai.
Date Wed, 07 Oct 2015 07:16:22 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.c
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.c
new file mode 100644
index 0000000..50c89ea
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.c
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/htable.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+struct htable_pair {
+    void *key;
+    void *val;
+};
+
+/**
+ * A hash table which uses linear probing.
+ */
+struct htable {
+    uint32_t capacity;
+    uint32_t used;
+    htable_hash_fn_t hash_fun;
+    htable_eq_fn_t eq_fun;
+    struct htable_pair *elem;
+};
+
+/**
+ * An internal function for inserting a value into the hash table.
+ *
+ * Note: this function assumes that you have made enough space in the table.
+ *
+ * @param nelem         The hash table element array to insert into.
+ * @param capacity      The capacity of the hash table.
+ * @param hash_fun      The hash function to use.
+ * @param key           The key to insert.
+ * @param val           The value to insert.
+ */
+static void htable_insert_internal(struct htable_pair *nelem, 
+        uint32_t capacity, htable_hash_fn_t hash_fun, void *key,
+        void *val)
+{
+    uint32_t i;
+
+    i = hash_fun(key, capacity);
+    while (1) {
+        if (!nelem[i].key) {
+            nelem[i].key = key;
+            nelem[i].val = val;
+            return;
+        }
+        i++;
+        if (i == capacity) {
+            i = 0;
+        }
+    }
+}
+
+static int htable_realloc(struct htable *htable, uint32_t new_capacity)
+{
+    struct htable_pair *nelem;
+    uint32_t i, old_capacity = htable->capacity;
+    htable_hash_fn_t hash_fun = htable->hash_fun;
+
+    nelem = calloc(new_capacity, sizeof(struct htable_pair));
+    if (!nelem) {
+        return ENOMEM;
+    }
+    for (i = 0; i < old_capacity; i++) {
+        struct htable_pair *pair = htable->elem + i;
+        if (pair->key) {
+            htable_insert_internal(nelem, new_capacity, hash_fun,
+                                   pair->key, pair->val);
+        }
+    }
+    free(htable->elem);
+    htable->elem = nelem;
+    htable->capacity = new_capacity;
+    return 0;
+}
+
+static uint32_t round_up_to_power_of_2(uint32_t i)
+{
+    if (i == 0) {
+        return 1;
+    }
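+    // Subtract one so that exact powers of two map to themselves, then
+    // smear the highest set bit into every bit below it; the final
+    // increment produces the next power of two.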
+    i--;
+    i |= i >> 1;
+    i |= i >> 2;
+    i |= i >> 4;
+    i |= i >> 8;
+    i |= i >> 16;
+    i++;
+    return i;
+}
+
+struct htable *htable_alloc(uint32_t size,
+                htable_hash_fn_t hash_fun, htable_eq_fn_t eq_fun)
+{
+    struct htable *htable;
+
+    htable = calloc(1, sizeof(*htable));
+    if (!htable) {
+        return NULL;
+    }
+    size = round_up_to_power_of_2(size);
+    if (size < HTABLE_MIN_SIZE) {
+        size = HTABLE_MIN_SIZE;
+    }
+    htable->hash_fun = hash_fun;
+    htable->eq_fun = eq_fun;
+    htable->used = 0;
+    if (htable_realloc(htable, size)) {
+        free(htable);
+        return NULL;
+    }
+    return htable;
+}
+
+void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx)
+{
+    uint32_t i;
+
+    for (i = 0; i != htable->capacity; ++i) {
+        struct htable_pair *elem = htable->elem + i;
+        if (elem->key) {
+            fun(ctx, elem->key, elem->val);
+        }
+    }
+}
+
+void htable_free(struct htable *htable)
+{
+    if (htable) {
+        free(htable->elem);
+        free(htable);
+    }
+}
+
+int htable_put(struct htable *htable, void *key, void *val)
+{
+    int ret;
+    uint32_t nused;
+
+    // NULL is not a valid key value.
+    // This helps us implement htable_get_internal efficiently, since we know
+    // that we can stop when we encounter the first NULL key.
+    if (!key) {
+        return EINVAL;
+    }
+    // NULL is not a valid value.  Otherwise the results of htable_get would
+    // be confusing (does a NULL return mean entry not found, or that the
+    // entry was found and was NULL?) 
+    if (!val) {
+        return EINVAL;
+    }
+    // Re-hash if we have used more than half of the hash table
+    nused = htable->used + 1;
+    if (nused >= (htable->capacity / 2)) {
+        ret = htable_realloc(htable, htable->capacity * 2);
+        if (ret)
+            return ret;
+    }
+    htable_insert_internal(htable->elem, htable->capacity,
+                                htable->hash_fun, key, val);
+    htable->used++;
+    return 0;
+}
+
+static int htable_get_internal(const struct htable *htable,
+                               const void *key, uint32_t *out)
+{
+    uint32_t start_idx, idx;
+
+    start_idx = htable->hash_fun(key, htable->capacity);
+    idx = start_idx;
+    while (1) {
+        struct htable_pair *pair = htable->elem + idx;
+        if (!pair->key) {
+            // We always maintain the invariant that the entries corresponding
+            // to a given key are stored in a contiguous block, not separated
+            // by any NULLs.  So if we encounter a NULL, our search is over.
+            return ENOENT;
+        } else if (htable->eq_fun(pair->key, key)) {
+            *out = idx;
+            return 0;
+        }
+        idx++;
+        if (idx == htable->capacity) {
+            idx = 0;
+        }
+        if (idx == start_idx) {
+            return ENOENT;
+        }
+    }
+}
+
+void *htable_get(const struct htable *htable, const void *key)
+{
+    uint32_t idx;
+
+    if (htable_get_internal(htable, key, &idx)) {
+        return NULL;
+    }
+    return htable->elem[idx].val;
+}
+
+void htable_pop(struct htable *htable, const void *key,
+                void **found_key, void **found_val)
+{
+    uint32_t hole, i;
+    const void *nkey;
+
+    if (htable_get_internal(htable, key, &hole)) {
+        *found_key = NULL;
+        *found_val = NULL;
+        return;
+    }
+    i = hole;
+    htable->used--;
+    // We need to maintain the compactness invariant used in
+    // htable_get_internal.  This invariant specifies that the entries for any
+    // given key are never separated by NULLs (although they may be separated
+    // by entries for other keys.)
+    while (1) {
+        i++;
+        if (i == htable->capacity) {
+            i = 0;
+        }
+        nkey = htable->elem[i].key;
+        if (!nkey) {
+            *found_key = htable->elem[hole].key;
+            *found_val = htable->elem[hole].val;
+            htable->elem[hole].key = NULL;
+            htable->elem[hole].val = NULL;
+            return;
+        } else if (htable->eq_fun(key, nkey)) {
+            htable->elem[hole].key = htable->elem[i].key;
+            htable->elem[hole].val = htable->elem[i].val;
+            hole = i;
+        }
+    }
+}
+
+uint32_t htable_used(const struct htable *htable)
+{
+    return htable->used;
+}
+
+uint32_t htable_capacity(const struct htable *htable)
+{
+    return htable->capacity;
+}
+
+uint32_t ht_hash_string(const void *str, uint32_t max)
+{
+    const char *s = str;
+    uint32_t hash = 0;
+
+    while (*s) {
+        hash = (hash * 31) + *s;
+        s++;
+    }
+    return hash % max;
+}
+
+int ht_compare_string(const void *a, const void *b)
+{
+    return strcmp(a, b) == 0;
+}
+
+// vim: ts=4:sw=4:tw=79:et
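
For readers skimming the new API, a minimal usage sketch (not part of the
patch) follows, assuming only the declarations in common/htable.h;
htable_example, print_entry, and the string keys are illustrative names:

#include "common/htable.h"

#include <errno.h>
#include <stdio.h>

// Visitor callback: print one key/value pair.  The ctx pointer is unused.
static void print_entry(void *ctx, void *key, void *val)
{
    (void)ctx;
    printf("%s -> %s\n", (const char *)key, (const char *)val);
}

int htable_example(void)
{
    void *found_key, *found_val;
    struct htable *ht;

    // String keys hash with ht_hash_string and compare with
    // ht_compare_string, both defined above.
    ht = htable_alloc(16, ht_hash_string, ht_compare_string);
    if (!ht)
        return ENOMEM;
    if (htable_put(ht, "alpha", "1") || htable_put(ht, "beta", "2")) {
        htable_free(ht);
        return ENOMEM;
    }
    printf("beta = %s\n", (const char *)htable_get(ht, "beta"));
    htable_visit(ht, print_entry, NULL);
    // Remove "alpha"; found_key/found_val receive the stored pointers.
    htable_pop(ht, "alpha", &found_key, &found_val);
    printf("popped %s -> %s\n", (const char *)found_key,
           (const char *)found_val);
    htable_free(ht);
    return 0;
}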

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.h
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.h
new file mode 100644
index 0000000..33f1229
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.h
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_CORE_COMMON_HASH_TABLE
+#define HADOOP_CORE_COMMON_HASH_TABLE
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdint.h>
+
+#define HTABLE_MIN_SIZE 4
+
+struct htable;
+
+/**
+ * An HTable hash function.
+ *
+ * @param key       The key.
+ * @param capacity  The total capacity.
+ *
+ * @return          The hash slot.  Must be less than the capacity.
+ */
+typedef uint32_t (*htable_hash_fn_t)(const void *key, uint32_t capacity);
+
+/**
+ * An HTable equality function.  Compares two keys.
+ *
+ * @param a         First key.
+ * @param b         Second key.
+ *
+ * @return          nonzero if the keys are equal.
+ */
+typedef int (*htable_eq_fn_t)(const void *a, const void *b);
+
+/**
+ * Allocate a new hash table.
+ *
+ * @param capacity  The minimum suggested starting capacity.
+ * @param hash_fun  The hash function to use in this hash table.
+ * @param eq_fun    The equals function to use in this hash table.
+ *
+ * @return          The new hash table on success; NULL on OOM.
+ */
+struct htable *htable_alloc(uint32_t capacity, htable_hash_fn_t hash_fun,
+                            htable_eq_fn_t eq_fun);
+
+typedef void (*visitor_fn_t)(void *ctx, void *key, void *val);
+
+/**
+ * Visit all of the entries in the hash table.
+ *
+ * @param htable    The hash table.
+ * @param fun       The callback function to invoke on each key and value.
+ * @param ctx       Context pointer to pass to the callback.
+ */
+void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx);
+
+/**
+ * Free the hash table.
+ *
+ * It is up to the calling code to ensure that the keys and values inside the
+ * table are de-allocated, if that is necessary.
+ *
+ * @param htable    The hash table.
+ */
+void htable_free(struct htable *htable);
+
+/**
+ * Add an entry to the hash table.
+ *
+ * @param htable    The hash table.
+ * @param key       The key to add.  This cannot be NULL.
+ * @param val       The value to add.  This cannot be NULL.
+ *
+ * @return          0 on success;
+ *                  EEXIST if the value already exists in the table;
+ *                  ENOMEM if there is not enough memory to add the element;
+ *                  EFBIG if the hash table has too many entries to fit in 32
+ *                      bits.
+ */
+int htable_put(struct htable *htable, void *key, void *val);
+
+/**
+ * Get an entry from the hash table.
+ *
+ * @param htable    The hash table.
+ * @param key       The key to find.
+ *
+ * @return          NULL if there is no such entry; the entry otherwise.
+ */
+void *htable_get(const struct htable *htable, const void *key);
+
+/**
+ * Get an entry from the hash table and remove it.
+ *
+ * @param htable    The hash table.
+ * @param key       The key for the entry to find and remove.
+ * @param found_key (out param) NULL if the entry was not found; the found key
+ *                      otherwise.
+ * @param found_val (out param) NULL if the entry was not found; the found
+ *                      value otherwise.
+ */
+void htable_pop(struct htable *htable, const void *key,
+                void **found_key, void **found_val);
+
+/**
+ * Get the number of entries used in the hash table.
+ *
+ * @param htable    The hash table.
+ *
+ * @return          The number of entries used in the hash table.
+ */
+uint32_t htable_used(const struct htable *htable);
+
+/**
+ * Get the capacity of the hash table.
+ *
+ * @param htable    The hash table.
+ *
+ * @return          The capacity of the hash table.
+ */
+uint32_t htable_capacity(const struct htable *htable);
+
+/**
+ * Hash a string.
+ *
+ * @param str       The string.
+ * @param max       The maximum hash value (exclusive).
+ *
+ * @return          A number less than max.
+ */
+uint32_t ht_hash_string(const void *str, uint32_t max);
+
+/**
+ * Compare two strings.
+ *
+ * @param a         The first string.
+ * @param b         The second string.
+ *
+ * @return          1 if the strings are identical; 0 otherwise.
+ */
+int ht_compare_string(const void *a, const void *b);
+
+#endif
+
+// vim: ts=4:sw=4:tw=79:et
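
A hedged sketch (not part of the patch) of supplying custom callbacks: any
functions matching htable_hash_fn_t and htable_eq_fn_t will do, and since
htable_alloc rounds the capacity up to a power of two (and htable_realloc
only ever doubles it), masking with capacity - 1 always yields a valid slot.
hash_ptr and eq_ptr are invented names:

#include "common/htable.h"

#include <stdint.h>

// Hash the pointer value itself.  Shift off the low alignment bits so that
// heap pointers do not all land in the same few slots; the capacity is
// always a power of two, so the mask keeps the result in range.
static uint32_t hash_ptr(const void *key, uint32_t capacity)
{
    return (uint32_t)((uintptr_t)key >> 3) & (capacity - 1);
}

// Two pointer keys are equal only when they are the same pointer.
static int eq_ptr(const void *a, const void *b)
{
    return a == b;
}

// Usage: struct htable *ht = htable_alloc(64, hash_ptr, eq_ptr);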

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
new file mode 100644
index 0000000..eb7115c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
@@ -0,0 +1,239 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "exception.h"
+#include "hdfs.h"
+#include "jni_helper.h"
+#include "platform.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define EXCEPTION_INFO_LEN (sizeof(gExceptionInfo)/sizeof(gExceptionInfo[0]))
+
+struct ExceptionInfo {
+    const char * const name;
+    int noPrintFlag;
+    int excErrno;
+};
+
+static const struct ExceptionInfo gExceptionInfo[] = {
+    {
+        "java.io.FileNotFoundException",
+        NOPRINT_EXC_FILE_NOT_FOUND,
+        ENOENT,
+    },
+    {
+        "org.apache.hadoop.security.AccessControlException",
+        NOPRINT_EXC_ACCESS_CONTROL,
+        EACCES,
+    },
+    {
+        "org.apache.hadoop.fs.UnresolvedLinkException",
+        NOPRINT_EXC_UNRESOLVED_LINK,
+        ENOLINK,
+    },
+    {
+        "org.apache.hadoop.fs.ParentNotDirectoryException",
+        NOPRINT_EXC_PARENT_NOT_DIRECTORY,
+        ENOTDIR,
+    },
+    {
+        "java.lang.IllegalArgumentException",
+        NOPRINT_EXC_ILLEGAL_ARGUMENT,
+        EINVAL,
+    },
+    {
+        "java.lang.OutOfMemoryError",
+        0,
+        ENOMEM,
+    },
+    {
+        "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
+        0,
+        EROFS,
+    },
+    {
+        "org.apache.hadoop.fs.FileAlreadyExistsException",
+        0,
+        EEXIST,
+    },
+    {
+        "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
+        0,
+        EDQUOT,
+    },
+    {
+        "java.lang.UnsupportedOperationException",
+        0,
+        ENOTSUP,
+    },
+    {
+        "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
+        0,
+        ESTALE,
+    },
+};
+
+void getExceptionInfo(const char *excName, int noPrintFlags,
+                      int *excErrno, int *shouldPrint)
+{
+    int i;
+
+    for (i = 0; i < EXCEPTION_INFO_LEN; i++) {
+        if (strstr(gExceptionInfo[i].name, excName)) {
+            break;
+        }
+    }
+    if (i < EXCEPTION_INFO_LEN) {
+        *shouldPrint = !(gExceptionInfo[i].noPrintFlag & noPrintFlags);
+        *excErrno = gExceptionInfo[i].excErrno;
+    } else {
+        *shouldPrint = 1;
+        *excErrno = EINTERNAL;
+    }
+}
+
+int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
+        const char *fmt, va_list ap)
+{
+    int i, noPrint, excErrno;
+    char *className = NULL;
+    jstring jStr = NULL;
+    jvalue jVal;
+    jthrowable jthr;
+    const char *stackTrace;
+
+    jthr = classNameOfObject(exc, env, &className);
+    if (jthr) {
+        fprintf(stderr, "PrintExceptionAndFree: error determining class name "
+            "of exception.\n");
+        className = strdup("(unknown)");
+        destroyLocalReference(env, jthr);
+    }
+    for (i = 0; i < EXCEPTION_INFO_LEN; i++) {
+        if (!strcmp(gExceptionInfo[i].name, className)) {
+            break;
+        }
+    }
+    if (i < EXCEPTION_INFO_LEN) {
+        noPrint = (gExceptionInfo[i].noPrintFlag & noPrintFlags);
+        excErrno = gExceptionInfo[i].excErrno;
+    } else {
+        noPrint = 0;
+        excErrno = EINTERNAL;
+    }
+    if (!noPrint) {
+        vfprintf(stderr, fmt, ap);
+        fprintf(stderr, " error:\n");
+
+        // We don't want to use ExceptionDescribe here, because that requires a
+        // pending exception.  Instead, use ExceptionUtils.
+        jthr = invokeMethod(env, &jVal, STATIC, NULL, 
+            "org/apache/commons/lang/exception/ExceptionUtils",
+            "getStackTrace", "(Ljava/lang/Throwable;)Ljava/lang/String;", exc);
+        if (jthr) {
+            fprintf(stderr, "(unable to get stack trace for %s exception: "
+                    "ExceptionUtils::getStackTrace error.)\n", className);
+            destroyLocalReference(env, jthr);
+        } else {
+            jStr = jVal.l;
+            stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
+            if (!stackTrace) {
+                fprintf(stderr, "(unable to get stack trace for %s exception: "
+                        "GetStringUTFChars error.)\n", className);
+            } else {
+                fprintf(stderr, "%s", stackTrace);
+                (*env)->ReleaseStringUTFChars(env, jStr, stackTrace);
+            }
+        }
+    }
+    destroyLocalReference(env, jStr);
+    destroyLocalReference(env, exc);
+    free(className);
+    return excErrno;
+}
+
+int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
+        const char *fmt, ...)
+{
+    va_list ap;
+    int ret;
+
+    va_start(ap, fmt);
+    ret = printExceptionAndFreeV(env, exc, noPrintFlags, fmt, ap);
+    va_end(ap);
+    return ret;
+}
+
+int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
+        const char *fmt, ...)
+{
+    va_list ap;
+    int ret;
+    jthrowable exc;
+
+    exc = (*env)->ExceptionOccurred(env);
+    if (!exc) {
+        va_start(ap, fmt);
+        vfprintf(stderr, fmt, ap);
+        va_end(ap);
+        fprintf(stderr, " error: (no exception)\n");
+        ret = 0;
+    } else {
+        (*env)->ExceptionClear(env);
+        va_start(ap, fmt);
+        ret = printExceptionAndFreeV(env, exc, noPrintFlags, fmt, ap);
+        va_end(ap);
+    }
+    return ret;
+}
+
+jthrowable getPendingExceptionAndClear(JNIEnv *env)
+{
+    jthrowable jthr = (*env)->ExceptionOccurred(env);
+    if (!jthr)
+        return NULL;
+    (*env)->ExceptionClear(env);
+    return jthr;
+}
+
+jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
+{
+    char buf[512];
+    jobject out, exc;
+    jstring jstr;
+    va_list ap;
+
+    va_start(ap, fmt);
+    vsnprintf(buf, sizeof(buf), fmt, ap);
+    va_end(ap);
+    jstr = (*env)->NewStringUTF(env, buf);
+    if (!jstr) {
+        // We got an out of memory exception rather than a RuntimeException.
+        // Too bad...
+        return getPendingExceptionAndClear(env);
+    }
+    exc = constructNewObjectOfClass(env, &out, "java/lang/RuntimeException",
+        "(Ljava/lang/String;)V", jstr);
+    (*env)->DeleteLocalRef(env, jstr);
+    // Again, we'll either get an out of memory exception or the
+    // RuntimeException we wanted.
+    return (exc) ? exc : out;
+}
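
To illustrate the calling convention these routines support, here is a hedged
sketch (not part of the patch). invokeMethod and destroyLocalReference are the
jni_helper.h utilities used above, FileSystem#getUri is a real Hadoop method,
and hypotheticalGetUri is an invented name:

#include "exception.h"
#include "jni_helper.h"

#include <errno.h>

// Call FileSystem#getUri and translate any Java exception into an errno.
static int hypotheticalGetUri(JNIEnv *env, jobject fs)
{
    jthrowable jthr;
    jvalue jVal;

    jthr = invokeMethod(env, &jVal, INSTANCE, fs,
        "org/apache/hadoop/fs/FileSystem", "getUri", "()Ljava/net/URI;");
    if (jthr) {
        // printExceptionAndFree prints the stack trace (unless suppressed),
        // frees the local reference, and returns the mapped errno.
        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hypotheticalGetUri: FileSystem#getUri failed");
        return -1;
    }
    destroyLocalReference(env, jVal.l);
    return 0;
}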

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.h
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.h
new file mode 100644
index 0000000..5fa7fa6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.h
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_EXCEPTION_H
+#define LIBHDFS_EXCEPTION_H
+
+/**
+ * Exception handling routines for libhdfs.
+ *
+ * The convention we follow here is to clear pending exceptions as soon as they
+ * are raised.  Never assume that the caller of your function will clean up
+ * after you-- do it yourself.  Unhandled exceptions can lead to memory leaks
+ * and other undefined behavior.
+ *
+ * If you encounter an exception, return a local reference to it.  The caller is
+ * responsible for freeing the local reference, by calling a function like
+ * PrintExceptionAndFree.  (You can also free exceptions directly by calling
+ * DeleteLocalRef.  However, that would not produce an error message, so it's
+ * usually not what you want.)
+ */
+
+#include "platform.h"
+
+#include <jni.h>
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <stdarg.h>
+#include <search.h>
+#include <errno.h>
+
+/**
+ * Exception noprint flags
+ *
+ * These flags determine which exceptions should NOT be printed to stderr by
+ * the exception printing routines.  For example, if you expect to see
+ * FileNotFound, you might use NOPRINT_EXC_FILE_NOT_FOUND, to avoid filling the
+ * logs with messages about routine events.
+ *
+ * On the other hand, if you don't expect any failures, you might pass
+ * PRINT_EXC_ALL.
+ *
+ * You can OR these flags together to avoid printing multiple classes of
+ * exceptions.
+ */
+#define PRINT_EXC_ALL                           0x00
+#define NOPRINT_EXC_FILE_NOT_FOUND              0x01
+#define NOPRINT_EXC_ACCESS_CONTROL              0x02
+#define NOPRINT_EXC_UNRESOLVED_LINK             0x04
+#define NOPRINT_EXC_PARENT_NOT_DIRECTORY        0x08
+#define NOPRINT_EXC_ILLEGAL_ARGUMENT            0x10
+
+/**
+ * Get information about an exception.
+ *
+ * @param excName         The Exception name.
+ *                        This is a Java class name in JNI format.
+ * @param noPrintFlags    Flags which determine which exceptions we should NOT
+ *                        print.
+ * @param excErrno        (out param) The POSIX error number associated with the
+ *                        exception.
+ * @param shouldPrint     (out param) Nonzero if we should print this exception,
+ *                        based on the noPrintFlags and its name. 
+ */
+void getExceptionInfo(const char *excName, int noPrintFlags,
+                      int *excErrno, int *shouldPrint);
+
+/**
+ * Print out information about an exception and free it.
+ *
+ * @param env             The JNI environment
+ * @param exc             The exception to print and free
+ * @param noPrintFlags    Flags which determine which exceptions we should NOT
+ *                        print.
+ * @param fmt             Printf-style format list
+ * @param ap              Printf-style varargs
+ *
+ * @return                The POSIX error number associated with the exception
+ *                        object.
+ */
+int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
+        const char *fmt, va_list ap);
+
+/**
+ * Print out information about an exception and free it.
+ *
+ * @param env             The JNI environment
+ * @param exc             The exception to print and free
+ * @param noPrintFlags    Flags which determine which exceptions we should NOT
+ *                        print.
+ * @param fmt             Printf-style format list
+ * @param ...             Printf-style varargs
+ *
+ * @return                The POSIX error number associated with the exception
+ *                        object.
+ */
+int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
+        const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(4, 5);
+
+/**
+ * Print out information about the pending exception and free it.
+ *
+ * @param env             The JNI environment
+ * @param noPrintFlags    Flags which determine which exceptions we should NOT
+ *                        print.
+ * @param fmt             Printf-style format list
+ * @param ...             Printf-style varargs
+ *
+ * @return                The POSIX error number associated with the exception
+ *                        object.
+ */
+int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
+        const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(3, 4);
+
+/**
+ * Get a local reference to the pending exception and clear it.
+ *
+ * Once it is cleared, the exception will no longer be pending.  The caller will
+ * have to decide what to do with the exception object.
+ *
+ * @param env             The JNI environment
+ *
+ * @return                The exception, or NULL if there was no exception
+ */
+jthrowable getPendingExceptionAndClear(JNIEnv *env);
+
+/**
+ * Create a new runtime error.
+ *
+ * This creates (but does not throw) a new RuntimeError.
+ *
+ * @param env             The JNI environment
+ * @param fmt             Printf-style format list
+ * @param ...             Printf-style varargs
+ *
+ * @return                A local reference to a RuntimeError
+ */
+jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
+        TYPE_CHECKED_PRINTF_FORMAT(2, 3);
+
+#undef TYPE_CHECKED_PRINTF_FORMAT
+#endif
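
A short hedged sketch (not part of the patch) of the noprint flags in use:
when a particular failure is routine, OR the matching flags together so the
exception is still mapped to an errno value but produces no stack trace.
probeExistence is an invented name:

#include "exception.h"

// A FileNotFoundException here is expected, so suppress its stack trace;
// any other exception (e.g. OutOfMemoryError) still prints.
static int probeExistence(JNIEnv *env, jthrowable jthr, const char *path)
{
    return printExceptionAndFree(env, jthr,
            NOPRINT_EXC_FILE_NOT_FOUND | NOPRINT_EXC_ACCESS_CONTROL,
            "probeExistence(path=%s)", path);
}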

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.c
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.c
new file mode 100644
index 0000000..576e9ef
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.c
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "expect.h"
+#include "hdfs.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int expectFileStats(hdfsFile file,
+      uint64_t expectedTotalBytesRead,
+      uint64_t expectedTotalLocalBytesRead,
+      uint64_t expectedTotalShortCircuitBytesRead,
+      uint64_t expectedTotalZeroCopyBytesRead)
+{
+    struct hdfsReadStatistics *stats = NULL;
+    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &stats));
+    fprintf(stderr, "expectFileStats(expectedTotalBytesRead=%"PRId64", "
+            "expectedTotalLocalBytesRead=%"PRId64", "
+            "expectedTotalShortCircuitBytesRead=%"PRId64", "
+            "expectedTotalZeroCopyBytesRead=%"PRId64", "
+            "totalBytesRead=%"PRId64", "
+            "totalLocalBytesRead=%"PRId64", "
+            "totalShortCircuitBytesRead=%"PRId64", "
+            "totalZeroCopyBytesRead=%"PRId64")\n",
+            expectedTotalBytesRead,
+            expectedTotalLocalBytesRead,
+            expectedTotalShortCircuitBytesRead,
+            expectedTotalZeroCopyBytesRead,
+            stats->totalBytesRead,
+            stats->totalLocalBytesRead,
+            stats->totalShortCircuitBytesRead,
+            stats->totalZeroCopyBytesRead);
+    if (expectedTotalBytesRead != UINT64_MAX) {
+        EXPECT_UINT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
+    }
+    if (expectedTotalLocalBytesRead != UINT64_MAX) {
+        EXPECT_UINT64_EQ(expectedTotalLocalBytesRead,
+                      stats->totalLocalBytesRead);
+    }
+    if (expectedTotalShortCircuitBytesRead != UINT64_MAX) {
+        EXPECT_UINT64_EQ(expectedTotalShortCircuitBytesRead,
+                      stats->totalShortCircuitBytesRead);
+    }
+    if (expectedTotalZeroCopyBytesRead != UINT64_MAX) {
+        EXPECT_UINT64_EQ(expectedTotalZeroCopyBytesRead,
+                      stats->totalZeroCopyBytesRead);
+    }
+    hdfsFileFreeReadStatistics(stats);
+    return 0;
+}
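
Since any parameter equal to UINT64_MAX is skipped, a test that cares about a
single counter can wildcard the rest. A hedged sketch (not part of the patch),
where the 4096-byte expectation is illustrative:

#include "expect.h"
#include "hdfs.h"

// Assert only the short-circuit counter; wildcard everything else.
static int checkShortCircuit(hdfsFile file)
{
    EXPECT_ZERO(expectFileStats(file, UINT64_MAX, UINT64_MAX,
                                4096, UINT64_MAX));
    return 0;
}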

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.h
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.h
new file mode 100644
index 0000000..49aa285
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/expect.h
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_NATIVE_TESTS_EXPECT_H
+#define LIBHDFS_NATIVE_TESTS_EXPECT_H
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+struct hdfsFile_internal;
+
+#define EXPECT_ZERO(x) \
+    do { \
+        int __my_ret__ = x; \
+        if (__my_ret__) { \
+            int __my_errno__ = errno; \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+		    "code %d (errno: %d): got nonzero from %s\n", \
+		    __FILE__, __LINE__, __my_ret__, __my_errno__, #x); \
+            return __my_ret__; \
+        } \
+    } while (0);
+
+#define EXPECT_NULL(x) \
+    do { \
+        const void* __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != NULL) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
+		    "got non-NULL value %p from %s\n", \
+		    __FILE__, __LINE__, __my_errno__, __my_ret__, #x); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_NULL_WITH_ERRNO(x, e) \
+    do { \
+        const void* __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != NULL) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
+		    "got non-NULL value %p from %s\n", \
+		    __FILE__, __LINE__, __my_errno__, __my_ret__, #x); \
+            return -1; \
+        } \
+        if (__my_errno__ != e) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
+		    "got expected NULL without expected errno %d from %s\n", \
+		    __FILE__, __LINE__, __my_errno__, e, #x); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_NONNULL(x) \
+    do { \
+        const void* __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ == NULL) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
+		    "got NULL from %s\n", __FILE__, __LINE__, __my_errno__, #x); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_NEGATIVE_ONE_WITH_ERRNO(x, e) \
+    do { \
+        int __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != -1) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+                "code %d (errno: %d): expected -1 from %s\n", \
+                    __FILE__, __LINE__, \
+                __my_ret__, __my_errno__, #x); \
+            return -1; \
+        } \
+        if (__my_errno__ != e) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+                "code %d (errno: %d): expected errno = %d from %s\n", \
+                __FILE__, __LINE__, __my_ret__, __my_errno__, e, #x); \
+            return -1; \
+	} \
+    } while (0);
+
+#define EXPECT_NONZERO(x) \
+    do { \
+        int __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (!__my_ret__) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "code %d (errno: %d): got zero from %s\n", __FILE__, __LINE__, \
+              __my_ret__, __my_errno__, #x); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_NONNEGATIVE(x) \
+    do { \
+        int __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ < 0) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+                "code %d (errno: %d): got negative return from %s\n", \
+                __FILE__, __LINE__, __my_ret__, __my_errno__, #x); \
+            return __my_ret__; \
+        } \
+    } while (0);
+
+#define EXPECT_INT_EQ(x, y) \
+    do { \
+        int __my_ret__ = y; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != (x)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "code %d (errno: %d): expected %d\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_INT64_EQ(x, y) \
+    do { \
+        int64_t __my_ret__ = y; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != (x)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "value %"PRId64" (errno: %d): expected %"PRId64"\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_UINT64_EQ(x, y) \
+    do { \
+        uint64_t __my_ret__ = y; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != (x)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "value %"PRIu64" (errno: %d): expected %"PRIu64"\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
+            return -1; \
+        } \
+    } while (0);
+
+#define RETRY_ON_EINTR_GET_ERRNO(ret, expr) do { \
+    ret = expr; \
+    if (!ret) \
+        break; \
+    ret = -errno; \
+    } while (ret == -EINTR);
+
+/**
+ * Test that an HDFS file has the given statistics.
+ *
+ * Any parameter can be set to UINT64_MAX to avoid checking it.
+ *
+ * @return 0 on success; error code otherwise
+ */
+int expectFileStats(struct hdfsFile_internal *file,
+      uint64_t expectedTotalBytesRead,
+      uint64_t expectedTotalLocalBytesRead,
+      uint64_t expectedTotalShortCircuitBytesRead,
+      uint64_t expectedTotalZeroCopyBytesRead);
+
+#endif
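
Finally, a minimal sketch (not part of the patch) of a test written against
these macros: each EXPECT_ macro prints a TEST_ERROR line and returns from the
enclosing function on failure, so tests are plain int-returning functions.
hdfsConnect, hdfsExists, and hdfsDisconnect are standard libhdfs entry points;
the "default" connection string is illustrative:

#include "expect.h"
#include "hdfs.h"

#include <errno.h>

static int testRootExists(void)
{
    hdfsFS fs;

    fs = hdfsConnect("default", 0);
    EXPECT_NONNULL(fs);
    // hdfsExists returns 0 when the path is present.
    EXPECT_ZERO(hdfsExists(fs, "/"));
    EXPECT_ZERO(hdfsDisconnect(fs));
    return 0;
}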

