hadoop-common-commits mailing list archives

From cmcc...@apache.org
Subject svn commit: r1602280 [2/6] - in /hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native: ./ common/ fs/ jni/ ndfs/ rpc/ test/ test/common/ test/common/conf/ test/fs/
Date Thu, 12 Jun 2014 19:56:25 GMT
Modified: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string.h?rev=1602280&r1=1602279&r2=1602280&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string.h (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string.h Thu Jun 12 19:56:23 2014
@@ -34,6 +34,19 @@
 void hex_buf_print(FILE *fp, const void *buf, int32_t buf_len,
         const char *fmt, ...) __attribute__((format(printf, 4, 5)));
 
+/**
+ * Duplicate a string. 
+ *
+ * @param dst       (out param) the destination address.
+ *                    We will put either NULL (if src == NULL) or a malloc'ed
+ *                    string here.  If a malloc'ed string is here, we will free
+ *                    or realloc it.
+ * @param src       The string to duplicate, or NULL to set *dst to NULL.
+ *
+ * @return          0 on success; ENOMEM on OOM.
+ */
+int strdupto(char **dst, const char *src);
+
 #endif
 
 // vim: ts=4:sw=4:tw=79:et
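
A minimal usage sketch of the strdupto() contract documented above: the destination pointer starts out NULL (or holds a previously malloc'ed string), and the call either replaces it or reports ENOMEM. The calling code below (set_owner and its caller) is hypothetical and only illustrates the documented behavior.

    #include <stdio.h>
    #include <stdlib.h>

    #include "common/string.h"

    static int set_owner(char **owner, const char *name)
    {
        // strdupto() frees or reallocs any previous *owner, so it is safe
        // to call repeatedly with the same destination pointer.
        int ret = strdupto(owner, name);
        if (ret) {
            // Per the header comment, the only error is ENOMEM.
            fprintf(stderr, "failed to copy owner name: error %d\n", ret);
            return ret;
        }
        return 0;
    }

    int main(void)
    {
        char *owner = NULL;
        if (set_owner(&owner, "cmccabe"))
            return EXIT_FAILURE;
        printf("owner=%s\n", owner);
        free(owner);    // the duplicated string is malloc'ed by strdupto()
        return EXIT_SUCCESS;
    }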

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri-unit.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri-unit.c?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri-unit.c (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri-unit.c Thu Jun 12 19:56:23 2014
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/hadoop_err.h"
+#include "common/uri.h"
+#include "test/test.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <uv.h>
+
+static int check_uri_params(const char *scheme, const char *escheme,
+                            const char *user, const char *euser,
+                            const char *auth, const char *eauth,
+                            int port, int eport,
+                            const char *path, const char *epath)
+{
+    EXPECT_STR_EQ(escheme, scheme);
+    EXPECT_STR_EQ(euser, user);
+    EXPECT_STR_EQ(eauth, auth);
+    EXPECT_STR_EQ(epath, path);
+    EXPECT_INT_EQ(eport, port);
+    return 0;
+}
+
+static struct hadoop_err *test_parse_uri(const char *uri_str,
+            const char *escheme, const char *euser, const char *eauth,
+            int eport, const char *epath)
+{
+    UriParserStateA base_uri_state, uri_state;
+    UriUriA base_uri, uri;
+    struct hadoop_err *err = NULL;
+    char *scheme = NULL, *user = NULL, *auth = NULL;
+    char *path = NULL;
+    uint16_t port;
+
+    memset(&uri_state, 0, sizeof(uri_state));
+    err = uri_parse_abs("hdfs:///home/cmccabe/", &base_uri_state,
+            &base_uri, "hdfs");
+    if (err)
+        goto done;
+    err = uri_parse(uri_str, &uri_state, &uri, &base_uri);
+    if (err)
+        goto done;
+    err = uri_get_scheme(&uri, &scheme);
+    if (err)
+        goto done;
+    err = uri_get_user_info(&uri, &user);
+    if (err)
+        goto done;
+    // Get the authority, which we typically treat as a hostname.
+    err = uri_get_authority(&uri, &auth);
+    if (err)
+        goto done;
+    err = uri_get_path(&uri, &path);
+    if (err)
+        goto done;
+    err = uri_get_port(&uri, &port);
+    if (err)
+        goto done;
+//    fprintf(stderr, "test_parse_uri(%s): "
+//            "scheme=%s, user=%s, auth=%s, path=%s\n",
+//            uri_str, scheme, user, auth, path);
+    err = NULL;
+    if (check_uri_params(scheme, escheme,
+                         user, euser,
+                         auth, eauth,
+                         port, eport,
+                         path, epath)) {
+        err = hadoop_lerr_alloc(EINVAL, "check_uri_params: failed.");
+        if (err)
+            goto done;
+    }
+
+done:
+    if (base_uri_state.uri) {
+        uriFreeUriMembersA(&base_uri);
+    }
+    if (uri_state.uri) {
+        uriFreeUriMembersA(&uri);
+    }
+    free(scheme);
+    free(user);
+    free(auth);
+    free(path);
+    return err;
+}
+
+int main(void)
+{
+    struct hadoop_err *err;
+
+    //EXPECT_NO_HADOOP_ERR(test_parse_uri("localhost:6000"));
+    EXPECT_NO_HADOOP_ERR(test_parse_uri("a/b/c/d",
+                    "hdfs", "", "", 0, "/home/cmccabe/a/b/c/d"));
+    EXPECT_NO_HADOOP_ERR(test_parse_uri("hdfs://localhost:6000",
+                    "hdfs", "", "localhost", 6000, ""));
+    EXPECT_NO_HADOOP_ERR(test_parse_uri("file:///a/b/c/d",
+                    "file", "", "", 0, "/a/b/c/d"));
+    EXPECT_NO_HADOOP_ERR(test_parse_uri("s3://jeffbezos:6000",
+                    "s3", "", "jeffbezos", 6000, ""));
+    EXPECT_NO_HADOOP_ERR(test_parse_uri("s3:///foo",
+                    "s3", "", "", 0, "/foo"));
+    EXPECT_NO_HADOOP_ERR(test_parse_uri("hdfs://nn1.example.com/foo/bar",
+                    "hdfs", "", "nn1.example.com", 0, "/foo/bar"));
+    EXPECT_NO_HADOOP_ERR(test_parse_uri("hdfs://user:password@hdfshost:9000/a/b/c",
+                    "hdfs", "user:password", "hdfshost", 9000, "/a/b/c"));
+    err = test_parse_uri("://user:password@hdfshost:9000/a/b/c",
+                    "", "", "", 0, "");
+    EXPECT_NONNULL(strstr(hadoop_err_msg(err), "failed to parse"));
+    hadoop_err_free(err);
+
+    return EXIT_SUCCESS;
+}
+
+// vim: ts=4:sw=4:tw=79:et

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri.c?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri.c (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri.c Thu Jun 12 19:56:23 2014
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/hadoop_err.h"
+#include "common/uri.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static struct hadoop_err *uri_err_to_hadoop_err(int err)
+{
+    switch (err) {
+    case URI_SUCCESS:
+        return NULL;
+    case URI_ERROR_SYNTAX:
+        return hadoop_lerr_alloc(EINVAL, "invalid URI format");
+    case URI_ERROR_NULL:
+        return hadoop_lerr_alloc(EINVAL, "unexpected NULL pointer "
+                                 "passed as parameter to uriparse");
+    case URI_ERROR_MALLOC:
+        return hadoop_lerr_alloc(ENOMEM, "out of memory");
+    case URI_ERROR_OUTPUT_TOO_LARGE:
+        return hadoop_lerr_alloc(ENAMETOOLONG, "data too big for uriparse "
+                                 "buffer");
+    case URI_ERROR_NOT_IMPLEMENTED:
+        return hadoop_lerr_alloc(ENOTSUP, "uriparse function not "
+                                 "implemented.");
+    case URI_ERROR_ADDBASE_REL_BASE:
+        return hadoop_lerr_alloc(EINVAL, "given add base is not absolute");
+    case URI_ERROR_REMOVEBASE_REL_BASE:
+        return hadoop_lerr_alloc(EINVAL, "given remove base is not absolute");
+    case URI_ERROR_REMOVEBASE_REL_SOURCE:
+        return hadoop_lerr_alloc(EINVAL, "given remove source is not "
+                                 "absolute");
+    case URI_ERROR_RANGE_INVALID:
+        return hadoop_lerr_alloc(ERANGE, "invalid range in uriparse.");
+    default:
+        return hadoop_lerr_alloc(EIO, "unknown uri error.");
+    }
+}
+
+struct hadoop_err *uri_parse_abs(const char *str, UriParserStateA *state,
+            UriUriA *uri, const char *def_scheme)
+{
+    int ret;
+    struct hadoop_err *err = NULL;
+    size_t str_len;
+    const char *effective_str = NULL;
+    char *malloced_str = NULL, *nmalloced_str;
+
+    // If the URI doesn't end with a slash, append one.
+    // This is necessary to get AddBaseUri to act like we expect when using
+    // this absolute URI as a base.
+    state->uri = NULL;
+    str_len = strlen(str);
+    if ((str_len == 0) || (str[str_len - 1] != '/')) {
+        if (asprintf(&malloced_str, "%s/", str) < 0) {
+            err = hadoop_lerr_alloc(ENOMEM, "uri_parse_abs: OOM");
+            malloced_str = NULL;
+            goto done;
+        }
+        effective_str = malloced_str;
+    } else {
+        effective_str = str;
+    }
+    state->uri = uri;
+    ret = uriParseUriA(state, effective_str);
+    if (ret) {
+        state->uri = NULL;
+        err = hadoop_err_prepend(uri_err_to_hadoop_err(ret),
+            0, "uri_parse: failed to parse '%s' as URI",
+            effective_str);
+        goto done;
+    }
+    if (uri->scheme.first == NULL) {
+        // If the URI doesn't have a scheme, prepend the default one to the
+        // string, and re-parse.  This is necessary because AddBaseUri refuses
+        // to rebase URIs on absolute URIs without a scheme.
+        if (asprintf(&nmalloced_str, "%s://%s", def_scheme,
+                     effective_str) < 0) {
+            err = hadoop_lerr_alloc(ENOMEM, "uri_parse_abs: OOM");
+            goto done;
+        }
+        free(malloced_str);
+        malloced_str = nmalloced_str;
+        effective_str = malloced_str;
+        uriFreeUriMembersA(uri);
+        state->uri = uri;
+        ret = uriParseUriA(state, effective_str);
+        if (ret) {
+            state->uri = NULL;
+            err = hadoop_err_prepend(uri_err_to_hadoop_err(ret),
+                0, "uri_parse: failed to parse '%s' as URI",
+                effective_str);
+            goto done;
+        }
+    }
+    err = NULL;
+done:
+    if (err) {
+        if (state->uri) {
+            uriFreeUriMembersA(state->uri);
+            state->uri = NULL;
+        }
+    }
+    return err;
+}
+
+struct hadoop_err *uri_parse(const char *str, UriParserStateA *state,
+            UriUriA *uri, UriUriA *base_uri)
+{
+    int ret;
+    struct hadoop_err *err = NULL;
+    UriUriA first_uri;
+
+    state->uri = &first_uri;
+    ret = uriParseUriA(state, str);
+    if (ret) {
+        state->uri = NULL;
+        err = hadoop_err_prepend(uri_err_to_hadoop_err(ret),
+            0, "uri_parse: failed to parse '%s' as a URI", str);
+        goto done;
+    }
+//    fprintf(stderr, "str=%s, base_path=%s, base_uri->absolutePath=%d\n",
+//            str, base_path, base_uri.absolutePath);
+//        fprintf(stderr, "uriAddBaseUriA base_path=%s, str=%s, ret %d\n", base_path, str, ret); 
+    ret = uriAddBaseUriA(uri, &first_uri, base_uri);
+    if (ret) {
+        err = hadoop_err_prepend(uri_err_to_hadoop_err(ret),
+            0, "uri_parse: failed to add base URI");
+        goto done;
+    }
+    uriFreeUriMembersA(&first_uri);
+    state->uri = uri;
+    ret = uriNormalizeSyntaxA(uri);
+    if (ret) {
+        err = hadoop_err_prepend(uri_err_to_hadoop_err(ret),
+            0, "uri_parse: failed to normalize URI");
+        goto done;
+    }
+done:
+    if (err) {
+        if (state->uri) {
+            uriFreeUriMembersA(uri);
+            state->uri = NULL;
+        }
+    }
+    return err;
+}
+
+static struct hadoop_err *text_range_to_str(struct UriTextRangeStructA *text,
+                                            char **out, const char *def)
+{
+    struct hadoop_err *err = NULL;
+    char *str = NULL;
+    const char *c;
+    size_t len = 0;
+
+    if (!text->first) {
+        str = strdup(def);
+        if (!str) {
+            err = hadoop_lerr_alloc(ENOMEM, "text_range_to_str: out of memory "
+                "trying to allocate a %zd-byte default string.",
+                strlen(def) + 1);
+        }
+        goto done;
+    }
+    for (c = text->first; c != text->afterLast; c++) {
+        ++len;
+    }
+    str = malloc(len + 1);
+    if (!str) {
+        err = hadoop_lerr_alloc(ENOMEM, "text_range_to_str: out of memory "
+            "trying to allocate a %zd-byte string.", len + 1);
+        goto done;
+    }
+    memcpy(str, text->first, len);
+    str[len] = '\0';
+    err = NULL;
+
+done:
+    if (err) {
+        free(str);
+        return err;
+    }
+    *out = str;
+    return NULL;
+}
+
+struct hadoop_err *uri_get_scheme(UriUriA *uri, char **out)
+{
+    struct hadoop_err *err;
+    char *scheme = NULL;
+
+    err = text_range_to_str(&uri->scheme, &scheme, "");
+    if (err)
+        return err;
+    *out = scheme;
+    return NULL;
+}
+
+struct hadoop_err *uri_get_user_info(UriUriA *uri, char **user_info)
+{
+    return text_range_to_str(&uri->userInfo, user_info, "");
+}
+
+struct hadoop_err *uri_get_authority(UriUriA *uri, char **authority)
+{
+    return text_range_to_str(&uri->hostText, authority, "");
+}
+
+struct hadoop_err *uri_get_port(UriUriA *uri, uint16_t *out)
+{
+    struct hadoop_err *err;
+    char *port_str = NULL;
+    int port;
+
+    err = text_range_to_str(&uri->portText, &port_str, "");
+    if (err)
+        return err;
+    port = atoi(port_str);
+    free(port_str);
+    if (port < 0 || port > 0xffff) {
+        return hadoop_lerr_alloc(EINVAL, "uri_get_port: invalid "
+                                 "port number %d\n", port);
+    }
+    *out = port;
+    return NULL;
+}
+
+struct hadoop_err *uri_get_path(UriUriA *uri, char **out)
+{
+    struct UriPathSegmentStructA *cur;
+    size_t i = 0, path_len = 0;
+    char *path = NULL;
+    int absolute = 0;
+
+    if (uri->absolutePath) {
+        absolute = 1;
+    } else if (uri->pathHead && uri->scheme.first) {
+        // Hadoop treats all URIs with a path as absolute, if they have a
+        // non-empty path.
+        // So hdfs://mynamenode/ maps to the root path, for example.  But as a
+        // special case, hdfs://mynamenode (no terminating slash) maps to "."
+        absolute = 1;
+    }
+    // The URI parser library splits paths up into lots of PathSegment
+    // structures-- one per path segment.  We need to reconstruct the full
+    // path.  The first step is figuring out the upper bound on the path
+    // length.
+    for (cur = uri->pathHead; cur; cur = cur->next) {
+        const char *c;
+        path_len++; // +1 for the leading slash.
+        for (c = cur->text.first; c != cur->text.afterLast; c++) {
+            path_len++;
+        }
+    }
+    path = malloc(path_len + 1); // +1 for the NULL terminator
+    if (!path) {
+        return hadoop_lerr_alloc(ENOMEM, "uri_get_path: OOM copying "
+                                 "%zd byte path.", path_len);
+    }
+    // The next step is copying over the path.
+    for (cur = uri->pathHead; cur; cur = cur->next) {
+        const char *c;
+        size_t copy_len = 0;
+        if ((i != 0) || absolute) {
+            path[i++] = '/';
+        }
+        for (c = cur->text.first; c != cur->text.afterLast; c++) {
+            copy_len++;
+        }
+        memcpy(path + i, cur->text.first, copy_len);
+        i += copy_len;
+    }
+    path[i] = '\0';
+    *out = path;
+    return NULL;
+}
+
+// vim: ts=4:sw=4:tw=79:et

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri.h?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri.h (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri.h Thu Jun 12 19:56:23 2014
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_CORE_COMMON_URI
+#define HADOOP_CORE_COMMON_URI
+
+#include <uriparser/Uri.h>
+
+/**
+ * Parse an absolute URI.
+ *
+ * @param str           The string to parse.  If there is not a slash at the
+ *                          end, one will be added.
+ * @param state         (inout) The URI parser state to use.
+ *                          On success, state->uri will be set to a non-NULL
+ *                          value.
+ * @param uri           (out param) The URI object to fill.
+ * @param def_scheme    The default scheme to add if there is no scheme.
+ *
+ * @return              NULL on success; the URI parsing problem otherwise.
+ */
+struct hadoop_err *uri_parse_abs(const char *str, UriParserStateA *state,
+            UriUriA *uri, const char *def_scheme);
+
+/**
+ * Parse a relative or absolute URI.
+ *
+ * @param str           The string to parse.
+ * @param state         (inout) The URI parser state to use.
+ *                          On success, state->uri will be set to a non-NULL
+ *                          value.
+ * @param uri           (out param) The URI object to fill.
+ *
+ * @return              NULL on success; the URI parsing problem otherwise.
+ */
+struct hadoop_err *uri_parse(const char *str, UriParserStateA *state,
+            UriUriA *uri, UriUriA *base_uri);
+
+/**
+ * Get the scheme of a URI.
+ *
+ * We disallow schemes with non-ASCII characters.
+ *
+ * @param uri           The Uri object.
+ * @param scheme        (out param) the scheme.
+ *
+ * @return              NULL on success; the URI parsing problem otherwise.
+ */
+struct hadoop_err *uri_get_scheme(UriUriA *uri, char **scheme);
+
+/**
+ * Get the user_info of a URI.
+ *
+ * @param uri           The Uri object.
+ * @param user_info     (out param) the user_info.
+ *
+ * @return              NULL on success; the URI parsing problem otherwise.
+ */
+struct hadoop_err *uri_get_user_info(UriUriA *uri, char **user_info);
+
+/**
+ * Get the authority of a URI.
+ *
+ * @param uri           The Uri object.
+ * @param authority     (out param) the authority.
+ *
+ * @return              NULL on success; the URI parsing problem otherwise.
+ */
+struct hadoop_err *uri_get_authority(UriUriA *uri, char **authority);
+
+/**
+ * Get the port of a URI.
+ *
+ * @param uri           The Uri object.
+ * @param port          (out param) the port, or 0 if there was no port.
+ *
+ * @return              NULL on success; the URI parsing problem otherwise.
+ */
+struct hadoop_err *uri_get_port(UriUriA *uri, uint16_t *port);
+
+/**
+ * Get the path of a URI.
+ *
+ * @param uri       The Uri object.
+ * @param path      (out param) the path.
+ *
+ * @return          NULL on success; the URI parsing problem otherwise.
+ */
+struct hadoop_err *uri_get_path(UriUriA *uri, char **path);
+
+#endif
+
+// vim: ts=4:sw=4:tw=79:et

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/config.h.cmake
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/config.h.cmake?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/config.h.cmake (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/config.h.cmake Thu Jun 12 19:56:23 2014
@@ -0,0 +1,40 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#ifndef CONFIG_H
+#define CONFIG_H
+
+/**
+ * Defined if we can use the __thread keyword to get faster thread-local 
+ * storage.
+ */
+#cmakedefine HAVE_BETTER_TLS
+
+/**
+ * Short version of JNI library name.  
+ * This varies by platform.
+ *
+ * Example: "libjvm.so"
+ */
+#cmakedefine JNI_LIBRARY_NAME "@JNI_LIBRARY_NAME@"
+
+/**
+ * Where to find the test XML files we use in hconf-unit.
+ */
+#cmakedefine HCONF_XML_TEST_PATH "@HCONF_XML_TEST_PATH@"
+
+#endif
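
A hypothetical sketch of how the HAVE_BETTER_TLS macro defined above might be consumed. The counter name and the include path for the generated config.h are assumptions made for illustration only.

    #include <stdint.h>

    #include "config.h"

    #ifdef HAVE_BETTER_TLS
    // The __thread keyword gives each thread its own copy of this counter
    // with no pthread_getspecific() overhead.
    static __thread uint64_t g_thread_op_count;
    #else
    // Hypothetical fallback: without __thread, an implementation would
    // typically use pthread_key_create()/pthread_getspecific() instead.
    // A plain static is shown here only as a placeholder.
    static uint64_t g_thread_op_count;
    #endif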

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/common.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/common.c?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/common.c (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/common.c Thu Jun 12 19:56:23 2014
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/hadoop_err.h"
+#include "common/string.h"
+#include "common/uri.h"
+#include "common/user.h"
+#include "fs/fs.h"
+#include "fs/hdfs.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <uriparser/Uri.h>
+
+void release_file_info_entry(hdfsFileInfo *hdfsFileInfo)
+{
+    free(hdfsFileInfo->mName);
+    free(hdfsFileInfo->mOwner);
+    free(hdfsFileInfo->mGroup);
+    memset(hdfsFileInfo, 0, sizeof(*hdfsFileInfo));
+}
+
+int hadoopfs_errno_and_retcode(struct hadoop_err *err)
+{
+    if (err) {
+        fputs(hadoop_err_msg(err), stderr);
+        errno = hadoop_err_code(err);
+        hadoop_err_free(err);
+        return -1;
+    }
+    return 0;
+}
+
+void *hadoopfs_errno_and_retptr(struct hadoop_err *err, void *ptr)
+{
+    if (err) {
+        fputs(hadoop_err_msg(err), stderr);
+        errno = hadoop_err_code(err);
+        hadoop_err_free(err);
+        return NULL;
+    }
+    return ptr;
+}
+
+// vim: ts=4:sw=4:et
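
The two helpers above define the error-reporting convention at the libhdfs API boundary: internal code passes struct hadoop_err values around, and the outermost wrapper converts them into errno plus a -1 or NULL return. A hedged sketch of such a wrapper follows; hadoopfs_do_something() and do_something_internal() are invented for illustration and are not part of the library.

    #include "common/hadoop_err.h"
    #include "fs/common.h"

    // Hypothetical internal helper that reports failure via hadoop_err.
    static struct hadoop_err *do_something_internal(const char *path)
    {
        (void)path;     // pretend the operation succeeded
        return NULL;
    }

    // Hypothetical public entry point following the libhdfs convention.
    int hadoopfs_do_something(const char *path)
    {
        struct hadoop_err *err = do_something_internal(path);
        // On error this prints the message, sets errno from the error code,
        // frees the error, and returns -1; on success it returns 0.
        return hadoopfs_errno_and_retcode(err);
    }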

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/common.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/common.h?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/common.h (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/common.h Thu Jun 12 19:56:23 2014
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_NATIVE_CORE_FS_COMMON_H
+#define HADOOP_NATIVE_CORE_FS_COMMON_H
+
+struct file_info;
+struct hadoop_err;
+struct hdfsBuilder;
+
+/**
+ * Release the memory used inside an hdfsFileInfo structure.
+ * Does not free the structure itself.
+ *
+ * @param hdfsFileInfo          The hdfsFileInfo structure.
+ */
+void release_file_info_entry(struct file_info *hdfsFileInfo);
+
+/**
+ * Sets errno and logs a message appropriately on encountering a Hadoop error.
+ *
+ * @param err                   The hadoop error, or NULL if there is none.
+ *
+ * @return                      -1 on error; 0 otherwise.  Errno will be set on
+ *                              error.
+ */
+int hadoopfs_errno_and_retcode(struct hadoop_err *err);
+
+/**
+ * Sets errno and logs a message appropriately on encountering a Hadoop error.
+ *
+ * @param err                   The hadoop error, or NULL if there is none.
+ * @param ptr                   The pointer to return if err is NULL.
+ *
+ * @return                      NULL on error; ptr otherwise.  Errno will be set
+ *                              on error.
+ */
+void *hadoopfs_errno_and_retptr(struct hadoop_err *err, void *ptr);
+
+#endif
+
+// vim: ts=4:sw=4:et

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/fs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/fs.c?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/fs.c (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/fs.c Thu Jun 12 19:56:23 2014
@@ -0,0 +1,734 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/hadoop_err.h"
+#include "common/hconf.h"
+#include "common/string.h"
+#include "common/uri.h"
+#include "common/user.h"
+#include "fs/common.h"
+#include "fs/fs.h"
+#include "fs/hdfs.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <uriparser/Uri.h>
+
+#define DEFAULT_SCHEME "hdfs"
+
+#define DEFAULT_NATIVE_HANDLERS "ndfs,jnifs"
+
+const char* const HDFS_XML_NAMES[] = {
+    "core-default.xml",
+    "core-site.xml",
+    "hdfs-default.xml",
+    "hdfs-site.xml",
+    NULL
+};
+
+extern const struct hadoop_fs_ops g_jni_ops;
+extern const struct hadoop_fs_ops g_ndfs_ops;
+
+static const struct hadoop_fs_ops *g_ops[] = {
+    &g_jni_ops,
+    &g_ndfs_ops,
+    NULL
+};
+
+int hdfsFileIsOpenForRead(hdfsFile file)
+{
+    struct hadoop_file_base *base = (struct hadoop_file_base*)file;
+    return g_ops[base->ty]->file_is_open_for_read(file);
+}
+
+int hdfsFileIsOpenForWrite(hdfsFile file)
+{
+    struct hadoop_file_base *base = (struct hadoop_file_base*)file;
+    return g_ops[base->ty]->file_is_open_for_write(file);
+}
+
+int hdfsFileGetReadStatistics(hdfsFile file, struct hdfsReadStatistics **stats)
+{
+    struct hadoop_file_base *base = (struct hadoop_file_base*)file;
+    return g_ops[base->ty]->get_read_statistics(file, stats);
+}
+
+int64_t hdfsReadStatisticsGetRemoteBytesRead(
+                            const struct hdfsReadStatistics *stats)
+{
+    return stats->totalBytesRead - stats->totalLocalBytesRead;
+}
+
+void hdfsFileFreeReadStatistics(struct hdfsReadStatistics *stats)
+{
+    free(stats);
+}
+
+hdfsFS hdfsConnect(const char* host, tPort port)
+{
+    struct hdfsBuilder *bld = hdfsNewBuilder();
+    if (!bld)
+        return NULL;
+    hdfsBuilderSetNameNode(bld, host);
+    hdfsBuilderSetNameNodePort(bld, port);
+    return hdfsBuilderConnect(bld);
+}
+
+hdfsFS hdfsConnectNewInstance(const char* host, tPort port)
+{
+    struct hdfsBuilder *bld = hdfsNewBuilder();
+    if (!bld)
+        return NULL;
+    hdfsBuilderSetNameNode(bld, host);
+    hdfsBuilderSetNameNodePort(bld, port);
+    hdfsBuilderSetForceNewInstance(bld);
+    return hdfsBuilderConnect(bld);
+}
+
+hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user)
+{
+    struct hdfsBuilder *bld = hdfsNewBuilder();
+    if (!bld)
+        return NULL;
+    hdfsBuilderSetNameNode(bld, host);
+    hdfsBuilderSetNameNodePort(bld, port);
+    hdfsBuilderSetUserName(bld, user);
+    return hdfsBuilderConnect(bld);
+}
+
+hdfsFS hdfsConnectAsUserNewInstance(const char* host, tPort port,
+        const char *user)
+{
+    struct hdfsBuilder *bld = hdfsNewBuilder();
+    if (!bld)
+        return NULL;
+    hdfsBuilderSetNameNode(bld, host);
+    hdfsBuilderSetNameNodePort(bld, port);
+    hdfsBuilderSetForceNewInstance(bld);
+    hdfsBuilderSetUserName(bld, user);
+    return hdfsBuilderConnect(bld);
+}
+
+static const struct hadoop_fs_ops *find_fs_impl_by_name(const char *name)
+{
+    const struct hadoop_fs_ops *ops;
+    int i = 0;
+
+    while (1) {
+        ops = g_ops[i++];
+        if (!ops) {
+            fprintf(stderr, "hdfsBuilderConnect: we don't support the '%s' "
+                    "native fs implementation.\n", name);
+            return NULL;
+        }
+        if (strcmp(ops->name, name) == 0) {
+            return ops;
+        }
+    }
+}
+
+static struct hadoop_err *hdfs_builder_load_conf(struct hdfsBuilder *hdfs_bld)
+{
+    struct hadoop_err *err;
+    struct hconf_builder *conf_bld = NULL;
+    const char *classpath;
+    struct hdfsBuilderConfOpt *opt;
+
+    err = hconf_builder_alloc(&conf_bld);
+    if (err) {
+        goto done;
+    }
+
+    // Load the XML files.
+    classpath = getenv("CLASSPATH");
+    if (!classpath) {
+        classpath = ".";
+    }
+    err = hconf_builder_load_xmls(conf_bld, HDFS_XML_NAMES, classpath);
+    if (err) {
+        goto done;
+    }
+
+    // Add the options that were specified by hdfsBuilderConfSetStr.
+    for (opt = hdfs_bld->opts; opt; opt = opt->next) {
+        hconf_builder_set(conf_bld, opt->key, opt->val);
+    }
+
+    // Create the conf object.
+    err = hconf_build(conf_bld, &hdfs_bld->hconf);
+    conf_bld = NULL;
+    if (err)
+        goto done;
+done:
+    if (conf_bld) {
+        hconf_builder_free(conf_bld);
+    }
+    if (err)
+        return err;
+    return NULL;
+}
+
+static struct hadoop_err *hdfs_builder_parse_conn_uri(
+                                    struct hdfsBuilder *hdfs_bld)
+{
+    int ret;
+    uint16_t port;
+    const char *uri_str;
+    char *malloced_uri_str = NULL;
+    UriParserStateA uri_state;
+    UriUriA uri;
+    struct hadoop_err *err = NULL;
+
+    memset(&uri_state, 0, sizeof(uri_state));
+    uri_str = hdfs_bld->nn;
+    if (uri_str) {
+        // If the connection URI was set via hdfsBuilderSetNameNode, it may
+        // not be a real URI, but just a <hostname>:<port> pair.  This won't
+        // parse correctly unless we add a hdfs:// scheme in front of it.
+        if ((!index(uri_str, '/')) && (index(uri_str, ':'))) {
+            if (asprintf(&malloced_uri_str, "hdfs://%s", uri_str) < 0) {
+                malloced_uri_str = NULL;
+                err = hadoop_lerr_alloc(ENOMEM, "uri_parse: OOM "
+                                        "adding default scheme");
+                goto done;
+            }
+            uri_str = malloced_uri_str;
+        }
+    } else {
+        uri_str = hconf_get(hdfs_bld->hconf, "fs.defaultFS");
+        if (!uri_str) {
+            uri_str = "file:///";
+        }
+    }
+    err = uri_parse_abs(uri_str, &uri_state, &uri, DEFAULT_SCHEME);
+    if (err)
+        goto done;
+    err = uri_get_scheme(&uri, &hdfs_bld->uri_scheme);
+    if (err)
+        goto done; 
+    // Get the user_info.  We default to the userName passed in to the hdfs
+    // builder.
+    err = uri_get_user_info(&uri, &hdfs_bld->uri_user_info);
+    if (err)
+        goto done;
+    if (hdfs_bld->uri_user_info[0] == '\0') {
+        // If the URI didn't include any user info, fill it in from the
+        // current effective user name.
+        free(hdfs_bld->uri_user_info);
+        hdfs_bld->uri_user_info = NULL;
+        ret = geteuid_string(&hdfs_bld->uri_user_info);
+        if (ret) {
+            err = hadoop_lerr_alloc(ret, "geteuid_string failed: error "
+                                    "%d", ret);
+            goto done;
+        }
+    }
+    // Get the authority, which we typically treat as a hostname.
+    err = uri_get_authority(&uri, &hdfs_bld->uri_authority);
+    if (err)
+        goto done;
+    // Get the port, or 0.
+    err = uri_get_port(&uri, &port);
+    if (err)
+        goto done;
+    fprintf(stderr, "hdfs_builder_parse_conn_uri: "
+            "uri_scheme=%s, uri_user_info=%s, "
+            "uri_authority=%s, port=%d\n",
+            hdfs_bld->uri_scheme, hdfs_bld->uri_user_info,
+            hdfs_bld->uri_authority, port);
+    // The URI's port overrides the port supplied via
+    // hdfsBuilderSetNameNodePort.
+    if (port) {
+        hdfs_bld->port = port;
+    }
+    err = NULL;
+
+done:
+    free(malloced_uri_str);
+    if (uri_state.uri) {
+        uriFreeUriMembersA(&uri);
+    }
+    return err;
+}
+
+hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
+{
+    struct hadoop_err *err;
+    hdfsFS fs = NULL;
+    const char *fs_list_val;
+    char *fs_list_key = NULL, *fs_list_val_copy = NULL, *ptr;
+    char *fs_impl_name;
+
+    //
+    // Load the configuration from XML.
+    //
+    // The hconf object will be available to all native FS implementations to
+    // use in their connect methods.
+    //
+    err = hdfs_builder_load_conf(bld);
+    if (err)
+        goto done;
+
+    //
+    // Determine the URI we should connect to.  It gets a bit complicated
+    // because of all the defaults.
+    //
+    err = hdfs_builder_parse_conn_uri(bld);
+    if (err)
+        goto done;
+
+    // Find out the native filesystems we should use for this URI.
+    if (asprintf(&fs_list_key, "%s.native.handler.", bld->uri_scheme) < 0) {
+        fs_list_key = NULL;
+        err = hadoop_lerr_alloc(ENOMEM, "hdfsBuilderConnect: OOM");
+        goto done;
+    }
+    fs_list_val = hconf_get(bld->hconf, fs_list_key);
+    if (!fs_list_val) {
+        fs_list_val = hconf_get(bld->hconf, "default.native.handler");
+        if (!fs_list_val) {
+            fs_list_val = DEFAULT_NATIVE_HANDLERS;
+        }
+    }
+    fs_list_val_copy = strdup(fs_list_val);
+    if (!fs_list_val_copy) {
+        err = hadoop_lerr_alloc(ENOMEM, "hdfsBuilderConnect: OOM");
+        goto done;
+    }
+    // Give each native filesystem implementation a shot at connecting.
+    for (fs_impl_name = strtok_r(fs_list_val_copy, ",", &ptr); fs_impl_name;
+                fs_impl_name = strtok_r(NULL, ",", &ptr)) {
+        const struct hadoop_fs_ops *ops = find_fs_impl_by_name(fs_impl_name);
+        if (!ops)
+            continue;
+        if (err)
+            hadoop_err_free(err);
+        err = ops->connect(bld, &fs);
+        if (!err) {
+            break;
+        }
+        fprintf(stderr, "hdfsBuilderConnect: %s failed to connect: "
+                "%s (error %d)\n", fs_impl_name, hadoop_err_msg(err),
+                hadoop_err_code(err));
+    }
+
+done:
+    hdfsFreeBuilder(bld);
+    free(fs_list_key);
+    free(fs_list_val_copy);
+    return hadoopfs_errno_and_retptr(err, fs);
+}
+
+struct hdfsBuilder *hdfsNewBuilder(void)
+{
+    struct hdfsBuilder *bld = calloc(1, sizeof(struct hdfsBuilder));
+    if (!bld) {
+        errno = ENOMEM;
+        return NULL;
+    }
+    return bld;
+}
+
+void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld __attribute__((unused)))
+{
+    // Does nothing-- present only for compatibility
+}
+
+void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn)
+{
+    bld->nn = nn;
+}
+
+void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port)
+{
+    bld->port = port;
+}
+
+void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName)
+{
+    bld->userName = userName;
+}
+
+void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder *bld,
+                                       const char *kerbTicketCachePath)
+{
+    bld->kerbTicketCachePath = kerbTicketCachePath;
+}
+
+void hdfsFreeBuilder(struct hdfsBuilder *bld)
+{
+    struct hdfsBuilderConfOpt *cur, *next;
+
+    if (!bld)
+        return;
+    for (cur = bld->opts; cur; ) {
+        next = cur->next;
+        free(cur);
+        cur = next;
+    }
+    hconf_free(bld->hconf);
+    free(bld);
+}
+
+int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
+                          const char *val)
+{
+    struct hdfsBuilderConfOpt *opt, *next;
+
+    opt = calloc(1, sizeof(struct hdfsBuilderConfOpt));
+    if (!opt)
+        return -ENOMEM;
+    next = bld->opts;
+    bld->opts = opt;
+    opt->next = next;
+    opt->key = key;
+    opt->val = val;
+    return 0;
+}
+
+int hdfsConfGetStr(const char *key __attribute__((unused)),
+                   char **val __attribute__((unused)))
+{
+    // FIXME: add configuration stuff
+    errno = ENOTSUP;
+    return -1;
+}
+
+int hdfsConfGetInt(const char *key, int32_t *val)
+{
+    char *str = NULL;
+    int ret;
+
+    ret = hdfsConfGetStr(key, &str);
+    if (ret)
+        return ret;
+    *val = atoi(str);
+    hdfsConfStrFree(str);
+    return 0;
+}
+
+void hdfsConfStrFree(char *val)
+{
+    free(val);
+}
+
+int hdfsDisconnect(hdfsFS fs)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->disconnect(fs);
+}
+
+hdfsFile hdfsOpenFile(hdfsFS fs, const char *path, int flags,
+                      int bufferSize, short replication, tSize blocksize)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->open(fs, path, flags, bufferSize,
+                                 replication, blocksize);
+}
+
+int hdfsCloseFile(hdfsFS fs, hdfsFile file)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->close(fs, file);
+}
+
+int hdfsExists(hdfsFS fs, const char *path)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->exists(fs, path);
+}
+
+int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->seek(fs, file, desiredPos);
+}
+
+tOffset hdfsTell(hdfsFS fs, hdfsFile file)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->tell(fs, file);
+}
+
+tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->read(fs, file, buffer, length);
+}
+
+tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
+                    void* buffer, tSize length)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->pread(fs, file, position, buffer, length);
+}
+
+tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer,
+                tSize length)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->write(fs, file, buffer, length);
+}
+
+int hdfsFlush(hdfsFS fs, hdfsFile file)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->flush(fs, file);
+}
+
+int hdfsHFlush(hdfsFS fs, hdfsFile file)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->hflush(fs, file);
+}
+
+int hdfsHSync(hdfsFS fs, hdfsFile file)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->hsync(fs, file);
+}
+
+int hdfsAvailable(hdfsFS fs, hdfsFile file)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->available(fs, file);
+}
+
+int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
+{
+    struct hadoop_fs_base *src_base = (struct hadoop_fs_base*)srcFS;
+    struct hadoop_fs_base *dst_base = (struct hadoop_fs_base*)dstFS;
+
+    if (src_base->ty != dst_base->ty) {
+        errno = EINVAL;
+        return -1;
+    }
+    return g_ops[src_base->ty]->copy(srcFS, src, dstFS, dst);
+}
+
+int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
+{
+    struct hadoop_fs_base *src_base = (struct hadoop_fs_base*)srcFS;
+    struct hadoop_fs_base *dst_base = (struct hadoop_fs_base*)dstFS;
+
+    if (src_base->ty != dst_base->ty) {
+        errno = EINVAL;
+        return -1;
+    }
+    return g_ops[src_base->ty]->move(srcFS, src, dstFS, dst);
+}
+
+int hdfsDelete(hdfsFS fs, const char* path, int recursive)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->unlink(fs, path, recursive);
+}
+
+int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->rename(fs, oldPath, newPath);
+}
+
+char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->get_working_directory(fs, buffer, bufferSize);
+}
+
+int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->set_working_directory(fs, path);
+}
+
+int hdfsCreateDirectory(hdfsFS fs, const char* path)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->mkdir(fs, path);
+}
+
+int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->set_replication(fs, path, replication);
+}
+
+hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->list_directory(fs, path, numEntries);
+}
+
+hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->get_path_info(fs, path);
+}
+
+void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
+{
+    //Free the mName, mOwner, and mGroup
+    int i;
+    for (i=0; i < numEntries; ++i) {
+        release_file_info_entry(hdfsFileInfo + i);
+    }
+
+    //Free entire block
+    free(hdfsFileInfo);
+}
+
+char*** hdfsGetHosts(hdfsFS fs, const char* path, 
+          tOffset start, tOffset length)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->get_hosts(fs, path, start, length);
+}
+
+void hdfsFreeHosts(char ***blockHosts)
+{
+    int i, j;
+    for (i=0; blockHosts[i]; i++) {
+        for (j=0; blockHosts[i][j]; j++) {
+            free(blockHosts[i][j]);
+        }
+        free(blockHosts[i]);
+    }
+    free(blockHosts);
+}
+
+tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->get_default_block_size(fs);
+}
+
+tOffset hdfsGetDefaultBlockSizeAtPath(hdfsFS fs, const char *path)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->get_default_block_size_at_path(fs, path);
+}
+
+tOffset hdfsGetCapacity(hdfsFS fs)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->get_capacity(fs);
+}
+
+tOffset hdfsGetUsed(hdfsFS fs)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->get_used(fs);
+}
+
+int hdfsChown(hdfsFS fs, const char* path, const char *owner,
+              const char *group)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->chown(fs, path, owner, group);
+}
+
+int hdfsChmod(hdfsFS fs, const char* path, short mode)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->chmod(fs, path, mode);
+}
+
+int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
+{
+    struct hadoop_fs_base *base = (struct hadoop_fs_base*)fs;
+    return g_ops[base->ty]->utime(fs, path, mtime, atime);
+}
+
+struct hadoopRzOptions *hadoopRzOptionsAlloc(void)
+{
+    struct hadoopRzOptions *opts;
+    opts = calloc(1, sizeof(*opts));
+    if (!opts) {
+        errno = ENOMEM;
+        return NULL;
+    }
+    return opts;
+}
+
+int hadoopRzOptionsSetSkipChecksum(struct hadoopRzOptions *opts, int skip)
+{
+    opts->skip_checksums = skip;
+    return 0;
+}
+
+int hadoopRzOptionsSetByteBufferPool(
+            struct hadoopRzOptions *opts, const char *className)
+{
+    return strdupto(&opts->pool_name, className);
+}
+
+void hadoopRzOptionsFree(struct hadoopRzOptions *opts)
+{
+    if (opts) {
+        if (opts->cache_teardown_cb) {
+            opts->cache_teardown_cb(opts->cache);
+            opts->cache = NULL;
+        }
+        free(opts->pool_name);
+        free(opts);
+    }
+}
+
+struct hadoopRzBuffer* hadoopReadZero(hdfsFile file,
+            struct hadoopRzOptions *opts, int32_t maxLength)
+{
+    struct hadoop_file_base *base = (struct hadoop_file_base*)file;
+    return g_ops[base->ty]->read_zero(file, opts, maxLength);
+}
+
+int32_t hadoopRzBufferLength(const struct hadoopRzBuffer *buf)
+{
+    struct hadoop_rz_buffer_base *bbuf = (struct hadoop_rz_buffer_base *)buf;
+    return bbuf->length;
+}
+
+const void *hadoopRzBufferGet(const struct hadoopRzBuffer *buf)
+{
+    struct hadoop_rz_buffer_base *bbuf = (struct hadoop_rz_buffer_base *)buf;
+    return bbuf->ptr;
+}
+
+void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer)
+{
+    struct hadoop_file_base *base = (struct hadoop_file_base*)file;
+    g_ops[base->ty]->rz_buffer_free(file, buffer);
+}
+
+int hdfsFileUsesDirectRead(struct hdfsFile_internal *file)
+{
+    struct hadoop_file_base *base = (struct hadoop_file_base*)file;
+    return g_ops[base->ty]->file_uses_direct_read(file);
+}
+
+void hdfsFileDisableDirectRead(struct hdfsFile_internal *file)
+{
+    struct hadoop_file_base *base = (struct hadoop_file_base*)file;
+    g_ops[base->ty]->file_disable_direct_read(file);
+}
+
+// vim: ts=4:sw=4:et
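
Putting the dispatch layer above together, a client uses the standard libhdfs builder API: hdfsBuilderConnect() loads the XML configuration, parses the connection URI, and tries each configured native filesystem implementation in turn. A hedged usage sketch follows; the namenode host and port are placeholders, not values the library requires.

    #include <stdio.h>
    #include <stdlib.h>

    #include "fs/hdfs.h"

    int main(void)
    {
        struct hdfsBuilder *bld = hdfsNewBuilder();
        if (!bld)
            return EXIT_FAILURE;
        // Placeholder namenode address; a bare host gets the hdfs:// scheme
        // prepended by hdfs_builder_parse_conn_uri().
        hdfsBuilderSetNameNode(bld, "nn1.example.com");
        hdfsBuilderSetNameNodePort(bld, 8020);
        // hdfsBuilderConnect() frees the builder and sets errno on failure.
        hdfsFS fs = hdfsBuilderConnect(bld);
        if (!fs) {
            perror("hdfsBuilderConnect");
            return EXIT_FAILURE;
        }
        hdfsDisconnect(fs);
        return EXIT_SUCCESS;
    }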

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/fs.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/fs.h?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/fs.h (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/fs.h Thu Jun 12 19:56:23 2014
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_NATIVE_CORE_FS_H
+#define HADOOP_NATIVE_CORE_FS_H
+
+#include "fs/hdfs.h"
+
+#include <inttypes.h>
+
+struct hadoop_err;
+struct hconf;
+
+/**
+ * fs.h
+ *
+ * This is the __private__ API for native Hadoop filesystems.  (The public API
+ * is in hdfs.h.) Native Hadoop filesystems such as JniFS or NDFS implement the
+ * APIs in this file to provide a uniform experience to users.
+ *
+ * The general pattern here is:
+ * 1. The client makes a call to a libhdfs API function.
+ * 2. fs.c locates the appropriate function in hadoop_fs_ops and calls it.
+ * 3. Some filesystem-specific code implements the operation.
+ *
+ * In C, it is always safe to typecast a structure to the type of the first
+ * element.  This allows fs.c to treat hdfsFS instances as if they were
+ * instances of struct hadoop_file_base.  Other structures with "base" in the
+ * name are intended to be used similarly.  This functionality is similar in
+ * many ways to how "base classes" operate in Java.  The derived class contains
+ * all the elements of the base class, plus some more.
+ *
+ * The structure definitions in this file are private, and users of this library
+ * will not be able to access them.  This file will not be packaged or
+ * distributed... only hdfs.h will.  Thus, it is safe to change any of the APIs
+ * or types in this file without creating compatibility problems.
+ */
+
+/**
+ * Hadoop filesystem types.
+ */
+enum hadoop_fs_ty {
+    HADOOP_FS_TY_JNI = 0,
+    HADOOP_FS_TY_NDFS = 1,
+    HADOOP_FS_TY_NUM,
+};
+
+/**
+ * Base data for Hadoop files.
+ */
+struct hadoop_file_base {
+    // The type of filesystem this file was created by.
+    enum hadoop_fs_ty ty;
+};
+
+/**
+ * Base data for Hadoop FileSystem objects.
+ */
+struct hadoop_fs_base {
+    // The type of this filesystem.
+    enum hadoop_fs_ty ty;
+};
+
+/**
+ * Base data for Hadoop Zero-Copy Read objects.
+ */
+struct hadoopRzOptions {
+    // The name of the ByteBufferPool class we should use when doing a zero-copy
+    // read.
+    char *pool_name;
+
+    // Non-zero to always skip checksums.
+    int skip_checksums;
+
+    // If non-null, this callback will be invoked to tear down the cached data
+    // inside this options structure during hadoopRzOptionsFree.
+    void (*cache_teardown_cb)(void *);
+
+    // The cached data inside this options structure. 
+    void *cache;
+};
+
+/**
+ * Base data for Hadoop Zero-Copy Read buffers.
+ */
+struct hadoop_rz_buffer_base {
+    // The base address the client can start reading at.
+    void *ptr;
+
+    // The maximum valid length of this buffer.
+    int32_t length;
+};
+
+struct hdfsBuilderConfOpt {
+    struct hdfsBuilderConfOpt *next;
+    const char *key;
+    const char *val;
+};
+
+/**
+ * A builder used to create Hadoop filesystem instances.
+ */
+struct hdfsBuilder {
+    const char *nn;
+    uint16_t port;
+    const char *kerbTicketCachePath;
+    const char *userName;
+    struct hdfsBuilderConfOpt *opts;
+    struct hconf *hconf;
+    char *uri_scheme;
+    char *uri_user_info;
+    char *uri_authority;
+    uint16_t uri_port;
+};
+
+/**
+ * Operations which a libhadoopfs filesystem must implement.
+ */
+struct hadoop_fs_ops {
+    const char * const name;
+    int (*file_is_open_for_read)(struct hdfsFile_internal *file);
+    int (*file_is_open_for_write)(struct hdfsFile_internal * file);
+    int (*get_read_statistics)(struct hdfsFile_internal *file, 
+            struct hdfsReadStatistics **stats);
+    struct hadoop_err *(*connect)(struct hdfsBuilder *bld,
+                                  struct hdfs_internal **fs);
+    int (*disconnect)(struct hdfs_internal *fs);
+    struct hdfsFile_internal *(*open)(struct hdfs_internal *fs,
+            const char* uri, int flags, int bufferSize, short replication,
+            int32_t blocksize);
+    int (*close)(struct hdfs_internal *fs, struct hdfsFile_internal *file);
+    int (*exists)(struct hdfs_internal *fs, const char *uri);
+    int (*seek)(struct hdfs_internal *fs, struct hdfsFile_internal *file, 
+            int64_t desiredPos);
+    int64_t (*tell)(struct hdfs_internal *fs, struct hdfsFile_internal *file);
+    int32_t (*read)(struct hdfs_internal *fs, struct hdfsFile_internal *file,
+            void* buffer, int32_t length);
+    int32_t (*pread)(struct hdfs_internal *fs, struct hdfsFile_internal *file,
+            int64_t position, void *buffer, int32_t length);
+    int32_t (*write)(struct hdfs_internal *fs, struct hdfsFile_internal *file,
+            const void* buffer, int32_t length);
+    int (*flush)(struct hdfs_internal *fs, struct hdfsFile_internal *file);
+    int (*hflush)(struct hdfs_internal *fs, struct hdfsFile_internal *file);
+    int (*hsync)(struct hdfs_internal *fs, struct hdfsFile_internal *file);
+    int (*available)(struct hdfs_internal * fs, struct hdfsFile_internal *file);
+    int (*copy)(struct hdfs_internal *srcFS, const char *src,
+            struct hdfs_internal *dstFS, const char *dst);
+    int (*move)(struct hdfs_internal *srcFS, const char *src,
+            struct hdfs_internal *dstFS, const char *dst);
+    int (*unlink)(struct hdfs_internal *fs, const char *path, int recursive);
+    int (*rename)(struct hdfs_internal *fs, const char *old_uri,
+            const char* new_uri);
+    char* (*get_working_directory)(struct hdfs_internal *fs, char *buffer,
+            size_t bufferSize);
+    int (*set_working_directory)(struct hdfs_internal *fs, const char* uri);
+    int (*mkdir)(struct hdfs_internal *fs, const char* uri);
+    int (*set_replication)(struct hdfs_internal *fs, const char* uri,
+            int16_t replication);
+    hdfsFileInfo *(*list_directory)(struct hdfs_internal *fs,
+            const char* uri, int *numEntries);
+    hdfsFileInfo *(*get_path_info)(struct hdfs_internal *fs, const char* uri);
+    hdfsFileInfo *(*stat)(struct hdfs_internal *fs, const char* uri);
+    void (*free_file_info)(hdfsFileInfo *, int numEntries);
+    char*** (*get_hosts)(struct hdfs_internal *fs, const char* uri, 
+            int64_t start, int64_t length);
+    int64_t (*get_default_block_size)(struct hdfs_internal *fs);
+    int64_t (*get_default_block_size_at_path)(struct hdfs_internal *fs,
+            const char *uri);
+    int64_t (*get_capacity)(struct hdfs_internal *fs);
+    int64_t (*get_used)(struct hdfs_internal *fs);
+    int (*chown)(struct hdfs_internal *fs, const char *uri, const char *owner,
+            const char *group);
+    int (*chmod)(struct hdfs_internal *fs, const char* uri, short mode);
+    int (*utime)(struct hdfs_internal *fs, const char* uri,
+            int64_t mtime, int64_t atime);
+    struct hadoopRzBuffer* (*read_zero)(struct hdfsFile_internal *file,
+                struct hadoopRzOptions *opts, int32_t maxLength);
+    void (*rz_buffer_free)(struct hdfsFile_internal *file,
+                        struct hadoopRzBuffer *buffer);
+
+    // For testing
+    int (*file_uses_direct_read)(struct hdfsFile_internal *fs);
+    void (*file_disable_direct_read)(struct hdfsFile_internal *file);
+};
+
+#endif
+
+// vim: ts=4:sw=4:et
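
The "first element" casting idiom described in the fs.h comment above is the common C technique for emulating base classes. A self-contained sketch is below; the struct and function names are invented for illustration and are not part of the library.

    #include <stdio.h>

    enum widget_ty { WIDGET_TY_A = 0, WIDGET_TY_B = 1 };

    struct widget_base {
        enum widget_ty ty;          // always the first member
    };

    struct widget_a {
        struct widget_base base;    // casting widget_a* to widget_base* is safe
        int extra_a;
    };

    static void describe(struct widget_base *w)
    {
        // Dispatch on the type tag stored in the shared first member,
        // just as fs.c dispatches through g_ops[base->ty].
        printf("widget type = %d\n", w->ty);
    }

    int main(void)
    {
        struct widget_a a = { { WIDGET_TY_A }, 42 };
        describe((struct widget_base *)&a);
        return 0;
    }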


