hadoop-common-commits mailing list archives

From whe...@apache.org
Subject [17/19] hadoop git commit: HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. Contributed by Haohui Mai.
Date Wed, 07 Oct 2015 07:16:25 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
new file mode 100644
index 0000000..87550ae
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
@@ -0,0 +1,552 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfs.h"
+#include "native_mini_dfs.h"
+
+#include <inttypes.h>
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+static struct NativeMiniDfsCluster *cluster;
+
+void permission_disp(short permissions, char *rtr)
+{
+    rtr[9] = '\0';
+    int i;
+    short perm;
+    for(i = 2; i >= 0; i--)
+    {
+        perm = permissions >> (i * 3);
+        rtr[0] = perm & 4 ? 'r' : '-';
+        rtr[1] = perm & 2 ? 'w' : '-';
+        rtr[2] = perm & 1 ? 'x' : '-';
+        rtr += 3;
+    }
+}
+
+int main(int argc, char **argv)
+{
+    char buffer[32];
+    tSize num_written_bytes;
+    const char* slashTmp = "/tmp";
+    int nnPort;
+    char *rwTemplate, *rwTemplate2, *newDirTemplate,
+    *appendTemplate, *userTemplate, *rwPath = NULL;
+    const char* fileContents = "Hello, World!";
+    const char* nnHost = NULL;
+    
+    if (argc != 2) {
+        fprintf(stderr, "usage: test_libwebhdfs_ops <username>\n");
+        exit(1);
+    }
+    
+    struct NativeMiniDfsConf conf = {
+        .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070,
+    };
+    cluster = nmdCreate(&conf);
+    if (!cluster) {
+        fprintf(stderr, "Failed to create the NativeMiniDfsCluster.\n");
+        exit(1);
+    }
+    if (nmdWaitClusterUp(cluster)) {
+        fprintf(stderr, "Error when waiting for cluster to be ready.\n");
+        exit(1);
+    }
+    if (nmdGetNameNodeHttpAddress(cluster, &nnPort, &nnHost)) {
+        fprintf(stderr, "Error when retrieving namenode host address.\n");
+        exit(1);
+    }
+    
+    hdfsFS fs = hdfsConnectAsUserNewInstance(nnHost, nnPort, argv[1]);
+    if(!fs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+        exit(-1);
+    }
+    
+    {
+        // Write tests
+        rwTemplate = strdup("/tmp/helloWorldXXXXXX");
+        if (!rwTemplate) {
+            fprintf(stderr, "Failed to create rwTemplate!\n");
+            exit(1);
+        }
+        rwPath = mktemp(rwTemplate);
+        // hdfsOpenFile
+        hdfsFile writeFile = hdfsOpenFile(fs, rwPath,
+                                          O_WRONLY|O_CREAT, 0, 0, 0);
+
+        if(!writeFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", rwPath);
+            exit(1);
+        }
+        fprintf(stderr, "Opened %s for writing successfully...\n", rwPath);
+        // hdfsWrite
+        num_written_bytes = hdfsWrite(fs, writeFile, (void*)fileContents,
+                                      (int) strlen(fileContents) + 1);
+        if (num_written_bytes != strlen(fileContents) + 1) {
+            fprintf(stderr, "Failed to write correct number of bytes - "
+                    "expected %d, got %d\n",
+                    (int)(strlen(fileContents) + 1), (int) num_written_bytes);
+            exit(1);
+        }
+        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+        
+        // hdfsTell
+        tOffset currentPos = -1;
+        if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
+            fprintf(stderr,
+                    "Failed to get current file position correctly. Got %"
+                    PRId64 "!\n", currentPos);
+            exit(1);
+        }
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
+        
+        hdfsCloseFile(fs, writeFile);
+        // Done test write
+    }
+    
+    sleep(1);
+    
+    {
+        //Read tests
+        int available = 0, exists = 0;
+        
+        // hdfsExists
+        exists = hdfsExists(fs, rwPath);
+        if (exists) {
+            fprintf(stderr, "Failed to validate existence of %s\n", rwPath);
+            exists = hdfsExists(fs, rwPath);
+            if (exists) {
+                fprintf(stderr,
+                        "Still failed to validate existence of %s\n", rwPath);
+                exit(1);
+            }
+        }
+        
+        hdfsFile readFile = hdfsOpenFile(fs, rwPath, O_RDONLY, 0, 0, 0);
+        if (!readFile) {
+            fprintf(stderr, "Failed to open %s for reading!\n", rwPath);
+            exit(1);
+        }
+        if (!hdfsFileIsOpenForRead(readFile)) {
+            fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
+                    "with O_RDONLY, and it did not show up as 'open for "
+                    "read'\n");
+            exit(1);
+        }
+        
+        available = hdfsAvailable(fs, readFile);
+        fprintf(stderr, "hdfsAvailable: %d\n", available);
+        
+        // hdfsSeek, hdfsTell
+        tOffset seekPos = 1;
+        if(hdfsSeek(fs, readFile, seekPos)) {
+            fprintf(stderr, "Failed to seek %s for reading!\n", rwPath);
+            exit(1);
+        }
+        
+        tOffset currentPos = -1;
+        if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
+            fprintf(stderr,
+                    "Failed to get current file position correctly! Got %"
+                    PRId64 "!\n", currentPos);
+
+            exit(1);
+        }
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
+        
+        if(hdfsSeek(fs, readFile, 0)) {
+            fprintf(stderr, "Failed to seek %s for reading!\n", rwPath);
+            exit(1);
+        }
+        
+        // hdfsRead
+        memset(buffer, 0, sizeof(buffer));
+        tSize num_read_bytes = hdfsRead(fs, readFile, buffer, sizeof(buffer));
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr, "Failed to read (direct). "
+                    "Expected %s but got %s (%d bytes)\n",
+                    fileContents, buffer, num_read_bytes);
+            exit(1);
+        }
+        fprintf(stderr, "Read following %d bytes:\n%s\n",
+                num_read_bytes, buffer);
+        
+        if (hdfsSeek(fs, readFile, 0L)) {
+            fprintf(stderr, "Failed to seek to file start!\n");
+            exit(1);
+        }
+        
+        // hdfsPread
+        memset(buffer, 0, strlen(fileContents) + 1);
+        num_read_bytes = hdfsPread(fs, readFile, 0, buffer, sizeof(buffer));
+        fprintf(stderr, "Read following %d bytes:\n%s\n",
+                num_read_bytes, buffer);
+        
+        hdfsCloseFile(fs, readFile);
+        // Done test read
+    }
+    
+    int totalResult = 0;
+    int result = 0;
+    {
+        //Generic file-system operations
+        char *srcPath = rwPath;
+        char buffer[256];
+        const char *resp;
+        rwTemplate2 = strdup("/tmp/helloWorld2XXXXXX");
+        if (!rwTemplate2) {
+            fprintf(stderr, "Failed to create rwTemplate2!\n");
+            exit(1);
+        }
+        char *dstPath = mktemp(rwTemplate2);
+        newDirTemplate = strdup("/tmp/newdirXXXXXX");
+        if (!newDirTemplate) {
+            fprintf(stderr, "Failed to create newDirTemplate!\n");
+            exit(1);
+        }
+        char *newDirectory = mktemp(newDirTemplate);
+        
+        // hdfsRename
+        fprintf(stderr, "hdfsRename: %s\n",
+                ((result = hdfsRename(fs, rwPath, dstPath)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsRename back: %s\n",
+                ((result = hdfsRename(fs, dstPath, srcPath)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        
+        // hdfsCreateDirectory
+        fprintf(stderr, "hdfsCreateDirectory: %s\n",
+                ((result = hdfsCreateDirectory(fs, newDirectory)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        
+        // hdfsSetReplication
+        fprintf(stderr, "hdfsSetReplication: %s\n",
+                ((result = hdfsSetReplication(fs, srcPath, 1)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+
+        // hdfsGetWorkingDirectory, hdfsSetWorkingDirectory
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n",
+                ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ?
+                 buffer : "Failed!"));
+        totalResult += (resp ? 0 : 1);
+
+        const char* path[] = {"/foo", "/foo/bar", "foobar", "//foo/bar//foobar",
+                              "foo//bar", "foo/bar///", "/", "////"};
+        int i;
+        for (i = 0; i < 8; i++) {
+            fprintf(stderr, "hdfsSetWorkingDirectory: %s, %s\n",
+                    ((result = hdfsSetWorkingDirectory(fs, path[i])) ?
+                     "Failed!" : "Success!"),
+                    hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer)));
+            totalResult += result;
+        }
+
+        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n",
+                ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n",
+                ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ?
+                 buffer : "Failed!"));
+        totalResult += (resp ? 0 : 1);
+
+        // hdfsGetPathInfo
+        hdfsFileInfo *fileInfo = NULL;
+        if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
+            fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
+            fprintf(stderr, "Name: %s, ", fileInfo->mName);
+            fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
+            fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
+            fprintf(stderr, "BlockSize: %"PRId64", ", fileInfo->mBlockSize);
+            fprintf(stderr, "Size: %"PRId64", ", fileInfo->mSize);
+            fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod));
+            fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
+            fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
+            char permissions[10];
+            permission_disp(fileInfo->mPermissions, permissions);
+            fprintf(stderr, "Permissions: %d (%s)\n",
+                    fileInfo->mPermissions, permissions);
+            hdfsFreeFileInfo(fileInfo, 1);
+        } else {
+            totalResult++;
+            fprintf(stderr, "hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
+        }
+        
+        // hdfsListDirectory
+        hdfsFileInfo *fileList = 0;
+        int numEntries = 0;
+        if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
+            int i = 0;
+            for(i=0; i < numEntries; ++i) {
+                fprintf(stderr, "Name: %s, ", fileList[i].mName);
+                fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
+                fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
+                fprintf(stderr, "BlockSize: %"PRId64", ", fileList[i].mBlockSize);
+                fprintf(stderr, "Size: %"PRId64", ", fileList[i].mSize);
+                fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
+                fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
+                fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
+                char permissions[10];
+                permission_disp(fileList[i].mPermissions, permissions);
+                fprintf(stderr, "Permissions: %d (%s)\n",
+                        fileList[i].mPermissions, permissions);
+            }
+            hdfsFreeFileInfo(fileList, numEntries);
+        } else {
+            if (errno) {
+                totalResult++;
+                fprintf(stderr, "waah! hdfsListDirectory - FAILED!\n");
+            } else {
+                fprintf(stderr, "Empty directory!\n");
+            }
+        }
+        
+        char *newOwner = "root";
+        // newPerm is used for the hdfsChmod test below; /tmp itself is set to
+        // 0777 later so that a connection as user "nobody" can write to it
+        short newPerm = 0666;
+        
+        // hdfsChown
+        fprintf(stderr, "hdfsChown: %s\n",
+                ((result = hdfsChown(fs, rwPath, NULL, "users")) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsChown: %s\n",
+                ((result = hdfsChown(fs, rwPath, newOwner, NULL)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        // hdfsChmod
+        fprintf(stderr, "hdfsChmod: %s\n",
+                ((result = hdfsChmod(fs, rwPath, newPerm)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        
+        sleep(2);
+        tTime newMtime = time(NULL);
+        tTime newAtime = time(NULL);
+        
+        // utime write
+        fprintf(stderr, "hdfsUtime: %s\n",
+                ((result = hdfsUtime(fs, rwPath, newMtime, newAtime)) ?
+                 "Failed!" : "Success!"));        
+        totalResult += result;
+        
+        // chown/chmod/utime read
+        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, rwPath);
+        
+        fprintf(stderr, "hdfsChown read: %s\n",
+                ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        
+        fprintf(stderr, "hdfsChmod read: %s\n",
+                ((result = (finfo->mPermissions != newPerm)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        
+        // will later use /tmp/ as a different user so enable it
+        fprintf(stderr, "hdfsChmod: %s\n",
+                ((result = hdfsChmod(fs, slashTmp, 0777)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        
+        fprintf(stderr,"newMTime=%ld\n",newMtime);
+        fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
+        
+        
+        fprintf(stderr, "hdfsUtime read (mtime): %s\n",
+                ((result = (finfo->mLastMod != newMtime / 1000)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        
+        // Clean up
+        hdfsFreeFileInfo(finfo, 1);
+        fprintf(stderr, "hdfsDelete: %s\n",
+                ((result = hdfsDelete(fs, newDirectory, 1)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n",
+                ((result = hdfsDelete(fs, srcPath, 1)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsExists: %s\n",
+                ((result = hdfsExists(fs, newDirectory)) ?
+                 "Success!" : "Failed!"));
+        totalResult += (result ? 0 : 1);
+        // Done test generic operations
+    }
+    
+    {
+        // Test Appends
+        appendTemplate = strdup("/tmp/appendsXXXXXX");
+        if (!appendTemplate) {
+            fprintf(stderr, "Failed to create appendTemplate!\n");
+            exit(1);
+        }
+        char *appendPath = mktemp(appendTemplate);
+        const char* helloBuffer = "Hello,";
+        hdfsFile writeFile = NULL;
+        
+        // Create
+        writeFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
+        if(!writeFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
+            exit(1);
+        }
+        fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
+        
+        num_written_bytes = hdfsWrite(fs, writeFile, helloBuffer,
+                                      (int) strlen(helloBuffer));
+        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+        hdfsCloseFile(fs, writeFile);
+        
+        fprintf(stderr, "hdfsSetReplication: %s\n",
+                ((result = hdfsSetReplication(fs, appendPath, 1)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        
+        // Re-Open for Append
+        writeFile = hdfsOpenFile(fs, appendPath, O_WRONLY | O_APPEND, 0, 0, 0);
+        if(!writeFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
+            exit(1);
+        }
+        fprintf(stderr, "Opened %s for appending successfully...\n",
+                appendPath);
+        
+        helloBuffer = " World";
+        num_written_bytes = hdfsWrite(fs, writeFile, helloBuffer,
+                                      (int)strlen(helloBuffer) + 1);
+        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+        
+        hdfsCloseFile(fs, writeFile);
+
+        // Check size
+        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, appendPath);
+        fprintf(stderr, "fileinfo->mSize: == total %s\n",
+                ((result = (finfo->mSize == strlen("Hello, World") + 1)) ?
+                 "Success!" : "Failed!"));
+        totalResult += (result ? 0 : 1);
+        
+        // Read and check data
+        hdfsFile readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
+        if (!readFile) {
+            fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
+            exit(1);
+        }
+        
+        tSize num_read_bytes = hdfsRead(fs, readFile, buffer, sizeof(buffer));
+        fprintf(stderr, "Read following %d bytes:\n%s\n",
+                num_read_bytes, buffer);
+        fprintf(stderr, "read == Hello, World %s\n",
+                (result = (strcmp(buffer, "Hello, World") == 0)) ?
+                "Success!" : "Failed!");
+        hdfsCloseFile(fs, readFile);
+        
+        // Cleanup
+        fprintf(stderr, "hdfsDelete: %s\n",
+                ((result = hdfsDelete(fs, appendPath, 1)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        // Done test appends
+    }
+    
+    totalResult += (hdfsDisconnect(fs) != 0);
+    
+    {
+        //
+        // Now test connecting as a specific user. This is only meant to test
+        // that we connected as that user, not to test the actual fs user
+        // capabilities. Thus we just create a file and check that the owner
+        // is correct.
+        const char *tuser = "nobody";
+        userTemplate = strdup("/tmp/usertestXXXXXX");
+        if (!userTemplate) {
+            fprintf(stderr, "Failed to create userTemplate!\n");
+            exit(1);
+        }
+        char* userWritePath = mktemp(userTemplate);
+        hdfsFile writeFile = NULL;
+        
+        fs = hdfsConnectAsUserNewInstance("default", 50070, tuser);
+        if(!fs) {
+            fprintf(stderr,
+                    "Oops! Failed to connect to hdfs as user %s!\n",tuser);
+            exit(1);
+        }
+        
+        writeFile = hdfsOpenFile(fs, userWritePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        if(!writeFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", userWritePath);
+            exit(1);
+        }
+        fprintf(stderr, "Opened %s for writing successfully...\n",
+                userWritePath);
+        
+        num_written_bytes = hdfsWrite(fs, writeFile, fileContents,
+                                      (int)strlen(fileContents) + 1);
+        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+        hdfsCloseFile(fs, writeFile);
+        
+        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, userWritePath);
+        if (finfo) {
+            fprintf(stderr, "hdfs new file user is correct: %s\n",
+                    ((result = (strcmp(finfo->mOwner, tuser) != 0)) ?
+                     "Failed!" : "Success!"));
+        } else {
+            fprintf(stderr,
+                    "hdfsFileInfo returned by hdfsGetPathInfo is NULL\n");
+            result = -1;
+        }
+        totalResult += result;
+        
+        // Cleanup
+        fprintf(stderr, "hdfsDelete: %s\n",
+                ((result = hdfsDelete(fs, userWritePath, 1)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        // Done test specific user
+    }
+
+    totalResult += (hdfsDisconnect(fs) != 0);
+    
+    // Shutdown the native minidfscluster
+    nmdShutdown(cluster);
+    nmdFree(cluster);
+    
+    fprintf(stderr, "totalResult == %d\n", totalResult);
+    if (totalResult != 0) {
+        return -1;
+    } else {
+        return 0;
+    }
+}
+
+/**
+ * vim: ts=4: sw=4: et:
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
new file mode 100644
index 0000000..4bd3078
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfs.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char **argv) {
+
+    const char* rfile;
+    tSize fileTotalSize, bufferSize, curSize, totalReadSize;
+    hdfsFS fs;
+    hdfsFile readFile;
+    char *buffer = NULL;
+    
+    if (argc != 4) {
+        fprintf(stderr, "Usage: test_libwebhdfs_read"
+                " <filename> <filesize> <buffersize>\n");
+        exit(1);
+    }
+    
+    fs = hdfsConnect("localhost", 50070);
+    if (!fs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+        exit(1);
+    }
+    
+    rfile = argv[1];
+    fileTotalSize = strtoul(argv[2], NULL, 10);
+    bufferSize = strtoul(argv[3], NULL, 10);
+    
+    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
+    if (!readFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", rfile);
+        exit(1);
+    }
+    
+    // buffer to hold the data read from the file
+    buffer = malloc(sizeof(char) * bufferSize);
+    if(buffer == NULL) {
+        fprintf(stderr, "Failed to allocate buffer.\n");
+        exit(1);
+    }
+    
+    // read from the file
+    curSize = bufferSize;
+    totalReadSize = 0;
+    for (; (curSize = hdfsRead(fs, readFile, buffer, bufferSize)) == bufferSize; ) {
+        totalReadSize += curSize;
+    }
+    totalReadSize += curSize;
+    
+    fprintf(stderr, "size of the file: %d; reading size: %d\n",
+            fileTotalSize, totalReadSize);
+    
+    free(buffer);
+    hdfsCloseFile(fs, readFile);
+    hdfsDisconnect(fs);
+    
+    return 0;
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
new file mode 100644
index 0000000..6c9a12e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
@@ -0,0 +1,247 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "expect.h"
+#include "hdfs.h"
+#include "native_mini_dfs.h"
+
+#include <errno.h>
+#include <semaphore.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define TLH_MAX_THREADS 100
+
+static struct NativeMiniDfsCluster* cluster;
+
+static const char *user;
+
+struct tlhThreadInfo {
+    /** Thread index */
+    int threadIdx;
+    /** 0 = thread was successful; error code otherwise */
+    int success;
+    /** pthread identifier */
+    pthread_t thread;
+};
+
+static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cluster,
+                                     hdfsFS *fs)
+{
+    int nnPort;
+    const char *nnHost;
+    hdfsFS hdfs;
+    
+    if (nmdGetNameNodeHttpAddress(cluster, &nnPort, &nnHost)) {
+        fprintf(stderr, "Error when retrieving namenode host address.\n");
+        return 1;
+    }
+    
+    hdfs = hdfsConnectAsUser(nnHost, nnPort, user);
+    if(!hdfs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+        return 1;
+    }
+
+    *fs = hdfs;
+    return 0;
+}
+
+static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
+{
+    char prefix[256], tmp[256];
+    hdfsFile file;
+    int ret, expected;
+    hdfsFileInfo *fileInfo;
+    
+    snprintf(prefix, sizeof(prefix), "/tlhData%04d", ti->threadIdx);
+    
+    if (hdfsExists(fs, prefix) == 0) {
+        EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
+    }
+    EXPECT_ZERO(hdfsCreateDirectory(fs, prefix));
+    snprintf(tmp, sizeof(tmp), "%s/file", prefix);
+    
+    EXPECT_NONNULL(hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0));
+    
+    file = hdfsOpenFile(fs, tmp, O_WRONLY, 0, 0, 0);
+    EXPECT_NONNULL(file);
+    
+    /* TODO: implement writeFully and use it here */
+    expected = (int)strlen(prefix);
+    ret = hdfsWrite(fs, file, prefix, expected);
+    if (ret < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsWrite failed and set errno %d\n", ret);
+        return ret;
+    }
+    if (ret != expected) {
+        fprintf(stderr, "hdfsWrite was supposed to write %d bytes, but "
+                "it wrote %d\n", ret, expected);
+        return EIO;
+    }
+    EXPECT_ZERO(hdfsFlush(fs, file));
+    EXPECT_ZERO(hdfsCloseFile(fs, file));
+    
+    /* Let's re-open the file for reading */
+    file = hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0);
+    EXPECT_NONNULL(file);
+    
+    /* TODO: implement readFully and use it here */
+    ret = hdfsRead(fs, file, tmp, sizeof(tmp));
+    if (ret < 0) {
+        ret = errno;
+        fprintf(stderr, "hdfsRead failed and set errno %d\n", ret);
+        return ret;
+    }
+    if (ret != expected) {
+        fprintf(stderr, "hdfsRead was supposed to read %d bytes, but "
+                "it read %d\n", ret, expected);
+        return EIO;
+    }
+    EXPECT_ZERO(memcmp(prefix, tmp, expected));
+    EXPECT_ZERO(hdfsCloseFile(fs, file));
+        
+    snprintf(tmp, sizeof(tmp), "%s/file", prefix);
+    EXPECT_NONZERO(hdfsChown(fs, tmp, NULL, NULL));
+    EXPECT_ZERO(hdfsChown(fs, tmp, NULL, "doop"));
+    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+    
+    EXPECT_ZERO(hdfsChown(fs, tmp, "ha", "doop2"));
+    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
+    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+    
+    EXPECT_ZERO(hdfsChown(fs, tmp, "ha2", NULL));
+    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
+    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+    
+    EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
+    return 0;
+}
+
+static void *testHdfsOperations(void *v)
+{
+    struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
+    hdfsFS fs = NULL;
+    int ret;
+    
+    fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
+            ti->threadIdx);
+    ret = hdfsSingleNameNodeConnect(cluster, &fs);
+    if (ret) {
+        fprintf(stderr, "testHdfsOperations(threadIdx=%d): "
+                "hdfsSingleNameNodeConnect failed with error %d.\n",
+                ti->threadIdx, ret);
+        ti->success = EIO;
+        return NULL;
+    }
+    ti->success = doTestHdfsOperations(ti, fs);
+    if (hdfsDisconnect(fs)) {
+        ret = errno;
+        fprintf(stderr, "hdfsDisconnect error %d\n", ret);
+        ti->success = ret;
+    }
+    return NULL;
+}
+
+static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
+{
+    int i, threadsFailed = 0;
+    const char *sep = "";
+    
+    for (i = 0; i < tlhNumThreads; i++) {
+        if (ti[i].success != 0) {
+            threadsFailed = 1;
+        }
+    }
+    if (!threadsFailed) {
+        fprintf(stderr, "testLibHdfs: all threads succeeded.  SUCCESS.\n");
+        return EXIT_SUCCESS;
+    }
+    fprintf(stderr, "testLibHdfs: some threads failed: [");
+    for (i = 0; i < tlhNumThreads; i++) {
+        if (ti[i].success != 0) {
+            fprintf(stderr, "%s%d", sep, i);
+            sep = ", ";
+        }
+    }
+    fprintf(stderr, "].  FAILURE.\n");
+    return EXIT_FAILURE;
+}
+
+/**
+ * Test that we can write a file with libhdfs and then read it back
+ */
+int main(int argc, const char *args[])
+{
+    int i, tlhNumThreads;
+    const char *tlhNumThreadsStr;
+    struct tlhThreadInfo ti[TLH_MAX_THREADS];
+    
+    if (argc != 2) {
+        fprintf(stderr, "usage: test_libwebhdfs_threaded <username>\n");
+        exit(1);
+    }
+    user = args[1];
+    
+    struct NativeMiniDfsConf conf = {
+        .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070,
+    };
+    cluster = nmdCreate(&conf);
+    EXPECT_NONNULL(cluster);
+    EXPECT_ZERO(nmdWaitClusterUp(cluster));
+    
+    tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
+    if (!tlhNumThreadsStr) {
+        tlhNumThreadsStr = "3";
+    }
+    tlhNumThreads = atoi(tlhNumThreadsStr);
+    if ((tlhNumThreads <= 0) || (tlhNumThreads > TLH_MAX_THREADS)) {
+        fprintf(stderr, "testLibHdfs: must have a number of threads "
+                "between 1 and %d inclusive, not %d\n",
+                TLH_MAX_THREADS, tlhNumThreads);
+        return EXIT_FAILURE;
+    }
+    memset(&ti[0], 0, sizeof(ti));
+    for (i = 0; i < tlhNumThreads; i++) {
+        ti[i].threadIdx = i;
+    }
+    
+    for (i = 0; i < tlhNumThreads; i++) {
+        EXPECT_ZERO(pthread_create(&ti[i].thread, NULL,
+                                   testHdfsOperations, &ti[i]));
+    }
+    for (i = 0; i < tlhNumThreads; i++) {
+        EXPECT_ZERO(pthread_join(ti[i].thread, NULL));
+    }
+    
+    EXPECT_ZERO(nmdShutdown(cluster));
+    nmdFree(cluster);
+    return checkFailures(ti, tlhNumThreads);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
new file mode 100644
index 0000000..652fb86
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfs.h"
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char* writeFileName;
+    off_t fileTotalSize;
+    long long tmpBufferSize;
+    tSize bufferSize = 0, totalWriteSize = 0, toWrite = 0, written = 0;
+    hdfsFile writeFile = NULL;
+    int append, i = 0;
+    char* buffer = NULL;
+    
+    if (argc != 6) {
+        fprintf(stderr, "Usage: test_libwebhdfs_write <filename> <filesize> "
+                "<buffersize> <username> <append>\n");
+        exit(1);
+    }
+    
+    fs = hdfsConnectAsUser("default", 50070, argv[4]);
+    if (!fs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+        exit(1);
+    }
+    
+    writeFileName = argv[1];
+    fileTotalSize = strtoul(argv[2], NULL, 10);
+    tmpBufferSize = strtoul(argv[3], NULL, 10);
+    
+    // sanity check
+    if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
+        fprintf(stderr, "invalid file size %s - must be <= %lu\n",
+                argv[2], ULONG_MAX);
+        exit(1);
+    }
+    
+    // currently libhdfs writes are of tSize which is int32
+    if(tmpBufferSize > INT_MAX) {
+        fprintf(stderr,
+                "invalid buffer size libhdfs API write chunks must be <= %d\n",
+                INT_MAX);
+        exit(1);
+    }
+    
+    bufferSize = (tSize) tmpBufferSize;
+    append = atoi(argv[5]);
+    if (!append) {
+        writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 2, 0);
+    } else {
+        writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND,
+                                 bufferSize, 2, 0);
+    }
+    if (!writeFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
+        exit(1);
+    }
+    
+    // data to be written to the file
+    buffer = malloc(sizeof(char) * bufferSize + 1);
+    if(buffer == NULL) {
+        fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
+        exit(1);
+    }
+    for (i = 0; i < bufferSize; ++i) {
+        buffer[i] = 'a' + (i%26);
+    }
+    buffer[bufferSize] = '\0';
+
+    // write to the file
+    totalWriteSize = 0;
+    for (; totalWriteSize < fileTotalSize; ) {
+        toWrite = bufferSize < (fileTotalSize - totalWriteSize) ?
+                            bufferSize : (fileTotalSize - totalWriteSize);
+        written = hdfsWrite(fs, writeFile, (void*)buffer, toWrite);
+        fprintf(stderr, "written size %d, to write size %d\n",
+                written, toWrite);
+        totalWriteSize += written;
+    }
+    
+    // cleanup
+    free(buffer);
+    hdfsCloseFile(fs, writeFile);
+    fprintf(stderr, "file total size: %" PRId64 ", total write size: %d\n",
+            fileTotalSize, totalWriteSize);
+    hdfsDisconnect(fs);
+    
+    return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
new file mode 100644
index 0000000..0e0db5d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
@@ -0,0 +1,88 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set(CMAKE_SKIP_RPATH TRUE)
+
+# Flatten a list into a string.
+function(flatten_list INPUT SEPARATOR OUTPUT)
+  string (REPLACE ";" "${SEPARATOR}" _TMPS "${INPUT}")
+  set (${OUTPUT} "${_TMPS}" PARENT_SCOPE)
+endfunction()
+
+flatten_list("${FUSE_CFLAGS}" " " FUSE_CFLAGS)
+flatten_list("${FUSE_LDFLAGS}" " " FUSE_LDFLAGS)
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FUSE_CFLAGS}")
+set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} ${FUSE_LDFLAGS}")
+message(STATUS "Building Linux FUSE client.")
+
+include_directories(
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${CMAKE_BINARY_DIR}
+    ${JNI_INCLUDE_DIRS}
+    ${CMAKE_SOURCE_DIR}/main/native
+    ${CMAKE_SOURCE_DIR}/main/native/libhdfs
+    ${OS_DIR}
+    ${FUSE_INCLUDE_DIRS})
+
+add_executable(fuse_dfs
+    fuse_dfs.c
+    fuse_options.c
+    fuse_connect.c
+    fuse_impls_access.c
+    fuse_impls_chmod.c
+    fuse_impls_chown.c
+    fuse_impls_create.c
+    fuse_impls_flush.c
+    fuse_impls_getattr.c
+    fuse_impls_mkdir.c
+    fuse_impls_mknod.c
+    fuse_impls_open.c
+    fuse_impls_read.c
+    fuse_impls_readdir.c
+    fuse_impls_release.c
+    fuse_impls_rename.c
+    fuse_impls_rmdir.c
+    fuse_impls_statfs.c
+    fuse_impls_symlink.c
+    fuse_impls_truncate.c
+    fuse_impls_unlink.c
+    fuse_impls_utimens.c
+    fuse_impls_write.c
+    fuse_init.c
+    fuse_stat_struct.c
+    fuse_trash.c
+    fuse_users.c
+)
+target_link_libraries(fuse_dfs
+    ${FUSE_LIBRARIES}
+    ${JAVA_JVM_LIBRARY}
+    hdfs
+    m
+    pthread
+    rt
+)
+add_executable(test_fuse_dfs
+    test/test_fuse_dfs.c
+    test/fuse_workload.c
+    util/posix_util.c
+)
+target_link_libraries(test_fuse_dfs
+    ${FUSE_LIBRARIES}
+    native_mini_dfs
+    pthread
+)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/doc/README
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/doc/README b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/doc/README
new file mode 100644
index 0000000..1744892
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/doc/README
@@ -0,0 +1,131 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+Fuse-DFS
+
+Supports reads, writes, and directory operations (e.g., cp, ls, more, cat, find, less, rm, mkdir, mv, rmdir).  Things like touch, chmod, chown, and permissions are in the works. Fuse-dfs currently shows all files as owned by nobody.
+
+Contributing
+
+It's pretty straightforward to add functionality to fuse-dfs, as FUSE makes things relatively simple. Some tasks also require augmenting libhdfs to expose more hdfs functionality to C. See [http://issues.apache.org/jira/secure/IssueNavigator.jspa?reset=true&mode=hide&pid=12310240&sorter/order=DESC&sorter/field=priority&resolution=-1&component=12312376  contrib/fuse-dfs JIRAs]
+
+Requirements
+
+ * Hadoop with compiled libhdfs.so
+ * Linux kernel > 2.6.9 with FUSE built in (the default), or FUSE 2.7.x/2.8.x installed. See: [http://fuse.sourceforge.net/]
+ * modprobe fuse to load it
+ * fuse-dfs executable (see below)
+ * fuse_dfs_wrapper.sh installed in /bin or other appropriate location (see below)
+
+
+BUILDING
+
+   1. in HADOOP_PREFIX: `ant compile-libhdfs -Dlibhdfs=1`
+   2. in HADOOP_PREFIX: `ant package` to deploy libhdfs
+   3. in HADOOP_PREFIX: `ant compile-contrib -Dlibhdfs=1 -Dfusedfs=1`
+
+NOTE: for the amd64 architecture, libhdfs will not compile unless you edit
+src/c++/libhdfs/Makefile and set OS_ARCH=amd64
+(probably the same for other architectures too). See [https://issues.apache.org/jira/browse/HADOOP-3344 HADOOP-3344]
+
+Common build problems include not finding the libjvm.so in JAVA_HOME/jre/lib/OS_ARCH/server or not finding fuse in FUSE_HOME or /usr/local.
+
+
+CONFIGURING
+
+Look at all the paths in fuse_dfs_wrapper.sh and either correct them or set them in your environment before running. (Note: for automount and mounting as root, you probably cannot control the environment, so it is best to set them in the wrapper.)
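+
+For example, a minimal sketch of the environment the wrapper expects (the
+paths below are illustrative only -- adjust them to your installation):
+
+  export JAVA_HOME=/usr/lib/jvm/java
+  export HADOOP_PREFIX=/usr/local/hadoop
+  export OS_ARCH=amd64
+  export LD_LIBRARY_PATH=$JAVA_HOME/jre/lib/$OS_ARCH/server:$HADOOP_PREFIX/c++/lib:/usr/local/lib
+  export CLASSPATH=$(echo $HADOOP_PREFIX/*.jar $HADOOP_PREFIX/lib/*.jar | tr ' ' ':')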
+
+INSTALLING
+
+1. `mkdir /export/hdfs` (or wherever you want to mount it)
+
+2. `fuse_dfs_wrapper.sh dfs://hadoop_server1.foo.com:9000 /export/hdfs -d` and from another terminal, try `ls /export/hdfs`
+
+If 2 works, try again dropping the debug mode, i.e., -d
+
+(note - common problems are that you don't have libhdfs.so or libjvm.so or libfuse.so on your LD_LIBRARY_PATH, and your CLASSPATH does not contain hadoop and other required jars.)
+
+Also note, fuse-dfs will write error/warn messages to the syslog - typically in /var/log/messages
+
+You can use fuse-dfs to mount multiple hdfs instances by just changing the server/port name and directory mount point above.
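+
+For example (server names and mount points are illustrative):
+
+  fuse_dfs_wrapper.sh dfs://hadoop_server1.foo.com:9000 /export/hdfs1 -d
+  fuse_dfs_wrapper.sh dfs://hadoop_server2.foo.com:9000 /export/hdfs2 -d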
+
+DEPLOYING
+
+in a root shell do the following:
+
+1. add the following to /etc/fstab
+
+fuse_dfs#dfs://hadoop_server.foo.com:9000 /export/hdfs fuse -oallow_other,rw,-ousetrash,-oinitchecks 0 0
+
+
+2. Mount using: `mount /export/hdfs`. Expect problems with mount not finding fuse_dfs; you will probably need to add it to /sbin. Also expect problems finding the three libraries mentioned above; register their locations with ldconfig.
+
+
+Fuse DFS takes the following mount options (i.e., on the command line or in the comma-separated list of options in /etc/fstab); an example command line is shown after the defaults below:
+
+-oserver=%s  (optional place to specify the server but in fstab use the format above)
+-oport=%d (optional port see comment on server option)
+-oentry_timeout=%d (how long directory entries are cached by fuse in seconds - see fuse docs)
+-oattribute_timeout=%d (how long attributes are cached by fuse in seconds - see fuse docs)
+-oprotected=%s (a colon separated list of directories that fuse-dfs should not allow to be deleted or moved - e.g., /user:/tmp)
+-oprivate (not often used but means only the person who does the mount can use the filesystem - aka ! allow_others in fuse speak)
+-ordbuffer=%d (in KBs how large a buffer should fuse-dfs use when doing hdfs reads)
+ro 
+rw
+-ousetrash (should fuse dfs throw things in /Trash when deleting them)
+-onotrash (opposite of usetrash)
+-odebug (do not daemonize - aka -d in fuse speak)
+-obig_writes (use fuse big_writes option so as to allow better performance of writes on kernels >= 2.6.26)
+-initchecks (have fuse-dfs try to connect to hdfs on startup to ensure everything is ok; recommended to have this on)
+The defaults are:
+
+entry,attribute_timeouts = 60 seconds
+rdbuffer = 10 MB
+protected = null
+debug = 0
+notrash
+private = 0
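+
+For example, a hypothetical command-line mount combining several of these
+options (server and mount point are illustrative):
+
+  fuse_dfs_wrapper.sh dfs://hadoop_server.foo.com:9000 /export/hdfs \
+      -oallow_other -ousetrash -oinitchecks -obig_writes -ordbuffer=20480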
+
+EXPORTING
+
+Add the following to /etc/exports:
+
+/export/hdfs *.foo.com(no_root_squash,rw,fsid=1,sync)
+
+NOTE - you cannot export this with a FUSE module built into the kernel
+- e.g., kernel 2.6.17. For info on this, refer to the FUSE wiki.
+
+
+RECOMMENDATIONS
+
+1. From /bin, `ln -s $HADOOP_PREFIX/contrib/fuse-dfs/fuse_dfs* .`
+
+2. Always start with debug on so you can see if you are missing a classpath or something like that.
+
+3. use -obig_writes
+
+4. use -initchecks
+
+KNOWN ISSUES 
+
+1. If you alias `ls` to `ls --color=auto` and try listing a directory with lots (over thousands) of files, expect it to be slow, and at tens of thousands, expect it to be very very slow. This is because `--color=auto` causes ls to stat every file in the directory. Since fuse-dfs does not cache attribute entries when doing a readdir,
+this is very slow. See [https://issues.apache.org/jira/browse/HADOOP-3797 HADOOP-3797]
+
+2. Writes are approximately 33% slower than the DFSClient. TBD how to optimize this. See [https://issues.apache.org/jira/browse/HADOOP-3805 HADOOP-3805]. Try using -obig_writes if on a >= 2.6.26 kernel; it should perform much better since bigger writes imply less context switching.
+
+3. Reads are ~20-30% slower even with the read buffering. 
+
+4. fuse-dfs and underlying libhdfs have no support for permissions. See [https://issues.apache.org/jira/browse/HADOOP-3536 HADOOP-3536] 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
new file mode 100644
index 0000000..8a2a00b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
@@ -0,0 +1,644 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fuse_connect.h"
+#include "fuse_dfs.h"
+#include "fuse_users.h" 
+#include "libhdfs/hdfs.h"
+#include "util/tree.h"
+
+#include <inttypes.h>
+#include <limits.h>
+#include <poll.h>
+#include <search.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <utime.h>
+
+#define FUSE_CONN_DEFAULT_TIMER_PERIOD      5
+#define FUSE_CONN_DEFAULT_EXPIRY_PERIOD     (5 * 60)
+#define HADOOP_SECURITY_AUTHENTICATION      "hadoop.security.authentication"
+#define HADOOP_FUSE_CONNECTION_TIMEOUT      "hadoop.fuse.connection.timeout"
+#define HADOOP_FUSE_TIMER_PERIOD            "hadoop.fuse.timer.period"
+
+/** Length of the buffer needed by asctime_r */
+#define TIME_STR_LEN 26
+
+struct hdfsConn;
+
+static int hdfsConnCompare(const struct hdfsConn *a, const struct hdfsConn *b);
+static void hdfsConnExpiry(void);
+static void* hdfsConnExpiryThread(void *v);
+
+RB_HEAD(hdfsConnTree, hdfsConn);
+
+enum authConf {
+    AUTH_CONF_UNKNOWN,
+    AUTH_CONF_KERBEROS,
+    AUTH_CONF_OTHER,
+};
+
+struct hdfsConn {
+  RB_ENTRY(hdfsConn) entry;
+  /** How many threads are currently using this hdfsConnection object */
+  int64_t refcnt;
+  /** The username used to make this connection.  Dynamically allocated. */
+  char *usrname;
+  /** Kerberos ticket cache path, or NULL if this is not a kerberized
+   * connection.  Dynamically allocated. */
+  char *kpath;
+  /** mtime of the kpath, if the kpath is non-NULL */
+  time_t kPathMtime;
+  /** nanosecond component of the mtime of the kpath, if the kpath is non-NULL */
+  long kPathMtimeNs;
+  /** The cached libhdfs fs instance */
+  hdfsFS fs;
+  /** Nonzero if this hdfs connection needs to be closed as soon as possible.
+   * If this is true, the connection has been removed from the tree. */
+  int condemned;
+  /** Number of times we should run the expiration timer on this connection
+   * before removing it. */
+  int expirationCount;
+};
+
+RB_GENERATE(hdfsConnTree, hdfsConn, entry, hdfsConnCompare);
+
+/** Current cached libhdfs connections */
+static struct hdfsConnTree gConnTree;
+
+/** The URI used to make our connections.  Dynamically allocated. */
+static char *gUri;
+
+/** The port used to make our connections, or 0. */
+static int gPort;
+
+/** Lock which protects gConnTree and gConnectTimer->active */
+static pthread_mutex_t gConnMutex;
+
+/** Type of authentication configured */
+static enum authConf gHdfsAuthConf;
+
+/** FUSE connection timer expiration period */
+static int32_t gTimerPeriod;
+
+/** FUSE connection expiry period */
+static int32_t gExpiryPeriod;
+
+/** FUSE timer expiration thread */
+static pthread_t gTimerThread;
+
+/** 
+ * Find out what type of authentication the system administrator
+ * has configured.
+ *
+ * @return     the type of authentication, or AUTH_CONF_UNKNOWN on error.
+ */
+static enum authConf discoverAuthConf(void)
+{
+  int ret;
+  char *val = NULL;
+  enum authConf authConf;
+
+  ret = hdfsConfGetStr(HADOOP_SECURITY_AUTHENTICATION, &val);
+  if (ret)
+    authConf = AUTH_CONF_UNKNOWN;
+  else if (!val)
+    authConf = AUTH_CONF_OTHER;
+  else if (!strcmp(val, "kerberos"))
+    authConf = AUTH_CONF_KERBEROS;
+  else
+    authConf = AUTH_CONF_OTHER;
+  free(val);
+  return authConf;
+}
+
+int fuseConnectInit(const char *nnUri, int port)
+{
+  int ret;
+
+  gTimerPeriod = FUSE_CONN_DEFAULT_TIMER_PERIOD;
+  ret = hdfsConfGetInt(HADOOP_FUSE_TIMER_PERIOD, &gTimerPeriod);
+  if (ret) {
+    fprintf(stderr, "Unable to determine the configured value for %s.",
+          HADOOP_FUSE_TIMER_PERIOD);
+    return -EINVAL;
+  }
+  if (gTimerPeriod < 1) {
+    fprintf(stderr, "Invalid value %d given for %s.\n",
+          gTimerPeriod, HADOOP_FUSE_TIMER_PERIOD);
+    return -EINVAL;
+  }
+  gExpiryPeriod = FUSE_CONN_DEFAULT_EXPIRY_PERIOD;
+  ret = hdfsConfGetInt(HADOOP_FUSE_CONNECTION_TIMEOUT, &gExpiryPeriod);
+  if (ret) {
+    fprintf(stderr, "Unable to determine the configured value for %s.",
+          HADOOP_FUSE_CONNECTION_TIMEOUT);
+    return -EINVAL;
+  }
+  if (gExpiryPeriod < 1) {
+    fprintf(stderr, "Invalid value %d given for %s.\n",
+          gExpiryPeriod, HADOOP_FUSE_CONNECTION_TIMEOUT);
+    return -EINVAL;
+  }
+  gHdfsAuthConf = discoverAuthConf();
+  if (gHdfsAuthConf == AUTH_CONF_UNKNOWN) {
+    fprintf(stderr, "Unable to determine the configured value for %s.",
+          HADOOP_SECURITY_AUTHENTICATION);
+    return -EINVAL;
+  }
+  gPort = port;
+  gUri = strdup(nnUri);
+  if (!gUri) {
+    fprintf(stderr, "fuseConnectInit: OOM allocting nnUri\n");
+    return -ENOMEM;
+  }
+  ret = pthread_mutex_init(&gConnMutex, NULL);
+  if (ret) {
+    free(gUri);
+    fprintf(stderr, "fuseConnectInit: pthread_mutex_init failed with error %d\n",
+            ret);
+    return -ret;
+  }
+  RB_INIT(&gConnTree);
+  ret = pthread_create(&gTimerThread, NULL, hdfsConnExpiryThread, NULL);
+  if (ret) {
+    free(gUri);
+    pthread_mutex_destroy(&gConnMutex);
+    fprintf(stderr, "fuseConnectInit: pthread_create failed with error %d\n",
+            ret);
+    return -ret;
+  }
+  fprintf(stderr, "fuseConnectInit: initialized with timer period %d, "
+          "expiry period %d\n", gTimerPeriod, gExpiryPeriod);
+  return 0;
+}
+
+/**
+ * Compare two libhdfs connections by username
+ *
+ * @param a                The first libhdfs connection
+ * @param b                The second libhdfs connection
+ *
+ * @return                 -1, 0, or 1 depending on whether a < b, a == b, or a > b
+ */
+static int hdfsConnCompare(const struct hdfsConn *a, const struct hdfsConn *b)
+{
+  return strcmp(a->usrname, b->usrname);
+}
+
+/**
+ * Find a libhdfs connection by username
+ *
+ * @param usrname         The username to look up
+ *
+ * @return                The connection, or NULL if none could be found
+ */
+static struct hdfsConn* hdfsConnFind(const char *usrname)
+{
+  struct hdfsConn exemplar;
+
+  memset(&exemplar, 0, sizeof(exemplar));
+  exemplar.usrname = (char*)usrname;
+  return RB_FIND(hdfsConnTree, &gConnTree, &exemplar);
+}
+
+/**
+ * Free the resource associated with a libhdfs connection.
+ *
+ * You must remove the connection from the tree before calling this function.
+ *
+ * @param conn            The libhdfs connection
+ */
+static void hdfsConnFree(struct hdfsConn *conn)
+{
+  int ret;
+
+  ret = hdfsDisconnect(conn->fs);
+  if (ret) {
+    fprintf(stderr, "hdfsConnFree(username=%s): "
+      "hdfsDisconnect failed with error %d\n",
+      (conn->usrname ? conn->usrname : "(null)"), ret);
+  }
+  free(conn->usrname);
+  free(conn->kpath);
+  free(conn);
+}
+
+/**
+ * Convert a time_t to a string.
+ *
+ * @param sec           time in seconds since the epoch
+ * @param buf           (out param) output buffer
+ * @param bufLen        length of output buffer
+ *
+ * @return              0 on success; -ENAMETOOLONG if the provided buffer was
+ *                      too short
+ */
+static int timeToStr(time_t sec, char *buf, size_t bufLen)
+{
+  struct tm tm, *out;
+  size_t l;
+
+  if (bufLen < TIME_STR_LEN) {
+    return -ENAMETOOLONG;
+  }
+  out = localtime_r(&sec, &tm);
+  asctime_r(out, buf);
+  // strip trailing newline
+  l = strlen(buf);
+  if (l != 0)
+    buf[l - 1] = '\0';
+  return 0;
+}
+
+/** 
+ * Check an HDFS connection's Kerberos path.
+ *
+ * If the mtime of the Kerberos ticket cache file has changed since we first
+ * opened the connection, mark the connection as condemned and remove it from
+ * the hdfs connection tree.
+ *
+ * @param conn      The HDFS connection
+ */
+static int hdfsConnCheckKpath(const struct hdfsConn *conn)
+{
+  int ret;
+  struct stat st;
+  char prevTimeBuf[TIME_STR_LEN], newTimeBuf[TIME_STR_LEN];
+
+  if (stat(conn->kpath, &st) < 0) {
+    ret = errno;
+    if (ret == ENOENT) {
+      fprintf(stderr, "hdfsConnCheckKpath(conn.usrname=%s): the kerberos "
+              "ticket cache file '%s' has disappeared.  Condemning the "
+              "connection.\n", conn->usrname, conn->kpath);
+    } else {
+      fprintf(stderr, "hdfsConnCheckKpath(conn.usrname=%s): stat(%s) "
+              "failed with error code %d.  Pessimistically condemning the "
+              "connection.\n", conn->usrname, conn->kpath, ret);
+    }
+    return -ret;
+  }
+  if ((st.st_mtim.tv_sec != conn->kPathMtime) ||
+      (st.st_mtim.tv_nsec != conn->kPathMtimeNs)) {
+    timeToStr(conn->kPathMtime, prevTimeBuf, sizeof(prevTimeBuf));
+    timeToStr(st.st_mtim.tv_sec, newTimeBuf, sizeof(newTimeBuf));
+    fprintf(stderr, "hdfsConnCheckKpath(conn.usrname=%s): mtime on '%s' "
+            "has changed from '%s' to '%s'.  Condemning the connection "
+            "because our cached Kerberos credentials have probably "
+            "changed.\n", conn->usrname, conn->kpath, prevTimeBuf, newTimeBuf);
+    return -EINTERNAL;
+  }
+  return 0;
+}
+
+/**
+ * Cache expiration logic.
+ *
+ * This function is called periodically by the cache expiration thread.  For
+ * each FUSE connection not currently in use (refcnt == 0) it will decrement the
+ * expirationCount for that connection.  Once the expirationCount reaches 0 for
+ * a connection, it can be garbage collected.
+ *
+ * We also check to see if the Kerberos credentials have changed.  If so, the
+ * connection is immediately condemned, even if it is currently in use.
+ */
+static void hdfsConnExpiry(void)
+{
+  struct hdfsConn *conn, *tmpConn;
+
+  pthread_mutex_lock(&gConnMutex);
+  RB_FOREACH_SAFE(conn, hdfsConnTree, &gConnTree, tmpConn) {
+    if (conn->kpath) {
+      if (hdfsConnCheckKpath(conn)) {
+        conn->condemned = 1;
+        RB_REMOVE(hdfsConnTree, &gConnTree, conn);
+        if (conn->refcnt == 0) {
+          /* If the connection is not in use by any threads, delete it
+           * immediately.  If it is still in use by some threads, the last
+           * thread using it will clean it up later inside hdfsConnRelease. */
+          hdfsConnFree(conn);
+          continue;
+        }
+      }
+    }
+    if (conn->refcnt == 0) {
+      /* If the connection is not currently in use by a thread, check to see if
+       * it ought to be removed because it's too old. */
+      conn->expirationCount--;
+      if (conn->expirationCount <= 0) {
+        if (conn->condemned) {
+          fprintf(stderr, "hdfsConnExpiry: LOGIC ERROR: condemned connection "
+                  "as %s is still in the tree!\n", conn->usrname);
+        }
+        fprintf(stderr, "hdfsConnExpiry: freeing and removing connection as "
+                "%s because it's now too old.\n", conn->usrname);
+        RB_REMOVE(hdfsConnTree, &gConnTree, conn);
+        hdfsConnFree(conn);
+      }
+    }
+  }
+  pthread_mutex_unlock(&gConnMutex);
+}
+
+// The Kerberos FILE: prefix.  This indicates that the kerberos ticket cache
+// specifier is a file.  (Note that we also assume that the specifier is a file
+// if no prefix is present.)
+#define KRB_FILE_PREFIX "FILE:"
+
+// Length of the Kerberos file prefix, which is equal to the string size in
+// bytes minus 1 (because we don't count the null terminator in the length.)
+#define KRB_FILE_PREFIX_LEN (sizeof(KRB_FILE_PREFIX) - 1)
+
+/**
+ * Find the Kerberos ticket cache path.
+ *
+ * This function finds the Kerberos ticket cache path from the thread ID and
+ * user ID of the process making the request.
+ *
+ * Normally, the ticket cache path is in a well-known location in /tmp.
+ * However, it's possible that the calling process could set the KRB5CCNAME
+ * environment variable, indicating that its Kerberos ticket cache is at a
+ * non-default location.  We try to handle this possibility by reading the
+ * process' environment here.  This will be allowed if we have root
+ * capabilities, or if our UID is the same as the remote process' UID.
+ *
+ * Note that we don't check to see if the cache file actually exists or not.
+ * We're just trying to find out where it would be if it did exist. 
+ *
+ * @param ctx           FUSE context of the process making the request
+ * @param path          (out param) the path to the ticket cache file
+ * @param pathLen       length of the path buffer
+ */
+static void findKerbTicketCachePath(struct fuse_context *ctx,
+                                    char *path, size_t pathLen)
+{
+  FILE *fp = NULL;
+  static const char * const KRB5CCNAME = "\0KRB5CCNAME=";
+  int c = '\0', pathIdx = 0, keyIdx = 0;
+  size_t KRB5CCNAME_LEN = strlen(KRB5CCNAME + 1) + 1;
+
+  // /proc/<tid>/environ contains the remote process' environment.  It is
+  // exposed to us as a series of KEY=VALUE pairs, separated by NULL bytes.
+  snprintf(path, pathLen, "/proc/%d/environ", ctx->pid);
+  fp = fopen(path, "r");
+  if (!fp)
+    goto done;
+  while (1) {
+    if (c == EOF)
+      goto done;
+    if (keyIdx == KRB5CCNAME_LEN) {
+      if (pathIdx >= pathLen - 1)
+        goto done;
+      if (c == '\0')
+        goto done;
+      path[pathIdx++] = c;
+    } else if (KRB5CCNAME[keyIdx++] != c) {
+      keyIdx = 0;
+    }
+    c = fgetc(fp);
+  }
+
+done:
+  if (fp)
+    fclose(fp);
+  if (pathIdx == 0) {
+    snprintf(path, pathLen, "/tmp/krb5cc_%d", ctx->uid);
+  } else {
+    path[pathIdx] = '\0';
+  }
+  if (strncmp(path, KRB_FILE_PREFIX, KRB_FILE_PREFIX_LEN) == 0) {
+    fprintf(stderr, "stripping " KRB_FILE_PREFIX " from the front of "
+            "KRB5CCNAME.\n");
+    memmove(path, path + KRB_FILE_PREFIX_LEN,
+            strlen(path + KRB_FILE_PREFIX_LEN) + 1);
+  }
+}
+
+/**
+ * Create a new libhdfs connection.
+ *
+ * @param usrname       Username to use for the new connection
+ * @param ctx           FUSE context to use for the new connection
+ * @param out           (out param) the new libhdfs connection
+ *
+ * @return              0 on success; error code otherwise
+ */
+static int fuseNewConnect(const char *usrname, struct fuse_context *ctx,
+        struct hdfsConn **out)
+{
+  struct hdfsBuilder *bld = NULL;
+  char kpath[PATH_MAX] = { 0 };
+  struct hdfsConn *conn = NULL;
+  int ret;
+  struct stat st;
+
+  conn = calloc(1, sizeof(struct hdfsConn));
+  if (!conn) {
+    fprintf(stderr, "fuseNewConnect: OOM allocating struct hdfsConn\n");
+    ret = -ENOMEM;
+    goto error;
+  }
+  bld = hdfsNewBuilder();
+  if (!bld) {
+    fprintf(stderr, "Unable to create hdfs builder\n");
+    ret = -ENOMEM;
+    goto error;
+  }
+  /* We always want to get a new FileSystem instance here-- that's why we call
+   * hdfsBuilderSetForceNewInstance.  Otherwise the 'cache condemnation' logic
+   * in hdfsConnExpiry will not work correctly, since FileSystem might re-use the
+   * existing cached connection which we wanted to get rid of.
+   */
+  hdfsBuilderSetForceNewInstance(bld);
+  hdfsBuilderSetNameNode(bld, gUri);
+  if (gPort) {
+    hdfsBuilderSetNameNodePort(bld, gPort);
+  }
+  hdfsBuilderSetUserName(bld, usrname);
+  if (gHdfsAuthConf == AUTH_CONF_KERBEROS) {
+    findKerbTicketCachePath(ctx, kpath, sizeof(kpath));
+    if (stat(kpath, &st) < 0) {
+      fprintf(stderr, "fuseNewConnect: failed to find Kerberos ticket cache "
+        "file '%s'.  Did you remember to kinit for UID %d?\n",
+        kpath, ctx->uid);
+      ret = -EACCES;
+      goto error;
+    }
+    conn->kPathMtime = st.st_mtim.tv_sec;
+    conn->kPathMtimeNs = st.st_mtim.tv_nsec;
+    hdfsBuilderSetKerbTicketCachePath(bld, kpath);
+    conn->kpath = strdup(kpath);
+    if (!conn->kpath) {
+      fprintf(stderr, "fuseNewConnect: OOM allocating kpath\n");
+      ret = -ENOMEM;
+      goto error;
+    }
+  }
+  conn->usrname = strdup(usrname);
+  if (!conn->usrname) {
+    fprintf(stderr, "fuseNewConnect: OOM allocating usrname\n");
+    ret = -ENOMEM;
+    goto error;
+  }
+  conn->fs = hdfsBuilderConnect(bld);
+  bld = NULL;
+  if (!conn->fs) {
+    ret = errno;
+    fprintf(stderr, "fuseNewConnect(usrname=%s): Unable to create fs: "
+            "error code %d\n", usrname, ret);
+    goto error;
+  }
+  RB_INSERT(hdfsConnTree, &gConnTree, conn);
+  *out = conn;
+  return 0;
+
+error:
+  if (bld) {
+    hdfsFreeBuilder(bld);
+  }
+  if (conn) {
+    free(conn->kpath);
+    free(conn->usrname);
+    free(conn);
+  }
+  return ret;
+}
+
+int fuseConnect(const char *usrname, struct fuse_context *ctx,
+                struct hdfsConn **out)
+{
+  int ret;
+  struct hdfsConn* conn;
+
+  pthread_mutex_lock(&gConnMutex);
+  conn = hdfsConnFind(usrname);
+  if (!conn) {
+    ret = fuseNewConnect(usrname, ctx, &conn);
+    if (ret) {
+      pthread_mutex_unlock(&gConnMutex);
+      fprintf(stderr, "fuseConnect(usrname=%s): fuseNewConnect failed with "
+              "error code %d\n", usrname, ret);
+      return ret;
+    }
+  }
+  conn->refcnt++;
+  conn->expirationCount = (gExpiryPeriod + gTimerPeriod - 1) / gTimerPeriod;
+  if (conn->expirationCount < 2)
+    conn->expirationCount = 2;
+  pthread_mutex_unlock(&gConnMutex);
+  *out = conn;
+  return 0;
+}
+
+int fuseConnectAsThreadUid(struct hdfsConn **conn)
+{
+  struct fuse_context *ctx;
+  char *usrname;
+  int ret;
+  
+  ctx = fuse_get_context();
+  usrname = getUsername(ctx->uid);
+  ret = fuseConnect(usrname, ctx, conn);
+  free(usrname);
+  return ret;
+}
+
+int fuseConnectTest(void)
+{
+  int ret;
+  struct hdfsConn *conn;
+
+  if (gHdfsAuthConf == AUTH_CONF_KERBEROS) {
+    // TODO: call some method which can tell us whether the FS exists.  In order
+    // to implement this, we have to add a method to FileSystem in order to do
+    // this without valid Kerberos authentication.  See HDFS-3674 for details.
+    return 0;
+  }
+  ret = fuseNewConnect("root", NULL, &conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectTest failed with error code %d\n", ret);
+    return ret;
+  }
+  hdfsConnRelease(conn);
+  return 0;
+}
+
+struct hdfs_internal* hdfsConnGetFs(struct hdfsConn *conn)
+{
+  return conn->fs;
+}
+
+void hdfsConnRelease(struct hdfsConn *conn)
+{
+  pthread_mutex_lock(&gConnMutex);
+  conn->refcnt--;
+  if ((conn->refcnt == 0) && (conn->condemned)) {
+    fprintf(stderr, "hdfsConnRelease(usrname=%s): freeing condemend FS!\n",
+      conn->usrname);
+    /* Notice that we're not removing the connection from gConnTree here.
+     * If the connection is condemned, it must have already been removed from
+     * the tree, so that no other threads start using it.
+     */
+    hdfsConnFree(conn);
+  }
+  pthread_mutex_unlock(&gConnMutex);
+}
+
+/**
+ * Get the monotonic time.
+ *
+ * Unlike the wall-clock time, monotonic time only ever goes forward.  If the
+ * user adjusts the time, the monotonic time will not be affected.
+ *
+ * @return        The monotonic time
+ */
+static time_t getMonotonicTime(void)
+{
+  int res;
+  struct timespec ts;
+       
+  res = clock_gettime(CLOCK_MONOTONIC, &ts);
+  if (res)
+    abort();
+  return ts.tv_sec;
+}
+
+/**
+ * FUSE connection expiration thread.
+ */
+static void* hdfsConnExpiryThread(void *v)
+{
+  time_t nextTime, curTime;
+  int waitTime;
+
+  nextTime = getMonotonicTime() + gTimerPeriod;
+  while (1) {
+    curTime = getMonotonicTime();
+    if (curTime >= nextTime) {
+      hdfsConnExpiry();
+      nextTime = curTime + gTimerPeriod;
+    }
+    waitTime = (nextTime - curTime) * 1000;
+    poll(NULL, 0, waitTime);
+  }
+  return NULL;
+}
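
The interplay between gTimerPeriod, gExpiryPeriod, and expirationCount above is easy
to misread.  A minimal standalone sketch of the arithmetic (the period values here
are assumptions for illustration; fuse-dfs takes the real ones from its mount
options, which are not part of this excerpt):

    /* Standalone illustration only -- not part of the patch. */
    #include <stdio.h>

    int main(void)
    {
      int timerPeriod  = 5;    /* assumed gTimerPeriod: seconds between expiry sweeps */
      int expiryPeriod = 120;  /* assumed gExpiryPeriod: idle lifetime of a connection */

      /* Same ceiling division as fuseConnect: the count is reset on every
       * release, and hdfsConnExpiry decrements it once per sweep. */
      int expirationCount = (expiryPeriod + timerPeriod - 1) / timerPeriod;
      if (expirationCount < 2)
        expirationCount = 2;   /* never free a connection on the very next sweep */

      printf("idle connection survives %d sweeps (~%d seconds)\n",
             expirationCount, expirationCount * timerPeriod);
      return 0;
    }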

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.h
new file mode 100644
index 0000000..35645c6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.h
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FUSE_CONNECT_H__
+#define __FUSE_CONNECT_H__
+
+struct fuse_context;
+struct hdfsConn;
+struct hdfs_internal;
+
+/**
+ * Initialize the fuse connection subsystem.
+ *
+ * This must be called before any of the other functions in this module.
+ *
+ * @param nnUri      The NameNode URI
+ * @param port       The NameNode port
+ *
+ * @return           0 on success; error code otherwise
+ */
+int fuseConnectInit(const char *nnUri, int port);
+
+/**
+ * Get a libhdfs connection.
+ *
+ * If there is an existing connection, it will be reused.  If not, a new one
+ * will be created.
+ *
+ * You must call hdfsConnRelease on the connection you get back!
+ *
+ * @param usrname    The username to use
+ * @param ctx        The FUSE context to use (contains UID, PID of requestor)
+ * @param out        (out param) The HDFS connection
+ *
+ * @return           0 on success; error code otherwise
+ */
+int fuseConnect(const char *usrname, struct fuse_context *ctx,
+                struct hdfsConn **out);
+
+/**
+ * Get a libhdfs connection.
+ *
+ * The same as fuseConnect, except the username will be determined from the FUSE
+ * thread context.
+ *
+ * @param conn       (out param) The HDFS connection
+ *
+ * @return           0 on success; error code otherwise
+ */
+int fuseConnectAsThreadUid(struct hdfsConn **conn);
+
+/**
+ * Test whether we can connect to the HDFS cluster
+ *
+ * @return           0 on success; error code otherwise
+ */
+int fuseConnectTest(void);
+
+/**
+ * Get the hdfsFS associated with an hdfsConn.
+ *
+ * @param conn       The hdfsConn
+ *
+ * @return           the hdfsFS
+ */
+struct hdfs_internal* hdfsConnGetFs(struct hdfsConn *conn);
+
+/**
+ * Release an hdfsConn when we're done with it.
+ *
+ * @param conn       The hdfsConn
+ */
+void hdfsConnRelease(struct hdfsConn *conn);
+
+#endif
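
To make the intended call pattern concrete, a hedged usage sketch of this API from
inside a FUSE operation (dfs_example_getattr is an invented name, error handling is
trimmed, and the real implementations live in the fuse_impls_*.c files):

    /* Usage sketch only -- not part of the patch. */
    #include <errno.h>
    #include <sys/stat.h>
    #include <hdfs.h>
    #include "fuse_dfs.h"
    #include "fuse_connect.h"

    static int dfs_example_getattr(const char *path, struct stat *st)
    {
      struct hdfsConn *conn = NULL;
      if (fuseConnectAsThreadUid(&conn))  /* reuse or create a per-user connection */
        return -EIO;
      hdfsFS fs = hdfsConnGetFs(conn);
      hdfsFileInfo *info = hdfsGetPathInfo(fs, path);
      if (!info) {
        hdfsConnRelease(conn);
        return -ENOENT;
      }
      st->st_size = info->mSize;
      st->st_mode = (info->mKind == kObjectKindDirectory) ?
                    (S_IFDIR | 0755) : (S_IFREG | 0644);
      hdfsFreeFileInfo(info, 1);
      hdfsConnRelease(conn);              /* every successful fuseConnect* needs this */
      return 0;
    }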

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_context_handle.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_context_handle.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_context_handle.h
new file mode 100644
index 0000000..6929062
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_context_handle.h
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FUSE_CONTEXT_HANDLE_H__
+#define __FUSE_CONTEXT_HANDLE_H__
+
+#include <hdfs.h>
+#include <stddef.h>
+#include <sys/types.h>
+
+//
+// Structure to store fuse_dfs-specific data.  It is created and passed to
+// FUSE at startup, and FUSE passes it back to us via the request context
+// on every operation.
+//
+typedef struct dfs_context_struct {
+  int debug;
+  int usetrash;
+  int direct_io;
+  char **protectedpaths;
+  size_t rdbuffer_size;
+} dfs_context;
+
+#endif
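
A short sketch of how this handle is consumed (the allocation itself happens in the
init hook in fuse_init.c, which is not part of this excerpt; the retrieval pattern
below matches is_protected() in fuse_dfs.c):

    /* Usage sketch only -- not part of the patch. */
    #include "fuse_dfs.h"
    #include "fuse_context_handle.h"

    static int example_uses_trash(void)
    {
      /* FUSE hands back whatever pointer the init hook returned as private_data. */
      dfs_context *dfs = (dfs_context *)fuse_get_context()->private_data;
      return dfs->usetrash;   /* per-mount option, fixed at startup */
    }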

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.c
new file mode 100644
index 0000000..f693032
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.c
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fuse_dfs.h"
+#include "fuse_options.h"
+#include "fuse_impls.h"
+#include "fuse_init.h"
+#include "fuse_connect.h"
+
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+int is_protected(const char *path) {
+
+  dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
+  assert(dfs != NULL);
+  assert(dfs->protectedpaths);
+
+  int i;
+  for (i = 0; dfs->protectedpaths[i]; i++) {
+    if (strcmp(path, dfs->protectedpaths[i]) == 0) {
+      return 1;
+    }
+  }
+  return 0;
+}
+
+static struct fuse_operations dfs_oper = {
+  .getattr  = dfs_getattr,
+  .access   = dfs_access,
+  .readdir  = dfs_readdir,
+  .destroy  = dfs_destroy,
+  .init     = dfs_init,
+  .open     = dfs_open,
+  .read     = dfs_read,
+  .symlink  = dfs_symlink,
+  .statfs   = dfs_statfs,
+  .mkdir    = dfs_mkdir,
+  .rmdir    = dfs_rmdir,
+  .rename   = dfs_rename,
+  .unlink   = dfs_unlink,
+  .release  = dfs_release,
+  .create   = dfs_create,
+  .write    = dfs_write,
+  .flush    = dfs_flush,
+  .mknod    = dfs_mknod,
+  .utimens  = dfs_utimens,
+  .chmod    = dfs_chmod,
+  .chown    = dfs_chown,
+  .truncate = dfs_truncate,
+};
+
+int main(int argc, char *argv[])
+{
+  int ret;
+
+  umask(0);
+
+  extern const char *program;  
+  program = argv[0];
+  struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
+
+  memset(&options, 0, sizeof(struct options));
+
+  options.rdbuffer_size = 10*1024*1024; 
+  options.attribute_timeout = 60; 
+  options.entry_timeout = 60;
+
+  if (-1 == fuse_opt_parse(&args, &options, dfs_opts, dfs_options)) {
+    return -1;
+  }
+
+  if (!options.private) {
+    fuse_opt_add_arg(&args, "-oallow_other");
+  }
+
+  if (!options.no_permissions) {
+    fuse_opt_add_arg(&args, "-odefault_permissions");
+  }
+  /*
+   * FUSE already has a built-in parameter for mounting the filesystem as
+   * read-only, -r.  We defined our own parameter for doing this called -oro.
+   * We support it by translating it into -r internally.
+   * The kernel intercepts and returns an error message for any "write"
+   * operations that the user attempts to perform on a read-only filesystem.
+   * That means that we don't have to write any code to handle read-only mode.
+   * See HDFS-4139 for more details.
+   */
+  if (options.read_only) {
+    fuse_opt_add_arg(&args, "-r");
+  }
+
+  {
+    char buf[80];
+
+    snprintf(buf, sizeof buf, "-oattr_timeout=%d",options.attribute_timeout);
+    fuse_opt_add_arg(&args, buf);
+
+    snprintf(buf, sizeof buf, "-oentry_timeout=%d",options.entry_timeout);
+    fuse_opt_add_arg(&args, buf);
+  }
+
+  if (options.nn_uri == NULL) {
+    print_usage(argv[0]);
+    exit(EXIT_SUCCESS);
+  }
+
+  /* Note: do not call any libhdfs functions until fuse_main has been invoked.
+   *
+   * fuse_main will daemonize this process, by calling fork().  This will cause
+   * any extant threads to be destroyed, which could cause problems if 
+   * libhdfs has started some Java threads.
+   *
+   * Most initialization code should go in dfs_init, which is invoked after the
+   * fork.  See HDFS-3808 for details.
+   */
+  ret = fuse_main(args.argc, args.argv, &dfs_oper, NULL);
+  fuse_opt_free_args(&args);
+  return ret;
+}
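
As the comment above notes, anything that touches libhdfs has to wait for dfs_init,
which FUSE calls after the fork.  A hedged sketch of what that hook might look like
(the real dfs_init lives in fuse_init.c, not shown in this excerpt; options.nn_port
is assumed to exist alongside options.nn_uri in fuse_options.h):

    /* Sketch only -- not part of the patch; see fuse_init.c for the real code. */
    #include <stdlib.h>
    #include "fuse_dfs.h"
    #include "fuse_options.h"
    #include "fuse_connect.h"
    #include "fuse_context_handle.h"

    static void *example_dfs_init(struct fuse_conn_info *conn)
    {
      /* Safe to start libhdfs (and its Java threads) now: fuse_main has
       * already daemonized the process. */
      if (fuseConnectInit(options.nn_uri, options.nn_port)) {  /* nn_port assumed */
        ERROR("Unable to initialize the libhdfs connection cache");
        exit(1);
      }
      dfs_context *dfs = calloc(1, sizeof(*dfs));
      dfs->rdbuffer_size = options.rdbuffer_size;
      return dfs;   /* becomes fuse_get_context()->private_data */
    }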

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.h
new file mode 100644
index 0000000..4554dbd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.h
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FUSE_DFS_H__
+#define __FUSE_DFS_H__
+
+#define FUSE_USE_VERSION 26
+
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <strings.h>
+#include <syslog.h>
+
+#include <fuse.h>
+#include <fuse/fuse_opt.h>
+
+#include <sys/xattr.h>
+
+#include "config.h"
+
+//
+// Check if a path is in the mount option supplied protected paths.
+//
+int is_protected(const char *path);
+
+#undef INFO
+#define INFO(_fmt, ...) {                       \
+  fprintf(stdout, "INFO %s:%d " _fmt "\n",      \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+  syslog(LOG_INFO, "INFO %s:%d " _fmt "\n",     \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+}
+
+#undef DEBUG
+#define DEBUG(_fmt, ...) {                      \
+  fprintf(stdout, "DEBUG %s:%d " _fmt "\n",     \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+  syslog(LOG_DEBUG, "DEBUG %s:%d " _fmt "\n",   \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+}
+
+#undef ERROR
+#define ERROR(_fmt, ...) {                      \
+  fprintf(stderr, "ERROR %s:%d " _fmt "\n",     \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+  syslog(LOG_ERR, "ERROR %s:%d " _fmt "\n",     \
+          __FILE__, __LINE__, ## __VA_ARGS__);  \
+}
+
+//#define DOTRACE
+#ifdef DOTRACE
+#define TRACE(x) {        \
+    DEBUG("TRACE %s", x); \
+}
+
+#define TRACE1(x,y) {             \
+    DEBUG("TRACE %s %s\n", x, y); \
+}
+#else
+#define TRACE(x) ; 
+#define TRACE1(x,y) ; 
+#endif
+
+#endif // __FUSE_DFS_H__
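
A brief usage sketch of the logging and trace macros above (dfs_example_unlink is an
invented name; the real hooks are declared in fuse_impls.h):

    /* Usage sketch only -- not part of the patch. */
    #include "fuse_dfs.h"

    static int dfs_example_unlink(const char *path)
    {
      TRACE1("unlink", path)   /* expands to a no-op unless DOTRACE is defined */
      if (is_protected(path)) {
        ERROR("Attempted to delete protected path %s", path);
        return -EACCES;
      }
      DEBUG("unlinking %s", path);
      return 0;
    }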

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh
new file mode 100755
index 0000000..97239cc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs_wrapper.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr/local/share/hadoop}
+
+if [ "$OS_ARCH" = "" ]; then
+export OS_ARCH=amd64
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+export  JAVA_HOME=/usr/local/java
+fi
+
+if [ "$LD_LIBRARY_PATH" = "" ]; then
+export LD_LIBRARY_PATH=$JAVA_HOME/jre/lib/$OS_ARCH/server:/usr/local/lib
+fi
+
+# If dev build set paths accordingly
+if [ -d "$HADOOP_PREFIX/build" ]; then
+  for f in ${HADOOP_PREFIX}/build/*.jar ; do
+    export CLASSPATH=$CLASSPATH:$f
+  done
+  for f in $HADOOP_PREFIX/build/ivy/lib/hadoop-hdfs/common/*.jar ; do
+    export CLASSPATH=$CLASSPATH:$f
+  done
+  export PATH=$HADOOP_PREFIX/build/contrib/fuse-dfs:$PATH
+  export LD_LIBRARY_PATH=$HADOOP_PREFIX/build/c++/lib:$JAVA_HOME/jre/lib/$OS_ARCH/server
+fi
+
+fuse_dfs "$@"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_file_handle.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_file_handle.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_file_handle.h
new file mode 100644
index 0000000..7f9346c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_file_handle.h
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FUSE_FILE_HANDLE_H__
+#define __FUSE_FILE_HANDLE_H__
+
+#include <hdfs.h>
+#include <pthread.h>
+
+struct hdfsConn;
+
+/**
+ * dfs_fh_struct is passed around for open files.  FUSE provides a hook (the
+ * fh field of struct fuse_file_info) for storing file-specific data.
+ *
+ * It holds two kinds of information:
+ * a) a read buffer, kept for performance since FUSE typically issues reads
+ *    in 4K chunks
+ * b) the HDFS file handle and the connection it was opened on
+ */
+typedef struct dfs_fh_struct {
+  hdfsFile hdfsFH;
+  struct hdfsConn *conn;
+  char *buf;
+  tSize bufferSize;  //what is the size of the buffer we have
+  off_t buffersStartOffset; //where the buffer starts in the file
+  pthread_mutex_t mutex;
+} dfs_fh;
+
+#endif
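
A hedged sketch of how the read buffer is meant to pay off (the real logic lives in
fuse_impls_read.c, which is not part of this excerpt): when a small FUSE read falls
inside the range already fetched from HDFS, it can be served with a memcpy instead
of another round trip.

    /* Sketch only -- not part of the patch. */
    #include <string.h>
    #include <sys/types.h>
    #include "fuse_file_handle.h"

    static size_t example_read_from_cache(dfs_fh *fh, char *dst,
                                          size_t size, off_t offset)
    {
      off_t start = fh->buffersStartOffset;
      off_t end   = start + fh->bufferSize;

      if (fh->buf && offset >= start && (off_t)(offset + size) <= end) {
        memcpy(dst, fh->buf + (offset - start), size);   /* cache hit */
        return size;
      }
      return 0;   /* cache miss: caller falls back to hdfsPread */
    }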

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_impls.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_impls.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_impls.h
new file mode 100644
index 0000000..d0d93e2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_impls.h
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __FUSE_IMPLS_H__
+#define __FUSE_IMPLS_H__
+
+#include <fuse.h>
+#include <syslog.h>
+
+#include "fuse_context_handle.h"
+
+/**
+ * Implementations of the various fuse hooks.
+ * All of these (should be) thread safe.
+ *
+ */
+
+int dfs_mkdir(const char *path, mode_t mode);
+int dfs_rename(const char *from, const char *to);
+int dfs_getattr(const char *path, struct stat *st);
+int dfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
+                off_t offset, struct fuse_file_info *fi);
+int dfs_read(const char *path, char *buf, size_t size, off_t offset,
+                    struct fuse_file_info *fi);
+int dfs_statfs(const char *path, struct statvfs *st);
+int dfs_rmdir(const char *path);
+int dfs_unlink(const char *path);
+int dfs_utimens(const char *path, const struct timespec ts[2]);
+int dfs_chmod(const char *path, mode_t mode);
+int dfs_chown(const char *path, uid_t uid, gid_t gid);
+int dfs_open(const char *path, struct fuse_file_info *fi);
+int dfs_write(const char *path, const char *buf, size_t size,
+              off_t offset, struct fuse_file_info *fi);
+int dfs_release (const char *path, struct fuse_file_info *fi);
+int dfs_mknod(const char *path, mode_t mode, dev_t rdev) ;
+int dfs_create(const char *path, mode_t mode, struct fuse_file_info *fi);
+int dfs_flush(const char *path, struct fuse_file_info *fi);
+int dfs_access(const char *path, int mask);
+int dfs_truncate(const char *path, off_t size);
+int dfs_symlink(const char *from, const char *to);
+
+#endif
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_impls_access.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_impls_access.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_impls_access.c
new file mode 100644
index 0000000..033a1c3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_impls_access.c
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fuse_dfs.h"
+#include "fuse_impls.h"
+#include "fuse_connect.h"
+
+int dfs_access(const char *path, int mask)
+{
+  TRACE1("access", path)
+  assert(path != NULL);
+  // TODO: HDFS-428
+  return 0;
+}
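
A hedged sketch of one minimal direction for the TODO above, checking only that the
path exists (HDFS-428 tracks a real permission-aware implementation; this is not it):

    /* Sketch only -- not part of the patch, and not what HDFS-428 calls for. */
    #include <errno.h>
    #include <hdfs.h>
    #include "fuse_dfs.h"
    #include "fuse_impls.h"
    #include "fuse_connect.h"

    static int example_access(const char *path, int mask)
    {
      struct hdfsConn *conn = NULL;

      (void)mask;   /* permission bits ignored in this minimal sketch */
      if (fuseConnectAsThreadUid(&conn))
        return -EIO;
      hdfsFS fs = hdfsConnGetFs(conn);
      int ret = hdfsExists(fs, path) ? -ENOENT : 0;   /* hdfsExists returns 0 when present */
      hdfsConnRelease(conn);
      return ret;
    }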

