Return-Path: X-Original-To: apmail-hadoop-hdfs-commits-archive@minotaur.apache.org Delivered-To: apmail-hadoop-hdfs-commits-archive@minotaur.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 752A9D540 for ; Tue, 30 Oct 2012 23:13:00 +0000 (UTC) Received: (qmail 33772 invoked by uid 500); 30 Oct 2012 23:13:00 -0000 Delivered-To: apmail-hadoop-hdfs-commits-archive@hadoop.apache.org Received: (qmail 33737 invoked by uid 500); 30 Oct 2012 23:13:00 -0000 Mailing-List: contact hdfs-commits-help@hadoop.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: hdfs-dev@hadoop.apache.org Delivered-To: mailing list hdfs-commits@hadoop.apache.org Received: (qmail 33729 invoked by uid 99); 30 Oct 2012 23:13:00 -0000 Received: from athena.apache.org (HELO athena.apache.org) (140.211.11.136) by apache.org (qpsmtpd/0.29) with ESMTP; Tue, 30 Oct 2012 23:13:00 +0000 X-ASF-Spam-Status: No, hits=-2000.0 required=5.0 tests=ALL_TRUSTED X-Spam-Check-By: apache.org Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4) by apache.org (qpsmtpd/0.29) with ESMTP; Tue, 30 Oct 2012 23:12:57 +0000 Received: from eris.apache.org (localhost [127.0.0.1]) by eris.apache.org (Postfix) with ESMTP id EE59E23888FD; Tue, 30 Oct 2012 23:12:13 +0000 (UTC) Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: svn commit: r1403922 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/libwebhdfs/src/ src/main/native/libhdfs/ Date: Tue, 30 Oct 2012 23:12:13 -0000 To: hdfs-commits@hadoop.apache.org From: suresh@apache.org X-Mailer: svnmailer-1.0.8-patched Message-Id: <20121030231213.EE59E23888FD@eris.apache.org> X-Virus-Checked: Checked by ClamAV on apache.org Author: suresh Date: Tue Oct 30 23:12:12 2012 New Revision: 1403922 URL: http://svn.apache.org/viewvc?rev=1403922&view=rev Log: HDFS-3916. libwebhdfs testing code cleanup. Contributed by Jing Zhao. Removed: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1403922&r1=1403921&r2=1403922&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Oct 30 23:12:12 2012 @@ -440,6 +440,8 @@ Release 2.0.3-alpha - Unreleased HDFS-3789. JournalManager#format() should be able to throw IOException (Ivan Kelly via todd) + HDFS-3916. 
libwebhdfs testing code cleanup. (Jing Zhao via suresh) + OPTIMIZATIONS BUG FIXES Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c?rev=1403922&r1=1403921&r2=1403922&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c Tue Oct 30 23:12:12 2012 @@ -17,6 +17,7 @@ */ #include "hdfs.h" +#include "native_mini_dfs.h" #include #include @@ -26,228 +27,254 @@ #include #include -void permission_disp(short permissions, char *rtr) { +static struct NativeMiniDfsCluster *cluster; + +void permission_disp(short permissions, char *rtr) +{ rtr[9] = '\0'; int i; - for(i=2;i>=0;i--) + short perm; + for(i = 2; i >= 0; i--) { - short permissionsId = permissions >> (i * 3) & (short)7; - char* perm; - switch(permissionsId) { - case 7: - perm = "rwx"; break; - case 6: - perm = "rw-"; break; - case 5: - perm = "r-x"; break; - case 4: - perm = "r--"; break; - case 3: - perm = "-wx"; break; - case 2: - perm = "-w-"; break; - case 1: - perm = "--x"; break; - case 0: - perm = "---"; break; - default: - perm = "???"; - } - strncpy(rtr, perm, 3); - rtr+=3; + perm = permissions >> (i * 3); + rtr[0] = perm & 4 ? 'r' : '-'; + rtr[1] = perm & 2 ? 'w' : '-'; + rtr[2] = perm & 1 ? 'x' : '-'; + rtr += 3; } } -int main(int argc, char **argv) { +int main(int argc, char **argv) +{ + char buffer[32]; + tSize num_written_bytes; + const char* slashTmp = "/tmp"; + int nnPort; + char *rwTemplate, *rwTemplate2, *newDirTemplate, + *appendTemplate, *userTemplate, *rwPath = NULL; + const char* fileContents = "Hello, World!"; + const char* nnHost = NULL; + if (argc != 2) { fprintf(stderr, "usage: test_libwebhdfs_ops \n"); - return -1; + exit(1); } - char buffer[32]; - tSize num_written_bytes; + struct NativeMiniDfsConf conf = { + .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070, + }; + cluster = nmdCreate(&conf); + if (!cluster) { + fprintf(stderr, "Failed to create the NativeMiniDfsCluster.\n"); + exit(1); + } + if (nmdWaitClusterUp(cluster)) { + fprintf(stderr, "Error when waiting for cluster to be ready.\n"); + exit(1); + } + if (nmdGetNameNodeHttpAddress(cluster, &nnPort, &nnHost)) { + fprintf(stderr, "Error when retrieving namenode host address.\n"); + exit(1); + } - hdfsFS fs = hdfsConnectAsUserNewInstance("default", 50070, argv[1]); + hdfsFS fs = hdfsConnectAsUserNewInstance(nnHost, nnPort, argv[1]); if(!fs) { fprintf(stderr, "Oops! 
Failed to connect to hdfs!\n"); exit(-1); } - const char* writePath = "/tmp/testfile.txt"; - const char* fileContents = "Hello, World!"; - { - //Write tests - - hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0); + // Write tests + rwTemplate = strdup("/tmp/helloWorldXXXXXX"); + if (!rwTemplate) { + fprintf(stderr, "Failed to create rwTemplate!\n"); + exit(1); + } + rwPath = mktemp(rwTemplate); + // hdfsOpenFile + hdfsFile writeFile = hdfsOpenFile(fs, rwPath, + O_WRONLY|O_CREAT, 0, 0, 0); + if(!writeFile) { - fprintf(stderr, "Failed to open %s for writing!\n", writePath); - exit(-1); + fprintf(stderr, "Failed to open %s for writing!\n", rwPath); + exit(1); } - fprintf(stderr, "Opened %s for writing successfully...\n", writePath); - num_written_bytes = hdfsWrite(fs, writeFile, (void*)fileContents, strlen(fileContents) + 1); + fprintf(stderr, "Opened %s for writing successfully...\n", rwPath); + // hdfsWrite + num_written_bytes = hdfsWrite(fs, writeFile, (void*)fileContents, + (int) strlen(fileContents) + 1); if (num_written_bytes != strlen(fileContents) + 1) { - fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n", - (int)(strlen(fileContents) + 1), (int)num_written_bytes); - exit(-1); + fprintf(stderr, "Failed to write correct number of bytes - " + "expected %d, got %d\n", + (int)(strlen(fileContents) + 1), (int) num_written_bytes); + exit(1); } fprintf(stderr, "Wrote %d bytes\n", num_written_bytes); + // hdfsTell tOffset currentPos = -1; if ((currentPos = hdfsTell(fs, writeFile)) == -1) { fprintf(stderr, - "Failed to get current file position correctly! Got %lld!\n", - currentPos); - exit(-1); - } - fprintf(stderr, "Current position: %lld\n", currentPos); - - if (hdfsFlush(fs, writeFile)) { - fprintf(stderr, "Failed to 'flush' %s\n", writePath); - exit(-1); + "Failed to get current file position correctly. 
Got %" + PRId64 "!\n", currentPos); + exit(1); } - fprintf(stderr, "Flushed %s successfully!\n", writePath); - - if (hdfsHFlush(fs, writeFile)) { - fprintf(stderr, "Failed to 'hflush' %s\n", writePath); - exit(-1); - } - fprintf(stderr, "HFlushed %s successfully!\n", writePath); + fprintf(stderr, "Current position: %" PRId64 "\n", currentPos); hdfsCloseFile(fs, writeFile); + // Done test write } + sleep(1); + { //Read tests - sleep(1); - const char* readPath = "/tmp/testfile.txt"; - int exists = hdfsExists(fs, readPath); + int available = 0, exists = 0; + // hdfsExists + exists = hdfsExists(fs, rwPath); if (exists) { - fprintf(stderr, "Failed to validate existence of %s\n", readPath); - exists = hdfsExists(fs, readPath); + fprintf(stderr, "Failed to validate existence of %s\n", rwPath); + exists = hdfsExists(fs, rwPath); if (exists) { - fprintf(stderr, "Still failed to validate existence of %s\n", readPath); - exit(-1); + fprintf(stderr, + "Still failed to validate existence of %s\n", rwPath); + exit(1); } } - hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0); + hdfsFile readFile = hdfsOpenFile(fs, rwPath, O_RDONLY, 0, 0, 0); if (!readFile) { - fprintf(stderr, "Failed to open %s for reading!\n", readPath); - exit(-1); + fprintf(stderr, "Failed to open %s for reading!\n", rwPath); + exit(1); } - if (!hdfsFileIsOpenForRead(readFile)) { fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file " "with O_RDONLY, and it did not show up as 'open for " "read'\n"); - exit(-1); + exit(1); } - fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile)); + available = hdfsAvailable(fs, readFile); + fprintf(stderr, "hdfsAvailable: %d\n", available); + // hdfsSeek, hdfsTell tOffset seekPos = 1; if(hdfsSeek(fs, readFile, seekPos)) { - fprintf(stderr, "Failed to seek %s for reading!\n", readPath); - exit(-1); + fprintf(stderr, "Failed to seek %s for reading!\n", rwPath); + exit(1); } tOffset currentPos = -1; if((currentPos = hdfsTell(fs, readFile)) != seekPos) { fprintf(stderr, - "Failed to get current file position correctly! Got %lld!\n", - currentPos); - exit(-1); - } - fprintf(stderr, "Current position: %lld\n", currentPos); - - if (!hdfsFileUsesDirectRead(readFile)) { - fprintf(stderr, "Direct read support incorrectly not detected " - "for HDFS filesystem\n"); - exit(-1); + "Failed to get current file position correctly! Got %" + PRId64 "!\n", currentPos); + + exit(1); } + fprintf(stderr, "Current position: %" PRId64 "\n", currentPos); - fprintf(stderr, "Direct read support detected for HDFS\n"); - - // Test the direct read path if(hdfsSeek(fs, readFile, 0)) { - fprintf(stderr, "Failed to seek %s for reading!\n", readPath); - exit(-1); + fprintf(stderr, "Failed to seek %s for reading!\n", rwPath); + exit(1); } + + // hdfsRead memset(buffer, 0, sizeof(buffer)); - tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer, - sizeof(buffer)); + tSize num_read_bytes = hdfsRead(fs, readFile, buffer, sizeof(buffer)); if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) { - fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n", + fprintf(stderr, "Failed to read (direct). 
" + "Expected %s but got %s (%d bytes)\n", fileContents, buffer, num_read_bytes); - exit(-1); + exit(1); } fprintf(stderr, "Read following %d bytes:\n%s\n", num_read_bytes, buffer); + if (hdfsSeek(fs, readFile, 0L)) { fprintf(stderr, "Failed to seek to file start!\n"); - exit(-1); + exit(1); } - // Disable the direct read path so that we really go through the slow - // read path - hdfsFileDisableDirectRead(readFile); - - num_read_bytes = hdfsRead(fs, readFile, (void*)buffer, - sizeof(buffer)); - fprintf(stderr, "Read following %d bytes:\n%s\n", - num_read_bytes, buffer); - + // hdfsPread memset(buffer, 0, strlen(fileContents + 1)); - - num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer, - sizeof(buffer)); + num_read_bytes = hdfsPread(fs, readFile, 0, buffer, sizeof(buffer)); fprintf(stderr, "Read following %d bytes:\n%s\n", num_read_bytes, buffer); hdfsCloseFile(fs, readFile); + // Done test read } int totalResult = 0; int result = 0; { //Generic file-system operations - - const char* srcPath = "/tmp/testfile.txt"; - const char* dstPath = "/tmp/testfile2.txt"; - const char* copyPath = "/tmp/testfile_copy.txt"; - const char* movePath = "/tmp/testfile_move.txt"; - - fprintf(stderr, "hdfsCopy: %s\n", ((result = hdfsCopy(fs, srcPath, fs, copyPath)) ? "Failed!" : "Success!")); - totalResult += result; - fprintf(stderr, "hdfsMove: %s\n", ((result = hdfsMove(fs, copyPath, fs, movePath)) ? "Failed!" : "Success!")); - totalResult += result; - - fprintf(stderr, "hdfsGetDefaultBlockSize: %lld\n", hdfsGetDefaultBlockSize(fs)); - - fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, srcPath, dstPath)) ? "Failed!" : "Success!")); - totalResult += result; - fprintf(stderr, "hdfsRename back: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!")); - totalResult += result; - - const char* slashTmp = "/tmp"; - const char* newDirectory = "/tmp/newdir"; - fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!")); - totalResult += result; - - fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 1)) ? "Failed!" : "Success!")); - totalResult += result; - + char *srcPath = rwPath; char buffer[256]; const char *resp; - fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!")); - totalResult += (resp ? 0 : 1); - fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!")); + rwTemplate2 = strdup("/tmp/helloWorld2XXXXXX"); + if (!rwTemplate2) { + fprintf(stderr, "Failed to create rwTemplate2!\n"); + exit(1); + } + char *dstPath = mktemp(rwTemplate2); + newDirTemplate = strdup("/tmp/newdirXXXXXX"); + if (!newDirTemplate) { + fprintf(stderr, "Failed to create newDirTemplate!\n"); + exit(1); + } + char *newDirectory = mktemp(newDirTemplate); + + // hdfsRename + fprintf(stderr, "hdfsRename: %s\n", + ((result = hdfsRename(fs, rwPath, dstPath)) ? + "Failed!" : "Success!")); + totalResult += result; + fprintf(stderr, "hdfsRename back: %s\n", + ((result = hdfsRename(fs, dstPath, srcPath)) ? + "Failed!" : "Success!")); + totalResult += result; + + // hdfsCreateDirectory + fprintf(stderr, "hdfsCreateDirectory: %s\n", + ((result = hdfsCreateDirectory(fs, newDirectory)) ? + "Failed!" : "Success!")); + totalResult += result; + + // hdfsSetReplication + fprintf(stderr, "hdfsSetReplication: %s\n", + ((result = hdfsSetReplication(fs, srcPath, 1)) ? 
+ "Failed!" : "Success!")); totalResult += result; - fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!")); + + // hdfsGetWorkingDirectory, hdfsSetWorkingDirectory + fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", + ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? + buffer : "Failed!")); totalResult += (resp ? 0 : 1); - + + const char* path[] = {"/foo", "/foo/bar", "foobar", "//foo/bar//foobar", + "foo//bar", "foo/bar///", "/", "////"}; + for (int i = 0; i < 8; i++) { + fprintf(stderr, "hdfsSetWorkingDirectory: %s, %s\n", + ((result = hdfsSetWorkingDirectory(fs, path[i])) ? + "Failed!" : "Success!"), + hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))); + totalResult += result; + } + + fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", + ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? + "Failed!" : "Success!")); + totalResult += result; + fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", + ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? + buffer : "Failed!")); + totalResult += (resp ? 0 : 1); + + // hdfsGetPathInfo hdfsFileInfo *fileInfo = NULL; if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) { fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n"); @@ -261,13 +288,15 @@ int main(int argc, char **argv) { fprintf(stderr, "Group: %s, ", fileInfo->mGroup); char permissions[10]; permission_disp(fileInfo->mPermissions, permissions); - fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions); + fprintf(stderr, "Permissions: %d (%s)\n", + fileInfo->mPermissions, permissions); hdfsFreeFileInfo(fileInfo, 1); } else { totalResult++; - fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp); + fprintf(stderr, "hdfsGetPathInfo for %s - FAILED!\n", slashTmp); } + // hdfsListDirectory hdfsFileInfo *fileList = 0; int numEntries = 0; if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) { @@ -283,7 +312,8 @@ int main(int argc, char **argv) { fprintf(stderr, "Group: %s, ", fileList[i].mGroup); char permissions[10]; permission_disp(fileList[i].mPermissions, permissions); - fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions); + fprintf(stderr, "Permissions: %d (%s)\n", + fileList[i].mPermissions, permissions); } hdfsFreeFileInfo(fileList, numEntries); } else { @@ -295,203 +325,220 @@ int main(int argc, char **argv) { } } - // char*** hosts = hdfsGetHosts(fs, srcPath, 0, 1); - // if(hosts) { - // fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n"); - // int i=0; - // while(hosts[i]) { - // int j = 0; - // while(hosts[i][j]) { - // fprintf(stderr, - // "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]); - // ++j; - // } - // ++i; - // } - // } else { - // totalResult++; - // fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n"); - // } - char *newOwner = "root"; - // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it + // Setting tmp dir to 777 so later when connectAsUser nobody, + // we can write to it short newPerm = 0666; - // chown write - fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) ? "Failed!" : "Success!")); - totalResult += result; - fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) ? "Failed!" : "Success!")); + // hdfsChown + fprintf(stderr, "hdfsChown: %s\n", + ((result = hdfsChown(fs, rwPath, NULL, "users")) ? + "Failed!" 
: "Success!")); + totalResult += result; + fprintf(stderr, "hdfsChown: %s\n", + ((result = hdfsChown(fs, rwPath, newOwner, NULL)) ? + "Failed!" : "Success!")); + totalResult += result; + // hdfsChmod + fprintf(stderr, "hdfsChmod: %s\n", + ((result = hdfsChmod(fs, rwPath, newPerm)) ? + "Failed!" : "Success!")); totalResult += result; - // chmod write - fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) ? "Failed!" : "Success!")); - totalResult += result; - - sleep(2); tTime newMtime = time(NULL); tTime newAtime = time(NULL); // utime write - fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) ? "Failed!" : "Success!")); - + fprintf(stderr, "hdfsUtime: %s\n", + ((result = hdfsUtime(fs, rwPath, newMtime, newAtime)) ? + "Failed!" : "Success!")); totalResult += result; // chown/chmod/utime read - hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath); + hdfsFileInfo *finfo = hdfsGetPathInfo(fs, rwPath); - fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!")); + fprintf(stderr, "hdfsChown read: %s\n", + ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? + "Failed!" : "Success!")); totalResult += result; - fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) ? "Failed!" : "Success!")); + fprintf(stderr, "hdfsChmod read: %s\n", + ((result = (finfo->mPermissions != newPerm)) ? + "Failed!" : "Success!")); totalResult += result; // will later use /tmp/ as a different user so enable it - fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) ? "Failed!" : "Success!")); + fprintf(stderr, "hdfsChmod: %s\n", + ((result = hdfsChmod(fs, slashTmp, 0777)) ? + "Failed!" : "Success!")); totalResult += result; fprintf(stderr,"newMTime=%ld\n",newMtime); fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod); - fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime / 1000)) ? "Failed!" : "Success!")); + fprintf(stderr, "hdfsUtime read (mtime): %s\n", + ((result = (finfo->mLastMod != newMtime / 1000)) ? + "Failed!" : "Success!")); totalResult += result; - hdfsFreeFileInfo(finfo, 1); - // Clean up - fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) ? "Failed!" : "Success!")); - totalResult += result; - fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) ? "Failed!" : "Success!")); - totalResult += result; -// fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, movePath, 1)) ? "Failed!" : "Success!")); -// totalResult += result; - fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!")); + hdfsFreeFileInfo(finfo, 1); + fprintf(stderr, "hdfsDelete: %s\n", + ((result = hdfsDelete(fs, newDirectory, 1)) ? + "Failed!" : "Success!")); + totalResult += result; + fprintf(stderr, "hdfsDelete: %s\n", + ((result = hdfsDelete(fs, srcPath, 1)) ? + "Failed!" : "Success!")); + totalResult += result; + fprintf(stderr, "hdfsExists: %s\n", + ((result = hdfsExists(fs, newDirectory)) ? + "Success!" : "Failed!")); totalResult += (result ? 
0 : 1); + // Done test generic operations } { - // TEST APPENDS - const char *writePath = "/tmp/appends"; + // Test Appends + appendTemplate = strdup("/tmp/appendsXXXXXX"); + if (!appendTemplate) { + fprintf(stderr, "Failed to create appendTemplate!\n"); + exit(1); + } + char *appendPath = mktemp(appendTemplate); + const char* helloBuffer = "Hello,"; + hdfsFile writeFile = NULL; - // CREATE - hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0); + // Create + writeFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0); if(!writeFile) { - fprintf(stderr, "Failed to open %s for writing!\n", writePath); - exit(-1); + fprintf(stderr, "Failed to open %s for writing!\n", appendPath); + exit(1); } - fprintf(stderr, "Opened %s for writing successfully...\n", writePath); + fprintf(stderr, "Opened %s for writing successfully...\n", appendPath); - const char* buffer = "Hello,"; - tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)); + num_written_bytes = hdfsWrite(fs, writeFile, helloBuffer, + (int) strlen(helloBuffer)); fprintf(stderr, "Wrote %d bytes\n", num_written_bytes); - - if (hdfsFlush(fs, writeFile)) { - fprintf(stderr, "Failed to 'flush' %s\n", writePath); - exit(-1); - } - fprintf(stderr, "Flushed %s successfully!\n", writePath); - hdfsCloseFile(fs, writeFile); - fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, writePath, 1)) ? "Failed!" : "Success!")); + fprintf(stderr, "hdfsSetReplication: %s\n", + ((result = hdfsSetReplication(fs, appendPath, 1)) ? + "Failed!" : "Success!")); totalResult += result; - // RE-OPEN - writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_APPEND, 0, 0, 0); + // Re-Open for Append + writeFile = hdfsOpenFile(fs, appendPath, O_WRONLY | O_APPEND, 0, 0, 0); if(!writeFile) { - fprintf(stderr, "Failed to open %s for writing!\n", writePath); - exit(-1); + fprintf(stderr, "Failed to open %s for writing!\n", appendPath); + exit(1); } - fprintf(stderr, "Opened %s for appending successfully...\n", writePath); + fprintf(stderr, "Opened %s for appending successfully...\n", + appendPath); - buffer = " World"; - num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1); + helloBuffer = " World"; + num_written_bytes = hdfsWrite(fs, writeFile, helloBuffer, + (int)strlen(helloBuffer) + 1); fprintf(stderr, "Wrote %d bytes\n", num_written_bytes); - if (hdfsFlush(fs, writeFile)) { - fprintf(stderr, "Failed to 'flush' %s\n", writePath); - exit(-1); - } - fprintf(stderr, "Flushed %s successfully!\n", writePath); - hdfsCloseFile(fs, writeFile); - // CHECK size - hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath); - fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? "Success!" : "Failed!")); + // Check size + hdfsFileInfo *finfo = hdfsGetPathInfo(fs, appendPath); + fprintf(stderr, "fileinfo->mSize: == total %s\n", + ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? + "Success!" : "Failed!")); totalResult += (result ? 
0 : 1); - // READ and check data - hdfsFile readFile = hdfsOpenFile(fs, writePath, O_RDONLY, 0, 0, 0); + // Read and check data + hdfsFile readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0); if (!readFile) { - fprintf(stderr, "Failed to open %s for reading!\n", writePath); - exit(-1); + fprintf(stderr, "Failed to open %s for reading!\n", appendPath); + exit(1); } - char rdbuffer[32]; - tSize num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer)); + tSize num_read_bytes = hdfsRead(fs, readFile, buffer, sizeof(buffer)); fprintf(stderr, "Read following %d bytes:\n%s\n", - num_read_bytes, rdbuffer); - - fprintf(stderr, "read == Hello, World %s\n", (result = (strcmp(rdbuffer, "Hello, World") == 0)) ? "Success!" : "Failed!"); - + num_read_bytes, buffer); + fprintf(stderr, "read == Hello, World %s\n", + (result = (strcmp(buffer, "Hello, World") == 0)) ? + "Success!" : "Failed!"); hdfsCloseFile(fs, readFile); - // DONE test appends + // Cleanup + fprintf(stderr, "hdfsDelete: %s\n", + ((result = hdfsDelete(fs, appendPath, 1)) ? + "Failed!" : "Success!")); + totalResult += result; + // Done test appends } - totalResult += (hdfsDisconnect(fs) != 0); { // // Now test as connecting as a specific user - // This is only meant to test that we connected as that user, not to test + // This only meant to test that we connected as that user, not to test // the actual fs user capabilities. Thus just create a file and read // the owner is correct. - const char *tuser = "nobody"; - const char* writePath = "/tmp/usertestfile.txt"; + userTemplate = strdup("/tmp/usertestXXXXXX"); + if (!userTemplate) { + fprintf(stderr, "Failed to create userTemplate!\n"); + exit(1); + } + char* userWritePath = mktemp(userTemplate); + hdfsFile writeFile = NULL; fs = hdfsConnectAsUserNewInstance("default", 50070, tuser); if(!fs) { - fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser); - exit(-1); + fprintf(stderr, + "Oops! Failed to connect to hdfs as user %s!\n",tuser); + exit(1); } - hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0); + writeFile = hdfsOpenFile(fs, userWritePath, O_WRONLY|O_CREAT, 0, 0, 0); if(!writeFile) { - fprintf(stderr, "Failed to open %s for writing!\n", writePath); - exit(-1); + fprintf(stderr, "Failed to open %s for writing!\n", userWritePath); + exit(1); } - fprintf(stderr, "Opened %s for writing successfully...\n", writePath); + fprintf(stderr, "Opened %s for writing successfully...\n", + userWritePath); - char* buffer = "Hello, World!"; - tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1); + num_written_bytes = hdfsWrite(fs, writeFile, fileContents, + (int)strlen(fileContents) + 1); fprintf(stderr, "Wrote %d bytes\n", num_written_bytes); - - if (hdfsFlush(fs, writeFile)) { - fprintf(stderr, "Failed to 'flush' %s\n", writePath); - exit(-1); - } - fprintf(stderr, "Flushed %s successfully!\n", writePath); - hdfsCloseFile(fs, writeFile); - hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath); + hdfsFileInfo *finfo = hdfsGetPathInfo(fs, userWritePath); if (finfo) { - fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!")); + fprintf(stderr, "hdfs new file user is correct: %s\n", + ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? + "Failed!" 
: "Success!")); } else { - fprintf(stderr, "hdfsFileInfo returned by hdfsGetPathInfo is NULL\n"); + fprintf(stderr, + "hdfsFileInfo returned by hdfsGetPathInfo is NULL\n"); result = -1; } totalResult += result; + + // Cleanup + fprintf(stderr, "hdfsDelete: %s\n", + ((result = hdfsDelete(fs, userWritePath, 1)) ? + "Failed!" : "Success!")); + totalResult += result; + // Done test specific user } - + totalResult += (hdfsDisconnect(fs) != 0); - fprintf(stderr, "totalResult == %d\n", totalResult); + // Shutdown the native minidfscluster + nmdShutdown(cluster); + nmdFree(cluster); + + fprintf(stderr, "totalResult == %d\n", totalResult); if (totalResult != 0) { return -1; } else { Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c?rev=1403922&r1=1403921&r2=1403922&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c Tue Oct 30 23:12:12 2012 @@ -22,43 +22,52 @@ #include int main(int argc, char **argv) { + + const char* rfile; + tSize fileTotalSize, bufferSize, curSize, totalReadSize; + hdfsFS fs; + hdfsFile readFile; + char *buffer = NULL; if (argc != 4) { - fprintf(stderr, "Usage: hdfs_read \n"); - exit(-1); + fprintf(stderr, "Usage: test_libwebhdfs_read" + " \n"); + exit(1); } - hdfsFS fs = hdfsConnect("0.0.0.0", 50070); + fs = hdfsConnect("localhost", 50070); if (!fs) { fprintf(stderr, "Oops! Failed to connect to hdfs!\n"); - exit(-1); + exit(1); } - const char* rfile = argv[1]; - tSize fileTotalSize = strtoul(argv[2], NULL, 10); - tSize bufferSize = strtoul(argv[3], NULL, 10); + rfile = argv[1]; + fileTotalSize = strtoul(argv[2], NULL, 10); + bufferSize = strtoul(argv[3], NULL, 10); - hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0); + readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0); if (!readFile) { fprintf(stderr, "Failed to open %s for writing!\n", rfile); - exit(-2); + exit(1); } // data to be written to the file - char* buffer = malloc(sizeof(char) * bufferSize); + buffer = malloc(sizeof(char) * bufferSize); if(buffer == NULL) { - return -2; + fprintf(stderr, "Failed to allocate buffer.\n"); + exit(1); } // read from the file - tSize curSize = bufferSize; - tSize totalReadSize = 0; - for (; (curSize = hdfsRead(fs, readFile, (void*)buffer, bufferSize)) == bufferSize ;) { + curSize = bufferSize; + totalReadSize = 0; + for (; (curSize = hdfsRead(fs, readFile, buffer, bufferSize)) == bufferSize; ) { totalReadSize += curSize; } totalReadSize += curSize; - fprintf(stderr, "size of the file: %d; reading size: %d\n", fileTotalSize, totalReadSize); + fprintf(stderr, "size of the file: %d; reading size: %d\n", + fileTotalSize, totalReadSize); free(buffer); hdfsCloseFile(fs, readFile); @@ -67,7 +76,3 @@ int main(int argc, char **argv) { return 0; } -/** - * vim: ts=4: sw=4: et: - */ - Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c?rev=1403922&r1=1403921&r2=1403922&view=diff 
============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c Tue Oct 30 23:12:12 2012 @@ -18,6 +18,7 @@ #include "expect.h" #include "hdfs.h" +#include "native_mini_dfs.h" #include #include @@ -28,11 +29,9 @@ #define TLH_MAX_THREADS 100 -static sem_t *tlhSem; +static struct NativeMiniDfsCluster* cluster; -static const char *nn; static const char *user; -static int port; struct tlhThreadInfo { /** Thread index */ @@ -43,19 +42,24 @@ struct tlhThreadInfo { pthread_t thread; }; -static int hdfsSingleNameNodeConnect(const char *nn, int port, const char *user, hdfsFS *fs) +static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cluster, + hdfsFS *fs) { + int nnPort; + const char *nnHost; hdfsFS hdfs; - if (port < 0) { - fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort " - "returned error %d\n", port); - return port; + + if (nmdGetNameNodeHttpAddress(cluster, &nnPort, &nnHost)) { + fprintf(stderr, "Error when retrieving namenode host address.\n"); + return 1; } - hdfs = hdfsConnectAsUserNewInstance(nn, port, user); - if (!hdfs) { - return -errno; + hdfs = hdfsConnectAsUser(nnHost, nnPort, user); + if(!hdfs) { + fprintf(stderr, "Oops! Failed to connect to hdfs!\n"); + return 1; } + *fs = hdfs; return 0; } @@ -65,6 +69,7 @@ static int doTestHdfsOperations(struct t char prefix[256], tmp[256]; hdfsFile file; int ret, expected; + hdfsFileInfo *fileInfo; snprintf(prefix, sizeof(prefix), "/tlhData%04d", ti->threadIdx); @@ -74,18 +79,13 @@ static int doTestHdfsOperations(struct t EXPECT_ZERO(hdfsCreateDirectory(fs, prefix)); snprintf(tmp, sizeof(tmp), "%s/file", prefix); - /* - * Although there should not be any file to open for reading, - * the right now implementation only construct a local - * information struct when opening file - */ EXPECT_NONNULL(hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0)); file = hdfsOpenFile(fs, tmp, O_WRONLY, 0, 0, 0); EXPECT_NONNULL(file); /* TODO: implement writeFully and use it here */ - expected = strlen(prefix); + expected = (int)strlen(prefix); ret = hdfsWrite(fs, file, prefix, expected); if (ret < 0) { ret = errno; @@ -118,9 +118,28 @@ static int doTestHdfsOperations(struct t } EXPECT_ZERO(memcmp(prefix, tmp, expected)); EXPECT_ZERO(hdfsCloseFile(fs, file)); - - // TODO: Non-recursive delete should fail? 
- //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0)); + + snprintf(tmp, sizeof(tmp), "%s/file", prefix); + EXPECT_NONZERO(hdfsChown(fs, tmp, NULL, NULL)); + EXPECT_ZERO(hdfsChown(fs, tmp, NULL, "doop")); + fileInfo = hdfsGetPathInfo(fs, tmp); + EXPECT_NONNULL(fileInfo); + EXPECT_ZERO(strcmp("doop", fileInfo->mGroup)); + hdfsFreeFileInfo(fileInfo, 1); + + EXPECT_ZERO(hdfsChown(fs, tmp, "ha", "doop2")); + fileInfo = hdfsGetPathInfo(fs, tmp); + EXPECT_NONNULL(fileInfo); + EXPECT_ZERO(strcmp("ha", fileInfo->mOwner)); + EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup)); + hdfsFreeFileInfo(fileInfo, 1); + + EXPECT_ZERO(hdfsChown(fs, tmp, "ha2", NULL)); + fileInfo = hdfsGetPathInfo(fs, tmp); + EXPECT_NONNULL(fileInfo); + EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner)); + EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup)); + hdfsFreeFileInfo(fileInfo, 1); EXPECT_ZERO(hdfsDelete(fs, prefix, 1)); return 0; @@ -134,7 +153,7 @@ static void *testHdfsOperations(void *v) fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n", ti->threadIdx); - ret = hdfsSingleNameNodeConnect(nn, port, user, &fs); + ret = hdfsSingleNameNodeConnect(cluster, &fs); if (ret) { fprintf(stderr, "testHdfsOperations(threadIdx=%d): " "hdfsSingleNameNodeConnect failed with error %d.\n", @@ -181,19 +200,23 @@ static int checkFailures(struct tlhThrea */ int main(int argc, const char *args[]) { - if (argc != 4) { - fprintf(stderr, "usage: test_libhdfs_threaded "); - return -1; - } - - nn = args[1]; - port = atoi(args[2]); - user = args[3]; - int i, tlhNumThreads; const char *tlhNumThreadsStr; struct tlhThreadInfo ti[TLH_MAX_THREADS]; + if (argc != 2) { + fprintf(stderr, "usage: test_libwebhdfs_threaded \n"); + exit(1); + } + user = args[1]; + + struct NativeMiniDfsConf conf = { + .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070, + }; + cluster = nmdCreate(&conf); + EXPECT_NONNULL(cluster); + EXPECT_ZERO(nmdWaitClusterUp(cluster)); + tlhNumThreadsStr = getenv("TLH_NUM_THREADS"); if (!tlhNumThreadsStr) { tlhNumThreadsStr = "3"; @@ -210,8 +233,6 @@ int main(int argc, const char *args[]) ti[i].threadIdx = i; } -// tlhSem = sem_open("sem", O_CREAT, 0644, tlhNumThreads); - for (i = 0; i < tlhNumThreads; i++) { EXPECT_ZERO(pthread_create(&ti[i].thread, NULL, testHdfsOperations, &ti[i])); @@ -220,6 +241,7 @@ int main(int argc, const char *args[]) EXPECT_ZERO(pthread_join(ti[i].thread, NULL)); } -// EXPECT_ZERO(sem_close(tlhSem)); + EXPECT_ZERO(nmdShutdown(cluster)); + nmdFree(cluster); return checkFailures(ti, tlhNumThreads); } Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c?rev=1403922&r1=1403921&r2=1403922&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c Tue Oct 30 23:12:12 2012 @@ -22,97 +22,90 @@ #include #include #include +#include int main(int argc, char **argv) { + hdfsFS fs; + const char* writeFileName; + off_t fileTotalSize; + long long tmpBufferSize; + tSize bufferSize = 0, totalWriteSize = 0, toWrite = 0, written = 0; + hdfsFile writeFile = NULL; + int append, i = 0; + char* buffer = NULL; if (argc != 6) { - fprintf(stderr, "Usage: hdfs_write \n"); - exit(-1); + 
fprintf(stderr, "Usage: test_libwebhdfs_write " + " \n"); + exit(1); } - hdfsFS fs = hdfsConnectAsUser("0.0.0.0", 50070, argv[4]); + fs = hdfsConnectAsUser("default", 50070, argv[4]); if (!fs) { fprintf(stderr, "Oops! Failed to connect to hdfs!\n"); - exit(-1); + exit(1); } - const char* writeFileName = argv[1]; - off_t fileTotalSize = strtoul(argv[2], NULL, 10); - long long tmpBufferSize = strtoul(argv[3], NULL, 10); + writeFileName = argv[1]; + fileTotalSize = strtoul(argv[2], NULL, 10); + tmpBufferSize = strtoul(argv[3], NULL, 10); // sanity check if(fileTotalSize == ULONG_MAX && errno == ERANGE) { - fprintf(stderr, "invalid file size %s - must be <= %lu\n", argv[2], ULONG_MAX); - exit(-3); + fprintf(stderr, "invalid file size %s - must be <= %lu\n", + argv[2], ULONG_MAX); + exit(1); } // currently libhdfs writes are of tSize which is int32 if(tmpBufferSize > INT_MAX) { - fprintf(stderr, "invalid buffer size libhdfs API write chunks must be <= %d\n",INT_MAX); - exit(-3); + fprintf(stderr, + "invalid buffer size libhdfs API write chunks must be <= %d\n", + INT_MAX); + exit(1); } - tSize bufferSize = tmpBufferSize; - - hdfsFile writeFile = NULL; - int append = atoi(argv[5]); + bufferSize = (tSize) tmpBufferSize; + append = atoi(argv[5]); if (!append) { writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 2, 0); } else { - writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND, bufferSize, 2, 0); + writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND, + bufferSize, 2, 0); } if (!writeFile) { fprintf(stderr, "Failed to open %s for writing!\n", writeFileName); - exit(-2); + exit(1); } // data to be written to the file - char* buffer = malloc(sizeof(char) * bufferSize + 1); + buffer = malloc(sizeof(char) * bufferSize + 1); if(buffer == NULL) { fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize); - return -2; + exit(1); } - int i = 0; - for (i=0; i < bufferSize; ++i) { + for (i = 0; i < bufferSize; ++i) { buffer[i] = 'a' + (i%26); } buffer[bufferSize] = '\0'; - size_t totalWriteSize = 0; + // write to the file + totalWriteSize = 0; for (; totalWriteSize < fileTotalSize; ) { - tSize toWrite = bufferSize < (fileTotalSize - totalWriteSize) ? bufferSize : (fileTotalSize - totalWriteSize); - size_t written = hdfsWrite(fs, writeFile, (void*)buffer, toWrite); - fprintf(stderr, "written size %ld, to write size %d\n", written, toWrite); + toWrite = bufferSize < (fileTotalSize - totalWriteSize) ? 
+ bufferSize : (fileTotalSize - totalWriteSize); + written = hdfsWrite(fs, writeFile, (void*)buffer, toWrite); + fprintf(stderr, "written size %d, to write size %d\n", + written, toWrite); totalWriteSize += written; - //sleep(1); } + // cleanup free(buffer); hdfsCloseFile(fs, writeFile); - - fprintf(stderr, "file total size: %lld, total write size: %ld\n", fileTotalSize, totalWriteSize); - - hdfsFile readFile = hdfsOpenFile(fs, writeFileName, O_RDONLY, 0, 0, 0); - //sleep(1); - fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile)); - - hdfsFile writeFile2 = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND, 0, 2, 0); - fprintf(stderr, "Opened %s for writing successfully...\n", writeFileName); - const char *content = "Hello, World!"; - size_t num_written_bytes = hdfsWrite(fs, writeFile2, content, strlen(content) + 1); - if (num_written_bytes != strlen(content) + 1) { - fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n", - (int)(strlen(content) + 1), (int)num_written_bytes); - exit(-1); - } - fprintf(stderr, "Wrote %zd bytes\n", num_written_bytes); - + fprintf(stderr, "file total size: %" PRId64 ", total write size: %d\n", + fileTotalSize, totalWriteSize); hdfsDisconnect(fs); return 0; } - -/** - * vim: ts=4: sw=4: et: - */ - Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c?rev=1403922&r1=1403921&r2=1403922&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c Tue Oct 30 23:12:12 2012 @@ -24,10 +24,15 @@ #include #include #include +#include #define MINIDFS_CLUSTER_BUILDER "org/apache/hadoop/hdfs/MiniDFSCluster$Builder" #define MINIDFS_CLUSTER "org/apache/hadoop/hdfs/MiniDFSCluster" #define HADOOP_CONF "org/apache/hadoop/conf/Configuration" +#define HADOOP_NAMENODE "org/apache/hadoop/hdfs/server/namenode/NameNode" +#define JAVA_INETSOCKETADDRESS "java/net/InetSocketAddress" + +#define DFS_WEBHDFS_ENABLED_KEY "dfs.webhdfs.enabled" struct NativeMiniDfsCluster { /** @@ -43,6 +48,7 @@ struct NativeMiniDfsCluster* nmdCreate(s jvalue val; JNIEnv *env = getJNIEnv(); jthrowable jthr; + jstring jconfStr; if (!env) { fprintf(stderr, "nmdCreate: unable to construct JNIEnv.\n"); @@ -59,6 +65,22 @@ struct NativeMiniDfsCluster* nmdCreate(s "nmdCreate: new Configuration"); goto error_free_cl; } + if (conf->webhdfsEnabled) { + jthr = newJavaStr(env, DFS_WEBHDFS_ENABLED_KEY, &jconfStr); + if (jthr) { + printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "nmdCreate: new String"); + goto error_dlr_cobj; + } + jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF, + "setBoolean", "(Ljava/lang/String;Z)V", + jconfStr, conf->webhdfsEnabled); + if (jthr) { + printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "nmdCreate: Configuration::setBoolean"); + goto error_dlr_cobj; + } + } jthr = constructNewObjectOfClass(env, &bld, MINIDFS_CLUSTER_BUILDER, "(L"HADOOP_CONF";)V", cobj); if (jthr) { @@ -74,6 +96,16 @@ struct NativeMiniDfsCluster* nmdCreate(s goto error_dlr_bld; } bld2 = val.l; + if (conf->webhdfsEnabled) { + jthr = invokeMethod(env, &val, INSTANCE, bld2, MINIDFS_CLUSTER_BUILDER, + "nameNodeHttpPort", "(I)L" MINIDFS_CLUSTER_BUILDER ";", + 
conf->namenodeHttpPort); + if (jthr) { + printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: " + "Builder::nameNodeHttpPort"); + goto error_dlr_bld2; + } + } jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER, "build", "()L" MINIDFS_CLUSTER ";"); if (jthr) { @@ -91,6 +123,7 @@ struct NativeMiniDfsCluster* nmdCreate(s (*env)->DeleteLocalRef(env, bld2); (*env)->DeleteLocalRef(env, bld); (*env)->DeleteLocalRef(env, cobj); + (*env)->DeleteLocalRef(env, jconfStr); return cl; error_dlr_val: @@ -101,6 +134,7 @@ error_dlr_bld: (*env)->DeleteLocalRef(env, bld); error_dlr_cobj: (*env)->DeleteLocalRef(env, cobj); + (*env)->DeleteLocalRef(env, jconfStr); error_free_cl: free(cl); error: @@ -177,3 +211,69 @@ int nmdGetNameNodePort(const struct Nati } return jVal.i; } + +int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl, + int *port, const char **hostName) +{ + JNIEnv *env = getJNIEnv(); + jvalue jVal; + jobject jNameNode, jAddress; + jthrowable jthr; + int ret = 0; + const char *host; + + if (!env) { + fprintf(stderr, "nmdHdfsConnect: getJNIEnv failed\n"); + return -EIO; + } + // First get the (first) NameNode of the cluster + jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER, + "getNameNode", "()L" HADOOP_NAMENODE ";"); + if (jthr) { + printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "nmdGetNameNodeHttpAddress: " + "MiniDFSCluster#getNameNode"); + return -EIO; + } + jNameNode = jVal.l; + + // Then get the http address (InetSocketAddress) of the NameNode + jthr = invokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE, + "getHttpAddress", "()L" JAVA_INETSOCKETADDRESS ";"); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "nmdGetNameNodeHttpAddress: " + "NameNode#getHttpAddress"); + goto error_dlr_nn; + } + jAddress = jVal.l; + + jthr = invokeMethod(env, &jVal, INSTANCE, jAddress, + JAVA_INETSOCKETADDRESS, "getPort", "()I"); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "nmdGetNameNodeHttpAddress: " + "InetSocketAddress#getPort"); + goto error_dlr_addr; + } + *port = jVal.i; + + jthr = invokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_INETSOCKETADDRESS, + "getHostName", "()Ljava/lang/String;"); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "nmdGetNameNodeHttpAddress: " + "InetSocketAddress#getHostName"); + goto error_dlr_addr; + } + host = (*env)->GetStringUTFChars(env, jVal.l, NULL); + *hostName = strdup(host); + (*env)->ReleaseStringUTFChars(env, jVal.l, host); + +error_dlr_addr: + (*env)->DeleteLocalRef(env, jAddress); +error_dlr_nn: + (*env)->DeleteLocalRef(env, jNameNode); + + return ret; +} Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h?rev=1403922&r1=1403921&r2=1403922&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h Tue Oct 30 23:12:12 2012 @@ -31,6 +31,14 @@ struct NativeMiniDfsConf { * Nonzero if the cluster should be formatted prior to startup */ jboolean doFormat; + /** + * Whether or not to enable webhdfs in MiniDfsCluster + */ + jboolean webhdfsEnabled; + /** + * The http port of the namenode in MiniDfsCluster + */ + jint 
namenodeHttpPort; }; /** @@ -76,5 +84,21 @@ void nmdFree(struct NativeMiniDfsCluster * * @return the port, or a negative error code */ -int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl); +int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl); + +/** + * Get the http address that's in use by the given (non-HA) nativeMiniDfs + * + * @param cl The initialized NativeMiniDfsCluster + * @param port Used to capture the http port of the NameNode + * of the NativeMiniDfsCluster + * @param hostName Used to capture the http hostname of the NameNode + * of the NativeMiniDfsCluster + * + * @return 0 on success; a non-zero error code if failing to + * get the information. + */ +int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl, + int *port, const char **hostName); + #endif
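
A few usage notes on the APIs this change touches. First, the updated tests no longer take a namenode host/port on the command line; they start an in-process MiniDFSCluster through the native_mini_dfs wrappers and ask it for the NameNode's HTTP address. Below is a minimal sketch of that lifecycle, distilled from the pattern test_libwebhdfs_ops.c and test_libwebhdfs_threaded.c now follow (the "testuser" name is a placeholder and error handling is abbreviated):

    #include "hdfs.h"
    #include "native_mini_dfs.h"

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int nnPort = 0;
        const char *nnHost = NULL;
        hdfsFS fs = NULL;

        /* Format the cluster on startup and enable webhdfs on port 50070. */
        struct NativeMiniDfsConf conf = {
            .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070,
        };
        struct NativeMiniDfsCluster *cluster = nmdCreate(&conf);
        if (!cluster) {
            fprintf(stderr, "nmdCreate failed\n");
            exit(1);
        }
        if (nmdWaitClusterUp(cluster)) {
            fprintf(stderr, "error waiting for cluster to come up\n");
            exit(1);
        }
        /* On success, nnHost points at a strdup'd copy owned by the caller. */
        if (nmdGetNameNodeHttpAddress(cluster, &nnPort, &nnHost)) {
            fprintf(stderr, "error retrieving namenode http address\n");
            exit(1);
        }

        fs = hdfsConnectAsUser(nnHost, nnPort, "testuser");
        if (fs) {
            /* ... run test operations against fs ... */
            hdfsDisconnect(fs);
        }

        free((void *) nnHost);
        nmdShutdown(cluster);
        nmdFree(cluster);
        return fs ? 0 : 1;
    }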
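
Second, the fixed /tmp/testfile.txt paths are replaced by mktemp-generated names so that repeated runs do not collide on leftover files. Since mktemp rewrites its argument in place, the template literal has to be copied into writable memory first — the same strdup-then-mktemp sequence the updated tests use:

    char *rwTemplate = strdup("/tmp/helloWorldXXXXXX");
    if (!rwTemplate) {
        fprintf(stderr, "Failed to create rwTemplate!\n");
        exit(1);
    }
    /* mktemp replaces the trailing XXXXXX in place and returns its argument. */
    char *rwPath = mktemp(rwTemplate);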
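
Third, the rewritten permission_disp helper drops the old nine-way switch and derives each rwx triple directly from the mode bits: for owner, group, and other it shifts the permissions right by 6, 3, and 0 and tests the 4 (read), 2 (write), and 1 (execute) bits. For illustration (the octal mode below is arbitrary):

    char rtr[10];
    permission_disp(0754, rtr);
    /* rtr now holds "rwxr-xr--": owner 7 -> rwx, group 5 -> r-x, other 4 -> r-- */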
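
Finally, the %lld format strings for tOffset values give way to the PRId64 macro from <inttypes.h>, which expands to the correct 64-bit conversion specifier on both LP64 and LLP64 platforms (tOffset is a 64-bit offset type):

    #include <inttypes.h>

    tOffset currentPos = hdfsTell(fs, writeFile);
    fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);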