hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From dhr...@apache.org
Subject svn commit: r743302 - in /hadoop/core/branches/branch-0.19: CHANGES.txt src/c++/libhdfs/hdfs.c src/c++/libhdfs/hdfs.h src/c++/libhdfs/hdfs_test.c
Date Wed, 11 Feb 2009 11:04:04 GMT
Author: dhruba
Date: Wed Feb 11 11:04:03 2009
New Revision: 743302

URL: http://svn.apache.org/viewvc?rev=743302&view=rev
Log:
HADOOP-4494. Allow libhdfs to append to files.
(Pete Wyckoff via dhruba)


Modified:
    hadoop/core/branches/branch-0.19/CHANGES.txt   (contents, props changed)
    hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.c
    hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.h
    hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs_test.c

Modified: hadoop/core/branches/branch-0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/CHANGES.txt?rev=743302&r1=743301&r2=743302&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.19/CHANGES.txt Wed Feb 11 11:04:03 2009
@@ -96,6 +96,9 @@
 
     HADOOP-5193. Correct calculation of edits modification time. (shv)
 
+    HADOOP-4494. Allow libhdfs to append to files.
+    (Pete Wyckoff via dhruba)
+
 Release 0.19.0 - 2008-11-18
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/core/branches/branch-0.19/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Feb 11 11:04:03 2009
@@ -1,2 +1,2 @@
 /hadoop/core/branches/branch-0.18/CHANGES.txt:727226
-/hadoop/core/trunk/CHANGES.txt:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,708723,709040,709303,711717,712881,713888,720602,723013,723460,723831,723918,724883,727117,727212,727217,727228,727869,732572,732777,733887,734870,735082,736426,738697,740077,741703,741762
+/hadoop/core/trunk/CHANGES.txt:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,708723,709040,709303,711717,712881,713888,720602,723013,723460,723831,723918,724883,727117,727212,727217,727228,727869,732572,732777,733887,734870,735082,736426,738697,740077,741703,741762,743296

Modified: hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/c%2B%2B/libhdfs/hdfs.c?rev=743302&r1=743301&r2=743302&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.c (original)
+++ hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.c Wed Feb 11 11:04:03 2009
@@ -410,12 +410,17 @@
       return NULL;
     }
 
+    if ((flags & O_CREAT) && (flags & O_EXCL)) {
+      fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
+    }
 
     /* The hadoop java api/signature */
-    const char* method = ((flags & O_WRONLY) == 0) ? "open" : "create";
+    const char* method = ((flags & O_WRONLY) == 0) ? "open" : (flags & O_APPEND) ? "append" : "create";
     const char* signature = ((flags & O_WRONLY) == 0) ?
         JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM)) :
-        JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));
+      (flags & O_APPEND) ?
+      JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_OSTRM)) :
+      JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));
 
     /* Return value */
     hdfsFile file = NULL;
@@ -459,7 +464,7 @@
         jBufferSize = jVal.i;
     }
 
-    if (flags & O_WRONLY) {
+    if ((flags & O_WRONLY) && (flags & O_APPEND) == 0) {
         //replication
 
         if (!replication) {
@@ -490,15 +495,28 @@
     /* Create and return either the FSDataInputStream or
        FSDataOutputStream references jobject jStream */
 
+    // READ?
     if ((flags & O_WRONLY) == 0) {
+      if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
+                       method, signature, jPath, jBufferSize)) {
+        errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
+                                   "FileSystem::%s(%s)", method,
+                                   signature);
+        goto done;
+      }
+      // WRITE/APPEND?
+      else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
         if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
-                         method, signature, jPath, jBufferSize)) {
-            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
-                                       "FileSystem::%s(%s)", method,
-                                       signature);
-            goto done;
+                         method, signature, jPath)) {
+          errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
+                                     "FileSystem::%s(%s)", method,
+                                     signature);
+          goto done;
         }
+      }
+
     }
+    // WRITE/CREATE
     else {
         jboolean jOverWrite = 1;
         if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,

Modified: hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.h
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/c%2B%2B/libhdfs/hdfs.h?rev=743302&r1=743301&r2=743302&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.h (original)
+++ hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.h Wed Feb 11 11:04:03 2009
@@ -134,7 +134,8 @@
      * hdfsOpenFile - Open a hdfs file in given mode.
      * @param fs The configured filesystem handle.
      * @param path The full path to the file.
-     * @param flags Either O_RDONLY or O_WRONLY, for read-only or write-only.
+     * @param flags - an | of bits/fcntl.h file flags - supported flags are O_RDONLY, O_WRONLY (meaning create or overwrite i.e., implies O_TRUNCAT), 
+     * O_WRONLY|O_APPEND. Other flags are generally ignored other than (O_RDWR || (O_EXCL & O_CREAT)) which return NULL and set errno equal ENOTSUP.
      * @param bufferSize Size of buffer for read/write - pass 0 if you want
      * to use the default configured values.
      * @param replication Block replication - pass 0 if you want to use

Modified: hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs_test.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/c%2B%2B/libhdfs/hdfs_test.c?rev=743302&r1=743301&r2=743302&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs_test.c (original)
+++ hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs_test.c Wed Feb 11 11:04:03 2009
@@ -63,7 +63,7 @@
         fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
         exit(-1);
     } 
- 
+
         const char* writePath = "/tmp/testfile.txt";
     {
         //Write tests
@@ -317,7 +317,75 @@
         totalResult += (result ? 0 : 1);
     }
 
+    {
+      // TEST APPENDS
+      const char *writePath = "/tmp/appends";
+
+      // CREATE
+      hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0);
+      if(!writeFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+        exit(-1);
+      }
+      fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+
+      char* buffer = "Hello,";
+      tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer));
+      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+      if (hdfsFlush(fs, writeFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+        exit(-1);
+        }
+      fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+
+      hdfsCloseFile(fs, writeFile);
+
+      // RE-OPEN
+      writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_APPEND, 0, 0, 0);
+      if(!writeFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+        exit(-1);
+      }
+      fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
 
+      buffer = " World";
+      num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1);
+      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+      if (hdfsFlush(fs, writeFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+        exit(-1);
+      }
+      fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+
+      hdfsCloseFile(fs, writeFile);
+
+      // CHECK size
+      hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? "Success!" : "Failed!"));
+      totalResult += (result ? 0 : 1);
+
+      // READ and check data
+      hdfsFile readFile = hdfsOpenFile(fs, writePath, O_RDONLY, 0, 0, 0);
+      if (!readFile) {
+        fprintf(stderr, "Failed to open %s for reading!\n", writePath);
+        exit(-1);
+      }
+
+      char rdbuffer[32];
+      tSize num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
+      fprintf(stderr, "Read following %d bytes:\n%s\n", 
+              num_read_bytes, rdbuffer);
+
+      fprintf(stderr, "read == Hello, World %s\n", (result = (strcmp(rdbuffer, "Hello, World") == 0)) ? "Success!" : "Failed!");
+
+      hdfsCloseFile(fs, readFile);
+
+      // DONE test appends
+    }
+      
+      
     totalResult += (hdfsDisconnect(fs) != 0);
 
     {



Mime
View raw message