From: cmccabe@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r1595372 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/CHANGES.txt hadoop-hdfs/src/main/native/ hadoop-hdfs/src/main/native/fuse-dfs/fuse_trash.c
Date: Fri, 16 May 2014 22:24:54 -0000

Author: cmccabe
Date: Fri May 16 22:24:54 2014
New Revision: 1595372

URL: http://svn.apache.org/r1595372
Log:
HDFS-4913. Deleting file through fuse-dfs when using trash fails, requiring root permissions (cmccabe)

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_trash.c

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project:r1595371

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1595371

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1595372&r1=1595371&r2=1595372&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri May 16 22:24:54 2014
@@ -243,6 +243,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6250. Fix test failed in TestBalancerWithNodeGroup.testBalancerWithRackLocality
     (Binglin Chang and Chen He via junping_du)

+    HDFS-4913. Deleting file through fuse-dfs when using trash fails requiring
+    root permissions (cmccabe)
+
 Release 2.4.1 - UNRELEASED

   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1595371

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_trash.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_trash.c?rev=1595372&r1=1595371&r2=1595372&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_trash.c (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_trash.c Fri May 16 22:24:54 2014
@@ -16,111 +16,228 @@
  * limitations under the License.
  */

- #include <hdfs.h>
+#include <hdfs.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 #include <strings.h>

+#include "fuse_context_handle.h"
 #include "fuse_dfs.h"
 #include "fuse_trash.h"
-#include "fuse_context_handle.h"
-
-
-const char *const TrashPrefixDir = "/user/root/.Trash";
-const char *const TrashDir = "/user/root/.Trash/Current";
+#include "fuse_users.h"

 #define TRASH_RENAME_TRIES 100
+#define ALREADY_IN_TRASH_ERR 9000

-//
-// NOTE: this function is a c implementation of org.apache.hadoop.fs.Trash.moveToTrash(Path path).
-//
-
-int move_to_trash(const char *item, hdfsFS userFS) {
-
-  // retrieve dfs specific data
-  dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
-
-  // check params and the context var
-  assert(item);
-  assert(dfs);
-  assert('/' == *item);
-  assert(rindex(item,'/') >= 0);
-
-
-  char fname[4096]; // or last element of the directory path
-  char parent_dir[4096]; // the directory the fname resides in
-
-  if (strlen(item) > sizeof(fname) - strlen(TrashDir)) {
-    ERROR("Buffer too small to accomodate path of len %d", (int)strlen(item));
-    return -EIO;
+/**
+ * Split a path into a parent directory and a base path component.
+ *
+ * @param abs_path      The absolute path.
+ * @param pcomp         (out param) Will be set to the last path component.
+ *                          Malloced.
+ * @param parent_dir    (out param) Will be set to the parent directory.
+ *                          Malloced.
+ *
+ * @return          0 on success.
+ *                  On success, both *pcomp and *parent_dir will contain
+ *                  malloc'ed strings.
+ *                  EINVAL if the path wasn't absolute.
+ *                  EINVAL if there is no parent directory (i.e. abs_path=/)
+ *                  ENOMEM if we ran out of memory.
+ */
+static int get_parent_dir(const char *abs_path, char **pcomp,
+                          char **parent_dir)
+{
+  int ret;
+  char *pdir = NULL, *pc = NULL, *last_slash;
+
+  pdir = strdup(abs_path);
+  if (!pdir) {
+    ret = ENOMEM;
+    goto done;
+  }
+  last_slash = rindex(pdir, '/');
+  if (!last_slash) {
+    ERROR("get_parent_dir(%s): expected absolute path.\n", abs_path);
+    ret = EINVAL;
+    goto done;
+  }
+  if (last_slash[1] == '\0') {
+    *last_slash = '\0';
+    last_slash = rindex(pdir, '/');
+    if (!last_slash) {
+      ERROR("get_parent_dir(%s): there is no parent dir.\n", abs_path);
+      ret = EINVAL;
+      goto done;
+    }
   }
-
-  // separate the file name and the parent directory of the item to be deleted
-  {
-    int length_of_parent_dir = rindex(item, '/') - item ;
-    int length_of_fname = strlen(item) - length_of_parent_dir - 1; // the '/'
-
-    // note - the below strncpys should be safe from overflow because of the check on item's string length above.
-    strncpy(parent_dir, item, length_of_parent_dir);
-    parent_dir[length_of_parent_dir ] = 0;
-    strncpy(fname, item + length_of_parent_dir + 1, strlen(item));
-    fname[length_of_fname + 1] = 0;
+  pc = strdup(last_slash + 1);
+  if (!pc) {
+    ret = ENOMEM;
+    goto done;
+  }
+  *last_slash = '\0';
+  ret = 0;
+done:
+  if (ret) {
+    free(pdir);
+    free(pc);
+    return ret;
   }
+  *pcomp = pc;
+  *parent_dir = pdir;
+  return 0;
+}

-  // create the target trash directory
-  char trash_dir[4096];
-  if (snprintf(trash_dir, sizeof(trash_dir), "%s%s", TrashDir, parent_dir)
-      >= sizeof trash_dir) {
-    ERROR("Move to trash error target not big enough for %s", item);
-    return -EIO;
+/**
+ * Get the base path to the trash.  This will depend on the user ID.
+ * For example, a user whose ID maps to 'foo' will get back the path
+ * "/user/foo/.Trash/Current".
+ *
+ * @param trash_base    (out param) the base path to the trash.
+ *                          Malloced.
+ *
+ * @return          0 on success; error code otherwise.
+ */
+static int get_trash_base(char **trash_base)
+{
+  const char * const PREFIX = "/user/";
+  const char * const SUFFIX = "/.Trash/Current";
+  char *user_name = NULL, *base = NULL;
+  uid_t uid = fuse_get_context()->uid;
+  int ret;
+
+  user_name = getUsername(uid);
+  if (!user_name) {
+    ERROR("get_trash_base(): failed to get username for uid %"PRId64"\n",
+          (uint64_t)uid);
+    ret = EIO;
+    goto done;
+  }
+  if (asprintf(&base, "%s%s%s", PREFIX, user_name, SUFFIX) < 0) {
+    base = NULL;
+    ret = ENOMEM;
+    goto done;
+  }
+  ret = 0;
+done:
+  free(user_name);
+  if (ret) {
+    free(base);
+    return ret;
   }
+  *trash_base = base;
+  return 0;
+}

+//
+// NOTE: this function is a c implementation of org.apache.hadoop.fs.Trash.moveToTrash(Path path).
+//
+int move_to_trash(const char *abs_path, hdfsFS userFS)
+{
+  int ret;
+  char *pcomp = NULL, *parent_dir = NULL, *trash_base = NULL;
+  char *target_dir = NULL, *target = NULL;
+
+  ret = get_parent_dir(abs_path, &pcomp, &parent_dir);
+  if (ret) {
+    goto done;
+  }
+  ret = get_trash_base(&trash_base);
+  if (ret) {
+    goto done;
+  }
+  if (!strncmp(trash_base, abs_path, strlen(trash_base))) {
+    INFO("move_to_trash(%s): file is already in the trash; deleting.",
+         abs_path);
+    ret = ALREADY_IN_TRASH_ERR;
+    goto done;
+  }
+  fprintf(stderr, "trash_base='%s'\n", trash_base);
+  if (asprintf(&target_dir, "%s%s", trash_base, parent_dir) < 0) {
+    ret = ENOMEM;
+    target_dir = NULL;
+    goto done;
+  }
+  if (asprintf(&target, "%s/%s", target_dir, pcomp) < 0) {
+    ret = ENOMEM;
+    target = NULL;
+    goto done;
+  }
   // create the target trash directory in trash (if needed)
-  if ( hdfsExists(userFS, trash_dir)) {
+  if (hdfsExists(userFS, target_dir) != 0) {
     // make the directory to put it in in the Trash - NOTE
     // hdfsCreateDirectory also creates parents, so Current will be created if it does not exist.
-    if (hdfsCreateDirectory(userFS, trash_dir)) {
-      return -EIO;
+    if (hdfsCreateDirectory(userFS, target_dir)) {
+      ret = errno;
+      ERROR("move_to_trash(%s) error: hdfsCreateDirectory(%s) failed with error %d",
+            abs_path, target_dir, ret);
+      goto done;
     }
-  }
-
-  //
-  // if the target path in Trash already exists, then append with
-  // a number. Start from 1.
-  //
-  char target[4096];
-  int j ;
-  if ( snprintf(target, sizeof target,"%s/%s",trash_dir, fname) >= sizeof target) {
-    ERROR("Move to trash error target not big enough for %s", item);
-    return -EIO;
-  }
-
-  // NOTE: this loop differs from the java version by capping the #of tries
-  for (j = 1; ! hdfsExists(userFS, target) && j < TRASH_RENAME_TRIES ; j++) {
-    if (snprintf(target, sizeof target,"%s/%s.%d",trash_dir, fname, j) >= sizeof target) {
-      ERROR("Move to trash error target not big enough for %s", item);
-      return -EIO;
+  } else if (hdfsExists(userFS, target) == 0) {
+    // If there is already a file in the trash with this path, append a number.
+    int idx;
+    for (idx = 1; idx < TRASH_RENAME_TRIES; idx++) {
+      free(target);
+      if (asprintf(&target, "%s/%s.%d", target_dir, pcomp, idx) < 0) {
+        target = NULL;
+        ret = ENOMEM;
+        goto done;
+      }
+      if (hdfsExists(userFS, target) != 0) {
+        break;
+      }
+    }
+    if (idx == TRASH_RENAME_TRIES) {
+      ERROR("move_to_trash(%s) error: there are already %d files in the trash "
+            "with this name.\n", abs_path, TRASH_RENAME_TRIES);
+      ret = EINVAL;
+      goto done;
     }
   }
-  if (hdfsRename(userFS, item, target)) {
-    ERROR("Trying to rename %s to %s", item, target);
-    return -EIO;
-  }
-  return 0;
-}
-
-
-int hdfsDeleteWithTrash(hdfsFS userFS, const char *path, int useTrash) {
+  if (hdfsRename(userFS, abs_path, target)) {
+    ret = errno;
+    ERROR("move_to_trash(%s): failed to rename the file to %s: error %d",
+          abs_path, target, ret);
+    goto done;
+  }
+
+  ret = 0;
+done:
+  if ((ret != 0) && (ret != ALREADY_IN_TRASH_ERR)) {
+    ERROR("move_to_trash(%s) failed with error %d", abs_path, ret);
  }
+  free(pcomp);
+  free(parent_dir);
+  free(trash_base);
+  free(target_dir);
+  free(target);
+  return ret;
+}

-  // move the file to the trash if this is enabled and its not actually in the trash.
-  if (useTrash && strncmp(path, TrashPrefixDir, strlen(TrashPrefixDir)) != 0) {
-    int ret= move_to_trash(path, userFS);
-    return ret;
+int hdfsDeleteWithTrash(hdfsFS userFS, const char *path, int useTrash)
+{
+  int tried_to_move_to_trash = 0;
+  if (useTrash) {
+    tried_to_move_to_trash = 1;
+    if (move_to_trash(path, userFS) == 0) {
+      return 0;
+    }
   }
-
   if (hdfsDelete(userFS, path, 1)) {
-    ERROR("Trying to delete the file %s", path);
-    return -EIO;
+    int err = errno;
+    if (err < 0) {
+      err = -err;
+    }
+    ERROR("hdfsDeleteWithTrash(%s): hdfsDelete failed: error %d.",
+          path, err);
+    return -err;
+  }
+  if (tried_to_move_to_trash) {
+    ERROR("hdfsDeleteWithTrash(%s): deleted the file instead.\n", path);
   }
-
   return 0;
 }
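
------------------------------------------------------------------------------
For readers who want to exercise the new path-splitting logic outside of
fuse-dfs, here is a standalone sketch. It inlines a trimmed copy of the
patch's get_parent_dir() (with the ERROR() logging dropped, since that macro
lives in fuse_dfs.h) and drives it with a few sample paths. The main()
harness and its sample paths are illustrative only and are not part of the
patch.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

/* Trimmed copy of the patch's get_parent_dir(): split an absolute path
 * into a malloc'ed parent directory and a malloc'ed last path component. */
static int get_parent_dir(const char *abs_path, char **pcomp,
                          char **parent_dir)
{
  int ret;
  char *pdir = NULL, *pc = NULL, *last_slash;

  pdir = strdup(abs_path);
  if (!pdir) {
    ret = ENOMEM;
    goto done;
  }
  last_slash = rindex(pdir, '/');
  if (!last_slash) {
    ret = EINVAL;               /* not an absolute path */
    goto done;
  }
  if (last_slash[1] == '\0') {  /* trailing slash: strip it and retry */
    *last_slash = '\0';
    last_slash = rindex(pdir, '/');
    if (!last_slash) {
      ret = EINVAL;             /* abs_path was just "/" */
      goto done;
    }
  }
  pc = strdup(last_slash + 1);
  if (!pc) {
    ret = ENOMEM;
    goto done;
  }
  *last_slash = '\0';
  ret = 0;
done:
  if (ret) {
    free(pdir);
    free(pc);
    return ret;
  }
  *pcomp = pc;
  *parent_dir = pdir;
  return 0;
}

int main(void)
{
  const char *inputs[] = { "/a/b/c", "/a/b/c/", "/c", "/" };
  size_t i;

  for (i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
    char *pcomp, *parent;
    int ret = get_parent_dir(inputs[i], &pcomp, &parent);
    if (ret) {
      printf("%-8s -> error %d\n", inputs[i], ret);
    } else {
      /* "/a/b/c" and "/a/b/c/" both yield parent='/a/b', pcomp='c';
       * "/c" yields an empty parent; "/" is rejected with EINVAL. */
      printf("%-8s -> parent='%s' pcomp='%s'\n", inputs[i], parent, pcomp);
      free(pcomp);
      free(parent);
    }
  }
  return 0;
}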
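
------------------------------------------------------------------------------
The trash-target naming scheme itself (mirror the parent directory under the
per-user "/user/<name>/.Trash/Current" base, then append ".1", ".2", ... on a
name collision, giving up after TRASH_RENAME_TRIES attempts) can also be
sketched on its own. The helper name build_trash_target and the exists()
callback below are hypothetical, introduced only for this illustration; the
patch itself checks for collisions with hdfsExists() against the real
filesystem.

#define _GNU_SOURCE /* for asprintf(), which the patch also relies on */
#include <stdio.h>
#include <stdlib.h>

#define TRASH_RENAME_TRIES 100

/* Build "<trash_base><parent_dir>/<pcomp>", appending ".N" while the
 * candidate name is taken. Returns a malloc'ed string, or NULL on
 * allocation failure or if TRASH_RENAME_TRIES candidates all collide. */
static char *build_trash_target(const char *trash_base,
                                const char *parent_dir, const char *pcomp,
                                int (*exists)(const char *path))
{
  char *target = NULL;
  int idx;

  if (asprintf(&target, "%s%s/%s", trash_base, parent_dir, pcomp) < 0)
    return NULL;
  for (idx = 1; exists(target) && idx < TRASH_RENAME_TRIES; idx++) {
    free(target);
    if (asprintf(&target, "%s%s/%s.%d", trash_base, parent_dir,
                 pcomp, idx) < 0)
      return NULL;
  }
  if (exists(target)) { /* every candidate collided; give up */
    free(target);
    return NULL;
  }
  return target;
}

/* Toy stand-in for hdfsExists(): pretend the first two candidate
 * names are already present in the trash. */
static int fake_exists(const char *path)
{
  static int calls = 0;
  (void)path;
  return calls++ < 2;
}

int main(void)
{
  char *t = build_trash_target("/user/foo/.Trash/Current", "/some/dir",
                               "file", fake_exists);
  if (t) {
    /* Prints /user/foo/.Trash/Current/some/dir/file.2 */
    printf("%s\n", t);
    free(t);
  }
  return 0;
}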