From: bobhansen@apache.org
To: common-commits@hadoop.apache.org
Date: Tue, 21 Jun 2016 20:43:02 -0000
Message-Id: <3966eed8696e4c73a9263a4222db59e3@git.apache.org>
Subject: [1/3] hadoop git commit: HDFS-10511: libhdfs++: make error returning mechanism consistent across all hdfs operations. Contributed by Anatoli Shein.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 71af40868 -> adb1a63e1


HDFS-10511: libhdfs++: make error returning mechanism consistent across all hdfs operations. Contributed by Anatoli Shein.
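The convention this patch standardizes across the C bindings: each call clears errno on entry, failures return -1 (or NULL for pointer-returning calls), and hdfsGetLastError() now returns an int so a caller can tell whether an error string was actually copied. A minimal caller-side sketch of that pattern (not part of the patch; the include path is an assumption, and it presumes hdfs_ext.h brings in the base hdfs.h typedefs such as hdfsFS, hdfsFile, tSize and tOffset):

    #include <errno.h>
    #include <stdio.h>
    #include "hdfspp/hdfs_ext.h"   /* assumed include path for the extension API */

    /* Print the thread-local error string, if one is available. */
    static void report(const char *op) {
      char buf[256];
      if (hdfsGetLastError(buf, (int) sizeof(buf)) == 0)   /* 0: a message was copied */
        fprintf(stderr, "%s: errno=%d: %s\n", op, errno, buf);
      else                                                 /* -1: nothing to report */
        fprintf(stderr, "%s: errno=%d (no error string)\n", op, errno);
    }

    /* Positional read that relies on the uniform -1 failure value. */
    int read_at(hdfsFS fs, hdfsFile file, tOffset pos, void *buf, tSize len) {
      tSize n = hdfsPread(fs, file, pos, buf, len);
      if (n == -1) {
        report("hdfsPread");
        return -1;
      }
      return (int) n;
    }

Checking the -1 return first, and only then consulting errno / hdfsGetLastError(), matches the errno-reset behavior added in the hdfs.cc changes below.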
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5166a309
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5166a309
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5166a309

Branch: refs/heads/HDFS-8707
Commit: 5166a309768084cf036a314f70985ba932d4b7c5
Parents: 71af408
Author: Bob Hansen
Authored: Tue Jun 21 16:26:58 2016 -0400
Committer: Bob Hansen
Committed: Tue Jun 21 16:26:58 2016 -0400

----------------------------------------------------------------------
 .../native/libhdfspp/include/hdfspp/hdfs_ext.h  | 48 +++++++-------
 .../native/libhdfspp/lib/bindings/c/hdfs.cc     | 67 ++++++++++++++++----
 .../src/main/native/libhdfspp/tests/hdfs_shim.c |  2 +-
 3 files changed, 79 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5166a309/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
index 47ef792..af7393f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
@@ -55,16 +55,17 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
+
 /**
  * Reads the last error, if any, that happened in this thread
  * into the user supplied buffer.
  * @param buf A chunk of memory with room for the error string.
  * @param len Size of the buffer, if the message is longer than
  *        len len-1 bytes of the message will be copied.
+ * @return 0 on successful read of the last error, -1 otherwise.
 **/
-
 LIBHDFS_EXTERNAL
-void hdfsGetLastError(char *buf, int len);
+int hdfsGetLastError(char *buf, int len);
 
 
 /**
@@ -94,7 +95,7 @@ struct hdfsBuilder *hdfsNewBuilderFromDirectory(const char * configDirectory);
  *            key isn't found. You must free this string with
  *            hdfsConfStrFree.
  *
- * @return    0 on success; nonzero error code otherwise.
+ * @return    0 on success; -1 otherwise.
  *            Failure to find the key is not an error.
  */
 LIBHDFS_EXTERNAL
@@ -108,28 +109,12 @@ int hdfsBuilderConfGetStr(struct hdfsBuilder *bld, const char *key,
  * @param val (out param) The value. This will NOT be changed if the
  *            key isn't found.
  *
- * @return    0 on success; nonzero error code otherwise.
+ * @return    0 on success; -1 otherwise.
  *            Failure to find the key is not an error.
  */
 LIBHDFS_EXTERNAL
 int hdfsBuilderConfGetInt(struct hdfsBuilder *bld, const char *key, int32_t *val);
-
-/**
- * Returns the block information and data nodes associated with a particular file.
- *
- * The hdfsBlockLocations structure will have zero or more hdfsBlockInfo elements,
- * which will have zero or more ip_addr elements indicating which datanodes have
- * each block.
- *
- * @param fs A connected hdfs instance
- * @param path Path of the file to query
- * @param locations The address of an output pointer to contain the block information.
- *                  On success, this pointer must be later freed with hdfsFreeBlockLocations.
- *
- * @return 0 on success; nonzero error code otherwise.
- *         If the file does not exist, an error will be returned.
- */
 struct hdfsDNInfo {
   const char * ip_address;
   const char * hostname;
@@ -157,6 +142,21 @@ struct hdfsBlockLocations
   struct hdfsBlockInfo * blocks;
 };
 
+/**
+ * Returns the block information and data nodes associated with a particular file.
+ *
+ * The hdfsBlockLocations structure will have zero or more hdfsBlockInfo elements,
+ * which will have zero or more ip_addr elements indicating which datanodes have
+ * each block.
+ *
+ * @param fs A connected hdfs instance
+ * @param path Path of the file to query
+ * @param locations The address of an output pointer to contain the block information.
+ *                  On success, this pointer must be later freed with hdfsFreeBlockLocations.
+ *
+ * @return 0 on success; -1 otherwise.
+ *         If the file does not exist, -1 will be returned and errno will be set.
+ */
 LIBHDFS_EXTERNAL
 int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations ** locations);
 
@@ -164,7 +164,7 @@ int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations
  * Frees up an hdfsBlockLocations pointer allocated by hdfsGetBlockLocations.
  *
  * @param locations The previously-populated pointer allocated by hdfsGetBlockLocations
- * @return 0 on success, nonzero on error
+ * @return 0 on success, -1 on error
  */
 LIBHDFS_EXTERNAL
 int hdfsFreeBlockLocations(struct hdfsBlockLocations * locations);
 
@@ -200,21 +200,21 @@ void hdfsFreeLogData(LogData*);
 
 /**
  * Enable loggind functionality for a component.
- * Return 1 on failure, 0 otherwise.
+ * Return -1 on failure, 0 otherwise.
 **/
 LIBHDFS_EXTERNAL
 int hdfsEnableLoggingForComponent(int component);
 
 /**
  * Disable logging functionality for a component.
- * Return 1 on failure, 0 otherwise.
+ * Return -1 on failure, 0 otherwise.
 **/
 LIBHDFS_EXTERNAL
 int hdfsDisableLoggingForComponent(int component);
 
 /**
  * Set level between trace and error.
- * Return 1 on failure, 0 otherwise.
+ * Return -1 on failure, 0 otherwise.
 **/
 LIBHDFS_EXTERNAL
 int hdfsSetLoggingLevel(int component);
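With the comment block moved next to its declaration, the header now documents the block-location calls as returning 0 or -1 with errno set. A hedged usage sketch (not from the patch; the include path is assumed, and only the blocks field shown in the struct above is referenced):

    #include <errno.h>
    #include <stdio.h>
    #include "hdfspp/hdfs_ext.h"   /* assumed include path */

    int show_blocks(hdfsFS fs, const char *path) {
      struct hdfsBlockLocations *locations = NULL;
      if (hdfsGetBlockLocations(fs, path, &locations) != 0) {
        /* -1 now covers every failure, including a missing file; errno is set */
        fprintf(stderr, "hdfsGetBlockLocations(%s): errno=%d\n", path, errno);
        return -1;
      }
      /* ... inspect locations->blocks here ... */
      if (hdfsFreeBlockLocations(locations) != 0) {
        fprintf(stderr, "hdfsFreeBlockLocations: errno=%d\n", errno);
        return -1;
      }
      return 0;
    }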

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5166a309/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
index 458d56e..6c9a00d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
@@ -69,9 +69,15 @@ struct hdfsFile_internal {
 thread_local std::string errstr;
 
 /* Fetch last error that happened in this thread */
-void hdfsGetLastError(char *buf, int len) {
+int hdfsGetLastError(char *buf, int len) {
+  //No error message
+  if(errstr.empty()){
+    return -1;
+  }
+
+  //There is an error, but no room for the error message to be copied to
   if(nullptr == buf || len < 1) {
-    return;
+    return -1;
   }
 
   /* leave space for a trailing null */
@@ -84,6 +90,8 @@ void hdfsGetLastError(char *buf, int len) {
 
   /* stick in null */
   buf[copylen] = 0;
+
+  return 0;
 }
 
 /* Event callbacks for next open calls */
@@ -214,6 +222,7 @@ int hdfsFileIsOpenForRead(hdfsFile file) {
 
 hdfsFS doHdfsConnect(optional nn, optional port, optional user, const Options & options) {
   try {
+    errno = 0;
     IoService * io_service = IoService::New();
 
     FileSystem *fs = FileSystem::New(io_service, user.value_or(""), options);
@@ -270,6 +279,7 @@ hdfsFS hdfsConnectAsUser(const char* nn, tPort port, const char *user) {
 
 int hdfsDisconnect(hdfsFS fs) {
   try {
+    errno = 0;
     if (!fs) {
       ReportError(ENODEV, "Cannot disconnect null FS handle.");
       return -1;
@@ -288,6 +298,7 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char *path, int flags, int bufferSize,
                       short replication, tSize blocksize) {
   try {
+    errno = 0;
     (void)flags;
     (void)bufferSize;
     (void)replication;
@@ -315,6 +326,7 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char *path, int flags, int bufferSize,
 
 int hdfsCloseFile(hdfsFS fs, hdfsFile file) {
   try {
+    errno = 0;
     if (!CheckSystemAndHandle(fs, file)) {
       return -1;
     }
@@ -424,6 +436,7 @@ void StatInfoToHdfsFileInfo(hdfsFileInfo * file_info,
 
 hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path) {
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return nullptr;
     }
@@ -448,6 +461,7 @@ hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path) {
 
 hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries) {
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       *numEntries = 0;
       return nullptr;
@@ -485,6 +499,7 @@ hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries) {
 
 void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
 {
+  errno = 0;
   int i;
   for (i = 0; i < numEntries; ++i) {
     delete[] hdfsFileInfo[i].mName;
@@ -593,6 +608,7 @@ tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position, void *buffer,
                 tSize length) {
   try {
+    errno = 0;
     if (!CheckSystemAndHandle(fs, file)) {
       return -1;
     }
@@ -613,9 +629,10 @@ tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position, void *buffer,
 
 tSize hdfsRead(hdfsFS fs, hdfsFile file, void *buffer, tSize length) {
   try {
-    if (!CheckSystemAndHandle(fs, file)) {
-      return -1;
-    }
+    errno = 0;
+    if (!CheckSystemAndHandle(fs, file)) {
+      return -1;
+    }
 
     size_t len = length;
     Status stat = file->get_impl()->Read(buffer, &len);
@@ -635,6 +652,7 @@ tSize hdfsRead(hdfsFS fs, hdfsFile file, void *buffer, tSize length) {
 
 int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos) {
   try {
+    errno = 0;
     if (!CheckSystemAndHandle(fs, file)) {
       return -1;
     }
@@ -656,6 +674,7 @@ int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos) {
 
 tOffset hdfsTell(hdfsFS fs, hdfsFile file) {
   try {
+    errno = 0;
     if (!CheckSystemAndHandle(fs, file)) {
       return -1;
     }
@@ -678,6 +697,7 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile file) {
 
 int hdfsCancel(hdfsFS fs, hdfsFile file) {
   try {
+    errno = 0;
     if (!CheckSystemAndHandle(fs, file)) {
       return -1;
     }
@@ -695,12 +715,13 @@ int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations
 {
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
     if (locations_out == nullptr) {
       ReportError(EINVAL, "Null pointer passed to hdfsGetBlockLocations");
-      return -2;
+      return -1;
     }
 
     std::shared_ptr ppLocations;
@@ -759,6 +780,7 @@ int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations
 }
 
 int hdfsFreeBlockLocations(struct hdfsBlockLocations * blockLocations) {
+  errno = 0;
   if (blockLocations == nullptr)
     return 0;
 
@@ -861,6 +883,7 @@ HdfsConfiguration LoadDefault(ConfigurationLoader & loader)
 
 hdfsBuilder::hdfsBuilder() : config(loader.New())
 {
+  errno = 0;
   loader.SetDefaultSearchPath();
   config = LoadDefault(loader);
 }
@@ -868,6 +891,7 @@ hdfsBuilder::hdfsBuilder() : config(loader.New())
 
 hdfsBuilder::hdfsBuilder(const char * directory) : config(loader.New())
 {
+  errno = 0;
   loader.SetSearchPath(directory);
   config = LoadDefault(loader);
 }
@@ -876,6 +900,7 @@ struct hdfsBuilder *hdfsNewBuilder(void)
 {
   try
   {
+    errno = 0;
     return new struct hdfsBuilder();
   } catch (const std::exception & e) {
     ReportException(e);
@@ -888,16 +913,19 @@ struct hdfsBuilder *hdfsNewBuilder(void)
 
 void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn)
 {
+  errno = 0;
   bld->overrideHost = std::string(nn);
 }
 
 void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port)
 {
+  errno = 0;
   bld->overridePort = port;
 }
 
 void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName)
 {
+  errno = 0;
   if (userName && *userName) {
     bld->user = std::string(userName);
   }
@@ -908,6 +936,7 @@ void hdfsFreeBuilder(struct hdfsBuilder *bld)
 {
   try
   {
+    errno = 0;
     delete bld;
   } catch (const std::exception & e) {
     ReportException(e);
@@ -921,6 +950,7 @@ int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
 {
   try
   {
+    errno = 0;
     optional newConfig = bld->loader.OverlayValue(bld->config, key, val);
     if (newConfig)
     {
@@ -930,7 +960,7 @@ int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
     else
     {
       ReportError(EINVAL, "Could not change Builder value");
-      return 1;
+      return -1;
     }
   } catch (const std::exception & e) {
     return ReportException(e);
@@ -941,6 +971,7 @@ int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
 
 void hdfsConfStrFree(char *val)
 {
+  errno = 0;
   free(val);
 }
 
@@ -952,6 +983,7 @@ int hdfsConfGetStr(const char *key, char **val)
 {
   try
   {
+    errno = 0;
     hdfsBuilder builder;
     return hdfsBuilderConfGetStr(&builder, key, val);
   } catch (const std::exception & e) {
@@ -965,6 +997,7 @@ int hdfsConfGetInt(const char *key, int32_t *val)
 {
   try
   {
+    errno = 0;
     hdfsBuilder builder;
     return hdfsBuilderConfGetInt(&builder, key, val);
   } catch (const std::exception & e) {
@@ -981,6 +1014,7 @@ struct hdfsBuilder *hdfsNewBuilderFromDirectory(const char * configDirectory)
 {
   try
   {
+    errno = 0;
     return new struct hdfsBuilder(configDirectory);
   } catch (const std::exception & e) {
     ReportException(e);
@@ -996,6 +1030,7 @@ int hdfsBuilderConfGetStr(struct hdfsBuilder *bld, const char *key,
 {
   try
   {
+    errno = 0;
     optional value = bld->config.Get(key);
     if (value)
     {
@@ -1027,14 +1062,17 @@ int hdfsBuilderConfGetInt(struct hdfsBuilder *bld, const char *key, int32_t *val
 {
   try
   {
+    errno = 0;
     // Pull from default configuration
     optional value = bld->config.GetInt(key);
     if (value)
     {
-      if (!isValidInt(*value))
-        return 1;
-
+      if (!isValidInt(*value)){
+        ReportError(EINVAL, "Builder value is not valid");
+        return -1;
+      }
       *val = *value;
+      return 0;
     }
     // If not found, don't change val
     ReportError(EINVAL, "Could not get Builder value");
@@ -1160,22 +1198,25 @@ static bool IsComponentValid(int component) {
 }
 
 int hdfsEnableLoggingForComponent(int component) {
+  errno = 0;
   if(!IsComponentValid(component))
-    return 1;
+    return -1;
   LogManager::EnableLogForComponent(static_cast(component));
   return 0;
 }
 
 int hdfsDisableLoggingForComponent(int component) {
+  errno = 0;
   if(!IsComponentValid(component))
-    return 1;
+    return -1;
   LogManager::DisableLogForComponent(static_cast(component));
   return 0;
 }
 
 int hdfsSetLoggingLevel(int level) {
+  errno = 0;
   if(!IsLevelValid(level))
-    return 1;
+    return -1;
   LogManager::SetLogLevel(static_cast(level));
   return 0;
 }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5166a309/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
index 3438dc3..12e9f71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
@@ -388,7 +388,7 @@ void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer) {
  * hdfs_ext functions
  */
 
-void hdfsGetLastError(char *buf, int len) {
+int hdfsGetLastError(char *buf, int len) {
   return libhdfspp_hdfsGetLastError(buf, len);
 }
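The hdfsBuilderConfGetInt change above now reports an invalid stored integer with -1 and EINVAL instead of returning 1, while a missing key still leaves *val untouched. A hedged sketch of reading a config value with a fallback (not part of the patch; the key name and include path are illustrative):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include "hdfspp/hdfs_ext.h"   /* assumed include path */

    /* Returns the configured int32 for key, or dflt if the lookup fails. */
    int32_t conf_int_or(struct hdfsBuilder *bld, const char *key, int32_t dflt) {
      int32_t val = dflt;                       /* untouched if the key is absent */
      if (hdfsBuilderConfGetInt(bld, key, &val) != 0) {
        fprintf(stderr, "hdfsBuilderConfGetInt(%s): errno=%d\n", key, errno);
        return dflt;
      }
      return val;
    }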