From: jmanno@apache.org
To: "commits@accumulo.apache.org"
Reply-To: dev@accumulo.apache.org
Date: Wed, 04 Nov 2020 18:25:45 +0000
Subject: [accumulo] branch main updated: Wrap undo in try block (#1759)
Message-ID: <160451434498.3035.17039318346371877848@gitbox.apache.org>
X-Git-Repo: accumulo
X-Git-Refname: refs/heads/main
X-Git-Oldrev: a1054168278e81045b7d3850cf5a7208deb0a7fa
X-Git-Newrev: 5e0bbfcf7fe74a41cc3283d3f255162a5900c671
Auto-Submitted: auto-generated

This is an automated email from the ASF dual-hosted git repository.
jmanno pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new 5e0bbfc  Wrap undo in try block (#1759)
5e0bbfc is described below

commit 5e0bbfcf7fe74a41cc3283d3f255162a5900c671
Author: Jeffrey Manno
AuthorDate: Wed Nov 4 13:25:33 2020 -0500

    Wrap undo in try block (#1759)

    * wrap undo methods in a try block and add exception handling
---
 .../accumulo/master/tableOps/create/ChooseDir.java   | 18 +++++++++++++++---
 .../accumulo/master/tableOps/create/CreateTable.java | 19 ++++++++++++++-----
 .../master/tableOps/create/FinishCreateTable.java    | 14 +++++++++++---
 3 files changed, 40 insertions(+), 11 deletions(-)

diff --git a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
index 4e16f354..e085349 100644
--- a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
+++ b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
@@ -35,11 +35,14 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 class ChooseDir extends MasterRepo {
   private static final long serialVersionUID = 1L;
 
   private final TableInfo tableInfo;
+  private static final Logger log = LoggerFactory.getLogger(ChooseDir.class);
 
   ChooseDir(TableInfo ti) {
     this.tableInfo = ti;
@@ -60,9 +63,18 @@ class ChooseDir extends MasterRepo {
 
   @Override
   public void undo(long tid, Master master) throws Exception {
-    Path p = tableInfo.getSplitDirsPath();
-    FileSystem fs = p.getFileSystem(master.getContext().getHadoopConf());
-    fs.delete(p, true);
+    // Clean up split files if ChooseDir operation fails
+    Path p = null;
+    try {
+      if (tableInfo.getInitialSplitSize() > 0) {
+        p = tableInfo.getSplitDirsPath();
+        FileSystem fs = p.getFileSystem(master.getContext().getHadoopConf());
+        fs.delete(p, true);
+      }
+    } catch (IOException e) {
+      log.error("Failed to undo ChooseDir operation and failed to clean up split files at {}", p,
+          e);
+    }
   }
 
   /**
diff --git a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/CreateTable.java b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/CreateTable.java
index 061d05f..ca56428 100644
--- a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/CreateTable.java
+++ b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/CreateTable.java
@@ -33,9 +33,12 @@ import org.apache.accumulo.master.tableOps.TableInfo;
 import org.apache.accumulo.master.tableOps.Utils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class CreateTable extends MasterRepo {
   private static final long serialVersionUID = 1L;
+  private static final Logger log = LoggerFactory.getLogger(CreateTable.class);
 
   private TableInfo tableInfo;
 
@@ -82,12 +85,18 @@ public class CreateTable extends MasterRepo {
 
   @Override
   public void undo(long tid, Master env) throws IOException {
     // Clean up split files if create table operation fails
-    if (tableInfo.getInitialSplitSize() > 0) {
-      Path p = tableInfo.getSplitPath().getParent();
-      FileSystem fs = p.getFileSystem(env.getContext().getHadoopConf());
-      fs.delete(p, true);
+    Path p = null;
+    try {
+      if (tableInfo.getInitialSplitSize() > 0) {
+        p = tableInfo.getSplitPath().getParent();
+        FileSystem fs = p.getFileSystem(env.getContext().getHadoopConf());
+        fs.delete(p, true);
+      }
+    } catch (IOException e) {
+      log.error("Table failed to be created and failed to clean up split files at {}", p, e);
+    } finally {
+      Utils.unreserveNamespace(env, tableInfo.getNamespaceId(), tid, false);
     }
-    Utils.unreserveNamespace(env, tableInfo.getNamespaceId(), tid, false);
   }
 }
diff --git a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/FinishCreateTable.java b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/FinishCreateTable.java
index aabb739..c8b515e 100644
--- a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/FinishCreateTable.java
+++ b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/FinishCreateTable.java
@@ -29,10 +29,13 @@ import org.apache.accumulo.master.tableOps.TableInfo;
 import org.apache.accumulo.master.tableOps.Utils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 class FinishCreateTable extends MasterRepo {
 
   private static final long serialVersionUID = 1L;
+  private static final Logger log = LoggerFactory.getLogger(FinishCreateTable.class);
 
   private final TableInfo tableInfo;
 
@@ -70,9 +73,14 @@ class FinishCreateTable extends MasterRepo {
   private void cleanupSplitFiles(Master env) throws IOException {
     // it is sufficient to delete from the parent, because both files are in the same directory, and
     // we want to delete the directory also
-    Path p = tableInfo.getSplitPath().getParent();
-    FileSystem fs = p.getFileSystem(env.getContext().getHadoopConf());
-    fs.delete(p, true);
+    Path p = null;
+    try {
+      p = tableInfo.getSplitPath().getParent();
+      FileSystem fs = p.getFileSystem(env.getContext().getHadoopConf());
+      fs.delete(p, true);
+    } catch (IOException e) {
+      log.error("Table was created, but failed to clean up temporary splits files at {}", p, e);
+    }
   }
 
   @Override
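
For readers skimming the diff, the change reduces to one pattern: the best-effort cleanup inside undo() is wrapped in try/catch so a failed delete is logged instead of propagating, and any step that must always run (in CreateTable, releasing the namespace reservation via Utils.unreserveNamespace) moves into a finally block. Below is a minimal standalone sketch of that pattern; UndoSketch, FileCleaner, and ReservationHolder are hypothetical placeholders for illustration only and are not part of the Accumulo code above.

import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Minimal sketch of the try/catch/finally pattern applied by this commit.
// FileCleaner and ReservationHolder are hypothetical stand-ins for the
// FileSystem.delete(...) and Utils.unreserveNamespace(...) calls in the diff.
class UndoSketch {
  private static final Logger log = LoggerFactory.getLogger(UndoSketch.class);

  interface FileCleaner {
    void deleteRecursively(String path) throws IOException;
  }

  interface ReservationHolder {
    void release();
  }

  void undo(FileCleaner cleaner, ReservationHolder reservation, String splitDir) {
    try {
      // Best-effort cleanup: a failure here should not abort the rest of the undo.
      cleaner.deleteRecursively(splitDir);
    } catch (IOException e) {
      // Log and continue rather than rethrow, mirroring the commit.
      log.error("Failed to clean up split files at {}", splitDir, e);
    } finally {
      // Must run even if cleanup fails, like Utils.unreserveNamespace in CreateTable.undo.
      reservation.release();
    }
  }
}

One design note: catching only IOException (rather than Exception) keeps genuinely unexpected failures visible, while the finally block guarantees the reservation is released on every path.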