From: aengineer@apache.org
To: common-commits@hadoop.apache.org
Date: Fri, 03 Mar 2017 22:05:41 -0000
In-Reply-To: <275bda3b88a54b959537eb82f6111bf2@git.apache.org>
References: <275bda3b88a54b959537eb82f6111bf2@git.apache.org>
Subject: [49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
----------------------------------------------------------------------
diff --cc hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
index 0000000,a2ba0bf..4b19a21f
mode 000000,100644..100644
--- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
@@@ -1,0 -1,132 +1,143 @@@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <!--
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+ 
+      http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License. See accompanying LICENSE file.
+ -->
+ <project xmlns="http://maven.apache.org/POM/4.0.0"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+   <modelVersion>4.0.0</modelVersion>
+   <parent>
+     <groupId>org.apache.hadoop</groupId>
+     <artifactId>hadoop-project</artifactId>
+     <version>3.0.0-alpha3-SNAPSHOT</version>
+     <relativePath>../../hadoop-project</relativePath>
+   </parent>
+   <artifactId>hadoop-client-check-test-invariants</artifactId>
+   <version>3.0.0-alpha3-SNAPSHOT</version>
+   <packaging>pom</packaging>
+ 
+   <description>Enforces our invariants for the testing client modules.</description>
+   <name>Apache Hadoop Client Packaging Invariants for Test</name>
+ 
+   <properties>
+   </properties>
+ 
+   <dependencies>
+     <dependency>
+       <groupId>org.apache.hadoop</groupId>
+       <artifactId>hadoop-client-api</artifactId>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.hadoop</groupId>
+       <artifactId>hadoop-client-runtime</artifactId>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.hadoop</groupId>
+       <artifactId>hadoop-client-minicluster</artifactId>
+     </dependency>
+   </dependencies>
+   <build>
+     <plugins>
+       <plugin>
+         <groupId>org.apache.maven.plugins</groupId>
+         <artifactId>maven-enforcer-plugin</artifactId>
+         <version>1.4</version>
+         <dependencies>
+           <dependency>
+             <groupId>org.codehaus.mojo</groupId>
+             <artifactId>extra-enforcer-rules</artifactId>
+             <version>1.0-beta-3</version>
+           </dependency>
+         </dependencies>
+         <executions>
+           <execution>
+             <id>enforce-banned-dependencies</id>
+             <goals>
+               <goal>enforce</goal>
+             </goals>
+             <configuration>
+               <rules>
+                 <banTransitiveDependencies>
+                   <excludes>
+                     <exclude>org.apache.hadoop:hadoop-annotations</exclude>
+                     <exclude>org.apache.htrace:htrace-core4</exclude>
+                     <exclude>org.slf4j:slf4j-api</exclude>
+                     <exclude>commons-logging:commons-logging</exclude>
+                     <exclude>log4j:log4j</exclude>
+                     <exclude>junit:junit</exclude>
+                     <exclude>org.hamcrest:hamcrest-core</exclude>
+                   </excludes>
+                 </banTransitiveDependencies>
+                 <banDuplicateClasses>
+                   <findAllDuplicates>true</findAllDuplicates>
+                   <dependencies>
+                     <dependency>
+                       <groupId>org.apache.hadoop</groupId>
+                       <artifactId>hadoop-annotations</artifactId>
+                       <ignoreClasses>
+                         <ignoreClass>*</ignoreClass>
+                       </ignoreClasses>
+                     </dependency>
++                    <dependency>
++                      <groupId>io.netty</groupId>
++                      <artifactId>netty</artifactId>
++                      <ignoreClasses>
++                        <ignoreClass>*</ignoreClass>
++                      </ignoreClasses>
++                    </dependency>
+                   </dependencies>
+                 </banDuplicateClasses>
+               </rules>
+             </configuration>
+           </execution>
+         </executions>
+       </plugin>
+     </plugins>
+   </build>
+ </project>
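[Editor's example. The execution above uses the banDuplicateClasses rule from extra-enforcer-rules, which fails the build when two jars on the classpath ship the same class file; the merge adds an ignore entry for io.netty:netty, apparently so the Netty jar pulled in by the HDFS-7240 branch does not trip the check. As a rough illustration of what that rule verifies -- this is not the plugin's implementation, and DuplicateClassScan is a hypothetical name -- here is a small scanner that reports class files appearing in more than one jar:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Enumeration;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.jar.JarEntry;
    import java.util.jar.JarFile;

    public class DuplicateClassScan {
      public static void main(String[] args) throws IOException {
        // Maps each .class entry name to the jars that contain it.
        Map<String, List<String>> owners = new HashMap<>();
        for (String jarPath : args) {
          try (JarFile jar = new JarFile(jarPath)) {
            for (Enumeration<JarEntry> e = jar.entries(); e.hasMoreElements(); ) {
              String name = e.nextElement().getName();
              if (name.endsWith(".class")) {
                owners.computeIfAbsent(name, k -> new ArrayList<>()).add(jarPath);
              }
            }
          }
        }
        // Any class provided by more than one jar is what the rule would flag.
        owners.forEach((cls, jars) -> {
          if (jars.size() > 1) {
            System.out.println("duplicate " + cls + " in " + jars);
          }
        });
      }
    }

Run it with the jars to compare as arguments, e.g. java DuplicateClassScan a.jar b.jar.]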
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 86aed61,6f24858..f5d7da1
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@@ -115,8 -108,8 +110,9 @@@ import org.apache.hadoop.hdfs.DFSUtil
  import org.apache.hadoop.hdfs.DFSUtilClient;
  import org.apache.hadoop.hdfs.HDFSPolicyProvider;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
- import org.apache.hadoop.ozone.container.common.statemachine
-     .DatanodeStateMachine;
++import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+ import org.apache.hadoop.hdfs.server.datanode.checker.DatasetVolumeChecker;
+ import org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker;
  import org.apache.hadoop.util.AutoCloseableLock;
  import org.apache.hadoop.hdfs.client.BlockReportOptions;
  import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@@ -395,8 -389,12 +394,13 @@@ public class DataNode extends Reconfigu
    private static final double CONGESTION_RATIO = 1.5;
    private DiskBalancer diskBalancer;
 
+   @Nullable
+   private final StorageLocationChecker storageLocationChecker;
+ 
+   private final DatasetVolumeChecker volumeChecker;
+ 
    private final SocketFactory socketFactory;
+ 
+   private DatanodeStateMachine
+       datanodeStateMachine;
 
    private static Tracer createTracer(Configuration conf) {
      return new Tracer.Builder("DataNode").
@@@ -427,11 -424,11 +432,12 @@@
      this.connectToDnViaHostname = false;
      this.blockScanner = new BlockScanner(this, this.getConf());
      this.pipelineSupportECN = false;
+     this.ozoneEnabled = false;
-     this.checkDiskErrorInterval =
-         ThreadLocalRandom.current().nextInt(5000, (int) (5000 * 1.25));
      this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
+     this.dnConf = new DNConf(this);
      initOOBTimeout();
+     storageLocationChecker = null;
+     volumeChecker = new DatasetVolumeChecker(conf, new Timer());
    }
 
    /**
@@@ -1920,7 -1920,7 +1954,7 @@@
        }
      }
    }
-- 
++
    List<BPOfferService> bposArray =
        (this.blockPoolManager == null)
            ? new ArrayList<BPOfferService>()
            : this.blockPoolManager.getAllNamenodeThreads();
@@@ -1968,137 -1964,135 +1997,150 @@@
      }
    }
 
-     volumeChecker.shutdownAndWait(1, TimeUnit.SECONDS);
- 
-     if (storageLocationChecker != null) {
-       storageLocationChecker.shutdownAndWait(1, TimeUnit.SECONDS);
+     // Stop the object store handler
+     if (this.objectStoreHandler != null) {
+       this.objectStoreHandler.close();
      }
 
-     if(this.ozoneEnabled) {
-       if(datanodeStateMachine != null) {
-         if (pauseMonitor != null) {
-           pauseMonitor.stop();
-         }
++    if (this.ozoneEnabled) {
++      if (datanodeStateMachine != null) {
+         try {
+           datanodeStateMachine.close();
+         } catch (Exception e) {
+           LOG.error("Error in ozone shutdown. ex {}", e.toString());
+         }
+       }
-     }
++    }
 
++    volumeChecker.shutdownAndWait(1, TimeUnit.SECONDS);
 
-     if (pauseMonitor != null) {
-       pauseMonitor.stop();
-     }
-     // shouldRun is set to false here to prevent certain threads from exiting
-     // before the restart prep is done.
-     this.shouldRun = false;
- 
-     // wait reconfiguration thread, if any, to exit
-     shutdownReconfigurationTask();
- 
-     // wait for all data receiver threads to exit
-     if (this.threadGroup != null) {
-       int sleepMs = 2;
-       while (true) {
-         // When shutting down for restart, wait 2.5 seconds before forcing
-         // termination of receiver threads.
-         if (!this.shutdownForUpgrade ||
-             (this.shutdownForUpgrade && (Time.monotonicNow() - timeNotified
-                 > 1000))) {
-           this.threadGroup.interrupt();
-           break;
++    if (storageLocationChecker != null) {
++      storageLocationChecker.shutdownAndWait(1, TimeUnit.SECONDS);
++    }
+ 
-     // shouldRun is set to false here to prevent certain threads from exiting
-     // before the restart prep is done.
-     this.shouldRun = false;
- 
-     // wait reconfiguration thread, if any, to exit
-     shutdownReconfigurationTask();
- 
-     // wait for all data receiver threads to exit
-     if (this.threadGroup != null) {
-       int sleepMs = 2;
-       while (true) {
-         // When shutting down for restart, wait 2.5 seconds before forcing
-         // termination of receiver threads.
-         if (!this.shutdownForUpgrade ||
-             (this.shutdownForUpgrade && (Time.monotonicNow() - timeNotified
-                 > 1000))) {
-           this.threadGroup.interrupt();
-           break;
++    if (pauseMonitor != null) {
++      pauseMonitor.stop();
++    }
++
++    // shouldRun is set to false here to prevent certain threads from exiting
++    // before the restart prep is done.
++    this.shouldRun = false;
++
++    // wait reconfiguration thread, if any, to exit
++    shutdownReconfigurationTask();
++
++    // wait for all data receiver threads to exit
++    if (this.threadGroup != null) {
++      int sleepMs = 2;
++      while (true) {
++        // When shutting down for restart, wait 2.5 seconds before forcing
++        // termination of receiver threads.
++        if (!this.shutdownForUpgrade ||
++            (this.shutdownForUpgrade && (Time.monotonicNow() - timeNotified
++                > 1000))) {
++          this.threadGroup.interrupt();
++          break;
++        }
++        LOG.info("Waiting for threadgroup to exit, active threads is " +
++            this.threadGroup.activeCount());
++        if (this.threadGroup.activeCount() == 0) {
++          break;
++        }
++        try {
++          Thread.sleep(sleepMs);
++        } catch (InterruptedException e) {
++        }
++        sleepMs = sleepMs * 3 / 2; // exponential backoff
++        if (sleepMs > 200) {
++          sleepMs = 200;
++        }
        }
--        LOG.info("Waiting for threadgroup to exit, active threads is " +
--            this.threadGroup.activeCount());
--        if (this.threadGroup.activeCount() == 0) {
--          break;
++      this.threadGroup = null;
++    }
++    if (this.dataXceiverServer != null) {
++      // wait for dataXceiverServer to terminate
++      try {
++        this.dataXceiverServer.join();
++      } catch (InterruptedException ie) {
        }
++    }
++    if (this.localDataXceiverServer != null) {
++      // wait for localDataXceiverServer to terminate
        try {
--          Thread.sleep(sleepMs);
--        } catch (InterruptedException e) {}
--        sleepMs = sleepMs * 3 / 2; // exponential backoff
--        if (sleepMs > 200) {
--          sleepMs = 200;
++        this.localDataXceiverServer.join();
++      } catch (InterruptedException ie) {
        }
      }
--      this.threadGroup = null;
--    }
--    if (this.dataXceiverServer != null) {
--      // wait for dataXceiverServer to terminate
--      try {
--        this.dataXceiverServer.join();
--      } catch (InterruptedException ie) {
++    if (metrics != null) {
++      metrics.setDataNodeActiveXceiversCount(0);
      }
--    }
--    if (this.localDataXceiverServer != null) {
--      // wait for localDataXceiverServer to terminate
--      try {
--        this.localDataXceiverServer.join();
--      } catch (InterruptedException ie) {
++
++    // IPC server needs to be shutdown late in the process, otherwise
++    // shutdown command response won't get sent.
++    if (ipcServer != null) {
++      ipcServer.stop();
      }
--    }
--    if (metrics != null) {
--      metrics.setDataNodeActiveXceiversCount(0);
--    }
--    // IPC server needs to be shutdown late in the process, otherwise
--    // shutdown command response won't get sent.
--    if (ipcServer != null) {
--      ipcServer.stop();
--    }
++    if (blockPoolManager != null) {
++      try {
++        this.blockPoolManager.shutDownAll(bposArray);
++      } catch (InterruptedException ie) {
++        LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
++      }
++    }
--    if(blockPoolManager != null) {
--      try {
--        this.blockPoolManager.shutDownAll(bposArray);
--      } catch (InterruptedException ie) {
--        LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
++    if (storage != null) {
++      try {
++        this.storage.unlockAll();
++      } catch (IOException ie) {
++        LOG.warn("Exception when unlocking storage: " + ie, ie);
++      }
      }
--    }
--
--    if (storage != null) {
--      try {
--        this.storage.unlockAll();
--      } catch (IOException ie) {
--        LOG.warn("Exception when unlocking storage: " + ie, ie);
++    if (data != null) {
++      data.shutdown();
      }
++    if (metrics != null) {
++      metrics.shutdown();
++    }
++    if (diskMetrics != null) {
++      diskMetrics.shutdownAndWait();
++    }
++    if (dataNodeInfoBeanName != null) {
++      MBeans.unregister(dataNodeInfoBeanName);
++      dataNodeInfoBeanName = null;
++    }
++    if (shortCircuitRegistry != null) shortCircuitRegistry.shutdown();
++    LOG.info("Shutdown complete.");
++    synchronized (this) {
++      // it is already false, but setting it again to avoid a findbug warning.
++      this.shouldRun = false;
++      // Notify the main thread.
++      notifyAll();
++    }
++    tracer.close();
    }
--    if (data != null) {
--      data.shutdown();
--    }
--    if (metrics != null) {
--      metrics.shutdown();
-     }
-     if (diskMetrics != null) {
-       diskMetrics.shutdownAndWait();
--    }
--    if (dataNodeInfoBeanName != null) {
--      MBeans.unregister(dataNodeInfoBeanName);
--      dataNodeInfoBeanName = null;
--    }
--    if (shortCircuitRegistry != null) shortCircuitRegistry.shutdown();
--    LOG.info("Shutdown complete.");
--    synchronized(this) {
--      // it is already false, but setting it again to avoid a findbug warning.
--      this.shouldRun = false;
--      // Notify the main thread.
--      notifyAll();
--    }
--    tracer.close();
  }
- 
- 
+ 
  /**
-  * Check if there is a disk failure asynchronously and if so, handle the error
+  * Check if there is a disk failure asynchronously
+  * and if so, handle the error.
   */
- public void checkDiskErrorAsync() {
-   synchronized(checkDiskErrorMutex) {
-     checkDiskErrorFlag = true;
-     if(checkDiskErrorThread == null) {
-       startCheckDiskErrorThread();
-       checkDiskErrorThread.start();
-       LOG.info("Starting CheckDiskError Thread");
-     }
-   }
+ public void checkDiskErrorAsync(FsVolumeSpi volume) {
+   volumeChecker.checkVolume(
+       volume, (healthyVolumes, failedVolumes) -> {
+         if (failedVolumes.size() > 0) {
+           LOG.warn("checkDiskErrorAsync callback got {} failed volumes: {}",
+               failedVolumes.size(), failedVolumes);
+         } else {
+           LOG.debug("checkDiskErrorAsync: no volume failures detected");
+         }
+         lastDiskErrorCheck = Time.monotonicNow();
+         handleVolumeFailures(failedVolumes);
+       });
  }
 
- private void handleDiskError(String errMsgr) {
+ private void handleDiskError(String failedVolumes) {
    final boolean hasEnoughResources = data.hasEnoughResource();
-   LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResources);
+   LOG.warn("DataNode.handleDiskError on : [" + failedVolumes +
+       "] Keep Running: " + hasEnoughResources);
    // If we have enough active valid volumes then we do not want to
    // shutdown the DN completely.
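[Editor's example. The new checkDiskErrorAsync above replaces the old flag-plus-thread scheme with a callback handed to DatasetVolumeChecker. A minimal sketch of that hand-off pattern, assuming only that the checker probes the volume off-thread and reports (healthy, failed) lists to the callback; Volume, VolumeChecker, and the fake health flag are hypothetical stand-ins for FsVolumeSpi and DatasetVolumeChecker, not the HDFS implementation:

    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.function.BiConsumer;

    public class CheckDiskDemo {

      /** Hypothetical stand-in for FsVolumeSpi. */
      static class Volume {
        final String path;
        final boolean healthy;
        Volume(String path, boolean healthy) { this.path = path; this.healthy = healthy; }
        @Override public String toString() { return path; }
      }

      /** Hypothetical stand-in for DatasetVolumeChecker: probe off-thread, then call back. */
      static class VolumeChecker {
        void checkVolume(Volume v, BiConsumer<List<Volume>, List<Volume>> callback) {
          CompletableFuture.runAsync(() -> {
            List<Volume> one = Collections.singletonList(v);
            List<Volume> none = Collections.emptyList();
            if (v.healthy) {
              callback.accept(one, none);   // (healthy, failed)
            } else {
              callback.accept(none, one);
            }
          }).join(); // join only so this demo finishes before main exits
        }
      }

      public static void main(String[] args) {
        new VolumeChecker().checkVolume(new Volume("/data/1", false),
            (healthy, failed) -> {
              if (!failed.isEmpty()) {
                System.out.println("failed volumes: " + failed);
              }
            });
      }
    }

The design point is that callers no longer poll a shared flag; the checker owns the probe's threading and delivers results exactly once per request.]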
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 86b7899,b51b1fc..58d8de2
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@@ -98,15 -95,8 +98,15 @@@ public class DatanodeHttpServer impleme
      this.restCsrfPreventionFilter = createRestCsrfPreventionFilter(conf);
      this.conf = conf;
 
+     final ObjectStoreJerseyContainer finalContainer;
+     if (objectStoreHandler != null) {
+       finalContainer = objectStoreHandler.getObjectStoreJerseyContainer();
+     } else {
+       finalContainer = null;
+     }
+ 
      Configuration confForInfoServer = new Configuration(conf);
-     confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
+     confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
      HttpServer2.Builder builder = new HttpServer2.Builder()
          .setName("datanode")
          .setConf(confForInfoServer)
@@@ -148,9 -138,16 +148,16 @@@
          .childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) throws Exception {
-             ch.pipeline().addLast(new PortUnificationServerHandler(jettyAddr,
-                 conf, confForCreate, restCsrfPreventionFilter,
-                 finalContainer));
+             ChannelPipeline p = ch.pipeline();
+             p.addLast(new HttpRequestDecoder(),
+                 new HttpResponseEncoder());
+             if (restCsrfPreventionFilter != null) {
+               p.addLast(new RestCsrfPreventionFilterHandler(
+                   restCsrfPreventionFilter));
+             }
+             p.addLast(
+                 new ChunkedWriteHandler(),
-                 new URLDispatcher(jettyAddr, conf, confForCreate));
++                new URLDispatcher(jettyAddr, conf, confForCreate, finalContainer));
            }
          });
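[Editor's example. The DatanodeHttpServer change above builds the Netty pipeline inline -- HTTP decoder and encoder, an optional CSRF filter, a chunked-write handler, then the URL dispatcher -- rather than going through PortUnificationServerHandler. A self-contained sketch of that pipeline shape against plain Netty 4; the port is arbitrary and the placeholder comments stand in for handlers (such as URLDispatcher) that are not reproduced here:

    import io.netty.bootstrap.ServerBootstrap;
    import io.netty.channel.ChannelInitializer;
    import io.netty.channel.ChannelPipeline;
    import io.netty.channel.nio.NioEventLoopGroup;
    import io.netty.channel.socket.SocketChannel;
    import io.netty.channel.socket.nio.NioServerSocketChannel;
    import io.netty.handler.codec.http.HttpRequestDecoder;
    import io.netty.handler.codec.http.HttpResponseEncoder;
    import io.netty.handler.stream.ChunkedWriteHandler;

    public class PipelineSketch {
      public static void main(String[] args) throws InterruptedException {
        NioEventLoopGroup boss = new NioEventLoopGroup(1);
        NioEventLoopGroup worker = new NioEventLoopGroup();
        try {
          ServerBootstrap b = new ServerBootstrap()
              .group(boss, worker)
              .channel(NioServerSocketChannel.class)
              .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) {
                  ChannelPipeline p = ch.pipeline();
                  // bytes <-> HTTP message objects
                  p.addLast(new HttpRequestDecoder(), new HttpResponseEncoder());
                  // a filter handler (e.g. CSRF prevention) would slot in here
                  // streams large response bodies without buffering them whole
                  p.addLast(new ChunkedWriteHandler());
                  // the final dispatcher handler (URLDispatcher in the diff) goes last
                }
              });
          b.bind(8080).sync().channel().close(); // bind, then close: sketch only
        } finally {
          boss.shutdownGracefully();
          worker.shutdownGracefully();
        }
      }
    }

Handlers run in pipeline order for inbound events and reverse order for outbound writes, which is why the encoder/decoder pair sits first and the dispatcher last.]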
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
index ae4e8f9,66ce9ee..b0e045b
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
@@@ -33,9 -34,9 +34,9 @@@ import com.google.common.annotations.Vi
  import com.google.common.base.Preconditions;
 
  /**
-- * NamespaceInfo is returned by the name-node in reply 
++ * NamespaceInfo is returned by the name-node in reply
   * to a data-node handshake.
-- * 
++ *
   */
  @InterfaceAudience.Private
  @InterfaceStability.Evolving
@@@ -106,18 -110,19 +110,19 @@@ public class NamespaceInfo extends Stor
      this.capabilities = capabilities;
    }
 
--  public NamespaceInfo(int nsID, String clusterID, String bpID, 
++  public NamespaceInfo(int nsID, String clusterID, String bpID,
        long cT) {
      this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
          VersionInfo.getVersion());
    }
 
    public NamespaceInfo(int nsID, String clusterID, String bpID,
-       long cT, NodeType nodeType) {
+       long cT, HAServiceState st) {
      this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
-         VersionInfo.getVersion(), nodeType, CAPABILITIES_SUPPORTED);
+         VersionInfo.getVersion());
+     this.state = st;
    }
-- 
++
    public long getCapabilities() {
      return capabilities;
    }
@@@ -141,7 -151,7 +151,7 @@@
    public String getBlockPoolID() {
      return blockPoolID;
    }
-- 
++
    public String getSoftwareVersion() {
      return softwareVersion;
    }
@@@ -172,4 -186,4 +186,4 @@@
          "BPID=" + storage.getBlockPoolID() + ".");
    }
  }
--}
++}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org