hadoop-common-commits mailing list archives

From aengin...@apache.org
Subject [49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240
Date Fri, 03 Mar 2017 22:05:41 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
----------------------------------------------------------------------
diff --cc hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
index 0000000,a2ba0bf..4b19a21f
mode 000000,100644..100644
--- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
@@@ -1,0 -1,132 +1,143 @@@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+ 
+    http://www.apache.org/licenses/LICENSE-2.0
+ 
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+ -->
+ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+   <modelVersion>4.0.0</modelVersion>
+   <parent>
+     <groupId>org.apache.hadoop</groupId>
+     <artifactId>hadoop-project</artifactId>
+     <version>3.0.0-alpha3-SNAPSHOT</version>
+     <relativePath>../../hadoop-project</relativePath>
+   </parent>
+   <artifactId>hadoop-client-check-test-invariants</artifactId>
+   <version>3.0.0-alpha3-SNAPSHOT</version>
+   <packaging>pom</packaging>
+ 
+   <description>Enforces our invariants for the testing client modules.</description>
+   <name>Apache Hadoop Client Packaging Invariants for Test</name>
+ 
+   <properties>
+   </properties>
+ 
+   <dependencies>
+     <dependency>
+       <groupId>org.apache.hadoop</groupId>
+       <artifactId>hadoop-client-api</artifactId>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.hadoop</groupId>
+       <artifactId>hadoop-client-runtime</artifactId>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.hadoop</groupId>
+       <artifactId>hadoop-client-minicluster</artifactId>
+     </dependency>
+   </dependencies>
+   <build>
+     <plugins>
+       <plugin>
+         <groupId>org.apache.maven.plugins</groupId>
+         <artifactId>maven-enforcer-plugin</artifactId>
+         <version>1.4</version>
+         <dependencies>
+           <dependency>
+             <groupId>org.codehaus.mojo</groupId>
+             <artifactId>extra-enforcer-rules</artifactId>
+             <version>1.0-beta-3</version>
+           </dependency>
+         </dependencies>
+         <executions>
+           <execution>
+             <id>enforce-banned-dependencies</id>
+             <goals>
+               <goal>enforce</goal>
+             </goals>
+             <configuration>
+               <rules>
+                 <banTransitiveDependencies>
+ <!--
+                   <message>
+     Our client-facing artifacts are not supposed to have additional dependencies
+     and one or more of them do. The output from the enforcer plugin should give
+     specifics.
+                   </message>
+ -->
+                   <excludes>
+                     <!-- annotations is provided, and both artifacts exclude the tools transitive,
+                          but enforcer still sees it.
+                     -->
+                     <exclude>org.apache.hadoop:hadoop-annotations</exclude>
+                     <!-- We leave HTrace as an unshaded dependency on purpose so that tracing within a JVM will work -->
+                     <exclude>org.apache.htrace:htrace-core4</exclude>
+                     <!-- Leave slf4j unshaded so downstream users can configure logging. -->
+                     <exclude>org.slf4j:slf4j-api</exclude>
+                     <!-- Leave commons-logging unshaded so downstream users can configure logging. -->
+                     <exclude>commons-logging:commons-logging</exclude>
+                     <!-- Leave log4j unshaded so downstream users can configure logging. -->
+                     <exclude>log4j:log4j</exclude>
+                     <!-- Leave JUnit unshaded so downstream can use our test helper classes -->
+                     <exclude>junit:junit</exclude>
+                     <!-- JUnit brings in hamcrest -->
+                     <exclude>org.hamcrest:hamcrest-core</exclude>
+                   </excludes>
+                 </banTransitiveDependencies>
+                 <banDuplicateClasses>
+                   <findAllDuplicates>true</findAllDuplicates>
+                   <dependencies>
+                     <dependency>
+                       <groupId>org.apache.hadoop</groupId>
+                       <artifactId>hadoop-annotations</artifactId>
+                       <ignoreClasses>
+                         <ignoreClass>*</ignoreClass>
+                       </ignoreClasses>
+                     </dependency>
++                    <dependency>
++                      <!--Duplicate classes found:-->
++                      <!--Found in:-->
++                      <!--org.apache.hadoop:hadoop-client-runtime:jar:3.0.0-alpha3-SNAPSHOT:compile-->
++                      <!--org.apache.hadoop:hadoop-client-minicluster:jar:3.0.0-alpha3-SNAPSHOT:compile-->
++                      <groupId>io.netty</groupId>
++                      <artifactId>netty</artifactId>
++                      <ignoreClasses>
++                        <ignoreClass>*</ignoreClass>
++                      </ignoreClasses>
++                    </dependency>
+                   </dependencies>
+                 </banDuplicateClasses>
+               </rules>
+ <!-- TODO we need a rule for "we don't have classes that are outside of the org.apache.hadoop package" -->
+ <!-- TODO we need a rule for "the constants in this set of classes haven't been shaded / don't have this prefix"
+      Manually checking the set of Keys that look like packages we relocate:
+ 
+       cat `find . \( -name '*Keys.java' -o -name '*KeysPublic.java' \) -a -path '*/src/main/*'` | grep -E "\"(io\.|org\.|com\.|net\.)" | grep -v "^package" | grep -v "^import" | grep -v "\"org.apache.hadoop"
+ 
+      Manually check the set of shaded artifacts to see if the Keys constants have been relocated:
+ 
+      for clazz in `find . \( -name '*Keys.java' -o -name '*KeysPublic.java' \) -a -path '*/src/main/*'`; do
+        clazz=${clazz#*src/main/java/}
+        clazz="${clazz%.java}"
+        javap -cp hadoop-client-modules/hadoop-client-api/target/hadoop-client-api-3.0.0-alpha2-SNAPSHOT.jar:hadoop-client-modules/hadoop-client-runtime/target/hadoop-client-runtime-3.0.0-alpha2-SNAPSHOT.jar:hadoop-client-modules/hadoop-client-minicluster/target/hadoop-client-minicluster-3.0.0-alpha2-SNAPSHOT.jar \
+            -constants "${clazz//\//.}" | grep "org.apache.hadoop.shaded"
+      done
+ -->
+             </configuration>
+           </execution>
+         </executions>
+       </plugin>
+     </plugins>
+   </build>
+ 
+ </project>
+ 

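The two TODO comments at the end of this pom describe checks the enforcer configuration above does not yet cover. The first one ("no classes outside of the org.apache.hadoop package") can be approximated by hand with a short scan of a shaded client jar. The sketch below is illustrative only: ShadedPackageCheck and the jar-path argument are made up for this example and are not part of the module.

import java.io.IOException;
import java.util.Enumeration;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

/**
 * Rough sketch of the TODO'd rule above: walk a shaded client jar and report
 * any class that is not under org.apache.hadoop. The jar path is taken from
 * the command line; nothing in this pom wires this class up.
 */
public class ShadedPackageCheck {
  public static void main(String[] args) throws IOException {
    try (JarFile jar = new JarFile(args[0])) {
      Enumeration<JarEntry> entries = jar.entries();
      while (entries.hasMoreElements()) {
        String name = entries.nextElement().getName();
        // Only inspect class files; resources and META-INF are out of scope here.
        if (!name.endsWith(".class")) {
          continue;
        }
        String className =
            name.substring(0, name.length() - ".class".length()).replace('/', '.');
        if (!className.startsWith("org.apache.hadoop.")) {
          System.out.println("Unexpected class outside org.apache.hadoop: " + className);
        }
      }
    }
  }
}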
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 86aed61,6f24858..f5d7da1
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@@ -115,8 -108,8 +110,9 @@@ import org.apache.hadoop.hdfs.DFSUtil
  import org.apache.hadoop.hdfs.DFSUtilClient;
  import org.apache.hadoop.hdfs.HDFSPolicyProvider;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
- import org.apache.hadoop.ozone.container.common.statemachine
-     .DatanodeStateMachine;
++import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+ import org.apache.hadoop.hdfs.server.datanode.checker.DatasetVolumeChecker;
+ import org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker;
  import org.apache.hadoop.util.AutoCloseableLock;
  import org.apache.hadoop.hdfs.client.BlockReportOptions;
  import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@@ -395,8 -389,12 +394,13 @@@ public class DataNode extends Reconfigu
    private static final double CONGESTION_RATIO = 1.5;
    private DiskBalancer diskBalancer;
  
+   @Nullable
+   private final StorageLocationChecker storageLocationChecker;
+ 
+   private final DatasetVolumeChecker volumeChecker;
+ 
    private final SocketFactory socketFactory;
 +  private DatanodeStateMachine datanodeStateMachine;
  
    private static Tracer createTracer(Configuration conf) {
      return new Tracer.Builder("DataNode").
@@@ -427,11 -424,11 +432,12 @@@
      this.connectToDnViaHostname = false;
      this.blockScanner = new BlockScanner(this, this.getConf());
      this.pipelineSupportECN = false;
 +    this.ozoneEnabled = false;
-     this.checkDiskErrorInterval =
-         ThreadLocalRandom.current().nextInt(5000, (int) (5000 * 1.25));
      this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
+     this.dnConf = new DNConf(this);
      initOOBTimeout();
+     storageLocationChecker = null;
+     volumeChecker = new DatasetVolumeChecker(conf, new Timer());
    }
  
    /**
@@@ -1920,7 -1920,7 +1954,7 @@@
          }
        }
      }
--    
++
      List<BPOfferService> bposArray = (this.blockPoolManager == null)
          ? new ArrayList<BPOfferService>()
          : this.blockPoolManager.getAllNamenodeThreads();
@@@ -1968,137 -1964,135 +1997,150 @@@
        }
      }
  
 -    volumeChecker.shutdownAndWait(1, TimeUnit.SECONDS);
 -
 -    if (storageLocationChecker != null) {
 -      storageLocationChecker.shutdownAndWait(1, TimeUnit.SECONDS);
 +    // Stop the object store handler
 +    if (this.objectStoreHandler != null) {
 +      this.objectStoreHandler.close();
      }
  
-     if(this.ozoneEnabled) {
-       if(datanodeStateMachine != null) {
 -    if (pauseMonitor != null) {
 -      pauseMonitor.stop();
 -    }
++    if (this.ozoneEnabled) {
++      if (datanodeStateMachine != null) {
 +        try {
 +          datanodeStateMachine.close();
 +        } catch (Exception e) {
 +          LOG.error("Error in ozone shutdown. ex {}", e.toString());
 +        }
 +      }
-     }
++      volumeChecker.shutdownAndWait(1, TimeUnit.SECONDS);
  
-     if (pauseMonitor != null) {
-       pauseMonitor.stop();
-     }
 -    // shouldRun is set to false here to prevent certain threads from exiting
 -    // before the restart prep is done.
 -    this.shouldRun = false;
 -    
 -    // wait reconfiguration thread, if any, to exit
 -    shutdownReconfigurationTask();
 -
 -    // wait for all data receiver threads to exit
 -    if (this.threadGroup != null) {
 -      int sleepMs = 2;
 -      while (true) {
 -        // When shutting down for restart, wait 2.5 seconds before forcing
 -        // termination of receiver threads.
 -        if (!this.shutdownForUpgrade ||
 -            (this.shutdownForUpgrade && (Time.monotonicNow() - timeNotified
 -                > 1000))) {
 -          this.threadGroup.interrupt();
 -          break;
++      if (storageLocationChecker != null) {
++        storageLocationChecker.shutdownAndWait(1, TimeUnit.SECONDS);
++      }
 +
-     // shouldRun is set to false here to prevent certain threads from exiting
-     // before the restart prep is done.
-     this.shouldRun = false;
-     
-     // wait reconfiguration thread, if any, to exit
-     shutdownReconfigurationTask();
- 
-     // wait for all data receiver threads to exit
-     if (this.threadGroup != null) {
-       int sleepMs = 2;
-       while (true) {
-         // When shutting down for restart, wait 2.5 seconds before forcing
-         // termination of receiver threads.
-         if (!this.shutdownForUpgrade ||
-             (this.shutdownForUpgrade && (Time.monotonicNow() - timeNotified
-                 > 1000))) {
-           this.threadGroup.interrupt();
-           break;
++      if (pauseMonitor != null) {
++        pauseMonitor.stop();
++      }
++
++      // shouldRun is set to false here to prevent certain threads from exiting
++      // before the restart prep is done.
++      this.shouldRun = false;
++
++      // wait reconfiguration thread, if any, to exit
++      shutdownReconfigurationTask();
++
++      // wait for all data receiver threads to exit
++      if (this.threadGroup != null) {
++        int sleepMs = 2;
++        while (true) {
++          // When shutting down for restart, wait 2.5 seconds before forcing
++          // termination of receiver threads.
++          if (!this.shutdownForUpgrade ||
++              (this.shutdownForUpgrade && (Time.monotonicNow() - timeNotified
++                  > 1000))) {
++            this.threadGroup.interrupt();
++            break;
++          }
++          LOG.info("Waiting for threadgroup to exit, active threads is " +
++              this.threadGroup.activeCount());
++          if (this.threadGroup.activeCount() == 0) {
++            break;
++          }
++          try {
++            Thread.sleep(sleepMs);
++          } catch (InterruptedException e) {
++          }
++          sleepMs = sleepMs * 3 / 2; // exponential backoff
++          if (sleepMs > 200) {
++            sleepMs = 200;
++          }
          }
--        LOG.info("Waiting for threadgroup to exit, active threads is " +
--                 this.threadGroup.activeCount());
--        if (this.threadGroup.activeCount() == 0) {
--          break;
++        this.threadGroup = null;
++      }
++      if (this.dataXceiverServer != null) {
++        // wait for dataXceiverServer to terminate
++        try {
++          this.dataXceiverServer.join();
++        } catch (InterruptedException ie) {
          }
++      }
++      if (this.localDataXceiverServer != null) {
++        // wait for localDataXceiverServer to terminate
          try {
--          Thread.sleep(sleepMs);
--        } catch (InterruptedException e) {}
--        sleepMs = sleepMs * 3 / 2; // exponential backoff
--        if (sleepMs > 200) {
--          sleepMs = 200;
++          this.localDataXceiverServer.join();
++        } catch (InterruptedException ie) {
          }
        }
--      this.threadGroup = null;
--    }
--    if (this.dataXceiverServer != null) {
--      // wait for dataXceiverServer to terminate
--      try {
--        this.dataXceiverServer.join();
--      } catch (InterruptedException ie) {
++      if (metrics != null) {
++        metrics.setDataNodeActiveXceiversCount(0);
        }
--    }
--    if (this.localDataXceiverServer != null) {
--      // wait for localDataXceiverServer to terminate
--      try {
--        this.localDataXceiverServer.join();
--      } catch (InterruptedException ie) {
++
++      // IPC server needs to be shutdown late in the process, otherwise
++      // shutdown command response won't get sent.
++      if (ipcServer != null) {
++        ipcServer.stop();
        }
--    }
--    if (metrics != null) {
--      metrics.setDataNodeActiveXceiversCount(0);
--    }
  
--   // IPC server needs to be shutdown late in the process, otherwise
--   // shutdown command response won't get sent.
--   if (ipcServer != null) {
--      ipcServer.stop();
--    }
++      if (blockPoolManager != null) {
++        try {
++          this.blockPoolManager.shutDownAll(bposArray);
++        } catch (InterruptedException ie) {
++          LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
++        }
++      }
  
--    if(blockPoolManager != null) {
--      try {
--        this.blockPoolManager.shutDownAll(bposArray);
--      } catch (InterruptedException ie) {
--        LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
++      if (storage != null) {
++        try {
++          this.storage.unlockAll();
++        } catch (IOException ie) {
++          LOG.warn("Exception when unlocking storage: " + ie, ie);
++        }
        }
--    }
--    
--    if (storage != null) {
--      try {
--        this.storage.unlockAll();
--      } catch (IOException ie) {
--        LOG.warn("Exception when unlocking storage: " + ie, ie);
++      if (data != null) {
++        data.shutdown();
        }
++      if (metrics != null) {
++        metrics.shutdown();
++      }
++      if (diskMetrics != null) {
++        diskMetrics.shutdownAndWait();
++      }
++      if (dataNodeInfoBeanName != null) {
++        MBeans.unregister(dataNodeInfoBeanName);
++        dataNodeInfoBeanName = null;
++      }
++      if (shortCircuitRegistry != null) shortCircuitRegistry.shutdown();
++      LOG.info("Shutdown complete.");
++      synchronized (this) {
++        // it is already false, but setting it again to avoid a findbug warning.
++        this.shouldRun = false;
++        // Notify the main thread.
++        notifyAll();
++      }
++      tracer.close();
      }
--    if (data != null) {
--      data.shutdown();
--    }
--    if (metrics != null) {
--      metrics.shutdown();
 -    }
 -    if (diskMetrics != null) {
 -      diskMetrics.shutdownAndWait();
--    }
--    if (dataNodeInfoBeanName != null) {
--      MBeans.unregister(dataNodeInfoBeanName);
--      dataNodeInfoBeanName = null;
--    }
--    if (shortCircuitRegistry != null) shortCircuitRegistry.shutdown();
--    LOG.info("Shutdown complete.");
--    synchronized(this) {
--      // it is already false, but setting it again to avoid a findbug warning.
--      this.shouldRun = false;
--      // Notify the main thread.
--      notifyAll();
--    }
--    tracer.close();
    }
-   
-   
+ 
    /**
-    * Check if there is a disk failure asynchronously and if so, handle the error
+    * Check if there is a disk failure asynchronously
+    * and if so, handle the error.
     */
-   public void checkDiskErrorAsync() {
-     synchronized(checkDiskErrorMutex) {
-       checkDiskErrorFlag = true;
-       if(checkDiskErrorThread == null) {
-         startCheckDiskErrorThread();
-         checkDiskErrorThread.start();
-         LOG.info("Starting CheckDiskError Thread");
-       }
-     }
+   public void checkDiskErrorAsync(FsVolumeSpi volume) {
+     volumeChecker.checkVolume(
+         volume, (healthyVolumes, failedVolumes) -> {
+           if (failedVolumes.size() > 0) {
+             LOG.warn("checkDiskErrorAsync callback got {} failed volumes: {}",
+                 failedVolumes.size(), failedVolumes);
+           } else {
+             LOG.debug("checkDiskErrorAsync: no volume failures detected");
+           }
+           lastDiskErrorCheck = Time.monotonicNow();
+           handleVolumeFailures(failedVolumes);
+         });
    }
-   
-   private void handleDiskError(String errMsgr) {
+ 
+   private void handleDiskError(String failedVolumes) {
      final boolean hasEnoughResources = data.hasEnoughResource();
-     LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResources);
+     LOG.warn("DataNode.handleDiskError on : [" + failedVolumes +
+         "] Keep Running: " + hasEnoughResources);
      
      // If we have enough active valid volumes then we do not want to 
      // shutdown the DN completely.

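The DataNode hunk above replaces the old mutex-plus-thread disk check with checkDiskErrorAsync(FsVolumeSpi), which hands a callback to DatasetVolumeChecker.checkVolume and receives the healthy and failed volume lists when the asynchronous check finishes. The stand-alone sketch below mimics that callback shape with simplified stand-in types; Volume and SimpleVolumeChecker are invented for illustration and are not the HDFS classes.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

/** Illustrative volume type; the real code uses FsVolumeSpi. */
class Volume {
  final String path;
  final boolean healthy;
  Volume(String path, boolean healthy) { this.path = path; this.healthy = healthy; }
  @Override public String toString() { return path; }
}

/** Simplified stand-in for the callback style used by the new checker. */
class SimpleVolumeChecker {
  /** Runs the check off-thread and hands (healthy, failed) volumes to the callback. */
  void checkVolume(Volume v, BiConsumer<List<Volume>, List<Volume>> callback) {
    CompletableFuture.runAsync(() -> {
      List<Volume> healthy = new ArrayList<>();
      List<Volume> failed = new ArrayList<>();
      (v.healthy ? healthy : failed).add(v);
      callback.accept(healthy, failed);
    });
  }
}

public class VolumeCheckDemo {
  public static void main(String[] args) throws Exception {
    SimpleVolumeChecker checker = new SimpleVolumeChecker();
    checker.checkVolume(new Volume("/data/dn1", false), (healthy, failed) -> {
      if (!failed.isEmpty()) {
        System.out.println("failed volumes: " + failed);
      }
    });
    Thread.sleep(100); // let the async check finish before the demo exits
  }
}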
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 86b7899,b51b1fc..58d8de2
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@@ -98,15 -95,8 +98,15 @@@ public class DatanodeHttpServer impleme
      this.restCsrfPreventionFilter = createRestCsrfPreventionFilter(conf);
      this.conf = conf;
  
 +    final ObjectStoreJerseyContainer finalContainer;
 +    if (objectStoreHandler != null) {
 +      finalContainer = objectStoreHandler.getObjectStoreJerseyContainer();
 +    } else {
 +      finalContainer = null;
 +    }
 +
      Configuration confForInfoServer = new Configuration(conf);
-     confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
+     confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
      HttpServer2.Builder builder = new HttpServer2.Builder()
          .setName("datanode")
          .setConf(confForInfoServer)
@@@ -148,9 -138,16 +148,16 @@@
          .childHandler(new ChannelInitializer<SocketChannel>() {
          @Override
          protected void initChannel(SocketChannel ch) throws Exception {
-           ch.pipeline().addLast(new PortUnificationServerHandler(jettyAddr,
-               conf, confForCreate, restCsrfPreventionFilter,
-               finalContainer));
+           ChannelPipeline p = ch.pipeline();
+           p.addLast(new HttpRequestDecoder(),
+             new HttpResponseEncoder());
+           if (restCsrfPreventionFilter != null) {
+             p.addLast(new RestCsrfPreventionFilterHandler(
+                 restCsrfPreventionFilter));
+           }
+           p.addLast(
+               new ChunkedWriteHandler(),
 -              new URLDispatcher(jettyAddr, conf, confForCreate));
++              new URLDispatcher(jettyAddr, conf, confForCreate, finalContainer));
          }
        });
  

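The DatanodeHttpServer hunk above drops the single PortUnificationServerHandler and builds the Netty pipeline explicitly: HTTP request decoder and response encoder first, the optional CSRF filter, then chunked-write support and the URLDispatcher (which now also receives the ObjectStoreJerseyContainer). The small server below shows the same pipeline-ordering idea with stock Netty 4 handlers and a trivial final handler; PipelineOrderDemo, HelloHandler, the port, and the aggregator size are arbitrary demo choices, not part of the patch.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.Unpooled;
import io.netty.channel.*;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.*;
import io.netty.handler.stream.ChunkedWriteHandler;
import java.nio.charset.StandardCharsets;

public class PipelineOrderDemo {
  public static void main(String[] args) throws Exception {
    NioEventLoopGroup boss = new NioEventLoopGroup(1);
    NioEventLoopGroup worker = new NioEventLoopGroup();
    try {
      ServerBootstrap b = new ServerBootstrap()
          .group(boss, worker)
          .channel(NioServerSocketChannel.class)
          .childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) {
              ChannelPipeline p = ch.pipeline();
              // Decode/encode HTTP first, mirroring the order in the diff above.
              p.addLast(new HttpRequestDecoder(), new HttpResponseEncoder());
              // Aggregate so the final handler sees whole requests (demo-only choice).
              p.addLast(new HttpObjectAggregator(64 * 1024));
              // Chunked writes, then the application-level handler at the end.
              p.addLast(new ChunkedWriteHandler(), new HelloHandler());
            }
          });
      Channel ch = b.bind(8080).sync().channel();
      ch.closeFuture().sync();
    } finally {
      boss.shutdownGracefully();
      worker.shutdownGracefully();
    }
  }

  static class HelloHandler extends SimpleChannelInboundHandler<FullHttpRequest> {
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest req) {
      FullHttpResponse resp = new DefaultFullHttpResponse(
          HttpVersion.HTTP_1_1, HttpResponseStatus.OK,
          Unpooled.copiedBuffer("ok\n", StandardCharsets.UTF_8));
      resp.headers().set(HttpHeaderNames.CONTENT_LENGTH, resp.content().readableBytes());
      ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
    }
  }
}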
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
index ae4e8f9,66ce9ee..b0e045b
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
@@@ -33,9 -34,9 +34,9 @@@ import com.google.common.annotations.Vi
  import com.google.common.base.Preconditions;
  
  /**
-- * NamespaceInfo is returned by the name-node in reply 
++ * NamespaceInfo is returned by the name-node in reply
   * to a data-node handshake.
-- * 
++ *
   */
  @InterfaceAudience.Private
  @InterfaceStability.Evolving
@@@ -106,18 -110,19 +110,19 @@@ public class NamespaceInfo extends Stor
      this.capabilities = capabilities;
    }
  
--  public NamespaceInfo(int nsID, String clusterID, String bpID, 
++  public NamespaceInfo(int nsID, String clusterID, String bpID,
        long cT) {
      this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
          VersionInfo.getVersion());
    }
  
    public NamespaceInfo(int nsID, String clusterID, String bpID,
-                        long cT, NodeType nodeType) {
+       long cT, HAServiceState st) {
      this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
-          VersionInfo.getVersion(), nodeType, CAPABILITIES_SUPPORTED);
+         VersionInfo.getVersion());
+     this.state = st;
    }
--  
++
    public long getCapabilities() {
      return capabilities;
    }
@@@ -141,7 -151,7 +151,7 @@@
    public String getBlockPoolID() {
      return blockPoolID;
    }
--  
++
    public String getSoftwareVersion() {
      return softwareVersion;
    }
@@@ -172,4 -186,4 +186,4 @@@
            "BPID=" + storage.getBlockPoolID() + ".");
      }
    }
--}
++}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------


