hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1165103 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs: protocol/FSConstants.java protocol/HdfsConstants.java server/common/HdfsConstants.java server/common/HdfsServerConstants.java
Date: Sun, 04 Sep 2011 19:38:51 GMT
Author: atm
Date: Sun Sep  4 19:38:50 2011
New Revision: 1165103

URL: http://svn.apache.org/viewvc?rev=1165103&view=rev
Log:
Follow-up commit for HDFS-1620 since I missed some files.

Added:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java?rev=1165103&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java Sun Sep  4 19:38:50 2011
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
+/************************************
+ * Some handy constants
+ * 
+ ************************************/
+@InterfaceAudience.Private
+public final class HdfsConstants {
+  /* Hidden constructor */
+  private HdfsConstants() {
+  }
+
+  public static int MIN_BLOCKS_FOR_WRITE = 5;
+
+  // Long that indicates "leave current quota unchanged"
+  public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
+  public static final long QUOTA_RESET = -1L;
+
+  //
+  // Timeouts, constants
+  //
+  public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
+  public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
+  public static final long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
+
+  // We need to limit the length and depth of a path in the filesystem.
+  // HADOOP-438
+  // Currently we set the maximum length to 8k characters and the maximum depth
+  // to 1k.
+  public static int MAX_PATH_LENGTH = 8000;
+  public static int MAX_PATH_DEPTH = 1000;
+
+  // TODO mb@media-style.com: should be conf injected?
+  public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
+  public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
+      DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
+      DFSConfigKeys.IO_FILE_BUFFER_SIZE_DEFAULT);
+  // Used for writing header etc.
+  public static final int SMALL_BUFFER_SIZE = Math.min(IO_FILE_BUFFER_SIZE / 2,
+      512);
+
+  public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;
+
+  // SafeMode actions
+  public enum SafeModeAction {
+    SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET;
+  }
+
+  // type of the datanode report
+  public static enum DatanodeReportType {
+    ALL, LIVE, DEAD
+  }
+
+  // An invalid transaction ID that will never be seen in a real namesystem.
+  public static final long INVALID_TXID = -12345;
+
+  /**
+   * Distributed upgrade actions:
+   * 
+   * 1. Get upgrade status.
+   * 2. Get detailed upgrade status.
+   * 3. Proceed with the upgrade if it is stuck, no matter what the status is.
+   */
+  public static enum UpgradeAction {
+    GET_STATUS, DETAILED_STATUS, FORCE_PROCEED;
+  }
+
+  /**
+   * URI Scheme for hdfs://namenode/ URIs.
+   */
+  public static final String HDFS_URI_SCHEME = "hdfs";
+
+  /**
+   * Please see {@link LayoutVersion} on adding a new layout version.
+   */
+  public static final int LAYOUT_VERSION = LayoutVersion
+      .getCurrentLayoutVersion();
+}
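
For context, the two quota sentinels above follow a convention used throughout HDFS: QUOTA_DONT_SET (Long.MAX_VALUE) means "leave the current quota unchanged", while QUOTA_RESET (-1) removes the quota. A minimal sketch of that convention, assuming a hypothetical Directory holder and applyNamespaceQuota helper (neither is part of this commit):

import org.apache.hadoop.hdfs.protocol.HdfsConstants;

class QuotaSentinelExample {
  /** Hypothetical holder for a directory's namespace quota. */
  static class Directory {
    long nsQuota = HdfsConstants.QUOTA_RESET; // unquoted by default
  }

  /** Applies a requested quota value using the sentinel convention. */
  static void applyNamespaceQuota(Directory dir, long requested) {
    if (requested == HdfsConstants.QUOTA_DONT_SET) {
      return; // Long.MAX_VALUE: leave the current quota as it is
    }
    dir.nsQuota = requested; // QUOTA_RESET (-1) clears it; any other value sets it
  }
}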

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java?rev=1165103&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java Sun Sep  4 19:38:50 2011
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.common;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/************************************
+ * Some handy internal HDFS constants
+ *
+ ************************************/
+
+@InterfaceAudience.Private
+public final class HdfsServerConstants {
+  /* Hidden constructor */
+  private HdfsServerConstants() { }
+  
+  /**
+   * Type of the node
+   */
+  static public enum NodeType {
+    NAME_NODE,
+    DATA_NODE;
+  }
+
+  /** Startup options */
+  static public enum StartupOption{
+    FORMAT  ("-format"),
+    CLUSTERID ("-clusterid"),
+    GENCLUSTERID ("-genclusterid"),
+    REGULAR ("-regular"),
+    BACKUP  ("-backup"),
+    CHECKPOINT("-checkpoint"),
+    UPGRADE ("-upgrade"),
+    ROLLBACK("-rollback"),
+    FINALIZE("-finalize"),
+    IMPORT  ("-importCheckpoint");
+    
+    private String name = null;
+    
+    // Used only with format and upgrade options
+    private String clusterId = null;
+    
+    private StartupOption(String arg) {this.name = arg;}
+    public String getName() {return name;}
+    public NamenodeRole toNodeRole() {
+      switch(this) {
+      case BACKUP: 
+        return NamenodeRole.BACKUP;
+      case CHECKPOINT: 
+        return NamenodeRole.CHECKPOINT;
+      default:
+        return NamenodeRole.NAMENODE;
+      }
+    }
+    
+    public void setClusterId(String cid) {
+      clusterId = cid;
+    }
+    
+    public String getClusterId() {
+      return clusterId;
+    }
+  }
+
+  // Timeouts for communicating with DataNode for streaming writes/reads
+  public static int READ_TIMEOUT = 60 * 1000;
+  public static int READ_TIMEOUT_EXTENSION = 5 * 1000;
+  public static int WRITE_TIMEOUT = 8 * 60 * 1000;
+  public static int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline
+  public static int DN_KEEPALIVE_TIMEOUT = 5 * 1000;
+
+  /**
+   * Defines the NameNode role.
+   */
+  static public enum NamenodeRole {
+    NAMENODE  ("NameNode"),
+    BACKUP    ("Backup Node"),
+    CHECKPOINT("Checkpoint Node");
+
+    private String description = null;
+    private NamenodeRole(String arg) {this.description = arg;}
+  
+    public String toString() {
+      return description;
+    }
+  }
+
+  /**
+   * Block replica states, which a replica can go through while being constructed.
+   */
+  static public enum ReplicaState {
+    /** Replica is finalized. The state when replica is not modified. */
+    FINALIZED(0),
+    /** Replica is being written to. */
+    RBW(1),
+    /** Replica is waiting to be recovered. */
+    RWR(2),
+    /** Replica is under recovery. */
+    RUR(3),
+    /** Temporary replica: created for replication and relocation only. */
+    TEMPORARY(4);
+
+    private int value;
+
+    private ReplicaState(int v) {
+      value = v;
+    }
+
+    public int getValue() {
+      return value;
+    }
+
+    public static ReplicaState getState(int v) {
+      return ReplicaState.values()[v];
+    }
+
+    /** Read from in */
+    public static ReplicaState read(DataInput in) throws IOException {
+      return values()[in.readByte()];
+    }
+
+    /** Write to out */
+    public void write(DataOutput out) throws IOException {
+      out.writeByte(ordinal());
+    }
+  }
+
+  /**
+   * States, which a block can go through while it is under construction.
+   */
+  static public enum BlockUCState {
+    /**
+     * Block construction completed.<br>
+     * The block has at least one {@link ReplicaState#FINALIZED} replica,
+     * and is not going to be modified.
+     */
+    COMPLETE,
+    /**
+     * The block is under construction.<br>
+     * It has been recently allocated for write or append.
+     */
+    UNDER_CONSTRUCTION,
+    /**
+     * The block is under recovery.<br>
+     * When a file lease expires, its last block may not be {@link #COMPLETE}
+     * and needs to go through a recovery procedure,
+     * which synchronizes the contents of the existing replicas.
+     */
+    UNDER_RECOVERY,
+    /**
+     * The block is committed.<br>
+     * The client reported that all bytes are written to data-nodes
+     * with the given generation stamp and block length, but no 
+     * {@link ReplicaState#FINALIZED} 
+     * replicas have yet been reported by data-nodes themselves.
+     */
+    COMMITTED;
+  }
+  
+  public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
+  public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;
+}
+
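
Since each StartupOption exposes its command-line flag through getName(), parsing a flag reduces to a case-insensitive scan over values(). A hedged sketch of that idea (the parseStartupOption helper is illustrative only; the actual NameNode argument parsing is not part of this commit):

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

class StartupOptionExample {
  /** Hypothetical parser: maps a flag such as "-upgrade" to its enum value. */
  static StartupOption parseStartupOption(String arg) {
    for (StartupOption opt : StartupOption.values()) {
      if (opt.getName().equalsIgnoreCase(arg)) {
        return opt;
      }
    }
    return null; // unrecognized flag
  }

  public static void main(String[] args) {
    StartupOption opt = parseStartupOption("-upgrade");
    // Prints "UPGRADE runs as NameNode": only BACKUP and CHECKPOINT
    // map to non-default roles in toNodeRole().
    System.out.println(opt + " runs as " + opt.toNodeRole());
  }
}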

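Note that ReplicaState.write serializes a state as a single byte using ordinal(), and read maps that byte back through values(); this stays consistent with getValue() only because the declared values 0-4 follow declaration order. A self-contained round-trip sketch using just java.io and the enum above:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;

class ReplicaStateExample {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    ReplicaState.RBW.write(out); // writes one byte: the ordinal, 1

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    System.out.println(ReplicaState.read(in)); // prints "RBW"
  }
}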

