hadoop-hdfs-commits mailing list archives

From sur...@apache.org
Subject svn commit: r1177117 [2/2] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ hadoop-hdfs/s...
Date Thu, 29 Sep 2011 00:10:13 GMT
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Thu Sep 29 00:09:56 2011
@@ -17,19 +17,31 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.mortbay.util.ajax.JSON;
 
 /** JSON Utilities */
 public class JsonUtil {
-  private static final ThreadLocal<Map<String, Object>> jsonMap
-      = new ThreadLocal<Map<String, Object>>() {
+  private static class ThreadLocalMap extends ThreadLocal<Map<String, Object>> {
     @Override
     protected Map<String, Object> initialValue() {
       return new TreeMap<String, Object>();
@@ -41,7 +53,54 @@ public class JsonUtil {
       m.clear();
       return m;
     }
-  };
+  }
+
+  private static final ThreadLocalMap jsonMap = new ThreadLocalMap();
+  private static final ThreadLocalMap tokenMap = new ThreadLocalMap();
+  private static final ThreadLocalMap datanodeInfoMap = new ThreadLocalMap();
+  private static final ThreadLocalMap extendedBlockMap = new ThreadLocalMap();
+  private static final ThreadLocalMap locatedBlockMap = new ThreadLocalMap();
+
+  private static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
+
+  /** Convert a token object to a Json string. */
+  public static String toJsonString(final Token<? extends TokenIdentifier> token
+      ) throws IOException {
+    if (token == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = tokenMap.get();
+    m.put("urlString", token.encodeToUrlString());
+    return JSON.toString(m);
+  }
+
+  /** Convert a Json map to a Token. */
+  public static Token<? extends TokenIdentifier> toToken(
+      final Map<?, ?> m) throws IOException {
+    if (m == null) {
+      return null;
+    }
+
+    final Token<DelegationTokenIdentifier> token
+        = new Token<DelegationTokenIdentifier>();
+    token.decodeFromUrlString((String)m.get("urlString"));
+    return token;
+  }
+
+  /** Convert a Json map to a Token of DelegationTokenIdentifier. */
+  @SuppressWarnings("unchecked")
+  public static Token<DelegationTokenIdentifier> toDelegationToken(
+      final Map<?, ?> m) throws IOException {
+    return (Token<DelegationTokenIdentifier>)toToken(m);
+  }
+
+  /** Convert a Json map to a Token of BlockTokenIdentifier. */
+  @SuppressWarnings("unchecked")
+  public static Token<BlockTokenIdentifier> toBlockToken(
+      final Map<?, ?> m) throws IOException {
+    return (Token<BlockTokenIdentifier>)toToken(m);
+  }
 
   /** Convert an exception object to a Json string. */
   public static String toJsonString(final Exception e) {
@@ -77,11 +136,10 @@ public class JsonUtil {
 
   /** Convert a HdfsFileStatus object to a Json string. */
   public static String toJsonString(final HdfsFileStatus status) {
-    final Map<String, Object> m = jsonMap.get();
     if (status == null) {
-      m.put("isNull", true);
+      return null;
     } else {
-      m.put("isNull", false);
+      final Map<String, Object> m = jsonMap.get();
       m.put("localName", status.getLocalName());
       m.put("isDir", status.isDir());
       m.put("isSymlink", status.isSymlink());
@@ -97,8 +155,8 @@ public class JsonUtil {
       m.put("modificationTime", status.getModificationTime());
       m.put("blockSize", status.getBlockSize());
       m.put("replication", status.getReplication());
+      return JSON.toString(m);
     }
-    return JSON.toString(m);
   }
 
   @SuppressWarnings("unchecked")
@@ -106,9 +164,9 @@ public class JsonUtil {
     return (Map<String, Object>) JSON.parse(jsonString);
   }
 
-  /** Convert a Json string to a HdfsFileStatus object. */
+  /** Convert a Json map to a HdfsFileStatus object. */
   public static HdfsFileStatus toFileStatus(final Map<String, Object> m) {
-    if ((Boolean)m.get("isNull")) {
+    if (m == null) {
       return null;
     }
 
@@ -130,4 +188,214 @@ public class JsonUtil {
         permission, owner, group,
         symlink, DFSUtil.string2Bytes(localName));
   }
+
+  /** Convert an ExtendedBlock to a Json string. */
+  public static String toJsonString(final ExtendedBlock extendedblock) {
+    if (extendedblock == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = extendedBlockMap.get();
+    m.put("blockPoolId", extendedblock.getBlockPoolId());
+    m.put("blockId", extendedblock.getBlockId());
+    m.put("numBytes", extendedblock.getNumBytes());
+    m.put("generationStamp", extendedblock.getGenerationStamp());
+    return JSON.toString(m);
+  }
+
+  /** Convert a Json map to an ExtendedBlock object. */
+  public static ExtendedBlock toExtendedBlock(final Map<?, ?> m) {
+    if (m == null) {
+      return null;
+    }
+    
+    final String blockPoolId = (String)m.get("blockPoolId");
+    final long blockId = (Long)m.get("blockId");
+    final long numBytes = (Long)m.get("numBytes");
+    final long generationStamp = (Long)m.get("generationStamp");
+    return new ExtendedBlock(blockPoolId, blockId, numBytes, generationStamp);
+  }
+  
+  /** Convert a DatanodeInfo to a Json string. */
+  public static String toJsonString(final DatanodeInfo datanodeinfo) {
+    if (datanodeinfo == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = datanodeInfoMap.get();
+    m.put("name", datanodeinfo.getName());
+    m.put("storageID", datanodeinfo.getStorageID());
+    m.put("infoPort", datanodeinfo.getInfoPort());
+
+    m.put("ipcPort", datanodeinfo.getIpcPort());
+
+    m.put("capacity", datanodeinfo.getCapacity());
+    m.put("dfsUsed", datanodeinfo.getDfsUsed());
+    m.put("remaining", datanodeinfo.getRemaining());
+    m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed());
+    m.put("lastUpdate", datanodeinfo.getLastUpdate());
+    m.put("xceiverCount", datanodeinfo.getXceiverCount());
+    m.put("networkLocation", datanodeinfo.getNetworkLocation());
+    m.put("hostName", datanodeinfo.getHostName());
+    m.put("adminState", datanodeinfo.getAdminState().name());
+    return JSON.toString(m);
+  }
+
+  /** Convert a Json map to a DatanodeInfo object. */
+  public static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
+    if (m == null) {
+      return null;
+    }
+
+    return new DatanodeInfo(
+        (String)m.get("name"),
+        (String)m.get("storageID"),
+        (int)(long)(Long)m.get("infoPort"),
+        (int)(long)(Long)m.get("ipcPort"),
+
+        (Long)m.get("capacity"),
+        (Long)m.get("dfsUsed"),
+        (Long)m.get("remaining"),
+        (Long)m.get("blockPoolUsed"),
+        (Long)m.get("lastUpdate"),
+        (int)(long)(Long)m.get("xceiverCount"),
+        (String)m.get("networkLocation"),
+        (String)m.get("hostName"),
+        AdminStates.valueOf((String)m.get("adminState")));
+  }
+
+  /** Convert a DatanodeInfo[] to a Json string. */
+  public static String toJsonString(final DatanodeInfo[] array
+      ) throws IOException {
+    if (array == null) {
+      return null;
+    } else if (array.length == 0) {
+      return "[]";
+    } else {
+      final StringBuilder b = new StringBuilder().append('[').append(
+          toJsonString(array[0]));
+      for(int i = 1; i < array.length; i++) {
+        b.append(", ").append(toJsonString(array[i]));
+      }
+      return b.append(']').toString();
+    }
+  }
+
+  /** Convert an Object[] to a DatanodeInfo[]. */
+  public static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) {
+    if (objects == null) {
+      return null;
+    } else if (objects.length == 0) {
+      return EMPTY_DATANODE_INFO_ARRAY;
+    } else {
+      final DatanodeInfo[] array = new DatanodeInfo[objects.length];
+      for(int i = 0; i < array.length; i++) {
+        array[i] = (DatanodeInfo)toDatanodeInfo((Map<?, ?>) objects[i]);
+      }
+      return array;
+    }
+  }
+
+  /** Convert a LocatedBlock to a Json string. */
+  public static String toJsonString(final LocatedBlock locatedblock
+      ) throws IOException {
+    if (locatedblock == null) {
+      return null;
+    }
+ 
+    final Map<String, Object> m = locatedBlockMap.get();
+    m.put("blockToken", toJsonString(locatedblock.getBlockToken()));
+    m.put("isCorrupt", locatedblock.isCorrupt());
+    m.put("startOffset", locatedblock.getStartOffset());
+    m.put("block", toJsonString(locatedblock.getBlock()));
+
+    m.put("locations", toJsonString(locatedblock.getLocations()));
+    return JSON.toString(m);
+  }
+
+  /** Convert a Json map to LocatedBlock. */
+  public static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
+    if (m == null) {
+      return null;
+    }
+
+    final ExtendedBlock b = toExtendedBlock((Map<?, ?>)JSON.parse((String)m.get("block")));
+    final DatanodeInfo[] locations = toDatanodeInfoArray(
+        (Object[])JSON.parse((String)m.get("locations")));
+    final long startOffset = (Long)m.get("startOffset");
+    final boolean isCorrupt = (Boolean)m.get("isCorrupt");
+
+    final LocatedBlock locatedblock = new LocatedBlock(b, locations, startOffset, isCorrupt);
+    locatedblock.setBlockToken(toBlockToken((Map<?, ?>)JSON.parse((String)m.get("blockToken"))));
+    return locatedblock;
+  }
+
+  /** Convert a List of LocatedBlock to a Json string. */
+  public static String toJsonString(final List<LocatedBlock> array
+      ) throws IOException {
+    if (array == null) {
+      return null;
+    } else if (array.size() == 0) {
+      return "[]";
+    } else {
+      final StringBuilder b = new StringBuilder().append('[').append(
+          toJsonString(array.get(0)));
+      for(int i = 1; i < array.size(); i++) {
+        b.append(",\n  ").append(toJsonString(array.get(i)));
+      }
+      return b.append(']').toString();
+    }
+  }
+
+  /** Convert an Object[] to a List of LocatedBlock. 
+   * @throws IOException */
+  public static List<LocatedBlock> toLocatedBlockList(final Object[] objects
+      ) throws IOException {
+    if (objects == null) {
+      return null;
+    } else if (objects.length == 0) {
+      return Collections.emptyList();
+    } else {
+      final List<LocatedBlock> list = new ArrayList<LocatedBlock>(objects.length);
+      for(int i = 0; i < objects.length; i++) {
+        list.add((LocatedBlock)toLocatedBlock((Map<?, ?>)objects[i]));
+      }
+      return list;
+    }
+  }
+
+  /** Convert LocatedBlocks to a Json string. */
+  public static String toJsonString(final LocatedBlocks locatedblocks
+      ) throws IOException {
+    if (locatedblocks == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = jsonMap.get();
+    m.put("fileLength", locatedblocks.getFileLength());
+    m.put("isUnderConstruction", locatedblocks.isUnderConstruction());
+
+    m.put("locatedBlocks", toJsonString(locatedblocks.getLocatedBlocks()));
+    m.put("lastLocatedBlock", toJsonString(locatedblocks.getLastLocatedBlock()));
+    m.put("isLastBlockComplete", locatedblocks.isLastBlockComplete());
+    return JSON.toString(m);
+  }
+
+  /** Convert a Json map to LocatedBlocks. */
+  public static LocatedBlocks toLocatedBlocks(final Map<String, Object> m
+      ) throws IOException {
+    if (m == null) {
+      return null;
+    }
+    
+    final long fileLength = (Long)m.get("fileLength");
+    final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
+    final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
+        (Object[])JSON.parse((String) m.get("locatedBlocks")));
+    final LocatedBlock lastLocatedBlock = toLocatedBlock(
+        (Map<?, ?>)JSON.parse((String)m.get("lastLocatedBlock")));
+    final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
+    return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
+        lastLocatedBlock, isLastBlockComplete);
+  }
 }
\ No newline at end of file
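
For readers skimming the diff above: the new JsonUtil helpers serialize HDFS protocol objects to JSON strings and rebuild them from the maps produced by JSON.parse. A minimal round-trip sketch follows (illustrative only, not part of the commit; the field values and the class name are made up, and the 13-argument DatanodeInfo constructor is the one invoked by toDatanodeInfo() above):

// Hedged sketch: round-trip a DatanodeInfo through the helpers added in this diff.
// All field values below are placeholders.
import java.util.Map;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.mortbay.util.ajax.JSON;

public class JsonUtilRoundTripSketch {
  public static void main(String[] args) {
    final DatanodeInfo original = new DatanodeInfo(
        "datanode1.example.com:50010", "DS-example-storage", 50075, 50020,
        1024L, 512L, 512L, 0L, 0L, 1,
        "/default-rack", "datanode1.example.com", AdminStates.NORMAL);

    // Object -> JSON string -> Map -> object, mirroring how WebHDFS uses JsonUtil.
    final String json = JsonUtil.toJsonString(original);
    final DatanodeInfo copy = JsonUtil.toDatanodeInfo((Map<?, ?>) JSON.parse(json));

    System.out.println(copy.getName() + " " + copy.getAdminState());
  }
}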

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Thu Sep 29 00:09:56 2011
@@ -27,9 +27,12 @@ import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -45,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -54,7 +58,9 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.GroupParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
+import org.apache.hadoop.hdfs.web.resources.LengthParam;
 import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
+import org.apache.hadoop.hdfs.web.resources.OffsetParam;
 import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
 import org.apache.hadoop.hdfs.web.resources.OwnerParam;
 import org.apache.hadoop.hdfs.web.resources.Param;
@@ -63,13 +69,16 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
 import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
+import org.apache.hadoop.hdfs.web.resources.RenewerParam;
 import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
@@ -82,17 +91,24 @@ public class WebHdfsFileSystem extends H
 
   private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
 
-  private UserGroupInformation ugi;
+  private final UserGroupInformation ugi;
   private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
   protected Path workingDir;
 
+  {
+    try {
+      ugi = UserGroupInformation.getCurrentUser();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
 
-    ugi = UserGroupInformation.getCurrentUser();
     this.workingDir = getHomeDirectory();
   }
 
@@ -163,11 +179,11 @@ public class WebHdfsFileSystem extends H
     }
   }
 
-  private URL toUrl(final HttpOpParam.Op op, final Path fspath,
+  URL toUrl(final HttpOpParam.Op op, final Path fspath,
       final Param<?,?>... parameters) throws IOException {
     //initialize URI path and query
     final String path = "/" + PATH_PREFIX
-        + makeQualified(fspath).toUri().getPath();
+        + (fspath == null? "/": makeQualified(fspath).toUri().getPath());
     final String query = op.toQueryString()
         + '&' + new UserParam(ugi)
         + Param.toSortedString("&", parameters);
@@ -396,4 +412,41 @@ public class WebHdfsFileSystem extends H
     }
     return statuses;
   }
+
+  @Override
+  public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
+      ) throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
+    final Map<String, Object> m = run(op, null, new RenewerParam(renewer));
+    final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m); 
+    token.setService(new Text(getCanonicalServiceName()));
+    return token;
+  }
+
+  @Override
+  public List<Token<?>> getDelegationTokens(final String renewer
+      ) throws IOException {
+    final Token<?>[] t = {getDelegationToken(renewer)};
+    return Arrays.asList(t);
+  }
+
+  @Override
+  public BlockLocation[] getFileBlockLocations(final FileStatus status,
+      final long offset, final long length) throws IOException {
+    if (status == null) {
+      return null;
+    }
+    return getFileBlockLocations(status.getPath(), offset, length);
+  }
+
+  @Override
+  public BlockLocation[] getFileBlockLocations(final Path p, 
+      final long offset, final long length) throws IOException {
+    statistics.incrementReadOps(1);
+
+    final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
+    final Map<String, Object> m = run(op, p, new OffsetParam(offset),
+        new LengthParam(length));
+    return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
+  }
 }
\ No newline at end of file
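
The WebHdfsFileSystem changes above add two client entry points, GETDELEGATIONTOKEN and GETFILEBLOCKLOCATIONS, both answered with JSON and decoded through JsonUtil. A hedged usage sketch (assumptions: the webhdfs:// scheme is mapped to WebHdfsFileSystem in the configuration, WebHDFS is enabled on the NameNode, and every host name, port, path, and renewer below is a placeholder):

// Hedged sketch only: exercising the new delegation-token and block-location calls.
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.token.Token;

public class WebHdfsClientSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    final FileSystem fs =
        FileSystem.get(URI.create("webhdfs://namenode.example.com:50070/"), conf);

    // GETDELEGATIONTOKEN over HTTP; the service is set from getCanonicalServiceName().
    final Token<?> token = fs.getDelegationToken("renewer-user");
    System.out.println("token kind = " + token.getKind());

    // GETFILEBLOCKLOCATIONS over HTTP, decoded via JsonUtil.toLocatedBlocks().
    final BlockLocation[] locations = fs.getFileBlockLocations(
        fs.getFileStatus(new Path("/user/example/data.txt")), 0L, 1024L);
    for (BlockLocation location : locations) {
      System.out.println(location);
    }
  }
}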

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java Thu Sep 29 00:09:56 2011
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.web.resou
 /** Access time parameter. */
 public class AccessTimeParam extends LongParam {
   /** Parameter name. */
-  public static final String NAME = "accessTime";
+  public static final String NAME = "accesstime";
   /** Default parameter value. */
   public static final String DEFAULT = "-1";
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java Thu Sep 29 00:09:56 2011
@@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configurat
 /** Block size parameter. */
 public class BlockSizeParam extends LongParam {
   /** Parameter name. */
-  public static final String NAME = "blockSize";
+  public static final String NAME = "blocksize";
   /** Default parameter value. */
   public static final String DEFAULT = NULL;
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java Thu Sep 29 00:09:56 2011
@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.CommonConfig
 /** Buffer size parameter. */
 public class BufferSizeParam extends IntegerParam {
   /** Parameter name. */
-  public static final String NAME = "bufferSize";
+  public static final String NAME = "buffersize";
   /** Default parameter value. */
   public static final String DEFAULT = NULL;
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java Thu Sep 29 00:09:56 2011
@@ -17,13 +17,12 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /** Delegation token parameter. */
 public class DelegationParam extends StringParam {
   /** Parameter name. */
-  public static final String NAME = JspHelper.DELEGATION_PARAMETER_NAME;
+  public static final String NAME = "delegation";
   /** Default parameter value. */
   public static final String DEFAULT = "";
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java Thu Sep 29 00:09:56 2011
@@ -21,9 +21,6 @@ import java.net.HttpURLConnection;
 
 /** Http DELETE operation parameter. */
 public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
-  /** Parameter name. */
-  public static final String NAME = "deleteOp";
-
   /** Delete operations. */
   public static enum Op implements HttpOpParam.Op {
     DELETE(HttpURLConnection.HTTP_OK),

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java Thu Sep 29 00:09:56 2011
@@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path;
 /** Destination path parameter. */
 public class DstPathParam extends StringParam {
   /** Parameter name. */
-  public static final String NAME = "dstPath";
+  public static final String NAME = "dstpath";
   /** Default parameter value. */
   public static final String DEFAULT = "";
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Thu Sep 29 00:09:56 2011
@@ -21,16 +21,16 @@ import java.net.HttpURLConnection;
 
 /** Http GET operation parameter. */
 public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
-  /** Parameter name. */
-  public static final String NAME = "getOp";
-
   /** Get operations. */
   public static enum Op implements HttpOpParam.Op {
     OPEN(HttpURLConnection.HTTP_OK),
+    GETFILEBLOCKLOCATIONS(HttpURLConnection.HTTP_OK),
 
     GETFILESTATUS(HttpURLConnection.HTTP_OK),
     LISTSTATUS(HttpURLConnection.HTTP_OK),
 
+    GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),
+
     NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
     final int expectedHttpResponseCode;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java Thu Sep 29 00:09:56 2011
@@ -20,6 +20,9 @@ package org.apache.hadoop.hdfs.web.resou
 /** Http operation parameter. */
 public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
     extends EnumParam<E> {
+  /** Parameter name. */
+  public static final String NAME = "op";
+
   /** Default parameter value. */
   public static final String DEFAULT = NULL;
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java Thu Sep 29 00:09:56 2011
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.web.resou
 /** Modification time parameter. */
 public class ModificationTimeParam extends LongParam {
   /** Parameter name. */
-  public static final String NAME = "modificationTime";
+  public static final String NAME = "modificationtime";
   /** Default parameter value. */
   public static final String DEFAULT = "-1";
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java Thu Sep 29 00:09:56 2011
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
-/** Recursive parameter. */
+/** Overwrite parameter. */
 public class OverwriteParam extends BooleanParam {
   /** Parameter name. */
   public static final String NAME = "overwrite";

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java Thu Sep 29 00:09:56 2011
@@ -21,9 +21,6 @@ import java.net.HttpURLConnection;
 
 /** Http POST operation parameter. */
 public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
-  /** Parameter name. */
-  public static final String NAME = "postOp";
-
   /** Post operations. */
   public static enum Op implements HttpOpParam.Op {
     APPEND(HttpURLConnection.HTTP_OK),

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java Thu Sep 29 00:09:56 2011
@@ -21,9 +21,6 @@ import java.net.HttpURLConnection;
 
 /** Http PUT operation parameter. */
 public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
-  /** Parameter name. */
-  public static final String NAME = "putOp";
-
   /** Put operations. */
   public static enum Op implements HttpOpParam.Op {
     CREATE(true, HttpURLConnection.HTTP_CREATED),

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java Thu Sep 29 00:09:56 2011
@@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Options;
 /** Rename option set parameter. */
 public class RenameOptionSetParam extends EnumSetParam<Options.Rename> {
   /** Parameter name. */
-  public static final String NAME = "renameOptions";
+  public static final String NAME = "renameoptions";
   /** Default parameter value. */
   public static final String DEFAULT = "";
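
Taken together, the web/resources changes in this commit lowercase the query-string parameter names (accesstime, blocksize, buffersize, dstpath, modificationtime, renameoptions) and replace the per-verb getOp/putOp/postOp/deleteOp names with the single "op" name defined in HttpOpParam. A rough, illustrative sketch of the resulting request shape follows; the host, port, file path, URL prefix, and the "offset"/"length" spellings are assumptions based on the OffsetParam/LengthParam classes referenced above and may differ at this revision:

// Hedged sketch: approximate WebHDFS GET URL after the parameter renames.
String url = "http://namenode.example.com:50070/webhdfs/v1/user/example/data.txt"
    + "?op=GETFILEBLOCKLOCATIONS"   // single "op" parameter from HttpOpParam
    + "&offset=0&length=1024";      // lowercase parameter names (assumed spellings)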
 

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:09:56 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1173011
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1177115
 /hadoop/core/branches/branch-0.19/hdfs/src/main/native:713112
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Thu Sep 29 00:09:56 2011
@@ -683,24 +683,4 @@ creations/deletions), or "all".</descrip
   </description>
 </property>
 
-<property>
-  <name>dfs.web.authentication.kerberos.principal</name>
-  <value>HTTP/${dfs.web.hostname}@${kerberos.realm}</value>
-  <description>
-    The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-
-    The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-    HTTP SPENGO specification.
-  </description>
-</property>
-
-<property>
-  <name>dfs.web.authentication.kerberos.keytab</name>
-  <value>${user.home}/dfs.web.keytab</value>
-  <description>
-    The Kerberos keytab file with the credentials for the
-    HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-  </description>
-</property>
-
 </configuration>

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:09:56 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1173011
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1177115
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/datanode:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:09:56 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1173011
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1177115
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/hdfs:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:09:56 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1173011
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1177115
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/secondary:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:09:56 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1173011
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1177115
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java Thu Sep 29 00:09:56 2011
@@ -72,6 +72,7 @@ public class TestDFSPermission extends T
   final private static Path NON_EXISTENT_FILE = new Path("/NonExistentFile");
 
   private FileSystem fs;
+  private MiniDFSCluster cluster;
   private static Random r;
 
   static {
@@ -105,18 +106,25 @@ public class TestDFSPermission extends T
     }
   }
 
+  @Override
+  public void setUp() throws IOException {
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster.waitActive();
+  }
+  
+  @Override
+  public void tearDown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+  
   /** This tests if permission setting in create, mkdir, and 
    * setPermission works correctly
    */
   public void testPermissionSetting() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
-    try {
-      cluster.waitActive();
-      testPermissionSetting(OpType.CREATE); // test file creation
-      testPermissionSetting(OpType.MKDIRS); // test directory creation
-    } finally {
-      cluster.shutdown();
-    }
+    testPermissionSetting(OpType.CREATE); // test file creation
+    testPermissionSetting(OpType.MKDIRS); // test directory creation
   }
 
   private void initFileSystem(short umask) throws Exception {
@@ -245,17 +253,22 @@ public class TestDFSPermission extends T
     }
   }
 
+  /**
+   * check that ImmutableFsPermission can be used as the argument
+   * to setPermission
+   */
+  public void testImmutableFsPermission() throws IOException {
+    fs = FileSystem.get(conf);
+
+    // set the permission of the root to be world-wide rwx
+    fs.setPermission(new Path("/"),
+        FsPermission.createImmutable((short)0777));
+  }
+  
   /* check if the ownership of a file/directory is set correctly */
   public void testOwnership() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
-    try {
-      cluster.waitActive();
-      testOwnership(OpType.CREATE); // test file creation
-      testOwnership(OpType.MKDIRS); // test directory creation
-    } finally {
-      fs.close();
-      cluster.shutdown();
-    }
+    testOwnership(OpType.CREATE); // test file creation
+    testOwnership(OpType.MKDIRS); // test directory creation
   }
 
   /* change a file/directory's owner and group.
@@ -342,9 +355,7 @@ public class TestDFSPermission extends T
   /* Check if namenode performs permission checking correctly for
    * superuser, file owner, group owner, and other users */
   public void testPermissionChecking() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
-      cluster.waitActive();
       fs = FileSystem.get(conf);
 
       // set the permission of the root to be world-wide rwx
@@ -401,7 +412,6 @@ public class TestDFSPermission extends T
           parentPermissions, permissions, parentPaths, filePaths, dirPaths);
     } finally {
       fs.close();
-      cluster.shutdown();
     }
   }
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Thu Sep 29 00:09:56 2011
@@ -29,8 +29,7 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 
-import junit.framework.Assert;
-
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -40,8 +39,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
-
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 public class TestDFSUtil {
   /**
@@ -76,79 +74,141 @@ public class TestDFSUtil {
       }
     }
 
-    assertTrue("expected 1 corrupt files but got " + corruptCount, 
-               corruptCount == 1);
-    
+    assertTrue("expected 1 corrupt files but got " + corruptCount,
+        corruptCount == 1);
+
     // test an empty location
     bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
     assertEquals(0, bs.length);
   }
 
-  /** 
-   * Test for
-   * {@link DFSUtil#getNameServiceIds(Configuration)}
-   * {@link DFSUtil#getNameServiceId(Configuration)}
-   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
+
+  private Configuration setupAddress(String key) {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFSUtil.getNameServiceIdKey(key, "nn1"), "localhost:9000");
+    return conf;
+  }
+
+  /**
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
+   * the nameserviceId from the configuration is returned
    */
   @Test
-  public void testMultipleNamenodes() throws IOException {
+  public void getNameServiceId() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+  }
+  
+  /**
+   * Test {@link DFSUtil#getNameNodeNameServiceId(Configuration)} to ensure
+   * nameserviceId for namenode is determined based on matching the address with
+   * local node's address
+   */
+  @Test
+  public void getNameNodeNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
+   * nameserviceId for backup node is determined based on matching the address
+   * with local node's address
+   */
+  @Test
+  public void getBackupNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
+   * nameserviceId for the secondary namenode is determined based on matching the address
+   * with local node's address
+   */
+  @Test
+  public void getSecondaryNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getNameServiceId(Configuration, String)} to ensure
+   * exception is thrown when multiple rpc addresses match the local node's
+   * address
+   */
+  @Test(expected = HadoopIllegalArgumentException.class)
+  public void testGetNameServiceIdException() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+        "localhost:9000");
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+        "localhost:9001");
+    DFSUtil.getNamenodeNameServiceId(conf);
+    fail("Expected exception is not thrown");
+  }
+
+  /**
+   * Test {@link DFSUtil#getNameServiceIds(Configuration)}
+   */
+  @Test
+  public void testGetNameServiceIds() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
-    
-    // Test - The configured nameserviceIds are returned
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
     Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
     Iterator<String> it = nameserviceIds.iterator();
     assertEquals(2, nameserviceIds.size());
     assertEquals("nn1", it.next().toString());
     assertEquals("nn2", it.next().toString());
-    
-    // Tests default nameserviceId is returned
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    assertEquals("nn1", DFSUtil.getNameServiceId(conf));
-    
+  }
+
+  /**
+   * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
+   * {@link DFSUtil#getNameServiceIdFromAddress(Configuration, InetSocketAddress, String...)
+   * (Configuration)}
+   */
+  @Test
+  public void testMultipleNamenodes() throws IOException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
     // Test - configured list of namenodes are returned
     final String NN1_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
     final String NN3_ADDRESS = "localhost:9002";
-    conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
-    conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
-    
-    Collection<InetSocketAddress> nnAddresses = 
-      DFSUtil.getNNServiceRpcAddresses(conf);
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+        NN1_ADDRESS);
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+        NN2_ADDRESS);
+
+    Collection<InetSocketAddress> nnAddresses = DFSUtil
+        .getNNServiceRpcAddresses(conf);
     assertEquals(2, nnAddresses.size());
     Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
-    assertEquals(2, nameserviceIds.size());
     InetSocketAddress addr = iterator.next();
     assertEquals("localhost", addr.getHostName());
     assertEquals(9000, addr.getPort());
     addr = iterator.next();
     assertEquals("localhost", addr.getHostName());
     assertEquals(9001, addr.getPort());
-    
+
     // Test - can look up nameservice ID from service address
-    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
-    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress1,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals("nn1", nameserviceId);
-    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
-    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress2,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals("nn2", nameserviceId);
-    InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
-    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress3,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertNull(nameserviceId);
+    checkNameServiceId(conf, NN1_ADDRESS, "nn1");
+    checkNameServiceId(conf, NN2_ADDRESS, "nn2");
+    checkNameServiceId(conf, NN3_ADDRESS, null);
   }
-  
-  /** 
+
+  public void checkNameServiceId(Configuration conf, String addr,
+      String expectedNameServiceId) {
+    InetSocketAddress s = NetUtils.createSocketAddr(addr);
+    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals(expectedNameServiceId, nameserviceId);
+  }
+
+  /**
    * Test for
    * {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
    */
@@ -157,27 +217,25 @@ public class TestDFSUtil {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String DEFAULT_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
-    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
-    
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
+
     InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
     boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertTrue(isDefault);
     InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
     isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertFalse(isDefault);
   }
-  
+
   /** Tests to ensure default namenode is used as fallback */
   @Test
   public void testDefaultNamenode() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String hdfs_default = "hdfs://localhost:9999/";
-    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
-    // If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that 
+    conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
+    // If DFS_FEDERATION_NAMESERVICES is not set, verify that
     // default namenode address is returned.
     List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
     assertEquals(1, addrList.size());
@@ -191,26 +249,26 @@ public class TestDFSUtil {
   @Test
   public void testConfModification() throws IOException {
     final HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    final String nameserviceId = DFSUtil.getNameServiceId(conf);
-    
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
+
     // Set the nameservice specific keys with nameserviceId in the config key
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
       // Note: value is same as the key
       conf.set(DFSUtil.getNameServiceIdKey(key, nameserviceId), key);
     }
-    
+
     // Initialize generic keys from specific keys
-    NameNode.initializeGenericKeys(conf);
-    
+    NameNode.initializeGenericKeys(conf, nameserviceId);
+
     // Retrieve the keys without nameserviceId and Ensure generic keys are set
     // to the correct value
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
       assertEquals(key, conf.get(key));
     }
   }
-  
+
   /**
    * Tests for empty configuration, an exception is thrown from
    * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
@@ -238,16 +296,16 @@ public class TestDFSUtil {
     } catch (IOException expected) {
     }
   }
-  
+
   @Test
-  public void testGetServerInfo(){
+  public void testGetServerInfo() {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
     String httpsport = DFSUtil.getInfoServer(null, conf, true);
-    Assert.assertEquals("0.0.0.0:50470", httpsport);
+    assertEquals("0.0.0.0:50470", httpsport);
     String httpport = DFSUtil.getInfoServer(null, conf, false);
-    Assert.assertEquals("0.0.0.0:50070", httpport);
+    assertEquals("0.0.0.0:50070", httpport);
   }
 
 }
\ No newline at end of file
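
For context on the refactored TestDFSUtil cases above, a minimal sketch of the federation configuration pattern they exercise: a nameservice list plus one suffixed rpc-address key per nameservice id, resolved through DFSUtil (illustrative only; the ids and addresses are the same placeholders the test uses):

// Hedged sketch mirroring the configuration built in testMultipleNamenodes().
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class FederationConfSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), "localhost:9000");
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), "localhost:9001");

    // Resolves both addresses, as testMultipleNamenodes() asserts.
    System.out.println(DFSUtil.getNNServiceRpcAddresses(conf));
  }
}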

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Thu Sep 29 00:09:56 2011
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
 
@@ -24,17 +28,15 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
-
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 /** A class for testing quota-related commands */
 public class TestQuota {
@@ -841,6 +843,14 @@ public class TestQuota {
     DFSAdmin admin = new DFSAdmin(conf);
 
     try {
+      
+      // Test for default namespace quota
+      long nsQuota = FSImageTestUtil.getNSQuota(cluster.getNameNode()
+          .getNamesystem());
+      assertTrue(
+          "Default namespace quota expected to be Long.MAX_VALUE but was: "
+              + nsQuota, nsQuota == Long.MAX_VALUE);
+      
       Path dir = new Path("/test");
       boolean exceededQuota = false;
       ContentSummary c;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java Thu Sep 29 00:09:56 2011
@@ -23,12 +23,12 @@ package org.apache.hadoop.hdfs.security;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
+import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,12 +38,16 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
+import org.apache.log4j.Level;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -56,12 +60,13 @@ public class TestDelegationToken {
   @Before
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
+    config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
     config.set("hadoop.security.auth_to_local",
         "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
-    cluster = new MiniDFSCluster.Builder(config).build();
+    cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
     cluster.waitActive();
     dtSecretManager = NameNodeAdapter.getDtSecretManager(
         cluster.getNamesystem());
@@ -154,6 +159,31 @@ public class TestDelegationToken {
   }
   
   @Test
+  public void testDelegationTokenWebHdfsApi() throws Exception {
+    ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
+    final String uri = WebHdfsFileSystem.SCHEME  + "://"
+        + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    //get file system as JobTracker
+    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+        "JobTracker", new String[]{"user"});
+    final WebHdfsFileSystem webhdfs = ugi.doAs(
+        new PrivilegedExceptionAction<WebHdfsFileSystem>() {
+      @Override
+      public WebHdfsFileSystem run() throws Exception {
+        return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
+      }
+    });
+
+    final Token<DelegationTokenIdentifier> token = webhdfs.getDelegationToken("JobTracker");
+    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+    byte[] tokenId = token.getIdentifier();
+    identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
+    LOG.info("A valid token should have non-null password, and should be renewed successfully");
+    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+    dtSecretManager.renewToken(token, "JobTracker");
+  }
+
+  @Test
   public void testDelegationTokenWithDoAs() throws Exception {
     final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
     final Token<DelegationTokenIdentifier> token = 

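A minimal client-side sketch of the WebHDFS delegation-token flow the new test covers: the file system is opened over the webhdfs scheme, a delegation token is requested, and its identifier is decoded. The namenode HTTP address and the "JobTracker" renewer below are illustrative only.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.token.Token;

public class WebHdfsTokenSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical namenode HTTP address; substitute the real one.
    URI uri = new URI(WebHdfsFileSystem.SCHEME + "://localhost:50070");
    WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

    // Ask the namenode for a delegation token, naming a renewer.
    Token<DelegationTokenIdentifier> token =
        webhdfs.getDelegationToken("JobTracker");

    // Decode the identifier carried inside the token.
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(new DataInputStream(
        new ByteArrayInputStream(token.getIdentifier())));
    System.out.println("token owner: " + id.getUser());
  }
}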
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java Thu Sep 29 00:09:56 2011
@@ -18,31 +18,34 @@
 
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap;
+import org.junit.Before;
+import org.junit.Test;
 
-public class TestHost2NodesMap extends TestCase {
-  static private Host2NodesMap map = new Host2NodesMap();
-  private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
+public class TestHost2NodesMap {
+  private Host2NodesMap map = new Host2NodesMap();
+  private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
     new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
     new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
   };
-  private final static DatanodeDescriptor NULL_NODE = null; 
-  private final static DatanodeDescriptor NODE = 
-    new DatanodeDescriptor(new DatanodeID("h3:5040"), "/d1/r4");
+  private final DatanodeDescriptor NULL_NODE = null; 
+  private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
+      "/d1/r4");
 
-  static {
+  @Before
+  public void setup() {
     for(DatanodeDescriptor node:dataNodes) {
       map.add(node);
     }
     map.add(NULL_NODE);
   }
   
+  @Test
   public void testContains() throws Exception {
     for(int i=0; i<dataNodes.length; i++) {
       assertTrue(map.contains(dataNodes[i]));
@@ -51,6 +54,7 @@ public class TestHost2NodesMap extends T
     assertFalse(map.contains(NODE));
   }
 
+  @Test
   public void testGetDatanodeByHost() throws Exception {
     assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
     assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
@@ -59,6 +63,7 @@ public class TestHost2NodesMap extends T
     assertTrue(null==map.getDatanodeByHost("h4"));
   }
 
+  @Test
   public void testGetDatanodeByName() throws Exception {
     assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
     assertTrue(map.getDatanodeByName("h1:5030")==null);
@@ -71,6 +76,7 @@ public class TestHost2NodesMap extends T
     assertTrue(map.getDatanodeByName(null)==null);
   }
 
+  @Test
   public void testRemove() throws Exception {
     assertFalse(map.remove(NODE));
     

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java Thu Sep 29 00:09:56 2011
@@ -96,7 +96,8 @@ public class TestMulitipleNNDataBlockSca
 
       String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
       for (int i = 0; i < 2; i++) {
-        String nsId = DFSUtil.getNameServiceId(cluster.getConfiguration(i));
+        String nsId = DFSUtil.getNamenodeNameServiceId(cluster
+            .getConfiguration(i));
         namenodesBuilder.append(nsId);
         namenodesBuilder.append(",");
       }
@@ -116,7 +117,7 @@ public class TestMulitipleNNDataBlockSca
         LOG.info(ex.getMessage());
       }
 
-      namenodesBuilder.append(DFSUtil.getNameServiceId(cluster
+      namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
           .getConfiguration(2)));
       conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
           .toString());

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java Thu Sep 29 00:09:56 2011
@@ -17,21 +17,24 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
 import org.apache.hadoop.hdfs.protocol.Block;
-import static org.junit.Assert.*;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
  * Unit test for ReplicasMap class
  */
 public class TestReplicasMap {
-  private static final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
-  private static final String bpid = "BP-TEST";
-  private static final  Block block = new Block(1234, 1234, 1234);
+  private final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
+  private final String bpid = "BP-TEST";
+  private final  Block block = new Block(1234, 1234, 1234);
   
-  @BeforeClass
-  public static void setup() {
+  @Before
+  public void setup() {
     map.add(bpid, new FinalizedReplica(block, null, null));
   }
   

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Thu Sep 29 00:09:56 2011
@@ -412,4 +412,11 @@ public abstract class FSImageTestUtil {
   public static FSImage getFSImage(NameNode node) {
     return node.getFSImage();
   }
+
+  /**
+   * Get the namespace quota.
+   */
+  public static long getNSQuota(FSNamesystem ns) {
+    return ns.dir.rootDir.getNsQuota();
+  }
 }

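As a usage note, a sketch of how a test might call the new FSImageTestUtil.getNSQuota helper against a MiniDFSCluster; the cluster setup shown here is illustrative and not part of the patch.

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.junit.Test;

public class DefaultNsQuotaSketch {
  @Test
  public void testDefaultNamespaceQuota() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0).build();
    try {
      cluster.waitActive();
      // The helper reads the namespace quota of the root directory.
      long nsQuota = FSImageTestUtil.getNSQuota(cluster.getNamesystem());
      // A freshly formatted namespace is expected to have the maximum quota.
      assertEquals(Long.MAX_VALUE, nsQuota);
    } finally {
      cluster.shutdown();
    }
  }
}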
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1177117&r1=1177116&r2=1177117&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java Thu Sep 29 00:09:56 2011
@@ -18,17 +18,23 @@
 
 package org.apache.hadoop.hdfs.web;
 
+import java.io.BufferedReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
 import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -114,4 +120,42 @@ public class TestWebHdfsFileSystemContra
       // also okay for HDFS.
     }    
   }
+  
+  public void testGetFileBlockLocations() throws IOException {
+    final String f = "/test/testGetFileBlockLocations";
+    createFile(path(f));
+    final BlockLocation[] computed = fs.getFileBlockLocations(new Path(f), 0L, 1L);
+    final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
+        new Path(f), 0L, 1L);
+    assertEquals(expected.length, computed.length);
+    for(int i = 0; i < computed.length; i++) {
+      assertEquals(expected[i].toString(), computed[i].toString());
+    }
+  }
+
+  public void testCaseInsensitive() throws IOException {
+    final Path p = new Path("/test/testCaseInsensitive");
+    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
+    final PutOpParam.Op op = PutOpParam.Op.MKDIRS;
+
+    //replace query with mixed-case letters
+    final URL url = webhdfs.toUrl(op, p);
+    WebHdfsFileSystem.LOG.info("url      = " + url);
+    final URL replaced = new URL(url.toString().replace(op.toQueryString(),
+        "Op=mkDIrs"));
+    WebHdfsFileSystem.LOG.info("replaced = " + replaced);
+
+    //connect with the replaced URL.
+    final HttpURLConnection conn = (HttpURLConnection)replaced.openConnection();
+    conn.setRequestMethod(op.getType().toString());
+    conn.connect();
+    final BufferedReader in = new BufferedReader(new InputStreamReader(
+        conn.getInputStream()));
+    for(String line; (line = in.readLine()) != null; ) {
+      WebHdfsFileSystem.LOG.info("> " + line);
+    }
+
+    //check if the command succeeds.
+    assertTrue(fs.getFileStatus(p).isDirectory());
+  }
 }


