hadoop-hdfs-commits mailing list archives

From sur...@apache.org
Subject svn commit: r1177127 [2/2] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ hadoop-hdfs/s...
Date Thu, 29 Sep 2011 00:33:50 GMT
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Thu Sep 29 00:33:34 2011
@@ -17,31 +17,19 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
 import org.mortbay.util.ajax.JSON;
 
 /** JSON Utilities */
 public class JsonUtil {
-  private static class ThreadLocalMap extends ThreadLocal<Map<String, Object>> {
+  private static final ThreadLocal<Map<String, Object>> jsonMap
+      = new ThreadLocal<Map<String, Object>>() {
     @Override
     protected Map<String, Object> initialValue() {
       return new TreeMap<String, Object>();
@@ -53,54 +41,7 @@ public class JsonUtil {
       m.clear();
       return m;
     }
-  }
-
-  private static final ThreadLocalMap jsonMap = new ThreadLocalMap();
-  private static final ThreadLocalMap tokenMap = new ThreadLocalMap();
-  private static final ThreadLocalMap datanodeInfoMap = new ThreadLocalMap();
-  private static final ThreadLocalMap extendedBlockMap = new ThreadLocalMap();
-  private static final ThreadLocalMap locatedBlockMap = new ThreadLocalMap();
-
-  private static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
-
-  /** Convert a token object to a Json string. */
-  public static String toJsonString(final Token<? extends TokenIdentifier> token
-      ) throws IOException {
-    if (token == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = tokenMap.get();
-    m.put("urlString", token.encodeToUrlString());
-    return JSON.toString(m);
-  }
-
-  /** Convert a Json map to a Token. */
-  public static Token<? extends TokenIdentifier> toToken(
-      final Map<?, ?> m) throws IOException {
-    if (m == null) {
-      return null;
-    }
-
-    final Token<DelegationTokenIdentifier> token
-        = new Token<DelegationTokenIdentifier>();
-    token.decodeFromUrlString((String)m.get("urlString"));
-    return token;
-  }
-
-  /** Convert a Json map to a Token of DelegationTokenIdentifier. */
-  @SuppressWarnings("unchecked")
-  public static Token<DelegationTokenIdentifier> toDelegationToken(
-      final Map<?, ?> m) throws IOException {
-    return (Token<DelegationTokenIdentifier>)toToken(m);
-  }
-
-  /** Convert a Json map to a Token of BlockTokenIdentifier. */
-  @SuppressWarnings("unchecked")
-  public static Token<BlockTokenIdentifier> toBlockToken(
-      final Map<?, ?> m) throws IOException {
-    return (Token<BlockTokenIdentifier>)toToken(m);
-  }
+  };
 
   /** Convert an exception object to a Json string. */
   public static String toJsonString(final Exception e) {
@@ -136,10 +77,11 @@ public class JsonUtil {
 
   /** Convert a HdfsFileStatus object to a Json string. */
   public static String toJsonString(final HdfsFileStatus status) {
+    final Map<String, Object> m = jsonMap.get();
     if (status == null) {
-      return null;
+      m.put("isNull", true);
     } else {
-      final Map<String, Object> m = jsonMap.get();
+      m.put("isNull", false);
       m.put("localName", status.getLocalName());
       m.put("isDir", status.isDir());
       m.put("isSymlink", status.isSymlink());
@@ -155,8 +97,8 @@ public class JsonUtil {
       m.put("modificationTime", status.getModificationTime());
       m.put("blockSize", status.getBlockSize());
       m.put("replication", status.getReplication());
-      return JSON.toString(m);
     }
+    return JSON.toString(m);
   }
 
   @SuppressWarnings("unchecked")
@@ -164,9 +106,9 @@ public class JsonUtil {
     return (Map<String, Object>) JSON.parse(jsonString);
   }
 
-  /** Convert a Json map to a HdfsFileStatus object. */
+  /** Convert a Json string to a HdfsFileStatus object. */
   public static HdfsFileStatus toFileStatus(final Map<String, Object> m) {
-    if (m == null) {
+    if ((Boolean)m.get("isNull")) {
       return null;
     }
 
@@ -188,214 +130,4 @@ public class JsonUtil {
         permission, owner, group,
         symlink, DFSUtil.string2Bytes(localName));
   }
-
-  /** Convert a LocatedBlock to a Json string. */
-  public static String toJsonString(final ExtendedBlock extendedblock) {
-    if (extendedblock == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = extendedBlockMap.get();
-    m.put("blockPoolId", extendedblock.getBlockPoolId());
-    m.put("blockId", extendedblock.getBlockId());
-    m.put("numBytes", extendedblock.getNumBytes());
-    m.put("generationStamp", extendedblock.getGenerationStamp());
-    return JSON.toString(m);
-  }
-
-  /** Convert a Json map to an ExtendedBlock object. */
-  public static ExtendedBlock toExtendedBlock(final Map<?, ?> m) {
-    if (m == null) {
-      return null;
-    }
-    
-    final String blockPoolId = (String)m.get("blockPoolId");
-    final long blockId = (Long)m.get("blockId");
-    final long numBytes = (Long)m.get("numBytes");
-    final long generationStamp = (Long)m.get("generationStamp");
-    return new ExtendedBlock(blockPoolId, blockId, numBytes, generationStamp);
-  }
-  
-  /** Convert a DatanodeInfo to a Json string. */
-  public static String toJsonString(final DatanodeInfo datanodeinfo) {
-    if (datanodeinfo == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = datanodeInfoMap.get();
-    m.put("name", datanodeinfo.getName());
-    m.put("storageID", datanodeinfo.getStorageID());
-    m.put("infoPort", datanodeinfo.getInfoPort());
-
-    m.put("ipcPort", datanodeinfo.getIpcPort());
-
-    m.put("capacity", datanodeinfo.getCapacity());
-    m.put("dfsUsed", datanodeinfo.getDfsUsed());
-    m.put("remaining", datanodeinfo.getRemaining());
-    m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed());
-    m.put("lastUpdate", datanodeinfo.getLastUpdate());
-    m.put("xceiverCount", datanodeinfo.getXceiverCount());
-    m.put("networkLocation", datanodeinfo.getNetworkLocation());
-    m.put("hostName", datanodeinfo.getHostName());
-    m.put("adminState", datanodeinfo.getAdminState().name());
-    return JSON.toString(m);
-  }
-
-  /** Convert a Json map to an DatanodeInfo object. */
-  public static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
-    if (m == null) {
-      return null;
-    }
-
-    return new DatanodeInfo(
-        (String)m.get("name"),
-        (String)m.get("storageID"),
-        (int)(long)(Long)m.get("infoPort"),
-        (int)(long)(Long)m.get("ipcPort"),
-
-        (Long)m.get("capacity"),
-        (Long)m.get("dfsUsed"),
-        (Long)m.get("remaining"),
-        (Long)m.get("blockPoolUsed"),
-        (Long)m.get("lastUpdate"),
-        (int)(long)(Long)m.get("xceiverCount"),
-        (String)m.get("networkLocation"),
-        (String)m.get("hostName"),
-        AdminStates.valueOf((String)m.get("adminState")));
-  }
-
-  /** Convert a DatanodeInfo[] to a Json string. */
-  public static String toJsonString(final DatanodeInfo[] array
-      ) throws IOException {
-    if (array == null) {
-      return null;
-    } else if (array.length == 0) {
-      return "[]";
-    } else {
-      final StringBuilder b = new StringBuilder().append('[').append(
-          toJsonString(array[0]));
-      for(int i = 1; i < array.length; i++) {
-        b.append(", ").append(toJsonString(array[i]));
-      }
-      return b.append(']').toString();
-    }
-  }
-
-  /** Convert an Object[] to a DatanodeInfo[]. */
-  public static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) {
-    if (objects == null) {
-      return null;
-    } else if (objects.length == 0) {
-      return EMPTY_DATANODE_INFO_ARRAY;
-    } else {
-      final DatanodeInfo[] array = new DatanodeInfo[objects.length];
-      for(int i = 0; i < array.length; i++) {
-        array[i] = (DatanodeInfo)toDatanodeInfo((Map<?, ?>) objects[i]);
-      }
-      return array;
-    }
-  }
-
-  /** Convert a LocatedBlock to a Json string. */
-  public static String toJsonString(final LocatedBlock locatedblock
-      ) throws IOException {
-    if (locatedblock == null) {
-      return null;
-    }
- 
-    final Map<String, Object> m = locatedBlockMap.get();
-    m.put("blockToken", toJsonString(locatedblock.getBlockToken()));
-    m.put("isCorrupt", locatedblock.isCorrupt());
-    m.put("startOffset", locatedblock.getStartOffset());
-    m.put("block", toJsonString(locatedblock.getBlock()));
-
-    m.put("locations", toJsonString(locatedblock.getLocations()));
-    return JSON.toString(m);
-  }
-
-  /** Convert a Json map to LocatedBlock. */
-  public static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
-    if (m == null) {
-      return null;
-    }
-
-    final ExtendedBlock b = toExtendedBlock((Map<?, ?>)JSON.parse((String)m.get("block")));
-    final DatanodeInfo[] locations = toDatanodeInfoArray(
-        (Object[])JSON.parse((String)m.get("locations")));
-    final long startOffset = (Long)m.get("startOffset");
-    final boolean isCorrupt = (Boolean)m.get("isCorrupt");
-
-    final LocatedBlock locatedblock = new LocatedBlock(b, locations, startOffset, isCorrupt);
-    locatedblock.setBlockToken(toBlockToken((Map<?, ?>)JSON.parse((String)m.get("blockToken"))));
-    return locatedblock;
-  }
-
-  /** Convert a LocatedBlock[] to a Json string. */
-  public static String toJsonString(final List<LocatedBlock> array
-      ) throws IOException {
-    if (array == null) {
-      return null;
-    } else if (array.size() == 0) {
-      return "[]";
-    } else {
-      final StringBuilder b = new StringBuilder().append('[').append(
-          toJsonString(array.get(0)));
-      for(int i = 1; i < array.size(); i++) {
-        b.append(",\n  ").append(toJsonString(array.get(i)));
-      }
-      return b.append(']').toString();
-    }
-  }
-
-  /** Convert an Object[] to a List of LocatedBlock. 
-   * @throws IOException */
-  public static List<LocatedBlock> toLocatedBlockList(final Object[] objects
-      ) throws IOException {
-    if (objects == null) {
-      return null;
-    } else if (objects.length == 0) {
-      return Collections.emptyList();
-    } else {
-      final List<LocatedBlock> list = new ArrayList<LocatedBlock>(objects.length);
-      for(int i = 0; i < objects.length; i++) {
-        list.add((LocatedBlock)toLocatedBlock((Map<?, ?>)objects[i]));
-      }
-      return list;
-    }
-  }
-
-  /** Convert LocatedBlocks to a Json string. */
-  public static String toJsonString(final LocatedBlocks locatedblocks
-      ) throws IOException {
-    if (locatedblocks == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = jsonMap.get();
-    m.put("fileLength", locatedblocks.getFileLength());
-    m.put("isUnderConstruction", locatedblocks.isUnderConstruction());
-
-    m.put("locatedBlocks", toJsonString(locatedblocks.getLocatedBlocks()));
-    m.put("lastLocatedBlock", toJsonString(locatedblocks.getLastLocatedBlock()));
-    m.put("isLastBlockComplete", locatedblocks.isLastBlockComplete());
-    return JSON.toString(m);
-  }
-
-  /** Convert a Json map to LocatedBlock. */
-  public static LocatedBlocks toLocatedBlocks(final Map<String, Object> m
-      ) throws IOException {
-    if (m == null) {
-      return null;
-    }
-    
-    final long fileLength = (Long)m.get("fileLength");
-    final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
-    final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
-        (Object[])JSON.parse((String) m.get("locatedBlocks")));
-    final LocatedBlock lastLocatedBlock = toLocatedBlock(
-        (Map<?, ?>)JSON.parse((String)m.get("lastLocatedBlock")));
-    final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
-    return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
-        lastLocatedBlock, isLastBlockComplete);
-  }
 }
\ No newline at end of file
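
The JsonUtil change above collapses five per-type ThreadLocal maps into a single shared ThreadLocal<Map<String, Object>> and, for HdfsFileStatus, encodes null inside the JSON payload (an "isNull" marker) instead of returning a null string. A minimal standalone sketch of that pattern, with a plain String standing in for HdfsFileStatus and Map.toString() standing in for JSON.toString():

    import java.util.Map;
    import java.util.TreeMap;

    public class ThreadLocalMapSketch {
      // One TreeMap per thread, cleared and reused on every get(), so each
      // serialization avoids allocating a fresh map.
      private static final ThreadLocal<Map<String, Object>> jsonMap =
          new ThreadLocal<Map<String, Object>>() {
        @Override
        protected Map<String, Object> initialValue() {
          return new TreeMap<String, Object>();
        }

        @Override
        public Map<String, Object> get() {
          final Map<String, Object> m = super.get();
          m.clear();
          return m;
        }
      };

      static String toJsonish(final String localName) {
        final Map<String, Object> m = jsonMap.get();
        if (localName == null) {
          m.put("isNull", true);      // null is now part of the payload
        } else {
          m.put("isNull", false);
          m.put("localName", localName);
        }
        return m.toString();
      }

      public static void main(String[] args) {
        System.out.println(toJsonish("foo"));  // {isNull=false, localName=foo}
        System.out.println(toJsonish(null));   // {isNull=true}
      }
    }

One caveat with a single shared map: two serializations on the same thread cannot nest, since the inner get() would clear the outer call's entries. The removed code presumably kept separate maps per type (tokenMap, datanodeInfoMap, and so on) for exactly that reason, and the methods that survive this hunk no longer nest.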

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Thu Sep 29 00:33:34 2011
@@ -27,12 +27,9 @@ import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
-import java.util.Arrays;
-import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -48,7 +45,6 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -58,9 +54,7 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.GroupParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
-import org.apache.hadoop.hdfs.web.resources.LengthParam;
 import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
-import org.apache.hadoop.hdfs.web.resources.OffsetParam;
 import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
 import org.apache.hadoop.hdfs.web.resources.OwnerParam;
 import org.apache.hadoop.hdfs.web.resources.Param;
@@ -69,16 +63,13 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
 import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
-import org.apache.hadoop.hdfs.web.resources.RenewerParam;
 import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
@@ -91,24 +82,17 @@ public class WebHdfsFileSystem extends H
 
   private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
 
-  private final UserGroupInformation ugi;
+  private UserGroupInformation ugi;
   private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
   protected Path workingDir;
 
-  {
-    try {
-      ugi = UserGroupInformation.getCurrentUser();
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
 
+    ugi = UserGroupInformation.getCurrentUser();
     this.workingDir = getHomeDirectory();
   }
 
@@ -179,11 +163,11 @@ public class WebHdfsFileSystem extends H
     }
   }
 
-  URL toUrl(final HttpOpParam.Op op, final Path fspath,
+  private URL toUrl(final HttpOpParam.Op op, final Path fspath,
       final Param<?,?>... parameters) throws IOException {
     //initialize URI path and query
     final String path = "/" + PATH_PREFIX
-        + (fspath == null? "/": makeQualified(fspath).toUri().getPath());
+        + makeQualified(fspath).toUri().getPath();
     final String query = op.toQueryString()
         + '&' + new UserParam(ugi)
         + Param.toSortedString("&", parameters);
@@ -412,41 +396,4 @@ public class WebHdfsFileSystem extends H
     }
     return statuses;
   }
-
-  @Override
-  public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
-      ) throws IOException {
-    final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
-    final Map<String, Object> m = run(op, null, new RenewerParam(renewer));
-    final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m); 
-    token.setService(new Text(getCanonicalServiceName()));
-    return token;
-  }
-
-  @Override
-  public List<Token<?>> getDelegationTokens(final String renewer
-      ) throws IOException {
-    final Token<?>[] t = {getDelegationToken(renewer)};
-    return Arrays.asList(t);
-  }
-
-  @Override
-  public BlockLocation[] getFileBlockLocations(final FileStatus status,
-      final long offset, final long length) throws IOException {
-    if (status == null) {
-      return null;
-    }
-    return getFileBlockLocations(status.getPath(), offset, length);
-  }
-
-  @Override
-  public BlockLocation[] getFileBlockLocations(final Path p, 
-      final long offset, final long length) throws IOException {
-    statistics.incrementReadOps(1);
-
-    final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
-    final Map<String, Object> m = run(op, p, new OffsetParam(offset),
-        new LengthParam(length));
-    return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
-  }
 }
\ No newline at end of file
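
Two things happen in the WebHdfsFileSystem hunks above besides the removal of the delegation-token and block-location methods. First, ugi is no longer assigned in an instance initializer: an instance initializer cannot declare checked exceptions, so the IOException from UserGroupInformation.getCurrentUser() had to be wrapped in a RuntimeException, whereas initialize(URI, Configuration) already throws IOException (the cost is that ugi loses its final modifier). Second, toUrl becomes private and drops its null-path branch, which appears to have existed only for the removed getDelegationToken, the one caller that passed a null path to run(). A small sketch of the initializer-to-initialize move, with hypothetical names:

    import java.io.IOException;

    public class InitOrderSketch {
      private String user;  // no longer final once assignment moves to initialize()

      // Before: an instance initializer cannot throw a checked exception, so
      // the lookup had to be wrapped:
      //   { try { user = lookupUser(); }
      //     catch (IOException e) { throw new RuntimeException(e); } }

      // After: the init method declares IOException, so no wrapper is needed.
      public synchronized void initialize() throws IOException {
        user = lookupUser();
      }

      private static String lookupUser() throws IOException {
        return System.getProperty("user.name");
      }

      public static void main(String[] args) throws IOException {
        InitOrderSketch s = new InitOrderSketch();
        s.initialize();
        System.out.println(s.user);
      }
    }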

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java Thu Sep 29 00:33:34 2011
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.web.resou
 /** Access time parameter. */
 public class AccessTimeParam extends LongParam {
   /** Parameter name. */
-  public static final String NAME = "accesstime";
+  public static final String NAME = "accessTime";
   /** Default parameter value. */
   public static final String DEFAULT = "-1";
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java Thu Sep 29 00:33:34 2011
@@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configurat
 /** Block size parameter. */
 public class BlockSizeParam extends LongParam {
   /** Parameter name. */
-  public static final String NAME = "blocksize";
+  public static final String NAME = "blockSize";
   /** Default parameter value. */
   public static final String DEFAULT = NULL;
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java Thu Sep 29 00:33:34 2011
@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.CommonConfig
 /** Buffer size parameter. */
 public class BufferSizeParam extends IntegerParam {
   /** Parameter name. */
-  public static final String NAME = "buffersize";
+  public static final String NAME = "bufferSize";
   /** Default parameter value. */
   public static final String DEFAULT = NULL;
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java Thu Sep 29 00:33:34 2011
@@ -17,12 +17,13 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
+import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /** Delegation token parameter. */
 public class DelegationParam extends StringParam {
   /** Parameter name. */
-  public static final String NAME = "delegation";
+  public static final String NAME = JspHelper.DELEGATION_PARAMETER_NAME;
   /** Default parameter value. */
   public static final String DEFAULT = "";
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java Thu Sep 29 00:33:34 2011
@@ -21,6 +21,9 @@ import java.net.HttpURLConnection;
 
 /** Http DELETE operation parameter. */
 public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
+  /** Parameter name. */
+  public static final String NAME = "deleteOp";
+
   /** Delete operations. */
   public static enum Op implements HttpOpParam.Op {
     DELETE(HttpURLConnection.HTTP_OK),

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java Thu Sep 29 00:33:34 2011
@@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path;
 /** Destination path parameter. */
 public class DstPathParam extends StringParam {
   /** Parameter name. */
-  public static final String NAME = "dstpath";
+  public static final String NAME = "dstPath";
   /** Default parameter value. */
   public static final String DEFAULT = "";
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Thu Sep 29 00:33:34 2011
@@ -21,16 +21,16 @@ import java.net.HttpURLConnection;
 
 /** Http GET operation parameter. */
 public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
+  /** Parameter name. */
+  public static final String NAME = "getOp";
+
   /** Get operations. */
   public static enum Op implements HttpOpParam.Op {
     OPEN(HttpURLConnection.HTTP_OK),
-    GETFILEBLOCKLOCATIONS(HttpURLConnection.HTTP_OK),
 
     GETFILESTATUS(HttpURLConnection.HTTP_OK),
     LISTSTATUS(HttpURLConnection.HTTP_OK),
 
-    GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),
-
     NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
     final int expectedHttpResponseCode;
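
Each HTTP method now carries its own parameter name (getOp here, plus deleteOp, postOp, and putOp in the sibling classes), replacing the single shared "op" constant that the next hunk removes from HttpOpParam. The enum-implements-interface shape, where every operation also records the HTTP status code a successful response should return, can be sketched on its own (names here are illustrative, not the Hadoop classes):

    import java.net.HttpURLConnection;

    public class OpParamSketch {
      interface Op {
        int expectedHttpResponseCode();
      }

      // One enum per HTTP method; each constant knows its expected status.
      enum GetOp implements Op {
        OPEN(HttpURLConnection.HTTP_OK),
        GETFILESTATUS(HttpURLConnection.HTTP_OK),
        LISTSTATUS(HttpURLConnection.HTTP_OK),
        NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);

        private final int expected;

        GetOp(int expected) {
          this.expected = expected;
        }

        @Override
        public int expectedHttpResponseCode() {
          return expected;
        }
      }

      public static void main(String[] args) {
        System.out.println(GetOp.OPEN.expectedHttpResponseCode());  // 200
      }
    }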

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java Thu Sep 29 00:33:34 2011
@@ -20,9 +20,6 @@ package org.apache.hadoop.hdfs.web.resou
 /** Http operation parameter. */
 public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
     extends EnumParam<E> {
-  /** Parameter name. */
-  public static final String NAME = "op";
-
   /** Default parameter value. */
   public static final String DEFAULT = NULL;
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java Thu Sep 29 00:33:34 2011
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.web.resou
 /** Modification time parameter. */
 public class ModificationTimeParam extends LongParam {
   /** Parameter name. */
-  public static final String NAME = "modificationtime";
+  public static final String NAME = "modificationTime";
   /** Default parameter value. */
   public static final String DEFAULT = "-1";
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java Thu Sep 29 00:33:34 2011
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
-/** Overwrite parameter. */
+/** Recursive parameter. */
 public class OverwriteParam extends BooleanParam {
   /** Parameter name. */
   public static final String NAME = "overwrite";

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java Thu Sep 29 00:33:34 2011
@@ -21,6 +21,9 @@ import java.net.HttpURLConnection;
 
 /** Http POST operation parameter. */
 public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
+  /** Parameter name. */
+  public static final String NAME = "postOp";
+
   /** Post operations. */
   public static enum Op implements HttpOpParam.Op {
     APPEND(HttpURLConnection.HTTP_OK),

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java Thu Sep 29 00:33:34 2011
@@ -21,6 +21,9 @@ import java.net.HttpURLConnection;
 
 /** Http PUT operation parameter. */
 public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
+  /** Parameter name. */
+  public static final String NAME = "putOp";
+
   /** Put operations. */
   public static enum Op implements HttpOpParam.Op {
     CREATE(true, HttpURLConnection.HTTP_CREATED),

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java Thu Sep 29 00:33:34 2011
@@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Options;
 /** Rename option set parameter. */
 public class RenameOptionSetParam extends EnumSetParam<Options.Rename> {
   /** Parameter name. */
-  public static final String NAME = "renameoptions";
+  public static final String NAME = "renameOptions";
   /** Default parameter value. */
   public static final String DEFAULT = "";
 
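With this commit the WebHDFS query parameters become consistently camelCase: accessTime, blockSize, bufferSize, dstPath, modificationTime, and renameOptions above, plus the per-method operation parameters. As a hypothetical example (host, port, and path prefix are placeholders, and the user parameter's exact name is not shown in this diff), a create request might now look like:

    PUT http://namenode:50070/<PATH_PREFIX>/user/alice/file?putOp=CREATE&overwrite=true&blockSize=134217728&bufferSize=4096&replication=3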

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:33:34 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1177115
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1173011
 /hadoop/core/branches/branch-0.19/hdfs/src/main/native:713112
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Thu Sep 29 00:33:34 2011
@@ -683,4 +683,24 @@ creations/deletions), or "all".</descrip
   </description>
 </property>
 
+<property>
+  <name>dfs.web.authentication.kerberos.principal</name>
+  <value>HTTP/${dfs.web.hostname}@${kerberos.realm}</value>
+  <description>
+    The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+
+    The HTTP Kerberos principal MUST start with 'HTTP/' per the Kerberos
+    HTTP SPNEGO specification.
+  </description>
+</property>
+
+<property>
+  <name>dfs.web.authentication.kerberos.keytab</name>
+  <value>${user.home}/dfs.web.keytab</value>
+  <description>
+    The Kerberos keytab file with the credentials for the
+    HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+  </description>
+</property>
+
 </configuration>
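
The two new properties above wire Hadoop-Auth's SPNEGO filter to a service principal and keytab. A hedged sketch of setting them programmatically (host, realm, and keytab path are placeholders; only the property names come from the diff):

    import org.apache.hadoop.conf.Configuration;

    public class WebAuthConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The principal must start with "HTTP/" per the SPNEGO requirement
        // quoted in the description above.
        conf.set("dfs.web.authentication.kerberos.principal",
            "HTTP/namenode.example.com@EXAMPLE.COM");
        conf.set("dfs.web.authentication.kerberos.keytab",
            "/etc/security/keytab/dfs.web.keytab");
        System.out.println(conf.get("dfs.web.authentication.kerberos.principal"));
      }
    }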

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:33:34 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1177115
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1173011
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/datanode:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:33:34 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1177115
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1173011
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/hdfs:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:33:34 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1177115
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1173011
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/secondary:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:33:34 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1177115
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1173011
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java Thu Sep 29 00:33:34 2011
@@ -72,7 +72,6 @@ public class TestDFSPermission extends T
   final private static Path NON_EXISTENT_FILE = new Path("/NonExistentFile");
 
   private FileSystem fs;
-  private MiniDFSCluster cluster;
   private static Random r;
 
   static {
@@ -106,25 +105,18 @@ public class TestDFSPermission extends T
     }
   }
 
-  @Override
-  public void setUp() throws IOException {
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
-    cluster.waitActive();
-  }
-  
-  @Override
-  public void tearDown() throws IOException {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-  
   /** This tests if permission setting in create, mkdir, and 
    * setPermission works correctly
    */
   public void testPermissionSetting() throws Exception {
-    testPermissionSetting(OpType.CREATE); // test file creation
-    testPermissionSetting(OpType.MKDIRS); // test directory creation
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    try {
+      cluster.waitActive();
+      testPermissionSetting(OpType.CREATE); // test file creation
+      testPermissionSetting(OpType.MKDIRS); // test directory creation
+    } finally {
+      cluster.shutdown();
+    }
   }
 
   private void initFileSystem(short umask) throws Exception {
@@ -253,22 +245,17 @@ public class TestDFSPermission extends T
     }
   }
 
-  /**
-   * check that ImmutableFsPermission can be used as the argument
-   * to setPermission
-   */
-  public void testImmutableFsPermission() throws IOException {
-    fs = FileSystem.get(conf);
-
-    // set the permission of the root to be world-wide rwx
-    fs.setPermission(new Path("/"),
-        FsPermission.createImmutable((short)0777));
-  }
-  
   /* check if the ownership of a file/directory is set correctly */
   public void testOwnership() throws Exception {
-    testOwnership(OpType.CREATE); // test file creation
-    testOwnership(OpType.MKDIRS); // test directory creation
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    try {
+      cluster.waitActive();
+      testOwnership(OpType.CREATE); // test file creation
+      testOwnership(OpType.MKDIRS); // test directory creation
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
   }
 
   /* change a file/directory's owner and group.
@@ -355,7 +342,9 @@ public class TestDFSPermission extends T
   /* Check if namenode performs permission checking correctly for
    * superuser, file owner, group owner, and other users */
   public void testPermissionChecking() throws Exception {
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
+      cluster.waitActive();
       fs = FileSystem.get(conf);
 
       // set the permission of the root to be world-wide rwx
@@ -412,6 +401,7 @@ public class TestDFSPermission extends T
           parentPermissions, permissions, parentPaths, filePaths, dirPaths);
     } finally {
       fs.close();
+      cluster.shutdown();
     }
   }
 
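The TestDFSPermission hunks replace a shared setUp()/tearDown() MiniDFSCluster with a per-test cluster wrapped in try/finally, so only the tests that actually need a cluster pay for one, and each owns its lifecycle. The generic shape, sketched with a stand-in resource rather than a real MiniDFSCluster:

    public class PerTestResourceSketch {
      static class Resource {                 // stand-in for MiniDFSCluster
        void waitActive() {}
        void shutdown() { System.out.println("shut down"); }
      }

      public void testSomething() throws Exception {
        Resource cluster = new Resource();    // built inside the test itself
        try {
          cluster.waitActive();
          // ... assertions against the cluster ...
        } finally {
          cluster.shutdown();                 // runs even if an assertion throws
        }
      }

      public static void main(String[] args) throws Exception {
        new PerTestResourceSketch().testSomething();
      }
    }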

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Thu Sep 29 00:33:34 2011
@@ -29,7 +29,8 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.hadoop.HadoopIllegalArgumentException;
+import junit.framework.Assert;
+
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -39,7 +40,8 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
+
 
 public class TestDFSUtil {
   /**
@@ -74,141 +76,79 @@ public class TestDFSUtil {
       }
     }
 
-    assertTrue("expected 1 corrupt files but got " + corruptCount,
-        corruptCount == 1);
-
+    assertTrue("expected 1 corrupt files but got " + corruptCount, 
+               corruptCount == 1);
+    
     // test an empty location
     bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
     assertEquals(0, bs.length);
   }
 
-
-  private Configuration setupAddress(String key) {
-    HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
-    conf.set(DFSUtil.getNameServiceIdKey(key, "nn1"), "localhost:9000");
-    return conf;
-  }
-
-  /**
-   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
-   * nameserviceId from the configuration returned
-   */
-  @Test
-  public void getNameServiceId() {
-    HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
-  }
-  
-  /**
-   * Test {@link DFSUtil#getNameNodeNameServiceId(Configuration)} to ensure
-   * nameserviceId for namenode is determined based on matching the address with
-   * local node's address
-   */
-  @Test
-  public void getNameNodeNameServiceId() {
-    Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
-  }
-
-  /**
-   * Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
-   * nameserviceId for backup node is determined based on matching the address
-   * with local node's address
-   */
-  @Test
-  public void getBackupNameServiceId() {
-    Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
-    assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
-  }
-
-  /**
-   * Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
-   * nameserviceId for backup node is determined based on matching the address
-   * with local node's address
-   */
-  @Test
-  public void getSecondaryNameServiceId() {
-    Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
-    assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
-  }
-
-  /**
-   * Test {@link DFSUtil#getNameServiceId(Configuration, String))} to ensure
-   * exception is thrown when multiple rpc addresses match the local node's
-   * address
-   */
-  @Test(expected = HadoopIllegalArgumentException.class)
-  public void testGetNameServiceIdException() {
-    HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
-    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
-        "localhost:9000");
-    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
-        "localhost:9001");
-    DFSUtil.getNamenodeNameServiceId(conf);
-    fail("Expected exception is not thrown");
-  }
-
-  /**
-   * Test {@link DFSUtil#getNameServiceIds(Configuration)}
+  /** 
+   * Test for
+   * {@link DFSUtil#getNameServiceIds(Configuration)}
+   * {@link DFSUtil#getNameServiceId(Configuration)}
+   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
    */
   @Test
-  public void testGetNameServiceIds() {
+  public void testMultipleNamenodes() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    
+    // Test - The configured nameserviceIds are returned
     Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
     Iterator<String> it = nameserviceIds.iterator();
     assertEquals(2, nameserviceIds.size());
     assertEquals("nn1", it.next().toString());
     assertEquals("nn2", it.next().toString());
-  }
-
-  /**
-   * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
-   * {@link DFSUtil#getNameServiceIdFromAddress(Configuration, InetSocketAddress, String...)
-   * (Configuration)}
-   */
-  @Test
-  public void testMultipleNamenodes() throws IOException {
-    HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    
+    // Tests default nameserviceId is returned
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    assertEquals("nn1", DFSUtil.getNameServiceId(conf));
+    
     // Test - configured list of namenodes are returned
     final String NN1_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
     final String NN3_ADDRESS = "localhost:9002";
-    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
-        NN1_ADDRESS);
-    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
-        NN2_ADDRESS);
-
-    Collection<InetSocketAddress> nnAddresses = DFSUtil
-        .getNNServiceRpcAddresses(conf);
+    conf.set(DFSUtil.getNameServiceIdKey(
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
+    conf.set(DFSUtil.getNameServiceIdKey(
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
+    
+    Collection<InetSocketAddress> nnAddresses = 
+      DFSUtil.getNNServiceRpcAddresses(conf);
     assertEquals(2, nnAddresses.size());
     Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
+    assertEquals(2, nameserviceIds.size());
     InetSocketAddress addr = iterator.next();
     assertEquals("localhost", addr.getHostName());
     assertEquals(9000, addr.getPort());
     addr = iterator.next();
     assertEquals("localhost", addr.getHostName());
     assertEquals(9001, addr.getPort());
-
+    
     // Test - can look up nameservice ID from service address
-    checkNameServiceId(conf, NN1_ADDRESS, "nn1");
-    checkNameServiceId(conf, NN2_ADDRESS, "nn2");
-    checkNameServiceId(conf, NN3_ADDRESS, null);
-  }
-
-  public void checkNameServiceId(Configuration conf, String addr,
-      String expectedNameServiceId) {
-    InetSocketAddress s = NetUtils.createSocketAddr(addr);
-    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
-        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals(expectedNameServiceId, nameserviceId);
+    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
+    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
+        conf, testAddress1,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals("nn1", nameserviceId);
+    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
+    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
+        conf, testAddress2,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals("nn2", nameserviceId);
+    InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
+    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
+        conf, testAddress3,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertNull(nameserviceId);
   }
-
-  /**
+  
+  /** 
    * Test for
    * {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
    */
@@ -217,25 +157,27 @@ public class TestDFSUtil {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String DEFAULT_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
-    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
-
+    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
+    
     InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
     boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
-        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertTrue(isDefault);
     InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
     isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
-        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertFalse(isDefault);
   }
-
+  
   /** Tests to ensure default namenode is used as fallback */
   @Test
   public void testDefaultNamenode() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String hdfs_default = "hdfs://localhost:9999/";
-    conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
-    // If DFS_FEDERATION_NAMESERVICES is not set, verify that
+    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
+    // If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that 
     // default namenode address is returned.
     List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
     assertEquals(1, addrList.size());
@@ -249,26 +191,26 @@ public class TestDFSUtil {
   @Test
   public void testConfModification() throws IOException {
     final HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
-    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
-
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    final String nameserviceId = DFSUtil.getNameServiceId(conf);
+    
     // Set the nameservice specific keys with nameserviceId in the config key
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
       // Note: value is same as the key
       conf.set(DFSUtil.getNameServiceIdKey(key, nameserviceId), key);
     }
-
+    
     // Initialize generic keys from specific keys
-    NameNode.initializeGenericKeys(conf, nameserviceId);
-
+    NameNode.initializeGenericKeys(conf);
+    
     // Retrieve the keys without nameserviceId and Ensure generic keys are set
     // to the correct value
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
       assertEquals(key, conf.get(key));
     }
   }
-
+  
   /**
    * Tests for empty configuration, an exception is thrown from
    * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
@@ -296,16 +238,16 @@ public class TestDFSUtil {
     } catch (IOException expected) {
     }
   }
-
+  
   @Test
-  public void testGetServerInfo() {
+  public void testGetServerInfo(){
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
     String httpsport = DFSUtil.getInfoServer(null, conf, true);
-    assertEquals("0.0.0.0:50470", httpsport);
+    Assert.assertEquals("0.0.0.0:50470", httpsport);
     String httpport = DFSUtil.getInfoServer(null, conf, false);
-    assertEquals("0.0.0.0:50070", httpport);
+    Assert.assertEquals("0.0.0.0:50070", httpport);
   }
 
 }
\ No newline at end of file
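
The consolidated testMultipleNamenodes above leans on per-nameservice configuration keys built by DFSUtil.getNameServiceIdKey. That method's body is not shown in this diff; federation keys are conventionally suffixed with the nameservice id, which a hypothetical reimplementation would capture as:

    public class NameServiceKeySketch {
      // Assumed behavior only: suffix the generic key with the nameservice id.
      static String getNameServiceIdKey(String key, String nameserviceId) {
        return key + "." + nameserviceId;
      }

      public static void main(String[] args) {
        // DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY resolves to a string
        // like the literal used here.
        System.out.println(
            getNameServiceIdKey("dfs.namenode.rpc-address", "nn1"));
        // dfs.namenode.rpc-address.nn1
      }
    }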

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Thu Sep 29 00:33:34 2011
@@ -17,10 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
 
@@ -28,15 +24,17 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+
 import org.junit.Test;
+import static org.junit.Assert.*;
 
 /** A class for testing quota-related commands */
 public class TestQuota {
@@ -843,14 +841,6 @@ public class TestQuota {
     DFSAdmin admin = new DFSAdmin(conf);
 
     try {
-      
-      //Test for default NameSpace Quota
-      long nsQuota = FSImageTestUtil.getNSQuota(cluster.getNameNode()
-          .getNamesystem());
-      assertTrue(
-          "Default namespace quota expected as long max. But the value is :"
-              + nsQuota, nsQuota == Long.MAX_VALUE);
-      
       Path dir = new Path("/test");
       boolean exceededQuota = false;
       ContentSummary c;

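The surviving body of TestQuota drives quotas through DFSAdmin and ContentSummary. A minimal usage sketch of that path (not part of this commit; the path and quota value are hypothetical):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.ContentSummary;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.tools.DFSAdmin;

  public class QuotaSketch {
    static void setAndReadQuota(FileSystem fs, Configuration conf)
        throws Exception {
      Path dir = new Path("/test");
      fs.mkdirs(dir);
      // Limit the namespace under /test to 3 names (the directory counts).
      new DFSAdmin(conf).run(new String[] {"-setQuota", "3", dir.toString()});
      ContentSummary c = fs.getContentSummary(dir);
      System.out.println("namespace quota = " + c.getQuota());
    }
  }
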
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java Thu Sep 29 00:33:34 2011
@@ -23,12 +23,12 @@ package org.apache.hadoop.hdfs.security;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 
+import junit.framework.Assert;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,16 +38,12 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
-import org.apache.log4j.Level;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -60,13 +56,12 @@ public class TestDelegationToken {
   @Before
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
-    config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
     config.set("hadoop.security.auth_to_local",
         "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
-    cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
+    cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
     dtSecretManager = NameNodeAdapter.getDtSecretManager(
         cluster.getNamesystem());
@@ -159,31 +154,6 @@ public class TestDelegationToken {
   }
   
   @Test
-  public void testDelegationTokenWebHdfsApi() throws Exception {
-    ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
-    final String uri = WebHdfsFileSystem.SCHEME  + "://"
-        + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    //get file system as JobTracker
-    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
-        "JobTracker", new String[]{"user"});
-    final WebHdfsFileSystem webhdfs = ugi.doAs(
-        new PrivilegedExceptionAction<WebHdfsFileSystem>() {
-      @Override
-      public WebHdfsFileSystem run() throws Exception {
-        return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
-      }
-    });
-
-    final Token<DelegationTokenIdentifier> token = webhdfs.getDelegationToken("JobTracker");
-    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
-    byte[] tokenId = token.getIdentifier();
-    identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
-    LOG.info("A valid token should have non-null password, and should be renewed successfully");
-    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
-    dtSecretManager.renewToken(token, "JobTracker");
-  }
-
-  @Test
   public void testDelegationTokenWithDoAs() throws Exception {
     final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
     final Token<DelegationTokenIdentifier> token = 

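With the WebHDFS token test removed, the remaining tests fetch tokens through DistributedFileSystem. A sketch of that path (not part of this commit; the Text-renewer signature is assumed from this branch's era):

  import java.io.ByteArrayInputStream;
  import java.io.DataInputStream;

  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.security.token.Token;

  public class DelegationTokenSketch {
    static void fetchAndDecode(DistributedFileSystem dfs) throws Exception {
      Token<DelegationTokenIdentifier> token =
          dfs.getDelegationToken(new Text("JobTracker"));
      // Decode the identifier bytes to inspect the owner/renewer fields.
      DelegationTokenIdentifier id = new DelegationTokenIdentifier();
      id.readFields(new DataInputStream(
          new ByteArrayInputStream(token.getIdentifier())));
      System.out.println("token user = " + id.getUser());
    }
  }
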
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java Thu Sep 29 00:33:34 2011
@@ -18,34 +18,31 @@
 
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import junit.framework.TestCase;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.junit.Before;
-import org.junit.Test;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap;
 
-public class TestHost2NodesMap {
-  private Host2NodesMap map = new Host2NodesMap();
-  private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
+public class TestHost2NodesMap extends TestCase {
+  static private Host2NodesMap map = new Host2NodesMap();
+  private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
     new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
     new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
   };
-  private final DatanodeDescriptor NULL_NODE = null; 
-  private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
-      "/d1/r4");
+  private final static DatanodeDescriptor NULL_NODE = null; 
+  private final static DatanodeDescriptor NODE = 
+    new DatanodeDescriptor(new DatanodeID("h3:5040"), "/d1/r4");
 
-  @Before
-  public void setup() {
+  static {
     for(DatanodeDescriptor node:dataNodes) {
       map.add(node);
     }
     map.add(NULL_NODE);
   }
   
-  @Test
   public void testContains() throws Exception {
     for(int i=0; i<dataNodes.length; i++) {
       assertTrue(map.contains(dataNodes[i]));
@@ -54,7 +51,6 @@ public class TestHost2NodesMap {
     assertFalse(map.contains(NODE));
   }
 
-  @Test
   public void testGetDatanodeByHost() throws Exception {
     assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
     assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
@@ -63,7 +59,6 @@ public class TestHost2NodesMap {
     assertTrue(null==map.getDatanodeByHost("h4"));
   }
 
-  @Test
   public void testGetDatanodeByName() throws Exception {
     assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
     assertTrue(map.getDatanodeByName("h1:5030")==null);
@@ -76,7 +71,6 @@ public class TestHost2NodesMap {
     assertTrue(map.getDatanodeByName(null)==null);
   }
 
-  @Test
   public void testRemove() throws Exception {
     assertFalse(map.remove(NODE));
     

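The assertions above pin down the Host2NodesMap lookup contract: by-host lookups return a datanode registered on that host, while by-name lookups require an exact host:port match. A sketch (not part of this commit; placed in the blockmanagement package since Host2NodesMap may be package-scoped):

  package org.apache.hadoop.hdfs.server.blockmanagement;

  import org.apache.hadoop.hdfs.protocol.DatanodeID;

  public class Host2NodesSketch {
    public static void main(String[] args) {
      Host2NodesMap map = new Host2NodesMap();
      map.add(new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"));
      // By host: any datanode on "h1"; by name: exact "host:port" match.
      System.out.println(map.getDatanodeByHost("h1"));      // the node above
      System.out.println(map.getDatanodeByName("h1:5030")); // null: wrong port
    }
  }
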
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java Thu Sep 29 00:33:34 2011
@@ -96,8 +96,7 @@ public class TestMulitipleNNDataBlockSca
 
       String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
       for (int i = 0; i < 2; i++) {
-        String nsId = DFSUtil.getNamenodeNameServiceId(cluster
-            .getConfiguration(i));
+        String nsId = DFSUtil.getNameServiceId(cluster.getConfiguration(i));
         namenodesBuilder.append(nsId);
         namenodesBuilder.append(",");
       }
@@ -117,7 +116,7 @@ public class TestMulitipleNNDataBlockSca
         LOG.info(ex.getMessage());
       }
 
-      namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
+      namenodesBuilder.append(DFSUtil.getNameServiceId(cluster
           .getConfiguration(2)));
       conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
           .toString());

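The hunk above rebuilds the federated nameservice list from each namenode's configuration. A condensed sketch of the same loop (not part of this commit; the MiniDFSCluster wiring is assumed):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSConfigKeys;
  import org.apache.hadoop.hdfs.DFSUtil;
  import org.apache.hadoop.hdfs.MiniDFSCluster;

  public class NameServiceListSketch {
    static void rebuildNameServices(MiniDFSCluster cluster, Configuration conf,
        int numNameNodes) {
      StringBuilder b = new StringBuilder();
      for (int i = 0; i < numNameNodes; i++) {
        if (i > 0) b.append(",");
        b.append(DFSUtil.getNameServiceId(cluster.getConfiguration(i)));
      }
      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, b.toString());
    }
  }
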
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java Thu Sep 29 00:33:34 2011
@@ -17,24 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
-
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.junit.Before;
+import static org.junit.Assert.*;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 /**
  * Unit test for ReplicasMap class
  */
 public class TestReplicasMap {
-  private final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
-  private final String bpid = "BP-TEST";
-  private final  Block block = new Block(1234, 1234, 1234);
+  private static final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
+  private static final String bpid = "BP-TEST";
+  private static final  Block block = new Block(1234, 1234, 1234);
   
-  @Before
-  public void setup() {
+  @BeforeClass
+  public static void setup() {
     map.add(bpid, new FinalizedReplica(block, null, null));
   }
   

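The switch to @BeforeClass works here because the map, block-pool id, and block are now static, so a single setup call serves every test. A sketch of the ReplicasMap operations involved (not part of this commit; placed in the datanode package since these types may be package-scoped, and the get signature is assumed):

  package org.apache.hadoop.hdfs.server.datanode;

  import org.apache.hadoop.hdfs.protocol.Block;

  public class ReplicasMapSketch {
    static void demo() {
      // The constructor argument serves as the map's synchronization mutex.
      ReplicasMap map = new ReplicasMap(ReplicasMapSketch.class);
      String bpid = "BP-TEST";
      Block block = new Block(1234, 1234, 1234);
      map.add(bpid, new FinalizedReplica(block, null, null));
      System.out.println(map.get(bpid, block)); // non-null once added
    }
  }
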
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Thu Sep 29 00:33:34 2011
@@ -412,11 +412,4 @@ public abstract class FSImageTestUtil {
   public static FSImage getFSImage(NameNode node) {
     return node.getFSImage();
   }
-
-  /**
-   * get NameSpace quota.
-   */
-  public static long getNSQuota(FSNamesystem ns) {
-    return ns.dir.rootDir.getNsQuota();
-  }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java Thu Sep 29 00:33:34 2011
@@ -18,23 +18,17 @@
 
 package org.apache.hadoop.hdfs.web;
 
-import java.io.BufferedReader;
 import java.io.IOException;
-import java.io.InputStreamReader;
-import java.net.HttpURLConnection;
 import java.net.URI;
-import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -120,42 +114,4 @@ public class TestWebHdfsFileSystemContra
       // also okay for HDFS.
     }    
   }
-  
-  public void testGetFileBlockLocations() throws IOException {
-    final String f = "/test/testGetFileBlockLocations";
-    createFile(path(f));
-    final BlockLocation[] computed = fs.getFileBlockLocations(new Path(f), 0L, 1L);
-    final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
-        new Path(f), 0L, 1L);
-    assertEquals(expected.length, computed.length);
-    for(int i = 0; i < computed.length; i++) {
-      assertEquals(expected[i].toString(), computed[i].toString());
-    }
-  }
-
-  public void testCaseInsensitive() throws IOException {
-    final Path p = new Path("/test/testCaseInsensitive");
-    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
-    final PutOpParam.Op op = PutOpParam.Op.MKDIRS;
-
-    //replace query with mix case letters
-    final URL url = webhdfs.toUrl(op, p);
-    WebHdfsFileSystem.LOG.info("url      = " + url);
-    final URL replaced = new URL(url.toString().replace(op.toQueryString(),
-        "Op=mkDIrs"));
-    WebHdfsFileSystem.LOG.info("replaced = " + replaced);
-
-    //connect with the replaced URL.
-    final HttpURLConnection conn = (HttpURLConnection)replaced.openConnection();
-    conn.setRequestMethod(op.getType().toString());
-    conn.connect();
-    final BufferedReader in = new BufferedReader(new InputStreamReader(
-        conn.getInputStream()));
-    for(String line; (line = in.readLine()) != null; ) {
-      WebHdfsFileSystem.LOG.info("> " + line);
-    }
-
-    //check if the command succeeds.
-    assertTrue(fs.getFileStatus(p).isDirectory());
-  }
 }

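The removed testGetFileBlockLocations compared WebHDFS block locations against the underlying HDFS answer string-for-string. A generic sketch of that comparison (not part of this commit; both FileSystem instances are assumed to be already constructed and to see the same file):

  import org.apache.hadoop.fs.BlockLocation;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class BlockLocationSketch {
    static boolean sameLocations(FileSystem a, FileSystem b, Path p)
        throws Exception {
      BlockLocation[] x = a.getFileBlockLocations(p, 0L, 1L);
      BlockLocation[] y = b.getFileBlockLocations(p, 0L, 1L);
      if (x.length != y.length) return false;
      for (int i = 0; i < x.length; i++) {
        if (!x[i].toString().equals(y[i].toString())) return false;
      }
      return true;
    }
  }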

