hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1462652 [6/6] - in /hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs: ./ src/ src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ src/contrib/libwebhdfs/ src/contrib/libwebhdfs/src/ src/main/bin/ src/mai...
Date: Fri, 29 Mar 2013 21:33:37 GMT
Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java?rev=1462652&r1=1462651&r2=1462652&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java Fri Mar 29 21:33:35 2013
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.web;
 
+import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -26,78 +27,270 @@ import static org.mockito.Mockito.mock;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URL;
+import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.web.resources.DelegationParam;
-import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
-import org.apache.hadoop.hdfs.web.resources.PutOpParam;
-import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
+import org.apache.hadoop.hdfs.web.resources.*;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.*;
 
 public class TestWebHdfsUrl {
+  // NOTE: port is never used 
+  final URI uri = URI.create(WebHdfsFileSystem.SCHEME + "://" + "127.0.0.1:0");
 
-  @Test
-  public void testDelegationTokenInUrl() throws IOException {
+  @Before
+  public void resetUGI() {
+    UserGroupInformation.setConfiguration(new Configuration());
+  }
+  
+  @Test(timeout=4000)
+  public void testSimpleAuthParamsInUrl() throws IOException {
+    Configuration conf = new Configuration();
+
+    UserGroupInformation ugi =
+        UserGroupInformation.createRemoteUser("test-user");
+    UserGroupInformation.setLoginUser(ugi);
+
+    WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
+    Path fsPath = new Path("/");
+
+    // send user+token
+    URL fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
+    checkQueryParams(
+        new String[]{
+            GetOpParam.Op.GETFILESTATUS.toQueryString(),
+            new UserParam(ugi.getShortUserName()).toString()
+        },
+        fileStatusUrl);
+  }
+
+  @Test(timeout=4000)
+  public void testSimpleProxyAuthParamsInUrl() throws IOException {
+    Configuration conf = new Configuration();
+
+    UserGroupInformation ugi =
+        UserGroupInformation.createRemoteUser("test-user");
+    ugi = UserGroupInformation.createProxyUser("test-proxy-user", ugi);
+    UserGroupInformation.setLoginUser(ugi);
+
+    WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
+    Path fsPath = new Path("/");
+
+    // send real+effective
+    URL fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
+    checkQueryParams(
+        new String[]{
+            GetOpParam.Op.GETFILESTATUS.toQueryString(),
+            new UserParam(ugi.getRealUser().getShortUserName()).toString(),
+            new DoAsParam(ugi.getShortUserName()).toString()
+    },
+        fileStatusUrl);
+  }
+
+  @Test(timeout=4000)
+  public void testSecureAuthParamsInUrl() throws IOException {
+    Configuration conf = new Configuration();
+    // fake turning on security so api thinks it should use tokens
+    SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
+    UserGroupInformation.setConfiguration(conf);
+
+    UserGroupInformation ugi =
+        UserGroupInformation.createRemoteUser("test-user");
+    ugi.setAuthenticationMethod(KERBEROS);
+    UserGroupInformation.setLoginUser(ugi);
+
+    WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
+    Path fsPath = new Path("/");
+    String tokenString = webhdfs.getDelegationToken().encodeToUrlString();
+
+    // send user
+    URL getTokenUrl = webhdfs.toUrl(GetOpParam.Op.GETDELEGATIONTOKEN, fsPath);
+    checkQueryParams(
+        new String[]{
+            GetOpParam.Op.GETDELEGATIONTOKEN.toQueryString(),
+            new UserParam(ugi.getShortUserName()).toString()
+        },
+        getTokenUrl);
+
+    // send user
+    URL renewTokenUrl = webhdfs.toUrl(PutOpParam.Op.RENEWDELEGATIONTOKEN,
+        fsPath, new TokenArgumentParam(tokenString));
+    checkQueryParams(
+        new String[]{
+            PutOpParam.Op.RENEWDELEGATIONTOKEN.toQueryString(),
+            new UserParam(ugi.getShortUserName()).toString(),
+            new TokenArgumentParam(tokenString).toString(),
+        },
+        renewTokenUrl);
+
+    // send token
+    URL cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
+        fsPath, new TokenArgumentParam(tokenString));
+    checkQueryParams(
+        new String[]{
+            PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
+            new UserParam(ugi.getShortUserName()).toString(),
+            new TokenArgumentParam(tokenString).toString(),
+        },
+        cancelTokenUrl);
+    
+    // send token
+    URL fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
+    checkQueryParams(
+        new String[]{
+            GetOpParam.Op.GETFILESTATUS.toQueryString(),
+            new DelegationParam(tokenString).toString()
+        },
+        fileStatusUrl);
+
+    // wipe out internal token to simulate auth always required
+    webhdfs.setDelegationToken(null);
+
+    // send user
+    cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
+        fsPath, new TokenArgumentParam(tokenString));
+    checkQueryParams(
+        new String[]{
+            PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
+            new UserParam(ugi.getShortUserName()).toString(),
+            new TokenArgumentParam(tokenString).toString(),
+        },
+        cancelTokenUrl);
+
+    // send user
+    fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
+    checkQueryParams(
+        new String[]{
+            GetOpParam.Op.GETFILESTATUS.toQueryString(),
+            new UserParam(ugi.getShortUserName()).toString()
+        },
+        fileStatusUrl);    
+  }
+
+  @Test(timeout=4000)
+  public void testSecureProxyAuthParamsInUrl() throws IOException {
     Configuration conf = new Configuration();
-    final String uri = WebHdfsFileSystem.SCHEME + "://" + "127.0.0.1:9071";
-    // Turn on security
-    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    // fake turning on security so api thinks it should use tokens
+    SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
-        ugi.getUserName()), null, null);
-    FSNamesystem namesystem = mock(FSNamesystem.class);
-    DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
-        86400000, 86400000, 86400000, 86400000, namesystem);
-    dtSecretManager.startThreads();
-    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
-        dtId, dtSecretManager);
-    token.setService(new Text("127.0.0.1:9071"));
-    token.setKind(WebHdfsFileSystem.TOKEN_KIND);
-    ugi.addToken(token);
-    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) FileSystem.get(
-        URI.create(uri), conf);
-    String tokenString = token.encodeToUrlString();
+
+    UserGroupInformation ugi =
+        UserGroupInformation.createRemoteUser("test-user");
+    ugi.setAuthenticationMethod(KERBEROS);
+    ugi = UserGroupInformation.createProxyUser("test-proxy-user", ugi);
+    UserGroupInformation.setLoginUser(ugi);
+
+    WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
     Path fsPath = new Path("/");
+    String tokenString = webhdfs.getDelegationToken().encodeToUrlString();
+
+    // send real+effective
+    URL getTokenUrl = webhdfs.toUrl(GetOpParam.Op.GETDELEGATIONTOKEN, fsPath);
+    checkQueryParams(
+        new String[]{
+            GetOpParam.Op.GETDELEGATIONTOKEN.toQueryString(),
+            new UserParam(ugi.getRealUser().getShortUserName()).toString(),
+            new DoAsParam(ugi.getShortUserName()).toString()
+        },
+        getTokenUrl);
+
+    // send real+effective
     URL renewTokenUrl = webhdfs.toUrl(PutOpParam.Op.RENEWDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
+    checkQueryParams(
+        new String[]{
+            PutOpParam.Op.RENEWDELEGATIONTOKEN.toQueryString(),
+            new UserParam(ugi.getRealUser().getShortUserName()).toString(),
+            new DoAsParam(ugi.getShortUserName()).toString(),
+            new TokenArgumentParam(tokenString).toString(),
+        },
+        renewTokenUrl);
+
+    // send token
     URL cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    Assert.assertEquals(
-        generateUrlQueryPrefix(PutOpParam.Op.RENEWDELEGATIONTOKEN,
-            ugi.getUserName())
-            + "&token=" + tokenString, renewTokenUrl.getQuery());
-    Token<DelegationTokenIdentifier> delegationToken = new Token<DelegationTokenIdentifier>(
-        token);
-    delegationToken.setKind(WebHdfsFileSystem.TOKEN_KIND);
-    Assert.assertEquals(
-        generateUrlQueryPrefix(PutOpParam.Op.CANCELDELEGATIONTOKEN,
-            ugi.getUserName())
-            + "&token="
-            + tokenString
-            + "&"
-            + DelegationParam.NAME
-            + "="
-            + delegationToken.encodeToUrlString(), cancelTokenUrl.getQuery());
+    checkQueryParams(
+        new String[]{
+            PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
+            new UserParam(ugi.getRealUser().getShortUserName()).toString(),
+            new DoAsParam(ugi.getShortUserName()).toString(),
+            new TokenArgumentParam(tokenString).toString(),
+        },
+        cancelTokenUrl);
+    
+    // send token
+    URL fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
+    checkQueryParams(
+        new String[]{
+            GetOpParam.Op.GETFILESTATUS.toQueryString(),
+            new DelegationParam(tokenString).toString()
+        },
+        fileStatusUrl);
+
+    // wipe out internal token to simulate auth always required
+    webhdfs.setDelegationToken(null);
+    
+    // send real+effective
+    cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
+        fsPath, new TokenArgumentParam(tokenString));
+    checkQueryParams(
+        new String[]{
+            PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
+            new UserParam(ugi.getRealUser().getShortUserName()).toString(),
+            new DoAsParam(ugi.getShortUserName()).toString(),
+            new TokenArgumentParam(tokenString).toString()
+        },
+        cancelTokenUrl);
+    
+    // send real+effective
+    fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
+    checkQueryParams(
+        new String[]{
+            GetOpParam.Op.GETFILESTATUS.toQueryString(),
+            new UserParam(ugi.getRealUser().getShortUserName()).toString(),
+            new DoAsParam(ugi.getShortUserName()).toString()
+        },
+        fileStatusUrl);    
+  }
+  
+  private void checkQueryParams(String[] expected, URL url) {
+    Arrays.sort(expected);
+    String[] query = url.getQuery().split("&");
+    Arrays.sort(query);
+    assertEquals(Arrays.toString(expected), Arrays.toString(query));
   }
 
-  private String generateUrlQueryPrefix(HttpOpParam.Op op, String username) {
-    return "op=" + op.toString() + "&user.name=" + username;
+  private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
+      Configuration conf) throws IOException {
+    if (UserGroupInformation.isSecurityEnabled()) {
+      DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
+          ugi.getUserName()), null, null);
+      FSNamesystem namesystem = mock(FSNamesystem.class);
+      DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
+          86400000, 86400000, 86400000, 86400000, namesystem);
+      dtSecretManager.startThreads();
+      Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
+          dtId, dtSecretManager);
+      SecurityUtil.setTokenService(
+          token, NetUtils.createSocketAddr(uri.getAuthority()));
+      token.setKind(WebHdfsFileSystem.TOKEN_KIND);
+      ugi.addToken(token);
+    }
+    return (WebHdfsFileSystem) FileSystem.get(uri, conf);
   }
   
-  @Test
+  @Test(timeout=4000)
   public void testSelectHdfsDelegationToken() throws Exception {
     SecurityUtilTestHelper.setTokenServiceUseIp(true);
 
@@ -186,8 +379,5 @@ public class TestWebHdfsUrl {
     public int getDefaultPort() {
       return super.getDefaultPort();
     }
-    // don't automatically get a token
-    @Override
-    protected void initDelegationToken() throws IOException {}
   }
 }
\ No newline at end of file
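
For context: the new checkQueryParams helper above makes the URL assertions order-insensitive by sorting both the expected parameters and the actual query string before comparing, replacing the brittle string-concatenation checks that were removed. A minimal standalone sketch of the same idiom; the class name and sample URL are illustrative and not part of the patch:

    import java.net.URL;
    import java.util.Arrays;

    // Illustrative sketch of the sorted-comparison idiom used by
    // checkQueryParams in the patch above; not part of the commit.
    public class QueryParamCheckDemo {
      static void checkQueryParams(String[] expected, URL url) {
        Arrays.sort(expected);                        // canonical order
        String[] query = url.getQuery().split("&");   // actual params
        Arrays.sort(query);
        if (!Arrays.equals(expected, query)) {
          throw new AssertionError("expected " + Arrays.toString(expected)
              + " but was " + Arrays.toString(query));
        }
      }

      public static void main(String[] args) throws Exception {
        // parameter order in the URL does not matter
        URL url = new URL(
            "http://127.0.0.1:50070/webhdfs/v1/?user.name=test-user&op=GETFILESTATUS");
        checkQueryParams(
            new String[]{"op=GETFILESTATUS", "user.name=test-user"}, url);
        System.out.println("query parameters match");
      }
    }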

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java?rev=1462652&r1=1462651&r2=1462652&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java Fri Mar 29 21:33:35 2013
@@ -17,18 +17,22 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.util.Arrays;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
 public class TestParam {
   public static final Log LOG = LogFactory.getLog(TestParam.class);
 
@@ -265,4 +269,20 @@ public class TestParam {
     UserParam userParam = new UserParam("a$");
     assertNotNull(userParam.getValue());
   }
+  
+  @Test
+  public void testConcatSourcesParam() {
+    final String[] strings = {"/", "/foo", "/bar"};
+    for(int n = 0; n < strings.length; n++) {
+      final String[] sub = new String[n]; 
+      final Path[] paths = new Path[n];
+      for(int i = 0; i < paths.length; i++) {
+        paths[i] = new Path(sub[i] = strings[i]);
+      }
+
+      final String expected = StringUtils.join(",", Arrays.asList(sub));
+      final ConcatSourcesParam computed = new ConcatSourcesParam(paths);
+      Assert.assertEquals(expected, computed.getValue());
+    }
+  }
 }
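
The new testConcatSourcesParam verifies that ConcatSourcesParam renders a Path array as a comma-joined string ("/,/foo,/bar" for the three sample paths, and the empty string for zero paths). A self-contained sketch of that join, using a plain StringBuilder in place of Hadoop's StringUtils:

    // Illustrative comma-join matching what testConcatSourcesParam
    // expects of ConcatSourcesParam; not part of the commit.
    public class ConcatJoinDemo {
      static String join(String... paths) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < paths.length; i++) {
          if (i > 0) {
            sb.append(',');
          }
          sb.append(paths[i]);
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println(join("/", "/foo", "/bar")); // prints /,/foo,/bar
      }
    }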

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1462652&r1=1462651&r2=1462652&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java Fri Mar 29 21:33:35 2013
@@ -26,12 +26,23 @@ import static org.junit.Assert.fail;
 import java.util.HashMap;
 import java.util.Map;
 
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestNetworkTopology {
+  private static final Log LOG = LogFactory.getLog(TestNetworkTopology.class);
   private final static NetworkTopology cluster = new NetworkTopology();
   private DatanodeDescriptor dataNodes[];
   
@@ -213,4 +224,65 @@ public class TestNetworkTopology {
       }
     }
   }
+
+  @Test(timeout=180000)
+  public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
+    // start a cluster
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      // bad rack topology
+      String racks[] = { "/a/b", "/c" };
+      String hosts[] = { "foo1.example.com", "foo2.example.com" };
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).
+          racks(racks).hosts(hosts).build();
+      cluster.waitActive();
+      
+      NamenodeProtocols nn = cluster.getNameNodeRpc();
+      Assert.assertNotNull(nn);
+      
+      // Wait for one DataNode to register.
+      // The other DataNode will not be able to register because of the rack mismatch.
+      DatanodeInfo[] info;
+      while (true) {
+        info = nn.getDatanodeReport(DatanodeReportType.LIVE);
+        Assert.assertFalse(info.length == 2);
+        if (info.length == 1) {
+          break;
+        }
+        Thread.sleep(1000);
+      }
+      // Set the network topology of the other node to match the network
+      // topology of the node that came up.
+      int validIdx = info[0].getHostName().equals(hosts[0]) ? 0 : 1;
+      int invalidIdx = validIdx == 1 ? 0 : 1;
+      StaticMapping.addNodeToRack(hosts[invalidIdx], racks[validIdx]);
+      LOG.info("datanode " + validIdx + " came up with network location " + 
+        info[0].getNetworkLocation());
+
+      // Restart the DN with the invalid topology and wait for it to register.
+      cluster.restartDataNode(invalidIdx);
+      Thread.sleep(5000);
+      while (true) {
+        info = nn.getDatanodeReport(DatanodeReportType.LIVE);
+        if (info.length == 2) {
+          break;
+        }
+        if (info.length == 0) {
+          LOG.info("got no valid DNs");
+        } else if (info.length == 1) {
+          LOG.info("got one valid DN: " + info[0].getHostName() +
+              " (at " + info[0].getNetworkLocation() + ")");
+        }
+        Thread.sleep(1000);
+      }
+      Assert.assertEquals(info[0].getNetworkLocation(),
+                          info[1].getNetworkLocation());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
 }
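
The new topology test polls getDatanodeReport in Thread.sleep loops until the expected number of live datanodes appears, relying on the @Test timeout to bound the wait. The same polling pattern, factored into a generic helper with an explicit timeout; names here are illustrative, not part of the patch:

    // Illustrative wait-until helper mirroring the polling loops in
    // testInvalidNetworkTopologiesNotCachedInHdfs; not part of the commit.
    public class WaitForDemo {
      interface Check {
        boolean ok() throws Exception;
      }

      static void waitFor(Check check, long intervalMs, long timeoutMs)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!check.ok()) {
          if (System.currentTimeMillis() > deadline) {
            throw new AssertionError(
                "condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(intervalMs);
        }
      }

      public static void main(String[] args) throws Exception {
        final long start = System.currentTimeMillis();
        // toy condition: becomes true after roughly 200 ms
        waitFor(new Check() {
          public boolean ok() {
            return System.currentTimeMillis() - start > 200;
          }
        }, 50, 5000);
        System.out.println("condition met");
      }
    }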

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1462652&r1=1462651&r2=1462652&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Fri Mar 29 21:33:35 2013
@@ -5457,6 +5457,39 @@
         </comparator>
      </comparators>
     </test>
+
+    <test> <!-- TESTED -->
+      <description>checksum: checksum of files(relative path) using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes dir0/data120bytes</command>
+        <command>-fs NAMENODE -checksum dir0/data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^dir0/data120bytes\tMD5-of-0MD5-of-512CRC32C\t000002000000000000000000a58cdc3c0967fc8cddb7fed5960d06f2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^dir0/data15bytes\tMD5-of-0MD5-of-512CRC32C\t0000020000000000000000007267e9528002723a30939aefc238d665</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^dir0/data30bytes\tMD5-of-0MD5-of-512CRC32C\t000002000000000000000000fc09371298117c4943cf089b4bd79c96</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^dir0/data60bytes\tMD5-of-0MD5-of-512CRC32C\t000002000000000000000000009476431d851dd7b0a8d057a404d7b9</expected-output>
+        </comparator>
+      </comparators>
+    </test>
     
     <!-- Tests for moveToLocal -->
     <!-- Not yet implemented -->
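
The expected-output regexes in the new checksum test show the format the checksum shell command prints: path, checksum algorithm name (MD5-of-0MD5-of-512CRC32C), and the hex digest, tab-separated. A rough programmatic counterpart via the FileSystem API, shown only to illustrate where those strings come from; the exact rendering of the returned FileChecksum differs from the shell's output, so treat this as a sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Rough programmatic counterpart of the -checksum shell command;
    // output formatting is simplified, not identical to the shell's.
    public class ChecksumDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        for (String s : args) {
          Path p = new Path(s);
          FileChecksum sum = fs.getFileChecksum(p); // may be null (e.g. local fs)
          System.out.println(p + "\t" + (sum == null ? "NONE" : sum));
        }
      }
    }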


