hadoop-common-commits mailing list archives

From vvasu...@apache.org
Subject [03/50] [abbrv] hadoop git commit: HDFS-9337. Validate required params for WebHDFS requests (Contributed by Jagadesh Kiran N)
Date Wed, 16 Nov 2016 13:46:14 GMT
HDFS-9337. Validate required params for WebHDFS requests (Contributed by Jagadesh Kiran N)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca68f9cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca68f9cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca68f9cb

Branch: refs/heads/YARN-3926
Commit: ca68f9cb5bc78e996c0daf8024cf0e7a4faef12a
Parents: 86ac1ad
Author: Vinayakumar B <vinayakumarb@apache.org>
Authored: Thu Nov 10 16:51:33 2016 +0530
Committer: Vinayakumar B <vinayakumarb@apache.org>
Committed: Thu Nov 10 16:51:33 2016 +0530

----------------------------------------------------------------------
 .../web/resources/NamenodeWebHdfsMethods.java   | 31 +++++++++++++++++---
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md    |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   |  3 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 21 ++++++++++++-
 4 files changed, 51 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca68f9cb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 15195e0..5d9b12a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -424,6 +424,18 @@ public class NamenodeWebHdfsMethods {
         excludeDatanodes, createFlagParam, noredirect);
   }
 
+  /** Validate all required params. */
+  @SuppressWarnings("rawtypes")
+  private void validateOpParams(HttpOpParam<?> op, Param... params) {
+    for (Param param : params) {
+      if (param.getValue() == null || param.getValueString() == null || param
+          .getValueString().isEmpty()) {
+        throw new IllegalArgumentException("Required param " + param.getName()
+            + " for op: " + op.getValueString() + " is null or empty");
+      }
+    }
+  }
+
   /** Handle HTTP PUT request. */
   @PUT
   @Path("{" + UriFsPathParam.NAME + ":.*}")
@@ -576,6 +588,7 @@ public class NamenodeWebHdfsMethods {
     }
     case CREATESYMLINK:
     {
+      validateOpParams(op, destination);
       np.createSymlink(destination.getValue(), fullpath,
           PermissionParam.getDefaultSymLinkFsPermission(),
           createParent.getValue());
@@ -583,6 +596,7 @@ public class NamenodeWebHdfsMethods {
     }
     case RENAME:
     {
+      validateOpParams(op, destination);
       final EnumSet<Options.Rename> s = renameOptions.getValue();
       if (s.isEmpty()) {
         final boolean b = np.rename(fullpath, destination.getValue());
@@ -621,6 +635,7 @@ public class NamenodeWebHdfsMethods {
     }
     case RENEWDELEGATIONTOKEN:
     {
+      validateOpParams(op, delegationTokenArgument);
       final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
       final long expiryTime = np.renewDelegationToken(token);
@@ -629,16 +644,19 @@ public class NamenodeWebHdfsMethods {
     }
     case CANCELDELEGATIONTOKEN:
     {
+      validateOpParams(op, delegationTokenArgument);
       final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
       np.cancelDelegationToken(token);
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case MODIFYACLENTRIES: {
+      validateOpParams(op, aclPermission);
       np.modifyAclEntries(fullpath, aclPermission.getAclPermission(true));
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case REMOVEACLENTRIES: {
+      validateOpParams(op, aclPermission);
       np.removeAclEntries(fullpath, aclPermission.getAclPermission(false));
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
@@ -651,10 +669,12 @@ public class NamenodeWebHdfsMethods {
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case SETACL: {
+      validateOpParams(op, aclPermission);
       np.setAcl(fullpath, aclPermission.getAclPermission(true));
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case SETXATTR: {
+      validateOpParams(op, xattrName, xattrSetFlag);
       np.setXAttr(
           fullpath,
           XAttrHelper.buildXAttr(xattrName.getXAttrName(),
@@ -662,6 +682,7 @@ public class NamenodeWebHdfsMethods {
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case REMOVEXATTR: {
+      validateOpParams(op, xattrName);
       np.removeXAttr(fullpath, XAttrHelper.buildXAttr(xattrName.getXAttrName()));
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
@@ -676,6 +697,7 @@ public class NamenodeWebHdfsMethods {
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case RENAMESNAPSHOT: {
+      validateOpParams(op, oldSnapshotName, snapshotName);
       np.renameSnapshot(fullpath, oldSnapshotName.getValue(),
           snapshotName.getValue());
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
@@ -794,15 +816,13 @@ public class NamenodeWebHdfsMethods {
     }
     case CONCAT:
     {
+      validateOpParams(op, concatSrcs);
       np.concat(fullpath, concatSrcs.getAbsolutePaths());
       return Response.ok().build();
     }
     case TRUNCATE:
     {
-      if (newLength.getValue() == null) {
-        throw new IllegalArgumentException(
-            "newLength parameter is Missing");
-      }
+      validateOpParams(op, newLength);
       // We treat each rest request as a separate client.
       final boolean b = np.truncate(fullpath, newLength.getValue(),
           "DFSClient_" + DFSUtil.getSecureRandom().nextLong());
@@ -1033,6 +1053,7 @@ public class NamenodeWebHdfsMethods {
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case GETXATTRS: {
+      validateOpParams(op, xattrEncoding);
       List<String> names = null;
       if (xattrNames != null) {
         names = Lists.newArrayListWithCapacity(xattrNames.size());
@@ -1054,6 +1075,7 @@ public class NamenodeWebHdfsMethods {
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case CHECKACCESS: {
+      validateOpParams(op, fsAction);
       np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue()));
       return Response.ok().build();
     }
@@ -1222,6 +1244,7 @@ public class NamenodeWebHdfsMethods {
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case DELETESNAPSHOT: {
+      validateOpParams(op, snapshotName);
       np.deleteSnapshot(fullpath, snapshotName.getValue());
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
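
Taken together, these hunks replace scattered per-op null checks (such as the old TRUNCATE guard) with a single varargs helper that each op case calls before dispatching. Below is a minimal, standalone sketch of the same pattern; RequiredParam, ValidateOpParamsSketch, and the main() driver are hypothetical stand-ins for Hadoop's Param/HttpOpParam hierarchy, and only the null-or-empty check mirrors the actual validateOpParams() helper:

    // Hypothetical, self-contained illustration of the validation pattern
    // introduced by this commit; not the actual Hadoop classes.
    public class ValidateOpParamsSketch {

      /** Stand-in for org.apache.hadoop.hdfs.web.resources.Param. */
      interface RequiredParam {
        String getName();
        String getValueString();
      }

      /** Rejects any required param whose value is missing or empty. */
      static void validateOpParams(String op, RequiredParam... params) {
        for (RequiredParam param : params) {
          if (param.getValueString() == null
              || param.getValueString().isEmpty()) {
            throw new IllegalArgumentException("Required param "
                + param.getName() + " for op: " + op + " is null or empty");
          }
        }
      }

      public static void main(String[] args) {
        RequiredParam snapshotName = new RequiredParam() {
          @Override public String getName() { return "snapshotname"; }
          @Override public String getValueString() { return null; } // missing
        };
        // Throws: IllegalArgumentException: Required param snapshotname
        // for op: DELETESNAPSHOT is null or empty
        validateOpParams("DELETESNAPSHOT", snapshotName);
      }
    }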

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca68f9cb/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 1d7d704..eda1350 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -1203,7 +1203,8 @@ Delegation Token Operations
 
 * Submit a HTTP GET request.
 
-        curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKEN&renewer=<USER>&service=<SERVICE>&kind=<KIND>"
+        curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKEN
+                    [&renewer=<USER>][&service=<SERVICE>][&kind=<KIND>]"
 
     The client receives a response with a [`Token` JSON object](#Token_JSON_Schema):
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca68f9cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index c526484..8aa5dc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -287,7 +287,8 @@ public class FSXAttrBaseTest {
     } catch (NullPointerException e) {
       GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
     } catch (RemoteException e) {
-      GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
+      GenericTestUtils.assertExceptionContains("Required param xattr.name for "
+          + "op: SETXATTR is null or empty", e);
     }
     
     // Set xattr with empty name: "user."

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca68f9cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 23d543d..5386a45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -527,6 +528,15 @@ public class TestWebHDFS {
       final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
       Assert.assertTrue(webHdfs.exists(s1path));
 
+      // delete operation with snapshot name as null
+      try {
+        webHdfs.deleteSnapshot(foo, null);
+        fail("Expected IllegalArgumentException");
+      } catch (RemoteException e) {
+        Assert.assertEquals("Required param snapshotname for "
+            + "op: DELETESNAPSHOT is null or empty", e.getLocalizedMessage());
+      }
+
       // delete the two snapshots
       webHdfs.deleteSnapshot(foo, "s1");
       assertFalse(webHdfs.exists(s1path));
@@ -585,6 +595,15 @@ public class TestWebHDFS {
       final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
       Assert.assertTrue(webHdfs.exists(s1path));
 
+      // rename s1 to s2 with oldsnapshotName as null
+      try {
+        webHdfs.renameSnapshot(foo, null, "s2");
+        fail("Expected IllegalArgumentException");
+      } catch (RemoteException e) {
+        Assert.assertEquals("Required param oldsnapshotname for "
+            + "op: RENAMESNAPSHOT is null or empty", e.getLocalizedMessage());
+      }
+
       // rename s1 to s2
       webHdfs.renameSnapshot(foo, "s1", "s2");
       assertFalse(webHdfs.exists(s1path));
@@ -643,7 +662,7 @@ public class TestWebHDFS {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
-          WebHdfsConstants.WEBHDFS_SCHEME);
+            WebHdfsConstants.WEBHDFS_SCHEME);
       Assert.assertNull(webHdfs.getDelegationToken(null));
     } finally {
       if (cluster != null) {
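
As the tests above assert, the new server-side checks reach the client as org.apache.hadoop.ipc.RemoteException. A hedged sketch of handling this in application code follows; the class and method names are hypothetical, the RemoteException accessors are the ones the tests use, and the getClassName() mapping assumes WebHDFS's usual RemoteException translation:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.ipc.RemoteException;

    public class SnapshotParamCheckSketch {
      /** Deletes a snapshot, reporting the server-side validation error. */
      static void deleteSnapshotOrReport(FileSystem webHdfs, Path dir,
          String snapshotName) throws IOException {
        try {
          webHdfs.deleteSnapshot(dir, snapshotName);
        } catch (RemoteException e) {
          // The NameNode's IllegalArgumentException crosses the wire wrapped
          // in a RemoteException; getClassName() recovers the original type.
          if (IllegalArgumentException.class.getName()
              .equals(e.getClassName())) {
            System.err.println("Rejected by WebHDFS: "
                + e.getLocalizedMessage());
            return;
          }
          throw e;
        }
      }
    }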

