drill-commits mailing list archives

From ve...@apache.org
Subject [4/4] drill git commit: DRILL-2514: Part2 - Add impersonation tests using Hadoop MiniDFSCluster.
Date Tue, 21 Apr 2015 22:21:39 GMT
DRILL-2514: Part2 - Add impersonation tests using Hadoop MiniDFSCluster.


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/2a484251
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/2a484251
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/2a484251

Branch: refs/heads/master
Commit: 2a484251be48b0443318626b1364044db5473124
Parents: 40c9040
Author: vkorukanti <venki.korukanti@gmail.com>
Authored: Tue Mar 24 15:12:01 2015 -0700
Committer: vkorukanti <venki.korukanti@gmail.com>
Committed: Tue Apr 21 13:16:01 2015 -0700

----------------------------------------------------------------------
 exec/java-exec/pom.xml                          |  12 +
 .../java/org/apache/drill/BaseTestQuery.java    |  12 +
 .../impersonation/BaseTestImpersonation.java    |  89 ++++++
 .../TestImpersonationMetadata.java              | 300 ++++++++++++++++++
 .../impersonation/TestImpersonationQueries.java | 304 +++++++++++++++++++
 pom.xml                                         | 121 +++++++-
 6 files changed, 837 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/2a484251/exec/java-exec/pom.xml
----------------------------------------------------------------------
diff --git a/exec/java-exec/pom.xml b/exec/java-exec/pom.xml
index f5313ca..82426ef 100644
--- a/exec/java-exec/pom.xml
+++ b/exec/java-exec/pom.xml
@@ -351,6 +351,12 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <classifier>tests</classifier>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-client</artifactId>
     </dependency>
     <dependency>
@@ -363,6 +369,12 @@
       <artifactId>avro-mapred</artifactId>
       <version>1.7.7</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
+      <classifier>tests</classifier>
+    </dependency>
   </dependencies>
 
   <profiles>

http://git-wip-us.apache.org/repos/asf/drill/blob/2a484251/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java b/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java
index 2ff4de7..b02051b 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java
@@ -186,6 +186,7 @@ public class BaseTestQuery extends ExecTest {
    * @param properties
    */
   public static void updateClient(Properties properties) throws Exception {
+    Preconditions.checkState(bits != null && bits[0] != null, "Drillbits are not setup.");
     if (client != null) {
       client.close();
       client = null;
@@ -194,6 +195,17 @@ public class BaseTestQuery extends ExecTest {
     client = QueryTestUtil.createClient(config, serviceSet, MAX_WIDTH_PER_NODE, properties);
   }
 
+  /*
+   * Close the current <i>client</i> and open a new client for the given user. All tests executed
+   * after this method call use the new <i>client</i>.
+   * @param user
+   */
+  public static void updateClient(String user) throws Exception {
+    final Properties props = new Properties();
+    props.setProperty("user", user);
+    updateClient(props);
+  }
+
   protected static BufferAllocator getAllocator() {
     return allocator;
   }
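
As context for the impersonation tests below, here is a minimal sketch of how a test built on BaseTestQuery can use the new updateClient(String) helper; the class name and query are illustrative only and not part of this patch:

    package org.apache.drill;

    import org.junit.Test;

    public class ExampleImpersonationUsage extends BaseTestQuery {
      @Test
      public void runQueryAsGivenUser() throws Exception {
        // Reconnect the shared test client as "someTestUser"; subsequent test() calls run as that user.
        updateClient("someTestUser");
        test("SHOW SCHEMAS");
      }
    }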

http://git-wip-us.apache.org/repos/asf/drill/blob/2a484251/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/BaseTestImpersonation.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/BaseTestImpersonation.java
b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/BaseTestImpersonation.java
new file mode 100644
index 0000000..274f5f7
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/BaseTestImpersonation.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.impersonation;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import org.apache.commons.io.FileUtils;
+import org.apache.drill.PlanTestBase;
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.store.dfs.WorkspaceConfig;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+
+import java.io.File;
+import java.util.Map;
+import java.util.Properties;
+
+public class BaseTestImpersonation extends PlanTestBase {
+  protected static final String processUser = System.getProperty("user.name");
+
+  protected static MiniDFSCluster dfsCluster;
+  protected static Configuration conf;
+  protected static String miniDfsStoragePath;
+
+  protected static void startMiniDfsCluster(String testClass) throws Exception {
+    Preconditions.checkArgument(!Strings.isNullOrEmpty(testClass), "Expected a non-null and non-empty test class name");
+    conf = new Configuration();
+
+    // Set the MiniDfs base dir to be the temp directory of the test, so that all files created within the MiniDfs
+    // are properly cleaned up when the test exits.
+    miniDfsStoragePath = System.getProperty("java.io.tmpdir") + Path.SEPARATOR + testClass;
+    conf.set("hdfs.minidfs.basedir", miniDfsStoragePath);
+
+    // Set the proxyuser settings so that the user who is running the Drillbits/MiniDfs can impersonate other users.
+    conf.set(String.format("hadoop.proxyuser.%s.hosts", processUser), "*");
+    conf.set(String.format("hadoop.proxyuser.%s.groups", processUser), "*");
+
+    // Start the MiniDfs cluster
+    dfsCluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3)
+        .format(true)
+        .build();
+
+    final Properties props = cloneDefaultTestConfigProperties();
+    props.setProperty(ExecConstants.IMPERSONATION_ENABLED, "true");
+
+    updateTestCluster(1, DrillConfig.create(props));
+  }
+
+  protected static void createAndAddWorkspace(FileSystem fs, String name, String path, short permissions, String owner,
+      String group,
+      Map<String, WorkspaceConfig> workspaces) throws Exception {
+    final Path dirPath = new Path(path);
+    FileSystem.mkdirs(fs, dirPath, new FsPermission(permissions));
+    fs.setOwner(dirPath, owner, group);
+    final WorkspaceConfig ws = new WorkspaceConfig(path, true, "parquet");
+    workspaces.put(name, ws);
+  }
+
+  protected static void stopMiniDfsCluster() throws Exception {
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+      dfsCluster = null;
+    }
+
+    if (miniDfsStoragePath != null) {
+      FileUtils.deleteQuietly(new File(miniDfsStoragePath));
+    }
+  }
+}
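
The hadoop.proxyuser.<processUser>.hosts/groups settings above are what allow the single OS user running the Drillbits and the MiniDFS cluster to act on behalf of the test users. For background, a minimal sketch of that Hadoop proxy-user mechanism in isolation (illustrative only, not part of this patch; the class, method, and user name are hypothetical):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class ProxyUserSketch {
      // List "/tmp" on the cluster described by conf while impersonating the given user.
      static FileStatus[] listTmpAsUser(final Configuration conf, final String user) throws Exception {
        final UserGroupInformation proxyUgi =
            UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
        return proxyUgi.doAs(new PrivilegedExceptionAction<FileStatus[]>() {
          @Override
          public FileStatus[] run() throws Exception {
            final FileSystem fs = FileSystem.get(conf); // opened as the proxied user
            return fs.listStatus(new Path("/tmp"));     // HDFS permission checks apply to that user
          }
        });
      }
    }

Without the two proxyuser properties, the NameNode rejects such impersonated calls with an AuthorizationException, which is why the setup must whitelist the process user.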

http://git-wip-us.apache.org/repos/asf/drill/blob/2a484251/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java
b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java
new file mode 100644
index 0000000..411660f
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationMetadata.java
@@ -0,0 +1,300 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.impersonation;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.drill.common.exceptions.UserRemoteException;
+import org.apache.drill.exec.rpc.RpcException;
+import org.apache.drill.exec.store.StoragePluginRegistry;
+import org.apache.drill.exec.store.dfs.FileSystemConfig;
+import org.apache.drill.exec.store.dfs.WorkspaceConfig;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.core.StringContains.containsString;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Tests impersonation on metadata-related queries such as SHOW FILES, SHOW TABLES, CREATE VIEW and CREATE TABLE
+ */
+public class TestImpersonationMetadata extends BaseTestImpersonation {
+  private static final String MINIDFS_STORAGE_PLUGIN_NAME = "minidfs" + TestImpersonationMetadata.class.getSimpleName();
+
+  private static final String user1 = "drillTestUser1";
+  private static final String user2 = "drillTestUser2";
+
+  private static final String group0 = "drillTestGrp0";
+  private static final String group1 = "drillTestGrp1";
+
+  static {
+    UserGroupInformation.createUserForTesting(user1, new String[]{ group1, group0 });
+    UserGroupInformation.createUserForTesting(user2, new String[]{ group1 });
+  }
+
+  @BeforeClass
+  public static void addMiniDfsBasedStorage() throws Exception {
+    startMiniDfsCluster(TestImpersonationMetadata.class.getSimpleName());
+
+    final StoragePluginRegistry pluginRegistry = getDrillbitContext().getStorage();
+    final FileSystemConfig lfsPluginConfig = (FileSystemConfig) pluginRegistry.getPlugin("dfs").getConfig();
+
+    final FileSystemConfig miniDfsPluginConfig = new FileSystemConfig();
+    miniDfsPluginConfig.connection = conf.get(FileSystem.FS_DEFAULT_NAME_KEY);
+
+    Map<String, WorkspaceConfig> workspaces = Maps.newHashMap(lfsPluginConfig.workspaces);
+
+    createTestWorkspaces(workspaces);
+
+    miniDfsPluginConfig.workspaces = workspaces;
+    miniDfsPluginConfig.formats = ImmutableMap.copyOf(lfsPluginConfig.formats);
+    miniDfsPluginConfig.setEnabled(true);
+
+    pluginRegistry.createOrUpdate(MINIDFS_STORAGE_PLUGIN_NAME, miniDfsPluginConfig, true);
+  }
+
+  private static void createTestWorkspaces(Map<String, WorkspaceConfig> workspaces) throws Exception {
+    // Create "/tmp" folder and set permissions to "777"
+    final FileSystem fs = dfsCluster.getFileSystem();
+    final Path tmpPath = new Path("/tmp");
+    fs.delete(tmpPath, true);
+    FileSystem.mkdirs(fs, tmpPath, new FsPermission((short)0777));
+
+    // Create /drillTestGrp0_700 directory with permissions 700 (owned by user running the tests)
+    createAndAddWorkspace(fs, "drillTestGrp0_700", "/drillTestGrp0_700", (short)0700, processUser, group0, workspaces);
+
+    // Create /drillTestGrp0_750 directory with permissions 750 (owned by user running the tests)
+    createAndAddWorkspace(fs, "drillTestGrp0_750", "/drillTestGrp0_750", (short)0750, processUser, group0, workspaces);
+
+    // Create /drillTestGrp0_755 directory with permissions 755 (owned by user running the tests)
+    createAndAddWorkspace(fs, "drillTestGrp0_755", "/drillTestGrp0_755", (short)0755, processUser, group0, workspaces);
+
+    // Create /drillTestGrp0_770 directory with permissions 770 (owned by user running the tests)
+    createAndAddWorkspace(fs, "drillTestGrp0_770", "/drillTestGrp0_770", (short)0770, processUser, group0, workspaces);
+
+    // Create /drillTestGrp0_777 directory with permissions 777 (owned by user running the tests)
+    createAndAddWorkspace(fs, "drillTestGrp0_777", "/drillTestGrp0_777", (short)0777, processUser, group0, workspaces);
+
+    // Create /drillTestGrp1_700 directory with permissions 700 (owned by user1)
+    createAndAddWorkspace(fs, "drillTestGrp1_700", "/drillTestGrp1_700", (short)0700, user1, group1, workspaces);
+  }
+
+  @Test
+  public void testShowFilesInWSWithUserAndGroupPermissionsForQueryUser() throws Exception {
+    updateClient(user1);
+
+    // Try show tables in schema "drillTestGrp1_700" which is owned by "user1"
+    test(String.format("SHOW FILES IN %s.drillTestGrp1_700", MINIDFS_STORAGE_PLUGIN_NAME));
+
+    // Try show tables in schema "drillTestGrp0_750" which is owned by "processUser" and has group permissions for
+    // "user1"
+    test(String.format("SHOW FILES IN %s.drillTestGrp0_750", MINIDFS_STORAGE_PLUGIN_NAME));
+  }
+
+  @Test
+  public void testShowFilesInWSWithOtherPermissionsForQueryUser() throws Exception {
+    updateClient(user2);
+    // Try show tables in schema "drillTestGrp0_755" which is owned by "processUser" and group0. "user2" is not part
+    // of the "group0"
+    test(String.format("SHOW FILES IN %s.drillTestGrp0_755", MINIDFS_STORAGE_PLUGIN_NAME));
+  }
+
+  @Test
+  public void testShowFilesInWSWithNoPermissionsForQueryUser() throws Exception {
+    UserRemoteException ex = null;
+
+    updateClient(user2);
+    try {
+      // Try show tables in schema "drillTestGrp1_700" which is owned by "user1"
+      test(String.format("SHOW FILES IN %s.drillTestGrp1_700", MINIDFS_STORAGE_PLUGIN_NAME));
+    } catch(UserRemoteException e) {
+      ex = e;
+    }
+
+    assertNotNull("UserRemoteException is expected", ex);
+    assertThat(ex.getMessage(),
+        containsString("Permission denied: user=drillTestUser2, " +
+        "access=READ_EXECUTE, inode=\"/drillTestGrp1_700\":drillTestUser1:drillTestGrp1:drwx------"));
+  }
+
+  @Test
+  public void testShowSchemasSanityCheck() throws Exception {
+    test("SHOW SCHEMAS");
+  }
+
+  @Test
+  public void testCreateViewInDirWithUserPermissionsForQueryUser() throws Exception {
+    final String viewSchema = MINIDFS_STORAGE_PLUGIN_NAME + ".drillTestGrp1_700"; // Workspace dir owned by "user1"
+    testCreateViewTestHelper(user1, viewSchema, "view1");
+  }
+
+  @Test
+  public void testCreateViewInDirWithGroupPermissionsForQueryUser() throws Exception {
+    // Workspace dir owned by "processUser", workspace group is "group0" and "user1" is part of "group0"
+    final String viewSchema = MINIDFS_STORAGE_PLUGIN_NAME + ".drillTestGrp0_770";
+    testCreateViewTestHelper(user1, viewSchema, "view1");
+  }
+
+  @Test
+  public void testCreateViewInDirWithOtherPermissionsForQueryUser() throws Exception {
+    // Workspace dir owned by "processUser", workspace group is "group0" and "user2" is not part of "group0"
+    final String viewSchema = MINIDFS_STORAGE_PLUGIN_NAME + ".drillTestGrp0_777";
+    testCreateViewTestHelper(user2, viewSchema, "view1");
+  }
+
+  private static void testCreateViewTestHelper(String user, String viewSchema,
+      String viewName) throws Exception {
+    try {
+      updateClient(user);
+
+      test("USE " + viewSchema);
+
+      test("CREATE VIEW " + viewName + " AS SELECT " +
+          "c_custkey, c_nationkey FROM cp.`tpch/customer.parquet` ORDER BY c_custkey;");
+
+      testBuilder()
+          .sqlQuery("SHOW TABLES")
+          .unOrdered()
+          .baselineColumns("TABLE_SCHEMA", "TABLE_NAME")
+          .baselineValues(viewSchema, viewName)
+          .go();
+
+      test("SHOW FILES");
+
+      testBuilder()
+          .sqlQuery("SELECT * FROM " + viewName + " LIMIT 1")
+          .ordered()
+          .baselineColumns("c_custkey", "c_nationkey")
+          .baselineValues(1, 15)
+          .go();
+
+    } finally {
+      test("DROP VIEW " + viewSchema + "." + viewName);
+    }
+  }
+
+  @Test
+  public void testCreateViewInWSWithNoPermissionsForQueryUser() throws Exception {
+    // Workspace dir owned by "processUser", workspace group is "group0" and "user2" is not part of "group0"
+    final String viewSchema = MINIDFS_STORAGE_PLUGIN_NAME + ".drillTestGrp0_755";
+    final String viewName = "view1";
+
+    updateClient(user2);
+
+    test("USE " + viewSchema);
+
+    test("CREATE VIEW " + viewName + " AS SELECT " +
+        "c_custkey, c_nationkey FROM cp.`tpch/customer.parquet` ORDER BY c_custkey;");
+
+    // SHOW TABLES is expected to return no records as view creation fails above.
+    testBuilder()
+        .sqlQuery("SHOW TABLES")
+        .expectsEmptyResultSet()
+        .go();
+
+    test("SHOW FILES");
+  }
+
+  @Test
+  public void testCreateTableInDirWithUserPermissionsForQueryUser() throws Exception {
+    final String tableWS = "drillTestGrp1_700"; // Workspace dir owned by "user1"
+    testCreateTableTestHelper(user1, tableWS, "table1");
+  }
+
+  @Test
+  public void testCreateTableInDirWithGroupPermissionsForQueryUser() throws Exception {
+    // Workspace dir owned by "processUser", workspace group is "group0" and "user1" is part of "group0"
+    final String tableWS = "drillTestGrp0_770";
+    testCreateTableTestHelper(user1, tableWS, "table1");
+  }
+
+  @Test
+  public void testCreateTableInDirWithOtherPermissionsForQueryUser() throws Exception {
+    // Workspace dir owned by "processUser", workspace group is "group0" and "user2" is not part of "group0"
+    final String tableWS = "drillTestGrp0_777";
+    testCreateTableTestHelper(user2, tableWS, "table1");
+  }
+
+  private static void testCreateTableTestHelper(String user, String tableWS,
+      String tableName) throws Exception {
+    try {
+      updateClient(user);
+
+      test("USE " + Joiner.on(".").join(MINIDFS_STORAGE_PLUGIN_NAME, tableWS));
+
+      test("CREATE TABLE " + tableName + " AS SELECT " +
+          "c_custkey, c_nationkey FROM cp.`tpch/customer.parquet` ORDER BY c_custkey;");
+
+      test("SHOW FILES");
+
+      testBuilder()
+          .sqlQuery("SELECT * FROM " + tableName + " LIMIT 1")
+          .ordered()
+          .baselineColumns("c_custkey", "c_nationkey")
+          .baselineValues(1, 15)
+          .go();
+
+    } finally {
+      // There is no drop table; we need to delete the table directory through the FileSystem object
+      final FileSystem fs = dfsCluster.getFileSystem();
+      final Path tablePath = new Path(Path.SEPARATOR + tableWS + Path.SEPARATOR + tableName);
+      if (fs.exists(tablePath)) {
+        fs.delete(tablePath, true);
+      }
+    }
+  }
+
+  @Test
+  public void testCreateTableInWSWithNoPermissionsForQueryUser() throws Exception {
+    // Workspace dir owned by "processUser", workspace group is "group0" and "user2" is not part of "group0"
+    final String tableWS = "drillTestGrp0_755";
+    final String tableName = "table1";
+
+    UserRemoteException ex = null;
+
+    try {
+      updateClient(user2);
+
+      test("USE " + Joiner.on(".").join(MINIDFS_STORAGE_PLUGIN_NAME, tableWS));
+
+      test("CREATE TABLE " + tableName + " AS SELECT " +
+          "c_custkey, c_nationkey FROM cp.`tpch/customer.parquet` ORDER BY c_custkey;");
+    } catch(UserRemoteException e) {
+      ex = e;
+    }
+
+    assertNotNull("UserRemoteException is expected", ex);
+    assertThat(ex.getMessage(),
+        containsString("Permission denied: user=drillTestUser2, access=WRITE, inode=\"/drillTestGrp0_755\""));
+  }
+
+  @AfterClass
+  public static void removeMiniDfsBasedStorage() throws Exception {
+    getDrillbitContext().getStorage().deletePlugin(MINIDFS_STORAGE_PLUGIN_NAME);
+    stopMiniDfsCluster();
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/2a484251/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationQueries.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationQueries.java
b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationQueries.java
new file mode 100644
index 0000000..14392c9
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/impersonation/TestImpersonationQueries.java
@@ -0,0 +1,304 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.impersonation;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.drill.common.exceptions.UserRemoteException;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.dotdrill.DotDrillType;
+import org.apache.drill.exec.rpc.RpcException;
+import org.apache.drill.exec.store.StoragePluginRegistry;
+import org.apache.drill.exec.store.dfs.FileSystemConfig;
+import org.apache.drill.exec.store.dfs.WorkspaceConfig;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.core.StringContains.containsString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Test queries involving direct impersonation and multilevel impersonation including join queries where each side is
+ * a nested view.
+ */
+public class TestImpersonationQueries extends BaseTestImpersonation {
+  private static final String MINIDFS_STORAGE_PLUGIN_NAME = "minidfs" + TestImpersonationQueries.class.getSimpleName();
+
+  private static final String[] org1Users = { "user0_1", "user1_1", "user2_1", "user3_1", "user4_1", "user5_1" };
+  private static final String[] org1Groups = { "group0_1", "group1_1", "group2_1", "group3_1", "group4_1", "group5_1" };
+  private static final String[] org2Users = { "user0_2", "user1_2", "user2_2", "user3_2", "user4_2", "user5_2" };
+  private static final String[] org2Groups = { "group0_2", "group1_2", "group2_2", "group3_2", "group4_2", "group5_2" };
+
+  static {
+    // "user0_1" belongs to "groups0_1". From "user1_1" onwards each user belongs to corresponding
group and the group
+    // before it, i.e "user1_1" belongs to "group1_1" and "group0_1" and so on.
+    UserGroupInformation.createUserForTesting(org1Users[0], new String[] { org1Groups[0]
});
+    for(int i=1; i<org1Users.length; i++) {
+      UserGroupInformation.createUserForTesting(org1Users[i], new String[] { org1Groups[i],
org1Groups[i-1] });
+    }
+
+    UserGroupInformation.createUserForTesting(org2Users[0], new String[] { org2Groups[0]
});
+    for(int i=1; i<org2Users.length; i++) {
+      UserGroupInformation.createUserForTesting(org2Users[i], new String[] { org2Groups[i],
org2Groups[i-1] });
+    }
+  }
+
+  @BeforeClass
+  public static void addMiniDfsBasedStorageAndGenerateTestData() throws Exception {
+    startMiniDfsCluster(TestImpersonationQueries.class.getSimpleName());
+
+    final StoragePluginRegistry pluginRegistry = getDrillbitContext().getStorage();
+    final FileSystemConfig lfsPluginConfig = (FileSystemConfig) pluginRegistry.getPlugin("dfs").getConfig();
+
+    final FileSystemConfig miniDfsPluginConfig = new FileSystemConfig();
+    miniDfsPluginConfig.connection = conf.get(FileSystem.FS_DEFAULT_NAME_KEY);
+
+    Map<String, WorkspaceConfig> workspaces = Maps.newHashMap(lfsPluginConfig.workspaces);
+
+    createTestWorkspaces(workspaces);
+
+    miniDfsPluginConfig.workspaces = workspaces;
+    miniDfsPluginConfig.formats = ImmutableMap.copyOf(lfsPluginConfig.formats);
+    miniDfsPluginConfig.setEnabled(true);
+
+    pluginRegistry.createOrUpdate(MINIDFS_STORAGE_PLUGIN_NAME, miniDfsPluginConfig, true);
+
+    // Create test tables/views
+
+    // Create copy of "lineitem" table in /user/user0_1 owned by user0_1:group0_1 with permissions
750. Only user0_1
+    // has access to data in created "lineitem" table.
+    createTestTable(org1Users[0], org1Groups[0], "lineitem");
+
+    // Create copy of "orders" table in /user/user0_2 owned by user0_2:group0_2 with permissions
750. Only user0_2
+    // has access to data in created "orders" table.
+    createTestTable(org2Users[0], org2Groups[0], "orders");
+
+    createNestedTestViewsOnLineItem();
+    createNestedTestViewsOnOrders();
+  }
+
+  private static String getUserHome(String user) {
+    return "/user/" + user;
+  }
+
+  // Return the user workspace for given user.
+  private static String getWSSchema(String user) {
+    return MINIDFS_STORAGE_PLUGIN_NAME + "." + user;
+  }
+
+  private static void createTestWorkspaces(Map<String, WorkspaceConfig> workspaces) throws Exception {
+    // Create "/tmp" folder and set permissions to "777"
+    final FileSystem fs = dfsCluster.getFileSystem();
+    final Path tmpPath = new Path("/tmp");
+    fs.delete(tmpPath, true);
+    FileSystem.mkdirs(fs, tmpPath, new FsPermission((short)0777));
+
+    // create user directory (ex. "/user/user0_1", with ownership "user0_1:group0_1" and perms 755) for every user.
+    for(int i=0; i<org1Users.length; i++) {
+      final String user = org1Users[i];
+      final String group = org1Groups[i];
+      createAndAddWorkspace(fs, user, getUserHome(user), (short)0755, user, group, workspaces);
+    }
+
+    // create user directory (ex. "/user/user0_2", with ownership "user0_2:group0_2" and perms 755) for every user.
+    for(int i=0; i<org2Users.length; i++) {
+      final String user = org2Users[i];
+      final String group = org2Groups[i];
+      createAndAddWorkspace(fs, user, getUserHome(user), (short)0755, user, group, workspaces);
+    }
+  }
+
+  private static void createTestTable(String user, String group, String tableName) throws Exception {
+    updateClient(user);
+    test("USE " + getWSSchema(user));
+    test(String.format("CREATE TABLE %s as SELECT * FROM cp.`tpch/%s.parquet`;", tableName,
tableName));
+
+    // Change the ownership and permissions manually. Currently there is no option to specify the default permissions
+    // and ownership for new tables.
+    final Path tablePath = new Path(getUserHome(user), tableName);
+    final FileSystem fs = dfsCluster.getFileSystem();
+
+    fs.setOwner(tablePath, user, group);
+    fs.setPermission(tablePath, new FsPermission((short)0750));
+  }
+
+  private static void createNestedTestViewsOnLineItem() throws Exception {
+    // Input table "lineitem"
+    // /user/user0_1     lineitem      750    user0_1:group0_1
+
+    // Create a view on top of lineitem table
+    // /user/user1_1    u1_lineitem    750    user1_1:group1_1
+    createView(org1Users[1], org1Groups[1], (short)0750, "u1_lineitem", getWSSchema(org1Users[0]), "lineitem");
+
+    // Create a view on top of u1_lineitem view
+    // /user/user2_1    u2_lineitem    750    user2_1:group2_1
+    createView(org1Users[2], org1Groups[2], (short)0750, "u2_lineitem", getWSSchema(org1Users[1]), "u1_lineitem");
+
+    // Create a view on top of u2_lineitem view
+    // /user/user2_1    u22_lineitem    750    user2_1:group2_1
+    createView(org1Users[2], org1Groups[2], (short)0750, "u22_lineitem", getWSSchema(org1Users[2]), "u2_lineitem");
+
+    // Create a view on top of u22_lineitem view
+    // /user/user3_1    u3_lineitem    750    user3_1:group3_1
+    createView(org1Users[3], org1Groups[3], (short)0750, "u3_lineitem", getWSSchema(org1Users[2]), "u22_lineitem");
+
+    // Create a view on top of u3_lineitem view
+    // /user/user4_1    u4_lineitem    755    user4_1:group4_1
+    createView(org1Users[4], org1Groups[4], (short)0755, "u4_lineitem", getWSSchema(org1Users[3]), "u3_lineitem");
+  }
+
+  private static void createNestedTestViewsOnOrders() throws Exception {
+    // Input table "orders"
+    // /user/user0_2     orders      750    user0_2:group0_2
+
+    // Create a view on top of orders table
+    // /user/user1_2    u1_orders    750    user1_2:group1_2
+    createView(org2Users[1], org2Groups[1], (short)0750, "u1_orders", getWSSchema(org2Users[0]), "orders");
+
+    // Create a view on top of u1_orders view
+    // /user/user2_2    u2_orders    750    user2_2:group2_2
+    createView(org2Users[2], org2Groups[2], (short)0750, "u2_orders", getWSSchema(org2Users[1]), "u1_orders");
+
+    // Create a view on top of u2_orders view
+    // /user/user2_2    u22_orders    750    user2_2:group2_2
+    createView(org2Users[2], org2Groups[2], (short)0750, "u22_orders", getWSSchema(org2Users[2]), "u2_orders");
+
+    // Create a view on top of u22_orders view (permissions of this view (755) are different from permissions of the
+    // corresponding view in "lineitem" nested views, so that a join query of the "lineitem" and "orders" nested views
+    // can run without exceeding the maximum number of allowed user hops in chained impersonation).
+    // /user/user3_2    u3_orders    755    user3_2:group3_2
+    createView(org2Users[3], org2Groups[3], (short)0755, "u3_orders", getWSSchema(org2Users[2]), "u22_orders");
+
+    // Create a view on top of u3_orders view
+    // /user/user4_2    u4_orders    755    user4_2:group4_2
+    createView(org2Users[4], org2Groups[4], (short)0755, "u4_orders", getWSSchema(org2Users[3]), "u3_orders");
+  }
+
+  private static void createView(final String viewOwner, final String viewGroup, final short viewPerms,
+      final String newViewName, final String fromSourceSchema, final String fromSourceTableName) throws Exception {
+    updateClient(viewOwner);
+    test(String.format("ALTER SESSION SET `%s`='%o';", ExecConstants.NEW_VIEW_DEFAULT_PERMS_KEY,
viewPerms));
+    test(String.format("CREATE VIEW %s.%s AS SELECT * FROM %s.%s;",
+        getWSSchema(viewOwner), newViewName, fromSourceSchema, fromSourceTableName));
+
+    // Verify the view file created has the expected permissions and ownership
+    Path viewFilePath = new Path(getUserHome(viewOwner), newViewName + DotDrillType.VIEW.getEnding());
+    FileStatus status = dfsCluster.getFileSystem().getFileStatus(viewFilePath);
+    assertEquals(viewGroup, status.getGroup());
+    assertEquals(viewOwner, status.getOwner());
+    assertEquals(viewPerms, status.getPermission().toShort());
+  }
+
+  @Test
+  public void testDirectImpersonation_HasUserReadPermissions() throws Exception {
+    // Table lineitem is owned by "user0_1:group0_1" with permissions 750. Try to read the table as "user0_1". We
+    // shouldn't expect any errors.
+    updateClient(org1Users[0]);
+    test(String.format("SELECT * FROM %s.lineitem ORDER BY l_orderkey LIMIT 1", getWSSchema(org1Users[0])));
+  }
+
+  @Test
+  public void testDirectImpersonation_HasGroupReadPermissions() throws Exception {
+    // Table lineitem is owned by "user0_1:group0_1" with permissions 750. Try to read the table as "user1_1". We
+    // shouldn't expect any errors as "user1_1" is part of the "group0_1"
+    updateClient(org1Users[1]);
+    test(String.format("SELECT * FROM %s.lineitem ORDER BY l_orderkey LIMIT 1", getWSSchema(org1Users[0])));
+  }
+
+  @Test
+  public void testDirectImpersonation_NoReadPermissions() throws Exception {
+    UserRemoteException ex = null;
+    try {
+      // Table lineitem is owned by "user0_1:group0_1" with permissions 750. Now try to read the table as "user2_1". We
+      // should expect a permission denied error as "user2_1" is not part of the "group0_1"
+      updateClient(org1Users[2]);
+      test(String.format("SELECT * FROM %s.lineitem ORDER BY l_orderkey LIMIT 1", getWSSchema(org1Users[0])));
+    } catch(UserRemoteException e) {
+      ex = e;
+    }
+
+    assertNotNull("UserRemoteException is expected", ex);
+    assertThat(ex.getMessage(), containsString("PERMISSION ERROR: " +
+            "Not authorized to read table [lineitem] in schema [minidfsTestImpersonationQueries.user0_1]"));
+  }
+
+
+  @Test
+  public void testMultiLevelImpersonationEqualToMaxUserHops() throws Exception {
+    updateClient(org1Users[4]);
+    test(String.format("SELECT * from %s.u4_lineitem LIMIT 1;", getWSSchema(org1Users[4])));
+  }
+
+  @Test
+  public void testMultiLevelImpersonationExceedsMaxUserHops() throws Exception {
+    UserRemoteException ex = null;
+
+    try {
+      updateClient(org1Users[5]);
+      test(String.format("SELECT * from %s.u4_lineitem LIMIT 1;", getWSSchema(org1Users[4])));
+    } catch(UserRemoteException e) {
+      ex = e;
+    }
+
+    assertNotNull("UserRemoteException is expected", ex);
+    assertThat(ex.getMessage(),
+        containsString("Cannot issue token for view expansion as issuing the token exceeds
the maximum allowed number " +
+            "of user hops (3) in chained impersonation"));
+  }
+
+  @Test
+  public void testMultiLevelImpersonationJoinEachSideReachesMaxUserHops() throws Exception {
+    updateClient(org1Users[4]);
+    test(String.format("SELECT * from %s.u4_lineitem l JOIN %s.u3_orders o ON l.l_orderkey
= o.o_orderkey LIMIT 1;",
+        getWSSchema(org1Users[4]), getWSSchema(org2Users[3])));
+  }
+
+  @Test
+  public void testMultiLevelImpersonationJoinOneSideExceedsMaxUserHops() throws Exception {
+    UserRemoteException ex = null;
+
+    try {
+      updateClient(org1Users[4]);
+      test(String.format("SELECT * from %s.u4_lineitem l JOIN %s.u4_orders o ON l.l_orderkey
= o.o_orderkey LIMIT 1;",
+          getWSSchema(org1Users[4]), getWSSchema(org2Users[4])));
+    } catch(UserRemoteException e) {
+      ex = e;
+    }
+
+    assertNotNull("UserRemoteException is expected", ex);
+    assertThat(ex.getMessage(),
+        containsString("Cannot issue token for view expansion as issuing the token exceeds
the maximum allowed number " +
+            "of user hops (3) in chained impersonation"));
+  }
+
+  @AfterClass
+  public static void removeMiniDfsBasedStorage() throws Exception {
+    getDrillbitContext().getStorage().deletePlugin(MINIDFS_STORAGE_PLUGIN_NAME);
+    stopMiniDfsCluster();
+  }
+}
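
The two "exceeds the maximum allowed number of user hops (3)" assertions above exercise Drill's chained-impersonation limit, which defaults to 3 hops per the expected error messages. As a hedged sketch only: assuming ExecConstants.IMPERSONATION_MAX_CHAINED_USER_HOPS is the boot-option key for that limit (the constant name is an assumption, not part of this patch), a test cluster could be started with a larger limit the same way the setup above enables impersonation:

    package org.apache.drill.exec.impersonation;

    import java.util.Properties;

    import org.apache.drill.common.config.DrillConfig;
    import org.apache.drill.exec.ExecConstants;

    // Hypothetical variant of the cluster setup in BaseTestImpersonation; illustrative only.
    public class RaisedHopLimitSetupSketch extends BaseTestImpersonation {
      protected static void startDrillClusterWithRaisedHopLimit() throws Exception {
        final Properties props = cloneDefaultTestConfigProperties();
        props.setProperty(ExecConstants.IMPERSONATION_ENABLED, "true");
        // Assumption: this ExecConstants key is the boot option governing the chained-impersonation hop limit (default 3).
        props.setProperty(ExecConstants.IMPERSONATION_MAX_CHAINED_USER_HOPS, "5");
        updateTestCluster(1, DrillConfig.create(props));
      }
    }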

http://git-wip-us.apache.org/repos/asf/drill/blob/2a484251/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 91707fa..558646e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -32,7 +32,7 @@
     <target.gen.source.path>${project.basedir}/target/generated-sources</target.gen.source.path>
     <proto.cas.path>${project.basedir}/src/main/protobuf/</proto.cas.path>
     <dep.junit.version>4.11</dep.junit.version>
-    <dep.slf4j.version>1.7.5</dep.slf4j.version>
+    <dep.slf4j.version>1.7.6</dep.slf4j.version>
     <parquet.version>1.6.0rc3-drill-r0.1</parquet.version>
   </properties>
 
@@ -705,6 +705,99 @@
           </dependency>
           <dependency>
             <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+            <version>2.4.1</version>
+            <scope>test</scope>
+            <classifier>tests</classifier>
+            <exclusions>
+              <exclusion>
+                <groupId>org.mortbay.jetty</groupId>
+                <artifactId>servlet-api</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.mortbay.jetty</groupId>
+                <artifactId>servlet-api-2.5</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.mortbay.jetty</groupId>
+                <artifactId>servlet-api-2.5</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>javax.servlet</groupId>
+                <artifactId>servlet-api</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.mortbay.jetty</groupId>
+                <artifactId>jetty-util</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-yarn-api</artifactId>
+              </exclusion>
+              <exclusion>
+                <artifactId>jets3t</artifactId>
+                <groupId>net.java.dev.jets3t</groupId>
+              </exclusion>
+              <exclusion>
+                <artifactId>log4j</artifactId>
+                <groupId>log4j</groupId>
+              </exclusion>
+              <exclusion>
+                <artifactId>slf4j-log4j12</artifactId>
+                <groupId>org.slf4j</groupId>
+              </exclusion>
+              <exclusion>
+                <artifactId>mockito-all</artifactId>
+                <groupId>org.mockito</groupId>
+              </exclusion>
+              <exclusion>
+                <artifactId>commons-logging-api</artifactId>
+                <groupId>commons-logging</groupId>
+              </exclusion>
+              <exclusion>
+                <artifactId>commons-logging</artifactId>
+                <groupId>commons-logging</groupId>
+              </exclusion>
+              <exclusion>
+                <groupId>com.sun.jersey</groupId>
+                <artifactId>jersey-core</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>com.sun.jersey</groupId>
+                <artifactId>jersey-server</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>com.sun.jersey</groupId>
+                <artifactId>jersey-json</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>com.sun.jersey</groupId>
+                <artifactId>jersey-client</artifactId>
+              </exclusion>
+              <exclusion>
+                <artifactId>core</artifactId>
+                <groupId>org.eclipse.jdt</groupId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.codehaus.jackson</groupId>
+                <artifactId>jackson-core-asl</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.codehaus.jackson</groupId>
+                <artifactId>jackson-mapper-asl</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.codehaus.jackson</groupId>
+                <artifactId>jackson-xc</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.codehaus.jackson</groupId>
+                <artifactId>jackson-jaxrs</artifactId>
+              </exclusion>
+            </exclusions>
+          </dependency>
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-client</artifactId>
             <version>2.4.1</version>
             <exclusions>
@@ -999,6 +1092,32 @@
               </exclusion>
             </exclusions>
           </dependency>
+          <!-- Test Dependencies -->
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-hdfs</artifactId>
+            <version>2.4.1</version>
+            <scope>test</scope>
+            <classifier>tests</classifier>
+            <exclusions>
+              <exclusion>
+                <groupId>commons-logging</groupId>
+                <artifactId>commons-logging</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.mortbay.jetty</groupId>
+                <artifactId>servlet-api</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>javax.servlet</groupId>
+                <artifactId>servlet-api</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>log4j</groupId>
+                <artifactId>log4j</artifactId>
+              </exclusion>
+            </exclusions>
+          </dependency>
         </dependencies>
       </dependencyManagement>
     </profile>

