From: cnauroth@apache.org
To: common-commits@hadoop.apache.org
Date: Tue, 06 Sep 2016 16:43:37 -0000
Subject: [04/50] [abbrv] hadoop git commit: HDFS-10584. Allow long-running Mover tool to login with keytab. Contributed by Rakesh R.

HDFS-10584. Allow long-running Mover tool to login with keytab. Contributed by Rakesh R.
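For operators, a minimal hdfs-site.xml fragment that enables the new keytab-based login might look like the sketch below. The property names are the ones introduced by this patch; the keytab path, principal, and realm are illustrative placeholders.

    <!-- Illustrative values; only the property names are defined by this patch. -->
    <property>
      <name>dfs.mover.keytab.enabled</name>
      <value>true</value>
    </property>
    <property>
      <name>dfs.mover.keytab.file</name>
      <value>/etc/security/keytabs/mover.keytab</value>
    </property>
    <property>
      <name>dfs.mover.kerberos.principal</name>
      <value>mover/_HOST@EXAMPLE.COM</value>
    </property>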
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e806db71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e806db71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e806db71

Branch: refs/heads/HADOOP-13345
Commit: e806db719053a5b2a7b14f47e6f2962e70008d25
Parents: a445b82
Author: Zhe Zhang
Authored: Fri Aug 26 16:43:25 2016 -0700
Committer: Zhe Zhang
Committed: Fri Aug 26 16:43:45 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  23 +-
 .../src/main/resources/hdfs-default.xml         |  40 ++++
 .../hadoop/hdfs/server/mover/TestMover.java     | 212 +++++++++++++++----
 4 files changed, 245 insertions(+), 39 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e806db71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b4cce4a..2eff3b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -487,6 +487,15 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_MOVER_MOVERTHREADS_DEFAULT = 1000;
   public static final String DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY = "dfs.mover.retry.max.attempts";
   public static final int DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
+  public static final String DFS_MOVER_KEYTAB_ENABLED_KEY =
+      "dfs.mover.keytab.enabled";
+  public static final boolean DFS_MOVER_KEYTAB_ENABLED_DEFAULT = false;
+  public static final String DFS_MOVER_ADDRESS_KEY = "dfs.mover.address";
+  public static final String DFS_MOVER_ADDRESS_DEFAULT= "0.0.0.0:0";
+  public static final String DFS_MOVER_KEYTAB_FILE_KEY =
+      "dfs.mover.keytab.file";
+  public static final String DFS_MOVER_KERBEROS_PRINCIPAL_KEY =
+      "dfs.mover.kerberos.principal";

   public static final String DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e806db71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index cd37b15b..fdb6cfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -47,7 +47,10 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
@@ -57,6 +60,7 @@ import java.io.BufferedReader;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.text.DateFormat;
 import java.util.*;
@@ -579,6 +583,22 @@ public class Mover {
     }
   }

+  private static void checkKeytabAndInit(Configuration conf)
+      throws IOException {
+    if (conf.getBoolean(DFSConfigKeys.DFS_MOVER_KEYTAB_ENABLED_KEY,
+        DFSConfigKeys.DFS_MOVER_KEYTAB_ENABLED_DEFAULT)) {
+      LOG.info("Keytab is configured, will login using keytab.");
+      UserGroupInformation.setConfiguration(conf);
+      String addr = conf.get(DFSConfigKeys.DFS_MOVER_ADDRESS_KEY,
+          DFSConfigKeys.DFS_MOVER_ADDRESS_DEFAULT);
+      InetSocketAddress socAddr = NetUtils.createSocketAddr(addr, 0,
+          DFSConfigKeys.DFS_MOVER_ADDRESS_KEY);
+      SecurityUtil.login(conf, DFSConfigKeys.DFS_MOVER_KEYTAB_FILE_KEY,
+          DFSConfigKeys.DFS_MOVER_KERBEROS_PRINCIPAL_KEY,
+          socAddr.getHostName());
+    }
+  }
+
   static int run(Map<URI, List<Path>> namenodes, Configuration conf)
       throws IOException, InterruptedException {
     final long sleeptime =
@@ -588,7 +608,8 @@ public class Mover {
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
     AtomicInteger retryCount = new AtomicInteger(0);
     LOG.info("namenodes = " + namenodes);
-
+
+    checkKeytabAndInit(conf);
     List<NameNodeConnector> connectors = Collections.emptyList();
     try {
       connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
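The login itself is a thin wrapper around SecurityUtil.login, as checkKeytabAndInit above shows: it resolves a hostname from dfs.mover.address, then logs in from the configured keytab, substituting _HOST in the principal. A rough self-contained sketch of the same pattern for any long-running HDFS tool follows; the mytool.* key names and the hostname are assumptions for illustration, not part of this patch.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.UserGroupInformation;

    public class KeytabLoginSketch {
      /** Log in from a keytab if the (illustrative) tool config asks for it. */
      public static void loginIfConfigured(Configuration conf)
          throws IOException {
        if (conf.getBoolean("mytool.keytab.enabled", false)) {
          // Let UGI pick up hadoop.security.authentication from conf.
          UserGroupInformation.setConfiguration(conf);
          // Reads the keytab path from "mytool.keytab.file" and the principal
          // from "mytool.kerberos.principal", replacing _HOST with the given
          // hostname before performing the Kerberos login.
          SecurityUtil.login(conf, "mytool.keytab.file",
              "mytool.kerberos.principal", "localhost");
        }
      }
    }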
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e806db71/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index ce880d3..b9a0812 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3712,6 +3712,46 @@
 </property>

+<property>
+  <name>dfs.mover.keytab.enabled</name>
+  <value>false</value>
+  <description>
+    Set to true to enable login using a keytab for Kerberized Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>dfs.mover.address</name>
+  <value>0.0.0.0:0</value>
+  <description>
+    The hostname used for a keytab based Kerberos login. Keytab based login
+    can be enabled with dfs.mover.keytab.enabled.
+  </description>
+</property>
+
+<property>
+  <name>dfs.mover.keytab.file</name>
+  <value></value>
+  <description>
+    The keytab file used by the Mover to login as its
+    service principal. The principal name is configured with
+    dfs.mover.kerberos.principal. Keytab based login can be
+    enabled with dfs.mover.keytab.enabled.
+  </description>
+</property>
+
+<property>
+  <name>dfs.mover.kerberos.principal</name>
+  <value></value>
+  <description>
+    The Mover principal. This is typically set to
+    mover/_HOST@REALM.TLD. The Mover will substitute _HOST with its
+    own fully qualified hostname at startup. The _HOST placeholder
+    allows using the same configuration setting on different servers.
+    Keytab based login can be enabled with dfs.mover.keytab.enabled.
+  </description>
+</property>
+
 <property>
   <name>dfs.namenode.audit.log.async</name>
   <value>false</value>
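With the properties above in place the Mover is started as usual (for example with the hdfs mover -p <paths> CLI) and performs its own login, so no external kinit or ticket-renewal loop is needed. A minimal programmatic sketch mirroring the tests below is shown here; note it would have to live in the org.apache.hadoop.hdfs.server.mover package because Mover.Cli is package-private, and the target path is illustrative.

    package org.apache.hadoop.hdfs.server.mover;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.util.ToolRunner;

    public class RunMoverSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // With dfs.mover.keytab.enabled=true, Mover.run() calls
        // checkKeytabAndInit and logs in from the keytab before connecting.
        int rc = ToolRunner.run(conf, new Mover.Cli(),
            new String[] {"-p", "/cold-data"});  // illustrative path
        System.exit(rc);
      }
    }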
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e806db71/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index f382243..b7b750b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -17,21 +17,47 @@
  */
 package org.apache.hadoop.hdfs.server.mover;

+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MOVER_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MOVER_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MOVER_KEYTAB_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MOVER_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+
+import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.util.*;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
 import java.util.concurrent.atomic.AtomicInteger;

-import com.google.common.collect.Maps;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
@@ -47,14 +73,27 @@ import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.mover.Mover.MLocation;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

-public class TestMover {
+import com.google.common.base.Supplier;
+import com.google.common.collect.Maps;

-  static final int DEFAULT_BLOCK_SIZE = 100;
+public class TestMover {
+  private static final Logger LOG = LoggerFactory.getLogger(TestMover.class);
+  private static final int DEFAULT_BLOCK_SIZE = 100;
+  private File keytabFile;
+  private String principal;

   static {
     TestBalancer.initTestSetup();
@@ -116,14 +155,11 @@ public class TestMover {
     }
   }

-  @Test
-  public void testScheduleBlockWithinSameNode() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    initConf(conf);
+  private void testWithinSameNode(Configuration conf) throws Exception {
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(3)
         .storageTypes(
-            new StorageType[] { StorageType.DISK, StorageType.ARCHIVE })
+            new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
         .build();
     try {
       cluster.waitActive();
@@ -133,13 +169,11 @@ public class TestMover {
       dfs.mkdirs(dir);
       // write to DISK
       dfs.setStoragePolicy(dir, "HOT");
-      {
-        final FSDataOutputStream out = dfs.create(new Path(file));
-        out.writeChars("testScheduleWithinSameNode");
-        out.close();
-      }
+      final FSDataOutputStream out = dfs.create(new Path(file));
+      out.writeChars("testScheduleWithinSameNode");
+      out.close();

-      //verify before movement
+      // verify before movement
       LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
       StorageType[] storageTypes = lb.getStorageTypes();
       for (StorageType storageType : storageTypes) {
@@ -148,21 +182,49 @@ public class TestMover {
       // move to ARCHIVE
       dfs.setStoragePolicy(dir, "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
-          new String[] { "-p", dir.toString() });
-      Assert.assertEquals("Movement to ARCHIVE should be successfull", 0, rc);
+          new String[] {"-p", dir.toString()});
+      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);

-      // Wait till namenode notified
-      Thread.sleep(3000);
-      lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-      storageTypes = lb.getStorageTypes();
-      for (StorageType storageType : storageTypes) {
-        Assert.assertTrue(StorageType.ARCHIVE == storageType);
-      }
+      // Wait till namenode notified about the block location details
+      waitForLocatedBlockWithArchiveStorageType(dfs, file, 3);
     } finally {
       cluster.shutdown();
     }
   }

+  private void waitForLocatedBlockWithArchiveStorageType(
+      final DistributedFileSystem dfs, final String file,
+      int expectedArchiveCount) throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        LocatedBlock lb = null;
+        try {
+          lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+        } catch (IOException e) {
+          LOG.error("Exception while getting located blocks", e);
+          return false;
+        }
+        int archiveCount = 0;
+        for (StorageType storageType : lb.getStorageTypes()) {
+          if (StorageType.ARCHIVE == storageType) {
+            archiveCount++;
+          }
+        }
+        LOG.info("Archive replica count, expected={} and actual={}",
+            expectedArchiveCount, archiveCount);
+        return expectedArchiveCount == archiveCount;
+      }
+    }, 100, 3000);
+  }
+
+  @Test
+  public void testScheduleBlockWithinSameNode() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    testWithinSameNode(conf);
+  }
+
   private void checkMovePaths(List<Path> actual, Path... expected) {
     Assert.assertEquals(expected.length, actual.size());
     for (Path p : expected) {
@@ -334,19 +396,10 @@ public class TestMover {
       dfs.setStoragePolicy(new Path(file), "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] { "-p", file.toString() });
-      Assert.assertEquals("Movement to ARCHIVE should be successfull", 0, rc);
+      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);

-      // Wait till namenode notified
-      Thread.sleep(3000);
-      lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-      storageTypes = lb.getStorageTypes();
-      int archiveCount = 0;
-      for (StorageType storageType : storageTypes) {
-        if (StorageType.ARCHIVE == storageType) {
-          archiveCount++;
-        }
-      }
-      Assert.assertEquals(archiveCount, 2);
+      // Wait till namenode notified about the block location details
+      waitForLocatedBlockWithArchiveStorageType(dfs, file, 2);
     } finally {
       cluster.shutdown();
     }
@@ -514,7 +567,7 @@ public class TestMover {
       // run Mover
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] { "-p", barDir });
-      Assert.assertEquals("Movement to ARCHIVE should be successfull", 0, rc);
+      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);

       // verify storage types and locations
       locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
@@ -562,4 +615,87 @@ public class TestMover {
       cluster.shutdown();
     }
   }
+
+  private void initSecureConf(Configuration conf) throws Exception {
+    String username = "mover";
+    File baseDir = GenericTestUtils.getTestDir(TestMover.class.getSimpleName());
+    FileUtil.fullyDelete(baseDir);
+    Assert.assertTrue(baseDir.mkdirs());
+
+    Properties kdcConf = MiniKdc.createConf();
+    MiniKdc kdc = new MiniKdc(kdcConf, baseDir);
+    kdc.start();
+
+    SecurityUtil.setAuthenticationMethod(
+        UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
+    UserGroupInformation.setConfiguration(conf);
+    KerberosName.resetDefaultRealm();
+    Assert.assertTrue("Expected configuration to enable security",
+        UserGroupInformation.isSecurityEnabled());
+
+    keytabFile = new File(baseDir, username + ".keytab");
+    String keytab = keytabFile.getAbsolutePath();
+    // Windows will not reverse name lookup "127.0.0.1" to "localhost".
+    String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost";
"127.0.0.1" : "localhost"; + principal = username + "/" + krbInstance + "@" + kdc.getRealm(); + String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm(); + kdc.createPrincipal(keytabFile, username, username + "/" + krbInstance, + "HTTP/" + krbInstance); + + conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, principal); + conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab); + conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, principal); + conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab); + conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal); + conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); + conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication"); + conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); + conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); + conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0"); + conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10); + + conf.setBoolean(DFS_MOVER_KEYTAB_ENABLED_KEY, true); + conf.set(DFS_MOVER_ADDRESS_KEY, "localhost:0"); + conf.set(DFS_MOVER_KEYTAB_FILE_KEY, keytab); + conf.set(DFS_MOVER_KERBEROS_PRINCIPAL_KEY, principal); + + String keystoresDir = baseDir.getAbsolutePath(); + String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestMover.class); + KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false); + + conf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY, + KeyStoreTestUtil.getClientSSLConfigFileName()); + conf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, + KeyStoreTestUtil.getServerSSLConfigFileName()); + initConf(conf); + } + + /** + * Test Mover runs fine when logging in with a keytab in kerberized env. + * Reusing testWithinSameNode here for basic functionality testing. + */ + @Test(timeout = 300000) + public void testMoverWithKeytabs() throws Exception { + final Configuration conf = new HdfsConfiguration(); + try { + initSecureConf(conf); + final UserGroupInformation ugi = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(principal, + keytabFile.getAbsolutePath()); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + // verify that mover runs Ok. + testWithinSameNode(conf); + // verify that UGI was logged in using keytab. + Assert.assertTrue(UserGroupInformation.isLoginKeytabBased()); + return null; + } + }); + } finally { + // Reset UGI so that other tests are not affected. + UserGroupInformation.reset(); + UserGroupInformation.setConfiguration(new Configuration()); + } + } } --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org For additional commands, e-mail: common-commits-help@hadoop.apache.org