hadoop-common-commits mailing list archives

From: omal...@apache.org
Subject: svn commit: r1077086 [2/2] - in /hadoop/common/branches/branch-0.20-security-patches/src: core/org/apache/hadoop/security/ hdfs/ hdfs/org/apache/hadoop/hdfs/ hdfs/org/apache/hadoop/hdfs/protocol/ hdfs/org/apache/hadoop/hdfs/server/balancer/ hdfs/org/ap...
Date: Fri, 04 Mar 2011 03:39:04 GMT
Added: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java?rev=1077086&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java Fri Mar  4 03:39:02 2011
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.security.ExportedAccessKeys;
+
+public class KeyUpdateCommand extends DatanodeCommand {
+  private ExportedAccessKeys keys;
+
+  KeyUpdateCommand() {
+    this(new ExportedAccessKeys());
+  }
+
+  public KeyUpdateCommand(ExportedAccessKeys keys) {
+    super(DatanodeProtocol.DNA_ACCESSKEYUPDATE);
+    this.keys = keys;
+  }
+
+  public ExportedAccessKeys getExportedKeys() {
+    return this.keys;
+  }
+
+  // ///////////////////////////////////////////////
+  // Writable
+  // ///////////////////////////////////////////////
+  static { // register a ctor
+    WritableFactories.setFactory(KeyUpdateCommand.class, new WritableFactory() {
+      public Writable newInstance() {
+        return new KeyUpdateCommand();
+      }
+    });
+  }
+
+  /** Write this command, including the exported access keys, to <code>out</code>.
+   */
+  public void write(DataOutput out) throws IOException {
+    super.write(out);
+    keys.write(out);
+  }
+
+  /** Read this command, including the exported access keys, from <code>in</code>.
+   */
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+    keys.readFields(in);
+  }
+}

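For context only (not part of this commit): a minimal sketch of the Writable round trip that KeyUpdateCommand supports. The helper class below is hypothetical and assumed to live in the same org.apache.hadoop.hdfs.server.protocol package so the package-private constructor is visible; ExportedAccessKeys.DUMMY_KEYS is the constant used by the new TestAccessToken further down.

package org.apache.hadoop.hdfs.server.protocol;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.security.ExportedAccessKeys;

// Hypothetical round-trip check, not committed code.
public class KeyUpdateCommandRoundTrip {
  public static void main(String[] args) throws Exception {
    // Serialize a command carrying the dummy keys used by TestAccessToken.
    KeyUpdateCommand sent = new KeyUpdateCommand(ExportedAccessKeys.DUMMY_KEYS);
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    sent.write(new DataOutputStream(buf));

    // Deserialize into a fresh instance, much as the RPC layer would after the
    // WritableFactory registered in the static block creates one.
    KeyUpdateCommand received = new KeyUpdateCommand();
    received.readFields(new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray())));
    System.out.println("keys present after round trip: "
        + (received.getExportedKeys() != null));
  }
}
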
Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=1077086&r1=1077085&r2=1077086&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Fri Mar  4 03:39:02 2011
@@ -23,6 +23,7 @@ import java.io.IOException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.ExportedAccessKeys;
 
 /*****************************************************************************
  * Protocol that a secondary NameNode uses to communicate with the NameNode.
@@ -30,9 +31,9 @@ import org.apache.hadoop.ipc.VersionedPr
  *****************************************************************************/
 public interface NamenodeProtocol extends VersionedProtocol {
   /**
-   * 2: Added getEditLogSize(), rollEditLog(), rollFSImage().
+   * 3: Added getAccessKeys().
    */
-  public static final long versionID = 2L;
+  public static final long versionID = 3L;
 
   /** Get a list of blocks belonged to <code>datanode</code>
     * whose total size is equal to <code>size</code>
@@ -46,6 +47,14 @@ public interface NamenodeProtocol extend
   throws IOException;
 
   /**
+   * Get the current access keys
+   * 
+   * @return ExportedAccessKeys containing current access keys
+   * @throws IOException 
+   */
+  public ExportedAccessKeys getAccessKeys() throws IOException;
+
+  /**
    * Get the size of the current edit log (in bytes).
    * @return The number of bytes in the current edit log.
    * @throws IOException

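As a hedged illustration (again, not part of this commit), the new getAccessKeys() call lets a party that already holds a NamenodeProtocol proxy pull the current keys and install them on its local AccessTokenHandler. Both getAccessKeys() and setKeys() appear elsewhere in this patch; the helper class and its name below are made up for illustration.

package org.apache.hadoop.hdfs.server.protocol;

import java.io.IOException;

import org.apache.hadoop.security.AccessTokenHandler;
import org.apache.hadoop.security.ExportedAccessKeys;

// Hypothetical helper; the real callers are wired up elsewhere in the patch.
public class AccessKeyRefresher {
  public static void refresh(NamenodeProtocol namenode, AccessTokenHandler handler)
      throws IOException {
    ExportedAccessKeys keys = namenode.getAccessKeys(); // new in versionID 3
    handler.setKeys(keys); // same call the slave-side handler uses in TestAccessToken
  }
}
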
Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1077086&r1=1077085&r2=1077086&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Fri Mar  4 03:39:02 2011
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessToken;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
@@ -178,6 +179,7 @@ public class TestDataTransferProtocol ex
     Text.writeString(sendOut, "cl");// clientID
     sendOut.writeBoolean(false); // no src node info
     sendOut.writeInt(0);           // number of downstream targets
+    AccessToken.DUMMY_TOKEN.write(sendOut);
     sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
     
     // bad bytes per checksum
@@ -213,6 +215,7 @@ public class TestDataTransferProtocol ex
     Text.writeString(sendOut, "cl");// clientID
     sendOut.writeBoolean(false); // no src node info
     sendOut.writeInt(0);
+    AccessToken.DUMMY_TOKEN.write(sendOut);
     sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
     sendOut.writeInt((int)512);
     sendOut.writeInt(4);           // size of packet
@@ -240,6 +243,7 @@ public class TestDataTransferProtocol ex
     Text.writeString(sendOut, "cl");// clientID
     sendOut.writeBoolean(false); // no src node info
     sendOut.writeInt(0);
+    AccessToken.DUMMY_TOKEN.write(sendOut);
     sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
     sendOut.writeInt((int)512);    // checksum size
     sendOut.writeInt(8);           // size of packet
@@ -269,6 +273,7 @@ public class TestDataTransferProtocol ex
     sendOut.writeLong(fileLen);
     recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);
     Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
     sendRecvData("Wrong block ID " + newBlockId + " for read", false); 
 
     // negative block start offset
@@ -280,6 +285,7 @@ public class TestDataTransferProtocol ex
     sendOut.writeLong(-1L);
     sendOut.writeLong(fileLen);
     Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
     sendRecvData("Negative start-offset for read for block " + 
                  firstBlock.getBlockId(), false);
 
@@ -292,6 +298,7 @@ public class TestDataTransferProtocol ex
     sendOut.writeLong(fileLen);
     sendOut.writeLong(fileLen);
     Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
     sendRecvData("Wrong start-offset for reading block " +
                  firstBlock.getBlockId(), false);
     
@@ -306,6 +313,7 @@ public class TestDataTransferProtocol ex
     sendOut.writeLong(0);
     sendOut.writeLong(-1-random.nextInt(oneMil));
     Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
     sendRecvData("Negative length for reading block " +
                  firstBlock.getBlockId(), false);
     
@@ -320,6 +328,7 @@ public class TestDataTransferProtocol ex
     sendOut.writeLong(0);
     sendOut.writeLong(fileLen + 1);
     Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
     sendRecvData("Wrong length for reading block " +
                  firstBlock.getBlockId(), false);
     
@@ -332,6 +341,7 @@ public class TestDataTransferProtocol ex
     sendOut.writeLong(0);
     sendOut.writeLong(fileLen);
     Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
     readFile(fileSys, file, fileLen);
   }
 }

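Taken together, the hunks above add one field to every hand-crafted OP_READ_BLOCK / OP_WRITE_BLOCK request: an AccessToken written immediately after the client name. Roughly, a read request now looks like the fragment below (a hedged reconstruction from the hunks and the 0.20 data transfer header, not a verbatim excerpt of the test):

    sendOut.writeShort((short) DataTransferProtocol.DATA_TRANSFER_VERSION);
    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
    sendOut.writeLong(blockId);              // block to read
    sendOut.writeLong(genStamp);             // generation stamp
    sendOut.writeLong(startOffset);          // offset within the block
    sendOut.writeLong(length);               // number of bytes to read
    Text.writeString(sendOut, "cl");         // client name
    AccessToken.DUMMY_TOKEN.write(sendOut);  // access token -- the new field
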
Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1077086&r1=1077085&r2=1077086&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Fri Mar  4 03:39:02 2011
@@ -34,13 +34,13 @@ import org.apache.hadoop.hdfs.protocol.F
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.AccessTokenHandler;
 
 import junit.framework.TestCase;
 /**
  * This class tests if a balancer schedules tasks correctly.
  */
 public class TestBalancer extends TestCase {
-  private static final Configuration CONF = new Configuration();
   final private static long CAPACITY = 500L;
   final private static String RACK0 = "/rack0";
   final private static String RACK1 = "/rack1";
@@ -56,14 +56,18 @@ public class TestBalancer extends TestCa
   private Random r = new Random();
 
   static {
-    CONF.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    CONF.setInt("io.bytes.per.checksum", DEFAULT_BLOCK_SIZE);
-    CONF.setLong("dfs.heartbeat.interval", 1L);
-    CONF.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
-    CONF.setLong("dfs.balancer.movedWinWidth", 2000L);
     Balancer.setBlockMoveWaitTime(1000L) ;
   }
 
+  private void initConf(Configuration conf) {
+    conf.setBoolean(AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, false);
+    conf.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    conf.setInt("io.bytes.per.checksum", DEFAULT_BLOCK_SIZE);
+    conf.setLong("dfs.heartbeat.interval", 1L);
+    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    conf.setLong("dfs.balancer.movedWinWidth", 2000L);
+  }
+
   /* create a file with a length of <code>fileLen</code> */
   private void createFile(long fileLen, short replicationFactor)
   throws IOException {
@@ -77,11 +81,11 @@ public class TestBalancer extends TestCa
   /* fill up a cluster with <code>numNodes</code> datanodes 
    * whose used space to be <code>size</code>
    */
-  private Block[] generateBlocks(long size, short numNodes) throws IOException {
-    cluster = new MiniDFSCluster( CONF, numNodes, true, null);
+  private Block[] generateBlocks(Configuration conf, long size, short numNodes) throws IOException {
+    cluster = new MiniDFSCluster( conf, numNodes, true, null);
     try {
       cluster.waitActive();
-      client = DFSClient.createNamenode(CONF);
+      client = DFSClient.createNamenode(conf);
 
       short replicationFactor = (short)(numNodes-1);
       long fileLen = size/replicationFactor;
@@ -140,7 +144,7 @@ public class TestBalancer extends TestCa
    * then redistribute blocks according the required distribution.
    * Afterwards a balancer is running to balance the cluster.
    */
-  private void testUnevenDistribution(
+  private void testUnevenDistribution(Configuration conf,
       long distribution[], long capacities[], String[] racks) throws Exception {
     int numDatanodes = distribution.length;
     if (capacities.length != numDatanodes || racks.length != numDatanodes) {
@@ -154,18 +158,18 @@ public class TestBalancer extends TestCa
     }
 
     // fill the cluster
-    Block[] blocks = generateBlocks(totalUsedSpace, (short)numDatanodes);
+    Block[] blocks = generateBlocks(conf, totalUsedSpace, (short)numDatanodes);
 
     // redistribute blocks
     Block[][] blocksDN = distributeBlocks(
         blocks, (short)(numDatanodes-1), distribution);
 
     // restart the cluster: do NOT format the cluster
-    CONF.set("dfs.safemode.threshold.pct", "0.0f"); 
-    cluster = new MiniDFSCluster(0, CONF, numDatanodes,
+    conf.set("dfs.safemode.threshold.pct", "0.0f"); 
+    cluster = new MiniDFSCluster(0, conf, numDatanodes,
         false, true, null, racks, capacities);
     cluster.waitActive();
-    client = DFSClient.createNamenode(CONF);
+    client = DFSClient.createNamenode(conf);
 
     cluster.injectBlocks(blocksDN);
 
@@ -173,7 +177,7 @@ public class TestBalancer extends TestCa
     for(long capacity:capacities) {
       totalCapacity += capacity;
     }
-    runBalancer(totalUsedSpace, totalCapacity);
+    runBalancer(conf, totalUsedSpace, totalCapacity);
   }
 
   /* wait for one heartbeat */
@@ -194,15 +198,15 @@ public class TestBalancer extends TestCa
    * @param newCapacity new node's capacity
    * @param new 
    */
-  private void test(long[] capacities, String[] racks, 
+  private void test(Configuration conf, long[] capacities, String[] racks, 
       long newCapacity, String newRack) throws Exception {
     int numOfDatanodes = capacities.length;
     assertEquals(numOfDatanodes, racks.length);
-    cluster = new MiniDFSCluster(0, CONF, capacities.length, true, true, null, 
+    cluster = new MiniDFSCluster(0, conf, capacities.length, true, true, null, 
         racks, capacities);
     try {
       cluster.waitActive();
-      client = DFSClient.createNamenode(CONF);
+      client = DFSClient.createNamenode(conf);
 
       long totalCapacity=0L;
       for(long capacity:capacities) {
@@ -212,25 +216,25 @@ public class TestBalancer extends TestCa
       long totalUsedSpace = totalCapacity*3/10;
       createFile(totalUsedSpace/numOfDatanodes, (short)numOfDatanodes);
       // start up an empty node with the same capacity and on the same rack
-      cluster.startDataNodes(CONF, 1, true, null,
+      cluster.startDataNodes(conf, 1, true, null,
           new String[]{newRack}, new long[]{newCapacity});
 
       totalCapacity += newCapacity;
 
       // run balancer and validate results
-      runBalancer(totalUsedSpace, totalCapacity);
+      runBalancer(conf, totalUsedSpace, totalCapacity);
     } finally {
       cluster.shutdown();
     }
   }
 
   /* Start balancer and check if the cluster is balanced after the run */
-  private void runBalancer( long totalUsedSpace, long totalCapacity )
+  private void runBalancer(Configuration conf, long totalUsedSpace, long totalCapacity )
   throws Exception {
     waitForHeartBeat(totalUsedSpace, totalCapacity);
 
     // start rebalancing
-    balancer = new Balancer(CONF);
+    balancer = new Balancer(conf);
     balancer.run(new String[0]);
 
     waitForHeartBeat(totalUsedSpace, totalCapacity);
@@ -258,18 +262,27 @@ public class TestBalancer extends TestCa
   /** Test a cluster with even distribution, 
    * then a new empty node is added to the cluster*/
   public void testBalancer0() throws Exception {
+    Configuration conf = new Configuration();
+    initConf(conf);
     /** one-node cluster test*/
     // add an empty node with half of the CAPACITY & the same rack
-    test(new long[]{CAPACITY}, new String[]{RACK0}, CAPACITY/2, RACK0);
+    test(conf, new long[]{CAPACITY}, new String[]{RACK0}, CAPACITY/2, RACK0);
 
     /** two-node cluster test */
-    test(new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
+    test(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
         CAPACITY, RACK2);
+    
+    /** End-to-end testing of access token, involving NN, DN, and Balancer */
+    Configuration newConf = new Configuration(conf);
+    newConf.setBoolean(AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, true);
+    test(newConf, new long[]{CAPACITY}, new String[]{RACK0}, CAPACITY/2, RACK0);
   }
 
   /** Test unevenly distributed cluster */
   public void testBalancer1() throws Exception {
-    testUnevenDistribution(
+    Configuration conf = new Configuration();
+    initConf(conf);
+    testUnevenDistribution(conf,
         new long[] {50*CAPACITY/100, 10*CAPACITY/100},
         new long[]{CAPACITY, CAPACITY},
         new String[] {RACK0, RACK1});

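The balancer tests now build a fresh Configuration per test through initConf() instead of sharing a static CONF, which is what lets testBalancer0 run a final pass with access tokens switched on. A minimal sketch of that toggle, using only the property named in the diff (initConf is the test's own helper):

    Configuration conf = new Configuration();
    initConf(conf); // access tokens stay off for the plain balancing cases

    // Separate configuration for the end-to-end NameNode/DataNode/Balancer token test.
    Configuration secureConf = new Configuration(conf);
    secureConf.setBoolean(AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, true);
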
Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1077086&r1=1077085&r2=1077086&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Fri Mar  4 03:39:02 2011
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.datanode.BlockTransferThrottler;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessToken;
 /**
  * This class tests if block replacement request to data nodes work correctly.
  */
@@ -231,6 +232,7 @@ public class TestBlockReplacement extend
     out.writeLong(block.getGenerationStamp());
     Text.writeString(out, source.getStorageID());
     sourceProxy.write(out);
+    AccessToken.DUMMY_TOKEN.write(out);
     out.flush();
     // receiveResponse
     DataInputStream reply = new DataInputStream(sock.getInputStream());

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1077086&r1=1077085&r2=1077086&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Fri Mar  4 03:39:02 2011
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.AccessToken;
 
 import junit.framework.TestCase;
 
@@ -119,6 +120,7 @@ public class TestDiskError extends TestC
       Text.writeString( out, "" );
       out.writeBoolean(false); // Not sending src node information
       out.writeInt(0);
+      AccessToken.DUMMY_TOKEN.write(out);
       
       // write check header
       out.writeByte( 1 );

Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestAccessToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestAccessToken.java?rev=1077086&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestAccessToken.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestAccessToken.java Fri Mar  4 03:39:02 2011
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import java.util.EnumSet;
+
+import org.apache.hadoop.io.TestWritable;
+
+import junit.framework.TestCase;
+
+/** Unit tests for access tokens */
+public class TestAccessToken extends TestCase {
+  long accessKeyUpdateInterval = 10 * 60 * 1000; // 10 mins
+  long accessTokenLifetime = 2 * 60 * 1000; // 2 mins
+  long blockID1 = 0L;
+  long blockID2 = 10L;
+  long blockID3 = -108L;
+
+  /** test Writable */
+  public void testWritable() throws Exception {
+    TestWritable.testWritable(ExportedAccessKeys.DUMMY_KEYS);
+    AccessTokenHandler handler = new AccessTokenHandler(true,
+        accessKeyUpdateInterval, accessTokenLifetime);
+    ExportedAccessKeys keys = handler.exportKeys();
+    TestWritable.testWritable(keys);
+    TestWritable.testWritable(AccessToken.DUMMY_TOKEN);
+    AccessToken token = handler.generateToken(blockID3, EnumSet
+        .allOf(AccessTokenHandler.AccessMode.class));
+    TestWritable.testWritable(token);
+  }
+
+  private void tokenGenerationAndVerification(AccessTokenHandler master,
+      AccessTokenHandler slave) throws Exception {
+    // single-mode tokens
+    for (AccessTokenHandler.AccessMode mode : AccessTokenHandler.AccessMode
+        .values()) {
+      // generated by master
+      AccessToken token1 = master.generateToken(blockID1, EnumSet.of(mode));
+      assertTrue(master.checkAccess(token1, null, blockID1, mode));
+      assertTrue(slave.checkAccess(token1, null, blockID1, mode));
+      // generated by slave
+      AccessToken token2 = slave.generateToken(blockID2, EnumSet.of(mode));
+      assertTrue(master.checkAccess(token2, null, blockID2, mode));
+      assertTrue(slave.checkAccess(token2, null, blockID2, mode));
+    }
+    // multi-mode tokens
+    AccessToken mtoken = master.generateToken(blockID3, EnumSet
+        .allOf(AccessTokenHandler.AccessMode.class));
+    for (AccessTokenHandler.AccessMode mode : AccessTokenHandler.AccessMode
+        .values()) {
+      assertTrue(master.checkAccess(mtoken, null, blockID3, mode));
+      assertTrue(slave.checkAccess(mtoken, null, blockID3, mode));
+    }
+  }
+
+  /** test access key and token handling */
+  public void testAccessTokenHandler() throws Exception {
+    AccessTokenHandler masterHandler = new AccessTokenHandler(true,
+        accessKeyUpdateInterval, accessTokenLifetime);
+    AccessTokenHandler slaveHandler = new AccessTokenHandler(false,
+        accessKeyUpdateInterval, accessTokenLifetime);
+    ExportedAccessKeys keys = masterHandler.exportKeys();
+    slaveHandler.setKeys(keys);
+    tokenGenerationAndVerification(masterHandler, slaveHandler);
+    // key updating
+    masterHandler.updateKeys();
+    tokenGenerationAndVerification(masterHandler, slaveHandler);
+    keys = masterHandler.exportKeys();
+    slaveHandler.setKeys(keys);
+    tokenGenerationAndVerification(masterHandler, slaveHandler);
+  }
+
+}

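Reading this test alongside KeyUpdateCommand and NamenodeProtocol.getAccessKeys() above: the master (NameNode-side) handler mints the keys, they are shipped to DataNodes, and the slave handler can then verify tokens it never issued. The sketch below uses only methods that appear in this patch, but the wiring itself is illustrative rather than the committed heartbeat/registration code:

    long keyUpdateInterval = 10 * 60 * 1000; // 10 min, as in the test above
    long tokenLifetime = 2 * 60 * 1000;      // 2 min

    AccessTokenHandler master = new AccessTokenHandler(true, keyUpdateInterval, tokenLifetime);
    AccessTokenHandler slave = new AccessTokenHandler(false, keyUpdateInterval, tokenLifetime);

    // Keys reach a DataNode either via NamenodeProtocol.getAccessKeys() or a
    // KeyUpdateCommand piggybacked on a heartbeat reply.
    KeyUpdateCommand cmd = new KeyUpdateCommand(master.exportKeys());
    slave.setKeys(cmd.getExportedKeys());

    // A token minted by the master now verifies on the slave.
    AccessToken token = master.generateToken(0L,
        EnumSet.allOf(AccessTokenHandler.AccessMode.class));
    for (AccessTokenHandler.AccessMode mode : AccessTokenHandler.AccessMode.values()) {
      assert slave.checkAccess(token, null, 0L, mode);
    }
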
Modified: hadoop/common/branches/branch-0.20-security-patches/src/webapps/datanode/browseBlock.jsp
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/webapps/datanode/browseBlock.jsp?rev=1077086&r1=1077085&r2=1077086&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/webapps/datanode/browseBlock.jsp (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/webapps/datanode/browseBlock.jsp Fri Mar  4 03:39:02 2011
@@ -12,6 +12,8 @@
   import="org.apache.hadoop.io.*"
   import="org.apache.hadoop.conf.*"
   import="org.apache.hadoop.net.DNS"
+  import="org.apache.hadoop.security.AccessToken"
+  import="org.apache.hadoop.security.AccessTokenHandler"
   import="org.apache.hadoop.util.*"
   import="java.text.DateFormat"
 %>
@@ -190,6 +192,26 @@
     }
     blockId = Long.parseLong(blockIdStr);
 
+    final DFSClient dfs = new DFSClient(jspHelper.nameNodeAddr, jspHelper.conf);
+    
+    AccessToken accessToken = AccessToken.DUMMY_TOKEN;
+    if (JspHelper.conf
+        .getBoolean(AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, false)) {
+      List<LocatedBlock> blks = dfs.namenode.getBlockLocations(filename, 0,
+          Long.MAX_VALUE).getLocatedBlocks();
+      if (blks == null || blks.size() == 0) {
+        out.print("Can't locate file blocks");
+        dfs.close();
+        return;
+      }
+      for (int i = 0; i < blks.size(); i++) {
+        if (blks.get(i).getBlock().getBlockId() == blockId) {
+          accessToken = blks.get(i).getAccessToken();
+          break;
+        }
+      }
+    }
+    
     String blockGenStamp = null;
     long genStamp = 0;
     blockGenStamp = req.getParameter("genstamp");
@@ -240,7 +262,6 @@
     out.print("<hr>");
 
     //Determine the prev & next blocks
-    DFSClient dfs = new DFSClient(jspHelper.nameNodeAddr, jspHelper.conf);
     long nextStartOffset = 0;
     long nextBlockSize = 0;
     String nextBlockIdStr = null;
@@ -355,7 +376,7 @@
     try {
     jspHelper.streamBlockInAscii(
             new InetSocketAddress(req.getServerName(), datanodePort), blockId, 
-            genStamp, blockSize, startOffset, chunkSizeToView, out);
+            accessToken, genStamp, blockSize, startOffset, chunkSizeToView, out);
     } catch (Exception e){
         out.print(e);
     }

Modified: hadoop/common/branches/branch-0.20-security-patches/src/webapps/datanode/tail.jsp
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/webapps/datanode/tail.jsp?rev=1077086&r1=1077085&r2=1077086&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/webapps/datanode/tail.jsp (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/webapps/datanode/tail.jsp Fri Mar  4 03:39:02 2011
@@ -12,6 +12,7 @@
   import="org.apache.hadoop.io.*"
   import="org.apache.hadoop.conf.*"
   import="org.apache.hadoop.net.DNS"
+  import="org.apache.hadoop.security.AccessToken"
   import="org.apache.hadoop.util.*"
   import="org.apache.hadoop.net.NetUtils"
   import="java.text.DateFormat"
@@ -82,6 +83,7 @@
     LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
     long blockSize = lastBlk.getBlock().getNumBytes();
     long blockId = lastBlk.getBlock().getBlockId();
+    AccessToken accessToken = lastBlk.getAccessToken();
     long genStamp = lastBlk.getBlock().getGenerationStamp();
     DatanodeInfo chosenNode;
     try {
@@ -98,7 +100,7 @@
     else startOffset = 0;
 
     out.print("<textarea cols=\"100\" rows=\"25\" wrap=\"virtual\" style=\"width:100%\" READONLY>");
-    jspHelper.streamBlockInAscii(addr, blockId, genStamp, blockSize, startOffset, chunkSizeToView, out);
+    jspHelper.streamBlockInAscii(addr, blockId, accessToken, genStamp, blockSize, startOffset, chunkSizeToView, out);
     out.print("</textarea>");
     dfs.close();
   }


