hadoop-hdfs-commits mailing list archives

From hair...@apache.org
Subject svn commit: r1026740 - in /hadoop/hdfs/trunk: ./ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ src/test/hd...
Date Sun, 24 Oct 2010 04:48:07 GMT
Author: hairong
Date: Sun Oct 24 04:48:06 2010
New Revision: 1026740

URL: http://svn.apache.org/viewvc?rev=1026740&view=rev
Log:
HDFS-1435. Provide an option to store fsimage compressed. Contributed by Hairong Kuang.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/hdfs-default.xml
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1026740&r1=1026739&r2=1026740&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Sun Oct 24 04:48:06 2010
@@ -35,6 +35,8 @@ Trunk (unreleased changes)
 
     HDFS-1361. Add -fileStatus operation to NNThroughputBenchmark. (shv)
 
+    HDFS-1435. Provide an option to store fsimage compressed. (hairong)
+
   IMPROVEMENTS
 
     HDFS-1304. Add a new unit test for HftpFileSystem.open(..).  (szetszwo)

Modified: hadoop/hdfs/trunk/src/java/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/hdfs-default.xml?rev=1026740&r1=1026739&r2=1026740&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/hdfs-default.xml (original)
+++ hadoop/hdfs/trunk/src/java/hdfs-default.xml Sun Oct 24 04:48:06 2010
@@ -529,4 +529,19 @@ creations/deletions), or "all".</descrip
   </description>
 </property>
 
+<property>
+  <name>dfs.image.compress</name>
+  <value>false</value>
+  <description>Should the dfs image be compressed?
+  </description>
+</property>
+
+<property>
+  <name>dfs.image.compression.codec</name>
+  <value>org.apache.hadoop.io.compress.DefaultCodec</value>
+  <description>If the dfs image is compressed, how should it be compressed?
+               This has to be a codec defined in io.compression.codecs.
+  </description>
+</property>
+
 </configuration>

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1026740&r1=1026739&r2=1026740&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Sun Oct 24 04:48:06 2010
@@ -198,6 +198,14 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_BLOCKREPORT_INITIAL_DELAY_KEY = "dfs.blockreport.initialDelay";
   public static final int     DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0;
 
+  // property for fsimage compression
+  public static final String DFS_IMAGE_COMPRESS_KEY = "dfs.image.compress";
+  public static final boolean DFS_IMAGE_COMPRESS_DEFAULT = false;
+  public static final String DFS_IMAGE_COMPRESSION_CODEC_KEY =
+                                   "dfs.image.compression.codec";
+  public static final String DFS_IMAGE_COMPRESSION_CODEC_DEFAULT =
+                                   "org.apache.hadoop.io.compress.DefaultCodec";
+
   //Keys with no defaults
   public static final String  DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
   public static final String  DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout";
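
For illustration, a minimal sketch of driving the two new keys from client code, assuming only the constants added above (the GzipCodec choice mirrors the test at the end of this commit; any codec listed in io.compression.codecs would do):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    // Hypothetical helper: turn on fsimage compression before starting a NameNode.
    public class FsImageCompressionConfigSketch {
      public static Configuration enableCompression(Configuration conf) {
        conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
        // Must name a codec defined in io.compression.codecs.
        conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
            "org.apache.hadoop.io.compress.GzipCodec");
        return conf;
      }
    }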

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java?rev=1026740&r1=1026739&r2=1026740&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java Sun Oct 24 04:48:06 2010
@@ -91,8 +91,7 @@ public interface FSConstants {
   // Version is reflected in the data storage file.
   // Versions are negative.
   // Decrement LAYOUT_VERSION to define a new version.
-  public static final int LAYOUT_VERSION = -24;
+  public static final int LAYOUT_VERSION = -25;
   // Current version: 
-  // -24: added new OP_[GET|RENEW|CANCEL]_DELEGATION_TOKEN and
-  // OP_UPDATE_MASTER_KEY.
+  // -25: support image compression.
 }
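
The versioning idiom here: layout versions are negative and decrease over time, so a reader gates each field on the version at which it first appeared. A minimal sketch of that gate (simplified names, not the exact FSImage code):

    import java.io.DataInputStream;
    import java.io.IOException;

    class VersionGateSketch {
      // Versions are negative and decrease, so imgVersion <= -25 means the
      // image was written at layout version -25 or newer.
      static boolean readIsCompressed(DataInputStream in, int imgVersion)
          throws IOException {
        if (imgVersion <= -25) {   // compression flag first appears in -25
          return in.readBoolean();
        }
        return false;              // older images are never compressed
      }
    }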

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1026740&r1=1026739&r2=1026740&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Sun Oct 24 04:48:06 2010
@@ -102,16 +102,8 @@ class FSDirectory implements Closeable {
   private final NameCache<ByteArray> nameCache;
 
   /** Access an existing dfs name directory. */
-  FSDirectory(FSNamesystem ns, Configuration conf) {
-    this(new FSImage(), ns, conf);
-    if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, 
-                       DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT)) {
-      NameNode.LOG.info("set FSImage.restoreFailedStorage");
-      fsImage.setRestoreFailedStorage(true);
-    }
-    
-    fsImage.setCheckpointDirectories(FSImage.getCheckpointDirs(conf, null),
-                                FSImage.getCheckpointEditsDirs(conf, null));
+  FSDirectory(FSNamesystem ns, Configuration conf) throws IOException {
+    this(new FSImage(conf), ns, conf);
   }
 
   FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1026740&r1=1026739&r2=1026740&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Sun Oct 24 04:48:06 2010
@@ -73,6 +73,8 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 /**
@@ -149,6 +151,13 @@ public class FSImage extends Storage {
   private Collection<URI> checkpointEditsDirs;
 
   /**
+   * Image compression related fields
+   */
+  private boolean compressImage = false;  // if image should be compressed
+  private CompressionCodec saveCodec;     // the compression codec
+  private CompressionCodecFactory codecFac;  // all the supported codecs
+
+  /**
    * Can fs-image be rolled?
    */
   volatile protected CheckpointStates ckptState = FSImage.CheckpointStates.START; 
@@ -165,6 +174,34 @@ public class FSImage extends Storage {
     this((FSNamesystem)null);
   }
 
+  /**
+   * Constructor
+   * @param conf Configuration
+   */
+  FSImage(Configuration conf) throws IOException {
+    this();
+    if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, 
+        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT)) {
+      NameNode.LOG.info("set FSImage.restoreFailedStorage");
+      setRestoreFailedStorage(true);
+    }
+    setCheckpointDirectories(FSImage.getCheckpointDirs(conf, null),
+        FSImage.getCheckpointEditsDirs(conf, null));
+    this.compressImage = conf.getBoolean(
+        DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,
+        DFSConfigKeys.DFS_IMAGE_COMPRESS_DEFAULT);
+    this.codecFac = new CompressionCodecFactory(conf);
+    if (this.compressImage) {
+      String codecClassName = conf.get(
+          DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
+          DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT);
+      this.saveCodec = codecFac.getCodecByClassName(codecClassName);
+      if (this.saveCodec == null) {
+        throw new IOException("Unsupported codec: " + codecClassName);
+      }
+    }
+  }
+
   FSImage(FSNamesystem ns) {
     super(NodeType.NAME_NODE);
     this.editLog = new FSEditLog(this);
@@ -633,6 +670,7 @@ public class FSImage extends Storage {
     // replace real image with the checkpoint image
     FSImage realImage = fsNamesys.getFSImage();
     assert realImage == this;
+    ckptImage.codecFac = realImage.codecFac;
     fsNamesys.dir.fsImage = ckptImage;
     // load from the checkpoint dirs
     try {
@@ -1004,16 +1042,11 @@ public class FSImage extends Storage {
     // Recover from previous interrupted checkpoint, if any
     needToSave |= recoverInterruptedCheckpoint(latestNameSD, latestEditsSD);
 
-    long startTime = now();
-    long imageSize = getImageFile(latestNameSD, NameNodeFile.IMAGE).length();
-
     //
     // Load in bits
     //
     latestNameSD.read();
     needToSave |= loadFSImage(getImageFile(latestNameSD, NameNodeFile.IMAGE));
-    LOG.info("Image file of size " + imageSize + " loaded in " 
-        + (now() - startTime)/1000 + " seconds.");
     
     // Load latest edits
     if (latestNameCheckpointTime > latestEditsCheckpointTime)
@@ -1034,6 +1067,7 @@ public class FSImage extends Storage {
     assert this.getLayoutVersion() < 0 : "Negative layout version is expected.";
     assert curFile != null : "curFile is null";
 
+    long startTime = now();   
     FSNamesystem fsNamesys = getFSNamesystem();
     FSDirectory fsDir = fsNamesys.dir;
 
@@ -1041,8 +1075,8 @@ public class FSImage extends Storage {
     // Load in bits
     //
     boolean needToSave = true;
-    DataInputStream in = new DataInputStream(new BufferedInputStream(
-                              new FileInputStream(curFile)));
+    FileInputStream fin = new FileInputStream(curFile);
+    DataInputStream in = new DataInputStream(fin);
     try {
       /*
        * Note: Remove any checks for version earlier than 
@@ -1056,6 +1090,8 @@ public class FSImage extends Storage {
        */
       // read image version: first appeared in version -1
       int imgVersion = in.readInt();
+      needToSave = (imgVersion != FSConstants.LAYOUT_VERSION);
+
       // read namespaceID: first appeared in version -2
       this.namespaceID = in.readInt();
 
@@ -1074,8 +1110,27 @@ public class FSImage extends Storage {
         fsNamesys.setGenerationStamp(genstamp); 
       }
 
-      needToSave = (imgVersion != FSConstants.LAYOUT_VERSION);
-
+      // read compression related info
+      boolean isCompressed = false;
+      if (imgVersion <= -25) {  // -25: 1st version providing compression option
+        isCompressed = in.readBoolean();
+        if (isCompressed) {
+          String codecClassName = Text.readString(in);
+          CompressionCodec loadCodec = codecFac.getCodecByClassName(codecClassName);
+          if (loadCodec == null) {
+            throw new IOException("Image compression codec not supported: "
+                                 + codecClassName);
+          }
+          in = new DataInputStream(loadCodec.createInputStream(fin));
+          LOG.info("Loading image file " + curFile +
+              " compressed using codec " + codecClassName);
+        }
+      }
+      if (!isCompressed) {
+        // use buffered input stream
+        in = new DataInputStream(new BufferedInputStream(fin));
+      }
+      
       // read file info
       short replication = fsNamesys.getDefaultReplication();
 
@@ -1183,6 +1238,9 @@ public class FSImage extends Storage {
       in.close();
     }
     
+    LOG.info("Image file of size " + curFile.length() + " loaded in " 
+        + (now() - startTime)/1000 + " seconds.");
+
     return needToSave;
   }
 
@@ -1259,13 +1317,26 @@ public class FSImage extends Storage {
     // Write out data
     //
     FileOutputStream fos = new FileOutputStream(newFile);
-    DataOutputStream out = new DataOutputStream(
-      new BufferedOutputStream(fos));
+    DataOutputStream out = new DataOutputStream(fos);
     try {
       out.writeInt(FSConstants.LAYOUT_VERSION);
       out.writeInt(namespaceID);
       out.writeLong(fsDir.rootDir.numItemsInTree());
       out.writeLong(fsNamesys.getGenerationStamp());
+      
+      // write compression info
+      out.writeBoolean(compressImage);
+      if (compressImage) {
+        String codecClassName = saveCodec.getClass().getCanonicalName();
+        Text.writeString(out, codecClassName);
+        out = new DataOutputStream(saveCodec.createOutputStream(fos));
+        LOG.info("Saving image file " + newFile +
+            " compressed using codec " + codecClassName);
+      } else {
+        // use a buffered output stream
+        out = new DataOutputStream(new BufferedOutputStream(fos));
+      }
+
       byte[] byteStore = new byte[4*FSConstants.MAX_PATH_LENGTH];
       ByteBuffer strbuf = ByteBuffer.wrap(byteStore);
       // save the root
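
Taken together, the save and load paths give the image file a self-describing header: the compression flag and codec class name are written uncompressed, and only the payload after them goes through the codec. A self-contained sketch of that round trip (hypothetical file name and simplified header; the real image also writes namespaceID, the inode count, and the generation stamp before the flag):

    import java.io.*;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.CompressionCodecFactory;

    public class ImageHeaderSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        CompressionCodecFactory factory = new CompressionCodecFactory(conf);
        String codecName = "org.apache.hadoop.io.compress.DefaultCodec";
        CompressionCodec codec = factory.getCodecByClassName(codecName);

        File f = new File("image.sketch");          // hypothetical file
        FileOutputStream fos = new FileOutputStream(f);
        DataOutputStream out = new DataOutputStream(fos);
        out.writeInt(-25);                          // LAYOUT_VERSION
        out.writeBoolean(true);                     // isCompressed
        Text.writeString(out, codecName);           // codec name, uncompressed
        out.flush();
        // Re-wrap the raw stream: header stays readable, body is compressed.
        out = new DataOutputStream(codec.createOutputStream(fos));
        out.writeUTF("payload");                    // stands in for the namespace
        out.close();

        // Loading mirrors the writes: read the header, then swap streams.
        FileInputStream fin = new FileInputStream(f);
        DataInputStream in = new DataInputStream(fin);
        boolean compressed = in.readInt() <= -25 && in.readBoolean();
        if (compressed) {
          CompressionCodec c = factory.getCodecByClassName(Text.readString(in));
          in = new DataInputStream(c.createInputStream(fin));
        } else {
          in = new DataInputStream(new BufferedInputStream(fin));
        }
        System.out.println(in.readUTF());           // prints "payload"
        in.close();
      }
    }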

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1026740&r1=1026739&r2=1026740&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Sun Oct 24 04:48:06 2010
@@ -166,7 +166,7 @@ public class SecondaryNameNode implement
                                   "/tmp/hadoop/dfs/namesecondary");
     checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, 
                                   "/tmp/hadoop/dfs/namesecondary");    
-    checkpointImage = new CheckpointStorage();
+    checkpointImage = new CheckpointStorage(conf);
     checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);
 
     // Initialize other scheduling parameters from the configuration
@@ -581,8 +581,8 @@ public class SecondaryNameNode implement
   static class CheckpointStorage extends FSImage {
     /**
      */
-    CheckpointStorage() throws IOException {
-      super();
+    CheckpointStorage(Configuration conf) throws IOException {
+      super(conf);
     }
 
     @Override

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1026740&r1=1026739&r2=1026740&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Sun Oct 24 04:48:06 2010
@@ -23,6 +23,7 @@ import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -30,6 +31,8 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 
 /**
@@ -116,7 +119,8 @@ import org.apache.hadoop.security.token.
 class ImageLoaderCurrent implements ImageLoader {
   protected final DateFormat dateFormat = 
                                       new SimpleDateFormat("yyyy-MM-dd HH:mm");
-  private static int [] versions = {-16, -17, -18, -19, -20, -21, -22, -23, -24};
+  private static int [] versions = 
+           {-16, -17, -18, -19, -20, -21, -22, -23, -24, -25};
   private int imageVersion = 0;
 
   /* (non-Javadoc)
@@ -151,6 +155,22 @@ class ImageLoaderCurrent implements Imag
 
       v.visit(ImageElement.GENERATION_STAMP, in.readLong());
 
+      if (imageVersion <= -25) {
+        boolean isCompressed = in.readBoolean();
+        v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed));
+        if (isCompressed) {
+          String codecClassName = Text.readString(in);
+          v.visit(ImageElement.COMPRESS_CODEC, codecClassName);
+          CompressionCodecFactory codecFac = new CompressionCodecFactory(
+              new Configuration());
+          CompressionCodec codec = codecFac.getCodecByClassName(codecClassName);
+          if (codec == null) {
+            throw new IOException("Image compression codec not supported: "
+                + codecClassName);
+          }
+          in = new DataInputStream(codec.createInputStream(in));
+        }
+      }
       processINodes(in, v, numInodes, skipBlocks);
 
       processINodesUC(in, v, skipBlocks);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java?rev=1026740&r1=1026739&r2=1026740&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java Sun Oct 24 04:48:06 2010
@@ -33,6 +33,8 @@ abstract class ImageVisitor {
     FS_IMAGE,
     IMAGE_VERSION,
     NAMESPACE_ID,
+    IS_COMPRESSED,
+    COMPRESS_CODEC,
     LAYOUT_VERSION,
     NUM_INODES,
     GENERATION_STAMP,

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1026740&r1=1026739&r2=1026740&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Sun Oct 24 04:48:06 2010
@@ -36,9 +36,13 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
@@ -348,4 +352,57 @@ public class TestStartup extends TestCas
         cluster.shutdown();
     }
   }
+  
+  public void testCompression() throws IOException {
+    LOG.info("Test compressing image.");
+    Configuration conf = new Configuration();
+    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+    conf.set("dfs.http.address", "127.0.0.1:0");
+    File base_dir = new File(System.getProperty(
+        "test.build.data", "build/test/data"), "dfs/");
+    conf.set("dfs.name.dir", new File(base_dir, "name").getPath());
+    conf.setBoolean("dfs.permissions", false);
+
+    NameNode.format(conf);
+
+    // create an uncompressed image
+    LOG.info("Create an uncompressed fsimage");
+    NameNode namenode = new NameNode(conf);
+    namenode.getNamesystem().mkdirs("/test",
+        new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
+    assertTrue(namenode.getFileInfo("/test").isDir());
+    namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    namenode.saveNamespace();
+    namenode.stop();
+    namenode.join();
+
+    // compress image using default codec
+    LOG.info("Read an uncomressed image and store it compressed using default codec.");
+    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
+    checkNameSpace(conf);
+
+    // read image compressed using the default and compress it using Gzip codec
+    LOG.info("Read a compressed image and store it using a different codec.");
+    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
+        "org.apache.hadoop.io.compress.GzipCodec");
+    checkNameSpace(conf);
+
+    // read an image compressed in Gzip and store it uncompressed
+    LOG.info("Read an compressed iamge and store it as uncompressed.");
+    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
+    checkNameSpace(conf);
+
+    // read an uncompressed image and store it uncompressed
+    LOG.info("Read an uncompressed image and store it as uncompressed.");
+    checkNameSpace(conf);
+  }
+
+  private void checkNameSpace(Configuration conf) throws IOException {
+    NameNode namenode = new NameNode(conf);
+    assertTrue(namenode.getFileInfo("/test").isDir());
+    namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    namenode.saveNamespace();
+    namenode.stop();
+    namenode.join();
+  }
 }


