hadoop-hdfs-commits mailing list archives

From sur...@apache.org
Subject svn commit: r953803 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ src/java/org/apache/hadoop/hdfs/util/ src/test/hdfs/org/apache/h...
Date Fri, 11 Jun 2010 18:15:18 GMT
Author: suresh
Date: Fri Jun 11 18:15:17 2010
New Revision: 953803

URL: http://svn.apache.org/viewvc?rev=953803&view=rev
Log:
HDFS-1110. Reuses objects for commonly used file names in namenode to reduce the heap usage.
Contributed by Suresh Srinivas.
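
The core of the change is a simple interning pattern: large clusters contain many files with identical names (for example part-00000 outputs), so the namenode can hold one canonical byte[] per frequent name and point every INode at it instead of keeping a copy per file. A minimal sketch of the idea with illustrative names (the committed code below implements this as NameCache plus a ByteArray wrapper):

    // Sketch only: keep one canonical instance per distinct name so that
    // equal names share a single object on the heap.
    import java.util.HashMap;
    import java.util.Map;

    class Interner<T> {
      private final Map<T, T> canonical = new HashMap<T, T>();

      T intern(T name) {
        T existing = canonical.get(name);
        if (existing != null) {
          return existing;   // reuse the instance already on the heap
        }
        canonical.put(name, name);
        return name;
      }
    }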

Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/util/ByteArray.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=953803&r1=953802&r2=953803&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Jun 11 18:15:17 2010
@@ -9,6 +9,9 @@ Trunk (unreleased changes)
 
   IMPROVEMENTS
 
+    HDFS-1110. Reuses objects for commonly used file names in namenode to
+    reduce the heap usage. (suresh)
+
     HDFS-1096. fix for prev. commit. (boryas)
 
     HDFS-1096. allow dfsadmin/mradmin refresh of superuser proxy group

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=953803&r1=953802&r2=953803&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Jun 11 18:15:17 2010
@@ -205,4 +205,6 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.namenode.kerberos.https.principal";
   public static final String  DFS_SECONDARY_NAMENODE_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.principal";
   public static final String  DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.https.principal";
+  public static final String  DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
+  public static final int     DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
 }
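
The threshold controls how often a name must occur before it is promoted into the cache. A hedged sketch of overriding it programmatically (the value 32 is an arbitrary example, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class NameCacheConfigExample {
      public static void main(String[] args) {
        // Sketch: only names seen more than 32 times get a shared byte[].
        Configuration conf = new Configuration();
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY, 32);
        System.out.println(conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
            DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT));
      }
    }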

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=953803&r1=953802&r2=953803&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Jun 11 18:15:17 2010
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.util.ByteArray;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -64,6 +65,12 @@ class FSDirectory implements Closeable {
   private static final long UNKNOWN_DISK_SPACE = -1;
   private final int lsLimit;  // max list limit
   
+  /**
+   * Caches frequently used file names in {@link INode}s so that byte[]
+   * objects can be reused, reducing heap usage.
+   */
+  private final NameCache<ByteArray> nameCache;
+
   /** Access an existing dfs name directory. */
   FSDirectory(FSNamesystem ns, Configuration conf) {
     this(new FSImage(), ns, conf);
@@ -72,6 +79,7 @@ class FSDirectory implements Closeable {
       NameNode.LOG.info("set FSImage.restoreFailedStorage");
       fsImage.setRestoreFailedStorage(true);
     }
+    
     fsImage.setCheckpointDirectories(FSImage.getCheckpointDirs(conf, null),
                                 FSImage.getCheckpointEditsDirs(conf, null));
   }
@@ -86,6 +94,13 @@ class FSDirectory implements Closeable {
         DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
     this.lsLimit = configuredLimit>0 ?
         configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;
+    
+    int threshold = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
+        DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
+    NameNode.LOG.info("Caching file names occurring more than " + threshold
+        + " times");
+    nameCache = new NameCache<ByteArray>(threshold);
   }
     
   private FSNamesystem getFSNamesystem() {
@@ -119,6 +134,7 @@ class FSDirectory implements Closeable {
     }
     synchronized (this) {
       this.ready = true;
+      this.nameCache.initialized();
       this.notifyAll();
     }
   }
@@ -270,6 +286,7 @@ class FSDirectory implements Closeable {
       try {
         newParent = rootDir.addToParent(src, newNode, parentINode,
                                         false, propagateModTime);
+        cacheName(newNode);
       } catch (FileNotFoundException e) {
         return null;
       }
@@ -1396,7 +1413,9 @@ class FSDirectory implements Closeable {
         long childDiskspace, boolean inheritPermission) 
   throws QuotaExceededException, UnresolvedLinkException {
     byte[][] components = INode.getPathComponents(src);
-    child.setLocalName(components[components.length-1]);
+    byte[] path = components[components.length-1];
+    child.setLocalName(path);
+    cacheName(child);
     INode[] inodes = new INode[components.length];
     synchronized (rootDir) {
       rootDir.getExistingPathINodes(components, inodes, false);
@@ -1852,4 +1871,20 @@ class FSDirectory implements Closeable {
     }
     return newNode;
   }
+  
+  /**
+   * Caches frequently used file names to reuse file name objects and
+   * reduce heap size.
+   */
+  void cacheName(INode inode) {
+    // Name is cached only for files
+    if (inode.isDirectory() || inode.isLink()) {
+      return;
+    }
+    ByteArray name = new ByteArray(inode.getLocalNameBytes());
+    name = nameCache.put(name);
+    if (name != null) {
+      inode.setLocalName(name.getBytes());
+    }
+  }
 }
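
Note the contract cacheName depends on: put() returns null on the first sighting of a name (the INode then keeps its own byte[]) and returns the first-seen or cached instance on later sightings, which the caller substitutes into the INode. A short sketch against the NameCache added below ("part-0" is an arbitrary example name):

    // Sketch of the put() contract used by cacheName above.
    NameCache<ByteArray> cache = new NameCache<ByteArray>(10);
    ByteArray first  = cache.put(new ByteArray("part-0".getBytes()));
    // first == null: the name was only recorded in the transient map.
    ByteArray second = cache.put(new ByteArray("part-0".getBytes()));
    // second != null: it is the instance from the first call, so both
    // INodes can now share one byte[] for the name.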

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java?rev=953803&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java Fri Jun 11 18:15:17 2010
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * Caches frequently used names to facilitate reuse.
+ * (example: byte[] representation of the file name in {@link INode}).
+ * 
+ * This class is used by initially adding all the file names. The cache
+ * tracks the number of times a name is used in a transient map. It promotes 
+ * a name used more than {@code useThreshold} times to the cache.
+ * 
+ * Once all the names are added, {@link #initialized()} should be called to
+ * finish initialization. The transient map where the use count is tracked is
+ * discarded and the cache is ready for use.
+ * 
+ * <p>
+ * This class must be synchronized externally.
+ * 
+ * @param <K> name to be added to the cache
+ */
+class NameCache<K> {
+  /**
+   * Class for tracking use count of a name
+   */
+  private class UseCount {
+    int count;
+    final K value;  // Internal value for the name
+
+    UseCount(final K value) {
+      count = 1;
+      this.value = value;
+    }
+    
+    void increment() {
+      count++;
+    }
+    
+    int get() {
+      return count;
+    }
+  }
+
+  static final Log LOG = LogFactory.getLog(NameCache.class.getName());
+
+  /** indicates initialization is in progress */
+  private boolean initialized = false;
+
+  /** names used more than {@code useThreshold} times are added to the cache */
+  private final int useThreshold;
+
+  /** Number of times a cache lookup was successful */
+  private int lookups = 0;
+
+  /** Cached names */
+  final HashMap<K, K> cache = new HashMap<K, K>();
+
+  /** Names with the number of occurrences, tracked during initialization */
+  Map<K, UseCount> transientMap = new HashMap<K, UseCount>();
+
+  /**
+   * Constructor
+   * @param useThreshold names occurring more than this many times are
+   *          promoted to the cache
+   */
+  NameCache(int useThreshold) {
+    this.useThreshold = useThreshold;
+  }
+  
+  /**
+   * Add a given name to the cache or track its use count if it does not
+   * exist. If the name already exists, then the internal value is returned.
+   * 
+   * @param name name to be looked up
+   * @return internal value for the name if found; otherwise null
+   */
+  K put(final K name) {
+    K internal = cache.get(name);
+    if (internal != null) {
+      lookups++;
+      return internal;
+    }
+
+    // Track the usage count only during initialization
+    if (!initialized) {
+      UseCount useCount = transientMap.get(name);
+      if (useCount != null) {
+        useCount.increment();
+        if (useCount.get() >= useThreshold) {
+          promote(name);
+        }
+        return useCount.value;
+      }
+      useCount = new UseCount(name);
+      transientMap.put(name, useCount);
+    }
+    return null;
+  }
+  
+  /**
+   * Number of times a lookup for a name returned a cached object
+   * @return number of successful lookups
+   */
+  int getLookupCount() {
+    return lookups;
+  }
+
+  /**
+   * Size of the cache
+   * @return Number of names stored in the cache
+   */
+  int size() {
+    return cache.size();
+  }
+
+  /**
+   * Mark the name cache as initialized. The use count is no longer tracked
+   * and the transient map used for initializing the cache is discarded to
+   * save heap space.
+   */
+  void initialized() {
+    LOG.info("initialized with " + size() + " entries, " + lookups + " lookups");
+    this.initialized = true;
+    transientMap.clear();
+    transientMap = null;
+  }
+  
+  /** Promote a frequently used name to the cache */
+  private void promote(final K name) {
+    transientMap.remove(name);
+    cache.put(name, name);
+    lookups += useThreshold;
+  }
+}
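
The intended lifecycle, matching how FSDirectory drives it above (a sketch, not committed code):

    // Phase 1: while loading the fsimage, feed every file name through put().
    NameCache<String> cache = new NameCache<String>(2);
    cache.put("part-00000");
    cache.put("part-00000");        // second use meets the threshold: promoted
    // Phase 2: loading finished; discard the bookkeeping map.
    cache.initialized();
    String shared = cache.put("part-00000");  // returns the cached instance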

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java?rev=953803&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java Fri Jun 11 18:15:17 2010
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map.Entry;
+
+/**
+ * File name distribution visitor. 
+ * <p>
+ * It analyzes file names in fsimage and prints the following information: 
+ * <li>Number of unique file names</li> 
+ * <li>Number of file names and the corresponding range of the number of
+ * files that use these same names</li>
+ * <li>Heap saved if the file name objects are reused</li>
+ */
+public class NameDistributionVisitor extends TextWriterImageVisitor {
+  HashMap<String, Integer> counts = new HashMap<String, Integer>();
+
+  public NameDistributionVisitor(String filename, boolean printToScreen)
+      throws IOException {
+    super(filename, printToScreen);
+  }
+
+  @Override
+  void finish() throws IOException {
+    final int BYTEARRAY_OVERHEAD = 24;
+
+    write("Total unique file names " + counts.size());
+    // Columns: Frequency of file occurrence, savings in heap, total files using
+    // the name and number of file names
+    final long stats[][] = { { 100000, 0, 0, 0 },
+                             { 10000, 0, 0, 0 },
+                             { 1000, 0, 0, 0 },
+                             { 100, 0, 0, 0 },
+                             { 10, 0, 0, 0 },
+                             { 5, 0, 0, 0 },
+                             { 4, 0, 0, 0 },
+                             { 3, 0, 0, 0 },
+                             { 2, 0, 0, 0 }};
+
+    int highbound = Integer.MIN_VALUE;
+    for (Entry<String, Integer> entry : counts.entrySet()) {
+      highbound = Math.max(highbound, entry.getValue());
+      for (int i = 0; i < stats.length; i++) {
+        if (entry.getValue() >= stats[i][0]) {
+          stats[i][1] += (BYTEARRAY_OVERHEAD + entry.getKey().length())
+              * (entry.getValue() - 1);
+          stats[i][2] += entry.getValue();
+          stats[i][3]++;
+          break;
+        }
+      }
+    }
+
+    long lowbound = 0;
+    long totalsavings = 0;
+    for (long[] stat : stats) {
+      lowbound = stat[0];
+      totalsavings += stat[1];
+      String range = lowbound == highbound ? " " + lowbound :
+          " between " + lowbound + "-" + highbound;
+      write("\n" + stat[3] + " names are used by " + stat[2] + " files"
+          + range + " times. Heap savings ~" + stat[1] + " bytes.");
+      highbound = (int) stat[0] - 1;
+    }
+    write("\n\nTotal saved heap ~" + totalsavings + "bytes.\n");
+    super.finish();
+  }
+
+  @Override
+  void visit(ImageElement element, String value) throws IOException {
+    if (element == ImageElement.INODE_PATH) {
+      String filename = value.substring(value.lastIndexOf("/") + 1);
+      if (counts.containsKey(filename)) {
+        counts.put(filename, counts.get(filename) + 1);
+      } else {
+        counts.put(filename, 1);
+      }
+    }
+  }
+
+  @Override
+  void leaveEnclosingElement() throws IOException {
+  }
+
+  @Override
+  void start() throws IOException {
+  }
+
+  @Override
+  void visitEnclosingElement(ImageElement element) throws IOException {
+  }
+
+  @Override
+  void visitEnclosingElement(ImageElement element, ImageElement key,
+      String value) throws IOException {
+  }
+}

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java?rev=953803&r1=953802&r2=953803&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java Fri Jun 11 18:15:17 2010
@@ -72,6 +72,9 @@ public class OfflineImageViewer {
     "    -maxSize specifies the range [0, maxSize] of file sizes to be\n" +
     "     analyzed (128GB by default).\n" +
     "    -step defines the granularity of the distribution. (2MB by default)\n" +
+    "  * NameDistribution: This processor analyzes the file names\n" +
+    "    in the image and prints total number of file names and how frequently" +
+    "    file names are reused.\n" +
     "\n" + 
     "Required command line arguments:\n" +
     "-i,--inputFile <arg>   FSImage file to process.\n" +
@@ -233,6 +236,8 @@ public class OfflineImageViewer {
       long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
       int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
       v = new FileDistributionVisitor(outputFile, maxSize, step);
+    } else if (processor.equals("NameDistribution")) {
+      v = new NameDistributionVisitor(outputFile, printToScreen);
     } else {
       v = new LsImageVisitor(outputFile, printToScreen);
       skipBlocks = false;
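
With this wired in, the new processor can be selected from the command line. Assuming the tool's usual -i/-o/-p options (the flag names beyond -i and the file names here are illustrative, not confirmed by this diff):

    bin/hdfs oiv -p NameDistribution -i fsimage -o name_distribution.txt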

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java?rev=953803&r1=953802&r2=953803&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java Fri Jun 11 18:15:17 2010
@@ -96,11 +96,11 @@ abstract class TextWriterImageVisitor ex
     if(printToScreen)
       System.out.print(toWrite);
 
-      try {
-        fw.write(toWrite);
-      } catch (IOException e) {
-        okToWrite = false;
-        throw e;
-      }
+    try {
+      fw.write(toWrite);
+    } catch (IOException e) {
+      okToWrite = false;
+      throw e;
+    }
   }
 }

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/util/ByteArray.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/util/ByteArray.java?rev=953803&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/util/ByteArray.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/util/ByteArray.java Fri Jun 11 18:15:17 2010
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.util.Arrays;
+
+/** 
+ * Wrapper for byte[] so that a byte[] can be used as a key in a HashMap
+ */
+public class ByteArray {
+  private int hash = 0; // cache the hash code
+  private final byte[] bytes;
+  
+  public ByteArray(byte[] bytes) {
+    this.bytes = bytes;
+  }
+  
+  public byte[] getBytes() {
+    return bytes;
+  }
+  
+  @Override
+  public int hashCode() {
+    if (hash == 0) {
+      hash = Arrays.hashCode(bytes);
+    }
+    return hash;
+  }
+  
+  @Override
+  public boolean equals(Object o) {
+    if (!(o instanceof ByteArray)) {
+      return false;
+    }
+    return Arrays.equals(bytes, ((ByteArray)o).bytes);
+  }
+}
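
The wrapper is needed because byte[] inherits identity-based equals() and hashCode() from Object, so raw arrays do not work as HashMap keys. A quick illustration (sketch only):

    byte[] a = "part-0".getBytes();
    byte[] b = "part-0".getBytes();
    // a.equals(b) is false and their hash codes differ: identity semantics.
    ByteArray ka = new ByteArray(a);
    ByteArray kb = new ByteArray(b);
    // ka.equals(kb) is true and ka.hashCode() == kb.hashCode(): content
    // semantics, so equal names collapse to one HashMap entry.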

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java?rev=953803&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java Fri Jun 11 18:15:17 2010
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.*;
+
+import org.junit.Test;
+
+/**
+ * Test for {@link NameCache} class
+ */
+public class TestNameCache {
+  @Test
+  public void testDictionary() throws Exception {
+    // Create dictionary with useThreshold 2
+    NameCache<String> cache = 
+      new NameCache<String>(2);
+    String[] matching = {"part1", "part10000000", "fileabc", "abc", "filepart"};
+    String[] notMatching = {"spart1", "apart", "abcd", "def"};
+
+    for (String s : matching) {
+      // Add useThreshold times so the names are promoted to dictionary
+      cache.put(s);
+      assertTrue(s == cache.put(s));
+    }
+    for (String s : notMatching) {
+      // Add < useThreshold times so the names are not promoted to dictionary
+      cache.put(s);
+    }
+    
+    // Mark dictionary as initialized
+    cache.initialized();
+    
+    for (String s : matching) {
+      verifyNameReuse(cache, s, true);
+    }
+    // Check dictionary size
+    assertEquals(matching.length, cache.size());
+    
+    for (String s : notMatching) {
+      verifyNameReuse(cache, s, false);
+    }
+  }
+
+  private void verifyNameReuse(NameCache<String> cache, String s, boolean reused) {
+    cache.put(s);
+    int lookupCount = cache.getLookupCount();
+    if (reused) {
+      // Dictionary returns non null internal value
+      assertNotNull(cache.put(s));
+      // Successful lookup increments lookup count
+      assertEquals(lookupCount + 1, cache.getLookupCount());
+    } else {
+      // Dictionary returns null - since name is not in the dictionary
+      assertNull(cache.put(s));
+      // Lookup count remains the same
+      assertEquals(lookupCount, cache.getLookupCount());
+    }
+  }
+}


