hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cmcc...@apache.org
Subject hadoop git commit: HDFS-7790. Do not create optional fields in DFSInputStream unless they are needed (cmccabe)
Date Fri, 13 Feb 2015 01:49:02 GMT
Repository: hadoop
Updated Branches:
  refs/heads/trunk 46b6d23e8 -> 871cb5615


HDFS-7790. Do not create optional fields in DFSInputStream unless they are needed (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/871cb561
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/871cb561
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/871cb561

Branch: refs/heads/trunk
Commit: 871cb56152e6039ff56c6fabfcd45451029471c3
Parents: 46b6d23
Author: Colin Patrick Mccabe <cmccabe@cloudera.com>
Authored: Thu Feb 12 11:12:26 2015 -0800
Committer: Colin Patrick Mccabe <cmccabe@cloudera.com>
Committed: Thu Feb 12 17:48:51 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 22 ++++++++++++++------
 2 files changed, 19 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/871cb561/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 09ae2e7..9117fc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -631,6 +631,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7684. The host:port settings of the daemons should be trimmed before
     use. (Anu Engineer via aajisaka)
 
+    HDFS-7790. Do not create optional fields in DFSInputStream unless they are
+    needed (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/871cb561/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 25c23e1..09d6513 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -127,8 +127,15 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
    * The value type can be either ByteBufferPool or ClientMmap, depending on
   * whether this is a memory-mapped buffer or not.
    */
-  private final IdentityHashStore<ByteBuffer, Object>
+  private IdentityHashStore<ByteBuffer, Object> extendedReadBuffers;
+
+  private synchronized IdentityHashStore<ByteBuffer, Object>
+        getExtendedReadBuffers() {
+    if (extendedReadBuffers == null) {
       extendedReadBuffers = new IdentityHashStore<ByteBuffer, Object>(0);
+    }
+    return extendedReadBuffers;
+  }
 
   public static class ReadStatistics {
     public ReadStatistics() {
@@ -236,7 +243,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   private final ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes =
              new ConcurrentHashMap<DatanodeInfo, DatanodeInfo>();
 
-  private final byte[] oneByteBuf = new byte[1]; // used for 'int read()'
+  private byte[] oneByteBuf; // used for 'int read()'
 
   void addToDeadNodes(DatanodeInfo dnInfo) {
     deadNodes.put(dnInfo, dnInfo);
@@ -670,7 +677,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
     }
     dfsClient.checkOpen();
 
-    if (!extendedReadBuffers.isEmpty()) {
+    if ((extendedReadBuffers != null) && (!extendedReadBuffers.isEmpty())) {
       final StringBuilder builder = new StringBuilder();
      extendedReadBuffers.visitAll(new IdentityHashStore.Visitor<ByteBuffer, Object>() {
         private String prefix = "";
@@ -690,6 +697,9 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
 
   @Override
   public synchronized int read() throws IOException {
+    if (oneByteBuf == null) {
+      oneByteBuf = new byte[1];
+    }
     int ret = read( oneByteBuf, 0, 1 );
     return ( ret <= 0 ) ? -1 : (oneByteBuf[0] & 0xff);
   }
@@ -1708,7 +1718,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
     }
     buffer = ByteBufferUtil.fallbackRead(this, bufferPool, maxLength);
     if (buffer != null) {
-      extendedReadBuffers.put(buffer, bufferPool);
+      getExtendedReadBuffers().put(buffer, bufferPool);
     }
     return buffer;
   }
@@ -1787,7 +1797,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
       buffer = clientMmap.getMappedByteBuffer().asReadOnlyBuffer();
       buffer.position((int)blockPos);
       buffer.limit((int)(blockPos + length));
-      extendedReadBuffers.put(buffer, clientMmap);
+      getExtendedReadBuffers().put(buffer, clientMmap);
       synchronized (infoLock) {
         readStatistics.addZeroCopyBytes(length);
       }
@@ -1808,7 +1818,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   @Override
   public synchronized void releaseBuffer(ByteBuffer buffer) {
     if (buffer == EMPTY_BUFFER) return;
-    Object val = extendedReadBuffers.remove(buffer);
+    Object val = getExtendedReadBuffers().remove(buffer);
     if (val == null) {
       throw new IllegalArgumentException("tried to release a buffer " +
           "that was not created by this stream, " + buffer);


Mime
View raw message