incubator-blur-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From amccu...@apache.org
Subject [2/2] git commit: Fixing issue with Hadoop 1 HDFS where the metadata is not replaced in the namenode in a timely manner when using the sync method.
Date Sat, 11 Apr 2015 14:23:29 GMT
Fixing issue with Hadoop 1 HDFS where the metadata is not replaced in the namenode in a timely
manner when using the sync method.


Project: http://git-wip-us.apache.org/repos/asf/incubator-blur/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-blur/commit/ec4b5260
Tree: http://git-wip-us.apache.org/repos/asf/incubator-blur/tree/ec4b5260
Diff: http://git-wip-us.apache.org/repos/asf/incubator-blur/diff/ec4b5260

Branch: refs/heads/master
Commit: ec4b5260e3ec1561ac001d8564104d2302ac88ac
Parents: 19749a3
Author: Aaron McCurry <amccurry@gmail.com>
Authored: Sat Apr 11 10:22:11 2015 -0400
Committer: Aaron McCurry <amccurry@gmail.com>
Committed: Sat Apr 11 10:22:11 2015 -0400

----------------------------------------------------------------------
 .../java/org/apache/blur/store/hdfs/HdfsDirectory.java  | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/ec4b5260/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsDirectory.java
----------------------------------------------------------------------
diff --git a/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsDirectory.java b/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsDirectory.java
index 90eff3f..0ff4ab4 100644
--- a/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsDirectory.java
+++ b/blur-store/src/main/java/org/apache/blur/store/hdfs/HdfsDirectory.java
@@ -140,7 +140,7 @@ public class HdfsDirectory extends Directory implements LastModified, HdfsSymlin
   protected final Map<String, Boolean> _copyFileMap = new ConcurrentHashMap<String, Boolean>();
   protected final Map<String, Path> _copyFilePathMap = new ConcurrentHashMap<String, Path>();
   protected final Map<String, FSDataInputRandomAccess> _inputMap = new ConcurrentHashMap<String, FSDataInputRandomAccess>();
-  protected final boolean _useCache = false;
+  protected final boolean _useCache = true;
   protected final boolean _asyncClosing;
   protected final SequentialReadControl _sequentialReadControl;
 
@@ -276,7 +276,9 @@ public class HdfsDirectory extends Directory implements LastModified, HdfsSymlin
     if (fileExists(name)) {
       deleteFile(name);
     }
-    _fileStatusMap.put(name, new FStat(System.currentTimeMillis(), 0L));
+    if (_useCache) {
+      _fileStatusMap.put(name, new FStat(System.currentTimeMillis(), 0L));
+    }
     final FSDataOutputStream outputStream = openForOutput(name);
     return new BufferedIndexOutput() {
 
@@ -297,10 +299,12 @@ public class HdfsDirectory extends Directory implements LastModified, HdfsSymlin
       @Override
       public void close() throws IOException {
         super.close();
-        _fileStatusMap.put(name, new FStat(System.currentTimeMillis(), outputStream.getPos()));
+        if (_useCache) {
+          _fileStatusMap.put(name, new FStat(System.currentTimeMillis(), outputStream.getPos()));
+        }
         // This exists because HDFS is so slow to close files. There are
         // built-in sleeps during the close call.
-        if (_asyncClosing) {
+        if (_asyncClosing && _useCache) {
           outputStream.sync();
           CLOSING_QUEUE.add(outputStream);
         } else {


Mime
View raw message