hadoop-common-commits mailing list archives

From st...@apache.org
Subject svn commit: r569446 - in /lucene/hadoop/trunk/src/contrib/hbase: ./ src/java/org/apache/hadoop/hbase/
Date Fri, 24 Aug 2007 16:24:41 GMT
Author: stack
Date: Fri Aug 24 09:24:40 2007
New Revision: 569446

URL: http://svn.apache.org/viewvc?rev=569446&view=rev
Log:
HADOOP-1776 Fix for sporadic compaction failures closing and moving compaction
result

M src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java
    Minor fix of a log message.
M src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
    (COMPACTION_DIR, WORKING_COMPACTION): Removed.
    (compactdir): Renamed compactionDir.
    Removed the constructor check for a compaction that was left undone.
    Instead, just ignore it.  When the compaction reruns, whatever was left
    on the filesystem is cleaned up and the compaction is redone from
    scratch.  (The likelihood of a crash mid-compaction in exactly the
    window where the compaction was recoverable is low -- it is more robust
    to simply redo the compaction.)
    (compactHelper): We were deleting the HBaseRoot/compaction.tmp dir
    after a compaction completed.  That is usually fine, but on a cluster
    of more than one machine, if two compactions were near-concurrent, one
    machine could remove the compaction working directory while another
    was midway through its compaction.  The result was odd failures when
    closing the compaction result file, moving it into place, or
    subsequently trying to open a reader on it (see HADOOP-1765).
    (getFilesToCompact): Added.
    (processReadyCompaction): Added.  Reorganized compaction so that the
    window during which data loss is possible is narrowed, and even then
    we log a message describing how a restore might be performed manually
    (TODO: Add a repair tool, e.g. a region fsck tool).
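
    For illustration, here is a minimal sketch of the reorganized flow,
    assuming Hadoop's FileSystem API.  Helper names such as compactInto and
    markDone are hypothetical; the authoritative changes are in the
    HStore.java diff below.

    import java.io.IOException;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class CompactionFlowSketch {
      static final Log LOG = LogFactory.getLog(CompactionFlowSketch.class);

      void compact(FileSystem fs, Path curCompactStore) throws IOException {
        // A crashed compaction is not resumed: wipe whatever it left behind
        // and redo the whole compaction from scratch.
        if (fs.exists(curCompactStore) && !fs.delete(curCompactStore)) {
          LOG.warn("Delete returned false on " + curCompactStore);
        }
        try {
          if (!fs.mkdirs(curCompactStore)) {
            LOG.warn("Mkdir on " + curCompactStore + " failed");
          }
          compactInto(curCompactStore);  // write the compacted MapFile
          markDone(curCompactStore);     // write the COMPACTION_DONE marker last
          processReadyCompaction();      // move the result into place
        } finally {
          // Delete only this region's subdirectory of the shared compaction
          // dir; removing the whole dir could break a near-concurrent
          // compaction of a different region running on another host.
          Path parent = curCompactStore.getParent();
          if (fs.exists(parent) && !fs.delete(parent)) {
            LOG.warn("Delete returned false deleting " + parent);
          }
        }
      }

      // Hypothetical stand-ins for the real HStore internals.
      void compactInto(Path workingDir) throws IOException { }
      void markDone(Path workingDir) throws IOException { }
      void processReadyCompaction() throws IOException { }
    }

    Writing the COMPACTION_DONE marker only after the compacted file is
    complete is what lets a restarted HStore distinguish a finished
    compaction from an interrupted one.
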
M src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
    (rename): More checking around rename to verify it was successful.
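
    A minimal sketch of the extra checking, assuming Hadoop's FileSystem API
    (checkedRename is a hypothetical name; the real logic is in
    HStoreFile.rename in the diff below):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class RenameSketch {
      static final Log LOG = LogFactory.getLog(RenameSketch.class);

      /** Fail loudly if the source is missing; otherwise report whether the
       *  rename actually succeeded instead of assuming it did. */
      static boolean checkedRename(FileSystem fs, Path src, Path dst)
      throws IOException {
        if (!fs.exists(src)) {
          throw new FileNotFoundException(src.toString());
        }
        boolean success = fs.rename(src, dst);
        if (!success) {
          LOG.warn("Failed rename of " + src + " to " + dst);
        }
        return success;
      }
    }
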
M src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java
    An empty log gives HLog trouble.  Added handling.
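
    A minimal sketch of the guard, assuming the FileSystem and SequenceFile
    APIs of the time (splitLogs is a hypothetical name; the actual change is
    in the HLog.java diff below):

    import java.io.IOException;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;

    class LogSplitSketch {
      static final Log LOG = LogFactory.getLog(LogSplitSketch.class);

      static void splitLogs(FileSystem fs, Path[] logfiles, Configuration conf)
      throws IOException {
        for (int i = 0; i < logfiles.length; i++) {
          // SequenceFile.Reader cannot cope with an empty file, so skip
          // zero-length logs instead of failing the whole split.
          if (fs.getFileStatus(logfiles[i]).getLen() <= 0) {
            LOG.warn("Skipping " + logfiles[i] + " because zero length");
            continue;
          }
          SequenceFile.Reader in = new SequenceFile.Reader(fs, logfiles[i], conf);
          try {
            // ... replay edits from this log file ...
          } finally {
            in.close();
          }
        }
      }
    }
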
M src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
    Cleanup of debug-level logging.
M src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
    Minor javadoc fixes and changed a log message from info to debug.

Modified:
    lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java

Modified: lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt?rev=569446&r1=569445&r2=569446&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt Fri Aug 24 09:24:40 2007
@@ -16,6 +16,8 @@
     HADOOP-1729 Recent renaming or META tables breaks hbase shell
     HADOOP-1730 unexpected null value causes META scanner to exit (silently)
     HADOOP-1747 On a cluster, on restart, regions multiply assigned
+    HADOOP-1776 Fix for sporadic compaction failures closing and moving
+    compaction result
 
   IMPROVEMENTS
     HADOOP-1737 Make HColumnDescriptor data publically members settable

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java?rev=569446&r1=569445&r2=569446&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java Fri Aug 24 09:24:40 2007
@@ -679,7 +679,7 @@
 
               // We found at least one server for the table and now we're done.
               if (LOG.isDebugEnabled()) {
-                LOG.debug("Found " + servers.size() + " server(s) for " +
+                LOG.debug("Found " + servers.size() + " region(s) for " +
                   tableName + " at " + t);
               }
               break;

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java?rev=569446&r1=569445&r2=569446&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java Fri Aug 24 09:24:40 2007
@@ -40,8 +40,8 @@
  * 
  * <p>Each HRegion is identified by a unique long <code>int</code>. HRegions do
  * not need to declare themselves before using the HLog; they simply include
- * their HRegion-id in the {@link #append(Text, Text, Text, TreeMap, long)} or 
- * {@link #completeCacheFlush(Text, Text, long)} calls.
+ * their HRegion-id in the <code>append</code> or 
+ * <code>completeCacheFlush</code> calls.
  *
  * <p>An HLog consists of multiple on-disk files, which have a chronological
  * order. As data is flushed to other (better) on-disk structures, the log
@@ -106,6 +106,12 @@
       for(int i = 0; i < logfiles.length; i++) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Splitting " + logfiles[i]);
+        }
+        // Check for empty file.
+        if (fs.getFileStatus(logfiles[i]).getLen() <= 0) {
+          LOG.warn("Skipping " + logfiles[i].toString() +
+            " because zero length");
+          continue;
         }
         SequenceFile.Reader in =
           new SequenceFile.Reader(fs, logfiles[i], conf);

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java?rev=569446&r1=569445&r2=569446&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java Fri Aug 24 09:24:40 2007
@@ -305,10 +305,6 @@
         SortedMap<Text, byte[]> rowContent)
     throws IOException {
       boolean result = false;
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Checking " + parent.getRegionName() +
-          " to see if daughter splits still hold references");
-      }
 
       boolean hasReferencesA = hasReferences(metaRegionName, srvr,
           parent.getRegionName(), rowContent, COL_SPLITA);
@@ -318,7 +314,6 @@
       if (!hasReferencesA && !hasReferencesB) {
         LOG.info("Deleting region " + parent.getRegionName() +
         " because daughter splits no longer hold references");
-
         if (!HRegion.deleteRegion(fs, dir, parent.getRegionName())) {
           LOG.warn("Deletion of " + parent.getRegionName() + " failed");
         }
@@ -330,11 +325,11 @@
         b.delete(lockid, COL_STARTCODE);
         srvr.batchUpdate(metaRegionName, System.currentTimeMillis(), b);
         result = true;
-      }
-      
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Done checking " + parent.getRegionName() + ": splitA: " +
-            hasReferencesA + ", splitB: "+ hasReferencesB);
+      } else if (LOG.isDebugEnabled()) {
+        // If debug, note we checked and current state of daughters.
+        LOG.debug("Checked " + parent.getRegionName() +
+          " for references: splitA: " + hasReferencesA + ", splitB: "+
+          hasReferencesB);
       }
       return result;
     }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java?rev=569446&r1=569445&r2=569446&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java Fri Aug 24 09:24:40 2007
@@ -649,7 +649,7 @@
       for (HStore store: stores.values()) {
         if (store.needsCompaction()) {
           needsCompaction = true;
-          LOG.info(store.toString() + " needs compaction");
+          LOG.debug(store.toString() + " needs compaction");
           break;
         }
       }
@@ -1628,7 +1628,7 @@
   /**
    * Computes the Path of the HRegion
    * 
-   * @param dir parent directory
+   * @param dir hbase home directory
    * @param regionName name of the region
    * @return Path of HRegion directory
    */

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java?rev=569446&r1=569445&r2=569446&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java Fri Aug 24 09:24:40 2007
@@ -62,8 +62,6 @@
 class HStore implements HConstants {
   static final Log LOG = LogFactory.getLog(HStore.class);
 
-  static final String COMPACTION_DIR = "compaction.tmp";
-  static final String WORKING_COMPACTION = "compaction.inprogress";
   static final String COMPACTION_TO_REPLACE = "toreplace";    
   static final String COMPACTION_DONE = "done";
   
@@ -77,11 +75,11 @@
   FileSystem fs;
   Configuration conf;
   Path mapdir;
-  Path compactdir;
   Path loginfodir;
   Path filterDir;
   Filter bloomFilter;
   private String storeName;
+  private final Path compactionDir;
 
   Integer compactLock = new Integer(0);
   Integer flushLock = new Integer(0);
@@ -133,6 +131,7 @@
       FileSystem fs, Path reconstructionLog, Configuration conf)
   throws IOException {  
     this.dir = dir;
+    this.compactionDir = new Path(dir, "compaction.dir");
     this.regionName = regionName;
     this.family = family;
     this.familyName = HStoreKey.extractFamily(this.family.getName());
@@ -172,17 +171,6 @@
           " (no reconstruction log)": " with reconstruction log: " +
           reconstructionLog.toString()));
     }
-    
-    // Either restart or get rid of any leftover compaction work.  Either way, 
-    // by the time processReadyCompaction() returns, we can get rid of the 
-    // existing compaction-dir.
-    this.compactdir = new Path(dir, COMPACTION_DIR);
-    Path curCompactStore =
-      HStoreFile.getHStoreDir(compactdir, regionName, familyName);
-    if(fs.exists(curCompactStore)) {
-      processReadyCompaction();
-      fs.delete(curCompactStore);
-    }
 
     // Go through the 'mapdir' and 'loginfodir' together, make sure that all 
     // MapFiles are in a reliable state.  Every entry in 'mapdir' must have a 
@@ -409,7 +397,7 @@
       this.readers.clear();
       result = new Vector<HStoreFile>(storefiles.values());
       this.storefiles.clear();
-      LOG.info("closed " + this.storeName);
+      LOG.debug("closed " + this.storeName);
       return result;
     } finally {
       this.lock.releaseWriteLock();
@@ -563,7 +551,7 @@
   throws IOException {
     compactHelper(deleteSequenceInfo, -1);
   }
-  
+
   /* 
    * @param deleteSequenceInfo True if we are to set the sequence number to -1
    * on compacted file.
@@ -577,23 +565,22 @@
     long maxId = maxSeenSeqID;
     synchronized(compactLock) {
       Path curCompactStore =
-        HStoreFile.getHStoreDir(compactdir, regionName, familyName);
+        HStoreFile.getHStoreDir(this.compactionDir, regionName, familyName);
       if(LOG.isDebugEnabled()) {
         LOG.debug("started compaction of " + storefiles.size() + " files in " +
           curCompactStore.toString());
       }
-      try {
-        // Grab a list of files to compact.
-        Vector<HStoreFile> toCompactFiles = null;
-        this.lock.obtainWriteLock();
-        try {
-          toCompactFiles = new Vector<HStoreFile>(storefiles.values());
-        } finally {
-          this.lock.releaseWriteLock();
+      if (this.fs.exists(curCompactStore)) {
+        LOG.warn("Cleaning up a previous incomplete compaction at " +
+          curCompactStore.toString());
+        if (!this.fs.delete(curCompactStore)) {
+          LOG.warn("Deleted returned false on " + curCompactStore.toString());
         }
-
+      }
+      try {
+        Vector<HStoreFile> toCompactFiles = getFilesToCompact();
         HStoreFile compactedOutputFile =
-          new HStoreFile(conf, compactdir, regionName, familyName, -1);
+          new HStoreFile(conf, this.compactionDir, regionName, familyName, -1);
         if (toCompactFiles.size() < 1 ||
             (toCompactFiles.size() == 1 &&
               !toCompactFiles.get(0).isReference())) {
@@ -606,7 +593,9 @@
           return;
         }
         
-        fs.mkdirs(curCompactStore);
+        if (!fs.mkdirs(curCompactStore)) {
+          LOG.warn("Mkdir on " + curCompactStore.toString() + " failed");
+        }
         
         // Compute the max-sequenceID seen in any of the to-be-compacted
         // TreeMaps if it hasn't been passed in to us.
@@ -657,14 +646,32 @@
         // Move the compaction into place.
         processReadyCompaction();
       } finally {
-        if (fs.exists(compactdir)) {
-          fs.delete(compactdir);
+        // Clean up the parent -- the region dir in the compactions directory.
+        if (this.fs.exists(curCompactStore.getParent())) {
+          if (!this.fs.delete(curCompactStore.getParent())) {
+            LOG.warn("Delete returned false deleting " +
+              curCompactStore.getParent().toString());
+          }
         }
       }
     }
   }
   
   /*
+   * @return list of files to compact
+   */
+  private Vector<HStoreFile> getFilesToCompact() {
+    Vector<HStoreFile> toCompactFiles = null;
+    this.lock.obtainWriteLock();
+    try {
+      toCompactFiles = new Vector<HStoreFile>(storefiles.values());
+    } finally {
+      this.lock.releaseWriteLock();
+    }
+    return toCompactFiles;
+  }
+  
+  /*
    * Compact passed <code>toCompactFiles</code> into <code>compactedOut</code>.

    * We create a new set of MapFile.Reader objects so we don't screw up 
    * the caching associated with the currently-loaded ones. Our
@@ -886,33 +893,34 @@
     }
   }
 
-  /**
+  /*
    * It's assumed that the compactLock  will be acquired prior to calling this 
    * method!  Otherwise, it is not thread-safe!
    *
    * It works by processing a compaction that's been written to disk.
    * 
-   * It is usually invoked at the end of a compaction, but might also be
+   * <p>It is usually invoked at the end of a compaction, but might also be
    * invoked at HStore startup, if the prior execution died midway through.
+   * 
+   * <p>Moving the compacted TreeMap into place means:
+   * <pre>
+   * 1) Acquiring the write-lock
+   * 2) Figuring out what MapFiles are going to be replaced
+   * 3) Moving the new compacted MapFile into place
+   * 4) Unloading all the replaced MapFiles.
+   * 5) Deleting all the old MapFile files.
+   * 6) Loading the new TreeMap.
+   * 7) Releasing the write-lock
+   * </pre>
    */
   void processReadyCompaction() throws IOException {
-    // Move the compacted TreeMap into place.
-    // That means:
-    // 1) Acquiring the write-lock
-    // 2) Figuring out what MapFiles are going to be replaced
-    // 3) Unloading all the replaced MapFiles.
-    // 4) Deleting all the old MapFile files.
-    // 5) Moving the new MapFile into place
-    // 6) Loading the new TreeMap.
-    // 7) Releasing the write-lock
-
     // 1. Acquiring the write-lock
     Path curCompactStore =
-      HStoreFile.getHStoreDir(compactdir, regionName, familyName);
+      HStoreFile.getHStoreDir(this.compactionDir, regionName, familyName);
     this.lock.obtainWriteLock();
     try {
       Path doneFile = new Path(curCompactStore, COMPACTION_DONE);
-      if(!fs.exists(doneFile)) {
+      if (!fs.exists(doneFile)) {
         // The last execution didn't finish the compaction, so there's nothing 
         // we can do.  We'll just have to redo it. Abandon it and return.
         LOG.warn("Redoing a failed compaction");
@@ -920,7 +928,6 @@
       }
 
       // 2. Load in the files to be deleted.
-      //    (Figuring out what MapFiles are going to be replaced)
       Vector<HStoreFile> toCompactFiles = new Vector<HStoreFile>();
       Path filesToReplace = new Path(curCompactStore, COMPACTION_TO_REPLACE);
       DataInputStream in = new DataInputStream(fs.open(filesToReplace));
@@ -936,41 +943,16 @@
         in.close();
       }
 
-      // 3. Unload all the replaced MapFiles.  Do it by getting keys of all
-      // to remove.  Then cycling on keys, removing, closing and deleting.
-      
-      // What if we crash at this point?  No big deal; we will restart
-      // processReadyCompaction(), and nothing has been lost.
-      Vector<Long> keys = new Vector<Long>(toCompactFiles.size());
-      for(Map.Entry<Long, HStoreFile> e: storefiles.entrySet()) {
-        if(toCompactFiles.contains(e.getValue())) {
-          keys.add(e.getKey());
-        }
-      }
-
-      Vector<HStoreFile> toDelete = new Vector<HStoreFile>(keys.size());
-      for (Long key: keys) {
-        MapFile.Reader reader = this.readers.remove(key);
-        if (reader != null) {
-          reader.close();
-        }
-        HStoreFile hsf = this.storefiles.remove(key);
-        // 4. Add to the toDelete files all old files, no longer needed
-        toDelete.add(hsf);
-      }
-      
-      // What if we fail now?  The above deletes will fail silently. We'd
-      // better make sure not to write out any new files with the same names as 
-      // something we delete, though.
-
-      // 5. Moving the new MapFile into place
+      // 3. Moving the new MapFile into place.
       HStoreFile compactedFile 
-        = new HStoreFile(conf, compactdir, regionName, familyName, -1);
+        = new HStoreFile(conf, this.compactionDir, regionName, familyName, -1);
+      // obtainNewHStoreFile does its best to generate a filename that does not
+      // currently exist.
       HStoreFile finalCompactedFile 
         = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, familyName, fs);
       if(LOG.isDebugEnabled()) {
         LOG.debug("moving " + compactedFile.toString() + " in " +
-          compactdir.toString() +
+            this.compactionDir.toString() +
           " to " + finalCompactedFile.toString() + " in " + dir.toString());
       }
       if (!compactedFile.rename(this.fs, finalCompactedFile)) {
@@ -978,24 +960,37 @@
           finalCompactedFile.toString());
         return;
       }
-      
-      // Safe to delete now compaction has been moved into place.
-      for (HStoreFile hsf: toDelete) {
-        if (hsf.getFileId() == finalCompactedFile.getFileId()) {
-          // Be careful we do not delte the just compacted file.
-          LOG.warn("Weird. File to delete has same name as one we are " +
-            "about to delete (skipping): " + hsf.getFileId());
+
+      // 4. and 5. Unload all the replaced MapFiles, close and delete.
+      Vector<Long> toDelete = new Vector<Long>(toCompactFiles.size());
+      for (Map.Entry<Long, HStoreFile> e: this.storefiles.entrySet()) {
+        if (!toCompactFiles.contains(e.getValue())) {
           continue;
         }
-        hsf.delete();
+        Long key = e.getKey();
+        MapFile.Reader reader = this.readers.remove(key);
+        if (reader != null) {
+          reader.close();
+        }
+        toDelete.add(key);
       }
+      
+      try {
+        for (Long key: toDelete) {
+          HStoreFile hsf = this.storefiles.remove(key);
+          hsf.delete();
+        }
 
-      Long orderVal = Long.valueOf(finalCompactedFile.loadInfo(fs));
-
-      // 6. Loading the new TreeMap.
-      this.readers.put(orderVal,
-        finalCompactedFile.getReader(this.fs, this.bloomFilter));
-      this.storefiles.put(orderVal, finalCompactedFile);
+        // 6. Loading the new TreeMap.
+        Long orderVal = Long.valueOf(finalCompactedFile.loadInfo(fs));
+        this.readers.put(orderVal,
+            finalCompactedFile.getReader(this.fs, this.bloomFilter));
+        this.storefiles.put(orderVal, finalCompactedFile);
+      } finally {
+        LOG.warn("Failed replacing compacted files.  Compacted fle is " +
+          finalCompactedFile.toString() + ".  Files replaced are " +
+          toCompactFiles.toString() + " some of which may have been removed");
+      }
     } finally {
       // 7. Releasing the write-lock
       this.lock.releaseWriteLock();

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java?rev=569446&r1=569445&r2=569446&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java Fri Aug 24 09:24:40 2007
@@ -23,6 +23,7 @@
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.Random;
@@ -608,7 +609,6 @@
     try {
       out.writeByte(INFO_SEQ_NUM);
       out.writeLong(infonum);
-      
     } finally {
       out.close();
     }
@@ -637,16 +637,22 @@
    */
   public boolean rename(final FileSystem fs, final HStoreFile hsf)
   throws IOException {
-    boolean success = fs.rename(getMapFilePath(), hsf.getMapFilePath());
+    Path src = getMapFilePath();
+    if (!fs.exists(src)) {
+      throw new FileNotFoundException(src.toString());
+    }
+    boolean success = fs.rename(src, hsf.getMapFilePath());
     if (!success) {
-      LOG.warn("Failed rename of " + getMapFilePath() + " to " +
-        hsf.getMapFilePath());
+      LOG.warn("Failed rename of " + src + " to " + hsf.getMapFilePath());
       return success;
     }
-    success = fs.rename(getInfoFilePath(), hsf.getInfoFilePath());
+    src = getInfoFilePath();
+    if (!fs.exists(src)) {
+      throw new FileNotFoundException(src.toString());
+    }
+    success = fs.rename(src, hsf.getInfoFilePath());
     if (!success) {
-      LOG.warn("Failed rename of " + getInfoFilePath() + " to " +
-        hsf.getInfoFilePath());
+      LOG.warn("Failed rename of " + src + " to " + hsf.getInfoFilePath());
     }
     return success;
   }


