hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mberto...@apache.org
Subject svn commit: r1460617 [1/2] - in /hbase/branches/0.95/hbase-server/src: main/java/org/apache/hadoop/hbase/backup/ main/java/org/apache/hadoop/hbase/master/ main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/hadoop/hbase/snapshot/ main/...
Date Mon, 25 Mar 2013 11:15:37 GMT
Author: mbertozzi
Date: Mon Mar 25 11:15:36 2013
New Revision: 1460617

URL: http://svn.apache.org/r1460617
Log:
HBASE-7809 Refactor Split/Merge to use HRegionFileSystem

Modified:
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java?rev=1460617&r1=1460616&r2=1460617&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java Mon Mar 25 11:15:36 2013
@@ -266,35 +266,6 @@ public class HFileArchiver {
   }
 
   /**
-   * Archive the store file
-   * @param fs the filesystem where the store files live
-   * @param regionInfo region hosting the store files
-   * @param conf {@link Configuration} to examine to determine the archive directory
-   * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
-   * @param family the family hosting the store files
-   * @param storeFile file to be archived
-   * @throws IOException if the files could not be correctly disposed.
-   */
-  public static void archiveStoreFile(FileSystem fs, HRegionInfo regionInfo,
-      Configuration conf, Path tableDir, byte[] family, Path storeFile) throws IOException {
-    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
-    // make sure we don't archive if we can't and that the archive dir exists
-    if (!fs.mkdirs(storeArchiveDir)) {
-      throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
-          + Bytes.toString(family) + ", deleting compacted files instead.");
-    }
-
-    // do the actual archive
-    long start = EnvironmentEdgeManager.currentTimeMillis();
-    File file = new FileablePath(fs, storeFile);
-    if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
-      throw new IOException("Failed to archive/delete the file for region:"
-          + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family)
-          + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
-    }
-  }
-
-  /**
    * Archive the given files and resolve any conflicts with existing files via appending the time
    * archiving started (so all conflicts in the same group have the same timestamp appended).
    * <p>

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java?rev=1460617&r1=1460616&r2=1460617&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java Mon Mar 25 11:15:36 2013
@@ -30,10 +30,8 @@ import java.util.concurrent.atomic.Atomi
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -44,11 +42,8 @@ import org.apache.hadoop.hbase.backup.HF
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Triple;
@@ -187,8 +182,18 @@ public class CatalogJanitor extends Chor
       final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
     FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
     Path rootdir = this.services.getMasterFileSystem().getRootDir();
-    HTableDescriptor htd = getTableDescriptor(mergedRegion.getTableName());
-    if (!HRegion.hasReferences(fs, rootdir, mergedRegion, htd)) {
+    Path tabledir = HTableDescriptor.getTableDir(rootdir,
+        mergedRegion.getTableName());
+    HTableDescriptor htd = getTableDescriptor(mergedRegion
+        .getTableNameAsString());
+    HRegionFileSystem regionFs = null;
+    try {
+      regionFs = HRegionFileSystem.openRegionFromFileSystem(
+          this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
+    } catch (IOException e) {
+      LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
+    }
+    if (regionFs == null || !regionFs.hasReferences(htd)) {
       LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
           + regionB.getRegionNameAsString()
           + " from fs because merged region no longer holds references");
@@ -328,10 +333,8 @@ public class CatalogJanitor extends Chor
     }
     // Run checks on each daughter split.
     PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
-    Pair<Boolean, Boolean> a =
-      checkDaughterInFs(parent, daughters.getFirst());
-    Pair<Boolean, Boolean> b =
-      checkDaughterInFs(parent, daughters.getSecond());
+    Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
+    Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
     if (hasNoReferences(a) && hasNoReferences(b)) {
       LOG.debug("Deleting region " + parent.getRegionNameAsString() +
         " because daughter splits no longer hold references");
@@ -386,46 +389,36 @@ public class CatalogJanitor extends Chor
    */
   Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
   throws IOException {
-    boolean references = false;
-    boolean exists = false;
     if (daughter == null)  {
       return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
     }
+
     FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
     Path rootdir = this.services.getMasterFileSystem().getRootDir();
-    Path tabledir = new Path(rootdir, daughter.getTableNameAsString());
-    Path regiondir = new Path(tabledir, daughter.getEncodedName());
-    exists = fs.exists(regiondir);
-    if (!exists) {
-      LOG.warn("Daughter regiondir does not exist: " + regiondir.toString());
-      return new Pair<Boolean, Boolean>(exists, Boolean.FALSE);
+    Path tabledir = HTableDescriptor.getTableDir(rootdir, daughter.getTableName());
+
+    HRegionFileSystem regionFs = null;
+    try {
+      regionFs = HRegionFileSystem.openRegionFromFileSystem(
+          this.services.getConfiguration(), fs, tabledir, daughter, true);
+    } catch (IOException e) {
+      LOG.warn("Daughter region does not exist: " + daughter.getEncodedName());
+      return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
     }
-    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());
 
+    boolean references = false;
+    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableNameAsString());
     for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
-      Path p = HStore.getStoreHomedir(tabledir, daughter, family.getName());
-      if (!fs.exists(p)) continue;
-      // Look for reference files.  Call listStatus with anonymous instance of PathFilter.
-      FileStatus [] ps = FSUtils.listStatus(fs, p,
-          new PathFilter () {
-            public boolean accept(Path path) {
-              return StoreFileInfo.isReference(path);
-            }
-          }
-      );
-
-      if (ps != null && ps.length > 0) {
-        references = true;
+      if ((references = regionFs.hasReferences(family.getNameAsString()))) {
         break;
       }
     }
-    return new Pair<Boolean, Boolean>(Boolean.valueOf(exists),
-      Boolean.valueOf(references));
+    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
   }
 
-  private HTableDescriptor getTableDescriptor(byte[] tableName)
-  throws FileNotFoundException, IOException {
-    return this.services.getTableDescriptors().get(Bytes.toString(tableName));
+  private HTableDescriptor getTableDescriptor(final String tableName)
+      throws FileNotFoundException, IOException {
+    return this.services.getTableDescriptors().get(tableName);
   }
 
   /**

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1460617&r1=1460616&r2=1460617&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Mon Mar 25 11:15:36 2013
@@ -80,7 +80,6 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -220,12 +219,6 @@ public class HRegion implements HeapSize
   // TODO: account for each registered handler in HeapSize computation
   private Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();
 
-  //These variable are just used for getting data out of the region, to test on
-  //client side
-  // private int numStores = 0;
-  // private int [] storeSize = null;
-  // private byte [] name = null;
-
   public final AtomicLong memstoreSize = new AtomicLong(0);
 
   // Debug possible data loss due to WAL off
@@ -578,7 +571,7 @@ public class HRegion implements HeapSize
     // Get rid of any splits or merges that were lost in-progress.  Clean out
     // these directories here on open.  We may be opening a region that was
     // being split but we crashed in the middle of it all.
-    SplitTransaction.cleanupAnySplitDetritus(this);
+    fs.cleanupAnySplitDetritus();
     fs.cleanupMergesDir();
 
     this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
@@ -668,16 +661,14 @@ public class HRegion implements HeapSize
     mvcc.initialize(maxMemstoreTS + 1);
     // Recover any edits if available.
     maxSeqId = Math.max(maxSeqId, replayRecoveredEditsIfAny(
-        this.getRegionDir(), maxSeqIdInStores, reporter, status));
+        this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
 
     status.setStatus("Cleaning up detritus from prior splits");
     // Get rid of any splits or merges that were lost in-progress.  Clean out
     // these directories here on open.  We may be opening a region that was
     // being split but we crashed in the middle of it all.
-    SplitTransaction.cleanupAnySplitDetritus(this);
-    RegionMergeTransaction.cleanupMergeDir(this.getFilesystem(),
-        RegionMergeTransaction.getMergeDir(this));
-    FSUtils.deleteDirectory(this.getFilesystem(), new Path(this.getRegionDir(), MERGEDIR));
+    fs.cleanupAnySplitDetritus();
+    fs.cleanupMergesDir();
     this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
 
     this.writestate.flushRequested = false;
@@ -888,7 +879,7 @@ public class HRegion implements HeapSize
    *
    * @throws IOException e
    */
-  public List<StoreFile> close() throws IOException {
+  public Map<byte[], List<StoreFile>> close() throws IOException {
     return close(false);
   }
 
@@ -908,7 +899,7 @@ public class HRegion implements HeapSize
    *
    * @throws IOException e
    */
-  public List<StoreFile> close(final boolean abort) throws IOException {
+  public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
     // Only allow one thread to close at a time. Serialize them so dual
     // threads attempting to close will run up against each other.
     MonitoredTask status = TaskMonitor.get().createStatus(
@@ -925,9 +916,8 @@ public class HRegion implements HeapSize
     }
   }
 
-  private List<StoreFile> doClose(
-      final boolean abort, MonitoredTask status)
-  throws IOException {
+  private Map<byte[], List<StoreFile>> doClose(final boolean abort, MonitoredTask status)
+      throws IOException {
     if (isClosed()) {
       LOG.warn("Region " + this + " already closed");
       return null;
@@ -973,28 +963,35 @@ public class HRegion implements HeapSize
         internalFlushcache(status);
       }
 
-      List<StoreFile> result = new ArrayList<StoreFile>();
+      Map<byte[], List<StoreFile>> result =
+        new TreeMap<byte[], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
       if (!stores.isEmpty()) {
         // initialize the thread pool for closing stores in parallel.
         ThreadPoolExecutor storeCloserThreadPool =
           getStoreOpenAndCloseThreadPool("StoreCloserThread-" + this.getRegionNameAsString());
-        CompletionService<Collection<StoreFile>> completionService =
-          new ExecutorCompletionService<Collection<StoreFile>>(storeCloserThreadPool);
+        CompletionService<Pair<byte[], Collection<StoreFile>>> completionService =
+          new ExecutorCompletionService<Pair<byte[], Collection<StoreFile>>>(storeCloserThreadPool);
 
         // close each store in parallel
         for (final Store store : stores.values()) {
           completionService
-              .submit(new Callable<Collection<StoreFile>>() {
-                public Collection<StoreFile> call() throws IOException {
-                  return store.close();
+              .submit(new Callable<Pair<byte[], Collection<StoreFile>>>() {
+                public Pair<byte[], Collection<StoreFile>> call() throws IOException {
+                  return new Pair<byte[], Collection<StoreFile>>(
+                    store.getFamily().getName(), store.close());
                 }
               });
         }
         try {
           for (int i = 0; i < stores.size(); i++) {
-            Future<Collection<StoreFile>> future = completionService.take();
-            Collection<StoreFile> storeFileList = future.get();
-            result.addAll(storeFileList);
+            Future<Pair<byte[], Collection<StoreFile>>> future = completionService.take();
+            Pair<byte[], Collection<StoreFile>> storeFiles = future.get();
+            List<StoreFile> familyFiles = result.get(storeFiles.getFirst());
+            if (familyFiles == null) {
+              familyFiles = new ArrayList<StoreFile>();
+              result.put(storeFiles.getFirst(), familyFiles);
+            }
+            familyFiles.addAll(storeFiles.getSecond());
           }
         } catch (InterruptedException e) {
           throw new IOException(e);
@@ -1133,11 +1130,6 @@ public class HRegion implements HeapSize
     return this.baseConf;
   }
 
-  /** @return region directory Path */
-  public Path getRegionDir() {
-    return fs.getRegionDir();
-  }
-
   /** @return {@link FileSystem} being used by this region */
   public FileSystem getFilesystem() {
     return fs.getFileSystem();
@@ -2419,7 +2411,7 @@ public class HRegion implements HeapSize
     // 1. dump region meta info into the snapshot directory
     LOG.debug("Storing region-info for snapshot.");
     HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
-        getFilesystem(), snapshotDir, getRegionInfo());
+        this.fs.getFileSystem(), snapshotDir, getRegionInfo());
 
     // 2. iterate through all the stores in the region
     LOG.debug("Creating references for hfiles");
@@ -3361,11 +3353,6 @@ public class HRegion implements HeapSize
     return this.getRegionNameAsString();
   }
 
-  /** @return Path of region base directory */
-  public Path getTableDir() {
-    return this.fs.getTableDir();
-  }
-
   /**
    * RegionScannerImpl is used to combine scanners from multiple Stores (aka column families).
    */
@@ -4096,7 +4083,8 @@ public class HRegion implements HeapSize
    */
   public static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter)
       throws IOException {
-    HRegion r = newHRegion(other.getTableDir(), other.getLog(), other.getFilesystem(),
+    HRegionFileSystem regionFs = other.getRegionFileSystem();
+    HRegion r = newHRegion(regionFs.getTableDir(), other.getLog(), regionFs.getFileSystem(),
         other.baseConf, other.getRegionInfo(), other.getTableDesc(), null);
     return r.openHRegion(reporter);
   }
@@ -4130,22 +4118,14 @@ public class HRegion implements HeapSize
   /**
    * Create a daughter region from given a temp directory with the region data.
    * @param hri Spec. for daughter region to open.
-   * @param daughterTmpDir Directory that contains region files.
    * @throws IOException
    */
-  HRegion createDaughterRegion(final HRegionInfo hri, final Path daughterTmpDir)
-      throws IOException {
-    FileSystem fs = this.fs.getFileSystem();
-    HRegion r = HRegion.newHRegion(this.getTableDir(), this.getLog(), fs,
+  HRegion createDaughterRegionFromSplits(final HRegionInfo hri) throws IOException {
+    HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getLog(), fs.getFileSystem(),
         this.getBaseConf(), hri, this.getTableDesc(), rsServices);
     r.readRequestsCount.set(this.getReadRequestsCount() / 2);
     r.writeRequestsCount.set(this.getWriteRequestsCount() / 2);
-    // Move the tmp dir in the expected location
-    if (daughterTmpDir != null && fs.exists(daughterTmpDir)) {
-      if (!fs.rename(daughterTmpDir, r.getRegionDir())) {
-        LOG.warn("Unable to rename " + daughterTmpDir + " to " + r.getRegionDir());
-      }
-    }
+    fs.commitDaughterRegion(hri);
     return r;
   }
 
@@ -4153,27 +4133,19 @@ public class HRegion implements HeapSize
    * Create a merged region given a temp directory with the region data.
    * @param mergedRegionInfo
    * @param region_b another merging region
-   * @param mergedTmpDir Directory that contains region files.
    * @return merged hregion
    * @throws IOException
    */
   HRegion createMergedRegionFromMerges(final HRegionInfo mergedRegionInfo,
-      final HRegion region_b, final Path mergedTmpDir) throws IOException {
-    FileSystem fs = this.getFilesystem();
-    HRegion r = HRegion.newHRegion(this.getTableDir(), this.getLog(), fs,
-        this.getBaseConf(), mergedRegionInfo, this.getTableDesc(),
-        this.rsServices);
+      final HRegion region_b) throws IOException {
+    HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getLog(),
+        fs.getFileSystem(), this.getBaseConf(), mergedRegionInfo,
+        this.getTableDesc(), this.rsServices);
     r.readRequestsCount.set(this.getReadRequestsCount()
         + region_b.getReadRequestsCount());
     r.writeRequestsCount.set(this.getWriteRequestsCount()
         + region_b.getWriteRequestsCount());
-    // Move the tmp dir in the expected location
-    if (mergedTmpDir != null && fs.exists(mergedTmpDir)) {
-      if (!fs.rename(mergedTmpDir, r.getRegionDir())) {
-        throw new IOException("Unable to rename " + mergedTmpDir + " to "
-            + r.getRegionDir());
-      }
-    }
+    this.fs.commitMergedRegion(mergedRegionInfo);
     return r;
   }
 
@@ -4188,8 +4160,7 @@ public class HRegion implements HeapSize
    * @throws IOException
    */
   // TODO remove since only test and merge use this
-  public static void addRegionToMETA(HRegion meta, HRegion r)
-  throws IOException {
+  public static void addRegionToMETA(final HRegion meta, final HRegion r) throws IOException {
     meta.checkResources();
     // The row key is the region name
     byte[] row = r.getRegionName();
@@ -4247,24 +4218,6 @@ public class HRegion implements HeapSize
   }
 
   /**
-   * Make the directories for a specific column family
-   *
-   * @param fs the file system
-   * @param tabledir base directory where region will live (usually the table dir)
-   * @param hri
-   * @param colFamily the column family
-   * @throws IOException
-   */
-  private static Path makeColumnFamilyDirs(FileSystem fs, Path tabledir,
-    final HRegionInfo hri, byte [] colFamily) throws IOException {
-    Path dir = HStore.getStoreHomedir(tabledir, hri, colFamily);
-    if (!fs.mkdirs(dir)) {
-      LOG.warn("Failed to create " + dir);
-    }
-    return dir;
-  }
-
-  /**
    * Merge two HRegions.  The regions must be adjacent and must not overlap.
    *
    * @param srcA
@@ -4304,37 +4257,35 @@ public class HRegion implements HeapSize
    * @return new merged region
    * @throws IOException
    */
-  public static HRegion merge(HRegion a, HRegion b)
-  throws IOException {
+  public static HRegion merge(final HRegion a, final HRegion b) throws IOException {
     if (!a.getRegionInfo().getTableNameAsString().equals(
         b.getRegionInfo().getTableNameAsString())) {
       throw new IOException("Regions do not belong to the same table");
     }
 
-    FileSystem fs = a.getFilesystem();
+    FileSystem fs = a.getRegionFileSystem().getFileSystem();
 
     // Make sure each region's cache is empty
-
     a.flushcache();
     b.flushcache();
 
     // Compact each region so we only have one store file per family
-
     a.compactStores(true);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for region: " + a);
-      FSUtils.logFileSystemState(fs, a.getRegionDir(), LOG);
+      a.getRegionFileSystem().logFileSystemState(LOG);
     }
     b.compactStores(true);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for region: " + b);
-      FSUtils.logFileSystemState(fs, b.getRegionDir(), LOG);
+      b.getRegionFileSystem().logFileSystemState(LOG);
     }
 
     Configuration conf = a.baseConf;
     HTableDescriptor tabledesc = a.getTableDesc();
     HLog log = a.getLog();
-    Path tableDir = a.getTableDir();
+    Path tableDir = a.getRegionFileSystem().getTableDir();
+
     // Presume both are of same region type -- i.e. both user or catalog
     // table regions.  This way can use comparator.
     final byte[] startKey =
@@ -4360,43 +4311,34 @@ public class HRegion implements HeapSize
          ? b.getEndKey()
          : a.getEndKey());
 
-    HRegionInfo newRegionInfo =
-        new HRegionInfo(tabledesc.getName(), startKey, endKey);
-    LOG.info("Creating new region " + newRegionInfo.toString());
-    String encodedName = newRegionInfo.getEncodedName();
-    Path newRegionDir = HRegion.getRegionDir(a.getTableDir(), encodedName);
-    if(fs.exists(newRegionDir)) {
-      throw new IOException("Cannot merge; target file collision at " +
-          newRegionDir);
-    }
-    fs.mkdirs(newRegionDir);
+    HRegionInfo newRegionInfo = new HRegionInfo(tabledesc.getName(), startKey, endKey);
+
+    LOG.info("Creating new region " + newRegionInfo);
+    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
+        conf, fs, tableDir, newRegionInfo);
 
     LOG.info("starting merge of regions: " + a + " and " + b +
       " into new region " + newRegionInfo.toString() +
         " with start key <" + Bytes.toStringBinary(startKey) + "> and end key <" +
         Bytes.toStringBinary(endKey) + ">");
 
-    // Move HStoreFiles under new region directory
-    Map<byte [], List<StoreFile>> byFamily =
-      new TreeMap<byte [], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
-    byFamily = filesByFamily(byFamily, a.close());
-    byFamily = filesByFamily(byFamily, b.close());
-    for (Map.Entry<byte [], List<StoreFile>> es : byFamily.entrySet()) {
-      byte [] colFamily = es.getKey();
-      Path storeDir = makeColumnFamilyDirs(fs, tableDir, newRegionInfo, colFamily);
-      // Because we compacted the source regions we should have no more than two
-      // HStoreFiles per family and there will be no reference store
-      List<StoreFile> srcFiles = es.getValue();
-      for (StoreFile hsf: srcFiles) {
-        StoreFile.rename(fs, hsf.getPath(), StoreFile.getUniqueFile(fs, storeDir));
-      }
-    }
+    // Because we compacted the source regions we should have no more than two
+    // StoreFiles per family and there will be no reference store
+    Map<byte[], List<StoreFile>> aStoreFiles = a.close();
+    Map<byte[], List<StoreFile>> bStoreFiles = b.close();
+
+    // Move StoreFiles under new region directory
+    regionFs.commitStoreFiles(aStoreFiles);
+    regionFs.commitStoreFiles(bStoreFiles);
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for new region");
-      FSUtils.logFileSystemState(fs, newRegionDir, LOG);
+      regionFs.logFileSystemState(LOG);
     }
+
+    // Create HRegion and update the metrics
     HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf,
-        newRegionInfo, a.getTableDesc(), null);
+        newRegionInfo, tabledesc, null);
     dstRegion.readRequestsCount.set(a.readRequestsCount.get() + b.readRequestsCount.get());
     dstRegion.writeRequestsCount.set(a.writeRequestsCount.get() + b.writeRequestsCount.get());
     dstRegion.checkAndMutateChecksFailed.set(
@@ -4405,79 +4347,23 @@ public class HRegion implements HeapSize
       a.checkAndMutateChecksPassed.get() + b.checkAndMutateChecksPassed.get());
     dstRegion.initialize();
     dstRegion.compactStores();
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for new region");
-      FSUtils.logFileSystemState(fs, dstRegion.getRegionDir(), LOG);
+      dstRegion.getRegionFileSystem().logFileSystemState(LOG);
     }
 
     // delete out the 'A' region
-    HFileArchiver.archiveRegion(fs,
-      FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(), a.getRegionDir());
+    HRegionFileSystem.deleteRegionFromFileSystem(
+      a.getBaseConf(), fs, tableDir, a.getRegionInfo());
     // delete out the 'B' region
-    HFileArchiver.archiveRegion(fs,
-      FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(), b.getRegionDir());
+    HRegionFileSystem.deleteRegionFromFileSystem(
+      b.getBaseConf(), fs, tableDir, b.getRegionInfo());
 
     LOG.info("merge completed. New region is " + dstRegion);
-
     return dstRegion;
   }
 
-  /*
-   * Fills a map with a vector of store files keyed by column family.
-   * @param byFamily Map to fill.
-   * @param storeFiles Store files to process.
-   * @param family
-   * @return Returns <code>byFamily</code>
-   */
-  private static Map<byte [], List<StoreFile>> filesByFamily(
-      Map<byte [], List<StoreFile>> byFamily, List<StoreFile> storeFiles) {
-    for (StoreFile src: storeFiles) {
-      byte [] family = src.getFamily();
-      List<StoreFile> v = byFamily.get(family);
-      if (v == null) {
-        v = new ArrayList<StoreFile>();
-        byFamily.put(family, v);
-      }
-      v.add(src);
-    }
-    return byFamily;
-  }
-
-  /**
-   * Check whether region has Reference file
-   * @param fs
-   * @param rootDir
-   * @param region
-   * @param htd
-   * @return true if region has reference file
-   * @throws IOException
-   */
-  public static boolean hasReferences(final FileSystem fs,
-      final Path rootDir, final HRegionInfo region, final HTableDescriptor htd)
-      throws IOException {
-    Path tabledir = new Path(rootDir, region.getTableNameAsString());
-    boolean hasReference = false;
-    for (HColumnDescriptor family : htd.getFamilies()) {
-      Path p = HStore.getStoreHomedir(tabledir, region.getEncodedName(),
-          family.getName());
-      if (!fs.exists(p))
-        continue;
-      // Look for reference files. Call listStatus with anonymous instance of
-      // PathFilter.
-      FileStatus[] ps = FSUtils.listStatus(fs, p, new PathFilter() {
-        public boolean accept(Path path) {
-          return StoreFileInfo.isReference(path);
-        }
-      });
-
-      if (ps != null && ps.length > 0) {
-        hasReference = true;
-        break;
-      }
-    }
-    return hasReference;
-  }
-
   /**
    * @return True if needs a major compaction.
    * @throws IOException

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java?rev=1460617&r1=1460616&r2=1460617&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java Mon Mar 25 11:15:36 2013
@@ -24,6 +24,8 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 
 import org.apache.commons.logging.Log;
@@ -36,11 +38,15 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -108,14 +114,14 @@ public class HRegionFileSystem {
   //  Temp Helpers
   // ===========================================================================
   /** @return {@link Path} to the region's temp directory, used for file creations */
-  public Path getTempDir() {
+  Path getTempDir() {
     return new Path(getRegionDir(), REGION_TEMP_DIR);
   }
 
   /**
    * Clean up any temp detritus that may have been left around from previous operation attempts.
    */
-  public void cleanupTempDir() throws IOException {
+  void cleanupTempDir() throws IOException {
     FSUtils.deleteDirectory(fs, getTempDir());
   }
 
@@ -137,7 +143,7 @@ public class HRegionFileSystem {
    * @return {@link Path} to the directory of the specified family
    * @throws IOException if the directory creation fails.
    */
-  public Path createStoreDir(final String familyName) throws IOException {
+  Path createStoreDir(final String familyName) throws IOException {
     Path storeDir = getStoreDir(familyName);
     if (!fs.exists(storeDir) && !fs.mkdirs(storeDir)) {
       throw new IOException("Failed create of: " + storeDir);
@@ -176,7 +182,40 @@ public class HRegionFileSystem {
   }
 
   /**
+   * Returns true if the specified family has reference files
+   * @param familyName Column Family Name
+   * @return true if family contains reference files
+   * @throws IOException
+   */
+  public boolean hasReferences(final String familyName) throws IOException {
+    FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
+      new PathFilter () {
+        public boolean accept(Path path) {
+          return StoreFileInfo.isReference(path);
+        }
+      }
+    );
+    return files != null && files.length > 0;
+  }
+
+  /**
+   * Check whether region has Reference file
+   * @param htd table descriptor of the region
+   * @return true if region has reference file
+   * @throws IOException
+   */
+  public boolean hasReferences(final HTableDescriptor htd) throws IOException {
+    for (HColumnDescriptor family : htd.getFamilies()) {
+      if (hasReferences(family.getNameAsString())) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
    * @return the set of families present on disk
+   * @throws IOException
    */
   public Collection<String> getFamilies() throws IOException {
     FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
@@ -191,6 +230,24 @@ public class HRegionFileSystem {
   }
 
   /**
+   * Remove the region family from disk, archiving the store files.
+   * @param familyName Column Family Name
+   * @throws IOException if an error occurs during the archiving
+   */
+  public void deleteFamily(final String familyName) throws IOException {
+    // archive family store files
+    HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, Bytes.toBytes(familyName));
+
+    // delete the family folder
+    Path familyDir = getStoreDir(familyName);
+    if (!fs.delete(familyDir, true)) {
+      throw new IOException("Could not delete family " + familyName +
+        " from FileSystem for region " + regionInfo.getRegionNameAsString() +
+        "(" + regionInfo.getEncodedName() + ")");
+    }
+  }
+
+  /**
    * Generate a unique file name, used by createTempName() and commitStoreFile()
    * @param suffix extra information to append to the generated name
    * @return Unique file name
@@ -252,7 +309,7 @@ public class HRegionFileSystem {
    * @return The new {@link Path} of the committed file
    * @throws IOException
    */
-  public Path commitStoreFile(final String familyName, final Path buildPath,
+  private Path commitStoreFile(final String familyName, final Path buildPath,
       final long seqNum, final boolean generateNewName) throws IOException {
     Path storeDir = getStoreDir(familyName);
     fs.mkdirs(storeDir);
@@ -272,6 +329,20 @@ public class HRegionFileSystem {
   }
 
   /**
+   * Moves multiple store files to the relative region's family store directory.
+   * @param storeFiles list of store files divided by family
+   * @throws IOException
+   */
+  void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
+    for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
+      String familyName = Bytes.toString(es.getKey());
+      for (StoreFile sf: es.getValue()) {
+        commitStoreFile(familyName, sf.getPath());
+      }
+    }
+  }
+
+  /**
    * Archives the specified store file from the specified family.
    * @param familyName Family that contains the store files
    * @param filePath {@link Path} to the store file to remove
@@ -306,7 +377,7 @@ public class HRegionFileSystem {
    * @return The destination {@link Path} of the bulk loaded file
    * @throws IOException
    */
-  public Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
+  Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
       throws IOException {
     // Copy the file if it's on another filesystem
     FileSystem srcFs = srcPath.getFileSystem(conf);
@@ -331,36 +402,232 @@ public class HRegionFileSystem {
   //  Splits Helpers
   // ===========================================================================
   /** @return {@link Path} to the temp directory used during split operations */
-  public Path getSplitsDir() {
+  Path getSplitsDir() {
     return new Path(getRegionDir(), REGION_SPLITS_DIR);
   }
 
+  Path getSplitsDir(final HRegionInfo hri) {
+    return new Path(getSplitsDir(), hri.getEncodedName());
+  }
+
   /**
    * Clean up any split detritus that may have been left around from previous split attempts.
    */
-  public void cleanupSplitsDir() throws IOException {
+  void cleanupSplitsDir() throws IOException {
     FSUtils.deleteDirectory(fs, getSplitsDir());
   }
 
+  /**
+   * Clean up any split detritus that may have been left around from previous
+   * split attempts.
+   * Call this method on initial region deploy.
+   * @throws IOException
+   */
+  void cleanupAnySplitDetritus() throws IOException {
+    Path splitdir = this.getSplitsDir();
+    if (!fs.exists(splitdir)) return;
+    // Look at the splitdir.  It could have the encoded names of the daughter
+    // regions we tried to make.  See if the daughter regions actually got made
+    // out under the tabledir.  If here under splitdir still, then the split did
+    // not complete.  Try and do cleanup.  This code WILL NOT catch the case
+    // where we successfully created daughter a but regionserver crashed during
+    // the creation of region b.  In this case, there'll be an orphan daughter
+   // dir in the filesystem.  TODO: Fix.
+    FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
+    if (daughters != null) {
+      for (FileStatus daughter: daughters) {
+        Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
+        if (fs.exists(daughterDir) && !fs.delete(daughterDir, true)) {
+          throw new IOException("Failed delete of " + daughterDir);
+        }
+      }
+    }
+    cleanupSplitsDir();
+    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
+  }
+
+  /**
+   * Remove daughter region
+   * @param regionInfo daughter {@link HRegionInfo}
+   * @throws IOException
+   */
+  void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
+    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
+    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
+      throw new IOException("Failed delete of " + regionDir);
+    }
+  }
+
+  /**
+   * Commit a daughter region, moving it from the split temporary directory
+   * to the proper location in the filesystem.
+   * @param regionInfo daughter {@link HRegionInfo}
+   * @throws IOException
+   */
+  Path commitDaughterRegion(final HRegionInfo regionInfo) throws IOException {
+    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
+    Path daughterTmpDir = this.getSplitsDir(regionInfo);
+    if (fs.exists(daughterTmpDir) && !fs.rename(daughterTmpDir, regionDir)) {
+      throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir);
+    }
+    return regionDir;
+  }
+
+  /**
+   * Create the region splits directory.
+   */
+  void createSplitsDir() throws IOException {
+    Path splitdir = getSplitsDir();
+    if (fs.exists(splitdir)) {
+      LOG.info("The " + splitdir + " directory exists.  Hence deleting it to recreate it");
+      if (!fs.delete(splitdir, true)) {
+        throw new IOException("Failed deletion of " + splitdir
+            + " before creating them again.");
+      }
+    }
+    if (!fs.mkdirs(splitdir)) {
+      throw new IOException("Failed create of " + splitdir);
+    }
+  }
+
+  /**
+   * Write out a split reference. Package local so it doesn't leak out of
+   * regionserver.
+   * @param hri {@link HRegionInfo} of the destination
+   * @param familyName Column Family Name
+   * @param f File to split.
+   * @param splitRow Split Row
+   * @param top True if we are referring to the top half of the hfile.
+   * @return Path to created reference.
+   * @throws IOException
+   */
+  Path splitStoreFile(final HRegionInfo hri, final String familyName,
+      final StoreFile f, final byte[] splitRow, final boolean top) throws IOException {
+    Path splitDir = new Path(getSplitsDir(hri), familyName);
+    // A reference to the bottom half of the hsf store file.
+    Reference r =
+      top ? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
+    // Add the referred-to regions name as a dot separated suffix.
+    // See REF_NAME_REGEX regex above.  The referred-to regions name is
+    // up in the path of the passed in <code>f</code> -- parentdir is family,
+    // then the directory above is the region name.
+    String parentRegionName = regionInfo.getEncodedName();
+    // Write reference with same file id only with the other region name as
+    // suffix and into the new region location (under same family).
+    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
+    return r.write(fs, p);
+  }
+
   // ===========================================================================
   //  Merge Helpers
   // ===========================================================================
   /** @return {@link Path} to the temp directory used during merge operations */
-  public Path getMergesDir() {
+  Path getMergesDir() {
     return new Path(getRegionDir(), REGION_MERGES_DIR);
   }
 
+  Path getMergesDir(final HRegionInfo hri) {
+    return new Path(getMergesDir(), hri.getEncodedName());
+  }
+
   /**
    * Clean up any merge detritus that may have been left around from previous merge attempts.
    */
-  public void cleanupMergesDir() throws IOException {
+  void cleanupMergesDir() throws IOException {
     FSUtils.deleteDirectory(fs, getMergesDir());
   }
 
+  /**
+   * Remove merged region
+   * @param mergedRegion {@link HRegionInfo}
+   * @throws IOException
+   */
+  void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
+    Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
+    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
+      throw new IOException("Failed delete of " + regionDir);
+    }
+  }
+
+  /**
+   * Create the region merges directory.
+   * @throws IOException If merges dir already exists or we fail to create it.
+   * @see HRegionFileSystem#cleanupMergesDir()
+   */
+  void createMergesDir() throws IOException {
+    Path mergesdir = getMergesDir();
+    if (fs.exists(mergesdir)) {
+      LOG.info("The " + mergesdir
+          + " directory exists.  Hence deleting it to recreate it");
+      if (!fs.delete(mergesdir, true)) {
+        throw new IOException("Failed deletion of " + mergesdir
+            + " before creating them again.");
+      }
+    }
+    if (!fs.mkdirs(mergesdir))
+      throw new IOException("Failed create of " + mergesdir);
+  }
+
+  /**
+   * Write out a merge reference under the given merges directory. Package local
+   * so it doesn't leak out of regionserver.
+   * @param mergedRegion {@link HRegionInfo} of the merged region
+   * @param familyName Column Family Name
+   * @param f File to create reference.
+   * @param mergedDir
+   * @return Path to created reference.
+   * @throws IOException
+   */
+  Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
+      final StoreFile f, final Path mergedDir)
+      throws IOException {
+    Path referenceDir = new Path(new Path(mergedDir,
+        mergedRegion.getEncodedName()), familyName);
+    // A whole reference to the store file.
+    Reference r = Reference.createTopReference(regionInfo.getStartKey());
+    // Add the referred-to regions name as a dot separated suffix.
+    // See REF_NAME_REGEX regex above. The referred-to regions name is
+    // up in the path of the passed in <code>f</code> -- parentdir is family,
+    // then the directory above is the region name.
+    String mergingRegionName = regionInfo.getEncodedName();
+    // Write reference with same file id only with the other region name as
+    // suffix and into the new region location (under same family).
+    Path p = new Path(referenceDir, f.getPath().getName() + "."
+        + mergingRegionName);
+    return r.write(fs, p);
+  }
+
+  /**
+   * Commit a merged region, moving it from the merges temporary directory to
+   * the proper location in the filesystem.
+   * @param mergedRegionInfo merged region {@link HRegionInfo}
+   * @throws IOException 
+   */
+  void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
+    Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
+    Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
+    // Move the tmp dir in the expected location
+    if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
+      if (!fs.rename(mergedRegionTmpDir, regionDir)) {
+        throw new IOException("Unable to rename " + mergedRegionTmpDir + " to "
+            + regionDir);
+      }
+    }
+  }
+
   // ===========================================================================
   //  Create/Open/Delete Helpers
   // ===========================================================================
   /**
+   * Log the current state of the region
+   * @param LOG log to output information
+   * @throws IOException if an unexpected exception occurs
+   */
+  void logFileSystemState(final Log LOG) throws IOException {
+    FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
+  }
+
+  /**
    * @param hri
    * @return Content of the file we write out to the filesystem under a region
    * @throws IOException
@@ -517,10 +784,12 @@ public class HRegionFileSystem {
    * @param fs {@link FileSystem} from which to add the region
    * @param tableDir {@link Path} to where the table is being stored
    * @param regionInfo {@link HRegionInfo} for region to be added
+   * @param readOnly True if you don't want to edit the region data
    * @throws IOException if the region creation fails due to a FileSystem exception.
    */
   public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
-      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
+      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
+      throws IOException {
     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
     Path regionDir = regionFs.getRegionDir();
 
@@ -529,12 +798,16 @@ public class HRegionFileSystem {
       throw new IOException("The specified region do not exists on disk: " + regionDir);
     }
 
-    // Cleanup temporary directories
-    regionFs.cleanupTempDir();
-    regionFs.cleanupSplitsDir();
-    regionFs.cleanupMergesDir();
-    // if it doesn't exists, Write HRI to a file, in case we need to recover .META.
-    regionFs.checkRegionInfoOnFilesystem();
+    if (!readOnly) {
+      // Cleanup temporary directories
+      regionFs.cleanupTempDir();
+      regionFs.cleanupSplitsDir();
+      regionFs.cleanupMergesDir();
+
+      // if it doesn't exists, Write HRI to a file, in case we need to recover .META.
+      regionFs.checkRegionInfoOnFilesystem();
+    }
+
     return regionFs;
   }
 

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java?rev=1460617&r1=1460616&r2=1460617&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java Mon Mar 25 11:15:36 2013
@@ -22,11 +22,11 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.ListIterator;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionTransition;
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.catalog.M
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.regionserver.SplitTransaction.LoggingProgressable;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -75,7 +76,6 @@ import org.apache.zookeeper.KeeperExcept
 @InterfaceAudience.Private
 public class RegionMergeTransaction {
   private static final Log LOG = LogFactory.getLog(RegionMergeTransaction.class);
-  private static final String MERGEDIR = ".merges";
 
   // Merged region info
   private HRegionInfo mergedRegionInfo;
@@ -152,7 +152,7 @@ public class RegionMergeTransaction {
       this.region_b = a;
     }
     this.forcible = forcible;
-    this.mergesdir = getMergeDir(this.region_a);
+    this.mergesdir = region_a.getRegionFileSystem().getMergesDir();
   }
 
   /**
@@ -280,12 +280,12 @@ public class RegionMergeTransaction {
       }
     }
 
-    createMergeDir(this.region_a.getFilesystem(), this.mergesdir);
+    this.region_a.getRegionFileSystem().createMergesDir();
     this.journal.add(JournalEntry.CREATED_MERGE_DIR);
 
-    List<StoreFile> hstoreFilesOfRegionA = closeAndOfflineRegion(
+    Map<byte[], List<StoreFile>> hstoreFilesOfRegionA = closeAndOfflineRegion(
         services, this.region_a, true, testing);
-    List<StoreFile> hstoreFilesOfRegionB = closeAndOfflineRegion(
+    Map<byte[], List<StoreFile>> hstoreFilesOfRegionB = closeAndOfflineRegion(
         services, this.region_b, false, testing);
 
     assert hstoreFilesOfRegionA != null && hstoreFilesOfRegionB != null;
@@ -335,8 +335,7 @@ public class RegionMergeTransaction {
    */
   HRegion createMergedRegionFromMerges(final HRegion a, final HRegion b,
       final HRegionInfo mergedRegion) throws IOException {
-    return a.createMergedRegionFromMerges(mergedRegion, b, new Path(
-        this.mergesdir, mergedRegion.getEncodedName()));
+    return a.createMergedRegionFromMerges(mergedRegion, b);
   }
 
   /**
@@ -345,13 +344,13 @@ public class RegionMergeTransaction {
    * @param region
    * @param isRegionA true if it is merging region a, false if it is region b
    * @param testing true if it is testing
-   * @return a list of store files
+   * @return a map of family name to list of store files
    * @throws IOException
    */
-  private List<StoreFile> closeAndOfflineRegion(
+  private Map<byte[], List<StoreFile>> closeAndOfflineRegion(
       final RegionServerServices services, final HRegion region,
       final boolean isRegionA, final boolean testing) throws IOException {
-    List<StoreFile> hstoreFilesToMerge = null;
+    Map<byte[], List<StoreFile>> hstoreFilesToMerge = null;
     Exception exceptionToThrow = null;
     try {
       hstoreFilesToMerge = region.close(false);
@@ -511,24 +510,29 @@ public class RegionMergeTransaction {
    * @param hstoreFilesOfRegionB
    * @throws IOException
    */
-  private void mergeStoreFiles(List<StoreFile> hstoreFilesOfRegionA,
-      List<StoreFile> hstoreFilesOfRegionB)
+  private void mergeStoreFiles(
+      Map<byte[], List<StoreFile>> hstoreFilesOfRegionA,
+      Map<byte[], List<StoreFile>> hstoreFilesOfRegionB)
       throws IOException {
     // Create reference file(s) of region A in mergdir
-    FileSystem fs = this.region_a.getFilesystem();
-    for (StoreFile storeFile : hstoreFilesOfRegionA) {
-      Path storedir = HStore.getStoreHomedir(this.mergesdir,
-          mergedRegionInfo.getEncodedName(), storeFile.getFamily());
-      StoreFile.split(fs, storedir, storeFile, this.region_a.getStartKey(),
-          true);
+    HRegionFileSystem fs_a = this.region_a.getRegionFileSystem();
+    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionA
+        .entrySet()) {
+      String familyName = Bytes.toString(entry.getKey());
+      for (StoreFile storeFile : entry.getValue()) {
+        fs_a.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile,
+            this.mergesdir);
+      }
     }
-
     // Create reference file(s) of region B in mergedir
-    for (StoreFile storeFile : hstoreFilesOfRegionB) {
-      Path storedir = HStore.getStoreHomedir(this.mergesdir,
-          mergedRegionInfo.getEncodedName(), storeFile.getFamily());
-      StoreFile.split(fs, storedir, storeFile, this.region_b.getStartKey(),
-          true);
+    HRegionFileSystem fs_b = this.region_b.getRegionFileSystem();
+    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionB
+        .entrySet()) {
+      String familyName = Bytes.toString(entry.getKey());
+      for (StoreFile storeFile : entry.getValue()) {
+        fs_b.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile,
+            this.mergesdir);
+      }
     }
   }
 
@@ -544,7 +548,6 @@ public class RegionMergeTransaction {
       final RegionServerServices services) throws IOException {
     assert this.mergedRegionInfo != null;
     boolean result = true;
-    FileSystem fs = this.region_a.getFilesystem();
     ListIterator<JournalEntry> iterator = this.journal
         .listIterator(this.journal.size());
     // Iterate in reverse.
@@ -561,7 +564,7 @@ public class RegionMergeTransaction {
         case CREATED_MERGE_DIR:
           this.region_a.writestate.writesEnabled = true;
           this.region_b.writestate.writesEnabled = true;
-          cleanupMergeDir(fs, this.mergesdir);
+          this.region_a.getRegionFileSystem().cleanupMergesDir();
           break;
 
         case CLOSED_REGION_A:
@@ -600,8 +603,8 @@ public class RegionMergeTransaction {
           break;
 
         case STARTED_MERGED_REGION_CREATION:
-          cleanupMergedRegion(fs, region_a.getTableDir(),
-              this.mergedRegionInfo.getEncodedName());
+          this.region_a.getRegionFileSystem().cleanupMergedRegion(
+              this.mergedRegionInfo);
           break;
 
         case PONR:
@@ -764,59 +767,5 @@ public class RegionMergeTransaction {
     }
     return false;
   }
-
-  static Path getMergeDir(final HRegion r) {
-    return new Path(r.getRegionDir(), MERGEDIR);
-  }
-
-  /**
-   * @param fs Filesystem to use
-   * @param mergedir Directory to store temporary merge data in
-   * @throws IOException If <code>mergedir</code> already exists or we fail to
-   *           create it.
-   * @see #cleanupMergeDir(FileSystem, Path)
-   */
-  private static void createMergeDir(final FileSystem fs, final Path mergedir)
-      throws IOException {
-    if (fs.exists(mergedir)) {
-      LOG.info("The " + mergedir
-          + " directory exists.  Hence deleting it to recreate it");
-      if (!fs.delete(mergedir, true)) {
-        throw new IOException("Failed deletion of " + mergedir
-            + " before creating them again.");
-      }
-    }
-    if (!fs.mkdirs(mergedir))
-      throw new IOException("Failed create of " + mergedir);
-  }
-
-  static void cleanupMergeDir(final FileSystem fs, final Path mergedir)
-      throws IOException {
-    // Mergedir may have been cleaned up by reopen of the parent dir.
-    deleteDir(fs, mergedir, false);
-  }
-
-  /**
-   * @param fs Filesystem to use
-   * @param dir Directory to delete
-   * @param mustPreExist If true, we'll throw exception if <code>dir</code> does
-   *          not preexist, else we'll just pass.
-   * @throws IOException Thrown if we fail to delete passed <code>dir</code>
-   */
-  private static void deleteDir(final FileSystem fs, final Path dir,
-      final boolean mustPreExist) throws IOException {
-    if (!fs.exists(dir)) {
-      if (mustPreExist)
-        throw new IOException(dir.toString() + " does not exist!");
-    } else if (!fs.delete(dir, true)) {
-      throw new IOException("Failed delete of " + dir);
-    }
-  }
-
-  private static void cleanupMergedRegion(final FileSystem fs,
-      final Path tabledir, final String encodedName) throws IOException {
-    Path regiondir = HRegion.getRegionDir(tabledir, encodedName);
-    // Dir may not preexist.
-    deleteDir(fs, regiondir, false);
-  }
 }
+

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java?rev=1460617&r1=1460616&r2=1460617&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java Mon Mar 25 11:15:36 2013
@@ -107,13 +107,12 @@ class SplitRequest implements Runnable {
         return;
       }
       LOG.info("Region split, META updated, and report to master. Parent="
-          + parent.getRegionInfo().getRegionNameAsString() + ", new regions: "
+          + parent.getRegionNameAsString() + ", new regions: "
           + st.getFirstDaughter().getRegionNameAsString() + ", "
           + st.getSecondDaughter().getRegionNameAsString() + ". Split took "
           + StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
     } catch (IOException ex) {
-      LOG.error("Split failed " + this, RemoteExceptionHandler
-          .checkIOException(ex));
+      LOG.error("Split failed " + this, RemoteExceptionHandler.checkIOException(ex));
       server.checkFileSystem();
     } finally {
       if (this.parent.getCoprocessorHost() != null) {

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java?rev=1460617&r1=1460616&r2=1460617&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java Mon Mar 25 11:15:36 2013
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.ListIterator;
+import java.util.Map;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
@@ -34,9 +35,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.Server;
@@ -46,7 +44,6 @@ import org.apache.hadoop.hbase.executor.
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -90,7 +87,6 @@ public class SplitTransaction {
   private final HRegion parent;
   private HRegionInfo hri_a;
   private HRegionInfo hri_b;
-  private Path splitdir;
   private long fileSplitTimeout = 30000;
   private int znodeVersion = -1;
 
@@ -150,7 +146,6 @@ public class SplitTransaction {
   public SplitTransaction(final HRegion r, final byte [] splitrow) {
     this.parent = r;
     this.splitrow = splitrow;
-    this.splitdir = getSplitDir(this.parent);
   }
 
   /**
@@ -174,10 +169,8 @@ public class SplitTransaction {
       return false;
     }
     long rid = getDaughterRegionIdTimestamp(hri);
-    this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow,
-      false, rid);
-    this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey,
-      false, rid);
+    this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow, false, rid);
+    this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey, false, rid);
     return true;
   }
 
@@ -206,7 +199,8 @@ public class SplitTransaction {
    * @param server Hosting server instance.  Can be null when testing (won't try
    * and update in zk if a null server)
    * @param services Used to online/offline regions.
-   * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
+   * @throws IOException If thrown, transaction failed.
+   *    Call {@link #rollback(Server, RegionServerServices)}
    * @return Regions created
    */
   /* package */PairOfSameType<HRegion> createDaughters(final Server server,
@@ -216,7 +210,8 @@ public class SplitTransaction {
         (services != null && services.isStopping())) {
       throw new IOException("Server is stopped or stopping");
     }
-    assert !this.parent.lock.writeLock().isHeldByCurrentThread(): "Unsafe to hold write lock while performing RPCs";
+    assert !this.parent.lock.writeLock().isHeldByCurrentThread():
+      "Unsafe to hold write lock while performing RPCs";
 
     // Coprocessor callback
     if (this.parent.getCoprocessorHost() != null) {
@@ -253,7 +248,8 @@ public class SplitTransaction {
         // Master will get the callback for node change only if the transition is successful.
         // Note that if the transition fails then the rollback will delete the created znode
         // as the journal entry SET_SPLITTING_IN_ZK is added.
-        // TODO : May be we can add some new state to znode and handle the new state incase of success/failure
+        // TODO : Maybe we can add some new state to znode and handle the new state in case
+        //        of success/failure
         this.znodeVersion = transitionNodeSplitting(server.getZooKeeper(),
             this.parent.getRegionInfo(), server.getServerName(), -1);
       } catch (KeeperException e) {
@@ -262,10 +258,10 @@ public class SplitTransaction {
       }
     }
 
-    createSplitDir(this.parent.getFilesystem(), this.splitdir);
+    this.parent.getRegionFileSystem().createSplitsDir();
     this.journal.add(JournalEntry.CREATE_SPLIT_DIR);
 
-    List<StoreFile> hstoreFilesToSplit = null;
+    Map<byte[], List<StoreFile>> hstoreFilesToSplit = null;
     Exception exceptionToThrow = null;
     try{
       hstoreFilesToSplit = this.parent.close(false);
@@ -298,18 +294,18 @@ public class SplitTransaction {
     // splitStoreFiles creates daughter region dirs under the parent splits dir
     // Nothing to unroll here if failure -- clean up of CREATE_SPLIT_DIR will
     // clean this up.
-    splitStoreFiles(this.splitdir, hstoreFilesToSplit);
+    splitStoreFiles(hstoreFilesToSplit);
 
     // Log to the journal that we are creating region A, the first daughter
     // region.  We could fail halfway through.  If we do, we could have left
     // stuff in fs that needs cleanup -- a storefile or two.  Thats why we
     // add entry to journal BEFORE rather than AFTER the change.
     this.journal.add(JournalEntry.STARTED_REGION_A_CREATION);
-    HRegion a = createDaughterRegion(this.hri_a);
+    HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a);
 
     // Ditto
     this.journal.add(JournalEntry.STARTED_REGION_B_CREATION);
-    HRegion b = createDaughterRegion(this.hri_b);
+    HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b);
 
     // This is the point of no return.  Adding subsequent edits to .META. as we
     // do below when we do the daughter opens adding each to .META. can fail in
@@ -347,7 +343,8 @@ public class SplitTransaction {
    * @param services Used to online/offline regions.
    * @param a first daughter region
    * @param a second daughter region
-   * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
+   * @throws IOException If thrown, transaction failed.
+   *          Call {@link #rollback(Server, RegionServerServices)}
    */
   /* package */void openDaughters(final Server server,
       final RegionServerServices services, HRegion a, HRegion b)
@@ -404,7 +401,8 @@ public class SplitTransaction {
    * @param services Used to online/offline regions.
    * @param a first daughter region
    * @param a second daughter region
-   * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
+   * @throws IOException If thrown, transaction failed.
+   *          Call {@link #rollback(Server, RegionServerServices)}
    */
   /* package */void transitionZKNode(final Server server,
       final RegionServerServices services, HRegion a, HRegion b)
@@ -456,7 +454,8 @@ public class SplitTransaction {
    * @param server Hosting server instance.  Can be null when testing (won't try
    * and update in zk if a null server)
    * @param services Used to online/offline regions.
-   * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
+   * @throws IOException If thrown, transaction failed.
+   *          Call {@link #rollback(Server, RegionServerServices)}
    * @return Regions created
    * @throws IOException
    * @see #rollback(Server, RegionServerServices)
@@ -542,56 +541,8 @@ public class SplitTransaction {
     }
   }
 
-  private static Path getSplitDir(final HRegion r) {
-    return new Path(r.getRegionDir(), HRegionFileSystem.REGION_SPLITS_DIR);
-  }
-
-  /**
-   * @param fs Filesystem to use
-   * @param splitdir Directory to store temporary split data in
-   * @throws IOException If <code>splitdir</code> already exists or we fail
-   * to create it.
-   * @see #cleanupSplitDir(FileSystem, Path)
-   */
-  private static void createSplitDir(final FileSystem fs, final Path splitdir)
-  throws IOException {
-    if (fs.exists(splitdir)) {
-      LOG.info("The " + splitdir
-          + " directory exists.  Hence deleting it to recreate it");
-      if (!fs.delete(splitdir, true)) {
-        throw new IOException("Failed deletion of " + splitdir
-            + " before creating them again.");
-      }
-    }
-    if (!fs.mkdirs(splitdir)) throw new IOException("Failed create of " + splitdir);
-  }
-
-  private static void cleanupSplitDir(final FileSystem fs, final Path splitdir)
-  throws IOException {
-    // Splitdir may have been cleaned up by reopen of the parent dir.
-    deleteDir(fs, splitdir, false);
-  }
-
-  /**
-   * @param fs Filesystem to use
-   * @param dir Directory to delete
-   * @param mustPreExist If true, we'll throw exception if <code>dir</code>
-   * does not preexist, else we'll just pass.
-   * @throws IOException Thrown if we fail to delete passed <code>dir</code>
-   */
-  private static void deleteDir(final FileSystem fs, final Path dir,
-      final boolean mustPreExist)
-  throws IOException {
-    if (!fs.exists(dir)) {
-      if (mustPreExist) throw new IOException(dir.toString() + " does not exist!");
-    } else if (!fs.delete(dir, true)) {
-      throw new IOException("Failed delete of " + dir);
-    }
-  }
-
-  private void splitStoreFiles(final Path splitdir,
-    final List<StoreFile> hstoreFilesToSplit)
-  throws IOException {
+  private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
+      throws IOException {
     if (hstoreFilesToSplit == null) {
       // Could be null because close didn't succeed -- for now consider it fatal
       throw new IOException("Close returned empty list of StoreFiles");
@@ -611,11 +562,12 @@ public class SplitTransaction {
       (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
     List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);
 
-     // Split each store file.
-    for (StoreFile sf: hstoreFilesToSplit) {
-      //splitStoreFile(sf, splitdir);
-      StoreFileSplitter sfs = new StoreFileSplitter(sf, splitdir);
-      futures.add(threadPool.submit(sfs));
+    // Split each store file.
+    for (Map.Entry<byte[], List<StoreFile>> entry: hstoreFilesToSplit.entrySet()) {
+      for (StoreFile sf: entry.getValue()) {
+        StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
+        futures.add(threadPool.submit(sfs));
+      }
     }
     // Shutdown the pool
     threadPool.shutdown();
@@ -652,14 +604,11 @@ public class SplitTransaction {
     }
   }
 
-  private void splitStoreFile(final StoreFile sf, final Path splitdir)
-  throws IOException {
-    FileSystem fs = this.parent.getFilesystem();
-    byte [] family = sf.getFamily();
-    Path storedir = HStore.getStoreHomedir(splitdir, this.hri_a, family);
-    StoreFile.split(fs, storedir, sf, this.splitrow, false);
-    storedir = HStore.getStoreHomedir(splitdir, this.hri_b, family);
-    StoreFile.split(fs, storedir, sf, this.splitrow, true);
+  private void splitStoreFile(final byte[] family, final StoreFile sf) throws IOException {
+    HRegionFileSystem fs = this.parent.getRegionFileSystem();
+    String familyName = Bytes.toString(family);
+    fs.splitStoreFile(this.hri_a, familyName, sf, this.splitrow, false);
+    fs.splitStoreFile(this.hri_b, familyName, sf, this.splitrow, true);
   }
 
   /**
@@ -667,61 +616,26 @@ public class SplitTransaction {
    * in parallel instead of sequentially.
    */
   class StoreFileSplitter implements Callable<Void> {
-
+    private final byte[] family;
     private final StoreFile sf;
-    private final Path splitdir;
 
     /**
      * Constructor that takes what it needs to split
+     * @param family Family that contains the store file
      * @param sf which file
-     * @param splitdir where the splitting is done
      */
-    public StoreFileSplitter(final StoreFile sf, final Path splitdir) {
+    public StoreFileSplitter(final byte[] family, final StoreFile sf) {
       this.sf = sf;
-      this.splitdir = splitdir;
+      this.family = family;
     }
 
     public Void call() throws IOException {
-      splitStoreFile(sf, splitdir);
+      splitStoreFile(family, sf);
       return null;
     }
   }
 
   /**
-   * @param hri Spec. for daughter region to open.
-   * @param rsServices RegionServerServices this region should use.
-   * @return Created daughter HRegion.
-   * @throws IOException
-   * @see #cleanupDaughterRegion(FileSystem, Path, String)
-   */
-  HRegion createDaughterRegion(final HRegionInfo hri) throws IOException {
-    // Package private so unit tests have access.
-    Path regionDir = getSplitDirForDaughter(this.splitdir, hri);
-    return this.parent.createDaughterRegion(hri, regionDir);
-  }
-
-  private static void cleanupDaughterRegion(final FileSystem fs,
-    final Path tabledir, final String encodedName)
-  throws IOException {
-    Path regiondir = HRegion.getRegionDir(tabledir, encodedName);
-    // Dir may not preexist.
-    deleteDir(fs, regiondir, false);
-  }
-
-  /*
-   * Get the daughter directories in the splits dir.  The splits dir is under
-   * the parent regions' directory.
-   * @param splitdir
-   * @param hri
-   * @return Path to daughter split dir.
-   * @throws IOException
-   */
-  private static Path getSplitDirForDaughter(final Path splitdir, final HRegionInfo hri)
-      throws IOException {
-    return new Path(splitdir, hri.getEncodedName());
-  }
-
-  /**
    * @param server Hosting server instance (May be null when testing).
    * @param services
    * @throws IOException If thrown, rollback failed.  Take drastic action.
@@ -736,7 +650,6 @@ public class SplitTransaction {
     }
 
     boolean result = true;
-    FileSystem fs = this.parent.getFilesystem();
     ListIterator<JournalEntry> iterator =
       this.journal.listIterator(this.journal.size());
     // Iterate in reverse.
@@ -751,8 +664,8 @@ public class SplitTransaction {
         break;
 
       case CREATE_SPLIT_DIR:
-    	this.parent.writestate.writesEnabled = true;
-        cleanupSplitDir(fs, this.splitdir);
+        this.parent.writestate.writesEnabled = true;
+        this.parent.getRegionFileSystem().cleanupSplitsDir();
         break;
 
       case CLOSED_PARENT_REGION:
@@ -771,13 +684,11 @@ public class SplitTransaction {
         break;
 
       case STARTED_REGION_A_CREATION:
-        cleanupDaughterRegion(fs, this.parent.getTableDir(),
-          this.hri_a.getEncodedName());
+        this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a);
         break;
 
       case STARTED_REGION_B_CREATION:
-        cleanupDaughterRegion(fs, this.parent.getTableDir(),
-          this.hri_b.getEncodedName());
+        this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b);
         break;
 
       case OFFLINED_PARENT:
@@ -810,39 +721,6 @@ public class SplitTransaction {
     return hri_b;
   }
 
-  // For unit testing.
-  Path getSplitDir() {
-    return this.splitdir;
-  }
-
-  /**
-   * Clean up any split detritus that may have been left around from previous
-   * split attempts.
-   * Call this method on initial region deploy.  Cleans up any mess
-   * left by previous deploys of passed <code>r</code> region.
-   * @param r
-   * @throws IOException
-   */
-  static void cleanupAnySplitDetritus(final HRegion r) throws IOException {
-    Path splitdir = getSplitDir(r);
-    FileSystem fs = r.getFilesystem();
-    if (!fs.exists(splitdir)) return;
-    // Look at the splitdir.  It could have the encoded names of the daughter
-    // regions we tried to make.  See if the daughter regions actually got made
-    // out under the tabledir.  If here under splitdir still, then the split did
-    // not complete.  Try and do cleanup.  This code WILL NOT catch the case
-    // where we successfully created daughter a but regionserver crashed during
-    // the creation of region b.  In this case, there'll be an orphan daughter
-    // dir in the filesystem.  TOOD: Fix.
-    FileStatus [] daughters = fs.listStatus(splitdir, new FSUtils.DirFilter(fs));
-    for (int i = 0; i < daughters.length; i++) {
-      cleanupDaughterRegion(fs, r.getTableDir(),
-        daughters[i].getPath().getName());
-    }
-    cleanupSplitDir(r.getFilesystem(), splitdir);
-    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
-  }
-
   private static void cleanZK(final Server server, final HRegionInfo hri) {
     try {
       // Only delete if its in expected state; could have been hijacked.

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1460617&r1=1460616&r2=1460617&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Mon Mar 25 11:15:36 2013
@@ -36,7 +36,6 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
@@ -44,7 +43,6 @@ import org.apache.hadoop.hbase.HDFSBlock
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.BlockType;
@@ -245,13 +243,6 @@ public class StoreFile {
   }
 
   /**
-   * @return The Store/ColumnFamily this file belongs to.
-   */
-  byte [] getFamily() {
-    return Bytes.toBytes(this.getPath().getParent().getName());
-  }
-
-  /**
    * @return True if this is a StoreFile Reference; call after {@link #open()}
    * else may get wrong answer.
    */
@@ -521,28 +512,6 @@ public class StoreFile {
     return sb.toString();
   }
 
-  /**
-   * Utility to help with rename.
-   * @param fs
-   * @param src
-   * @param tgt
-   * @return True if succeeded.
-   * @throws IOException
-   */
-  public static Path rename(final FileSystem fs,
-                            final Path src,
-                            final Path tgt)
-      throws IOException {
-
-    if (!fs.exists(src)) {
-      throw new FileNotFoundException(src.toString());
-    }
-    if (!fs.rename(src, tgt)) {
-      throw new IOException("Failed rename of " + src + " to " + tgt);
-    }
-    return tgt;
-  }
-
   public static class WriterBuilder {
     private final Configuration conf;
     private final CacheConfig cacheConf;
@@ -706,38 +675,6 @@ public class StoreFile {
     return new Path(dir, UUID.randomUUID().toString().replaceAll("-", ""));
   }
 
-  /**
-   * Write out a split reference. Package local so it doesnt leak out of
-   * regionserver.
-   * @param fs
-   * @param splitDir Presumes path format is actually
-   *          <code>SOME_DIRECTORY/REGIONNAME/FAMILY</code>.
-   * @param f File to split.
-   * @param splitRow
-   * @param top True if we are referring to the top half of the hfile.
-   * @return Path to created reference.
-   * @throws IOException
-   */
-  static Path split(final FileSystem fs,
-                    final Path splitDir,
-                    final StoreFile f,
-                    final byte [] splitRow,
-                    final boolean top)
-      throws IOException {
-    // A reference to the bottom half of the hsf store file.
-    Reference r =
-      top? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
-    // Add the referred-to regions name as a dot separated suffix.
-    // See REF_NAME_REGEX regex above.  The referred-to regions name is
-    // up in the path of the passed in <code>f</code> -- parentdir is family,
-    // then the directory above is the region name.
-    String parentRegionName = f.getPath().getParent().getParent().getName();
-    // Write reference with same file id only with the other region name as
-    // suffix and into the new region location (under same family).
-    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
-    return r.write(fs, p);
-  }
-
   public Long getMinimumTimestamp() {
     return (getReader().timeRangeTracker == null) ?
         null :

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java?rev=1460617&r1=1460616&r2=1460617&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java Mon Mar 25 11:15:36 2013
@@ -330,7 +330,7 @@ public class RestoreSnapshotHelper {
           Path hfile = new Path(familyDir, hfileName);
           LOG.trace("Removing hfile=" + hfile +
             " from region=" + regionInfo.getEncodedName() + " table=" + tableName);
-          HFileArchiver.archiveStoreFile(fs, regionInfo, conf, tableDir, family, hfile);
+          HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile);
         }
       } else {
         // Family doesn't exists in the snapshot

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java?rev=1460617&r1=1460616&r2=1460617&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java Mon Mar 25 11:15:36 2013
@@ -555,7 +555,7 @@ public class HBaseFsck extends Configure
     HRegionInfo hri = new HRegionInfo(template.getName(), orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
     LOG.info("Creating new region : " + hri);
     HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
-    Path target = region.getRegionDir();
+    Path target = region.getRegionFileSystem().getRegionDir();
 
     // rename all the data to new region
     mergeRegionDirs(target, hi);
@@ -2182,11 +2182,11 @@ public class HBaseFsck extends Configure
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
         LOG.info("Created new empty container region: " +
             newRegion + " to contain regions: " + Joiner.on(",").join(overlap));
-        debugLsr(region.getRegionDir());
+        debugLsr(region.getRegionFileSystem().getRegionDir());
 
         // all target regions are closed, should be able to safely cleanup.
         boolean didFix= false;
-        Path target = region.getRegionDir();
+        Path target = region.getRegionFileSystem().getRegionDir();
         for (HbckInfo contained : overlap) {
           LOG.info("Merging " + contained  + " into " + target );
           int merges = mergeRegionDirs(target, contained);

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java?rev=1460617&r1=1460616&r2=1460617&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java Mon Mar 25 11:15:36 2013
@@ -54,21 +54,6 @@ public class HFileArchiveUtil {
 
   /**
    * Get the directory to archive a store directory
-   * @param conf {@link Configuration} to read for the archive directory name
-   * @param tableName table name under which the store currently lives
-   * @param region parent region information under which the store currently lives
-   * @param familyName name of the family in the store
-   * @return {@link Path} to the directory to archive the given store or
-   *         <tt>null</tt> if it should not be archived
-   */
-  public static Path getStoreArchivePath(final Configuration conf, final String tableName,
-      final HRegionInfo region, final String familyName) throws IOException {
-    Path tableArchiveDir = getTableArchivePath(conf, tableName);
-    return HStore.getStoreHomedir(tableArchiveDir, region, Bytes.toBytes(familyName));
-  }
-
-  /**
-   * Get the directory to archive a store directory
    * @param conf {@link Configuration} to read for the archive directory name. Can be null.
    * @param region parent region information under which the store currently lives
    * @param tabledir directory for the table under which the store currently lives
@@ -79,19 +64,17 @@ public class HFileArchiveUtil {
   public static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir,
       byte[] family) {
     Path tableArchiveDir = getTableArchivePath(tabledir);
-    return HStore.getStoreHomedir(tableArchiveDir,
-      HRegionInfo.encodeRegionName(region.getRegionName()), family);
+    return HStore.getStoreHomedir(tableArchiveDir, region, family);
   }
 
   /**
    * Get the archive directory for a given region under the specified table
-   * @param conf {@link Configuration} to read the archive directory from. Can be null
    * @param tabledir the original table directory. Cannot be null.
    * @param regiondir the path to the region directory. Cannot be null.
    * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
    *         should not be archived
    */
-  public static Path getRegionArchiveDir(Configuration conf, Path tabledir, Path regiondir) {
+  public static Path getRegionArchiveDir(Path tabledir, Path regiondir) {
     // get the archive directory for a table
     Path archiveDir = getTableArchivePath(tabledir);
 



Mime
View raw message