hbase-commits mailing list archives

From mberto...@apache.org
Subject svn commit: r1457148 [1/2] - in /hbase/trunk/hbase-server/src: main/java/org/apache/hadoop/hbase/backup/ main/java/org/apache/hadoop/hbase/master/ main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/hadoop/hbase/snapshot/ main/java/org...
Date Fri, 15 Mar 2013 22:19:35 GMT
Author: mbertozzi
Date: Fri Mar 15 22:19:34 2013
New Revision: 1457148

URL: http://svn.apache.org/r1457148
Log:
HBASE-7809 Refactor Split/Merge to use HRegionFileSystem
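
For orientation, a minimal sketch of the headline API this patch moves callers onto: region layouts are opened and inspected through HRegionFileSystem rather than hand-built Paths. Signatures are as they appear in the diffs below; the wrapper method and its name are illustrative only.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

    // Sketch: does this daughter region still hold references to its parent?
    static boolean hasReferenceFiles(Configuration conf, FileSystem fs,
        Path tableDir, HRegionInfo region, String family) throws IOException {
      // readOnly=true: inspect the layout without the on-deploy cleanup of
      // the temp/splits/merges directories.
      HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
          conf, fs, tableDir, region, true);
      // Replaces the caller-side PathFilter scan over the store directory.
      return regionFs.hasReferences(family);
    }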

Modified:
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java Fri Mar 15 22:19:34 2013
@@ -266,35 +266,6 @@ public class HFileArchiver {
   }
 
   /**
-   * Archive the store file
-   * @param fs the filesystem where the store files live
-   * @param regionInfo region hosting the store files
-   * @param conf {@link Configuration} to examine to determine the archive directory
-   * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
-   * @param family the family hosting the store files
-   * @param storeFile file to be archived
-   * @throws IOException if the files could not be correctly disposed.
-   */
-  public static void archiveStoreFile(FileSystem fs, HRegionInfo regionInfo,
-      Configuration conf, Path tableDir, byte[] family, Path storeFile) throws IOException {
-    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
-    // make sure we don't archive if we can't and that the archive dir exists
-    if (!fs.mkdirs(storeArchiveDir)) {
-      throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
-          + Bytes.toString(family) + ", deleting compacted files instead.");
-    }
-
-    // do the actual archive
-    long start = EnvironmentEdgeManager.currentTimeMillis();
-    File file = new FileablePath(fs, storeFile);
-    if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
-      throw new IOException("Failed to archive/delete the file for region:"
-          + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family)
-          + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
-    }
-  }
-
-  /**
    * Archive the given files and resolve any conflicts with existing files via appending the time
    * archiving started (so all conflicts in the same group have the same timestamp appended).
    * <p>

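The removed archiveStoreFile helper's role moves onto the region-scoped abstraction: the HRegionFileSystem hunk below carries the javadoc "Archives the specified store file from the specified family", though the method itself falls outside this excerpt. A hedged sketch of the replacement call, assuming the method is named removeStoreFile (the name is not visible in this diff):

    // Old: HFileArchiver.archiveStoreFile(fs, regionInfo, conf, tableDir, family, storeFile);
    // New (assumed name): the region's HRegionFileSystem archives its own files.
    regionFs.removeStoreFile(Bytes.toString(family), storeFilePath);
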
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java Fri Mar 15 22:19:34 2013
@@ -30,10 +30,8 @@ import java.util.concurrent.atomic.Atomi
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -43,10 +41,8 @@ import org.apache.hadoop.hbase.backup.HF
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 
@@ -226,10 +222,8 @@ class CatalogJanitor extends Chore {
     boolean result = false;
     // Run checks on each daughter split.
     PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
-    Pair<Boolean, Boolean> a =
-      checkDaughterInFs(parent, daughters.getFirst());
-    Pair<Boolean, Boolean> b =
-      checkDaughterInFs(parent, daughters.getSecond());
+    Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
+    Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
     if (hasNoReferences(a) && hasNoReferences(b)) {
       LOG.debug("Deleting region " + parent.getRegionNameAsString() +
         " because daughter splits no longer hold references");
@@ -284,45 +278,35 @@ class CatalogJanitor extends Chore {
    */
   Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
   throws IOException {
-    boolean references = false;
-    boolean exists = false;
     if (daughter == null)  {
       return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
     }
+
     FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
     Path rootdir = this.services.getMasterFileSystem().getRootDir();
-    Path tabledir = new Path(rootdir, daughter.getTableNameAsString());
-    Path regiondir = new Path(tabledir, daughter.getEncodedName());
-    exists = fs.exists(regiondir);
-    if (!exists) {
-      LOG.warn("Daughter regiondir does not exist: " + regiondir.toString());
-      return new Pair<Boolean, Boolean>(exists, Boolean.FALSE);
+    Path tabledir = HTableDescriptor.getTableDir(rootdir, daughter.getTableName());
+
+    HRegionFileSystem regionFs = null;
+    try {
+      regionFs = HRegionFileSystem.openRegionFromFileSystem(
+          this.services.getConfiguration(), fs, tabledir, daughter, true);
+    } catch (IOException e) {
+      LOG.warn("Daughter region does not exist: " + daughter.getEncodedName());
+      return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
     }
-    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());
 
+    boolean references = false;
+    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableNameAsString());
     for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
-      Path p = HStore.getStoreHomedir(tabledir, daughter, family.getName());
-      if (!fs.exists(p)) continue;
-      // Look for reference files.  Call listStatus with anonymous instance of PathFilter.
-      FileStatus [] ps = FSUtils.listStatus(fs, p,
-          new PathFilter () {
-            public boolean accept(Path path) {
-              return StoreFileInfo.isReference(path);
-            }
-          }
-      );
-
-      if (ps != null && ps.length > 0) {
-        references = true;
+      if ((references = regionFs.hasReferences(family.getNameAsString()))) {
         break;
       }
     }
-    return new Pair<Boolean, Boolean>(Boolean.valueOf(exists),
-      Boolean.valueOf(references));
+    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
   }
 
-  private HTableDescriptor getTableDescriptor(byte[] tableName)
-  throws FileNotFoundException, IOException {
-    return this.services.getTableDescriptors().get(Bytes.toString(tableName));
+  private HTableDescriptor getTableDescriptor(final String tableName)
+      throws FileNotFoundException, IOException {
+    return this.services.getTableDescriptors().get(tableName);
   }
 }

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Fri Mar 15 22:19:34 2013
@@ -77,7 +77,6 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -216,12 +215,6 @@ public class HRegion implements HeapSize
   // TODO: account for each registered handler in HeapSize computation
   private Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();
 
-  //These variable are just used for getting data out of the region, to test on
-  //client side
-  // private int numStores = 0;
-  // private int [] storeSize = null;
-  // private byte [] name = null;
-
   public final AtomicLong memstoreSize = new AtomicLong(0);
 
   // Debug possible data loss due to WAL off
@@ -574,7 +567,7 @@ public class HRegion implements HeapSize
     // Get rid of any splits or merges that were lost in-progress.  Clean out
     // these directories here on open.  We may be opening a region that was
     // being split but we crashed in the middle of it all.
-    SplitTransaction.cleanupAnySplitDetritus(this);
+    fs.cleanupAnySplitDetritus();
     fs.cleanupMergesDir();
 
     this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
@@ -664,7 +657,7 @@ public class HRegion implements HeapSize
     mvcc.initialize(maxMemstoreTS + 1);
     // Recover any edits if available.
     maxSeqId = Math.max(maxSeqId, replayRecoveredEditsIfAny(
-        this.getRegionDir(), maxSeqIdInStores, reporter, status));
+        this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
     return maxSeqId;
   }
 
@@ -817,7 +810,7 @@ public class HRegion implements HeapSize
    *
    * @throws IOException e
    */
-  public List<StoreFile> close() throws IOException {
+  public Map<byte[], List<StoreFile>> close() throws IOException {
     return close(false);
   }
 
@@ -837,7 +830,7 @@ public class HRegion implements HeapSize
    *
    * @throws IOException e
    */
-  public List<StoreFile> close(final boolean abort) throws IOException {
+  public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
     // Only allow one thread to close at a time. Serialize them so dual
     // threads attempting to close will run up against each other.
     MonitoredTask status = TaskMonitor.get().createStatus(
@@ -854,9 +847,8 @@ public class HRegion implements HeapSize
     }
   }
 
-  private List<StoreFile> doClose(
-      final boolean abort, MonitoredTask status)
-  throws IOException {
+  private Map<byte[], List<StoreFile>> doClose(final boolean abort, MonitoredTask status)
+      throws IOException {
     if (isClosed()) {
       LOG.warn("Region " + this + " already closed");
       return null;
@@ -902,28 +894,35 @@ public class HRegion implements HeapSize
         internalFlushcache(status);
       }
 
-      List<StoreFile> result = new ArrayList<StoreFile>();
+      Map<byte[], List<StoreFile>> result =
+        new TreeMap<byte[], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
       if (!stores.isEmpty()) {
         // initialize the thread pool for closing stores in parallel.
         ThreadPoolExecutor storeCloserThreadPool =
           getStoreOpenAndCloseThreadPool("StoreCloserThread-" + this.getRegionNameAsString());
-        CompletionService<Collection<StoreFile>> completionService =
-          new ExecutorCompletionService<Collection<StoreFile>>(storeCloserThreadPool);
+        CompletionService<Pair<byte[], Collection<StoreFile>>> completionService =
+          new ExecutorCompletionService<Pair<byte[], Collection<StoreFile>>>(storeCloserThreadPool);
 
         // close each store in parallel
         for (final Store store : stores.values()) {
           completionService
-              .submit(new Callable<Collection<StoreFile>>() {
-                public Collection<StoreFile> call() throws IOException {
-                  return store.close();
+              .submit(new Callable<Pair<byte[], Collection<StoreFile>>>() {
+                public Pair<byte[], Collection<StoreFile>> call() throws IOException {
+                  return new Pair<byte[], Collection<StoreFile>>(
+                    store.getFamily().getName(), store.close());
                 }
               });
         }
         try {
           for (int i = 0; i < stores.size(); i++) {
-            Future<Collection<StoreFile>> future = completionService.take();
-            Collection<StoreFile> storeFileList = future.get();
-            result.addAll(storeFileList);
+            Future<Pair<byte[], Collection<StoreFile>>> future = completionService.take();
+            Pair<byte[], Collection<StoreFile>> storeFiles = future.get();
+            List<StoreFile> familyFiles = result.get(storeFiles.getFirst());
+            if (familyFiles == null) {
+              familyFiles = new ArrayList<StoreFile>();
+              result.put(storeFiles.getFirst(), familyFiles);
+            }
+            familyFiles.addAll(storeFiles.getSecond());
           }
         } catch (InterruptedException e) {
           throw new IOException(e);
@@ -1062,11 +1061,6 @@ public class HRegion implements HeapSize
     return this.baseConf;
   }
 
-  /** @return region directory Path */
-  public Path getRegionDir() {
-    return fs.getRegionDir();
-  }
-
   /** @return {@link FileSystem} being used by this region */
   public FileSystem getFilesystem() {
     return fs.getFileSystem();
@@ -2348,7 +2342,7 @@ public class HRegion implements HeapSize
     // 1. dump region meta info into the snapshot directory
     LOG.debug("Storing region-info for snapshot.");
     HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
-        getFilesystem(), snapshotDir, getRegionInfo());
+        this.fs.getFileSystem(), snapshotDir, getRegionInfo());
 
     // 2. iterate through all the stores in the region
     LOG.debug("Creating references for hfiles");
@@ -3290,11 +3284,6 @@ public class HRegion implements HeapSize
     return this.getRegionNameAsString();
   }
 
-  /** @return Path of region base directory */
-  public Path getTableDir() {
-    return this.fs.getTableDir();
-  }
-
   /**
    * RegionScannerImpl is used to combine scanners from multiple Stores (aka column families).
    */
@@ -4022,7 +4011,8 @@ public class HRegion implements HeapSize
    */
   public static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter)
       throws IOException {
-    HRegion r = newHRegion(other.getTableDir(), other.getLog(), other.getFilesystem(),
+    HRegionFileSystem regionFs = other.getRegionFileSystem();
+    HRegion r = newHRegion(regionFs.getTableDir(), other.getLog(), regionFs.getFileSystem(),
         other.baseConf, other.getRegionInfo(), other.getTableDesc(), null);
     return r.openHRegion(reporter);
   }
@@ -4056,22 +4046,14 @@ public class HRegion implements HeapSize
   /**
    * Create a daughter region from given a temp directory with the region data.
    * @param hri Spec. for daughter region to open.
-   * @param daughterTmpDir Directory that contains region files.
    * @throws IOException
    */
-  HRegion createDaughterRegion(final HRegionInfo hri, final Path daughterTmpDir)
-      throws IOException {
-    FileSystem fs = this.fs.getFileSystem();
-    HRegion r = HRegion.newHRegion(this.getTableDir(), this.getLog(), fs,
+  HRegion createDaughterRegionFromSplits(final HRegionInfo hri) throws IOException {
+    HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getLog(), fs.getFileSystem(),
         this.getBaseConf(), hri, this.getTableDesc(), rsServices);
     r.readRequestsCount.set(this.getReadRequestsCount() / 2);
     r.writeRequestsCount.set(this.getWriteRequestsCount() / 2);
-    // Move the tmp dir in the expected location
-    if (daughterTmpDir != null && fs.exists(daughterTmpDir)) {
-      if (!fs.rename(daughterTmpDir, r.getRegionDir())) {
-        LOG.warn("Unable to rename " + daughterTmpDir + " to " + r.getRegionDir());
-      }
-    }
+    fs.commitDaughterRegion(hri);
     return r;
   }
 
@@ -4086,8 +4068,7 @@ public class HRegion implements HeapSize
    * @throws IOException
    */
   // TODO remove since only test and merge use this
-  public static void addRegionToMETA(HRegion meta, HRegion r)
-  throws IOException {
+  public static void addRegionToMETA(final HRegion meta, final HRegion r) throws IOException {
     meta.checkResources();
     // The row key is the region name
     byte[] row = r.getRegionName();
@@ -4145,24 +4126,6 @@ public class HRegion implements HeapSize
   }
 
   /**
-   * Make the directories for a specific column family
-   *
-   * @param fs the file system
-   * @param tabledir base directory where region will live (usually the table dir)
-   * @param hri
-   * @param colFamily the column family
-   * @throws IOException
-   */
-  private static Path makeColumnFamilyDirs(FileSystem fs, Path tabledir,
-    final HRegionInfo hri, byte [] colFamily) throws IOException {
-    Path dir = HStore.getStoreHomedir(tabledir, hri, colFamily);
-    if (!fs.mkdirs(dir)) {
-      LOG.warn("Failed to create " + dir);
-    }
-    return dir;
-  }
-
-  /**
    * Merge two HRegions.  The regions must be adjacent and must not overlap.
    *
    * @param srcA
@@ -4202,37 +4165,35 @@ public class HRegion implements HeapSize
    * @return new merged region
    * @throws IOException
    */
-  public static HRegion merge(HRegion a, HRegion b)
-  throws IOException {
+  public static HRegion merge(final HRegion a, final HRegion b) throws IOException {
     if (!a.getRegionInfo().getTableNameAsString().equals(
         b.getRegionInfo().getTableNameAsString())) {
       throw new IOException("Regions do not belong to the same table");
     }
 
-    FileSystem fs = a.getFilesystem();
+    FileSystem fs = a.getRegionFileSystem().getFileSystem();
 
     // Make sure each region's cache is empty
-
     a.flushcache();
     b.flushcache();
 
     // Compact each region so we only have one store file per family
-
     a.compactStores(true);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for region: " + a);
-      FSUtils.logFileSystemState(fs, a.getRegionDir(), LOG);
+      a.getRegionFileSystem().logFileSystemState(LOG);
     }
     b.compactStores(true);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for region: " + b);
-      FSUtils.logFileSystemState(fs, b.getRegionDir(), LOG);
+      b.getRegionFileSystem().logFileSystemState(LOG);
     }
 
     Configuration conf = a.baseConf;
     HTableDescriptor tabledesc = a.getTableDesc();
     HLog log = a.getLog();
-    Path tableDir = a.getTableDir();
+    Path tableDir = a.getRegionFileSystem().getTableDir();
+
     // Presume both are of same region type -- i.e. both user or catalog
     // table regions.  This way can use comparator.
     final byte[] startKey =
@@ -4258,43 +4219,34 @@ public class HRegion implements HeapSize
          ? b.getEndKey()
          : a.getEndKey());
 
-    HRegionInfo newRegionInfo =
-        new HRegionInfo(tabledesc.getName(), startKey, endKey);
-    LOG.info("Creating new region " + newRegionInfo.toString());
-    String encodedName = newRegionInfo.getEncodedName();
-    Path newRegionDir = HRegion.getRegionDir(a.getTableDir(), encodedName);
-    if(fs.exists(newRegionDir)) {
-      throw new IOException("Cannot merge; target file collision at " +
-          newRegionDir);
-    }
-    fs.mkdirs(newRegionDir);
+    HRegionInfo newRegionInfo = new HRegionInfo(tabledesc.getName(), startKey, endKey);
+
+    LOG.info("Creating new region " + newRegionInfo);
+    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
+        conf, fs, tableDir, newRegionInfo);
 
     LOG.info("starting merge of regions: " + a + " and " + b +
       " into new region " + newRegionInfo.toString() +
         " with start key <" + Bytes.toStringBinary(startKey) + "> and end key <" +
         Bytes.toStringBinary(endKey) + ">");
 
-    // Move HStoreFiles under new region directory
-    Map<byte [], List<StoreFile>> byFamily =
-      new TreeMap<byte [], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
-    byFamily = filesByFamily(byFamily, a.close());
-    byFamily = filesByFamily(byFamily, b.close());
-    for (Map.Entry<byte [], List<StoreFile>> es : byFamily.entrySet()) {
-      byte [] colFamily = es.getKey();
-      Path storeDir = makeColumnFamilyDirs(fs, tableDir, newRegionInfo, colFamily);
-      // Because we compacted the source regions we should have no more than two
-      // HStoreFiles per family and there will be no reference store
-      List<StoreFile> srcFiles = es.getValue();
-      for (StoreFile hsf: srcFiles) {
-        StoreFile.rename(fs, hsf.getPath(), StoreFile.getUniqueFile(fs, storeDir));
-      }
-    }
+    // Because we compacted the source regions we should have no more than two
+    // StoreFiles per family and there will be no reference store
+    Map<byte[], List<StoreFile>> aStoreFiles = a.close();
+    Map<byte[], List<StoreFile>> bStoreFiles = b.close();
+
+    // Move StoreFiles under new region directory
+    regionFs.commitStoreFiles(aStoreFiles);
+    regionFs.commitStoreFiles(bStoreFiles);
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for new region");
-      FSUtils.logFileSystemState(fs, newRegionDir, LOG);
+      regionFs.logFileSystemState(LOG);
     }
+
+    // Create HRegion and update the metrics
     HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf,
-        newRegionInfo, a.getTableDesc(), null);
+        newRegionInfo, tabledesc, null);
     dstRegion.readRequestsCount.set(a.readRequestsCount.get() + b.readRequestsCount.get());
     dstRegion.writeRequestsCount.set(a.writeRequestsCount.get() + b.writeRequestsCount.get());
     dstRegion.checkAndMutateChecksFailed.set(
@@ -4303,44 +4255,23 @@ public class HRegion implements HeapSize
       a.checkAndMutateChecksPassed.get() + b.checkAndMutateChecksPassed.get());
     dstRegion.initialize();
     dstRegion.compactStores();
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for new region");
-      FSUtils.logFileSystemState(fs, dstRegion.getRegionDir(), LOG);
+      dstRegion.getRegionFileSystem().logFileSystemState(LOG);
     }
 
     // delete out the 'A' region
-    HFileArchiver.archiveRegion(fs,
-      FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(), a.getRegionDir());
+    HRegionFileSystem.deleteRegionFromFileSystem(
+      a.getBaseConf(), fs, tableDir, a.getRegionInfo());
     // delete out the 'B' region
-    HFileArchiver.archiveRegion(fs,
-      FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(), b.getRegionDir());
+    HRegionFileSystem.deleteRegionFromFileSystem(
+      b.getBaseConf(), fs, tableDir, b.getRegionInfo());
 
     LOG.info("merge completed. New region is " + dstRegion);
-
     return dstRegion;
   }
 
-  /*
-   * Fills a map with a vector of store files keyed by column family.
-   * @param byFamily Map to fill.
-   * @param storeFiles Store files to process.
-   * @param family
-   * @return Returns <code>byFamily</code>
-   */
-  private static Map<byte [], List<StoreFile>> filesByFamily(
-      Map<byte [], List<StoreFile>> byFamily, List<StoreFile> storeFiles) {
-    for (StoreFile src: storeFiles) {
-      byte [] family = src.getFamily();
-      List<StoreFile> v = byFamily.get(family);
-      if (v == null) {
-        v = new ArrayList<StoreFile>();
-        byFamily.put(family, v);
-      }
-      v.add(src);
-    }
-    return byFamily;
-  }
-
   /**
    * @return True if needs a major compaction.
    * @throws IOException

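Note the signature change threaded through the hunks above: HRegion.close() now returns the closed store files keyed by family (Map<byte[], List<StoreFile>>) instead of a flat list, which is what lets merge hand the result straight to commitStoreFiles(). A small sketch of consuming the new return type; the wrapper method is illustrative only:

    import java.io.IOException;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.StoreFile;
    import org.apache.hadoop.hbase.util.Bytes;

    static void logClosedFiles(HRegion region) throws IOException {
      Map<byte[], List<StoreFile>> byFamily = region.close();
      if (byFamily == null) return;  // doClose() returns null if already closed
      for (Map.Entry<byte[], List<StoreFile>> e : byFamily.entrySet()) {
        System.out.println(Bytes.toString(e.getKey()) + ": "
            + e.getValue().size() + " store file(s) closed");
      }
    }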
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java Fri Mar 15 22:19:34 2013
@@ -24,6 +24,8 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 
 import org.apache.commons.logging.Log;
@@ -36,11 +38,13 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -108,14 +112,14 @@ public class HRegionFileSystem {
   //  Temp Helpers
   // ===========================================================================
   /** @return {@link Path} to the region's temp directory, used for file creations */
-  public Path getTempDir() {
+  Path getTempDir() {
     return new Path(getRegionDir(), REGION_TEMP_DIR);
   }
 
   /**
    * Clean up any temp detritus that may have been left around from previous operation attempts.
    */
-  public void cleanupTempDir() throws IOException {
+  void cleanupTempDir() throws IOException {
     FSUtils.deleteDirectory(fs, getTempDir());
   }
 
@@ -137,7 +141,7 @@ public class HRegionFileSystem {
    * @return {@link Path} to the directory of the specified family
    * @throws IOException if the directory creation fails.
    */
-  public Path createStoreDir(final String familyName) throws IOException {
+  Path createStoreDir(final String familyName) throws IOException {
     Path storeDir = getStoreDir(familyName);
     if (!fs.exists(storeDir) && !fs.mkdirs(storeDir)) {
       throw new IOException("Failed create of: " + storeDir);
@@ -176,7 +180,25 @@ public class HRegionFileSystem {
   }
 
   /**
+   * Returns true if the specified family has reference files
+   * @param familyName Column Family Name
+   * @return true if family contains reference files
+   * @throws IOException
+   */
+  public boolean hasReferences(final String familyName) throws IOException {
+    FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
+      new PathFilter () {
+        public boolean accept(Path path) {
+          return StoreFileInfo.isReference(path);
+        }
+      }
+    );
+    return files != null && files.length > 0;
+  }
+
+  /**
    * @return the set of families present on disk
+   * @throws IOException
    */
   public Collection<String> getFamilies() throws IOException {
     FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
@@ -191,6 +213,24 @@ public class HRegionFileSystem {
   }
 
   /**
+   * Remove the region family from disk, archiving the store files.
+   * @param familyName Column Family Name
+   * @throws IOException if an error occurs during the archiving
+   */
+  public void deleteFamily(final String familyName) throws IOException {
+    // archive family store files
+    HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, Bytes.toBytes(familyName));
+
+    // delete the family folder
+    Path familyDir = getStoreDir(familyName);
+    if (!fs.delete(familyDir, true)) {
+      throw new IOException("Could not delete family " + familyName +
+        " from FileSystem for region " + regionInfo.getRegionNameAsString() +
+        "(" + regionInfo.getEncodedName() + ")");
+    }
+  }
+
+  /**
    * Generate a unique file name, used by createTempName() and commitStoreFile()
    * @param suffix extra information to append to the generated name
    * @return Unique file name
@@ -252,7 +292,7 @@ public class HRegionFileSystem {
    * @return The new {@link Path} of the committed file
    * @throws IOException
    */
-  public Path commitStoreFile(final String familyName, final Path buildPath,
+  private Path commitStoreFile(final String familyName, final Path buildPath,
       final long seqNum, final boolean generateNewName) throws IOException {
     Path storeDir = getStoreDir(familyName);
     fs.mkdirs(storeDir);
@@ -272,6 +312,20 @@ public class HRegionFileSystem {
   }
 
   /**
+   * Moves multiple store files into the region's corresponding family store directories.
+   * @param storeFiles list of store files divided by family
+   * @throws IOException
+   */
+  void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
+    for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
+      String familyName = Bytes.toString(es.getKey());
+      for (StoreFile sf: es.getValue()) {
+        commitStoreFile(familyName, sf.getPath());
+      }
+    }
+  }
+
+  /**
    * Archives the specified store file from the specified family.
    * @param familyName Family that contains the store files
    * @param filePath {@link Path} to the store file to remove
@@ -306,7 +360,7 @@ public class HRegionFileSystem {
    * @return The destination {@link Path} of the bulk loaded file
    * @throws IOException
    */
-  public Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
+  Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
       throws IOException {
     // Copy the file if it's on another filesystem
     FileSystem srcFs = srcPath.getFileSystem(conf);
@@ -331,29 +385,134 @@ public class HRegionFileSystem {
   //  Splits Helpers
   // ===========================================================================
   /** @return {@link Path} to the temp directory used during split operations */
-  public Path getSplitsDir() {
+  Path getSplitsDir() {
     return new Path(getRegionDir(), REGION_SPLITS_DIR);
   }
 
+  Path getSplitsDir(final HRegionInfo hri) {
+    return new Path(getSplitsDir(), hri.getEncodedName());
+  }
+
   /**
    * Clean up any split detritus that may have been left around from previous split attempts.
    */
-  public void cleanupSplitsDir() throws IOException {
+  void cleanupSplitsDir() throws IOException {
     FSUtils.deleteDirectory(fs, getSplitsDir());
   }
 
+  /**
+   * Clean up any split detritus that may have been left around from previous
+   * split attempts.
+   * Call this method on initial region deploy.
+   * @throws IOException
+   */
+  void cleanupAnySplitDetritus() throws IOException {
+    Path splitdir = this.getSplitsDir();
+    if (!fs.exists(splitdir)) return;
+    // Look at the splitdir.  It could have the encoded names of the daughter
+    // regions we tried to make.  See if the daughter regions actually got made
+    // out under the tabledir.  If here under splitdir still, then the split did
+    // not complete.  Try and do cleanup.  This code WILL NOT catch the case
+    // where we successfully created daughter a but regionserver crashed during
+    // the creation of region b.  In this case, there'll be an orphan daughter
+    // dir in the filesystem.  TODO: Fix.
+    FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
+    if (daughters != null) {
+      for (FileStatus daughter: daughters) {
+        Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
+        if (fs.exists(daughterDir) && !fs.delete(daughterDir, true)) {
+          throw new IOException("Failed delete of " + daughterDir);
+        }
+      }
+    }
+    cleanupSplitsDir();
+    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
+  }
+
+  /**
+   * Remove daughter region
+   * @param regionInfo daughter {@link HRegionInfo}
+   * @throws IOException
+   */
+  void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
+    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
+    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
+      throw new IOException("Failed delete of " + regionDir);
+    }
+  }
+
+  /**
+   * Commit a daughter region, moving it from the split temporary directory
+   * to the proper location in the filesystem.
+   * @param regionInfo daughter {@link HRegionInfo}
+   * @throws IOException
+   */
+  Path commitDaughterRegion(final HRegionInfo regionInfo) throws IOException {
+    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
+    Path daughterTmpDir = this.getSplitsDir(regionInfo);
+    if (fs.exists(daughterTmpDir) && !fs.rename(daughterTmpDir, regionDir)) {
+      throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir);
+    }
+    return regionDir;
+  }
+
+  /**
+   * Create the region splits directory.
+   */
+  void createSplitsDir() throws IOException {
+    Path splitdir = getSplitsDir();
+    if (fs.exists(splitdir)) {
+      LOG.info("The " + splitdir + " directory exists.  Hence deleting it to recreate it");
+      if (!fs.delete(splitdir, true)) {
+        throw new IOException("Failed deletion of " + splitdir
+            + " before creating them again.");
+      }
+    }
+    if (!fs.mkdirs(splitdir)) {
+      throw new IOException("Failed create of " + splitdir);
+    }
+  }
+
+  /**
+   * Write out a split reference. Package local so it doesn't leak out of
+   * regionserver.
+   * @param hri {@link HRegionInfo} of the destination
+   * @param familyName Column Family Name
+   * @param f File to split.
+   * @param splitRow Split Row
+   * @param top True if we are referring to the top half of the hfile.
+   * @return Path to created reference.
+   * @throws IOException
+   */
+  Path splitStoreFile(final HRegionInfo hri, final String familyName,
+      final StoreFile f, final byte[] splitRow, final boolean top) throws IOException {
+    Path splitDir = new Path(getSplitsDir(hri), familyName);
+    // A reference to the top or bottom half of the store file, per the flag.
+    Reference r =
+      top ? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
+    // Add the referred-to region's name as a dot-separated suffix.
+    // See REF_NAME_REGEX above.  The referred-to region's name is
+    // up in the path of the passed-in <code>f</code> -- the parent dir is the family,
+    // then the directory above is the region name.
+    String parentRegionName = regionInfo.getEncodedName();
+    // Write reference with same file id only with the other region name as
+    // suffix and into the new region location (under same family).
+    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
+    return r.write(fs, p);
+  }
+
   // ===========================================================================
   //  Merge Helpers
   // ===========================================================================
   /** @return {@link Path} to the temp directory used during merge operations */
-  public Path getMergesDir() {
+  Path getMergesDir() {
     return new Path(getRegionDir(), REGION_MERGES_DIR);
   }
 
   /**
    * Clean up any merge detritus that may have been left around from previous merge attempts.
    */
-  public void cleanupMergesDir() throws IOException {
+  void cleanupMergesDir() throws IOException {
     FSUtils.deleteDirectory(fs, getMergesDir());
   }
 
@@ -361,6 +520,15 @@ public class HRegionFileSystem {
   //  Create/Open/Delete Helpers
   // ===========================================================================
   /**
+   * Log the current state of the region
+   * @param LOG log to output information
+   * @throws IOException if an unexpected exception occurs
+   */
+  void logFileSystemState(final Log LOG) throws IOException {
+    FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
+  }
+
+  /**
    * @param hri
    * @return Content of the file we write out to the filesystem under a region
    * @throws IOException
@@ -517,10 +685,12 @@ public class HRegionFileSystem {
    * @param fs {@link FileSystem} from which to add the region
    * @param tableDir {@link Path} to where the table is being stored
    * @param regionInfo {@link HRegionInfo} for region to be added
+   * @param readOnly True if you don't want to edit the region data
    * @throws IOException if the region creation fails due to a FileSystem exception.
    */
   public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
-      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
+      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
+      throws IOException {
     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
     Path regionDir = regionFs.getRegionDir();
 
@@ -529,12 +699,16 @@ public class HRegionFileSystem {
       throw new IOException("The specified region do not exists on disk: " + regionDir);
     }
 
-    // Cleanup temporary directories
-    regionFs.cleanupTempDir();
-    regionFs.cleanupSplitsDir();
-    regionFs.cleanupMergesDir();
-    // if it doesn't exists, Write HRI to a file, in case we need to recover .META.
-    regionFs.checkRegionInfoOnFilesystem();
+    if (!readOnly) {
+      // Cleanup temporary directories
+      regionFs.cleanupTempDir();
+      regionFs.cleanupSplitsDir();
+      regionFs.cleanupMergesDir();
+
+      // if it doesn't exist, write HRI to a file, in case we need to recover .META.
+      regionFs.checkRegionInfoOnFilesystem();
+    }
+
     return regionFs;
   }
 

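Taken together, the static helpers above give HRegionFileSystem the whole region-directory lifecycle. A hedged sketch mirroring what HRegion.merge now does for the merged region (all arguments are placeholders):

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

    static void regionDirLifecycle(Configuration conf, FileSystem fs,
        Path tableDir, HRegionInfo hri, Log log) throws IOException {
      // Create the region directory tree on disk.
      HRegionFileSystem regionFs =
          HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
      // Dump the on-disk layout for debugging, as merge does above.
      regionFs.logFileSystemState(log);
      // Remove it again; this replaces the direct HFileArchiver.archiveRegion
      // calls that HRegion.merge used to make on the source regions.
      HRegionFileSystem.deleteRegionFromFileSystem(conf, fs, tableDir, hri);
    }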
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java Fri Mar 15 22:19:34 2013
@@ -93,13 +93,12 @@ class SplitRequest implements Runnable {
         return;
       }
       LOG.info("Region split, META updated, and report to master. Parent="
-          + parent.getRegionInfo().getRegionNameAsString() + ", new regions: "
+          + parent.getRegionNameAsString() + ", new regions: "
           + st.getFirstDaughter().getRegionNameAsString() + ", "
           + st.getSecondDaughter().getRegionNameAsString() + ". Split took "
           + StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
     } catch (IOException ex) {
-      LOG.error("Split failed " + this, RemoteExceptionHandler
-          .checkIOException(ex));
+      LOG.error("Split failed " + this, RemoteExceptionHandler.checkIOException(ex));
       server.checkFileSystem();
     } finally {
       if (this.parent.getCoprocessorHost() != null) {

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java Fri Mar 15 22:19:34 2013
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.ListIterator;
+import java.util.Map;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
@@ -34,9 +35,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.Server;
@@ -46,7 +44,6 @@ import org.apache.hadoop.hbase.executor.
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -90,7 +87,6 @@ public class SplitTransaction {
   private final HRegion parent;
   private HRegionInfo hri_a;
   private HRegionInfo hri_b;
-  private Path splitdir;
   private long fileSplitTimeout = 30000;
   private int znodeVersion = -1;
 
@@ -150,7 +146,6 @@ public class SplitTransaction {
   public SplitTransaction(final HRegion r, final byte [] splitrow) {
     this.parent = r;
     this.splitrow = splitrow;
-    this.splitdir = getSplitDir(this.parent);
   }
 
   /**
@@ -174,10 +169,8 @@ public class SplitTransaction {
       return false;
     }
     long rid = getDaughterRegionIdTimestamp(hri);
-    this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow,
-      false, rid);
-    this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey,
-      false, rid);
+    this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow, false, rid);
+    this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey, false, rid);
     return true;
   }
 
@@ -206,7 +199,8 @@ public class SplitTransaction {
    * @param server Hosting server instance.  Can be null when testing (won't try
    * and update in zk if a null server)
    * @param services Used to online/offline regions.
-   * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
+   * @throws IOException If thrown, transaction failed.
+   *    Call {@link #rollback(Server, RegionServerServices)}
    * @return Regions created
    */
   /* package */PairOfSameType<HRegion> createDaughters(final Server server,
@@ -216,7 +210,8 @@ public class SplitTransaction {
         (services != null && services.isStopping())) {
       throw new IOException("Server is stopped or stopping");
     }
-    assert !this.parent.lock.writeLock().isHeldByCurrentThread(): "Unsafe to hold write lock while performing RPCs";
+    assert !this.parent.lock.writeLock().isHeldByCurrentThread():
+      "Unsafe to hold write lock while performing RPCs";
 
     // Coprocessor callback
     if (this.parent.getCoprocessorHost() != null) {
@@ -253,7 +248,8 @@ public class SplitTransaction {
         // Master will get the callback for node change only if the transition is successful.
         // Note that if the transition fails then the rollback will delete the created znode
         // as the journal entry SET_SPLITTING_IN_ZK is added.
-        // TODO : May be we can add some new state to znode and handle the new state incase of success/failure
+        // TODO: Maybe we can add some new state to znode and handle the new state in case
+        //        of success/failure
         this.znodeVersion = transitionNodeSplitting(server.getZooKeeper(),
             this.parent.getRegionInfo(), server.getServerName(), -1);
       } catch (KeeperException e) {
@@ -262,10 +258,10 @@ public class SplitTransaction {
       }
     }
 
-    createSplitDir(this.parent.getFilesystem(), this.splitdir);
+    this.parent.getRegionFileSystem().createSplitsDir();
     this.journal.add(JournalEntry.CREATE_SPLIT_DIR);
 
-    List<StoreFile> hstoreFilesToSplit = null;
+    Map<byte[], List<StoreFile>> hstoreFilesToSplit = null;
     Exception exceptionToThrow = null;
     try{
       hstoreFilesToSplit = this.parent.close(false);
@@ -298,18 +294,18 @@ public class SplitTransaction {
     // splitStoreFiles creates daughter region dirs under the parent splits dir
     // Nothing to unroll here if failure -- clean up of CREATE_SPLIT_DIR will
     // clean this up.
-    splitStoreFiles(this.splitdir, hstoreFilesToSplit);
+    splitStoreFiles(hstoreFilesToSplit);
 
     // Log to the journal that we are creating region A, the first daughter
     // region.  We could fail halfway through.  If we do, we could have left
     // stuff in fs that needs cleanup -- a storefile or two.  Thats why we
     // add entry to journal BEFORE rather than AFTER the change.
     this.journal.add(JournalEntry.STARTED_REGION_A_CREATION);
-    HRegion a = createDaughterRegion(this.hri_a);
+    HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a);
 
     // Ditto
     this.journal.add(JournalEntry.STARTED_REGION_B_CREATION);
-    HRegion b = createDaughterRegion(this.hri_b);
+    HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b);
 
     // This is the point of no return.  Adding subsequent edits to .META. as we
     // do below when we do the daughter opens adding each to .META. can fail in
@@ -347,7 +343,8 @@ public class SplitTransaction {
    * @param services Used to online/offline regions.
    * @param a first daughter region
    * @param a second daughter region
-   * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
+   * @throws IOException If thrown, transaction failed.
+   *          Call {@link #rollback(Server, RegionServerServices)}
    */
   /* package */void openDaughters(final Server server,
       final RegionServerServices services, HRegion a, HRegion b)
@@ -404,7 +401,8 @@ public class SplitTransaction {
    * @param services Used to online/offline regions.
    * @param a first daughter region
    * @param a second daughter region
-   * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
+   * @throws IOException If thrown, transaction failed.
+   *          Call {@link #rollback(Server, RegionServerServices)}
    */
   /* package */void transitionZKNode(final Server server,
       final RegionServerServices services, HRegion a, HRegion b)
@@ -456,7 +454,8 @@ public class SplitTransaction {
    * @param server Hosting server instance.  Can be null when testing (won't try
    * and update in zk if a null server)
    * @param services Used to online/offline regions.
-   * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
+   * @throws IOException If thrown, transaction failed.
+   *          Call {@link #rollback(Server, RegionServerServices)}
    * @return Regions created
    * @throws IOException
    * @see #rollback(Server, RegionServerServices)
@@ -542,56 +541,8 @@ public class SplitTransaction {
     }
   }
 
-  private static Path getSplitDir(final HRegion r) {
-    return new Path(r.getRegionDir(), HRegionFileSystem.REGION_SPLITS_DIR);
-  }
-
-  /**
-   * @param fs Filesystem to use
-   * @param splitdir Directory to store temporary split data in
-   * @throws IOException If <code>splitdir</code> already exists or we fail
-   * to create it.
-   * @see #cleanupSplitDir(FileSystem, Path)
-   */
-  private static void createSplitDir(final FileSystem fs, final Path splitdir)
-  throws IOException {
-    if (fs.exists(splitdir)) {
-      LOG.info("The " + splitdir
-          + " directory exists.  Hence deleting it to recreate it");
-      if (!fs.delete(splitdir, true)) {
-        throw new IOException("Failed deletion of " + splitdir
-            + " before creating them again.");
-      }
-    }
-    if (!fs.mkdirs(splitdir)) throw new IOException("Failed create of " + splitdir);
-  }
-
-  private static void cleanupSplitDir(final FileSystem fs, final Path splitdir)
-  throws IOException {
-    // Splitdir may have been cleaned up by reopen of the parent dir.
-    deleteDir(fs, splitdir, false);
-  }
-
-  /**
-   * @param fs Filesystem to use
-   * @param dir Directory to delete
-   * @param mustPreExist If true, we'll throw exception if <code>dir</code>
-   * does not preexist, else we'll just pass.
-   * @throws IOException Thrown if we fail to delete passed <code>dir</code>
-   */
-  private static void deleteDir(final FileSystem fs, final Path dir,
-      final boolean mustPreExist)
-  throws IOException {
-    if (!fs.exists(dir)) {
-      if (mustPreExist) throw new IOException(dir.toString() + " does not exist!");
-    } else if (!fs.delete(dir, true)) {
-      throw new IOException("Failed delete of " + dir);
-    }
-  }
-
-  private void splitStoreFiles(final Path splitdir,
-    final List<StoreFile> hstoreFilesToSplit)
-  throws IOException {
+  private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
+      throws IOException {
     if (hstoreFilesToSplit == null) {
       // Could be null because close didn't succeed -- for now consider it fatal
       throw new IOException("Close returned empty list of StoreFiles");
@@ -611,11 +562,12 @@ public class SplitTransaction {
       (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
     List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);
 
-     // Split each store file.
-    for (StoreFile sf: hstoreFilesToSplit) {
-      //splitStoreFile(sf, splitdir);
-      StoreFileSplitter sfs = new StoreFileSplitter(sf, splitdir);
-      futures.add(threadPool.submit(sfs));
+    // Split each store file.
+    for (Map.Entry<byte[], List<StoreFile>> entry: hstoreFilesToSplit.entrySet()) {
+      for (StoreFile sf: entry.getValue()) {
+        StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
+        futures.add(threadPool.submit(sfs));
+      }
     }
     // Shutdown the pool
     threadPool.shutdown();
@@ -652,14 +604,11 @@ public class SplitTransaction {
     }
   }
 
-  private void splitStoreFile(final StoreFile sf, final Path splitdir)
-  throws IOException {
-    FileSystem fs = this.parent.getFilesystem();
-    byte [] family = sf.getFamily();
-    Path storedir = HStore.getStoreHomedir(splitdir, this.hri_a, family);
-    StoreFile.split(fs, storedir, sf, this.splitrow, false);
-    storedir = HStore.getStoreHomedir(splitdir, this.hri_b, family);
-    StoreFile.split(fs, storedir, sf, this.splitrow, true);
+  private void splitStoreFile(final byte[] family, final StoreFile sf) throws IOException {
+    HRegionFileSystem fs = this.parent.getRegionFileSystem();
+    String familyName = Bytes.toString(family);
+    fs.splitStoreFile(this.hri_a, familyName, sf, this.splitrow, false);
+    fs.splitStoreFile(this.hri_b, familyName, sf, this.splitrow, true);
   }
 
   /**
@@ -667,61 +616,26 @@ public class SplitTransaction {
    * in parallel instead of sequentially.
    */
   class StoreFileSplitter implements Callable<Void> {
-
+    private final byte[] family;
     private final StoreFile sf;
-    private final Path splitdir;
 
     /**
      * Constructor that takes what it needs to split
+     * @param family Family that contains the store file
      * @param sf which file
-     * @param splitdir where the splitting is done
      */
-    public StoreFileSplitter(final StoreFile sf, final Path splitdir) {
+    public StoreFileSplitter(final byte[] family, final StoreFile sf) {
       this.sf = sf;
-      this.splitdir = splitdir;
+      this.family = family;
     }
 
     public Void call() throws IOException {
-      splitStoreFile(sf, splitdir);
+      splitStoreFile(family, sf);
       return null;
     }
   }
 
   /**
-   * @param hri Spec. for daughter region to open.
-   * @param rsServices RegionServerServices this region should use.
-   * @return Created daughter HRegion.
-   * @throws IOException
-   * @see #cleanupDaughterRegion(FileSystem, Path, String)
-   */
-  HRegion createDaughterRegion(final HRegionInfo hri) throws IOException {
-    // Package private so unit tests have access.
-    Path regionDir = getSplitDirForDaughter(this.splitdir, hri);
-    return this.parent.createDaughterRegion(hri, regionDir);
-  }
-
-  private static void cleanupDaughterRegion(final FileSystem fs,
-    final Path tabledir, final String encodedName)
-  throws IOException {
-    Path regiondir = HRegion.getRegionDir(tabledir, encodedName);
-    // Dir may not preexist.
-    deleteDir(fs, regiondir, false);
-  }
-
-  /*
-   * Get the daughter directories in the splits dir.  The splits dir is under
-   * the parent regions' directory.
-   * @param splitdir
-   * @param hri
-   * @return Path to daughter split dir.
-   * @throws IOException
-   */
-  private static Path getSplitDirForDaughter(final Path splitdir, final HRegionInfo hri)
-      throws IOException {
-    return new Path(splitdir, hri.getEncodedName());
-  }
-
-  /**
    * @param server Hosting server instance (May be null when testing).
    * @param services
    * @throws IOException If thrown, rollback failed.  Take drastic action.
@@ -736,7 +650,6 @@ public class SplitTransaction {
     }
 
     boolean result = true;
-    FileSystem fs = this.parent.getFilesystem();
     ListIterator<JournalEntry> iterator =
       this.journal.listIterator(this.journal.size());
     // Iterate in reverse.
@@ -751,8 +664,8 @@ public class SplitTransaction {
         break;
 
       case CREATE_SPLIT_DIR:
-    	this.parent.writestate.writesEnabled = true;
-        cleanupSplitDir(fs, this.splitdir);
+        this.parent.writestate.writesEnabled = true;
+        this.parent.getRegionFileSystem().cleanupSplitsDir();
         break;
 
       case CLOSED_PARENT_REGION:
@@ -771,13 +684,11 @@ public class SplitTransaction {
         break;
 
       case STARTED_REGION_A_CREATION:
-        cleanupDaughterRegion(fs, this.parent.getTableDir(),
-          this.hri_a.getEncodedName());
+        this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a);
         break;
 
       case STARTED_REGION_B_CREATION:
-        cleanupDaughterRegion(fs, this.parent.getTableDir(),
-          this.hri_b.getEncodedName());
+        this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b);
         break;
 
       case OFFLINED_PARENT:
@@ -810,39 +721,6 @@ public class SplitTransaction {
     return hri_b;
   }
 
-  // For unit testing.
-  Path getSplitDir() {
-    return this.splitdir;
-  }
-
-  /**
-   * Clean up any split detritus that may have been left around from previous
-   * split attempts.
-   * Call this method on initial region deploy.  Cleans up any mess
-   * left by previous deploys of passed <code>r</code> region.
-   * @param r
-   * @throws IOException
-   */
-  static void cleanupAnySplitDetritus(final HRegion r) throws IOException {
-    Path splitdir = getSplitDir(r);
-    FileSystem fs = r.getFilesystem();
-    if (!fs.exists(splitdir)) return;
-    // Look at the splitdir.  It could have the encoded names of the daughter
-    // regions we tried to make.  See if the daughter regions actually got made
-    // out under the tabledir.  If here under splitdir still, then the split did
-    // not complete.  Try and do cleanup.  This code WILL NOT catch the case
-    // where we successfully created daughter a but regionserver crashed during
-    // the creation of region b.  In this case, there'll be an orphan daughter
-   * dir in the filesystem.  TODO: Fix.
-    FileStatus [] daughters = fs.listStatus(splitdir, new FSUtils.DirFilter(fs));
-    for (int i = 0; i < daughters.length; i++) {
-      cleanupDaughterRegion(fs, r.getTableDir(),
-        daughters[i].getPath().getName());
-    }
-    cleanupSplitDir(r.getFilesystem(), splitdir);
-    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
-  }
-
   private static void cleanZK(final Server server, final HRegionInfo hri) {
     try {
      // Only delete if it's in expected state; could have been hijacked.

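For anyone following the refactor, the hunks above shrink SplitTransaction's
direct filesystem work down to a handful of HRegionFileSystem calls. Below is
a minimal, self-contained sketch of that surface, using stand-in names
(parent, hriA, hriB, sf, splitRow) for the transaction's fields:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
    import org.apache.hadoop.hbase.regionserver.StoreFile;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitFsSketch {
      // Write the two daughter references for one parent store file,
      // exactly as the new splitStoreFile() above does.
      static void splitOneFile(HRegion parent, HRegionInfo hriA,
          HRegionInfo hriB, byte[] family, StoreFile sf, byte[] splitRow)
          throws IOException {
        HRegionFileSystem fs = parent.getRegionFileSystem();
        String familyName = Bytes.toString(family);
        fs.splitStoreFile(hriA, familyName, sf, splitRow, false); // bottom half
        fs.splitStoreFile(hriB, familyName, sf, splitRow, true);  // top half
      }

      // The cleanup calls the rollback journal cases now delegate to.
      static void rollbackFsState(HRegion parent, HRegionInfo hriA,
          HRegionInfo hriB) throws IOException {
        HRegionFileSystem fs = parent.getRegionFileSystem();
        fs.cleanupSplitsDir();           // undo CREATE_SPLIT_DIR
        fs.cleanupDaughterRegion(hriA);  // undo STARTED_REGION_A_CREATION
        fs.cleanupDaughterRegion(hriB);  // undo STARTED_REGION_B_CREATION
      }
    }

Keeping both the reference writing and the rollback cleanup behind one class
is what lets the journal cases above collapse to single calls.
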
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Fri Mar 15 22:19:34 2013
@@ -36,7 +36,6 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
@@ -45,7 +44,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.KeyValue.MetaKeyComparator;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.BlockType;
@@ -254,13 +252,6 @@ public class StoreFile {
   }
 
   /**
-   * @return The Store/ColumnFamily this file belongs to.
-   */
-  byte [] getFamily() {
-    return Bytes.toBytes(this.getPath().getParent().getName());
-  }
-
-  /**
    * @return True if this is a StoreFile Reference; call after {@link #open()}
    * else may get wrong answer.
    */
@@ -545,28 +536,6 @@ public class StoreFile {
     return sb.toString();
   }
 
-  /**
-   * Utility to help with rename.
-   * @param fs
-   * @param src
-   * @param tgt
-   * @return True if succeeded.
-   * @throws IOException
-   */
-  public static Path rename(final FileSystem fs,
-                            final Path src,
-                            final Path tgt)
-      throws IOException {
-
-    if (!fs.exists(src)) {
-      throw new FileNotFoundException(src.toString());
-    }
-    if (!fs.rename(src, tgt)) {
-      throw new IOException("Failed rename of " + src + " to " + tgt);
-    }
-    return tgt;
-  }
-
   public static class WriterBuilder {
     private final Configuration conf;
     private final CacheConfig cacheConf;
@@ -720,38 +689,6 @@ public class StoreFile {
     return new Path(dir, UUID.randomUUID().toString().replaceAll("-", ""));
   }
 
-  /**
-   * Write out a split reference. Package local so it doesn't leak out of
-   * regionserver.
-   * @param fs
-   * @param splitDir Presumes path format is actually
-   *          <code>SOME_DIRECTORY/REGIONNAME/FAMILY</code>.
-   * @param f File to split.
-   * @param splitRow
-   * @param top True if we are referring to the top half of the hfile.
-   * @return Path to created reference.
-   * @throws IOException
-   */
-  static Path split(final FileSystem fs,
-                    final Path splitDir,
-                    final StoreFile f,
-                    final byte [] splitRow,
-                    final boolean top)
-      throws IOException {
-    // A reference to the bottom half of the hsf store file.
-    Reference r =
-      top? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
-    // Add the referred-to region's name as a dot-separated suffix.
-    // See REF_NAME_REGEX regex above.  The referred-to region's name is
-    // up in the path of the passed-in <code>f</code> -- parentdir is family,
-    // then the directory above is the region name.
-    String parentRegionName = f.getPath().getParent().getParent().getName();
-    // Write reference with same file id only with the other region name as
-    // suffix and into the new region location (under same family).
-    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
-    return r.write(fs, p);
-  }
-
   public Long getMinimumTimestamp() {
     return (getReader().timeRangeTracker == null) ?
         null :

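The removed StoreFile.split() above was the old home of the reference-file
convention: a daughter region gets a file named
<hfile>.<parentRegionEncodedName> under its own family directory, holding a
top or bottom Reference instead of data. A short sketch of just that naming
step, with illustrative paths only (the encoded names below are made up):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.Reference;

    public class ReferenceNameSketch {
      public static void main(String[] args) {
        byte[] splitRow = "row-5000".getBytes();
        // Parent store file lives at .../<region>/<family>/<hfile>.
        Path parentFile =
            new Path("/hbase/t1/1028785192/f1/7842611949442548880");
        String parentRegionName = parentFile.getParent().getParent().getName();

        // Top half goes to one daughter; a bottom Reference is the mirror case.
        Reference top = Reference.createTopReference(splitRow);
        Path daughterStoreDir = new Path("/hbase/t1/daughterB/f1");
        Path ref = new Path(daughterStoreDir,
            parentFile.getName() + "." + parentRegionName);
        // Prints .../daughterB/f1/7842611949442548880.1028785192
        System.out.println(ref + " holds " + top);
        // top.write(fs, ref) would persist it, given a FileSystem handle.
      }
    }
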
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java Fri Mar 15 22:19:34 2013
@@ -330,7 +330,7 @@ public class RestoreSnapshotHelper {
           Path hfile = new Path(familyDir, hfileName);
           LOG.trace("Removing hfile=" + hfile +
             " from region=" + regionInfo.getEncodedName() + " table=" + tableName);
-          HFileArchiver.archiveStoreFile(fs, regionInfo, conf, tableDir, family, hfile);
+          HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile);
         }
       } else {
          // Family doesn't exist in the snapshot

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java Fri Mar 15 22:19:34 2013
@@ -555,7 +555,7 @@ public class HBaseFsck extends Configure
     HRegionInfo hri = new HRegionInfo(template.getName(), orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
     LOG.info("Creating new region : " + hri);
     HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
-    Path target = region.getRegionDir();
+    Path target = region.getRegionFileSystem().getRegionDir();
 
     // rename all the data to new region
     mergeRegionDirs(target, hi);
@@ -2183,11 +2183,11 @@ public class HBaseFsck extends Configure
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
         LOG.info("Created new empty container region: " +
             newRegion + " to contain regions: " + Joiner.on(",").join(overlap));
-        debugLsr(region.getRegionDir());
+        debugLsr(region.getRegionFileSystem().getRegionDir());
 
         // all target regions are closed, should be able to safely cleanup.
         boolean didFix= false;
-        Path target = region.getRegionDir();
+        Path target = region.getRegionFileSystem().getRegionDir();
         for (HbckInfo contained : overlap) {
           LOG.info("Merging " + contained  + " into " + target );
           int merges = mergeRegionDirs(target, contained);

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java Fri Mar 15 22:19:34 2013
@@ -54,21 +54,6 @@ public class HFileArchiveUtil {
 
   /**
    * Get the directory to archive a store directory
-   * @param conf {@link Configuration} to read for the archive directory name
-   * @param tableName table name under which the store currently lives
-   * @param region parent region information under which the store currently lives
-   * @param familyName name of the family in the store
-   * @return {@link Path} to the directory to archive the given store or
-   *         <tt>null</tt> if it should not be archived
-   */
-  public static Path getStoreArchivePath(final Configuration conf, final String tableName,
-      final HRegionInfo region, final String familyName) throws IOException {
-    Path tableArchiveDir = getTableArchivePath(conf, tableName);
-    return HStore.getStoreHomedir(tableArchiveDir, region, Bytes.toBytes(familyName));
-  }
-
-  /**
-   * Get the directory to archive a store directory
    * @param conf {@link Configuration} to read for the archive directory name. Can be null.
    * @param region parent region information under which the store currently lives
    * @param tabledir directory for the table under which the store currently lives
@@ -79,19 +64,17 @@ public class HFileArchiveUtil {
   public static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir,
       byte[] family) {
     Path tableArchiveDir = getTableArchivePath(tabledir);
-    return HStore.getStoreHomedir(tableArchiveDir,
-      HRegionInfo.encodeRegionName(region.getRegionName()), family);
+    return HStore.getStoreHomedir(tableArchiveDir, region, family);
   }
 
   /**
    * Get the archive directory for a given region under the specified table
-   * @param conf {@link Configuration} to read the archive directory from. Can be null
    * @param tabledir the original table directory. Cannot be null.
    * @param regiondir the path to the region directory. Cannot be null.
    * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
    *         should not be archived
    */
-  public static Path getRegionArchiveDir(Configuration conf, Path tabledir, Path regiondir) {
+  public static Path getRegionArchiveDir(Path tabledir, Path regiondir) {
     // get the archive directory for a table
     Path archiveDir = getTableArchivePath(tabledir);
 

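With the table-name overload gone, the two surviving archive lookups are the
ones sketched below; the Configuration argument to getRegionArchiveDir is
dropped as unused. All inputs are assumed, mirroring the signatures in the
hunk above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.HFileArchiveUtil;

    public class ArchivePathSketch {
      // Archive location for one store; conf may be null for this variant,
      // per the javadoc retained above.
      static Path storeArchive(Configuration conf, HRegionInfo hri,
          Path tableDir, byte[] family) {
        return HFileArchiveUtil.getStoreArchivePath(conf, hri, tableDir, family);
      }

      // Archive location for a whole region; no Configuration needed now.
      static Path regionArchive(Path tableDir, Path regionDir) {
        return HFileArchiveUtil.getRegionArchiveDir(tableDir, regionDir);
      }
    }
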
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java Fri Mar 15 22:19:34 2013
@@ -43,7 +43,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -57,8 +56,7 @@ import org.apache.hadoop.hbase.catalog.M
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.NoServerForRegionException;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -123,7 +121,8 @@ import com.google.common.collect.Sets;
  * <p>
  * The more complicated answer is that this depends upon the largest storefile
  * in your region. With a growing data size, this will get larger over time. You
- * want the largest region to be just big enough that the {@link HStore} compact
+ * want the largest region to be just big enough that the
+ * {@link org.apache.hadoop.hbase.regionserver.HStore} compact
  * selection algorithm only compacts it due to a timed major. If you don't, your
  * cluster can be prone to compaction storms as the algorithm decides to run
  * major compactions on a large series of regions all at once. Note that
@@ -628,9 +627,10 @@ public class RegionSplitter {
     LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();
 
     // get table info
-    Path hbDir = new Path(table.getConfiguration().get(HConstants.HBASE_DIR));
-    Path tableDir = HTableDescriptor.getTableDir(hbDir, table.getTableName());
+    Path rootDir = FSUtils.getRootDir(table.getConfiguration());
+    Path tableDir = HTableDescriptor.getTableDir(rootDir, table.getTableName());
     FileSystem fs = tableDir.getFileSystem(table.getConfiguration());
+    HTableDescriptor htd = table.getTableDescriptor();
 
     // clear the cache to forcibly refresh region information
     table.clearRegionCache();
@@ -661,25 +661,22 @@ public class RegionSplitter {
         check.add(table.getRegionLocation(start).getRegionInfo());
         check.add(table.getRegionLocation(split).getRegionInfo());
         for (HRegionInfo hri : check.toArray(new HRegionInfo[] {})) {
-          boolean refFound = false;
           byte[] sk = hri.getStartKey();
           if (sk.length == 0)
             sk = splitAlgo.firstRow();
           String startKey = splitAlgo.rowToStr(sk);
-          HTableDescriptor htd = table.getTableDescriptor();
+
+          HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
+              table.getConfiguration(), fs, tableDir, hri, true);
+
           // check every Column Family for that region
+          boolean refFound = false;
           for (HColumnDescriptor c : htd.getFamilies()) {
-            Path cfDir = HStore.getStoreHomedir(tableDir, hri, c.getName());
-            if (fs.exists(cfDir)) {
-              for (FileStatus file : fs.listStatus(cfDir)) {
-                refFound |= StoreFileInfo.isReference(file.getPath());
-                if (refFound)
-                  break;
-              }
-            }
-            if (refFound)
+            if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
               break;
+            }
           }
+
           // compaction is completed when all reference files are gone
           if (!refFound) {
             check.remove(hri);
@@ -691,8 +688,7 @@ public class RegionSplitter {
           physicalSplitting.add(region);
         }
       } catch (NoServerForRegionException nsfre) {
-        LOG.debug("No Server Exception thrown for: "
-            + splitAlgo.rowToStr(start));
+        LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
         physicalSplitting.add(region);
         table.clearRegionCache();
       }

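The RegionSplitter change above swaps manual directory listing for a
read-only HRegionFileSystem open plus a per-family reference probe (a region
still carries reference files until compaction rewrites them, so their
absence marks the split as physically complete). A self-contained sketch of
that check; the boolean passed to openRegionFromFileSystem is taken, as in
the usage above, to request a read-only open, and all inputs are assumed:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

    public class ReferenceProbeSketch {
      static boolean hasAnyReferences(Configuration conf, FileSystem fs,
          Path tableDir, HRegionInfo hri, HTableDescriptor htd)
          throws IOException {
        HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
            conf, fs, tableDir, hri, true);
        for (HColumnDescriptor c : htd.getFamilies()) {
          // hasReferences() takes a family name, hence the per-family loop.
          if (regionFs.hasReferences(c.getNameAsString())) {
            return true;
          }
        }
        return false;
      }
    }
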
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java Fri Mar 15 22:19:34 2013
@@ -139,7 +139,8 @@ public class TestHFileArchiving {
     FileSystem fs = UTIL.getTestFileSystem();
 
     // now attempt to depose the region
-    Path regionDir = HRegion.getRegionDir(region.getTableDir().getParent(), region.getRegionInfo());
+    Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
+    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
 
     HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
 
@@ -172,7 +173,7 @@ public class TestHFileArchiving {
     assertEquals(1, servingRegions.size());
     HRegion region = servingRegions.get(0);
 
-    FileSystem fs = region.getFilesystem();
+    FileSystem fs = region.getRegionFileSystem().getFileSystem();
 
     // make sure there are some files in the regiondir
     Path rootDir = FSUtils.getRootDir(fs.getConf());
@@ -238,8 +239,7 @@ public class TestHFileArchiving {
     clearArchiveDirectory();
 
     // then get the current store files
-    Path regionDir = region.getRegionDir();
-    List<String> storeFiles = getRegionStoreFiles(fs, regionDir);
+    List<String> storeFiles = getRegionStoreFiles(region);
 
     // then delete the table so the hfiles get archived
     UTIL.deleteTable(TABLE_NAME);
@@ -299,8 +299,7 @@ public class TestHFileArchiving {
     clearArchiveDirectory();
 
     // then get the current store files
-    Path regionDir = region.getRegionDir();
-    List<String> storeFiles = getRegionStoreFiles(fs, regionDir);
+    List<String> storeFiles = getRegionStoreFiles(region);
 
     // then delete the table so the hfiles get archived
     UTIL.getHBaseAdmin().deleteColumn(TABLE_NAME, TEST_FAM);
@@ -416,8 +415,9 @@ public class TestHFileArchiving {
     return fileNames;
   }
 
-  private List<String> getRegionStoreFiles(final FileSystem fs, final Path regionDir) 
-      throws IOException {
+  private List<String> getRegionStoreFiles(final HRegion region) throws IOException {
+    Path regionDir = region.getRegionFileSystem().getRegionDir();
+    FileSystem fs = region.getRegionFileSystem().getFileSystem();
     List<String> storeFiles = getAllFileNames(fs, regionDir);
     // remove all the non-storefile named files for the region
     for (int i = 0; i < storeFiles.size(); i++) {

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java Fri Mar 15 22:19:34 2013
@@ -70,9 +70,9 @@ public class TestHFileLinkCleaner {
 
     Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
     Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
-          tableName, hri, familyName);
+          tableName, hri.getEncodedName(), familyName);
     Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
-          tableLinkName, hriLink, familyName);
+          tableLinkName, hriLink.getEncodedName(), familyName);
 
    // Create an hfile in the archived family dir: <archive>/<table>/<encoded-region-name>/<cf>/
     Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Fri Mar 15 22:19:34 2013
@@ -245,8 +245,8 @@ public class TestHRegion extends HBaseTe
     byte[] family = Bytes.toBytes("family");
     this.region = initHRegion(tableName, method, conf, family);
     try {
-      Path regiondir = region.getRegionDir();
-      FileSystem fs = region.getFilesystem();
+      Path regiondir = region.getRegionFileSystem().getRegionDir();
+      FileSystem fs = region.getRegionFileSystem().getFileSystem();
       byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
       Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
@@ -257,8 +257,7 @@ public class TestHRegion extends HBaseTe
       for (long i = minSeqId; i <= maxSeqId; i += 10) {
         Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
         fs.create(recoveredEdits);
-        HLog.Writer writer = HLogFactory.createWriter(fs,
-            recoveredEdits, conf);
+        HLog.Writer writer = HLogFactory.createWriter(fs, recoveredEdits, conf);
 
         long time = System.nanoTime();
         WALEdit edit = new WALEdit();
@@ -273,8 +272,7 @@ public class TestHRegion extends HBaseTe
       Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
           Bytes.BYTES_COMPARATOR);
       for (Store store : region.getStores().values()) {
-        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
-            minSeqId - 1);
+        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1);
       }
       long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
       assertEquals(maxSeqId, seqId);
@@ -297,8 +295,8 @@ public class TestHRegion extends HBaseTe
     byte[] family = Bytes.toBytes("family");
     this.region = initHRegion(tableName, method, conf, family);
     try {
-      Path regiondir = region.getRegionDir();
-      FileSystem fs = region.getFilesystem();
+      Path regiondir = region.getRegionFileSystem().getRegionDir();
+      FileSystem fs = region.getRegionFileSystem().getFileSystem();
       byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
       Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
@@ -309,8 +307,7 @@ public class TestHRegion extends HBaseTe
       for (long i = minSeqId; i <= maxSeqId; i += 10) {
         Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
         fs.create(recoveredEdits);
-        HLog.Writer writer = HLogFactory.createWriter(fs,
-            recoveredEdits, conf);
+        HLog.Writer writer = HLogFactory.createWriter(fs, recoveredEdits, conf);
 
         long time = System.nanoTime();
         WALEdit edit = new WALEdit();
@@ -354,13 +351,12 @@ public class TestHRegion extends HBaseTe
     byte[] family = Bytes.toBytes("family");
     this.region = initHRegion(tableName, method, conf, family);
     try {
-      Path regiondir = region.getRegionDir();
-      FileSystem fs = region.getFilesystem();
+      Path regiondir = region.getRegionFileSystem().getRegionDir();
+      FileSystem fs = region.getRegionFileSystem().getFileSystem();
 
       Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
       for (int i = 1000; i < 1050; i += 10) {
-        Path recoveredEdits = new Path(
-            recoveredEditsDir, String.format("%019d", i));
+        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
         FSDataOutputStream dos=  fs.create(recoveredEdits);
         dos.writeInt(i);
         dos.close();
@@ -1713,9 +1709,9 @@ public class TestHRegion extends HBaseTe
           openClosedRegion(subregions[i]);
           subregions[i].compactStores();
         }
-        Path oldRegionPath = region.getRegionDir();
-        Path oldRegion1 = subregions[0].getRegionDir();
-        Path oldRegion2 = subregions[1].getRegionDir();
+        Path oldRegionPath = region.getRegionFileSystem().getRegionDir();
+        Path oldRegion1 = subregions[0].getRegionFileSystem().getRegionDir();
+        Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir();
         long startTime = System.currentTimeMillis();
         region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
         LOG.info("Merge regions elapsed time: " +
@@ -3491,8 +3487,8 @@ public class TestHRegion extends HBaseTe
 
     // Create a region and skip the initialization (like CreateTableHandler)
     HRegion region = HRegion.createHRegion(hri, rootDir, conf, htd, null, false, true);
-    Path regionDir = region.getRegionDir();
-    FileSystem fs = region.getFilesystem();
+    Path regionDir = region.getRegionFileSystem().getRegionDir();
+    FileSystem fs = region.getRegionFileSystem().getFileSystem();
     HRegion.closeHRegion(region);
 
     Path regionInfoFile = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE);
@@ -3503,7 +3499,7 @@ public class TestHRegion extends HBaseTe
 
     // Try to open the region
     region = HRegion.openHRegion(rootDir, hri, htd, null, conf);
-    assertEquals(regionDir, region.getRegionDir());
+    assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
     HRegion.closeHRegion(region);
 
     // Verify that the .regioninfo file is still there
@@ -3516,7 +3512,7 @@ public class TestHRegion extends HBaseTe
       fs.exists(regionInfoFile));
 
     region = HRegion.openHRegion(rootDir, hri, htd, null, conf);
-    assertEquals(regionDir, region.getRegionDir());
+    assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
     HRegion.closeHRegion(region);
 
     // Verify that the .regioninfo file is still there

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java Fri Mar 15 22:19:34 2013
@@ -63,7 +63,7 @@ public class TestHRegionFileSystem {
     assertEquals(hri, hriVerify);
 
     // Open the region
-    regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, rootDir, hri);
+    regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, rootDir, hri, false);
     assertEquals(regionDir, regionFs.getRegionDir());
 
     // Delete the region

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java?rev=1457148&r1=1457147&r2=1457148&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java Fri Mar 15 22:19:34 2013
@@ -68,13 +68,13 @@ public class TestHRegionInfo {
     assertEquals(modtime, modtime2);
     // Now load the file.
     HRegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent(
-        FileSystem.get(htu.getConfiguration()), r.getRegionDir());
+        r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
     assertTrue(hri.equals(deserializedHri));
   }
 
   long getModTime(final HRegion r) throws IOException {
-    FileStatus [] statuses =
-      r.getFilesystem().listStatus(new Path(r.getRegionDir(), HRegionFileSystem.REGION_INFO_FILE));
+    FileStatus[] statuses = r.getRegionFileSystem().getFileSystem().listStatus(
+      new Path(r.getRegionFileSystem().getRegionDir(), HRegionFileSystem.REGION_INFO_FILE));
     assertTrue(statuses != null && statuses.length == 1);
     return statuses[0].getModificationTime();
   }


