accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject svn commit: r1496171 [3/3] - in /accumulo/trunk: core/src/main/java/org/apache/accumulo/core/client/admin/ core/src/main/java/org/apache/accumulo/core/client/impl/ core/src/main/java/org/apache/accumulo/core/client/mock/ core/src/main/java/org/apache/a...
Date Mon, 24 Jun 2013 19:29:41 GMT
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/Initialize.java?rev=1496171&r1=1496170&r2=1496171&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/Initialize.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/Initialize.java Mon
Jun 24 19:29:39 2013
@@ -201,8 +201,9 @@ public class Initialize {
       return false;
     }
   }
+  
   private static Path[] paths(String[] paths) {
-    Path result[] = new Path[paths.length];
+    Path[] result = new Path[paths.length];
     for (int i = 0; i < paths.length; i++) {
       result[i] = new Path(paths[i]);
     }
@@ -223,14 +224,14 @@ public class Initialize {
   private static void initFileSystem(Opts opts, VolumeManager fs, UUID uuid) throws IOException
{
     FileStatus fstat;
     
-    // the actual disk location of the root tablet
+    // the actual disk locations of the root table and tablets
     final Path rootTablet = new Path(ServerConstants.getRootTabletDir());
     
-    final Path tableMetadataTabletDirs[] = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(),
MetadataTable.TABLE_TABLET_LOCATION));
-    final Path defaultMetadataTabletDirs[] = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(),
Constants.DEFAULT_TABLET_LOCATION));
+    // the actual disk locations of the metadata table and tablets
+    final Path[] metadataTableDirs = paths(ServerConstants.getMetadataTableDirs());
+    final Path[] tableMetadataTabletDirs = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(),
MetadataTable.TABLE_TABLET_LOCATION));
+    final Path[] defaultMetadataTabletDirs = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(),
Constants.DEFAULT_TABLET_LOCATION));
     
-    final Path metadataTableDirs[] = paths(ServerConstants.getMetadataTableDirs());
-
     fs.mkdirs(new Path(ServerConstants.getDataVersionLocation(), "" + ServerConstants.DATA_VERSION));
     
     // create an instance id
@@ -256,7 +257,7 @@ public class Initialize {
       }
     }
     
-    // create root tablet
+    // create root table and tablet
     try {
       fstat = fs.getFileStatus(rootTablet);
       if (!fstat.isDir()) {
@@ -273,25 +274,13 @@ public class Initialize {
     // populate the root tablet with info about the default tablet
     // the root tablet contains the key extent and locations of all the
     // metadata tablets
-    String initRootTabFile = rootTablet + "/00000_00000."
-        + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
+    String initRootTabFile = rootTablet + "/00000_00000." + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
     FileSystem ns = fs.getFileSystemByPath(new Path(initRootTabFile));
     FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, ns, ns.getConf(),
AccumuloConfiguration.getDefaultConfiguration());
     mfw.startDefaultLocalityGroup();
     
-    // -----------] root tablet info
-    Text rootExtent = RootTable.ROOT_TABLET_EXTENT.getMetadataEntry();
-    
-    // root's directory
-    Key rootDirKey = new Key(rootExtent, MetadataTable.DIRECTORY_COLUMN.getColumnFamily(),
MetadataTable.DIRECTORY_COLUMN.getColumnQualifier(), 0);
-    mfw.append(rootDirKey, new Value("/root_tablet".getBytes()));
-    
-    // root's prev row
-    Key rootPrevRowKey = new Key(rootExtent, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(),
MetadataTable.PREV_ROW_COLUMN.getColumnQualifier(), 0);
-    mfw.append(rootPrevRowKey, new Value(new byte[] {0}));
-    
     // ----------] table tablet info
-    Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), MetadataTable.RESERVED_KEYSPACE_START_KEY.getRow()));
+    Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), MetadataTable.RESERVED_RANGE_START_KEY.getRow()));
     
     // table tablet's directory
     Key tableDirKey = new Key(tableExtent, MetadataTable.DIRECTORY_COLUMN.getColumnFamily(),
MetadataTable.DIRECTORY_COLUMN.getColumnQualifier(), 0);
@@ -302,16 +291,14 @@ public class Initialize {
     mfw.append(tableTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes()));
     
     // table tablet's prevrow
-    Key tablePrevRowKey = new Key(tableExtent, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(),
MetadataTable.PREV_ROW_COLUMN.getColumnQualifier(),
-        0);
-    mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(new Text(KeyExtent.getMetadataEntry(new
Text(MetadataTable.ID), null))));
+    Key tablePrevRowKey = new Key(tableExtent, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(),
MetadataTable.PREV_ROW_COLUMN.getColumnQualifier(), 0);
+    mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(null));
     
     // ----------] default tablet info
     Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID),
null));
     
     // default's directory
-    Key defaultDirKey = new Key(defaultExtent, MetadataTable.DIRECTORY_COLUMN.getColumnFamily(),
-        MetadataTable.DIRECTORY_COLUMN.getColumnQualifier(), 0);
+    Key defaultDirKey = new Key(defaultExtent, MetadataTable.DIRECTORY_COLUMN.getColumnFamily(),
MetadataTable.DIRECTORY_COLUMN.getColumnQualifier(), 0);
     mfw.append(defaultDirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes()));
     
     // default's time
@@ -319,9 +306,8 @@ public class Initialize {
     mfw.append(defaultTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes()));
     
     // default's prevrow
-    Key defaultPrevRowKey = new Key(defaultExtent, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(),
-        MetadataTable.PREV_ROW_COLUMN.getColumnQualifier(), 0);
-    mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataTable.RESERVED_KEYSPACE_START_KEY.getRow()));
+    Key defaultPrevRowKey = new Key(defaultExtent, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(),
MetadataTable.PREV_ROW_COLUMN.getColumnQualifier(), 0);
+    mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataTable.RESERVED_RANGE_START_KEY.getRow()));
     
     mfw.close();
     
@@ -372,6 +358,7 @@ public class Initialize {
     String zkInstanceRoot = Constants.ZROOT + "/" + uuid;
     zoo.putPersistentData(zkInstanceRoot, new byte[0], NodeExistsPolicy.FAIL);
     zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLES, Constants.ZTABLES_INITIAL_ID,
NodeExistsPolicy.FAIL);
+    TableManager.prepareNewTableState(uuid, RootTable.ID, RootTable.NAME, TableState.ONLINE,
NodeExistsPolicy.FAIL);
     TableManager.prepareNewTableState(uuid, MetadataTable.ID, MetadataTable.NAME, TableState.ONLINE,
NodeExistsPolicy.FAIL);
     zoo.putPersistentData(zkInstanceRoot + Constants.ZTSERVERS, new byte[0], NodeExistsPolicy.FAIL);
     zoo.putPersistentData(zkInstanceRoot + Constants.ZPROBLEMS, new byte[0], NodeExistsPolicy.FAIL);
@@ -456,9 +443,12 @@ public class Initialize {
         setMetadataReplication(max, "max");
       if (min > 5)
         setMetadataReplication(min, "min");
-      for (Entry<String,String> entry : initialMetadataConf.entrySet())
+      for (Entry<String,String> entry : initialMetadataConf.entrySet()) {
+        if (!TablePropUtil.setTableProperty(RootTable.ID, entry.getKey(), entry.getValue()))
+          throw new IOException("Cannot create per-table property " + entry.getKey());
         if (!TablePropUtil.setTableProperty(MetadataTable.ID, entry.getKey(), entry.getValue()))
           throw new IOException("Cannot create per-table property " + entry.getKey());
+      }
     } catch (Exception e) {
       log.fatal("error talking to zookeeper", e);
       throw new IOException(e);

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java?rev=1496171&r1=1496170&r2=1496171&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
(original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
Mon Jun 24 19:29:39 2013
@@ -52,7 +52,6 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.server.util.FileUtil;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
@@ -90,14 +89,13 @@ import org.apache.zookeeper.KeeperExcept
 public class MetadataTable extends org.apache.accumulo.core.util.MetadataTable {
   
   private static final Text EMPTY_TEXT = new Text();
+  private static Map<TCredentials,Writer> root_tables = new HashMap<TCredentials,Writer>();
   private static Map<TCredentials,Writer> metadata_tables = new HashMap<TCredentials,Writer>();
   private static final Logger log = Logger.getLogger(MetadataTable.class);
   
   private static final int SAVE_ROOT_TABLET_RETRIES = 3;
   
-  private MetadataTable() {
-    
-  }
+  private MetadataTable() {}
   
   public synchronized static Writer getMetadataTable(TCredentials credentials) {
     Writer metadataTable = metadata_tables.get(credentials);
@@ -108,17 +106,25 @@ public class MetadataTable extends org.a
     return metadataTable;
   }
   
+  public synchronized static Writer getRootTable(TCredentials credentials) {
+    Writer rootTable = root_tables.get(credentials);
+    if (rootTable == null) {
+      rootTable = new Writer(HdfsZooInstance.getInstance(), credentials, RootTable.ID);
+      root_tables.put(credentials, rootTable);
+    }
+    return rootTable;
+  }
+  
   public static void putLockID(ZooLock zooLock, Mutation m) {
     LOCK_COLUMN.put(m, new Value(zooLock.getLockID().serialize(ZooUtil.getRoot(HdfsZooInstance.getInstance())
+ "/").getBytes()));
   }
   
-  public static void update(TCredentials credentials, Mutation m) {
-    update(credentials, null, m);
+  public static void update(TCredentials credentials, Mutation m, KeyExtent extent) {
+    update(credentials, null, m, extent);
   }
   
-  public static void update(TCredentials credentials, ZooLock zooLock, Mutation m) {
-    Writer t;
-    t = getMetadataTable(credentials);
+  public static void update(TCredentials credentials, ZooLock zooLock, Mutation m, KeyExtent
extent) {
+    Writer t = extent.isMeta() ? getRootTable(credentials) : getMetadataTable(credentials);
     if (zooLock != null)
       putLockID(zooLock, m);
     while (true) {
@@ -142,17 +148,13 @@ public class MetadataTable extends org.a
   /**
    * new data file update function adds one data file to a tablet's list
    * 
-   * path should be relative to the table directory
-   * 
-   * @param time
-   * @param filesInUseByScans
-   * @param zooLock
-   * @param flushId
+   * @param path
+   *          should be relative to the table directory
    * 
    */
   public static void updateTabletDataFile(KeyExtent extent, FileRef path, FileRef mergeFile,
DataFileValue dfv, String time, TCredentials credentials,
       Set<FileRef> filesInUseByScans, String address, ZooLock zooLock, Set<String>
unusedWalLogs, TServerInstance lastLocation, long flushId) {
-    if (extent.equals(RootTable.ROOT_TABLET_EXTENT)) {
+    if (extent.equals(RootTable.EXTENT)) {
       if (unusedWalLogs != null) {
         IZooReaderWriter zk = ZooReaderWriter.getInstance();
         // unusedWalLogs will contain the location/name of each log in a log set
@@ -210,7 +212,7 @@ public class MetadataTable extends org.a
     
     FLUSH_COLUMN.put(m, new Value((flushId + "").getBytes()));
     
-    update(credentials, zooLock, m);
+    update(credentials, zooLock, m, extent);
     
   }
   
@@ -231,7 +233,7 @@ public class MetadataTable extends org.a
     if (!extent.isRootTablet()) {
       Mutation m = new Mutation(extent.getMetadataEntry());
       FLUSH_COLUMN.put(m, new Value((flushID + "").getBytes()));
-      update(credentials, zooLock, m);
+      update(credentials, zooLock, m, extent);
     }
   }
   
@@ -239,11 +241,12 @@ public class MetadataTable extends org.a
     if (!extent.isRootTablet()) {
       Mutation m = new Mutation(extent.getMetadataEntry());
       COMPACT_COLUMN.put(m, new Value((compactID + "").getBytes()));
-      update(credentials, zooLock, m);
+      update(credentials, zooLock, m, extent);
     }
   }
   
-  public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef,DataFileValue>
estSizes, String time, TCredentials credentials, ZooLock zooLock) {
+  public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef,DataFileValue>
estSizes, String time, TCredentials credentials,
+      ZooLock zooLock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
     byte[] tidBytes = Long.toString(tid).getBytes();
     
@@ -253,7 +256,7 @@ public class MetadataTable extends org.a
       m.put(BULKFILE_COLUMN_FAMILY, file, new Value(tidBytes));
     }
     TIME_COLUMN.put(m, new Value(time.getBytes()));
-    update(credentials, zooLock, m);
+    update(credentials, zooLock, m, extent);
   }
   
   public static void addTablet(KeyExtent extent, String path, TCredentials credentials, char
timeType, ZooLock lock) {
@@ -262,12 +265,12 @@ public class MetadataTable extends org.a
     DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
     TIME_COLUMN.put(m, new Value((timeType + "0").getBytes()));
     
-    update(credentials, lock, m);
+    update(credentials, lock, m, extent);
   }
   
   public static void updateTabletPrevEndRow(KeyExtent extent, TCredentials credentials) {
     Mutation m = extent.getPrevRowUpdateMutation(); //
-    update(credentials, m);
+    update(credentials, m, extent);
   }
   
   /**
@@ -383,7 +386,7 @@ public class MetadataTable extends org.a
       m.put(BULKFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(tidBytes));
     }
     
-    update(credentials, zooLock, m);
+    update(credentials, zooLock, m, extent);
   }
   
   public static void rollBackSplit(Text metadataEntry, Text oldPrevEndRow, TCredentials credentials,
ZooLock zooLock) {
@@ -391,7 +394,7 @@ public class MetadataTable extends org.a
     Mutation m = ke.getPrevRowUpdateMutation();
     SPLIT_RATIO_COLUMN.putDelete(m);
     OLD_PREV_ROW_COLUMN.putDelete(m);
-    update(credentials, zooLock, m);
+    update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
   }
   
   public static void splitTablet(KeyExtent extent, Text oldPrevEndRow, double splitRatio,
TCredentials credentials, ZooLock zooLock) {
@@ -401,7 +404,7 @@ public class MetadataTable extends org.a
     
     OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(oldPrevEndRow));
     CHOPPED_COLUMN.putDelete(m);
-    update(credentials, zooLock, m);
+    update(credentials, zooLock, m, extent);
   }
   
   public static void finishSplit(Text metadataEntry, Map<FileRef,DataFileValue> datafileSizes,
List<FileRef> highDatafilesToRemove, TCredentials credentials,
@@ -419,7 +422,7 @@ public class MetadataTable extends org.a
       m.putDelete(DATAFILE_COLUMN_FAMILY, pathToRemove.meta());
     }
     
-    update(credentials, zooLock, m);
+    update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
   }
   
   public static void finishSplit(KeyExtent extent, Map<FileRef,DataFileValue> datafileSizes,
List<FileRef> highDatafilesToRemove, TCredentials credentials,
@@ -433,7 +436,8 @@ public class MetadataTable extends org.a
   }
   
   public static void replaceDatafiles(KeyExtent extent, Set<FileRef> datafilesToDelete,
Set<FileRef> scanFiles, FileRef path, Long compactionId,
-      DataFileValue size, TCredentials credentials, String address, TServerInstance lastLocation,
ZooLock zooLock, boolean insertDeleteFlags) throws IOException {
+      DataFileValue size, TCredentials credentials, String address, TServerInstance lastLocation,
ZooLock zooLock, boolean insertDeleteFlags)
+      throws IOException {
     
     if (insertDeleteFlags) {
       // add delete flags for those paths before the data file reference is removed
@@ -462,7 +466,7 @@ public class MetadataTable extends org.a
     if (lastLocation != null && !lastLocation.equals(self))
       lastLocation.clearLastLocation(m);
     
-    update(credentials, zooLock, m);
+    update(credentials, zooLock, m, extent);
   }
   
   public static void addDeleteEntries(KeyExtent extent, Set<FileRef> datafilesToDelete,
TCredentials credentials) throws IOException {
@@ -471,18 +475,16 @@ public class MetadataTable extends org.a
     
     // TODO could use batch writer,would need to handle failure and retry like update does
- ACCUMULO-1294
     for (FileRef pathToRemove : datafilesToDelete) {
-      update(credentials, createDeleteMutation(tableId, pathToRemove.path().toString()));
+      update(credentials, createDeleteMutation(tableId, pathToRemove.path().toString()),
extent);
     }
   }
   
   public static void addDeleteEntry(String tableId, String path) throws IOException {
-    update(SecurityConstants.getSystemCredentials(), createDeleteMutation(tableId, path));
+    update(SecurityConstants.getSystemCredentials(), createDeleteMutation(tableId, path),
new KeyExtent(new Text(tableId), null, null));
   }
   
   public static Mutation createDeleteMutation(String tableId, String pathToRemove) throws
IOException {
-    String prefix = DELETE_FLAG_PREFIX;
-    if (tableId.equals(ID))
-      prefix = RootTable.DELETE_FLAG_PREFIX;
+    String prefix = DELETED_RANGE.getStartKey().getRow().toString();
     
     if (!pathToRemove.contains(":")) {
       if (pathToRemove.startsWith("../"))
@@ -503,7 +505,7 @@ public class MetadataTable extends org.a
     for (FileRef pathToRemove : scanFiles)
       m.putDelete(SCANFILE_COLUMN_FAMILY, pathToRemove.meta());
     
-    update(credentials, zooLock, m);
+    update(credentials, zooLock, m, extent);
   }
   
   private static KeyExtent fixSplit(Text table, Text metadataEntry, Text metadataPrevEndRow,
Value oper, double splitRatio, TServerInstance tserver,
@@ -774,7 +776,7 @@ public class MetadataTable extends org.a
       String value = StringUtil.join(entry.logSet, ";") + "|" + entry.tabletId;
       Mutation m = new Mutation(entry.extent.getMetadataEntry());
       m.put(LOG_COLUMN_FAMILY, new Text(entry.server + "/" + entry.filename), new Value(value.getBytes()));
-      update(credentials, zooLock, m);
+      update(credentials, zooLock, m, entry.extent);
     }
   }
   
@@ -811,7 +813,8 @@ public class MetadataTable extends org.a
       }
       
     } else {
-      Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
+      String systemTableToCheck = extent.isMeta() ? RootTable.ID : ID;
+      Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, systemTableToCheck,
Authorizations.EMPTY);
       scanner.fetchColumnFamily(LOG_COLUMN_FAMILY);
       scanner.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
       scanner.setRange(extent.toMetadataRange());
@@ -838,7 +841,7 @@ public class MetadataTable extends org.a
   public static List<LogEntry> getLogEntries(TCredentials credentials, KeyExtent extent)
throws IOException, KeeperException, InterruptedException {
     log.info("Scanning logging entries for " + extent);
     ArrayList<LogEntry> result = new ArrayList<LogEntry>();
-    if (extent.equals(RootTable.ROOT_TABLET_EXTENT)) {
+    if (extent.equals(RootTable.EXTENT)) {
       log.info("Getting logs for root tablet from zookeeper");
       getRootLogEntries(result);
     } else {
@@ -891,7 +894,10 @@ public class MetadataTable extends org.a
   }
   
   private static Scanner getTabletLogScanner(TCredentials credentials, KeyExtent extent)
{
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
+    String tableId = ID;
+    if (extent.isMeta())
+      tableId = RootTable.ID;
+    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, tableId,
Authorizations.EMPTY);
     scanner.fetchColumnFamily(LOG_COLUMN_FAMILY);
     Text start = extent.getMetadataEntry();
     Key endKey = new Key(start, LOG_COLUMN_FAMILY);
@@ -906,7 +912,7 @@ public class MetadataTable extends org.a
     Iterator<Entry<Key,Value>> metadataEntries = null;
     
     LogEntryIterator(TCredentials creds) throws IOException, KeeperException, InterruptedException
{
-      rootTabletEntries = getLogEntries(creds, RootTable.ROOT_TABLET_EXTENT).iterator();
+      rootTabletEntries = getLogEntries(creds, RootTable.EXTENT).iterator();
       try {
         Scanner scanner = HdfsZooInstance.getInstance().getConnector(creds.getPrincipal(),
CredentialHelper.extractToken(creds))
             .createScanner(NAME, Authorizations.EMPTY);
@@ -961,7 +967,7 @@ public class MetadataTable extends org.a
       } else {
         Mutation m = new Mutation(entry.extent.getMetadataEntry());
         m.putDelete(LOG_COLUMN_FAMILY, new Text(entry.server + "/" + entry.filename));
-        update(SecurityConstants.getSystemCredentials(), zooLock, m);
+        update(SecurityConstants.getSystemCredentials(), zooLock, m, entry.extent);
       }
     }
   }
@@ -1167,7 +1173,7 @@ public class MetadataTable extends org.a
   public static void chopped(KeyExtent extent, ZooLock zooLock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
     CHOPPED_COLUMN.put(m, new Value("chopped".getBytes()));
-    update(SecurityConstants.getSystemCredentials(), zooLock, m);
+    update(SecurityConstants.getSystemCredentials(), zooLock, m, extent);
   }
   
   public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws
Exception {
@@ -1191,7 +1197,7 @@ public class MetadataTable extends org.a
     List<FileRef> result = new ArrayList<FileRef>();
     try {
       VolumeManager fs = VolumeManagerImpl.get();
-      Scanner mscanner = new IsolatedScanner(conn.createScanner(NAME, Authorizations.EMPTY));
+      Scanner mscanner = new IsolatedScanner(conn.createScanner(extent.isMeta() ? RootTable.NAME
: NAME, Authorizations.EMPTY));
       mscanner.setRange(extent.toMetadataRange());
       mscanner.fetchColumnFamily(BULKFILE_COLUMN_FAMILY);
       for (Entry<Key,Value> entry : mscanner) {
@@ -1207,15 +1213,11 @@ public class MetadataTable extends org.a
   }
   
   public static Map<FileRef,Long> getBulkFilesLoaded(TCredentials credentials, KeyExtent
extent) throws IOException {
-    return getBulkFilesLoaded(credentials, extent.getMetadataEntry());
-  }
-  
-  public static Map<FileRef,Long> getBulkFilesLoaded(TCredentials credentials, Text
metadataRow) throws IOException {
-    
+    Text metadataRow = extent.getMetadataEntry();
     Map<FileRef,Long> ret = new HashMap<FileRef,Long>();
     
     VolumeManager fs = VolumeManagerImpl.get();
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
+    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, extent.isMeta()
? RootTable.ID : ID, Authorizations.EMPTY);
     scanner.setRange(new Range(metadataRow));
     scanner.fetchColumnFamily(BULKFILE_COLUMN_FAMILY);
     for (Entry<Key,Value> entry : scanner) {
@@ -1230,7 +1232,9 @@ public class MetadataTable extends org.a
     Mutation m = new Mutation(BLIP_FLAG_PREFIX + path);
     m.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
     
-    update(SecurityConstants.getSystemCredentials(), m);
+    // new KeyExtent is only added to force update to write to the metadata table, not the
root table
+    // because bulk loads aren't supported to the metadata table
+    update(SecurityConstants.getSystemCredentials(), m, new KeyExtent(new Text("anythingNotMetadata"),
null, null));
   }
   
   public static void removeBulkLoadInProgressFlag(String path) {
@@ -1238,26 +1242,30 @@ public class MetadataTable extends org.a
     Mutation m = new Mutation(BLIP_FLAG_PREFIX + path);
     m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
     
-    update(SecurityConstants.getSystemCredentials(), m);
+    // new KeyExtent is only added to force update to write to the metadata table, not the
root table
+    // because bulk loads aren't supported to the metadata table
+    update(SecurityConstants.getSystemCredentials(), m, new KeyExtent(new Text("anythingNotMetadata"),
null, null));
   }
   
   public static void moveMetaDeleteMarkers(Instance instance, TCredentials creds) {
+    if (true)
+      throw new UnsupportedOperationException();
     // move delete markers from the normal delete keyspace to the root tablet delete keyspace
if the files are for the !METADATA table
     Scanner scanner = new ScannerImpl(instance, creds, ID, Authorizations.EMPTY);
-    scanner.setRange(new Range(DELETES_KEYSPACE));
+    scanner.setRange(new Range(DELETED_RANGE));
     for (Entry<Key,Value> entry : scanner) {
       String row = entry.getKey().getRow().toString();
-      if (row.startsWith(DELETE_FLAG_PREFIX)) {
-        String filename = row.substring(DELETE_FLAG_PREFIX.length());
+      if (row.startsWith(DELETED_RANGE.getStartKey().getRow().toString())) {
+        String filename = row.substring(DELETED_RANGE.getStartKey().getRow().toString().length());
         // add the new entry first
         log.info("Moving " + filename + " marker to the root tablet");
-        Mutation m = new Mutation(RootTable.DELETE_FLAG_PREFIX + filename);
+        Mutation m = new Mutation(DELETED_RANGE.getStartKey().getRow().toString() + filename);
         m.put(new byte[] {}, new byte[] {}, new byte[] {});
-        update(creds, m);
+        update(creds, m, null);
         // remove the old entry
         m = new Mutation(entry.getKey().getRow());
         m.putDelete(new byte[] {}, new byte[] {});
-        update(creds, m);
+        update(creds, m, null);
       } else {
         break;
       }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java?rev=1496171&r1=1496170&r2=1496171&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java
(original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java
Mon Jun 24 19:29:39 2013
@@ -126,7 +126,7 @@ public class OfflineMetadataScanner exte
     this.conf = conf;
     List<LogEntry> rwal;
     try {
-      rwal = MetadataTable.getLogEntries(null, RootTable.ROOT_TABLET_EXTENT);
+      rwal = MetadataTable.getLogEntries(null, RootTable.EXTENT);
     } catch (Exception e) {
       throw new RuntimeException("Failed to check if root tablet has write ahead log entries",
e);
     }

Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/MergeInfoTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/MergeInfoTest.java?rev=1496171&r1=1496170&r2=1496171&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/MergeInfoTest.java
(original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/MergeInfoTest.java
Mon Jun 24 19:29:39 2013
@@ -34,7 +34,7 @@ public class MergeInfoTest {
     in.reset(buffer.getData(), 0, buffer.getLength());
     MergeInfo info2 = new MergeInfo();
     info2.readFields(in);
-    Assert.assertEquals(info.range, info2.range);
+    Assert.assertEquals(info.extent, info2.extent);
     Assert.assertEquals(info.state, info2.state);
     Assert.assertEquals(info.operation, info2.operation);
     return info2;

Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java?rev=1496171&r1=1496170&r2=1496171&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java
(original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java
Mon Jun 24 19:29:39 2013
@@ -146,7 +146,7 @@ public class RootTabletStateStoreTest {
   @Test
   public void testRootTabletStateStore() throws DistributedStoreException {
     ZooTabletStateStore tstore = new ZooTabletStateStore(new FakeZooStore());
-    KeyExtent root = RootTable.ROOT_TABLET_EXTENT;
+    KeyExtent root = RootTable.EXTENT;
     String sessionId = "this is my unique session data";
     TServerInstance server = new TServerInstance(AddressUtil.parseAddress("127.0.0.1", 10000),
sessionId);
     List<Assignment> assignments = Collections.singletonList(new Assignment(root, server));

Modified: accumulo/trunk/test/src/main/java/org/apache/accumulo/test/GCLotsOfCandidatesTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/main/java/org/apache/accumulo/test/GCLotsOfCandidatesTest.java?rev=1496171&r1=1496170&r2=1496171&view=diff
==============================================================================
--- accumulo/trunk/test/src/main/java/org/apache/accumulo/test/GCLotsOfCandidatesTest.java
(original)
+++ accumulo/trunk/test/src/main/java/org/apache/accumulo/test/GCLotsOfCandidatesTest.java
Mon Jun 24 19:29:39 2013
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.test;
 
-import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -28,6 +27,7 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.hadoop.io.Text;
 
 public class GCLotsOfCandidatesTest {
@@ -42,7 +42,7 @@ public class GCLotsOfCandidatesTest {
     
     for (int i = 0; i < 100000; ++i) {
       final Text emptyText = new Text("");
-      Text row = new Text(String.format("%s%s%020d%s", MetadataTable.DELETE_FLAG_PREFIX,
"/", i,
+      Text row = new Text(String.format("%s%s%020d%s", MetadataTable.DELETED_RANGE.getStartKey().getRow().toString(),
"/", i,
           "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj"));
       Mutation delFlag = new Mutation(row);
       delFlag.put(emptyText, emptyText, new Value(new byte[] {}));

Modified: accumulo/trunk/test/src/test/java/org/apache/accumulo/test/MetaSplitTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/test/java/org/apache/accumulo/test/MetaSplitTest.java?rev=1496171&r1=1496170&r2=1496171&view=diff
==============================================================================
--- accumulo/trunk/test/src/test/java/org/apache/accumulo/test/MetaSplitTest.java (original)
+++ accumulo/trunk/test/src/test/java/org/apache/accumulo/test/MetaSplitTest.java Mon Jun
24 19:29:39 2013
@@ -73,17 +73,17 @@ public class MetaSplitTest {
       opts.create("" + i);
     }
     opts.merge(MetadataTable.NAME, new Text("01"), new Text("02"));
-    assertEquals(2, opts.listSplits(MetadataTable.NAME).size());
+    assertEquals(1, opts.listSplits(MetadataTable.NAME).size());
     addSplits(opts, "4 5 6 7 8".split(" "));
-    assertEquals(7, opts.listSplits(MetadataTable.NAME).size());
+    assertEquals(6, opts.listSplits(MetadataTable.NAME).size());
     opts.merge(MetadataTable.NAME, new Text("6"), new Text("9"));
-    assertEquals(5, opts.listSplits(MetadataTable.NAME).size());
+    assertEquals(4, opts.listSplits(MetadataTable.NAME).size());
     addSplits(opts, "44 55 66 77 88".split(" "));
-    assertEquals(10, opts.listSplits(MetadataTable.NAME).size());
+    assertEquals(9, opts.listSplits(MetadataTable.NAME).size());
     opts.merge(MetadataTable.NAME, new Text("5"), new Text("7"));
-    assertEquals(7, opts.listSplits(MetadataTable.NAME).size());
+    assertEquals(6, opts.listSplits(MetadataTable.NAME).size());
     opts.merge(MetadataTable.NAME, null, null);
-    assertEquals(1, opts.listSplits(MetadataTable.NAME).size());
+    assertEquals(0, opts.listSplits(MetadataTable.NAME).size());
   }
   
 }

Modified: accumulo/trunk/test/src/test/java/org/apache/accumulo/test/ShellServerTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/test/java/org/apache/accumulo/test/ShellServerTest.java?rev=1496171&r1=1496170&r2=1496171&view=diff
==============================================================================
--- accumulo/trunk/test/src/test/java/org/apache/accumulo/test/ShellServerTest.java (original)
+++ accumulo/trunk/test/src/test/java/org/apache/accumulo/test/ShellServerTest.java Mon Jun 24 19:29:39 2013
@@ -193,7 +193,7 @@ public class ShellServerTest {
     String export = "file://" + folder.newFolder().toString();
     exec("exporttable -t t " + export, true);
     DistCp cp = newDistCp();
-    String import_ = "file://" +folder.newFolder().toString();
+    String import_ = "file://" + folder.newFolder().toString();
     cp.run(new String[] {"-f", export + "/distcp.txt", import_});
     exec("importtable t2 " + import_, true);
     exec("config -t t2 -np", true, "345M", true);
@@ -652,10 +652,12 @@ public class ShellServerTest {
     exec("getsplits", true, "z", false);
     exec("deletetable -f t");
     exec("getsplits -t !METADATA", true);
-    assertEquals(3, output.get().split("\n").length);
+    assertEquals(2, output.get().split("\n").length);
+    exec("getsplits -t !!ROOT", true);
+    assertEquals(1, output.get().split("\n").length);
     exec("merge --all -t !METADATA");
     exec("getsplits -t !METADATA", true);
-    assertEquals(2, output.get().split("\n").length);
+    assertEquals(1, output.get().split("\n").length);
   }
   
   @Test(timeout = 30000)

Modified: accumulo/trunk/test/system/auto/simple/readwrite.py
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/system/auto/simple/readwrite.py?rev=1496171&r1=1496170&r2=1496171&view=diff
==============================================================================
--- accumulo/trunk/test/system/auto/simple/readwrite.py (original)
+++ accumulo/trunk/test/system/auto/simple/readwrite.py Mon Jun 24 19:29:39 2013
@@ -157,7 +157,7 @@ class SunnyLG(SunnyDayTest):
         handle = self.runOn(self.masterHost(),
                             [self.accumulo_sh(),
                              'org.apache.accumulo.core.file.rfile.PrintInfo',
-                             dir + '/tables/1/default_tablet/F0000000.rf'])
+                             dir + '/tables/1/default_tablet/F0000001.rf'])
         out, err = handle.communicate()
         self.assert_(handle.returncode == 0)
         self.assert_(out.find('Locality group         : g1') >= 0)



Mime
View raw message