hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r1511577 [13/23] - in /hbase/trunk: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/src/main/java/org/ap...
Date Thu, 08 Aug 2013 04:19:56 GMT
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java Thu Aug  8 04:19:49 2013
@@ -16,10 +16,8 @@ package org.apache.hadoop.hbase.security
 
 import java.io.IOException;
 import java.net.InetAddress;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -33,11 +31,13 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
@@ -139,13 +139,13 @@ public class AccessController extends Ba
   void initialize(RegionCoprocessorEnvironment e) throws IOException {
     final HRegion region = e.getRegion();
 
-    Map<byte[],ListMultimap<String,TablePermission>> tables =
+    Map<TableName,ListMultimap<String,TablePermission>> tables =
         AccessControlLists.loadAll(region);
     // For each table, write out the table's permissions to the respective
     // znode for that table.
-    for (Map.Entry<byte[],ListMultimap<String,TablePermission>> t:
+    for (Map.Entry<TableName,ListMultimap<String,TablePermission>> t:
       tables.entrySet()) {
-      byte[] table = t.getKey();
+      TableName table = t.getKey();
       ListMultimap<String,TablePermission> perms = t.getValue();
       byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, e.getConfiguration());
       this.authManager.getZKPermissionWatcher().writeToZookeeper(table, serialized);
@@ -159,7 +159,8 @@ public class AccessController extends Ba
    */
   void updateACL(RegionCoprocessorEnvironment e,
       final Map<byte[], List<? extends Cell>> familyMap) {
-    Set<byte[]> tableSet = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+    Set<TableName> tableSet =
+        new TreeSet<TableName>();
     for (Map.Entry<byte[], List<? extends Cell>> f : familyMap.entrySet()) {
       List<? extends Cell> cells = f.getValue();
       for (Cell cell: cells) {
@@ -167,21 +168,21 @@ public class AccessController extends Ba
         if (Bytes.equals(kv.getBuffer(), kv.getFamilyOffset(),
             kv.getFamilyLength(), AccessControlLists.ACL_LIST_FAMILY, 0,
             AccessControlLists.ACL_LIST_FAMILY.length)) {
-          tableSet.add(kv.getRow());
+          tableSet.add(TableName.valueOf(kv.getRow()));
         }
       }
     }
 
     ZKPermissionWatcher zkw = this.authManager.getZKPermissionWatcher();
     Configuration conf = regionEnv.getConfiguration();
-    for (byte[] tableName: tableSet) {
+    for (TableName tableName: tableSet) {
       try {
         ListMultimap<String,TablePermission> perms =
           AccessControlLists.getTablePermissions(conf, tableName);
         byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, conf);
         zkw.writeToZookeeper(tableName, serialized);
       } catch (IOException ex) {
-        LOG.error("Failed updating permissions mirror for '" + Bytes.toString(tableName) + "'", ex);
+        LOG.error("Failed updating permissions mirror for '" + tableName + "'", ex);
       }
     }
   }
@@ -204,7 +205,7 @@ public class AccessController extends Ba
       RegionCoprocessorEnvironment e,
       Map<byte [], ? extends Collection<?>> families) {
     HRegionInfo hri = e.getRegion().getRegionInfo();
-    byte[] tableName = hri.getTableName();
+    TableName tableName = hri.getTableName();
 
     // 1. All users need read access to .META. table.
     // this is a very common operation, so deal with it quickly.
@@ -227,7 +228,7 @@ public class AccessController extends Ba
     // and the user needs to be allowed to write on both tables.
     if (permRequest == Permission.Action.WRITE &&
        (hri.isMetaRegion() ||
-        Bytes.equals(tableName, AccessControlLists.ACL_GLOBAL_NAME)) &&
+        Bytes.equals(tableName.getName(), AccessControlLists.ACL_GLOBAL_NAME)) &&
        (authManager.authorize(user, Permission.Action.CREATE) ||
         authManager.authorize(user, Permission.Action.ADMIN)))
     {
@@ -329,7 +330,7 @@ public class AccessController extends Ba
    * @throws IOException if obtaining the current user fails
    * @throws AccessDeniedException if user has no authorization
    */
-  private void requirePermission(String request, byte[] tableName, byte[] family, byte[] qualifier,
+  private void requirePermission(String request, TableName tableName, byte[] family, byte[] qualifier,
       Action... permissions) throws IOException {
     User user = getActiveUser();
     AuthResult result = null;
@@ -379,7 +380,7 @@ public class AccessController extends Ba
 
     if (!result.isAllowed()) {
       throw new AccessDeniedException("Insufficient permissions (table=" +
-        env.getRegion().getTableDesc().getNameAsString()+
+        env.getRegion().getTableDesc().getTableName()+
         ((families != null && families.size() > 0) ? ", family: " +
         result.toFamilyString() : "") + ", action=" +
         perm.toString() + ")");
@@ -394,7 +395,7 @@ public class AccessController extends Ba
    * @param tableName Affected table name.
   * @param familyMap Affected column families.
    */
-  private void requireGlobalPermission(String request, Permission.Action perm, byte[] tableName,
+  private void requireGlobalPermission(String request, Permission.Action perm, TableName tableName,
       Map<byte[], ? extends Collection<byte[]>> familyMap) throws IOException {
     User user = getActiveUser();
     if (authManager.authorize(user, perm)) {
@@ -417,7 +418,7 @@ public class AccessController extends Ba
       Map<byte[], ? extends Set<byte[]>> familyMap)
     throws IOException {
     HRegionInfo hri = env.getRegion().getRegionInfo();
-    byte[] tableName = hri.getTableName();
+    TableName tableName = hri.getTableName();
 
     if (user == null) {
       return false;
@@ -490,7 +491,7 @@ public class AccessController extends Ba
     for (byte[] family: families) {
       familyMap.put(family, null);
     }
-    requireGlobalPermission("createTable", Permission.Action.CREATE, desc.getName(), familyMap);
+    requireGlobalPermission("createTable", Permission.Action.CREATE, desc.getTableName(), familyMap);
   }
 
   @Override
@@ -504,7 +505,7 @@ public class AccessController extends Ba
       String owner = desc.getOwnerString();
       // default the table owner to current user, if not specified.
       if (owner == null) owner = getActiveUser().getShortName();
-      UserPermission userperm = new UserPermission(Bytes.toBytes(owner), desc.getName(), null,
+      UserPermission userperm = new UserPermission(Bytes.toBytes(owner), desc.getTableName(), null,
           Action.values());
       AccessControlLists.addUserPermission(c.getEnvironment().getConfiguration(), userperm);
     }
@@ -515,121 +516,121 @@ public class AccessController extends Ba
       HTableDescriptor desc, HRegionInfo[] regions) throws IOException {}
 
   @Override
-  public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, byte[] tableName)
+  public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName)
       throws IOException {
     requirePermission("deleteTable", tableName, null, null, Action.ADMIN, Action.CREATE);
   }
 
   @Override
   public void preDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName) throws IOException {}
+      TableName tableName) throws IOException {}
   @Override
   public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName) throws IOException {
+      TableName tableName) throws IOException {
     AccessControlLists.removeTablePermissions(c.getEnvironment().getConfiguration(), tableName);
   }
   @Override
   public void postDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName) throws IOException {}
+      TableName tableName) throws IOException {}
 
   @Override
-  public void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, byte[] tableName,
+  public void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
       HTableDescriptor htd) throws IOException {
     requirePermission("modifyTable", tableName, null, null, Action.ADMIN, Action.CREATE);
   }
 
   @Override
   public void preModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, HTableDescriptor htd) throws IOException {}
+      TableName tableName, HTableDescriptor htd) throws IOException {}
 
   @Override
   public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, HTableDescriptor htd) throws IOException {
+      TableName tableName, HTableDescriptor htd) throws IOException {
     String owner = htd.getOwnerString();
     // default the table owner to current user, if not specified.
     if (owner == null) owner = getActiveUser().getShortName();
-    UserPermission userperm = new UserPermission(Bytes.toBytes(owner), htd.getName(), null,
+    UserPermission userperm = new UserPermission(Bytes.toBytes(owner), htd.getTableName(), null,
         Action.values());
     AccessControlLists.addUserPermission(c.getEnvironment().getConfiguration(), userperm);
   }
 
   @Override
   public void postModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, HTableDescriptor htd) throws IOException {}
+      TableName tableName, HTableDescriptor htd) throws IOException {}
 
 
   @Override
-  public void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, byte[] tableName,
+  public void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
       HColumnDescriptor column) throws IOException {
     requirePermission("addColumn", tableName, null, null, Action.ADMIN, Action.CREATE);
   }
 
   @Override
   public void preAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, HColumnDescriptor column) throws IOException {}
+      TableName tableName, HColumnDescriptor column) throws IOException {}
   @Override
   public void postAddColumn(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, HColumnDescriptor column) throws IOException {}
+      TableName tableName, HColumnDescriptor column) throws IOException {}
   @Override
   public void postAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, HColumnDescriptor column) throws IOException {}
+      TableName tableName, HColumnDescriptor column) throws IOException {}
 
   @Override
-  public void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, byte[] tableName,
+  public void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
       HColumnDescriptor descriptor) throws IOException {
     requirePermission("modifyColumn", tableName, null, null, Action.ADMIN, Action.CREATE);
   }
 
   @Override
   public void preModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, HColumnDescriptor descriptor) throws IOException {}
+      TableName tableName, HColumnDescriptor descriptor) throws IOException {}
   @Override
   public void postModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, HColumnDescriptor descriptor) throws IOException {}
+      TableName tableName, HColumnDescriptor descriptor) throws IOException {}
   @Override
   public void postModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, HColumnDescriptor descriptor) throws IOException {}
+      TableName tableName, HColumnDescriptor descriptor) throws IOException {}
 
 
   @Override
-  public void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, byte[] tableName,
+  public void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
       byte[] col) throws IOException {
     requirePermission("deleteColumn", tableName, null, null, Action.ADMIN, Action.CREATE);
   }
 
   @Override
   public void preDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, byte[] col) throws IOException {}
+      TableName tableName, byte[] col) throws IOException {}
   @Override
   public void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, byte[] col) throws IOException {
+      TableName tableName, byte[] col) throws IOException {
     AccessControlLists.removeTablePermissions(c.getEnvironment().getConfiguration(),
                                               tableName, col);
   }
   @Override
   public void postDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName, byte[] col) throws IOException {}
+      TableName tableName, byte[] col) throws IOException {}
 
   @Override
-  public void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, byte[] tableName)
+  public void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName)
       throws IOException {
     requirePermission("enableTable", tableName, null, null, Action.ADMIN, Action.CREATE);
   }
 
   @Override
   public void preEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName) throws IOException {}
+      TableName tableName) throws IOException {}
   @Override
   public void postEnableTable(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName) throws IOException {}
+      TableName tableName) throws IOException {}
   @Override
   public void postEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName) throws IOException {}
+      TableName tableName) throws IOException {}
 
   @Override
-  public void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, byte[] tableName)
+  public void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName)
       throws IOException {
-    if (Bytes.equals(tableName, AccessControlLists.ACL_GLOBAL_NAME)) {
+    if (Bytes.equals(tableName.getName(), AccessControlLists.ACL_GLOBAL_NAME)) {
       throw new AccessDeniedException("Not allowed to disable "
           + AccessControlLists.ACL_TABLE_NAME_STR + " table.");
     }
@@ -638,13 +639,13 @@ public class AccessController extends Ba
 
   @Override
   public void preDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName) throws IOException {}
+      TableName tableName) throws IOException {}
   @Override
   public void postDisableTable(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName) throws IOException {}
+      TableName tableName) throws IOException {}
   @Override
   public void postDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> c,
-      byte[] tableName) throws IOException {}
+      TableName tableName) throws IOException {}
 
   @Override
   public void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
@@ -776,6 +777,36 @@ public class AccessController extends Ba
       final SnapshotDescription snapshot) throws IOException {
   }
 
+  @Override
+  public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                                 NamespaceDescriptor ns) throws IOException {
+  }
+
+  @Override
+  public void postCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                                  NamespaceDescriptor ns) throws IOException {
+  }
+
+  @Override
+  public void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                                 String namespace) throws IOException {
+  }
+
+  @Override
+  public void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                                  String namespace) throws IOException {
+  }
+
+  @Override
+  public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                                 NamespaceDescriptor ns) throws IOException {
+  }
+
+  @Override
+  public void postModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                                  NamespaceDescriptor ns) throws IOException {
+  }
+
   /* ---- RegionObserver implementation ---- */
 
   @Override
@@ -869,7 +900,7 @@ public class AccessController extends Ba
     if (!authResult.isAllowed()) {
       if (hasFamilyQualifierPermission(requestUser,
           Permission.Action.READ, e, get.getFamilyMap())) {
-        byte[] table = getTableName(e);
+        TableName table = getTableName(e);
         AccessControlFilter filter = new AccessControlFilter(authManager,
             requestUser, table);
 
@@ -882,11 +913,11 @@ public class AccessController extends Ba
           get.setFilter(filter);
         }
         logResult(AuthResult.allow("get", "Access allowed with filter", requestUser,
-            Permission.Action.READ, authResult.getTable(), get.getFamilyMap()));
+            Permission.Action.READ, authResult.getTableName(), get.getFamilyMap()));
       } else {
         logResult(authResult);
         throw new AccessDeniedException("Insufficient permissions (table=" +
-          e.getRegion().getTableDesc().getNameAsString() + ", action=READ)");
+          e.getRegion().getTableDesc().getTableName() + ", action=READ)");
       }
     } else {
       // log auth success
@@ -1008,7 +1039,7 @@ public class AccessController extends Ba
     if (!authResult.isAllowed()) {
       if (hasFamilyQualifierPermission(user, Permission.Action.READ, e,
           scan.getFamilyMap())) {
-        byte[] table = getTableName(e);
+        TableName table = getTableName(e);
         AccessControlFilter filter = new AccessControlFilter(authManager,
             user, table);
 
@@ -1021,13 +1052,13 @@ public class AccessController extends Ba
           scan.setFilter(filter);
         }
         logResult(AuthResult.allow("scannerOpen", "Access allowed with filter", user,
-            Permission.Action.READ, authResult.getTable(), scan.getFamilyMap()));
+            Permission.Action.READ, authResult.getTableName(), scan.getFamilyMap()));
       } else {
         // no table/family level perms and no qualifier level perms, reject
         logResult(authResult);
         throw new AccessDeniedException("Insufficient permissions for user '"+
             (user != null ? user.getShortName() : "null")+"' "+
-            "for scanner open on table " + Bytes.toString(getTableName(e)));
+            "for scanner open on table " + getTableName(e));
       }
     } else {
       // log success
@@ -1094,7 +1125,7 @@ public class AccessController extends Ba
       List<Pair<byte[], String>> familyPaths) throws IOException {
     for(Pair<byte[],String> el : familyPaths) {
       requirePermission("preBulkLoadHFile",
-          ctx.getEnvironment().getRegion().getTableDesc().getName(),
+          ctx.getEnvironment().getRegion().getTableDesc().getTableName(),
           el.getFirst(),
           null,
           Permission.Action.WRITE);
@@ -1103,7 +1134,7 @@ public class AccessController extends Ba
 
   private AuthResult hasSomeAccess(RegionCoprocessorEnvironment e, String method, Action action) throws IOException {
     User requestUser = getActiveUser();
-    byte[] tableName = e.getRegion().getTableDesc().getName();
+    TableName tableName = e.getRegion().getTableDesc().getTableName();
     AuthResult authResult = permissionGranted(method, requestUser,
         action, e, Collections.EMPTY_MAP);
     if (!authResult.isAllowed()) {
@@ -1132,7 +1163,7 @@ public class AccessController extends Ba
     logResult(authResult);
     if (!authResult.isAllowed()) {
       throw new AccessDeniedException("Insufficient permissions (table=" +
-        e.getRegion().getTableDesc().getNameAsString() + ", action=WRITE)");
+        e.getRegion().getTableDesc().getTableName() + ", action=WRITE)");
     }
   }
 
@@ -1148,7 +1179,7 @@ public class AccessController extends Ba
     logResult(authResult);
     if (!authResult.isAllowed()) {
       throw new AccessDeniedException("Insufficient permissions (table=" +
-        e.getRegion().getTableDesc().getNameAsString() + ", action=WRITE)");
+        e.getRegion().getTableDesc().getTableName() + ", action=WRITE)");
     }
   }
 
@@ -1223,9 +1254,9 @@ public class AccessController extends Ba
                                  AccessControlProtos.UserPermissionsRequest request,
                                  RpcCallback<AccessControlProtos.UserPermissionsResponse> done) {
     AccessControlProtos.UserPermissionsResponse response = null;
-    byte[] table = null;
-    if (request.hasTable()) {
-      table = request.getTable().toByteArray();
+    TableName table = null;
+    if (request.hasTableName()) {
+      table = ProtobufUtil.toTableName(request.getTableName());
     }
     try {
       // only allowed to be called on _acl_ region
@@ -1256,16 +1287,16 @@ public class AccessController extends Ba
     }
     AccessControlProtos.CheckPermissionsResponse response = null;
     try {
-      byte[] tableName = regionEnv.getRegion().getTableDesc().getName();
+      TableName tableName = regionEnv.getRegion().getTableDesc().getTableName();
       for (Permission permission : permissions) {
         if (permission instanceof TablePermission) {
           TablePermission tperm = (TablePermission) permission;
           for (Permission.Action action : permission.getActions()) {
-            if (!Arrays.equals(tperm.getTable(), tableName)) {
+            if (!tperm.getTable().equals(tableName)) {
               throw new CoprocessorException(AccessController.class, String.format("This method "
                   + "can only execute at the table specified in TablePermission. " +
-                  "Table of the region:%s , requested table:%s", Bytes.toString(tableName),
-                  Bytes.toString(tperm.getTable())));
+                  "Table of the region:%s , requested table:%s", tableName,
+                  tperm.getTable()));
             }
 
             Map<byte[], Set<byte[]>> familyMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
@@ -1300,9 +1331,9 @@ public class AccessController extends Ba
     return AccessControlProtos.AccessControlService.newReflectiveService(this);
   }
 
-  private byte[] getTableName(RegionCoprocessorEnvironment e) {
+  private TableName getTableName(RegionCoprocessorEnvironment e) {
     HRegion region = e.getRegion();
-    byte[] tableName = null;
+    TableName tableName = null;
 
     if (region != null) {
       HRegionInfo regionInfo = region.getRegionInfo();
@@ -1339,10 +1370,9 @@ public class AccessController extends Ba
   }
 
   private boolean isSpecialTable(HRegionInfo regionInfo) {
-    byte[] tableName = regionInfo.getTableName();
-    return Arrays.equals(tableName, AccessControlLists.ACL_TABLE_NAME)
-        || Arrays.equals(tableName, Bytes.toBytes("-ROOT-"))
-        || Arrays.equals(tableName, Bytes.toBytes(".META."));
+    TableName tableName = regionInfo.getTableName();
+    return tableName.equals(AccessControlLists.ACL_TABLE)
+        || tableName.equals(TableName.META_TABLE_NAME);
   }
 
   @Override
@@ -1365,7 +1395,8 @@ public class AccessController extends Ba
 
   @Override
   public void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      List<String> tableNamesList, List<HTableDescriptor> descriptors) throws IOException {
+      List<TableName> tableNamesList,
+      List<HTableDescriptor> descriptors) throws IOException {
     // If the list is empty, this is a request for all table descriptors and requires GLOBAL
     // ADMIN privs.
     if (tableNamesList == null || tableNamesList.isEmpty()) {
@@ -1375,18 +1406,17 @@ public class AccessController extends Ba
     // request can be granted.
     else {
       MasterServices masterServices = ctx.getEnvironment().getMasterServices();
-      for (String tableName: tableNamesList) {
+      for (TableName tableName: tableNamesList) {
         // Do not deny if the table does not exist
-        byte[] nameAsBytes = Bytes.toBytes(tableName);
         try {
-          masterServices.checkTableModifiable(nameAsBytes);
+          masterServices.checkTableModifiable(tableName);
         } catch (TableNotFoundException ex) {
           // Skip checks for a table that does not exist
           continue;
         } catch (TableNotDisabledException ex) {
           // We don't care about this
         }
-        requirePermission("getTableDescriptors", nameAsBytes, null, null,
+        requirePermission("getTableDescriptors", tableName, null, null,
           Permission.Action.ADMIN, Permission.Action.CREATE);
       }
     }
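
The AccessController changes above are largely mechanical: every MasterObserver hook and permission helper that took a raw byte[] table name now takes a TableName. A minimal sketch of moving between the two forms, using only the TableName methods visible in this diff (valueOf, getName, getNameAsString):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableNameFormsSketch {
      public static void main(String[] args) {
        byte[] raw = Bytes.toBytes("t1");          // old-style identifier
        TableName table = TableName.valueOf(raw);  // new-style identifier

        // Where byte[] comparisons survive (e.g. against ACL_GLOBAL_NAME
        // above), the raw form is recovered with getName():
        boolean same = Bytes.equals(table.getName(), raw);

        // String contexts (znode names, log messages) use getNameAsString()
        // or TableName's own toString():
        System.out.println(table.getNameAsString() + " " + same);
      }
    }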

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java Thu Aug  8 04:19:49 2013
@@ -23,6 +23,7 @@ import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -35,7 +36,7 @@ import org.apache.hadoop.hbase.util.Byte
 @InterfaceStability.Evolving
 public class AuthResult {
   private final boolean allowed;
-  private final byte[] table;
+  private final TableName table;
   private final Permission.Action action;
   private final String request;
   private final String reason;
@@ -47,7 +48,7 @@ public class AuthResult {
   private final Map<byte[], ? extends Collection<?>> families;
 
   public AuthResult(boolean allowed, String request, String reason, User user,
-      Permission.Action action, byte[] table, byte[] family, byte[] qualifier) {
+      Permission.Action action, TableName table, byte[] family, byte[] qualifier) {
     this.allowed = allowed;
     this.request = request;
     this.reason = reason;
@@ -60,7 +61,7 @@ public class AuthResult {
   }
 
   public AuthResult(boolean allowed, String request, String reason, User user,
-        Permission.Action action, byte[] table,
+        Permission.Action action, TableName table,
         Map<byte[], ? extends Collection<?>> families) {
     this.allowed = allowed;
     this.request = request;
@@ -85,7 +86,7 @@ public class AuthResult {
     return reason;
   }
 
-  public byte[] getTable() {
+  public TableName getTableName() {
     return table;
   }
 
@@ -152,7 +153,7 @@ public class AuthResult {
         .append(user != null ? user.getName() : "UNKNOWN")
         .append(", ");
     sb.append("scope=")
-        .append(table == null ? "GLOBAL" : Bytes.toString(table))
+        .append(table == null ? "GLOBAL" : table)
         .append(", ");
     sb.append("family=")
       .append(toFamilyString())
@@ -168,23 +169,23 @@ public class AuthResult {
   }
 
   public static AuthResult allow(String request, String reason, User user,
-      Permission.Action action, byte[] table, byte[] family, byte[] qualifier) {
+      Permission.Action action, TableName table, byte[] family, byte[] qualifier) {
     return new AuthResult(true, request, reason, user, action, table, family, qualifier);
   }
 
   public static AuthResult allow(String request, String reason, User user,
-      Permission.Action action, byte[] table,
+      Permission.Action action, TableName table,
       Map<byte[], ? extends Collection<?>> families) {
     return new AuthResult(true, request, reason, user, action, table, families);
   }
 
   public static AuthResult deny(String request, String reason, User user,
-      Permission.Action action, byte[] table, byte[] family, byte[] qualifier) {
+      Permission.Action action, TableName table, byte[] family, byte[] qualifier) {
     return new AuthResult(false, request, reason, user, action, table, family, qualifier);
   }
 
   public static AuthResult deny(String request, String reason, User user,
-        Permission.Action action, byte[] table,
+        Permission.Action action, TableName table,
         Map<byte[], ? extends Collection<?>> families) {
     return new AuthResult(false, request, reason, user, action, table, families);
   }
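
AuthResult now stores a TableName and renames its accessor from getTable() to getTableName(); toString() prints the scope through TableName directly rather than Bytes.toString(byte[]). A hedged sketch of a call site, assuming the factory methods shown in this hunk are used as declared:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.access.AuthResult;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class AuthResultSketch {
      public static void main(String[] args) {
        TableName table = TableName.valueOf("t1");
        // allow(request, reason, user, action, table, family, qualifier)
        AuthResult result = AuthResult.allow("get", "example", null,
            Permission.Action.READ, table, null, null);
        // getTable() has become getTableName(), returning TableName.
        System.out.println(result.getTableName() + " allowed=" + result.isAllowed());
      }
    }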

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java Thu Aug  8 04:19:49 2013
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -150,7 +151,8 @@ public class SecureBulkLoadEndpoint exte
     try {
       getAccessController().prePrepareBulkLoad(env);
       String bulkToken = createStagingDir(baseStagingDir,
-          getActiveUser(), request.getTableName().toByteArray()).toString();
+          getActiveUser(),
+          TableName.valueOf(request.getTableName().toByteArray())).toString();
       done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build());
     } catch (IOException e) {
       ResponseConverter.setControllerException(controller, e);
@@ -166,7 +168,7 @@ public class SecureBulkLoadEndpoint exte
       getAccessController().preCleanupBulkLoad(env);
       fs.delete(createStagingDir(baseStagingDir,
           getActiveUser(),
-          env.getRegion().getTableDesc().getName(),
+          env.getRegion().getTableDesc().getTableName(),
           new Path(request.getBulkToken()).getName()),
           true);
       done.run(CleanupBulkLoadResponse.newBuilder().build());
@@ -260,15 +262,17 @@ public class SecureBulkLoadEndpoint exte
         .getCoprocessorHost().findCoprocessor(AccessController.class.getName());
   }
 
-  private Path createStagingDir(Path baseDir, User user, byte[] tableName) throws IOException {
-    String randomDir = user.getShortName()+"__"+Bytes.toString(tableName)+"__"+
+  private Path createStagingDir(Path baseDir,
+                                User user,
+                                TableName tableName) throws IOException {
+    String randomDir = user.getShortName()+"__"+ tableName +"__"+
         (new BigInteger(RANDOM_WIDTH, random).toString(RANDOM_RADIX));
     return createStagingDir(baseDir, user, tableName, randomDir);
   }
 
   private Path createStagingDir(Path baseDir,
                                 User user,
-                                byte[] tableName,
+                                TableName tableName,
                                 String randomDir) throws IOException {
     Path p = new Path(baseDir, randomDir);
     fs.mkdirs(p, PERM_ALL_ACCESS);
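
In SecureBulkLoadEndpoint the staging-directory name is now built by concatenating the TableName itself, so TableName.toString() takes over from the old Bytes.toString(byte[]). A sketch of that construction; the 320/32 literals stand in for the endpoint's RANDOM_WIDTH and RANDOM_RADIX constants, whose actual values are assumed here:

    import java.math.BigInteger;
    import java.security.SecureRandom;
    import org.apache.hadoop.hbase.TableName;

    public class StagingDirNameSketch {
      public static void main(String[] args) {
        TableName table = TableName.valueOf("t1");
        SecureRandom random = new SecureRandom();
        // user__table__random, mirroring createStagingDir() above
        String randomDir = "alice" + "__" + table + "__"
            + new BigInteger(320, random).toString(32);
        System.out.println(randomDir);
      }
    }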

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java Thu Aug  8 04:19:49 2013
@@ -24,6 +24,7 @@ import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.security.User;
@@ -92,8 +93,8 @@ public class TableAuthManager {
   /** Cache of global permissions */
   private volatile PermissionCache<Permission> globalCache;
 
-  private ConcurrentSkipListMap<byte[], PermissionCache<TablePermission>> tableCache =
-      new ConcurrentSkipListMap<byte[], PermissionCache<TablePermission>>(Bytes.BYTES_COMPARATOR);
+  private ConcurrentSkipListMap<TableName, PermissionCache<TablePermission>> tableCache =
+      new ConcurrentSkipListMap<TableName, PermissionCache<TablePermission>>();
 
   private Configuration conf;
   private ZKPermissionWatcher zkperms;
@@ -146,7 +147,8 @@ public class TableAuthManager {
     return this.zkperms;
   }
 
-  public void refreshCacheFromWritable(byte[] table, byte[] data) throws IOException {
+  public void refreshCacheFromWritable(TableName table,
+                                       byte[] data) throws IOException {
     if (data != null && data.length > 0) {
       ListMultimap<String,TablePermission> perms;
       try {
@@ -156,7 +158,7 @@ public class TableAuthManager {
       }
 
       if (perms != null) {
-        if (Bytes.equals(table, AccessControlLists.ACL_GLOBAL_NAME)) {
+        if (Bytes.equals(table.getName(), AccessControlLists.ACL_GLOBAL_NAME)) {
           updateGlobalCache(perms);
         } else {
           updateTableCache(table, perms);
@@ -199,7 +201,8 @@ public class TableAuthManager {
    * @param table
    * @param tablePerms
    */
-  private void updateTableCache(byte[] table, ListMultimap<String,TablePermission> tablePerms) {
+  private void updateTableCache(TableName table,
+                                ListMultimap<String,TablePermission> tablePerms) {
     PermissionCache<TablePermission> newTablePerms = new PermissionCache<TablePermission>();
 
     for (Map.Entry<String,TablePermission> entry : tablePerms.entries()) {
@@ -213,7 +216,7 @@ public class TableAuthManager {
     tableCache.put(table, newTablePerms);
   }
 
-  private PermissionCache<TablePermission> getTablePermissions(byte[] table) {
+  private PermissionCache<TablePermission> getTablePermissions(TableName table) {
     if (!tableCache.containsKey(table)) {
       tableCache.putIfAbsent(table, new PermissionCache<TablePermission>());
     }
@@ -267,13 +270,15 @@ public class TableAuthManager {
     return false;
   }
 
-  private boolean authorize(List<TablePermission> perms, byte[] table, byte[] family,
-      Permission.Action action) {
+  private boolean authorize(List<TablePermission> perms,
+                            TableName table, byte[] family,
+                            Permission.Action action) {
     return authorize(perms, table, family, null, action);
   }
 
-  private boolean authorize(List<TablePermission> perms, byte[] table, byte[] family,
-      byte[] qualifier, Permission.Action action) {
+  private boolean authorize(List<TablePermission> perms,
+                            TableName table, byte[] family,
+                            byte[] qualifier, Permission.Action action) {
     if (perms != null) {
       for (TablePermission p : perms) {
         if (p.implies(table, family, qualifier, action)) {
@@ -281,12 +286,12 @@ public class TableAuthManager {
         }
       }
     } else if (LOG.isDebugEnabled()) {
-      LOG.debug("No permissions found for table="+Bytes.toStringBinary(table));
+      LOG.debug("No permissions found for table="+table);
     }
     return false;
   }
 
-  public boolean authorize(User user, byte[] table, KeyValue kv,
+  public boolean authorize(User user, TableName table, KeyValue kv,
       Permission.Action action) {
     PermissionCache<TablePermission> tablePerms = tableCache.get(table);
     if (tablePerms != null) {
@@ -308,7 +313,7 @@ public class TableAuthManager {
     return false;
   }
 
-  private boolean authorize(List<TablePermission> perms, byte[] table, KeyValue kv,
+  private boolean authorize(List<TablePermission> perms, TableName table, KeyValue kv,
       Permission.Action action) {
     if (perms != null) {
       for (TablePermission p : perms) {
@@ -318,7 +323,7 @@ public class TableAuthManager {
       }
     } else if (LOG.isDebugEnabled()) {
       LOG.debug("No permissions for authorize() check, table=" +
-          Bytes.toStringBinary(table));
+          table);
     }
 
     return false;
@@ -342,18 +347,18 @@ public class TableAuthManager {
    * @param action
    * @return true if known and authorized, false otherwise
    */
-  public boolean authorizeUser(String username, byte[] table, byte[] family,
+  public boolean authorizeUser(String username, TableName table, byte[] family,
       Permission.Action action) {
     return authorizeUser(username, table, family, null, action);
   }
 
-  public boolean authorizeUser(String username, byte[] table, byte[] family,
+  public boolean authorizeUser(String username, TableName table, byte[] family,
       byte[] qualifier, Permission.Action action) {
    // global authorization supersedes table level
     if (authorizeUser(username, action)) {
       return true;
     }
-    if (table == null) table = AccessControlLists.ACL_TABLE_NAME;
+    if (table == null) table = AccessControlLists.ACL_TABLE;
     return authorize(getTablePermissions(table).getUser(username), table, family,
         qualifier, action);
   }
@@ -376,17 +381,17 @@ public class TableAuthManager {
    * @param action
    * @return true if known and authorized, false otherwise
    */
-  public boolean authorizeGroup(String groupName, byte[] table, byte[] family,
+  public boolean authorizeGroup(String groupName, TableName table, byte[] family,
       Permission.Action action) {
    // global authorization supersedes table level
     if (authorizeGroup(groupName, action)) {
       return true;
     }
-    if (table == null) table = AccessControlLists.ACL_TABLE_NAME;
+    if (table == null) table = AccessControlLists.ACL_TABLE;
     return authorize(getTablePermissions(table).getGroup(groupName), table, family, action);
   }
 
-  public boolean authorize(User user, byte[] table, byte[] family,
+  public boolean authorize(User user, TableName table, byte[] family,
       byte[] qualifier, Permission.Action action) {
     if (authorizeUser(user.getShortName(), table, family, qualifier, action)) {
       return true;
@@ -403,7 +408,7 @@ public class TableAuthManager {
     return false;
   }
 
-  public boolean authorize(User user, byte[] table, byte[] family,
+  public boolean authorize(User user, TableName table, byte[] family,
       Permission.Action action) {
     return authorize(user, table, family, null, action);
   }
@@ -415,7 +420,7 @@ public class TableAuthManager {
    * authorize() on the same column family would return true.
    */
   public boolean matchPermission(User user,
-      byte[] table, byte[] family, Permission.Action action) {
+      TableName table, byte[] family, Permission.Action action) {
     PermissionCache<TablePermission> tablePerms = tableCache.get(table);
     if (tablePerms != null) {
       List<TablePermission> userPerms = tablePerms.getUser(user.getShortName());
@@ -446,7 +451,7 @@ public class TableAuthManager {
   }
 
   public boolean matchPermission(User user,
-      byte[] table, byte[] family, byte[] qualifier,
+      TableName table, byte[] family, byte[] qualifier,
       Permission.Action action) {
     PermissionCache<TablePermission> tablePerms = tableCache.get(table);
     if (tablePerms != null) {
@@ -477,6 +482,10 @@ public class TableAuthManager {
   }
 
   public void remove(byte[] table) {
+    remove(TableName.valueOf(table));
+  }
+
+  public void remove(TableName table) {
     tableCache.remove(table);
   }
 
@@ -487,7 +496,7 @@ public class TableAuthManager {
    * @param table
    * @param perms
    */
-  public void setUserPermissions(String username, byte[] table,
+  public void setUserPermissions(String username, TableName table,
       List<TablePermission> perms) {
     PermissionCache<TablePermission> tablePerms = getTablePermissions(table);
     tablePerms.replaceUser(username, perms);
@@ -501,14 +510,14 @@ public class TableAuthManager {
    * @param table
    * @param perms
    */
-  public void setGroupPermissions(String group, byte[] table,
+  public void setGroupPermissions(String group, TableName table,
       List<TablePermission> perms) {
     PermissionCache<TablePermission> tablePerms = getTablePermissions(table);
     tablePerms.replaceGroup(group, perms);
     writeToZooKeeper(table, tablePerms);
   }
 
-  public void writeToZooKeeper(byte[] table,
+  public void writeToZooKeeper(TableName table,
       PermissionCache<TablePermission> tablePerms) {
     byte[] serialized = new byte[0];
     if (tablePerms != null) {
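
TableAuthManager's permission cache changes key type as well. The old map needed Bytes.BYTES_COMPARATOR because byte[] has neither value equality nor a natural ordering; the comparator-less constructor above implies TableName is Comparable (and the tperm.getTable().equals(tableName) check in AccessController relies on value-based equals). A small sketch under that assumption:

    import java.util.concurrent.ConcurrentSkipListMap;
    import org.apache.hadoop.hbase.TableName;

    public class TableCacheSketch {
      public static void main(String[] args) {
        ConcurrentSkipListMap<TableName, String> cache =
            new ConcurrentSkipListMap<TableName, String>();
        cache.put(TableName.valueOf("t1"), "perms-for-t1");
        // A distinct-but-equal key finds the entry, which byte[] keys
        // could never guarantee without an explicit comparator.
        System.out.println(cache.get(TableName.valueOf("t1")));
      }
    }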

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java Thu Aug  8 04:19:49 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.security
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
@@ -92,10 +93,10 @@ public class ZKPermissionWatcher extends
   public void nodeDataChanged(String path) {
     if (aclZNode.equals(ZKUtil.getParent(path))) {
       // update cache on an existing table node
-      String table = ZKUtil.getNodeName(path);
+      TableName table = TableName.valueOf(ZKUtil.getNodeName(path));
       try {
         byte[] data = ZKUtil.getDataAndWatch(watcher, path);
-        authManager.refreshCacheFromWritable(Bytes.toBytes(table), data);
+        authManager.refreshCacheFromWritable(table, data);
       } catch (KeeperException ke) {
         LOG.error("Error reading data from zookeeper for node "+table, ke);
         // only option is to abort
@@ -125,14 +126,14 @@ public class ZKPermissionWatcher extends
     for (ZKUtil.NodeAndData n : nodes) {
       if (n.isEmpty()) continue;
       String path = n.getNode();
-      String table = ZKUtil.getNodeName(path);
+      TableName table = TableName.valueOf(ZKUtil.getNodeName(path));
       try {
         byte[] nodeData = n.getData();
         if (LOG.isDebugEnabled()) {
           LOG.debug("Updating permissions cache from node "+table+" with data: "+
               Bytes.toStringBinary(nodeData));
         }
-        authManager.refreshCacheFromWritable(Bytes.toBytes(table), nodeData);
+        authManager.refreshCacheFromWritable(table, nodeData);
       } catch (IOException ioe) {
         LOG.error("Failed parsing permissions for table '" + table +
             "' from zk", ioe);
@@ -145,16 +146,16 @@ public class ZKPermissionWatcher extends
    * @param tableName
    * @param permsData
    */
-  public void writeToZookeeper(byte[] tableName, byte[] permsData) {
+  public void writeToZookeeper(TableName tableName, byte[] permsData) {
     String zkNode = ZKUtil.joinZNode(watcher.baseZNode, ACL_NODE);
-    zkNode = ZKUtil.joinZNode(zkNode, Bytes.toString(tableName));
+    zkNode = ZKUtil.joinZNode(zkNode, tableName.getNameAsString());
 
     try {
       ZKUtil.createWithParents(watcher, zkNode);
       ZKUtil.updateExistingNodeData(watcher, zkNode, permsData, -1);
     } catch (KeeperException e) {
       LOG.error("Failed updating permissions for table '" + 
-                Bytes.toString(tableName) + "'", e);
+                tableName + "'", e);
       watcher.abort("Failed writing node "+zkNode+" to zookeeper", e);
     }
   }
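
ZKPermissionWatcher round-trips table identity through znode names: writeToZookeeper() names the node with getNameAsString(), and nodeDataChanged() parses it back with TableName.valueOf(). A sketch of that round trip:

    import org.apache.hadoop.hbase.TableName;

    public class ZNodeRoundTripSketch {
      public static void main(String[] args) {
        TableName table = TableName.valueOf("t1");
        String znodeName = table.getNameAsString();       // writing side
        TableName parsed = TableName.valueOf(znodeName);  // watcher side
        System.out.println(parsed.equals(table));         // true
      }
    }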

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java Thu Aug  8 04:19:49 2013
@@ -27,6 +27,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -52,7 +53,7 @@ public class TokenUtil {
       Configuration conf) throws IOException {
     HTable meta = null;
     try {
-      meta = new HTable(conf, ".META.");
+      meta = new HTable(conf, TableName.META_TABLE_NAME);
       CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
       AuthenticationProtos.AuthenticationService.BlockingInterface service =
           AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
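
TokenUtil drops the hardcoded ".META." string in favor of the TableName.META_TABLE_NAME constant, passed to the HTable constructor overload that takes a TableName. Constructing an HTable requires a live cluster, so this sketch only exercises the constant:

    import org.apache.hadoop.hbase.TableName;

    public class MetaTableNameSketch {
      public static void main(String[] args) {
        TableName meta = TableName.META_TABLE_NAME;
        // new HTable(conf, meta) would replace new HTable(conf, ".META.")
        System.out.println(meta.getNameAsString());
      }
    }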

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java Thu Aug  8 04:19:49 2013
@@ -39,11 +39,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.HLogLink;
 import org.apache.hadoop.hbase.mapreduce.JobUtil;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -149,10 +151,12 @@ public final class ExportSnapshot extend
       Path path;
       if (HFileLink.isHFileLink(inputPath) || StoreFileInfo.isReference(inputPath)) {
         String family = inputPath.getParent().getName();
-        String table = HFileLink.getReferencedTableName(inputPath.getName());
+        TableName table =
+            HFileLink.getReferencedTableName(inputPath.getName());
         String region = HFileLink.getReferencedRegionName(inputPath.getName());
         String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
-        path = new Path(table, new Path(region, new Path(family, hfile)));
+        path = new Path(FSUtils.getTableDir(new Path("./"), table),
+            new Path(region, new Path(family, hfile)));
       } else if (isHLogLinkPath(inputPath)) {
         String logName = inputPath.getName();
         path = new Path(new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME), logName);
@@ -372,7 +376,8 @@ public final class ExportSnapshot extend
     SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
 
     final List<Pair<Path, Long>> files = new ArrayList<Pair<Path, Long>>();
-    final String table = snapshotDesc.getTable();
+    final TableName table =
+        TableName.valueOf(snapshotDesc.getTable());
     final Configuration conf = getConf();
 
     // Get snapshot files
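
ExportSnapshot can no longer splice the table name in as a bare top-level path component: with namespaces, the table directory is derived via FSUtils.getTableDir(root, table), as the first hunk above shows. A sketch of the resulting layout:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class TableDirSketch {
      public static void main(String[] args) {
        TableName table = TableName.valueOf("t1");
        // Replaces the old new Path(tableNameString, ...) construction.
        Path tableDir = FSUtils.getTableDir(new Path("./"), table);
        System.out.println(tableDir);
      }
    }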

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java Thu Aug  8 04:19:49 2013
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.monitorin
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@@ -107,25 +109,33 @@ public class RestoreSnapshotHelper {
   private final MonitoredTask status;
 
   private final SnapshotDescription snapshotDesc;
+  private final TableName snapshotTable;
   private final Path snapshotDir;
 
   private final HTableDescriptor tableDesc;
+  private final Path rootDir;
   private final Path tableDir;
 
   private final Configuration conf;
   private final FileSystem fs;
 
-  public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs,
-      final SnapshotDescription snapshotDescription, final Path snapshotDir,
-      final HTableDescriptor tableDescriptor, final Path tableDir,
-      final ForeignExceptionDispatcher monitor, final MonitoredTask status)
+  public RestoreSnapshotHelper(final Configuration conf,
+      final FileSystem fs,
+      final SnapshotDescription snapshotDescription,
+      final Path snapshotDir,
+      final HTableDescriptor tableDescriptor,
+      final Path rootDir,
+      final ForeignExceptionDispatcher monitor,
+      final MonitoredTask status)
   {
     this.fs = fs;
     this.conf = conf;
     this.snapshotDesc = snapshotDescription;
+    this.snapshotTable = TableName.valueOf(snapshotDescription.getTable());
     this.snapshotDir = snapshotDir;
     this.tableDesc = tableDescriptor;
-    this.tableDir = tableDir;
+    this.rootDir = rootDir;
+    this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
     this.monitor = monitor;
     this.status = status;
   }
@@ -311,7 +321,7 @@ public class RestoreSnapshotHelper {
     Map<String, List<String>> snapshotFiles =
                 SnapshotReferenceUtil.getRegionHFileReferences(fs, snapshotRegionDir);
     Path regionDir = new Path(tableDir, regionInfo.getEncodedName());
-    String tableName = tableDesc.getNameAsString();
+    String tableName = tableDesc.getTableName().getNameAsString();
 
     // Restore families present in the table
     for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
@@ -412,7 +422,7 @@ public class RestoreSnapshotHelper {
     }
 
     // create the regions on disk
-    ModifyRegionUtils.createRegions(conf, tableDir.getParent(),
+    ModifyRegionUtils.createRegions(conf, rootDir,
       tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
         public void fillRegion(final HRegion region) throws IOException {
           cloneRegion(region, snapshotRegions.get(region.getRegionInfo().getEncodedName()));
@@ -437,7 +447,7 @@ public class RestoreSnapshotHelper {
       throws IOException {
     final Path snapshotRegionDir = new Path(snapshotDir, snapshotRegionInfo.getEncodedName());
     final Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
-    final String tableName = tableDesc.getNameAsString();
+    final String tableName = tableDesc.getTableName().getNameAsString();
     SnapshotReferenceUtil.visitRegionStoreFiles(fs, snapshotRegionDir,
       new FSVisitor.StoreFileVisitor() {
         public void storeFile (final String region, final String family, final String hfile)
@@ -493,9 +503,9 @@ public class RestoreSnapshotHelper {
   private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo,
       final String hfileName) throws IOException {
     // Extract the referred information (hfile name and parent region)
-    String tableName = snapshotDesc.getTable();
-    Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path(tableName,
-        regionInfo.getEncodedName()), familyDir.getName()), hfileName));
+    Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path(
+        snapshotTable.getNameAsString(), regionInfo.getEncodedName()), familyDir.getName()),
+        hfileName));
     String snapshotRegionName = refPath.getParent().getParent().getName();
     String fileName = refPath.getName();
 
@@ -506,13 +516,13 @@ public class RestoreSnapshotHelper {
     // The output file should be a reference link table=snapshotRegion-fileName.clonedRegionName
     String refLink = fileName;
     if (!HFileLink.isHFileLink(fileName)) {
-      refLink = HFileLink.createHFileLinkName(tableName, snapshotRegionName, fileName);
+      refLink = HFileLink.createHFileLinkName(snapshotTable, snapshotRegionName, fileName);
     }
     Path outPath = new Path(familyDir, refLink + '.' + clonedRegionName);
 
     // Create the new reference
     Path linkPath = new Path(familyDir,
-      HFileLink.createHFileLinkName(tableName, regionInfo.getEncodedName(), hfileName));
+      HFileLink.createHFileLinkName(snapshotTable, regionInfo.getEncodedName(), hfileName));
     InputStream in = new HFileLink(conf, linkPath).open(fs);
     OutputStream out = fs.create(outPath);
     IOUtils.copyBytes(in, out, conf);
@@ -527,7 +537,7 @@ public class RestoreSnapshotHelper {
    * @return the new HRegion instance
    */
   public HRegionInfo cloneRegionInfo(final HRegionInfo snapshotRegionInfo) {
-    return new HRegionInfo(tableDesc.getName(),
+    return new HRegionInfo(tableDesc.getTableName(),
                       snapshotRegionInfo.getStartKey(), snapshotRegionInfo.getEndKey(),
                       snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId());
   }
@@ -543,7 +553,7 @@ public class RestoreSnapshotHelper {
    */
   private void restoreWALs() throws IOException {
     final SnapshotLogSplitter logSplitter = new SnapshotLogSplitter(conf, fs, tableDir,
-                                Bytes.toBytes(snapshotDesc.getTable()), regionsMap);
+        snapshotTable, regionsMap);
     try {
       // Recover.Edits
       SnapshotReferenceUtil.visitRecoveredEdits(fs, snapshotDir,
@@ -578,7 +588,8 @@ public class RestoreSnapshotHelper {
       HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir.getPath());
       regions.add(hri);
     }
-    LOG.debug("found " + regions.size() + " regions for table=" + tableDesc.getNameAsString());
+    LOG.debug("found " + regions.size() + " regions for table=" +
+        tableDesc.getTableName().getNameAsString());
     return regions;
   }
 
@@ -591,7 +602,7 @@ public class RestoreSnapshotHelper {
    * @throws IOException
    */
   public static HTableDescriptor cloneTableSchema(final HTableDescriptor snapshotTableDescriptor,
-      final byte[] tableName) throws IOException {
+      final TableName tableName) throws IOException {
     HTableDescriptor htd = new HTableDescriptor(tableName);
     for (HColumnDescriptor hcd: snapshotTableDescriptor.getColumnFamilies()) {
       htd.addFamily(hcd);

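A minimal sketch of the idiom the RestoreSnapshotHelper hunks above repeat: the snapshot protobuf still carries the table as a plain String, so the helper parses it into a TableName once and derives the table directory from the root dir instead of threading a precomputed tableDir through the constructor. The table name and root path below are made up for illustration.

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.util.FSUtils;

  public class RestoreNamingSketch {
    public static void main(String[] args) {
      // Parse the String stored in the SnapshotDescription once...
      TableName snapshotTable = TableName.valueOf("restored_table"); // illustrative name
      // ...then derive the table dir from the root dir, as the new
      // constructor body does.
      Path rootDir = new Path("hdfs://namenode/hbase");              // illustrative path
      Path tableDir = FSUtils.getTableDir(rootDir, snapshotTable);
      System.out.println(tableDir);
    }
  }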
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java Thu Aug  8 04:19:49 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.snapshot
 import java.io.IOException;
 import java.util.Collections;
 
+import com.google.protobuf.ByteString;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -29,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -270,11 +272,11 @@ public class SnapshotDescriptionUtils {
   }
 
   /**
-   * Read in the {@link SnapshotDescription} stored for the snapshot in the passed directory
+   * Read in the {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} stored for the snapshot in the passed directory
    * @param fs filesystem where the snapshot was taken
    * @param snapshotDir directory where the snapshot was stored
    * @return the stored snapshot description
-   * @throws org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException if the
+   * @throws CorruptedSnapshotException if the
    * snapshot cannot be read
    */
   public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir)
@@ -284,7 +286,8 @@ public class SnapshotDescriptionUtils {
       FSDataInputStream in = null;
       try {
         in = fs.open(snapshotInfo);
-        return SnapshotDescription.parseFrom(in);
+        SnapshotDescription desc = SnapshotDescription.parseFrom(in);
+        return desc;
       } finally {
         if (in != null) in.close();
       }

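For context, a hedged usage sketch of the read path this hunk touches. The snapshot directory below is a placeholder; getTable() remains a String field on the protobuf message, so callers convert it at the edge.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
  import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

  public class ReadSnapshotSketch {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      Path snapshotDir = new Path("/hbase/.hbase-snapshot/snap1"); // placeholder path
      SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
      // getTable() is still a String in the protobuf; callers convert:
      TableName table = TableName.valueOf(desc.getTable());
      System.out.println(table.getNameAsString());
    }
  }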
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java Thu Aug  8 04:19:49 2013
@@ -21,8 +21,6 @@ package org.apache.hadoop.hbase.snapshot
 import java.io.IOException;
 import java.io.FileNotFoundException;
 import java.text.SimpleDateFormat;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
 import java.util.Date;
 
 import org.apache.commons.logging.Log;
@@ -33,6 +31,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -43,9 +43,6 @@ import org.apache.hadoop.hbase.HTableDes
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.HLogLink;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 
@@ -108,12 +105,14 @@ public final class SnapshotInfo extends 
     private long logSize = 0;
 
     private final SnapshotDescription snapshot;
+    private final TableName snapshotTable;
     private final Configuration conf;
     private final FileSystem fs;
 
     SnapshotStats(final Configuration conf, final FileSystem fs, final SnapshotDescription snapshot)
     {
       this.snapshot = snapshot;
+      this.snapshotTable = TableName.valueOf(snapshot.getTable());
       this.conf = conf;
       this.fs = fs;
     }
@@ -187,7 +186,7 @@ public final class SnapshotInfo extends 
      */
     FileInfo addStoreFile(final String region, final String family, final String hfile)
           throws IOException {
-      String table = this.snapshot.getTable();
+      TableName table = snapshotTable;
       Path path = new Path(family, HFileLink.createHFileLinkName(table, region, hfile));
       HFileLink link = new HFileLink(conf, path);
       boolean inArchive = false;
@@ -330,7 +329,7 @@ public final class SnapshotInfo extends 
     System.out.println("----------------------------------------");
     System.out.println("   Name: " + snapshotDesc.getName());
     System.out.println("   Type: " + snapshotDesc.getType());
-    System.out.println("  Table: " + snapshotDesc.getTable());
+    System.out.println("  Table: " + snapshotTableDesc.getTableName().getNameAsString());
     System.out.println(" Format: " + snapshotDesc.getVersion());
     System.out.println("Created: " + df.format(new Date(snapshotDesc.getCreationTime())));
     System.out.println();
@@ -357,7 +356,7 @@ public final class SnapshotInfo extends 
     }
 
     // Collect information about hfiles and logs in the snapshot
-    final String table = this.snapshotDesc.getTable();
+    final String table = snapshotTableDesc.getTableName().getNameAsString();
     final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, this.snapshotDesc);
     SnapshotReferenceUtil.visitReferencedFiles(fs, snapshotDir,
       new SnapshotReferenceUtil.FileVisitor() {

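A small sketch of the retyped createHFileLinkName call used in the SnapshotInfo hunks above; the region and hfile names are fabricated for illustration.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.io.HFileLink;

  public class LinkNameSketch {
    public static void main(String[] args) {
      // The link-name builder now takes the typed table name directly.
      TableName table = TableName.valueOf("snapshotted_table");     // illustrative
      String linkName = HFileLink.createHFileLinkName(table,
          "8d9e0a5c0bd2ef46debcb197bcdeffa0",                       // fabricated region
          "1a2b3c4d5e6f");                                          // fabricated hfile
      System.out.println(linkName);
    }
  }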
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java Thu Aug  8 04:19:49 2013
@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.io.HLogLink;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * If the snapshot has references to one or more log files,
@@ -95,8 +97,8 @@ class SnapshotLogSplitter implements Clo
 
   private final Map<byte[], byte[]> regionsMap;
   private final Configuration conf;
-  private final byte[] snapshotTableName;
-  private final byte[] tableName;
+  private final TableName snapshotTableName;
+  private final TableName tableName;
   private final Path tableDir;
   private final FileSystem fs;
 
@@ -105,11 +107,11 @@ class SnapshotLogSplitter implements Clo
    * @params regionsMap maps original region names to the new ones.
    */
   public SnapshotLogSplitter(final Configuration conf, final FileSystem fs,
-      final Path tableDir, final byte[] snapshotTableName,
+      final Path tableDir, final TableName snapshotTableName,
       final Map<byte[], byte[]> regionsMap) {
     this.regionsMap = regionsMap;
     this.snapshotTableName = snapshotTableName;
-    this.tableName = Bytes.toBytes(tableDir.getName());
+    this.tableName = FSUtils.getTableName(tableDir);
     this.tableDir = tableDir;
     this.conf = conf;
     this.fs = fs;
@@ -123,15 +125,15 @@ class SnapshotLogSplitter implements Clo
 
   public void splitLog(final String serverName, final String logfile) throws IOException {
     LOG.debug("Restore log=" + logfile + " server=" + serverName +
-              " for snapshotTable=" + Bytes.toString(snapshotTableName) +
-              " to table=" + Bytes.toString(tableName));
+              " for snapshotTable=" + snapshotTableName +
+              " to table=" + tableName);
     splitLog(new HLogLink(conf, serverName, logfile).getAvailablePath(fs));
   }
 
   public void splitRecoveredEdit(final Path editPath) throws IOException {
     LOG.debug("Restore recover.edits=" + editPath +
-              " for snapshotTable=" + Bytes.toString(snapshotTableName) +
-              " to table=" + Bytes.toString(tableName));
+              " for snapshotTable=" + snapshotTableName +
+              " to table=" + tableName);
     splitLog(editPath);
   }
 
@@ -154,7 +156,7 @@ class SnapshotLogSplitter implements Clo
         HLogKey key = entry.getKey();
 
         // We're interested only in the snapshot table that we're restoring
-        if (!Bytes.equals(key.getTablename(), snapshotTableName)) continue;
+        if (!key.getTablename().equals(snapshotTableName)) continue;
 
         // Writer for region.
         if (!Bytes.equals(regionName, key.getEncodedRegionName())) {

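The getTableName/getTableDir pair above is the key change here: with namespaces the table directory is nested under a namespace directory, so tableDir.getName() no longer equals the table name and FSUtils performs the inverse mapping instead. A sketch with an illustrative root path:

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.util.FSUtils;

  public class TableDirSketch {
    public static void main(String[] args) {
      Path rootDir = new Path("hdfs://namenode/hbase");  // illustrative path
      TableName table = TableName.valueOf("t1");         // illustrative name
      Path tableDir = FSUtils.getTableDir(rootDir, table);
      // getTableName(tableDir) recovers the TableName from the nested layout.
      System.out.println(FSUtils.getTableName(tableDir).equals(table)); // true
    }
  }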
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java Thu Aug  8 04:19:49 2013
@@ -24,7 +24,9 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 
@@ -60,9 +62,8 @@ public class TableInfoCopyTask extends S
     LOG.debug("Attempting to copy table info for snapshot:"
         + ClientSnapshotDescriptionUtils.toString(this.snapshot));
     // get the HTable descriptor
-
     HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir,
-      this.snapshot.getTable());
+        TableName.valueOf(this.snapshot.getTable()));
     this.rethrowException();
     // write a copy of descriptor to the snapshot directory
     Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);

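A hedged sketch of the updated static descriptor lookup used above, assuming a placeholder root dir and an illustrative table name:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.util.FSTableDescriptors;

  public class DescriptorLookupSketch {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      Path rootDir = new Path("/hbase");                  // placeholder path
      // The String overload is gone; the lookup now takes a TableName.
      HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(
          fs, rootDir, TableName.valueOf("t1"));          // illustrative table
      System.out.println(htd.getTableName().getNameAsString());
    }
  }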
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java Thu Aug  8 04:19:49 2013
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -593,7 +594,7 @@ public class ThriftServerRunner implemen
         HTableDescriptor[] tables = this.getHBaseAdmin().listTables();
         ArrayList<ByteBuffer> list = new ArrayList<ByteBuffer>(tables.length);
         for (int i = 0; i < tables.length; i++) {
-          list.add(ByteBuffer.wrap(tables[i].getName()));
+          list.add(ByteBuffer.wrap(tables[i].getTableName().getName()));
         }
         return list;
       } catch (IOException e) {
@@ -931,7 +932,7 @@ public class ThriftServerRunner implemen
         if (getHBaseAdmin().tableExists(tableName)) {
           throw new AlreadyExists("table name already in use");
         }
-        HTableDescriptor desc = new HTableDescriptor(tableName);
+        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
         for (ColumnDescriptor col : columnFamilies) {
           HColumnDescriptor colDesc = ThriftUtilities.colDescFromThrift(col);
           desc.addFamily(colDesc);
@@ -1378,13 +1379,13 @@ public class ThriftServerRunner implemen
     @Override
     public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError {
       try {
-        HTable table = getTable(HConstants.META_TABLE_NAME);
+        HTable table = getTable(TableName.META_TABLE_NAME.getName());
         byte[] row = getBytes(searchRow);
         Result startRowResult = table.getRowOrBefore(
           row, HConstants.CATALOG_FAMILY);
 
         if (startRowResult == null) {
-          throw new IOException("Cannot find row in .META., row="
+          throw new IOException("Cannot find row in "+ TableName.META_TABLE_NAME+", row="
                                 + Bytes.toStringBinary(row));
         }
 

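The Thrift wire format still carries raw byte[] table names; as the hunks above show, conversion to the typed name now happens once at the server boundary, roughly as in this sketch (names illustrative):

  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ThriftBoundarySketch {
    public static void main(String[] args) {
      // Thrift hands the server a raw byte[]; convert at the boundary.
      byte[] wireName = Bytes.toBytes("thrift_table");    // illustrative name
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(wireName));
      desc.addFamily(new HColumnDescriptor("f1"));
      System.out.println(desc.getTableName());
    }
  }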
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java Thu Aug  8 04:19:49 2013
@@ -23,6 +23,7 @@ import org.apache.commons.lang.time.Stop
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -163,7 +164,7 @@ public final class Canary implements Too
 
         if (tables_index >= 0) {
           for (int i = tables_index; i < args.length; i++) {
-            sniff(admin, sink, args[i]);
+            sniff(admin, sink, TableName.valueOf(args[i]));
           }
         } else {
           sniff();
@@ -202,7 +203,7 @@ public final class Canary implements Too
    * @param tableName
    * @throws Exception
    */
-  public static void sniff(final HBaseAdmin admin, String tableName)
+  public static void sniff(final HBaseAdmin admin, TableName tableName)
   throws Exception {
     sniff(admin, new StdOutSink(), tableName);
   }
@@ -214,10 +215,10 @@ public final class Canary implements Too
    * @param tableName
    * @throws Exception
    */
-  private static void sniff(final HBaseAdmin admin, final Sink sink, String tableName)
+  private static void sniff(final HBaseAdmin admin, final Sink sink, TableName tableName)
   throws Exception {
     if (admin.isTableAvailable(tableName)) {
-      sniff(admin, sink, admin.getTableDescriptor(tableName.getBytes()));
+      sniff(admin, sink, admin.getTableDescriptor(tableName));
     } else {
       LOG.warn(String.format("Table %s is not available", tableName));
     }
@@ -232,12 +233,12 @@ public final class Canary implements Too
     HTable table = null;
 
     try {
-      table = new HTable(admin.getConfiguration(), tableDesc.getName());
+      table = new HTable(admin.getConfiguration(), tableDesc.getTableName());
     } catch (TableNotFoundException e) {
       return;
     }
 
-    for (HRegionInfo region : admin.getTableRegions(tableDesc.getName())) {
+    for (HRegionInfo region : admin.getTableRegions(tableDesc.getTableName())) {
       try {
         sniffRegion(admin, sink, region, table);
       } catch (Exception e) {

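A usage sketch of the retyped public sniff() entry point; the table name is illustrative and the sketch assumes a reachable cluster configuration:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.HBaseAdmin;
  import org.apache.hadoop.hbase.tool.Canary;

  public class CanarySketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      HBaseAdmin admin = new HBaseAdmin(conf);
      try {
        // Command-line table arguments are now parsed into TableName up front.
        Canary.sniff(admin, TableName.valueOf("t1"));     // illustrative table
      } finally {
        admin.close();
      }
    }
  }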
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorMigrationToSubdir.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorMigrationToSubdir.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorMigrationToSubdir.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorMigrationToSubdir.java Thu Aug  8 04:19:49 2013
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 
 /**
@@ -53,8 +54,8 @@ public class FSTableDescriptorMigrationT
    * migrated.
    */
   private static boolean needsMigration(FileSystem fs, Path rootDir) throws IOException {
-    Path metaTableDir = FSTableDescriptors.getTableDirectory(rootDir,
-      Bytes.toString(HConstants.META_TABLE_NAME));
+    Path metaTableDir = FSUtils.getTableDir(rootDir,
+      TableName.META_TABLE_NAME);
     FileStatus metaTableInfoStatus =
       FSTableDescriptors.getTableInfoPath(fs, metaTableDir);
     return metaTableInfoStatus == null;
@@ -86,14 +87,13 @@ public class FSTableDescriptorMigrationT
     }
     
     LOG.info("Migrating system tables");
-    migrateTableIfExists(fs, rootDir, HConstants.ROOT_TABLE_NAME);
     // migrate meta last because that's what we check to see if migration is complete
-    migrateTableIfExists(fs, rootDir, HConstants.META_TABLE_NAME);
+    migrateTableIfExists(fs, rootDir, TableName.META_TABLE_NAME);
   }
 
-  private static void migrateTableIfExists(FileSystem fs, Path rootDir, byte[] tableName)
+  private static void migrateTableIfExists(FileSystem fs, Path rootDir, TableName tableName)
   throws IOException {
-    Path tableDir = FSTableDescriptors.getTableDirectory(rootDir, Bytes.toString(tableName));
+    Path tableDir = FSUtils.getTableDir(rootDir, tableName);
     if (fs.exists(tableDir)) {
       migrateTable(fs, tableDir);
     }

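With -ROOT- gone, only .META.'s descriptor location is probed to decide whether migration already ran. A sketch of the path computation the new needsMigration() performs, with an illustrative root dir:

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.util.FSUtils;

  public class MetaDirSketch {
    public static void main(String[] args) {
      Path rootDir = new Path("/hbase");                  // illustrative path
      // The meta table dir comes from FSUtils and the TableName constant,
      // not from a Bytes.toString() of the old HConstants byte[] name.
      System.out.println(FSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME));
    }
  }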
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java Thu Aug  8 04:19:49 2013
@@ -38,11 +38,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableInfoMissingException;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -84,8 +85,8 @@ public class FSTableDescriptors implemen
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
-  private final Map<String, TableDescriptorAndModtime> cache =
-    new ConcurrentHashMap<String, TableDescriptorAndModtime>();
+  private final Map<TableName, TableDescriptorAndModtime> cache =
+    new ConcurrentHashMap<TableName, TableDescriptorAndModtime>();
 
   /**
    * Data structure to hold modification time and table descriptor.
@@ -140,32 +141,20 @@ public class FSTableDescriptors implemen
    * to see if a newer file has been created since the cached one was read.
    */
   @Override
-  public HTableDescriptor get(final byte [] tablename)
-  throws IOException {
-    return get(Bytes.toString(tablename));
-  }
-
-  /**
-   * Get the current table descriptor for the given table, or null if none exists.
-   * 
-   * Uses a local cache of the descriptor but still checks the filesystem on each call
-   * to see if a newer file has been created since the cached one was read.
-   */
-  @Override
-  public HTableDescriptor get(final String tablename)
+  public HTableDescriptor get(final TableName tablename)
   throws IOException {
     invocations++;
-    if (HTableDescriptor.ROOT_TABLEDESC.getNameAsString().equals(tablename)) {
+    if (HTableDescriptor.ROOT_TABLEDESC.getTableName().equals(tablename)) {
       cachehits++;
       return HTableDescriptor.ROOT_TABLEDESC;
     }
-    if (HTableDescriptor.META_TABLEDESC.getNameAsString().equals(tablename)) {
+    if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tablename)) {
       cachehits++;
       return HTableDescriptor.META_TABLEDESC;
     }
     // .META. and -ROOT- is already handled. If some one tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename)) {
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
        throw new IOException("No descriptor found for non table = " + tablename);
     }
 
@@ -212,14 +201,36 @@ public class FSTableDescriptors implemen
     for (Path d: tableDirs) {
       HTableDescriptor htd = null;
       try {
+        htd = get(FSUtils.getTableName(d));
+      } catch (FileNotFoundException fnfe) {
+        // inability of retrieving one HTD shouldn't stop getting the remaining
+        LOG.warn("Trouble retrieving htd", fnfe);
+      }
+      if (htd == null) continue;
+      htds.put(htd.getTableName().getNameAsString(), htd);
+    }
+    return htds;
+  }
 
-        htd = get(d.getName());
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)
+   */
+  @Override
+  public Map<String, HTableDescriptor> getByNamespace(String name)
+  throws IOException {
+    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
+    List<Path> tableDirs =
+        FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name));
+    for (Path d: tableDirs) {
+      HTableDescriptor htd = null;
+      try {
+        htd = get(FSUtils.getTableName(d));
       } catch (FileNotFoundException fnfe) {
         // inability of retrieving one HTD shouldn't stop getting the remaining
         LOG.warn("Trouble retrieving htd", fnfe);
       }
       if (htd == null) continue;
-      htds.put(d.getName(), htd);
+      htds.put(FSUtils.getTableName(d).getNameAsString(), htd);
     }
     return htds;
   }
@@ -233,19 +244,16 @@ public class FSTableDescriptors implemen
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
-    if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) {
+    if (TableName.META_TABLE_NAME.equals(htd.getTableName())) {
       throw new NotImplementedException();
     }
-    if (Bytes.equals(HConstants.META_TABLE_NAME, htd.getName())) {
-      throw new NotImplementedException();
-    }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) {
       throw new NotImplementedException(
         "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString());
     }
     updateTableDescriptor(htd);
-    long modtime = getTableInfoModtime(htd.getNameAsString());
-    this.cache.put(htd.getNameAsString(), new TableDescriptorAndModtime(modtime, htd));
+    long modtime = getTableInfoModtime(htd.getTableName());
+    this.cache.put(htd.getTableName(), new TableDescriptorAndModtime(modtime, htd));
   }
 
   /**
@@ -254,12 +262,12 @@ public class FSTableDescriptors implemen
    * from the FileSystem.
    */
   @Override
-  public HTableDescriptor remove(final String tablename)
+  public HTableDescriptor remove(final TableName tablename)
   throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
     }
-    Path tabledir = getTableDirectory(tablename);
+    Path tabledir = getTableDir(tablename);
     if (this.fs.exists(tabledir)) {
       if (!this.fs.delete(tabledir, true)) {
         throw new IOException("Failed delete of " + tabledir.toString());
@@ -276,7 +284,7 @@ public class FSTableDescriptors implemen
    * @return true if exists
    * @throws IOException
    */
-  public boolean isTableInfoExists(String tableName) throws IOException {
+  public boolean isTableInfoExists(TableName tableName) throws IOException {
     return getTableInfoPath(tableName) != null;
   }
   
@@ -284,8 +292,8 @@ public class FSTableDescriptors implemen
    * Find the most current table info file for the given table in the hbase root directory.
    * @return The file status of the current table info file or null if it does not exist
    */
-  private FileStatus getTableInfoPath(final String tableName) throws IOException {
-    Path tableDir = getTableDirectory(tableName);
+  private FileStatus getTableInfoPath(final TableName tableName) throws IOException {
+    Path tableDir = getTableDir(tableName);
     return getTableInfoPath(tableDir);
   }
 
@@ -384,17 +392,10 @@ public class FSTableDescriptors implemen
   /**
    * Return the table directory in HDFS
    */
-  @VisibleForTesting Path getTableDirectory(final String tableName) {
-    return getTableDirectory(rootdir, tableName);
+  @VisibleForTesting Path getTableDir(final TableName tableName) {
+    return FSUtils.getTableDir(rootdir, tableName);
   }
-  
-  /**
-   * Return the table directory in HDFS
-   */
-  static Path getTableDirectory(Path rootDir, String tableName) {
-    return FSUtils.getTablePath(rootDir, tableName);
-  }
-  
+
   private static final PathFilter TABLEINFO_PATHFILTER = new PathFilter() {
     @Override
     public boolean accept(Path p) {
@@ -460,7 +461,7 @@ public class FSTableDescriptors implemen
    * or <code>0</code> if no tableinfo file found.
    * @throws IOException
    */
-  private long getTableInfoModtime(final String tableName) throws IOException {
+  private long getTableInfoModtime(final TableName tableName) throws IOException {
     FileStatus status = getTableInfoPath(tableName);
     return status == null ? 0 : status.getModificationTime();
   }
@@ -471,8 +472,8 @@ public class FSTableDescriptors implemen
    * Returns null if it's not found.
    */
   public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
-      Path hbaseRootDir, String tableName) throws IOException {
-    Path tableDir = getTableDirectory(hbaseRootDir, tableName);
+      Path hbaseRootDir, TableName tableName) throws IOException {
+    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     return getTableDescriptorFromFs(fs, tableDir);
   }
 
@@ -490,14 +491,14 @@ public class FSTableDescriptors implemen
     return readTableDescriptor(fs, status, false);
   }
   
-  private TableDescriptorAndModtime getTableDescriptorAndModtime(String tableName)
+  private TableDescriptorAndModtime getTableDescriptorAndModtime(TableName tableName)
   throws IOException {
     // ignore both -ROOT- and .META. tables
-    if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0
-        || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) {
+    if (tableName.equals(TableName.ROOT_TABLE_NAME)
+        || tableName.equals(TableName.META_TABLE_NAME)) {
       return null;
     }
-    return getTableDescriptorAndModtime(getTableDirectory(tableName));
+    return getTableDescriptorAndModtime(getTableDir(tableName));
   }
 
   private TableDescriptorAndModtime getTableDescriptorAndModtime(Path tableDir)
@@ -545,7 +546,7 @@ public class FSTableDescriptors implemen
     if (fsreadonly) {
       throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
     }
-    Path tableDir = getTableDirectory(htd.getNameAsString());
+    Path tableDir = getTableDir(htd.getTableName());
     Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir));
     if (p == null) throw new IOException("Failed update");
     LOG.info("Updated tableinfo=" + p);
@@ -557,12 +558,12 @@ public class FSTableDescriptors implemen
    * Used in unit tests only.
    * @throws NotImplementedException if in read only mode
    */
-  public void deleteTableDescriptorIfExists(String tableName) throws IOException {
+  public void deleteTableDescriptorIfExists(TableName tableName) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
     }
    
-    Path tableDir = getTableDirectory(tableName);
+    Path tableDir = getTableDir(tableName);
     Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
     deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
   }
@@ -683,7 +684,7 @@ public class FSTableDescriptors implemen
    */
   public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
   throws IOException {
-    Path tableDir = getTableDirectory(htd.getNameAsString());
+    Path tableDir = getTableDir(htd.getTableName());
     return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
   }
   

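Taken together, the FSTableDescriptors changes move both the public API and the internal cache from String/byte[] keys to TableName, whose equals/hashCode make it a safe map key, unlike byte[]. A hedged usage sketch, assuming the two-argument constructor and an illustrative table:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.util.FSTableDescriptors;
  import org.apache.hadoop.hbase.util.FSUtils;

  public class CacheKeySketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(conf);
      Path rootDir = FSUtils.getRootDir(conf);
      // Assumes the (FileSystem, Path) constructor available in this tree.
      FSTableDescriptors fstd = new FSTableDescriptors(fs, rootDir);
      // The String-keyed get() overloads are removed; lookups key on TableName.
      HTableDescriptor htd = fstd.get(TableName.valueOf("t1")); // illustrative
      System.out.println(htd == null ? "missing" : htd.getTableName());
    }
  }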

