accumulo-commits mailing list archives

From: els...@apache.org
Subject: [27/48] Merge branch '1.5.1-SNAPSHOT' into 1.6.0-SNAPSHOT
Date: Tue, 04 Feb 2014 17:54:56 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
index 4b177b5,0000000..36bbb53
mode 100644,000000..100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
@@@ -1,316 -1,0 +1,316 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.master.tableOps;
 +
 +import java.io.BufferedOutputStream;
 +import java.io.BufferedWriter;
 +import java.io.DataOutputStream;
 +import java.io.IOException;
 +import java.io.OutputStreamWriter;
 +import java.io.Serializable;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.zip.ZipEntry;
 +import java.util.zip.ZipOutputStream;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.impl.Tables;
 +import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 +import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 +import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.DefaultConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.fate.Repo;
 +import org.apache.accumulo.master.Master;
 +import org.apache.accumulo.server.ServerConstants;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.conf.ServerConfiguration;
 +import org.apache.accumulo.server.conf.TableConfiguration;
 +import org.apache.accumulo.server.fs.VolumeManager;
 +import org.apache.hadoop.fs.FSDataOutputStream;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.Text;
 +
 +class ExportInfo implements Serializable {
 +  
 +  private static final long serialVersionUID = 1L;
 +  
 +  public String tableName;
 +  public String tableID;
 +  public String exportDir;
 +  public String namespaceID;
 +}
 +
 +class WriteExportFiles extends MasterRepo {
 +  
 +  private static final long serialVersionUID = 1L;
 +  private final ExportInfo tableInfo;
 +  
 +  WriteExportFiles(ExportInfo tableInfo) {
 +    this.tableInfo = tableInfo;
 +  }
 +  
 +  private void checkOffline(Connector conn) throws Exception {
 +    if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
 +      Tables.clearCache(conn.getInstance());
 +      if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
 +        throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
 +            "Table is not offline");
 +      }
 +    }
 +  }
 +  
 +  @Override
 +  public long isReady(long tid, Master master) throws Exception {
 +    
 +    long reserved = Utils.reserveNamespace(tableInfo.namespaceID, tid, false, true, TableOperation.EXPORT)
 +        + Utils.reserveTable(tableInfo.tableID, tid, false, true, TableOperation.EXPORT);
 +    if (reserved > 0)
 +      return reserved;
 +    
 +    Connector conn = master.getConnector();
 +    
 +    checkOffline(conn);
 +    
 +    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
 +    metaScanner.setRange(new KeyExtent(new Text(tableInfo.tableID), null, null).toMetadataRange());
 +    
 +    // scan for locations
 +    metaScanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
 +    metaScanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME);
 +    
 +    if (metaScanner.iterator().hasNext()) {
 +      return 500;
 +    }
 +    
 +    // use the same range to check for walogs that we used to check for hosted (or future hosted) tablets
 +    // this is done as a separate scan after we check for locations, because walogs are okay only if there is no location
 +    metaScanner.clearColumns();
 +    metaScanner.fetchColumnFamily(LogColumnFamily.NAME);
 +    
 +    if (metaScanner.iterator().hasNext()) {
 +      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
 +          "Write ahead logs found for table");
 +    }
 +    
 +    return 0;
 +  }
 +  
 +  @Override
 +  public Repo<Master> call(long tid, Master master) throws Exception {
 +    Connector conn = master.getConnector();
 +    
 +    try {
 +      exportTable(master.getFileSystem(), conn, tableInfo.tableName, tableInfo.tableID, tableInfo.exportDir);
 +    } catch (IOException ioe) {
 +      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
 +          "Failed to create export files " + ioe.getMessage());
 +    }
 +    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
 +    Utils.unreserveTable(tableInfo.tableID, tid, false);
 +    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
 +    return null;
 +  }
 +  
 +  @Override
 +  public void undo(long tid, Master env) throws Exception {
 +    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
 +    Utils.unreserveTable(tableInfo.tableID, tid, false);
 +  }
 +  
 +  public static void exportTable(VolumeManager fs, Connector conn, String tableName, String tableID, String exportDir) throws Exception {
 +    
 +    fs.mkdirs(new Path(exportDir));
 +    Path exportMetaFilePath = fs.getFileSystemByPath(new Path(exportDir)).makeQualified(new Path(exportDir, Constants.EXPORT_FILE));
 +    
 +    FSDataOutputStream fileOut = fs.create(exportMetaFilePath, false);
 +    ZipOutputStream zipOut = new ZipOutputStream(fileOut);
 +    BufferedOutputStream bufOut = new BufferedOutputStream(zipOut);
 +    DataOutputStream dataOut = new DataOutputStream(bufOut);
 +    
 +    try {
 +      
 +      zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_INFO_FILE));
-       OutputStreamWriter osw = new OutputStreamWriter(dataOut);
++      OutputStreamWriter osw = new OutputStreamWriter(dataOut, Constants.UTF8);
 +      osw.append(ExportTable.EXPORT_VERSION_PROP + ":" + ExportTable.VERSION + "\n");
 +      osw.append("srcInstanceName:" + conn.getInstance().getInstanceName() + "\n");
 +      osw.append("srcInstanceID:" + conn.getInstance().getInstanceID() + "\n");
 +      osw.append("srcZookeepers:" + conn.getInstance().getZooKeepers() + "\n");
 +      osw.append("srcTableName:" + tableName + "\n");
 +      osw.append("srcTableID:" + tableID + "\n");
 +      osw.append(ExportTable.DATA_VERSION_PROP + ":" + ServerConstants.DATA_VERSION + "\n");
 +      osw.append("srcCodeVersion:" + Constants.VERSION + "\n");
 +      
 +      osw.flush();
 +      dataOut.flush();
 +      
 +      exportConfig(conn, tableID, zipOut, dataOut);
 +      dataOut.flush();
 +      
 +      Map<String,String> uniqueFiles = exportMetadata(fs, conn, tableID, zipOut, dataOut);
 +      
 +      dataOut.close();
 +      dataOut = null;
 +      
 +      createDistcpFile(fs, exportDir, exportMetaFilePath, uniqueFiles);
 +      
 +    } finally {
 +      if (dataOut != null)
 +        dataOut.close();
 +    }
 +  }
 +  
 +  private static void createDistcpFile(VolumeManager fs, String exportDir, Path exportMetaFilePath, Map<String,String> uniqueFiles) throws IOException {
-     BufferedWriter distcpOut = new BufferedWriter(new OutputStreamWriter(fs.create(new Path(exportDir, "distcp.txt"), false)));
++    BufferedWriter distcpOut = new BufferedWriter(new OutputStreamWriter(fs.create(new Path(exportDir, "distcp.txt"), false), Constants.UTF8));
 +    
 +    try {
 +      for (String file : uniqueFiles.values()) {
 +        distcpOut.append(file);
 +        distcpOut.newLine();
 +      }
 +      
 +      distcpOut.append(exportMetaFilePath.toString());
 +      distcpOut.newLine();
 +      
 +      distcpOut.close();
 +      distcpOut = null;
 +      
 +    } finally {
 +      if (distcpOut != null)
 +        distcpOut.close();
 +    }
 +  }
 +  
 +  private static Map<String,String> exportMetadata(VolumeManager fs, Connector conn, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut)
 +      throws IOException, TableNotFoundException {
 +    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_METADATA_FILE));
 +    
 +    Map<String,String> uniqueFiles = new HashMap<String,String>();
 +    
 +    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
 +    metaScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
 +    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(metaScanner);
 +    TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(metaScanner);
 +    metaScanner.setRange(new KeyExtent(new Text(tableID), null, null).toMetadataRange());
 +    
 +    for (Entry<Key,Value> entry : metaScanner) {
 +      entry.getKey().write(dataOut);
 +      entry.getValue().write(dataOut);
 +      
 +      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
 +        String path = fs.getFullPath(entry.getKey()).toString();
 +        String tokens[] = path.split("/");
 +        if (tokens.length < 1) {
 +          throw new RuntimeException("Illegal path " + path);
 +        }
 +        
 +        String filename = tokens[tokens.length - 1];
 +        
 +        String existingPath = uniqueFiles.get(filename);
 +        if (existingPath == null) {
 +          uniqueFiles.put(filename, path);
 +        } else if (!existingPath.equals(path)) {
 +          // make sure file names are unique, should only apply for tables with file names generated by Accumulo 1.3 and earlier
 +          throw new IOException("Cannot export table with nonunique file names " + filename + ". Major compact table.");
 +        }
 +        
 +      }
 +    }
 +    return uniqueFiles;
 +  }
 +  
 +  private static void exportConfig(Connector conn, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut) throws AccumuloException,
 +      AccumuloSecurityException, TableNotFoundException, IOException {
 +    
 +    DefaultConfiguration defaultConfig = AccumuloConfiguration.getDefaultConfiguration();
 +    Map<String,String> siteConfig = conn.instanceOperations().getSiteConfiguration();
 +    Map<String,String> systemConfig = conn.instanceOperations().getSystemConfiguration();
 +    
 +    TableConfiguration tableConfig = ServerConfiguration.getTableConfiguration(conn.getInstance(), tableID);
 +    
-     OutputStreamWriter osw = new OutputStreamWriter(dataOut);
++    OutputStreamWriter osw = new OutputStreamWriter(dataOut, Constants.UTF8);
 +    
 +    // only put props that are different than defaults and higher level configurations
 +    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_TABLE_CONFIG_FILE));
 +    for (Entry<String,String> prop : tableConfig) {
 +      if (prop.getKey().startsWith(Property.TABLE_PREFIX.getKey())) {
 +        Property key = Property.getPropertyByKey(prop.getKey());
 +        
 +        if (key == null || !defaultConfig.get(key).equals(prop.getValue())) {
 +          if (!prop.getValue().equals(siteConfig.get(prop.getKey())) && !prop.getValue().equals(systemConfig.get(prop.getKey()))) {
 +            osw.append(prop.getKey() + "=" + prop.getValue() + "\n");
 +          }
 +        }
 +      }
 +    }
 +    
 +    osw.flush();
 +  }
 +}
 +
 +public class ExportTable extends MasterRepo {
 +  private static final long serialVersionUID = 1L;
 +  
 +  private final ExportInfo tableInfo;
 +  
 +  public ExportTable(String tableName, String tableId, String exportDir) {
 +    tableInfo = new ExportInfo();
 +    tableInfo.tableName = tableName;
 +    tableInfo.exportDir = exportDir;
 +    tableInfo.tableID = tableId;
 +    Instance inst = HdfsZooInstance.getInstance();
 +    tableInfo.namespaceID = Tables.getNamespaceId(inst, tableId);
 +  }
 +  
 +  @Override
 +  public long isReady(long tid, Master environment) throws Exception {
 +    return Utils.reserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
 +  }
 +  
 +  @Override
 +  public Repo<Master> call(long tid, Master env) throws Exception {
 +    return new WriteExportFiles(tableInfo);
 +  }
 +  
 +  @Override
 +  public void undo(long tid, Master env) throws Exception {
 +    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
 +  }
 +  
 +  public static final int VERSION = 1;
 +  
 +  public static final String DATA_VERSION_PROP = "srcDataVersion";
 +  public static final String EXPORT_VERSION_PROP = "exportVersion";
 +  
 +}
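
Note: the conflict resolution in this file follows one pattern throughout — every writer that previously relied on the platform default charset now passes Constants.UTF8 explicitly. Below is a minimal standalone sketch of that pattern; it uses java.nio.charset.StandardCharsets.UTF_8 as a stand-in for Accumulo's Constants.UTF8 and a local file in place of the VolumeManager stream, so the class and file names here are illustrative only.

import java.io.BufferedWriter;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;

public class ExplicitCharsetSketch {
  public static void main(String[] args) throws IOException {
    // Passing the charset explicitly keeps the exported metadata byte-identical
    // across JVMs, regardless of the platform default encoding.
    try (BufferedWriter out = new BufferedWriter(
        new OutputStreamWriter(new FileOutputStream("distcp.txt"), StandardCharsets.UTF_8))) {
      out.append("srcTableName:example");
      out.newLine();
    }
  }
}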

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
index ddf1598,0000000..7e84c55
mode 100644,000000..100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
@@@ -1,614 -1,0 +1,614 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.master.tableOps;
 +
 +import java.io.BufferedInputStream;
 +import java.io.BufferedReader;
 +import java.io.BufferedWriter;
 +import java.io.DataInputStream;
 +import java.io.IOException;
 +import java.io.InputStreamReader;
 +import java.io.OutputStreamWriter;
 +import java.io.Serializable;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.zip.ZipEntry;
 +import java.util.zip.ZipInputStream;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.admin.TableOperationsImpl;
 +import org.apache.accumulo.core.client.impl.Namespaces;
 +import org.apache.accumulo.core.client.impl.Tables;
 +import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 +import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 +import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 +import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.file.FileOperations;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 +import org.apache.accumulo.core.security.TablePermission;
 +import org.apache.accumulo.core.util.FastFormat;
 +import org.apache.accumulo.fate.Repo;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 +import org.apache.accumulo.master.Master;
 +import org.apache.accumulo.server.ServerConstants;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.fs.VolumeManager;
 +import org.apache.accumulo.server.security.AuditedSecurityOperation;
 +import org.apache.accumulo.server.security.SecurityOperation;
 +import org.apache.accumulo.server.security.SystemCredentials;
 +import org.apache.accumulo.server.tables.TableManager;
 +import org.apache.accumulo.server.tablets.UniqueNameAllocator;
 +import org.apache.accumulo.server.util.MetadataTableUtil;
 +import org.apache.accumulo.server.util.TablePropUtil;
 +import org.apache.hadoop.fs.FileStatus;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.Text;
 +import org.apache.log4j.Logger;
 +
 +/**
 + * 
 + */
 +class ImportedTableInfo implements Serializable {
 +
 +  private static final long serialVersionUID = 1L;
 +
 +  public String exportDir;
 +  public String user;
 +  public String tableName;
 +  public String tableId;
 +  public String importDir;
 +  public String namespaceId;
 +}
 +
 +class FinishImportTable extends MasterRepo {
 +
 +  private static final long serialVersionUID = 1L;
 +
 +  private ImportedTableInfo tableInfo;
 +
 +  public FinishImportTable(ImportedTableInfo ti) {
 +    this.tableInfo = ti;
 +  }
 +
 +  @Override
 +  public long isReady(long tid, Master environment) throws Exception {
 +    return 0;
 +  }
 +
 +  @Override
 +  public Repo<Master> call(long tid, Master env) throws Exception {
 +
 +    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir, "mappings.txt"));
 +
 +    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
 +
 +    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
 +    Utils.unreserveTable(tableInfo.tableId, tid, true);
 +
 +    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
 +
 +    env.getEventCoordinator().event("Imported table %s ", tableInfo.tableName);
 +
 +    Logger.getLogger(FinishImportTable.class).debug("Imported table " + tableInfo.tableId + " " + tableInfo.tableName);
 +
 +    return null;
 +  }
 +
 +  @Override
 +  public String getReturn() {
 +    return tableInfo.tableId;
 +  }
 +
 +  @Override
 +  public void undo(long tid, Master env) throws Exception {}
 +
 +}
 +
 +class MoveExportedFiles extends MasterRepo {
 +
 +  private static final long serialVersionUID = 1L;
 +
 +  private ImportedTableInfo tableInfo;
 +
 +  MoveExportedFiles(ImportedTableInfo ti) {
 +    this.tableInfo = ti;
 +  }
 +
 +  @Override
 +  public Repo<Master> call(long tid, Master master) throws Exception {
 +    try {
 +      VolumeManager fs = master.getFileSystem();
 +
 +      Map<String,String> fileNameMappings = PopulateMetadataTable.readMappingFile(fs, tableInfo);
 +
 +      for (String oldFileName : fileNameMappings.keySet()) {
 +        if (!fs.exists(new Path(tableInfo.exportDir, oldFileName))) {
 +          throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
 +              "File referenced by exported table does not exists " + oldFileName);
 +        }
 +      }
 +
 +      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
 +
 +      for (FileStatus fileStatus : files) {
 +        String newName = fileNameMappings.get(fileStatus.getPath().getName());
 +
 +        if (newName != null)
 +          fs.rename(fileStatus.getPath(), new Path(tableInfo.importDir, newName));
 +      }
 +
 +      return new FinishImportTable(tableInfo);
 +    } catch (IOException ioe) {
 +      log.warn(ioe.getMessage(), ioe);
 +      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
 +          "Error renaming files " + ioe.getMessage());
 +    }
 +  }
 +}
 +
 +class PopulateMetadataTable extends MasterRepo {
 +
 +  private static final long serialVersionUID = 1L;
 +
 +  private ImportedTableInfo tableInfo;
 +
 +  PopulateMetadataTable(ImportedTableInfo ti) {
 +    this.tableInfo = ti;
 +  }
 +
 +  static Map<String,String> readMappingFile(VolumeManager fs, ImportedTableInfo tableInfo) throws Exception {
-     BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(tableInfo.importDir, "mappings.txt"))));
++    BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(tableInfo.importDir, "mappings.txt")), Constants.UTF8));
 +
 +    try {
 +      Map<String,String> map = new HashMap<String,String>();
 +
 +      String line = null;
 +      while ((line = in.readLine()) != null) {
 +        String sa[] = line.split(":", 2);
 +        map.put(sa[0], sa[1]);
 +      }
 +
 +      return map;
 +    } finally {
 +      in.close();
 +    }
 +
 +  }
 +
 +  @Override
 +  public Repo<Master> call(long tid, Master master) throws Exception {
 +
 +    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
 +
 +    BatchWriter mbw = null;
 +    ZipInputStream zis = null;
 +
 +    try {
 +      VolumeManager fs = master.getFileSystem();
 +
 +      mbw = master.getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
 +
 +      zis = new ZipInputStream(fs.open(path));
 +
 +      Map<String,String> fileNameMappings = readMappingFile(fs, tableInfo);
 +
 +      String bulkDir = new Path(tableInfo.importDir).getName();
 +
 +      ZipEntry zipEntry;
 +      while ((zipEntry = zis.getNextEntry()) != null) {
 +        if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
 +          DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
 +
 +          Key key = new Key();
 +          Value val = new Value();
 +
 +          Mutation m = null;
 +          Text currentRow = null;
 +          int dirCount = 0;
 +
 +          while (true) {
 +            key.readFields(in);
 +            val.readFields(in);
 +
 +            Text endRow = new KeyExtent(key.getRow(), (Text) null).getEndRow();
 +            Text metadataRow = new KeyExtent(new Text(tableInfo.tableId), endRow, null).getMetadataEntry();
 +
 +            Text cq;
 +
 +            if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
 +              String oldName = new Path(key.getColumnQualifier().toString()).getName();
 +              String newName = fileNameMappings.get(oldName);
 +
 +              if (newName == null) {
 +                throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
 +                    "File " + oldName + " does not exist in import dir");
 +              }
 +
 +              cq = new Text("/" + bulkDir + "/" + newName);
 +            } else {
 +              cq = key.getColumnQualifier();
 +            }
 +
 +            if (m == null) {
 +              m = new Mutation(metadataRow);
-               TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
++              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes(Constants.UTF8))));
 +              currentRow = metadataRow;
 +            }
 +
 +            if (!currentRow.equals(metadataRow)) {
 +              mbw.addMutation(m);
 +              m = new Mutation(metadataRow);
-               TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
++              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes(Constants.UTF8))));
 +            }
 +
 +            m.put(key.getColumnFamily(), cq, val);
 +
 +            if (endRow == null && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
 +              mbw.addMutation(m);
 +              break; // its the last column in the last row
 +            }
 +          }
 +
 +          break;
 +        }
 +      }
 +
 +      return new MoveExportedFiles(tableInfo);
 +    } catch (IOException ioe) {
 +      log.warn(ioe.getMessage(), ioe);
 +      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
 +          "Error reading " + path + " " + ioe.getMessage());
 +    } finally {
 +      if (zis != null) {
 +        try {
 +          zis.close();
 +        } catch (IOException ioe) {
 +          log.warn("Failed to close zip file ", ioe);
 +        }
 +      }
 +
 +      if (mbw != null) {
 +        mbw.close();
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public void undo(long tid, Master environment) throws Exception {
 +    MetadataTableUtil.deleteTable(tableInfo.tableId, false, SystemCredentials.get(), environment.getMasterLock());
 +  }
 +}
 +
 +class MapImportFileNames extends MasterRepo {
 +
 +  private static final long serialVersionUID = 1L;
 +
 +  private ImportedTableInfo tableInfo;
 +
 +  MapImportFileNames(ImportedTableInfo ti) {
 +    this.tableInfo = ti;
 +  }
 +
 +  @Override
 +  public Repo<Master> call(long tid, Master environment) throws Exception {
 +
 +    Path path = new Path(tableInfo.importDir, "mappings.txt");
 +
 +    BufferedWriter mappingsWriter = null;
 +
 +    try {
 +      VolumeManager fs = environment.getFileSystem();
 +
 +      fs.mkdirs(new Path(tableInfo.importDir));
 +
 +      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
 +
 +      UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
 +
-       mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path)));
++      mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), Constants.UTF8));
 +
 +      for (FileStatus fileStatus : files) {
 +        String fileName = fileStatus.getPath().getName();
 +        log.info("filename " + fileStatus.getPath().toString());
 +        String sa[] = fileName.split("\\.");
 +        String extension = "";
 +        if (sa.length > 1) {
 +          extension = sa[sa.length - 1];
 +
 +          if (!FileOperations.getValidExtensions().contains(extension)) {
 +            continue;
 +          }
 +        } else {
 +          // assume it is a map file
 +          extension = Constants.MAPFILE_EXTENSION;
 +        }
 +
 +        String newName = "I" + namer.getNextName() + "." + extension;
 +
 +        mappingsWriter.append(fileName);
 +        mappingsWriter.append(':');
 +        mappingsWriter.append(newName);
 +        mappingsWriter.newLine();
 +      }
 +
 +      mappingsWriter.close();
 +      mappingsWriter = null;
 +
 +      return new PopulateMetadataTable(tableInfo);
 +    } catch (IOException ioe) {
 +      log.warn(ioe.getMessage(), ioe);
 +      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
 +          "Error writing mapping file " + path + " " + ioe.getMessage());
 +    } finally {
 +      if (mappingsWriter != null)
 +        try {
 +          mappingsWriter.close();
 +        } catch (IOException ioe) {
 +          log.warn("Failed to close " + path, ioe);
 +        }
 +    }
 +  }
 +
 +  @Override
 +  public void undo(long tid, Master env) throws Exception {
 +    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir));
 +  }
 +}
 +
 +class CreateImportDir extends MasterRepo {
 +
 +  private static final long serialVersionUID = 1L;
 +
 +  private ImportedTableInfo tableInfo;
 +
 +  CreateImportDir(ImportedTableInfo ti) {
 +    this.tableInfo = ti;
 +  }
 +
 +  @Override
 +  public Repo<Master> call(long tid, Master master) throws Exception {
 +
 +    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
 +
 +    Path base = master.getFileSystem().matchingFileSystem(new Path(tableInfo.exportDir), ServerConstants.getTablesDirs());
 +    Path directory = new Path(base, tableInfo.tableId);
 +
 +    Path newBulkDir = new Path(directory, Constants.BULK_PREFIX + namer.getNextName());
 +
 +    tableInfo.importDir = newBulkDir.toString();
 +
 +    return new MapImportFileNames(tableInfo);
 +  }
 +}
 +
 +class ImportPopulateZookeeper extends MasterRepo {
 +
 +  private static final long serialVersionUID = 1L;
 +
 +  private ImportedTableInfo tableInfo;
 +
 +  ImportPopulateZookeeper(ImportedTableInfo ti) {
 +    this.tableInfo = ti;
 +  }
 +
 +  @Override
 +  public long isReady(long tid, Master environment) throws Exception {
 +    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.IMPORT);
 +  }
 +
 +  private Map<String,String> getExportedProps(VolumeManager fs) throws Exception {
 +
 +    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
 +
 +    try {
 +      FileSystem ns = fs.getFileSystemByPath(path);
 +      return TableOperationsImpl.getExportedProps(ns, path);
 +    } catch (IOException ioe) {
 +      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
 +          "Error reading table props from " + path + " " + ioe.getMessage());
 +    }
 +  }
 +
 +  @Override
 +  public Repo<Master> call(long tid, Master env) throws Exception {
 +    // reserve the table name in zookeeper or fail
 +
 +    Utils.tableNameLock.lock();
 +    try {
 +      // write tableName & tableId to zookeeper
 +      Instance instance = HdfsZooInstance.getInstance();
 +
 +      Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
 +
 +      String namespace = Tables.qualify(tableInfo.tableName).getFirst();
 +      String namespaceId = Namespaces.getNamespaceId(instance, namespace);
 +      TableManager.getInstance().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
 +
 +      Tables.clearCache(instance);
 +    } finally {
 +      Utils.tableNameLock.unlock();
 +    }
 +
 +    for (Entry<String,String> entry : getExportedProps(env.getFileSystem()).entrySet())
 +      if (!TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue())) {
 +        throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
 +            "Invalid table property " + entry.getKey());
 +      }
 +
 +    return new CreateImportDir(tableInfo);
 +  }
 +
 +  @Override
 +  public void undo(long tid, Master env) throws Exception {
 +    Instance instance = HdfsZooInstance.getInstance();
 +    TableManager.getInstance().removeTable(tableInfo.tableId);
 +    Utils.unreserveTable(tableInfo.tableId, tid, true);
 +    Tables.clearCache(instance);
 +  }
 +}
 +
 +class ImportSetupPermissions extends MasterRepo {
 +
 +  private static final long serialVersionUID = 1L;
 +
 +  private ImportedTableInfo tableInfo;
 +
 +  public ImportSetupPermissions(ImportedTableInfo ti) {
 +    this.tableInfo = ti;
 +  }
 +
 +  @Override
 +  public long isReady(long tid, Master environment) throws Exception {
 +    return 0;
 +  }
 +
 +  @Override
 +  public Repo<Master> call(long tid, Master env) throws Exception {
 +    // give all table permissions to the creator
 +    SecurityOperation security = AuditedSecurityOperation.getInstance();
 +    for (TablePermission permission : TablePermission.values()) {
 +      try {
 +        security.grantTablePermission(SystemCredentials.get().toThrift(env.getInstance()), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
 +      } catch (ThriftSecurityException e) {
 +        Logger.getLogger(ImportSetupPermissions.class).error(e.getMessage(), e);
 +        throw e;
 +      }
 +    }
 +
 +    // setup permissions in zookeeper before table info in zookeeper
 +    // this way concurrent users will not get a spurious permission denied
 +    // error
 +    return new ImportPopulateZookeeper(tableInfo);
 +  }
 +
 +  @Override
 +  public void undo(long tid, Master env) throws Exception {
 +    AuditedSecurityOperation.getInstance().deleteTable(SystemCredentials.get().toThrift(env.getInstance()), tableInfo.tableId, tableInfo.namespaceId);
 +  }
 +}
 +
 +public class ImportTable extends MasterRepo {
 +  private static final long serialVersionUID = 1L;
 +
 +  private ImportedTableInfo tableInfo;
 +
 +  public ImportTable(String user, String tableName, String exportDir, String namespaceId) {
 +    tableInfo = new ImportedTableInfo();
 +    tableInfo.tableName = tableName;
 +    tableInfo.user = user;
 +    tableInfo.exportDir = exportDir;
 +    tableInfo.namespaceId = namespaceId;
 +  }
 +
 +  @Override
 +  public long isReady(long tid, Master environment) throws Exception {
 +    return Utils.reserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid)
 +        + Utils.reserveNamespace(tableInfo.namespaceId, tid, false, true, TableOperation.IMPORT);
 +  }
 +
 +  @Override
 +  public Repo<Master> call(long tid, Master env) throws Exception {
 +    checkVersions(env);
 +
 +    // first step is to reserve a table id.. if the machine fails during this step
 +    // it is ok to retry... the only side effect is that a table id may not be used
 +    // or skipped
 +
 +    // assuming only the master process is creating tables
 +
 +    Utils.idLock.lock();
 +    try {
 +      Instance instance = HdfsZooInstance.getInstance();
 +      tableInfo.tableId = Utils.getNextTableId(tableInfo.tableName, instance);
 +      return new ImportSetupPermissions(tableInfo);
 +    } finally {
 +      Utils.idLock.unlock();
 +    }
 +  }
 +
 +  public void checkVersions(Master env) throws ThriftTableOperationException {
 +    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
 +
 +    ZipInputStream zis = null;
 +
 +    try {
 +      zis = new ZipInputStream(env.getFileSystem().open(path));
 +
 +      Integer exportVersion = null;
 +      Integer dataVersion = null;
 +
 +      ZipEntry zipEntry;
 +      while ((zipEntry = zis.getNextEntry()) != null) {
 +        if (zipEntry.getName().equals(Constants.EXPORT_INFO_FILE)) {
-           BufferedReader in = new BufferedReader(new InputStreamReader(zis));
++          BufferedReader in = new BufferedReader(new InputStreamReader(zis, Constants.UTF8));
 +          String line = null;
 +          while ((line = in.readLine()) != null) {
 +            String sa[] = line.split(":", 2);
 +            if (sa[0].equals(ExportTable.EXPORT_VERSION_PROP)) {
 +              exportVersion = Integer.parseInt(sa[1]);
 +            } else if (sa[0].equals(ExportTable.DATA_VERSION_PROP)) {
 +              dataVersion = Integer.parseInt(sa[1]);
 +            }
 +          }
 +
 +          break;
 +        }
 +      }
 +
 +      zis.close();
 +      zis = null;
 +
 +      if (exportVersion == null || exportVersion > ExportTable.VERSION)
 +        throw new ThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
 +            "Incompatible export version " + exportVersion);
 +
 +      if (dataVersion == null || dataVersion > ServerConstants.DATA_VERSION)
 +        throw new ThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
 +            "Incompatible data version " + exportVersion);
 +
 +    } catch (IOException ioe) {
 +      log.warn(ioe.getMessage(), ioe);
 +      throw new ThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
 +          "Failed to read export metadata " + ioe.getMessage());
 +    } finally {
 +      if (zis != null)
 +        try {
 +          zis.close();
 +        } catch (IOException ioe) {
 +          log.warn(ioe.getMessage(), ioe);
 +        }
 +    }
 +  }
 +
 +  @Override
 +  public void undo(long tid, Master env) throws Exception {
 +    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
 +    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
 +  }
 +}
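
Note: the import path writes and re-reads a simple mappings.txt (oldFileName:newFileName, one entry per line), again with an explicit charset on both the writer and the reader after this merge. A small sketch of that round trip is below, with StandardCharsets.UTF_8 standing in for Constants.UTF8, in-memory streams instead of the VolumeManager, and purely illustrative file names.

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

public class MappingsFileSketch {
  public static void main(String[] args) throws Exception {
    // Same line format the MapImportFileNames step writes: oldName:newName
    String contents = "F0000a.rf:I0000x.rf\nF0000b.rf:I0000y.rf\n";

    Map<String,String> map = new HashMap<String,String>();
    BufferedReader in = new BufferedReader(new InputStreamReader(
        new ByteArrayInputStream(contents.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8));
    String line;
    while ((line = in.readLine()) != null) {
      // limit 2 keeps any ':' in the new name intact, as in readMappingFile
      String[] sa = line.split(":", 2);
      map.put(sa[0], sa[1]);
    }
    in.close();
    System.out.println(map); // two entries: F0000a.rf -> I0000x.rf, F0000b.rf -> I0000y.rf
  }
}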

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
index a261cbb,0000000..8c5ed00
mode 100644,000000..100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
@@@ -1,112 -1,0 +1,112 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.master.tableOps;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.NamespaceNotFoundException;
 +import org.apache.accumulo.core.client.impl.Namespaces;
 +import org.apache.accumulo.core.client.impl.Tables;
 +import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 +import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 +import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.Repo;
 +import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 +import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 +import org.apache.accumulo.master.Master;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.apache.log4j.Logger;
 +
 +public class RenameTable extends MasterRepo {
 +
 +  private static final long serialVersionUID = 1L;
 +  private String tableId;
 +  private String oldTableName;
 +  private String newTableName;
 +  private String namespaceId;
 +
 +  @Override
 +  public long isReady(long tid, Master environment) throws Exception {
 +    return Utils.reserveNamespace(namespaceId, tid, false, true, TableOperation.RENAME) + Utils.reserveTable(tableId, tid, true, true, TableOperation.RENAME);
 +  }
 +
 +  public RenameTable(String tableId, String oldTableName, String newTableName) throws NamespaceNotFoundException {
 +    this.tableId = tableId;
 +    this.oldTableName = oldTableName;
 +    this.newTableName = newTableName;
 +    Instance inst = HdfsZooInstance.getInstance();
 +    this.namespaceId = Tables.getNamespaceId(inst, tableId);
 +  }
 +
 +  @Override
 +  public Repo<Master> call(long tid, Master master) throws Exception {
 +
 +    Instance instance = master.getInstance();
 +    Pair<String,String> qualifiedOldTableName = Tables.qualify(oldTableName);
 +    Pair<String,String> qualifiedNewTableName = Tables.qualify(newTableName);
 +
 +    // ensure no attempt is made to rename across namespaces
 +    if (newTableName.contains(".") && !namespaceId.equals(Namespaces.getNamespaceId(instance, qualifiedNewTableName.getFirst())))
 +      throw new ThriftTableOperationException(tableId, oldTableName, TableOperation.RENAME, TableOperationExceptionType.INVALID_NAME,
 +          "Namespace in new table name does not match the old table name");
 +
 +    IZooReaderWriter zoo = ZooReaderWriter.getRetryingInstance();
 +
 +    Utils.tableNameLock.lock();
 +    try {
 +      Utils.checkTableDoesNotExist(instance, newTableName, tableId, TableOperation.RENAME);
 +
 +      final String newName = qualifiedNewTableName.getSecond();
 +      final String oldName = qualifiedOldTableName.getSecond();
 +
 +      final String tap = ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_NAME;
 +
 +      zoo.mutate(tap, null, null, new Mutator() {
 +        @Override
 +        public byte[] mutate(byte[] current) throws Exception {
-           final String currentName = new String(current);
++          final String currentName = new String(current, Constants.UTF8);
 +          if (currentName.equals(newName))
 +            return null; // assume in this case the operation is running again, so we are done
 +          if (!currentName.equals(oldName)) {
 +            throw new ThriftTableOperationException(null, oldTableName, TableOperation.RENAME, TableOperationExceptionType.NOTFOUND,
 +                "Name changed while processing");
 +          }
-           return newName.getBytes();
++          return newName.getBytes(Constants.UTF8);
 +        }
 +      });
 +      Tables.clearCache(instance);
 +    } finally {
 +      Utils.tableNameLock.unlock();
 +      Utils.unreserveTable(tableId, tid, true);
 +      Utils.unreserveNamespace(this.namespaceId, tid, false);
 +    }
 +
 +    Logger.getLogger(RenameTable.class).debug("Renamed table " + tableId + " " + oldTableName + " " + newTableName);
 +
 +    return null;
 +  }
 +
 +  @Override
 +  public void undo(long tid, Master env) throws Exception {
 +    Utils.unreserveTable(tableId, tid, true);
 +    Utils.unreserveNamespace(namespaceId, tid, false);
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
index 564d939,0000000..577f5d5
mode 100644,000000..100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
@@@ -1,160 -1,0 +1,163 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.master.tableOps;
 +
 +import java.math.BigInteger;
 +import java.util.concurrent.locks.Lock;
 +import java.util.concurrent.locks.ReentrantLock;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.impl.Namespaces;
 +import org.apache.accumulo.core.client.impl.Tables;
 +import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 +import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 +import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.DistributedReadWriteLock;
 +import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 +import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 +import org.apache.accumulo.fate.zookeeper.ZooReservation;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.zookeeper.ZooQueueLock;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.apache.commons.codec.binary.Base64;
 +import org.apache.log4j.Logger;
 +import org.apache.zookeeper.KeeperException;
 +
 +public class Utils {
- 
++  private static final byte[] ZERO_BYTE = new byte[] {'0'};
++  
 +  static void checkTableDoesNotExist(Instance instance, String tableName, String tableId, TableOperation operation) throws ThriftTableOperationException {
 +
 +    String id = Tables.getNameToIdMap(instance).get(tableName);
 +
 +    if (id != null && !id.equals(tableId))
 +      throw new ThriftTableOperationException(null, tableName, operation, TableOperationExceptionType.EXISTS, null);
 +  }
 +
 +  static String getNextTableId(String tableName, Instance instance) throws ThriftTableOperationException {
 +
 +    String tableId = null;
 +    try {
 +      IZooReaderWriter zoo = ZooReaderWriter.getRetryingInstance();
 +      final String ntp = ZooUtil.getRoot(instance) + Constants.ZTABLES;
-       byte[] nid = zoo.mutate(ntp, "0".getBytes(), ZooUtil.PUBLIC, new Mutator() {
++      byte[] nid = zoo.mutate(ntp, ZERO_BYTE, ZooUtil.PUBLIC, new Mutator() {
 +        @Override
 +        public byte[] mutate(byte[] currentValue) throws Exception {
-           BigInteger nextId = new BigInteger(new String(currentValue), Character.MAX_RADIX);
++          BigInteger nextId = new BigInteger(new String(currentValue, Constants.UTF8), Character.MAX_RADIX);
 +          nextId = nextId.add(BigInteger.ONE);
-           return nextId.toString(Character.MAX_RADIX).getBytes();
++          return nextId.toString(Character.MAX_RADIX).getBytes(Constants.UTF8);
 +        }
 +      });
-       return new String(nid);
++      return new String(nid, Constants.UTF8);
 +    } catch (Exception e1) {
 +      Logger.getLogger(CreateTable.class).error("Failed to assign tableId to " + tableName, e1);
 +      throw new ThriftTableOperationException(tableId, tableName, TableOperation.CREATE, TableOperationExceptionType.OTHER, e1.getMessage());
 +    }
 +  }
 +
 +  static final Lock tableNameLock = new ReentrantLock();
 +  static final Lock idLock = new ReentrantLock();
 +  private static final Logger log = Logger.getLogger(Utils.class);
 +
 +  public static long reserveTable(String tableId, long tid, boolean writeLock, boolean tableMustExist, TableOperation op) throws Exception {
 +    if (getLock(tableId, tid, writeLock).tryLock()) {
 +      if (tableMustExist) {
 +        Instance instance = HdfsZooInstance.getInstance();
 +        IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
 +        if (!zk.exists(ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId))
 +          throw new ThriftTableOperationException(tableId, "", op, TableOperationExceptionType.NOTFOUND, "Table does not exist");
 +      }
 +      log.info("table " + tableId + " (" + Long.toHexString(tid) + ") locked for " + (writeLock ? "write" : "read") + " operation: " + op);
 +      return 0;
 +    } else
 +      return 100;
 +  }
 +
 +  public static void unreserveTable(String tableId, long tid, boolean writeLock) throws Exception {
 +    getLock(tableId, tid, writeLock).unlock();
 +    log.info("table " + tableId + " (" + Long.toHexString(tid) + ") unlocked for " + (writeLock ? "write" : "read"));
 +  }
 +
 +  public static void unreserveNamespace(String namespaceId, long id, boolean writeLock) throws Exception {
 +    getLock(namespaceId, id, writeLock).unlock();
 +    log.info("namespace " + namespaceId + " (" + Long.toHexString(id) + ") unlocked for " + (writeLock ? "write" : "read"));
 +  }
 +
 +  public static long reserveNamespace(String namespaceId, long id, boolean writeLock, boolean mustExist, TableOperation op) throws Exception {
 +    if (getLock(namespaceId, id, writeLock).tryLock()) {
 +      if (mustExist) {
 +        Instance instance = HdfsZooInstance.getInstance();
 +        IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
 +        if (!zk.exists(ZooUtil.getRoot(instance) + Constants.ZNAMESPACES + "/" + namespaceId))
 +          throw new ThriftTableOperationException(namespaceId, "", op, TableOperationExceptionType.NAMESPACE_NOTFOUND, "Namespace does not exist");
 +      }
 +      log.info("namespace " + namespaceId + " (" + Long.toHexString(id) + ") locked for " + (writeLock ? "write" : "read") + " operation: " + op);
 +      return 0;
 +    } else
 +      return 100;
 +  }
 +
 +  public static long reserveHdfsDirectory(String directory, long tid) throws KeeperException, InterruptedException {
 +    Instance instance = HdfsZooInstance.getInstance();
 +
-     String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/" + new String(Base64.encodeBase64(directory.getBytes()));
++    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/"
++        + new String(Base64.encodeBase64(directory.getBytes(Constants.UTF8)), Constants.UTF8);
 +
 +    IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
 +
 +    if (ZooReservation.attempt(zk, resvPath, String.format("%016x", tid), "")) {
 +      return 0;
 +    } else
 +      return 50;
 +  }
 +
 +  public static void unreserveHdfsDirectory(String directory, long tid) throws KeeperException, InterruptedException {
 +    Instance instance = HdfsZooInstance.getInstance();
-     String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/" + new String(Base64.encodeBase64(directory.getBytes()));
++    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/"
++        + new String(Base64.encodeBase64(directory.getBytes(Constants.UTF8)), Constants.UTF8);
 +    ZooReservation.release(ZooReaderWriter.getRetryingInstance(), resvPath, String.format("%016x", tid));
 +  }
 +
 +  private static Lock getLock(String tableId, long tid, boolean writeLock) throws Exception {
-     byte[] lockData = String.format("%016x", tid).getBytes();
++    byte[] lockData = String.format("%016x", tid).getBytes(Constants.UTF8);
 +    ZooQueueLock qlock = new ZooQueueLock(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZTABLE_LOCKS + "/" + tableId, false);
 +    Lock lock = DistributedReadWriteLock.recoverLock(qlock, lockData);
 +    if (lock == null) {
 +      DistributedReadWriteLock locker = new DistributedReadWriteLock(qlock, lockData);
 +      if (writeLock)
 +        lock = locker.writeLock();
 +      else
 +        lock = locker.readLock();
 +    }
 +    return lock;
 +  }
 +
 +  public static Lock getReadLock(String tableId, long tid) throws Exception {
 +    return Utils.getLock(tableId, tid, false);
 +  }
 +
 +  static void checkNamespaceDoesNotExist(Instance instance, String namespace, String namespaceId, TableOperation operation)
 +      throws ThriftTableOperationException {
 +
 +    String n = Namespaces.getNameToIdMap(instance).get(namespace);
 +
 +    if (n != null && !n.equals(namespaceId))
 +      throw new ThriftTableOperationException(null, namespace, operation, TableOperationExceptionType.NAMESPACE_EXISTS, null);
 +  }
 +}
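
Note: the getNextTableId change above keeps the same counter scheme (a base-36 string stored in ZooKeeper and incremented via BigInteger) but routes every byte/String conversion through Constants.UTF8. The sketch below isolates just the increment step from the Mutator, with StandardCharsets.UTF_8 standing in for Constants.UTF8 and the ZooKeeper mutate call omitted.

import java.math.BigInteger;
import java.nio.charset.StandardCharsets;

public class TableIdCounterSketch {
  // Mirrors the Mutator body: parse the current id as base 36 (Character.MAX_RADIX),
  // add one, and re-encode with an explicit UTF-8 charset.
  static byte[] nextId(byte[] currentValue) {
    BigInteger next = new BigInteger(new String(currentValue, StandardCharsets.UTF_8), Character.MAX_RADIX)
        .add(BigInteger.ONE);
    return next.toString(Character.MAX_RADIX).getBytes(StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    byte[] id = "0".getBytes(StandardCharsets.UTF_8);
    for (int i = 0; i < 40; i++) {
      id = nextId(id);
    }
    System.out.println(new String(id, StandardCharsets.UTF_8)); // "14", i.e. 40 in base 36
  }
}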

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
index 7189637,0000000..20b7328
mode 100644,000000..100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
@@@ -1,92 -1,0 +1,92 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.master.tserverOps;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.Repo;
 +import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 +import org.apache.accumulo.master.Master;
 +import org.apache.accumulo.master.EventCoordinator.Listener;
 +import org.apache.accumulo.master.tableOps.MasterRepo;
 +import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
 +import org.apache.accumulo.server.master.state.TServerInstance;
 +import org.apache.accumulo.server.zookeeper.ZooLock;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.apache.log4j.Logger;
 +import org.apache.thrift.transport.TTransportException;
 +
 +public class ShutdownTServer extends MasterRepo {
 +  
 +  private static final long serialVersionUID = 1L;
 +  private static final Logger log = Logger.getLogger(ShutdownTServer.class);
 +  private TServerInstance server;
 +  private boolean force;
 +  
 +  public ShutdownTServer(TServerInstance server, boolean force) {
 +    this.server = server;
 +    this.force = force;
 +  }
 +  
 +  @Override
 +  public long isReady(long tid, Master environment) throws Exception {
 +    return 0;
 +  }
 +  
 +  @Override
 +  public Repo<Master> call(long tid, Master master) throws Exception {
 +    // suppress assignment of tablets to the server
 +    if (force) {
 +      String path = ZooUtil.getRoot(master.getInstance()) + Constants.ZTSERVERS + "/" + server.getLocation();
 +      ZooLock.deleteLock(path);
 +      path = ZooUtil.getRoot(master.getInstance()) + Constants.ZDEADTSERVERS + "/" + server.getLocation();
 +      IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-       zoo.putPersistentData(path, "forced down".getBytes(), NodeExistsPolicy.OVERWRITE);
++      zoo.putPersistentData(path, "forced down".getBytes(Constants.UTF8), NodeExistsPolicy.OVERWRITE);
 +      return null;
 +    }
 +    
 +    // TODO move this to isReady() and drop while loop? - ACCUMULO-1259
 +    Listener listener = master.getEventCoordinator().getListener();
 +    master.shutdownTServer(server);
 +    while (master.onlineTabletServers().contains(server)) {
 +      TServerConnection connection = master.getConnection(server);
 +      if (connection != null) {
 +        try {
 +          TabletServerStatus status = connection.getTableMap(false);
 +          if (status.tableMap != null && status.tableMap.isEmpty()) {
 +            log.info("tablet server hosts no tablets " + server);
 +            connection.halt(master.getMasterLock());
 +            log.info("tablet server asked to halt " + server);
 +            break;
 +          }
 +        } catch (TTransportException ex) {
 +          // expected
 +        } catch (Exception ex) {
 +          log.error("Error talking to tablet server " + server + ": " + ex);
 +        }
 +      }
 +      listener.waitForEvents(1000);
 +    }
 +    
 +    return null;
 +  }
 +  
 +  @Override
 +  public void undo(long tid, Master m) throws Exception {}
 +}
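
The only change flagged in this hunk replaces the platform-default String.getBytes() with an explicit charset when writing the "forced down" marker for a force-stopped tablet server, so the bytes stored in ZooKeeper do not depend on the JVM's default encoding. A minimal sketch of the resulting call, assuming the same path and zoo variables set up above and the Constants.UTF8 charset constant used in the diff:

    // Sketch only, not part of the diff above.
    byte[] marker = "forced down".getBytes(Constants.UTF8);
    zoo.putPersistentData(path, marker, NodeExistsPolicy.OVERWRITE);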

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
index f794112,0000000..8fcb689
mode 100644,000000..100644
--- a/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
@@@ -1,88 -1,0 +1,92 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.master.util;
 +
 +import java.util.ArrayList;
 +import java.util.List;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.cli.Help;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.AdminUtil;
 +import org.apache.accumulo.fate.ZooStore;
 +import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 +import org.apache.accumulo.master.Master;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +
 +import com.beust.jcommander.JCommander;
 +import com.beust.jcommander.Parameter;
 +import com.beust.jcommander.Parameters;
 +
 +/**
 + * A utility to administer FATE operations
 + */
 +public class FateAdmin {
 +  
 +  static class TxOpts {
 +    @Parameter(description = "<txid>", required = true)
 +    List<String> args = new ArrayList<String>();
 +  }
 +  
 +  @Parameters(commandDescription = "Stop an existing FATE by transaction id")
 +  static class FailOpts extends TxOpts {}
 +  
 +  @Parameters(commandDescription = "Delete an existing FATE by transaction id")
 +  static class DeleteOpts extends TxOpts {}
 +  
 +  @Parameters(commandDescription = "List the existing FATE transactions")
 +  static class PrintOpts {}
 +  
 +  public static void main(String[] args) throws Exception {
 +    Help opts = new Help();
 +    JCommander jc = new JCommander(opts);
 +    jc.setProgramName(FateAdmin.class.getName());
 +    jc.addCommand("fail", new FailOpts());
 +    jc.addCommand("delete", new DeleteOpts());
 +    jc.addCommand("print", new PrintOpts());
 +    jc.parse(args);
 +    if (opts.help || jc.getParsedCommand() == null) {
 +      jc.usage();
 +      System.exit(1);
 +    }
 +    
 +    System.err.printf("This tool has been deprecated%nFATE administration now available within 'accumulo shell'%n$ fate fail <txid>... | delete <txid>... | print [<txid>...]%n%n");
 +    
 +    AdminUtil<Master> admin = new AdminUtil<Master>();
 +    
 +    Instance instance = HdfsZooInstance.getInstance();
 +    String path = ZooUtil.getRoot(instance) + Constants.ZFATE;
 +    String masterPath = ZooUtil.getRoot(instance) + Constants.ZMASTER_LOCK;
 +    IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
 +    ZooStore<Master> zs = new ZooStore<Master>(path, zk);
 +    
 +    if (jc.getParsedCommand().equals("fail")) {
-       admin.prepFail(zs, zk, masterPath, args[1]);
++      if (!admin.prepFail(zs, zk, masterPath, args[1])) {
++        System.exit(1);
++      }
 +    } else if (jc.getParsedCommand().equals("delete")) {
-       admin.prepDelete(zs, zk, masterPath, args[1]);
++      if (!admin.prepDelete(zs, zk, masterPath, args[1])) {
++        System.exit(1);
++      }
 +      admin.deleteLocks(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS, args[1]);
 +    } else if (jc.getParsedCommand().equals("print")) {
 +      admin.print(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS);
 +    }
 +  }
 +}
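
The flagged change in FateAdmin makes the tool honor the boolean returned by AdminUtil.prepFail and AdminUtil.prepDelete, exiting with status 1 when a transaction cannot be prepared; a side effect is that deleteLocks now only runs after a successful prepDelete. A minimal sketch of the pattern, assuming the zs, zk and masterPath values set up above and a txid string taken from the command-line args:

    // Sketch only, not part of the diff above: surface a failed prep as a
    // non-zero exit code so scripts wrapping the (deprecated) tool can react.
    if (!admin.prepDelete(zs, zk, masterPath, txid)) {
      System.exit(1);
    }
    admin.deleteLocks(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS, txid);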

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/server/monitor/src/main/java/org/apache/accumulo/monitor/EmbeddedWebServer.java
----------------------------------------------------------------------
diff --cc server/monitor/src/main/java/org/apache/accumulo/monitor/EmbeddedWebServer.java
index 5439b8a,0000000..1eeb04e
mode 100644,000000..100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/EmbeddedWebServer.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/EmbeddedWebServer.java
@@@ -1,94 -1,0 +1,95 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.monitor;
 +
 +import javax.servlet.http.HttpServlet;
 +
 +import org.apache.accumulo.core.conf.Property;
 +import org.mortbay.jetty.Server;
 +import org.mortbay.jetty.bio.SocketConnector;
 +import org.mortbay.jetty.handler.ContextHandlerCollection;
 +import org.mortbay.jetty.security.SslSocketConnector;
 +import org.mortbay.jetty.servlet.Context;
 +import org.mortbay.jetty.servlet.SessionHandler;
 +
 +public class EmbeddedWebServer {
++  private static String EMPTY = "";
 +  
 +  Server server = null;
 +  SocketConnector sock;
 +  ContextHandlerCollection handler;
 +  Context root;
 +  boolean usingSsl;
 +  
 +  public EmbeddedWebServer() {
 +    this("0.0.0.0", 0);
 +  }
 +  
 +  public EmbeddedWebServer(String host, int port) {
 +    server = new Server();
 +    handler = new ContextHandlerCollection();
 +    root = new Context(handler, "/", new SessionHandler(), null, null, null);
 +    
-     if (Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_KEYSTORE).equals("")
-         || Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_KEYSTOREPASS).equals("")
-         || Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_TRUSTSTORE).equals("")
-         || Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_TRUSTSTOREPASS).equals("")) {
++    if (EMPTY.equals(Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_KEYSTORE))
++        || EMPTY.equals(Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_KEYSTOREPASS))
++        || EMPTY.equals(Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_TRUSTSTORE))
++        || EMPTY.equals(Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_TRUSTSTOREPASS))) {
 +      sock = new SocketConnector();
 +      usingSsl = false;
 +    } else {
 +      sock = new SslSocketConnector();
 +      ((SslSocketConnector) sock).setKeystore(Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_KEYSTORE));
 +      ((SslSocketConnector) sock).setKeyPassword(Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_KEYSTOREPASS));
 +      ((SslSocketConnector) sock).setTruststore(Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_TRUSTSTORE));
 +      ((SslSocketConnector) sock).setTrustPassword(Monitor.getSystemConfiguration().get(Property.MONITOR_SSL_TRUSTSTOREPASS));
 +      usingSsl = true;
 +    }
 +    sock.setHost(host);
 +    sock.setPort(port);
 +  }
 +  
 +  public void addServlet(Class<? extends HttpServlet> klass, String where) {
 +    root.addServlet(klass, where);
 +  }
 +  
 +  public int getPort() {
 +    return sock.getLocalPort();
 +  }
 +  
 +  public void start() {
 +    try {
 +      server.addConnector(sock);
 +      server.setHandler(handler);
 +      server.start();
 +    } catch (Exception e) {
 +      stop();
 +      throw new RuntimeException(e);
 +    }
 +  }
 +  
 +  public void stop() {
 +    try {
 +      server.stop();
 +    } catch (Exception e) {
 +      throw new RuntimeException(e);
 +    }
 +  }
 +  
 +  public boolean isUsingSsl() {
 +    return usingSsl;
 +  }
 +}
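
The flagged change in EmbeddedWebServer compares each monitor SSL property against the EMPTY constant rather than calling equals("") on the looked-up value. The outcome is the same in the normal case (fall back to a plain SocketConnector whenever any keystore/truststore setting is blank), but the constant-first form cannot throw a NullPointerException if a lookup were ever to return null. A small illustration of the difference, using a hypothetical null value:

    // Sketch only, not part of the diff above.
    String keystore = null;                // stand-in for a missing value
    boolean blank = "".equals(keystore);   // evaluates to false, no NPE
    // keystore.equals("") would throw a NullPointerException here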

