From: ctubbsii@apache.org
To: commits@accumulo.apache.org
Reply-To: dev@accumulo.apache.org
Date: Fri, 06 Sep 2013 18:23:15 -0000
Message-Id: <98eee7d7fcc44d1cb52e4fd4abbd5ead@git.apache.org>
Subject: [47/53] [abbrv] ACCUMULO-658 consistent package names to avoid overlapped sealed jars
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
X-Mailer: ASF-Git Admin Mailer
Mailing-List: contact commits-help@accumulo.apache.org; run by ezmlm

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
new file mode 100644
index 0000000..9cd53a3
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.accumulo.server.util; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.accumulo.core.client.AccumuloException; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.Instance; +import org.apache.accumulo.core.client.TableNotFoundException; +import org.apache.accumulo.core.client.admin.InstanceOperations; +import org.apache.accumulo.core.client.impl.ClientExec; +import org.apache.accumulo.core.client.impl.MasterClient; +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken; +import org.apache.accumulo.core.master.thrift.MasterClientService; +import org.apache.accumulo.core.metadata.MetadataTable; +import org.apache.accumulo.core.security.Credentials; +import org.apache.accumulo.core.util.AddressUtil; +import org.apache.accumulo.server.cli.ClientOpts; +import org.apache.accumulo.server.security.SystemCredentials; +import org.apache.accumulo.trace.instrument.Tracer; +import org.apache.log4j.Logger; + +import com.beust.jcommander.JCommander; +import com.beust.jcommander.Parameter; +import com.beust.jcommander.Parameters; + +public class Admin { + private static final Logger log = Logger.getLogger(Admin.class); + + static class AdminOpts extends ClientOpts { + @Parameter(names = {"-f", "--force"}, description = "force the given server to stop by removing its lock") + boolean force = false; + } + + @Parameters(commandDescription = "stop the tablet server on the given hosts") + static class StopCommand { + @Parameter(description = " { ... }") + List args = new ArrayList(); + } + + @Parameters(commandDescription = "Ping tablet servers. If no arguments, pings all.") + static class PingCommand { + @Parameter(description = "{ ... 
}") + List args = new ArrayList(); + } + + @Parameters(commandDescription = "print tablets that are offline in online tables") + static class CheckTabletsCommand { + @Parameter(names = "--fixFiles", description = "Remove dangling file pointers") + boolean fixFiles = false; + + @Parameter(names = {"-t", "--table"}, description = "Table to check, if not set checks all tables") + String table = null; + } + + @Parameters(commandDescription = "stop the master") + static class StopMasterCommand {} + + @Parameters(commandDescription = "stop all the servers") + static class StopAllCommand {} + + @Parameters(commandDescription = "list Accumulo instances in zookeeper") + static class ListInstancesCommand { + @Parameter(names = "--print-errors", description = "display errors while listing instances") + boolean printErrors = false; + @Parameter(names = "--print-all", description = "print information for all instances, not just those with names") + boolean printAll = false; + } + + public static void main(String[] args) { + boolean everything; + + AdminOpts opts = new AdminOpts(); + JCommander cl = new JCommander(opts); + cl.setProgramName(Admin.class.getName()); + + CheckTabletsCommand checkTabletsCommand = new CheckTabletsCommand(); + cl.addCommand("checkTablets", checkTabletsCommand); + + ListInstancesCommand listIntancesOpts = new ListInstancesCommand(); + cl.addCommand("listInstances", listIntancesOpts); + + PingCommand pingCommand = new PingCommand(); + cl.addCommand("ping", pingCommand); + + StopCommand stopOpts = new StopCommand(); + cl.addCommand("stop", stopOpts); + StopAllCommand stopAllOpts = new StopAllCommand(); + cl.addCommand("stopAll", stopAllOpts); + StopMasterCommand stopMasterOpts = new StopMasterCommand(); + cl.addCommand("stopMaster", stopMasterOpts); + cl.parse(args); + + if (opts.help || cl.getParsedCommand() == null) { + cl.usage(); + return; + } + Instance instance = opts.getInstance(); + + try { + String principal; + AuthenticationToken token; + if (opts.getToken() == null) { + principal = SystemCredentials.get().getPrincipal(); + token = SystemCredentials.get().getToken(); + } else { + principal = opts.principal; + token = opts.getToken(); + } + + int rc = 0; + + if (cl.getParsedCommand().equals("listInstances")) { + ListInstances.listInstances(instance.getZooKeepers(), listIntancesOpts.printAll, listIntancesOpts.printErrors); + } else if (cl.getParsedCommand().equals("ping")) { + if (ping(instance, principal, token, pingCommand.args) != 0) + rc = 4; + } else if (cl.getParsedCommand().equals("checkTablets")) { + System.out.println("\n*** Looking for offline tablets ***\n"); + if (FindOfflineTablets.findOffline(instance, new Credentials(principal, token), checkTabletsCommand.table) != 0) + rc = 5; + System.out.println("\n*** Looking for missing files ***\n"); + if (checkTabletsCommand.table == null) { + if (RemoveEntriesForMissingFiles.checkAllTables(instance, principal, token, checkTabletsCommand.fixFiles) != 0) + rc = 6; + } else { + if (RemoveEntriesForMissingFiles.checkTable(instance, principal, token, checkTabletsCommand.table, checkTabletsCommand.fixFiles) != 0) + rc = 6; + } + + } else if (cl.getParsedCommand().equals("stop")) { + stopTabletServer(instance, new Credentials(principal, token), stopOpts.args, opts.force); + } else { + everything = cl.getParsedCommand().equals("stopAll"); + + if (everything) + flushAll(instance, principal, token); + + stopServer(instance, new Credentials(principal, token), everything); + } + + if (rc != 0) + System.exit(rc); + } catch 
(AccumuloException e) { + log.error(e, e); + System.exit(1); + } catch (AccumuloSecurityException e) { + log.error(e, e); + System.exit(2); + } catch (Exception e) { + log.error(e, e); + System.exit(3); + } + } + + private static int ping(Instance instance, String principal, AuthenticationToken token, List args) throws AccumuloException, + AccumuloSecurityException { + + InstanceOperations io = instance.getConnector(principal, token).instanceOperations(); + + if (args.size() == 0) { + args = io.getTabletServers(); + } + + int unreachable = 0; + + for (String tserver : args) { + try { + io.ping(tserver); + System.out.println(tserver + " OK"); + } catch (AccumuloException ae) { + System.out.println(tserver + " FAILED (" + ae.getMessage() + ")"); + unreachable++; + } + } + + System.out.printf("\n%d of %d tablet servers unreachable\n\n", unreachable, args.size()); + return unreachable; + } + + /** + * flushing during shutdown is a performance optimization, its not required. The method will make an attempt to initiate flushes of all tables and give up if + * it takes too long. + * + */ + private static void flushAll(final Instance instance, final String principal, final AuthenticationToken token) throws AccumuloException, + AccumuloSecurityException { + + final AtomicInteger flushesStarted = new AtomicInteger(0); + + Runnable flushTask = new Runnable() { + + @Override + public void run() { + try { + Connector conn = instance.getConnector(principal, token); + Set tables = conn.tableOperations().tableIdMap().keySet(); + for (String table : tables) { + if (table.equals(MetadataTable.NAME)) + continue; + try { + conn.tableOperations().flush(table, null, null, false); + flushesStarted.incrementAndGet(); + } catch (TableNotFoundException e) {} + } + } catch (Exception e) { + log.warn("Failed to intiate flush " + e.getMessage()); + } + } + }; + + Thread flusher = new Thread(flushTask); + flusher.setDaemon(true); + flusher.start(); + + long start = System.currentTimeMillis(); + try { + flusher.join(3000); + } catch (InterruptedException e) {} + + while (flusher.isAlive() && System.currentTimeMillis() - start < 15000) { + int flushCount = flushesStarted.get(); + try { + flusher.join(1000); + } catch (InterruptedException e) {} + + if (flushCount == flushesStarted.get()) { + // no progress was made while waiting for join... 
maybe its stuck, stop waiting on it + break; + } + } + } + + private static void stopServer(final Instance instance, final Credentials credentials, final boolean tabletServersToo) throws AccumuloException, + AccumuloSecurityException { + MasterClient.execute(instance, new ClientExec() { + @Override + public void execute(MasterClientService.Client client) throws Exception { + client.shutdown(Tracer.traceInfo(), credentials.toThrift(instance), tabletServersToo); + } + }); + } + + private static void stopTabletServer(final Instance instance, final Credentials creds, List servers, final boolean force) throws AccumuloException, + AccumuloSecurityException { + for (String server : servers) { + InetSocketAddress address = AddressUtil.parseAddress(server); + final String finalServer = org.apache.accumulo.core.util.AddressUtil.toString(address); + log.info("Stopping server " + finalServer); + MasterClient.execute(instance, new ClientExec() { + @Override + public void execute(MasterClientService.Client client) throws Exception { + client.shutdownTabletServer(Tracer.traceInfo(), creds.toThrift(instance), finalServer, force); + } + }); + } + } +} http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java ---------------------------------------------------------------------- diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java b/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java new file mode 100644 index 0000000..21f01ce --- /dev/null +++ b/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.accumulo.server.util;
+
+import java.util.Iterator;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.conf.DefaultConfiguration;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Credentials;
+import org.apache.accumulo.server.cli.ClientOpts;
+import org.apache.accumulo.server.master.LiveTServerSet;
+import org.apache.accumulo.server.master.LiveTServerSet.Listener;
+import org.apache.accumulo.server.master.state.DistributedStoreException;
+import org.apache.accumulo.server.master.state.MetaDataTableScanner;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.master.state.TabletLocationState;
+import org.apache.accumulo.server.master.state.TabletState;
+import org.apache.accumulo.server.master.state.ZooTabletStateStore;
+import org.apache.accumulo.server.security.SystemCredentials;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
+
+public class FindOfflineTablets {
+  private static final Logger log = Logger.getLogger(FindOfflineTablets.class);
+
+  public static void main(String[] args) throws Exception {
+    ClientOpts opts = new ClientOpts();
+    opts.parseArgs(FindOfflineTablets.class.getName(), args);
+    Instance instance = opts.getInstance();
+    SystemCredentials creds = SystemCredentials.get();
+
+    findOffline(instance, creds, null);
+  }
+
+  static int findOffline(Instance instance, Credentials creds, String tableName) throws AccumuloException, TableNotFoundException {
+
+    final AtomicBoolean scanning = new AtomicBoolean(false);
+
+    LiveTServerSet tservers = new LiveTServerSet(instance, DefaultConfiguration.getDefaultConfiguration(), new Listener() {
+      @Override
+      public void update(LiveTServerSet current, Set<TServerInstance> deleted, Set<TServerInstance> added) {
+        if (!deleted.isEmpty() && scanning.get())
+          log.warn("Tablet servers deleted while scanning: " + deleted);
+        if (!added.isEmpty() && scanning.get())
+          log.warn("Tablet servers added while scanning: " + added);
+      }
+    });
+    tservers.startListeningForTabletServerChanges();
+    scanning.set(true);
+
+    Iterator<TabletLocationState> zooScanner;
+    try {
+      zooScanner = new ZooTabletStateStore().iterator();
+    } catch (DistributedStoreException e) {
+      throw new AccumuloException(e);
+    }
+
+    int offline = 0;
+
+    System.out.println("Scanning zookeeper");
+    if ((offline = checkTablets(zooScanner, tservers)) > 0)
+      return offline;
+
+    if (RootTable.NAME.equals(tableName))
+      return 0;
+
+    System.out.println("Scanning " + RootTable.NAME);
+    Iterator<TabletLocationState> rootScanner = new MetaDataTableScanner(instance, creds, MetadataSchema.TabletsSection.getRange(), RootTable.NAME);
+    if ((offline = checkTablets(rootScanner, tservers)) > 0)
+      return offline;
+
+    if (MetadataTable.NAME.equals(tableName))
+      return 0;
+
+    System.out.println("Scanning " + MetadataTable.NAME);
+
+    Range range = MetadataSchema.TabletsSection.getRange();
+    if (tableName != null) {
+      String tableId = Tables.getTableId(instance, tableName);
+      range = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
+    }
+
+    Iterator<TabletLocationState> metaScanner = new MetaDataTableScanner(instance, creds, range, MetadataTable.NAME);
+    return checkTablets(metaScanner, tservers);
+  }
+
+  private static int checkTablets(Iterator<TabletLocationState> scanner, LiveTServerSet tservers) {
+    int offline = 0;
+
+    while (scanner.hasNext()) {
+      TabletLocationState locationState = scanner.next();
+      TabletState state = locationState.getState(tservers.getCurrentServers());
+      if (state != null && state != TabletState.HOSTED
+          && TableManager.getInstance().getTableState(locationState.extent.getTableId().toString()) != TableState.OFFLINE) {
+        System.out.println(locationState + " is " + state + " #walogs:" + locationState.walogs.size());
+        offline++;
+      }
+    }

+    return offline;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/base/src/main/java/org/apache/accumulo/server/util/Initialize.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/util/Initialize.java
deleted file mode 100644
index 0eb6d36..0000000
--- a/server/base/src/main/java/org/apache/accumulo/server/util/Initialize.java
+++ /dev/null
@@ -1,522 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ -package org.apache.accumulo.server.util; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map.Entry; -import java.util.UUID; - -import jline.console.ConsoleReader; - -import org.apache.accumulo.core.Constants; -import org.apache.accumulo.core.cli.Help; -import org.apache.accumulo.core.client.AccumuloSecurityException; -import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException; -import org.apache.accumulo.core.conf.AccumuloConfiguration; -import org.apache.accumulo.core.conf.Property; -import org.apache.accumulo.core.conf.SiteConfiguration; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.KeyExtent; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.file.FileOperations; -import org.apache.accumulo.core.file.FileSKVWriter; -import org.apache.accumulo.core.iterators.user.VersioningIterator; -import org.apache.accumulo.core.master.state.tables.TableState; -import org.apache.accumulo.core.master.thrift.MasterGoalState; -import org.apache.accumulo.core.metadata.MetadataTable; -import org.apache.accumulo.core.metadata.RootTable; -import org.apache.accumulo.core.metadata.schema.MetadataSchema; -import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection; -import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily; -import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily; -import org.apache.accumulo.core.security.SecurityUtil; -import org.apache.accumulo.core.util.CachedConfiguration; -import org.apache.accumulo.core.zookeeper.ZooUtil; -import org.apache.accumulo.fate.zookeeper.IZooReaderWriter; -import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy; -import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy; -import org.apache.accumulo.server.ServerConstants; -import org.apache.accumulo.server.client.HdfsZooInstance; -import org.apache.accumulo.server.conf.ServerConfiguration; -import org.apache.accumulo.server.fs.VolumeManager; -import org.apache.accumulo.server.fs.VolumeManagerImpl; -import org.apache.accumulo.server.security.AuditedSecurityOperation; -import org.apache.accumulo.server.security.SystemCredentials; -import org.apache.accumulo.server.tables.TableManager; -import org.apache.accumulo.server.tabletserver.TabletTime; -import org.apache.accumulo.server.zookeeper.ZooReaderWriter; -import org.apache.accumulo.tserver.constraints.MetadataConstraints; -import org.apache.accumulo.tserver.iterators.MetadataBulkLoadFilter; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.Text; -import org.apache.log4j.Logger; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.ZooDefs.Ids; - -import com.beust.jcommander.Parameter; - -/** - * This class is used to setup the directory structure and the root tablet to get an instance started - * - */ -public class Initialize { - private static final Logger log = Logger.getLogger(Initialize.class); - private static final String DEFAULT_ROOT_USER = "root"; - public static final String TABLE_TABLETS_TABLET_DIR = "/table_info"; - - private static ConsoleReader reader = null; - - private static ConsoleReader getConsoleReader() throws 
IOException { - if (reader == null) - reader = new ConsoleReader(); - return reader; - } - - private static HashMap initialMetadataConf = new HashMap(); - static { - initialMetadataConf.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "32K"); - initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), "5"); - initialMetadataConf.put(Property.TABLE_WALOG_ENABLED.getKey(), "true"); - initialMetadataConf.put(Property.TABLE_MAJC_RATIO.getKey(), "1"); - initialMetadataConf.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "64M"); - initialMetadataConf.put(Property.TABLE_CONSTRAINT_PREFIX.getKey() + "1", MetadataConstraints.class.getName()); - initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers", "10," + VersioningIterator.class.getName()); - initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers.opt.maxVersions", "1"); - initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers", "10," + VersioningIterator.class.getName()); - initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers.opt.maxVersions", "1"); - initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers", "10," + VersioningIterator.class.getName()); - initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers.opt.maxVersions", "1"); - initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.bulkLoadFilter", "20," + MetadataBulkLoadFilter.class.getName()); - initialMetadataConf.put(Property.TABLE_FAILURES_IGNORE.getKey(), "false"); - initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "tablet", - String.format("%s,%s", TabletsSection.TabletColumnFamily.NAME, TabletsSection.CurrentLocationColumnFamily.NAME)); - initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "server", String.format("%s,%s,%s,%s", DataFileColumnFamily.NAME, - LogColumnFamily.NAME, TabletsSection.ServerColumnFamily.NAME, TabletsSection.FutureLocationColumnFamily.NAME)); - initialMetadataConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "tablet,server"); - initialMetadataConf.put(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), ""); - initialMetadataConf.put(Property.TABLE_INDEXCACHE_ENABLED.getKey(), "true"); - initialMetadataConf.put(Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "true"); - } - - public static boolean doInit(Opts opts, Configuration conf, VolumeManager fs) throws IOException { - if (!ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_DFS_URI).equals("")) - log.info("Hadoop Filesystem is " + ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_DFS_URI)); - else - log.info("Hadoop Filesystem is " + FileSystem.getDefaultUri(conf)); - - log.info("Accumulo data dirs are " + Arrays.asList(ServerConstants.getBaseDirs())); - log.info("Zookeeper server is " + ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_ZK_HOST)); - log.info("Checking if Zookeeper is available. If this hangs, then you need to make sure zookeeper is running"); - if (!zookeeperAvailable()) { - log.fatal("Zookeeper needs to be up and running in order to init. Exiting ..."); - return false; - } - if (ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_SECRET).equals(Property.INSTANCE_SECRET.getDefaultValue())) { - ConsoleReader c = getConsoleReader(); - c.beep(); - c.println(); - c.println(); - c.println("Warning!!! Your instance secret is still set to the default, this is not secure. 
We highly recommend you change it."); - c.println(); - c.println(); - c.println("You can change the instance secret in accumulo by using:"); - c.println(" bin/accumulo " + org.apache.accumulo.server.util.ChangeSecret.class.getName() + " oldPassword newPassword."); - c.println("You will also need to edit your secret in your configuration file by adding the property instance.secret to your conf/accumulo-site.xml. Without this accumulo will not operate correctly"); - } - - try { - if (isInitialized(fs)) { - log.fatal("It appears this location was previously initialized, exiting ... "); - return false; - } - } catch (IOException e) { - throw new RuntimeException(e); - } - - // prompt user for instance name and root password early, in case they - // abort, we don't leave an inconsistent HDFS/ZooKeeper structure - String instanceNamePath; - try { - instanceNamePath = getInstanceNamePath(opts); - } catch (Exception e) { - log.fatal("Failed to talk to zookeeper", e); - return false; - } - opts.rootpass = getRootPassword(opts); - return initialize(opts, instanceNamePath, fs); - } - - public static boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs) { - - UUID uuid = UUID.randomUUID(); - try { - initZooKeeper(opts, uuid.toString(), instanceNamePath); - } catch (Exception e) { - log.fatal("Failed to initialize zookeeper", e); - return false; - } - - try { - initFileSystem(opts, fs, uuid); - } catch (Exception e) { - log.fatal("Failed to initialize filesystem", e); - return false; - } - - try { - initSecurity(opts, uuid.toString()); - } catch (Exception e) { - log.fatal("Failed to initialize security", e); - return false; - } - return true; - } - - private static boolean zookeeperAvailable() { - IZooReaderWriter zoo = ZooReaderWriter.getInstance(); - try { - return zoo.exists("/"); - } catch (KeeperException e) { - return false; - } catch (InterruptedException e) { - return false; - } - } - - private static Path[] paths(String[] paths) { - Path[] result = new Path[paths.length]; - for (int i = 0; i < paths.length; i++) { - result[i] = new Path(paths[i]); - } - return result; - } - - private static T[] concat(T[] a, T[] b) { - List result = new ArrayList(a.length + b.length); - for (int i = 0; i < a.length; i++) { - result.add(a[i]); - } - for (int i = 0; i < b.length; i++) { - result.add(b[i]); - } - return result.toArray(a); - } - - private static void initFileSystem(Opts opts, VolumeManager fs, UUID uuid) throws IOException { - FileStatus fstat; - - // the actual disk locations of the root table and tablets - final Path rootTablet = new Path(ServerConstants.getRootTabletDir()); - - // the actual disk locations of the metadata table and tablets - final Path[] metadataTableDirs = paths(ServerConstants.getMetadataTableDirs()); - final Path[] tableMetadataTabletDirs = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), TABLE_TABLETS_TABLET_DIR)); - final Path[] defaultMetadataTabletDirs = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), Constants.DEFAULT_TABLET_LOCATION)); - - fs.mkdirs(new Path(ServerConstants.getDataVersionLocation(), "" + ServerConstants.DATA_VERSION)); - - // create an instance id - fs.mkdirs(ServerConstants.getInstanceIdLocation()); - fs.createNewFile(new Path(ServerConstants.getInstanceIdLocation(), uuid.toString())); - - // initialize initial metadata config in zookeeper - initMetadataConfig(); - - // create metadata table - for (Path mtd : metadataTableDirs) { - try { - fstat = fs.getFileStatus(mtd); - if (!fstat.isDir()) 
{ - log.fatal("location " + mtd.toString() + " exists but is not a directory"); - return; - } - } catch (FileNotFoundException fnfe) { - if (!fs.mkdirs(mtd)) { - log.fatal("unable to create directory " + mtd.toString()); - return; - } - } - } - - // create root table and tablet - try { - fstat = fs.getFileStatus(rootTablet); - if (!fstat.isDir()) { - log.fatal("location " + rootTablet.toString() + " exists but is not a directory"); - return; - } - } catch (FileNotFoundException fnfe) { - if (!fs.mkdirs(rootTablet)) { - log.fatal("unable to create directory " + rootTablet.toString()); - return; - } - } - - // populate the root tablet with info about the default tablet - // the root tablet contains the key extent and locations of all the - // metadata tablets - String initRootTabFile = rootTablet + "/00000_00000." + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration()); - FileSystem ns = fs.getFileSystemByPath(new Path(initRootTabFile)); - FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, ns, ns.getConf(), AccumuloConfiguration.getDefaultConfiguration()); - mfw.startDefaultLocalityGroup(); - - Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), MetadataSchema.TabletsSection.getRange().getEndKey().getRow())); - - // table tablet's directory - Key tableDirKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(), - TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0); - mfw.append(tableDirKey, new Value(TABLE_TABLETS_TABLET_DIR.getBytes())); - - // table tablet time - Key tableTimeKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(), - TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0); - mfw.append(tableTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes())); - - // table tablet's prevrow - Key tablePrevRowKey = new Key(tableExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(), - TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0); - mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(null)); - - // ----------] default tablet info - Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), null)); - - // default's directory - Key defaultDirKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(), - TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0); - mfw.append(defaultDirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes())); - - // default's time - Key defaultTimeKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(), - TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0); - mfw.append(defaultTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes())); - - // default's prevrow - Key defaultPrevRowKey = new Key(defaultExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(), - TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0); - mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataSchema.TabletsSection.getRange().getEndKey().getRow())); - - mfw.close(); - - // create table and default tablets directories - for (Path dir : concat(defaultMetadataTabletDirs, tableMetadataTabletDirs)) { - try { - fstat = fs.getFileStatus(dir); - if (!fstat.isDir()) { - log.fatal("location " + dir.toString() + " exists but is not a 
directory"); - return; - } - } catch (FileNotFoundException fnfe) { - try { - fstat = fs.getFileStatus(dir); - if (!fstat.isDir()) { - log.fatal("location " + dir.toString() + " exists but is not a directory"); - return; - } - } catch (FileNotFoundException fnfe2) { - // create table info dir - if (!fs.mkdirs(dir)) { - log.fatal("unable to create directory " + dir.toString()); - return; - } - } - - // create default dir - if (!fs.mkdirs(dir)) { - log.fatal("unable to create directory " + dir.toString()); - return; - } - } - } - } - - private static void initZooKeeper(Opts opts, String uuid, String instanceNamePath) throws KeeperException, InterruptedException { - // setup basic data in zookeeper - IZooReaderWriter zoo = ZooReaderWriter.getInstance(); - ZooUtil.putPersistentData(zoo.getZooKeeper(), Constants.ZROOT, new byte[0], -1, NodeExistsPolicy.SKIP, Ids.OPEN_ACL_UNSAFE); - ZooUtil.putPersistentData(zoo.getZooKeeper(), Constants.ZROOT + Constants.ZINSTANCES, new byte[0], -1, NodeExistsPolicy.SKIP, Ids.OPEN_ACL_UNSAFE); - - // setup instance name - if (opts.clearInstanceName) - zoo.recursiveDelete(instanceNamePath, NodeMissingPolicy.SKIP); - zoo.putPersistentData(instanceNamePath, uuid.getBytes(), NodeExistsPolicy.FAIL); - - // setup the instance - String zkInstanceRoot = Constants.ZROOT + "/" + uuid; - zoo.putPersistentData(zkInstanceRoot, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLES, Constants.ZTABLES_INITIAL_ID, NodeExistsPolicy.FAIL); - TableManager.prepareNewTableState(uuid, RootTable.ID, RootTable.NAME, TableState.ONLINE, NodeExistsPolicy.FAIL); - TableManager.prepareNewTableState(uuid, MetadataTable.ID, MetadataTable.NAME, TableState.ONLINE, NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZTSERVERS, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZPROBLEMS, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_WALOGS, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZTRACERS, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTERS, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_LOCK, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_GOAL_STATE, MasterGoalState.NORMAL.toString().getBytes(), NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZGC, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZGC_LOCK, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZCONFIG, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLE_LOCKS, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZHDFS_RESERVATIONS, new byte[0], NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZNEXT_FILE, new byte[] {'0'}, NodeExistsPolicy.FAIL); - zoo.putPersistentData(zkInstanceRoot + Constants.ZRECOVERY, new byte[] {'0'}, NodeExistsPolicy.FAIL); - } - - private static String getInstanceNamePath(Opts opts) throws IOException, KeeperException, InterruptedException { - // setup the instance name - String instanceName, instanceNamePath = null; - boolean exists = true; - do { - if 
(opts.cliInstanceName == null) { - instanceName = getConsoleReader().readLine("Instance name : "); - } else { - instanceName = opts.cliInstanceName; - } - if (instanceName == null) - System.exit(0); - instanceName = instanceName.trim(); - if (instanceName.length() == 0) - continue; - instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + instanceName; - if (opts.clearInstanceName) { - exists = false; - break; - } else if (exists = ZooReaderWriter.getInstance().exists(instanceNamePath)) { - String decision = getConsoleReader().readLine("Instance name \"" + instanceName + "\" exists. Delete existing entry from zookeeper? [Y/N] : "); - if (decision == null) - System.exit(0); - if (decision.length() == 1 && decision.toLowerCase(Locale.ENGLISH).charAt(0) == 'y') { - opts.clearInstanceName = true; - exists = false; - } - } - } while (exists); - return instanceNamePath; - } - - private static byte[] getRootPassword(Opts opts) throws IOException { - if (opts.cliPassword != null) { - return opts.cliPassword.getBytes(); - } - String rootpass; - String confirmpass; - do { - rootpass = getConsoleReader() - .readLine("Enter initial password for " + DEFAULT_ROOT_USER + " (this may not be applicable for your security setup): ", '*'); - if (rootpass == null) - System.exit(0); - confirmpass = getConsoleReader().readLine("Confirm initial password for " + DEFAULT_ROOT_USER + ": ", '*'); - if (confirmpass == null) - System.exit(0); - if (!rootpass.equals(confirmpass)) - log.error("Passwords do not match"); - } while (!rootpass.equals(confirmpass)); - return rootpass.getBytes(); - } - - private static void initSecurity(Opts opts, String iid) throws AccumuloSecurityException, ThriftSecurityException { - AuditedSecurityOperation.getInstance(iid, true).initializeSecurity(SystemCredentials.get().toThrift(HdfsZooInstance.getInstance()), DEFAULT_ROOT_USER, - opts.rootpass); - } - - protected static void initMetadataConfig() throws IOException { - try { - Configuration conf = CachedConfiguration.getInstance(); - int max = conf.getInt("dfs.replication.max", 512); - // Hadoop 0.23 switched the min value configuration name - int min = Math.max(conf.getInt("dfs.replication.min", 1), conf.getInt("dfs.namenode.replication.min", 1)); - if (max < 5) - setMetadataReplication(max, "max"); - if (min > 5) - setMetadataReplication(min, "min"); - for (Entry entry : initialMetadataConf.entrySet()) { - if (!TablePropUtil.setTableProperty(RootTable.ID, entry.getKey(), entry.getValue())) - throw new IOException("Cannot create per-table property " + entry.getKey()); - if (!TablePropUtil.setTableProperty(MetadataTable.ID, entry.getKey(), entry.getValue())) - throw new IOException("Cannot create per-table property " + entry.getKey()); - } - } catch (Exception e) { - log.fatal("error talking to zookeeper", e); - throw new IOException(e); - } - } - - private static void setMetadataReplication(int replication, String reason) throws IOException { - String rep = getConsoleReader().readLine( - "Your HDFS replication " + reason + " is not compatible with our default " + MetadataTable.NAME + " replication of 5. What do you want to set your " - + MetadataTable.NAME + " replication to? 
(" + replication + ") "); - if (rep == null || rep.length() == 0) - rep = Integer.toString(replication); - else - // Lets make sure it's a number - Integer.parseInt(rep); - initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), rep); - } - - public static boolean isInitialized(VolumeManager fs) throws IOException { - return (fs.exists(ServerConstants.getInstanceIdLocation()) || fs.exists(ServerConstants.getDataVersionLocation())); - } - - static class Opts extends Help { - @Parameter(names = "--reset-security", description = "just update the security information") - boolean resetSecurity = false; - @Parameter(names = "--clear-instance-name", description = "delete any existing instance name without prompting") - boolean clearInstanceName = false; - @Parameter(names = "--instance-name", description = "the instance name, if not provided, will prompt") - String cliInstanceName; - @Parameter(names = "--password", description = "set the password on the command line") - String cliPassword; - - byte[] rootpass = null; - } - - public static void main(String[] args) { - Opts opts = new Opts(); - opts.parseArgs(Initialize.class.getName(), args); - - try { - SecurityUtil.serverLogin(); - Configuration conf = CachedConfiguration.getInstance(); - - @SuppressWarnings("deprecation") - VolumeManager fs = VolumeManagerImpl.get(SiteConfiguration.getSiteConfiguration()); - - if (opts.resetSecurity) { - if (isInitialized(fs)) { - opts.rootpass = getRootPassword(opts); - initSecurity(opts, HdfsZooInstance.getInstance().getInstanceID()); - } else { - log.fatal("Attempted to reset security on accumulo before it was initialized"); - } - } else if (!doInit(opts, conf, fs)) - System.exit(-1); - } catch (Exception e) { - log.fatal(e, e); - throw new RuntimeException(e); - } - } -} http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java ---------------------------------------------------------------------- diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java index 952d272..987eba9 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java +++ b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java @@ -48,11 +48,11 @@ import org.apache.accumulo.core.util.ColumnFQ; import org.apache.accumulo.core.util.UtilWaitThread; import org.apache.accumulo.fate.zookeeper.IZooReaderWriter; import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy; -import org.apache.accumulo.master.state.TServerInstance; import org.apache.accumulo.server.client.HdfsZooInstance; import org.apache.accumulo.server.fs.FileRef; import org.apache.accumulo.server.fs.VolumeManager; import org.apache.accumulo.server.fs.VolumeManagerImpl; +import org.apache.accumulo.server.master.state.TServerInstance; import org.apache.accumulo.server.zookeeper.ZooLock; import org.apache.accumulo.server.zookeeper.ZooReaderWriter; import org.apache.hadoop.io.Text; http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java ---------------------------------------------------------------------- diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java b/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java index 8dac384..f9a23a6 100644 --- 
a/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java +++ b/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java @@ -23,7 +23,7 @@ import org.apache.accumulo.core.util.UtilWaitThread; import org.apache.accumulo.server.ServerConstants; import org.apache.accumulo.server.fs.VolumeManager; import org.apache.accumulo.server.fs.VolumeManagerImpl; -import org.apache.accumulo.server.tabletserver.UniqueNameAllocator; +import org.apache.accumulo.server.tablets.UniqueNameAllocator; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; import org.apache.log4j.Logger; http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/base/src/main/java/org/apache/accumulo/tserver/constraints/MetadataConstraints.java ---------------------------------------------------------------------- diff --git a/server/base/src/main/java/org/apache/accumulo/tserver/constraints/MetadataConstraints.java b/server/base/src/main/java/org/apache/accumulo/tserver/constraints/MetadataConstraints.java deleted file mode 100644 index f190cee..0000000 --- a/server/base/src/main/java/org/apache/accumulo/tserver/constraints/MetadataConstraints.java +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.accumulo.tserver.constraints; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; - -import org.apache.accumulo.core.Constants; -import org.apache.accumulo.core.constraints.Constraint; -import org.apache.accumulo.core.data.ColumnUpdate; -import org.apache.accumulo.core.data.KeyExtent; -import org.apache.accumulo.core.data.Mutation; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.metadata.MetadataTable; -import org.apache.accumulo.core.metadata.schema.DataFileValue; -import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection; -import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily; -import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ClonedColumnFamily; -import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily; -import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily; -import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily; -import org.apache.accumulo.core.util.ColumnFQ; -import org.apache.accumulo.core.zookeeper.ZooUtil; -import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator; -import org.apache.accumulo.server.client.HdfsZooInstance; -import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator; -import org.apache.accumulo.server.zookeeper.ZooCache; -import org.apache.accumulo.server.zookeeper.ZooLock; -import org.apache.hadoop.io.Text; -import org.apache.log4j.Logger; - -public class MetadataConstraints implements Constraint { - - private ZooCache zooCache = null; - private String zooRoot = null; - - private static final Logger log = Logger.getLogger(MetadataConstraints.class); - - private static boolean[] validTableNameChars = new boolean[256]; - - { - for (int i = 0; i < 256; i++) { - validTableNameChars[i] = ((i >= 'a' && i <= 'z') || (i >= '0' && i <= '9')) || i == '!'; - } - } - - private static final HashSet validColumnQuals = new HashSet(Arrays.asList(new ColumnFQ[] { - TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN, - TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN, TabletsSection.ServerColumnFamily.TIME_COLUMN, - TabletsSection.ServerColumnFamily.LOCK_COLUMN, TabletsSection.ServerColumnFamily.FLUSH_COLUMN, TabletsSection.ServerColumnFamily.COMPACT_COLUMN})); - - private static final HashSet validColumnFams = new HashSet(Arrays.asList(new Text[] {TabletsSection.BulkFileColumnFamily.NAME, - LogColumnFamily.NAME, ScanFileColumnFamily.NAME, DataFileColumnFamily.NAME, - TabletsSection.CurrentLocationColumnFamily.NAME, TabletsSection.LastLocationColumnFamily.NAME, TabletsSection.FutureLocationColumnFamily.NAME, - ChoppedColumnFamily.NAME, ClonedColumnFamily.NAME})); - - private static boolean isValidColumn(ColumnUpdate cu) { - - if (validColumnFams.contains(new Text(cu.getColumnFamily()))) - return true; - - if (validColumnQuals.contains(new ColumnFQ(cu))) - return true; - - return false; - } - - static private ArrayList addViolation(ArrayList lst, int violation) { - if (lst == null) - lst = new ArrayList(); - lst.add((short) violation); - return lst; - } - - static private ArrayList addIfNotPresent(ArrayList lst, int intViolation) { - if (lst == null) - return addViolation(lst, intViolation); 
- short violation = (short) intViolation; - if (!lst.contains(violation)) - return addViolation(lst, intViolation); - return lst; - } - - @Override - public List check(Environment env, Mutation mutation) { - - ArrayList violations = null; - - Collection colUpdates = mutation.getUpdates(); - - // check the row, it should contains at least one ; or end with < - boolean containsSemiC = false; - - byte[] row = mutation.getRow(); - - // always allow rows that fall within reserved areas - if (row.length > 0 && row[0] == '~') - return null; - if (row.length > 2 && row[0] == '!' && row[1] == '!' && row[2] == '~') - return null; - - for (byte b : row) { - if (b == ';') { - containsSemiC = true; - } - - if (b == ';' || b == '<') - break; - - if (!validTableNameChars[0xff & b]) { - violations = addIfNotPresent(violations, 4); - } - } - - if (!containsSemiC) { - // see if last row char is < - if (row.length == 0 || row[row.length - 1] != '<') { - violations = addIfNotPresent(violations, 4); - } - } else { - if (row.length == 0) { - violations = addIfNotPresent(violations, 4); - } - } - - if (row.length > 0 && row[0] == '!') { - if (row.length < 3 || row[1] != '0' || (row[2] != '<' && row[2] != ';')) { - violations = addIfNotPresent(violations, 4); - } - } - - // ensure row is not less than Constants.METADATA_TABLE_ID - if (new Text(row).compareTo(new Text(MetadataTable.ID)) < 0) { - violations = addViolation(violations, 5); - } - - boolean checkedBulk = false; - - for (ColumnUpdate columnUpdate : colUpdates) { - Text columnFamily = new Text(columnUpdate.getColumnFamily()); - - if (columnUpdate.isDeleted()) { - if (!isValidColumn(columnUpdate)) { - violations = addViolation(violations, 2); - } - continue; - } - - if (columnUpdate.getValue().length == 0 && !columnFamily.equals(ScanFileColumnFamily.NAME)) { - violations = addViolation(violations, 6); - } - - if (columnFamily.equals(DataFileColumnFamily.NAME)) { - try { - DataFileValue dfv = new DataFileValue(columnUpdate.getValue()); - - if (dfv.getSize() < 0 || dfv.getNumEntries() < 0) { - violations = addViolation(violations, 1); - } - } catch (NumberFormatException nfe) { - violations = addViolation(violations, 1); - } catch (ArrayIndexOutOfBoundsException aiooe) { - violations = addViolation(violations, 1); - } - } else if (columnFamily.equals(ScanFileColumnFamily.NAME)) { - - } else if (columnFamily.equals(TabletsSection.BulkFileColumnFamily.NAME)) { - if (!columnUpdate.isDeleted() && !checkedBulk) { - // splits, which also write the time reference, are allowed to write this reference even when - // the transaction is not running because the other half of the tablet is holding a reference - // to the file. - boolean isSplitMutation = false; - // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information, - // but it writes everything. We allow it to re-write the bulk information if it is setting the location. - // See ACCUMULO-1230. 
- boolean isLocationMutation = false; - - HashSet dataFiles = new HashSet(); - HashSet loadedFiles = new HashSet(); - - String tidString = new String(columnUpdate.getValue()); - int otherTidCount = 0; - - for (ColumnUpdate update : mutation.getUpdates()) { - if (new ColumnFQ(update).equals(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN)) { - isSplitMutation = true; - } else if (new Text(update.getColumnFamily()).equals(TabletsSection.CurrentLocationColumnFamily.NAME)) { - isLocationMutation = true; - } else if (new Text(update.getColumnFamily()).equals(DataFileColumnFamily.NAME)) { - dataFiles.add(new Text(update.getColumnQualifier())); - } else if (new Text(update.getColumnFamily()).equals(TabletsSection.BulkFileColumnFamily.NAME)) { - loadedFiles.add(new Text(update.getColumnQualifier())); - - if (!new String(update.getValue()).equals(tidString)) { - otherTidCount++; - } - } - } - - if (!isSplitMutation && !isLocationMutation) { - long tid = Long.parseLong(tidString); - - try { - if (otherTidCount > 0 || !dataFiles.equals(loadedFiles) || !getArbitrator().transactionAlive(Constants.BULK_ARBITRATOR_TYPE, tid)) { - violations = addViolation(violations, 8); - } - } catch (Exception ex) { - violations = addViolation(violations, 8); - } - } - - checkedBulk = true; - } - } else { - if (!isValidColumn(columnUpdate)) { - violations = addViolation(violations, 2); - } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0 - && (violations == null || !violations.contains((short) 4))) { - KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null); - - Text per = KeyExtent.decodePrevEndRow(new Value(columnUpdate.getValue())); - - boolean prevEndRowLessThanEndRow = per == null || ke.getEndRow() == null || per.compareTo(ke.getEndRow()) < 0; - - if (!prevEndRowLessThanEndRow) { - violations = addViolation(violations, 3); - } - } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.ServerColumnFamily.LOCK_COLUMN)) { - if (zooCache == null) { - zooCache = new ZooCache(); - } - - if (zooRoot == null) { - zooRoot = ZooUtil.getRoot(HdfsZooInstance.getInstance()); - } - - boolean lockHeld = false; - String lockId = new String(columnUpdate.getValue()); - - try { - lockHeld = ZooLock.isLockHeld(zooCache, new ZooUtil.LockID(zooRoot, lockId)); - } catch (Exception e) { - log.debug("Failed to verify lock was held " + lockId + " " + e.getMessage()); - } - - if (!lockHeld) { - violations = addViolation(violations, 7); - } - } - - } - } - - if (violations != null) { - log.debug("violating metadata mutation : " + new String(mutation.getRow())); - for (ColumnUpdate update : mutation.getUpdates()) { - log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value " - + (update.isDeleted() ? 
"[delete]" : new String(update.getValue()))); - } - } - - return violations; - } - - protected Arbitrator getArbitrator() { - return new ZooArbitrator(); - } - - @Override - public String getViolationDescription(short violationCode) { - switch (violationCode) { - case 1: - return "data file size must be a non-negative integer"; - case 2: - return "Invalid column name given."; - case 3: - return "Prev end row is greater than or equal to end row."; - case 4: - return "Invalid metadata row format"; - case 5: - return "Row can not be less than " + MetadataTable.ID; - case 6: - return "Empty values are not allowed for any " + MetadataTable.NAME + " column"; - case 7: - return "Lock not held in zookeeper by writer"; - case 8: - return "Bulk load transaction no longer running"; - } - return null; - } - - @Override - protected void finalize() { - if (zooCache != null) - zooCache.clear(); - } - -} http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/base/src/main/java/org/apache/accumulo/tserver/iterators/MetadataBulkLoadFilter.java ---------------------------------------------------------------------- diff --git a/server/base/src/main/java/org/apache/accumulo/tserver/iterators/MetadataBulkLoadFilter.java b/server/base/src/main/java/org/apache/accumulo/tserver/iterators/MetadataBulkLoadFilter.java deleted file mode 100644 index 8c4c4e2..0000000 --- a/server/base/src/main/java/org/apache/accumulo/tserver/iterators/MetadataBulkLoadFilter.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/base/src/main/java/org/apache/accumulo/tserver/iterators/MetadataBulkLoadFilter.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/tserver/iterators/MetadataBulkLoadFilter.java b/server/base/src/main/java/org/apache/accumulo/tserver/iterators/MetadataBulkLoadFilter.java
deleted file mode 100644
index 8c4c4e2..0000000
--- a/server/base/src/main/java/org/apache/accumulo/tserver/iterators/MetadataBulkLoadFilter.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.tserver.iterators;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.Filter;
-import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
-import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
-import org.apache.log4j.Logger;
-
-/**
- * A special iterator for the metadata table that removes inactive bulk load flags
- *
- */
-public class MetadataBulkLoadFilter extends Filter {
-  private static Logger log = Logger.getLogger(MetadataBulkLoadFilter.class);
-
-  enum Status {
-    ACTIVE, INACTIVE
-  }
-
-  Map<Long,Status> bulkTxStatusCache;
-  Arbitrator arbitrator;
-
-  @Override
-  public boolean accept(Key k, Value v) {
-    if (!k.isDeleted() && k.compareColumnFamily(TabletsSection.BulkFileColumnFamily.NAME) == 0) {
-      long txid = Long.valueOf(v.toString());
-
-      Status status = bulkTxStatusCache.get(txid);
-      if (status == null) {
-        try {
-          if (arbitrator.transactionComplete(Constants.BULK_ARBITRATOR_TYPE, txid)) {
-            status = Status.INACTIVE;
-          } else {
-            status = Status.ACTIVE;
-          }
-        } catch (Exception e) {
-          status = Status.ACTIVE;
-          log.error(e, e);
-        }
-
-        bulkTxStatusCache.put(txid, status);
-      }
-
-      return status == Status.ACTIVE;
-    }
-
-    return true;
-  }
-
-  @Override
-  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
-    super.init(source, options, env);
-
-    if (env.getIteratorScope() == IteratorScope.scan) {
-      throw new IOException("This iterator not intended for use at scan time");
-    }
-
-    bulkTxStatusCache = new HashMap<Long,Status>();
-    arbitrator = getArbitrator();
-  }
-
-  protected Arbitrator getArbitrator() {
-    return new ZooArbitrator();
-  }
-}
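The filter deleted above caches a Status per bulk-load transaction id and resolves unknown ids through its protected getArbitrator() hook, which normally returns a ZooArbitrator. A small sketch of how that hook can be stubbed so the caching logic can be exercised without ZooKeeper follows; the stub classes are hypothetical and mirror the approach of the MetadataBulkLoadFilterTest change shown next. Driving accept() end to end would additionally require init() to run against an in-memory source under a non-scan iterator scope, since init() rejects scan-time use.

import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
import org.apache.accumulo.server.iterators.MetadataBulkLoadFilter;

// Hypothetical stub: pretend transaction 5 is the only bulk load still running.
class StubArbitrator implements Arbitrator {
  @Override
  public boolean transactionAlive(String type, long tid) {
    return tid == 5;
  }

  @Override
  public boolean transactionComplete(String type, long tid) {
    return tid != 5;
  }
}

// Hypothetical test double for the relocated filter: swap the ZooArbitrator for
// the stub, so bulk load flags of finished transactions are treated as INACTIVE
// and filtered out, while the flag for transaction 5 is kept.
class StubMetadataBulkLoadFilter extends MetadataBulkLoadFilter {
  @Override
  protected Arbitrator getArbitrator() {
    return new StubArbitrator();
  }
}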
http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java b/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
index fbae24c..d0b2e9e 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
@@ -28,7 +28,7 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
-import org.apache.accumulo.tserver.constraints.MetadataConstraints;
+import org.apache.accumulo.server.constraints.MetadataConstraints;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java b/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
index 1b95531..4a45e99 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
@@ -34,7 +34,7 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.util.ColumnFQ;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
-import org.apache.accumulo.tserver.iterators.MetadataBulkLoadFilter;
+import org.apache.accumulo.server.iterators.MetadataBulkLoadFilter;
 import org.apache.hadoop.io.Text;
 import org.junit.Assert;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/Master.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/Master.java b/server/master/src/main/java/org/apache/accumulo/master/Master.java
index 511415a..be8bd34 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/Master.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/Master.java
@@ -89,24 +89,11 @@ import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.master.LiveTServerSet.TServerConnection;
 import org.apache.accumulo.master.balancer.DefaultLoadBalancer;
 import org.apache.accumulo.master.balancer.TabletBalancer;
 import org.apache.accumulo.master.recovery.RecoveryManager;
-import org.apache.accumulo.master.state.CurrentState;
-import org.apache.accumulo.master.state.DeadServerList;
-import org.apache.accumulo.master.state.MergeInfo;
-import org.apache.accumulo.master.state.MergeState;
-import org.apache.accumulo.master.state.MetaDataStateStore;
-import org.apache.accumulo.master.state.RootTabletStateStore;
-import org.apache.accumulo.master.state.TServerInstance;
 import org.apache.accumulo.master.state.TableCounts;
-import org.apache.accumulo.master.state.TabletLocationState;
 import org.apache.accumulo.master.state.TabletMigration;
-import org.apache.accumulo.master.state.TabletServerState;
-import org.apache.accumulo.master.state.TabletState;
-import org.apache.accumulo.master.state.ZooStore;
-import org.apache.accumulo.master.state.ZooTabletStateStore;
 import org.apache.accumulo.master.tableOps.BulkImport;
 import org.apache.accumulo.master.tableOps.CancelCompactions;
 import org.apache.accumulo.master.tableOps.ChangeTableState;
@@ -127,6 +114,20 @@ import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
+import org.apache.accumulo.server.master.LiveTServerSet;
+import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
+import org.apache.accumulo.server.master.state.CurrentState;
+import org.apache.accumulo.server.master.state.DeadServerList;
+import org.apache.accumulo.server.master.state.MergeInfo;
+import org.apache.accumulo.server.master.state.MergeState;
+import org.apache.accumulo.server.master.state.MetaDataStateStore;
+import org.apache.accumulo.server.master.state.RootTabletStateStore;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.master.state.TabletLocationState;
+import org.apache.accumulo.server.master.state.TabletServerState;
+import org.apache.accumulo.server.master.state.TabletState;
+import org.apache.accumulo.server.master.state.ZooStore;
+import org.apache.accumulo.server.master.state.ZooTabletStateStore;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityOperation;
 import org.apache.accumulo.server.security.SystemCredentials;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
index ee31cb9..e1ca031 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
@@ -55,23 +55,23 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.master.LiveTServerSet.TServerConnection;
 import org.apache.accumulo.master.Master.TabletGoalState;
-import org.apache.accumulo.master.state.Assignment;
-import org.apache.accumulo.master.state.DistributedStoreException;
-import org.apache.accumulo.master.state.MergeInfo;
-import org.apache.accumulo.master.state.MergeState;
 import org.apache.accumulo.master.state.MergeStats;
-import org.apache.accumulo.master.state.TServerInstance;
 import org.apache.accumulo.master.state.TableCounts;
 import org.apache.accumulo.master.state.TableStats;
-import org.apache.accumulo.master.state.TabletLocationState;
-import org.apache.accumulo.master.state.TabletState;
-import org.apache.accumulo.master.state.TabletStateStore;
 import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
+import org.apache.accumulo.server.master.state.Assignment;
+import org.apache.accumulo.server.master.state.DistributedStoreException;
+import org.apache.accumulo.server.master.state.MergeInfo;
+import org.apache.accumulo.server.master.state.MergeState;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.master.state.TabletLocationState;
+import org.apache.accumulo.server.master.state.TabletState;
+import org.apache.accumulo.server.master.state.TabletStateStore;
 import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.tabletserver.TabletTime;
+import org.apache.accumulo.server.tablets.TabletTime;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.hadoop.io.Text;
 import org.apache.thrift.TException;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/balancer/ChaoticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/balancer/ChaoticLoadBalancer.java b/server/master/src/main/java/org/apache/accumulo/master/balancer/ChaoticLoadBalancer.java
index e0ecc59..c467810 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/balancer/ChaoticLoadBalancer.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/balancer/ChaoticLoadBalancer.java
@@ -31,9 +31,9 @@ import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
-import org.apache.accumulo.master.state.TServerInstance;
 import org.apache.accumulo.master.state.TabletMigration;
 import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.thrift.TException;
 
 /**

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/balancer/DefaultLoadBalancer.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/balancer/DefaultLoadBalancer.java b/server/master/src/main/java/org/apache/accumulo/master/balancer/DefaultLoadBalancer.java
index 571316e..4999439 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/balancer/DefaultLoadBalancer.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/balancer/DefaultLoadBalancer.java
@@ -30,8 +30,8 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
-import org.apache.accumulo.master.state.TServerInstance;
 import org.apache.accumulo.master.state.TabletMigration;
+import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.log4j.Logger;
 
 public class DefaultLoadBalancer extends TabletBalancer {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/balancer/TableLoadBalancer.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/balancer/TableLoadBalancer.java b/server/master/src/main/java/org/apache/accumulo/master/balancer/TableLoadBalancer.java
index 64dc6de..37f7075 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/balancer/TableLoadBalancer.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/balancer/TableLoadBalancer.java
@@ -31,8 +31,8 @@ import org.apache.accumulo.core.client.admin.TableOperations;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.master.state.TServerInstance;
 import org.apache.accumulo.master.state.TabletMigration;
+import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 import org.apache.log4j.Logger;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/balancer/TabletBalancer.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/balancer/TabletBalancer.java b/server/master/src/main/java/org/apache/accumulo/master/balancer/TabletBalancer.java
index a776656..2a85de9 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/balancer/TabletBalancer.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/balancer/TabletBalancer.java
@@ -29,9 +29,9 @@ import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
 import org.apache.accumulo.core.util.ThriftUtil;
-import org.apache.accumulo.master.state.TServerInstance;
 import org.apache.accumulo.master.state.TabletMigration;
 import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.log4j.Logger;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java b/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
index 9312cce..6b692d8 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
@@ -32,14 +32,14 @@ import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.master.state.CurrentState;
-import org.apache.accumulo.master.state.MergeInfo;
-import org.apache.accumulo.master.state.MergeState;
-import org.apache.accumulo.master.state.MetaDataTableScanner;
-import org.apache.accumulo.master.state.TabletLocationState;
-import org.apache.accumulo.master.state.TabletState;
-import org.apache.accumulo.master.state.TabletLocationState.BadLocationStateException;
 import org.apache.accumulo.server.cli.ClientOpts;
+import org.apache.accumulo.server.master.state.CurrentState;
+import org.apache.accumulo.server.master.state.MergeInfo;
+import org.apache.accumulo.server.master.state.MergeState;
+import org.apache.accumulo.server.master.state.MetaDataTableScanner;
+import org.apache.accumulo.server.master.state.TabletLocationState;
+import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
+import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Text;
@@ -254,7 +254,7 @@ public class MergeStats {
         in.reset(data, data.length);
         info.readFields(in);
       }
-      System.out.println(String.format("%25s %10s %10s %s", table, info.state, info.operation, info.extent));
+      System.out.println(String.format("%25s %10s %10s %s", table, info.getState(), info.getOperation(), info.getExtent()));
     }
   }
 }
http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/state/TableCounts.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/TableCounts.java b/server/master/src/main/java/org/apache/accumulo/master/state/TableCounts.java
index 2d1e186..4ebd745 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/TableCounts.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/TableCounts.java
@@ -16,7 +16,7 @@
  */
 package org.apache.accumulo.master.state;
 
-import org.apache.accumulo.master.state.TabletState;
+import org.apache.accumulo.server.master.state.TabletState;
 
 public class TableCounts {
   int counts[] = new int[TabletState.values().length];

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/state/TableStats.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/TableStats.java b/server/master/src/main/java/org/apache/accumulo/master/state/TableStats.java
index 12b6ec8..f088a5d 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/TableStats.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/TableStats.java
@@ -19,7 +19,7 @@ package org.apache.accumulo.master.state;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.accumulo.master.state.TabletState;
+import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.hadoop.io.Text;
 
 public class TableStats {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/state/TabletMigration.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/TabletMigration.java b/server/master/src/main/java/org/apache/accumulo/master/state/TabletMigration.java
index aab7520..255ef7a 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/TabletMigration.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/TabletMigration.java
@@ -17,7 +17,7 @@ package org.apache.accumulo.master.state;
 
 import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.master.state.TServerInstance;
+import org.apache.accumulo.server.master.state.TServerInstance;
 
 public class TabletMigration {
   public KeyExtent tablet;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
index 6d75a45..eabe81f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
@@ -62,14 +62,14 @@ import org.apache.accumulo.core.util.SimpleThreadPool;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.master.LiveTServerSet.TServerConnection;
-import org.apache.accumulo.master.state.TServerInstance;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
+import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.security.SystemCredentials;
-import org.apache.accumulo.server.tabletserver.UniqueNameAllocator;
+import org.apache.accumulo.server.tablets.UniqueNameAllocator;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
index c57549d..ab737a6 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
@@ -45,9 +45,10 @@ import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.master.LiveTServerSet.TServerConnection;
-import org.apache.accumulo.master.state.TServerInstance;
 import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.master.tableOps.CompactionIterators;
 import org.apache.accumulo.server.util.MapCounter;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.commons.codec.binary.Hex;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
index ec3f2aa..aea4744 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
@@ -38,7 +38,7 @@ import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityOperation;
 import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.tabletserver.TabletTime;
+import org.apache.accumulo.server.tablets.TabletTime;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.util.TablePropUtil;
 import org.apache.accumulo.server.util.TabletOperations;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
index 718015c..4271257 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
@@ -39,11 +39,11 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Da
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.master.state.MetaDataTableScanner;
-import org.apache.accumulo.master.state.TabletLocationState;
-import org.apache.accumulo.master.state.TabletState;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.master.state.MetaDataTableScanner;
+import org.apache.accumulo.server.master.state.TabletLocationState;
+import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SystemCredentials;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
index d86b5f4..bcd9e9f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
@@ -61,7 +61,7 @@ import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityOperation;
 import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.tabletserver.UniqueNameAllocator;
+import org.apache.accumulo.server.tablets.UniqueNameAllocator;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.util.TablePropUtil;
 import org.apache.hadoop.fs.FileStatus;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3ddf9b6/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
index 5fa2eb4..0ad2196 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
@@ -24,9 +24,9 @@ import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.master.state.MergeInfo;
-import org.apache.accumulo.master.state.MergeState;
-import org.apache.accumulo.master.state.MergeInfo.Operation;
+import org.apache.accumulo.server.master.state.MergeInfo;
+import org.apache.accumulo.server.master.state.MergeState;
+import org.apache.accumulo.server.master.state.MergeInfo.Operation;
 import org.apache.hadoop.io.Text;
 
 /**