From: jmark99@apache.org
To: commits@accumulo.apache.org
Date: Fri, 07 Sep 2018 20:37:26 +0000
Subject: [accumulo] 01/02: #573 Merged in changes from GH-573

This is an automated email from the ASF dual-hosted git repository.
jmark99 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit ba8009f9a7b00e65a7ea515137ee7095cae517a1
Author: Mark Owens
AuthorDate: Fri Sep 7 15:43:50 2018 -0400

    #573 Merged in changes from GH-573
---
 .../core/client/admin/InitialTableState.java       |  35 +-
 .../core/client/admin/NewTableConfiguration.java   |  64 ++-
 .../core/client/impl/TableOperationsImpl.java      |  17 +-
 .../org/apache/accumulo/core/conf/Property.java    |   2 +-
 .../client/admin/NewTableConfigurationTest.java    | 100 +++++
 .../apache/accumulo/master/FateServiceHandler.java |  93 +++-
 .../java/org/apache/accumulo/master/Master.java    |  16 +-
 .../apache/accumulo/master/tableOps/ChooseDir.java |  64 ++-
 .../apache/accumulo/master/tableOps/CreateDir.java |  33 +-
 .../accumulo/master/tableOps/CreateTable.java      |   7 +-
 .../master/tableOps/FinishCreateTable.java         |  30 +-
 .../accumulo/master/tableOps/PopulateMetadata.java |  63 ++-
 .../apache/accumulo/master/tableOps/TableInfo.java |  11 +-
 .../org/apache/accumulo/master/tableOps/Utils.java |  28 ++
 .../shell/commands/CreateTableCommand.java         |  29 +-
 .../org/apache/accumulo/test/ShellServerIT.java    | 490 ++++++++++++++++++++-
 .../test/functional/CreateInitialSplitsIT.java     | 246 +++++
 17 files changed, 1269 insertions(+), 59 deletions(-)

diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java b/core/src/main/java/org/apache/accumulo/core/client/admin/InitialTableState.java
similarity index 63%
copy from server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java
copy to core/src/main/java/org/apache/accumulo/core/client/admin/InitialTableState.java
index 8854e4f..eb12bc9 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/InitialTableState.java
@@ -14,25 +14,20 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.accumulo.master.tableOps;
+package org.apache.accumulo.core.client.admin;
 
-import java.io.Serializable;
-import java.util.Map;
-
-import org.apache.accumulo.core.client.impl.Namespace;
-import org.apache.accumulo.core.client.impl.Table;
-
-class TableInfo implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  String tableName;
-  Table.ID tableId;
-  Namespace.ID namespaceId;
-  char timeType;
-  String user;
-
-  public Map<String,String> props;
-
-  public String dir = null;
+/**
+ * Creation mode to be used during table creation.
+ *
+ * @since 2.0.0
+ */
+public enum InitialTableState {
+  /**
+   * Used if the table is to be created in OFFLINE mode.
+   */
+  OFFLINE,
+  /**
+   * Used if the table is to be created in ONLINE mode.
+   */
+  ONLINE
 }
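Taken together, the client-facing pieces of this change are NewTableConfiguration.withSplits(), createOffline(), and the matching getters. A minimal sketch of how client code might use them (the class name, table name, and split points are placeholders, not from the patch; a live Connector is assumed):

    import java.util.SortedSet;
    import java.util.TreeSet;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.admin.NewTableConfiguration;
    import org.apache.hadoop.io.Text;

    class PreSplitTableExample {
      // Creates "mytable" pre-split into four tablets, left OFFLINE until brought online.
      static void create(Connector conn) throws Exception {
        SortedSet<Text> splits = new TreeSet<>();
        splits.add(new Text("g"));
        splits.add(new Text("n"));
        splits.add(new Text("t"));
        NewTableConfiguration ntc = new NewTableConfiguration()
            .withSplits(splits)   // splits are created as part of the create-table FATE operation
            .createOffline();     // initial state OFFLINE; omit for the ONLINE default
        conn.tableOperations().create("mytable", ntc);
      }
    }

The shell exposes the same behavior through the createtable options touched further below: -sf <split file> supplies the splits, and the new -o/--offline flag creates the table offline.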
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/NewTableConfiguration.java b/core/src/main/java/org/apache/accumulo/core/client/admin/NewTableConfiguration.java
index 9c97f54..01b2fdd 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/NewTableConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/NewTableConfiguration.java
@@ -20,6 +20,7 @@ import static com.google.common.base.Preconditions.checkArgument;
 import static java.util.Objects.requireNonNull;
 
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -27,7 +28,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
 import java.util.Set;
-import java.util.stream.Collectors;
+import java.util.SortedSet;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.IteratorSetting;
@@ -44,6 +45,8 @@ import org.apache.accumulo.core.summary.SummarizerConfigurationUtil;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.hadoop.io.Text;
 
+import com.google.common.collect.ImmutableSortedSet;
+
 /**
  * This object stores table creation parameters. Currently includes: {@link TimeType}, whether to
  * include default iterators, and user-specified initial properties
@@ -55,13 +58,17 @@ public class NewTableConfiguration {
   private static final TimeType DEFAULT_TIME_TYPE = TimeType.MILLIS;
   private TimeType timeType = DEFAULT_TIME_TYPE;
 
+  private static final InitialTableState DEFAULT_CREATION_MODE = InitialTableState.ONLINE;
+  private InitialTableState initialTableState = DEFAULT_CREATION_MODE;
+
   private boolean limitVersion = true;
 
   private Map<String,String> properties = Collections.emptyMap();
   private Map<String,String> samplerProps = Collections.emptyMap();
   private Map<String,String> summarizerProps = Collections.emptyMap();
   private Map<String,String> localityProps = Collections.emptyMap();
-  private Map<String,String> iteratorProps = new HashMap<>();
+  private final Map<String,String> iteratorProps = new HashMap<>();
+  private SortedSet<Text> splitProps = Collections.emptySortedSet();
 
   private void checkDisjoint(Map<String,String> props, Map<String,String> derivedProps,
       String kind) {
@@ -104,6 +111,29 @@ public class NewTableConfiguration {
   }
 
   /**
+   * Create the new table in an offline state.
+   *
+   * @return this
+   *
+   * @since 2.0.0
+   */
+  public NewTableConfiguration createOffline() {
+    this.initialTableState = InitialTableState.OFFLINE;
+    return this;
+  }
+
+  /**
+   * Return the initial table state, indicating whether the table is to be created in offline or
+   * online mode.
+   *
+   * @return the initial table state, either ONLINE or OFFLINE
+   *
+   * @since 2.0.0
+   */
+  public InitialTableState getInitialTableState() {
+    return initialTableState;
+  }
+
+  /**
    * Sets additional properties to be applied to tables created with this configuration. Additional
    * calls to this method replace properties set by previous calls.
    *
@@ -143,6 +173,17 @@ public class NewTableConfiguration {
   }
 
   /**
+   * Return the Collection of split values.
+   *
+   * @return Collection containing the splits associated with this NewTableConfiguration object
+   *
+   * @since 2.0.0
+   */
+  public Collection<Text> getSplits() {
+    return splitProps;
+  }
+
+  /**
    * Enable building a sample data set on the new table using the given sampler configuration.
* * @since 1.8.0 @@ -192,14 +233,29 @@ public class NewTableConfiguration { String value = LocalityGroupUtil.encodeColumnFamilies(colFams); tmp.put(Property.TABLE_LOCALITY_GROUP_PREFIX + entry.getKey(), value); } - tmp.put(Property.TABLE_LOCALITY_GROUPS.getKey(), - groups.keySet().stream().collect(Collectors.joining(","))); + tmp.put(Property.TABLE_LOCALITY_GROUPS.getKey(), String.join(",", groups.keySet())); checkDisjoint(properties, tmp, "locality groups"); localityProps = tmp; return this; } /** + * Create a new table with pre-configured splits from the provided input collection. + * + * @param splits + * A SortedSet of String values to be used as split points in a newly created table. + * @return this + * + * @since 2.0.0 + */ + public NewTableConfiguration withSplits(final SortedSet splits) { + checkArgument(splits != null, "splits set is null"); + checkArgument(!splits.isEmpty(), "splits set is empty"); + this.splitProps = ImmutableSortedSet.copyOf(splits); + return this; + } + + /** * Configure iterator settings for a table prior to its creation. * * Additional calls to this method before table creation will overwrite previous iterator diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java index d392ded..1cbd2ab 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java +++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java @@ -235,8 +235,21 @@ public class TableOperationsImpl extends TableOperationsHelper { checkArgument(tableName != null, "tableName is null"); checkArgument(ntc != null, "ntc is null"); - List args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)), - ByteBuffer.wrap(ntc.getTimeType().name().getBytes(UTF_8))); + List args = new ArrayList<>(); + args.add(ByteBuffer.wrap(tableName.getBytes(UTF_8))); + args.add(ByteBuffer.wrap(ntc.getTimeType().name().getBytes(UTF_8))); + // Send info relating to initial table creation i.e, create online or offline + args.add(ByteBuffer.wrap(ntc.getInitialTableState().name().getBytes(UTF_8))); + // Check for possible initial splits to be added at table creation + // Always send number of initial splits to be created, even if zero. If greater than zero, + // add the splits to the argument List which will be used by the FATE operations. + int numSplits = ntc.getSplits().size(); + args.add(ByteBuffer.wrap(String.valueOf(numSplits).getBytes(UTF_8))); + if (numSplits > 0) { + for (Text t : ntc.getSplits()) { + args.add(TextUtil.getByteBuffer(t)); + } + } Map opts = ntc.getProperties(); diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java index 23b8f01..0c3157f 100644 --- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java +++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java @@ -810,7 +810,7 @@ public enum Property { + " summarizer configured should have a unique id, this id can be anything." 
+ " To add a summarizer set " + "`table.summarizer.=.` If the summarizer has options" - + ", then for each option set" + " `table.summarizer..opt.=`."), + + ", then for each option set `table.summarizer..opt.=`."), @Experimental TABLE_DELETE_BEHAVIOR("table.delete.behavior", DeletingIterator.Behavior.PROCESS.name().toLowerCase(), PropertyType.STRING, diff --git a/core/src/test/java/org/apache/accumulo/core/client/admin/NewTableConfigurationTest.java b/core/src/test/java/org/apache/accumulo/core/client/admin/NewTableConfigurationTest.java new file mode 100644 index 0000000..738e335 --- /dev/null +++ b/core/src/test/java/org/apache/accumulo/core/client/admin/NewTableConfigurationTest.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.accumulo.core.client.admin; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.Collection; +import java.util.Iterator; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.apache.hadoop.io.Text; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class NewTableConfigurationTest { + + private static final Logger log = LoggerFactory.getLogger(NewTableConfigurationTest.class); + + private SortedSet splits; + + @Before + public void populateSplits() { + splits = new TreeSet(); + splits.add(new Text("ccccc")); + splits.add(new Text("aaaaa")); + splits.add(new Text("ddddd")); + splits.add(new Text("abcde")); + splits.add(new Text("bbbbb")); + } + + /** + * Verify the withSplits/getSplits methods do as expected. + * + * The withSplits() takes a SortedSet as its input. Verify that the set orders the data even if + * input non-ordered. + * + * The getSplits should return a SortedSet. Test verifies set performs ordering and the input set + * and output set are equal. 
+   */
+  @Test
+  public void testWithAndGetSplits() {
+    NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
+    Collection<Text> ntcSplits = ntc.getSplits();
+    Iterator<Text> splitIt = splits.iterator();
+    Iterator<Text> ntcIt = ntcSplits.iterator();
+    while (splitIt.hasNext() && ntcIt.hasNext()) {
+      assertEquals(splitIt.next(), ntcIt.next());
+    }
+    // verify splits is in sorted order
+    Iterator<Text> it = splits.iterator();
+    Text current = new Text("");
+    while (it.hasNext()) {
+      Text nxt = it.next();
+      assertTrue(current.toString().compareTo(nxt.toString()) < 0);
+      current = nxt;
+    }
+    // verify ntcSplits is in sorted order
+    Iterator<Text> it2 = ntcSplits.iterator();
+    current = new Text("");
+    while (it2.hasNext()) {
+      Text nxt = it2.next();
+      assertTrue(current.toString().compareTo(nxt.toString()) < 0);
+      current = nxt;
+    }
+
+    NewTableConfiguration ntc2 = new NewTableConfiguration();
+    Collection<Text> ntc2Splits = ntc2.getSplits();
+    assertTrue(ntc2Splits.isEmpty());
+  }
+
+  /**
+   * Verify that the createOffline option sets the initial table state to OFFLINE, and that the
+   * default is ONLINE.
+   */
+  @Test
+  public void testCreateOffline() {
+    NewTableConfiguration ntcOffline = new NewTableConfiguration().createOffline();
+    assertEquals(InitialTableState.OFFLINE, ntcOffline.getInitialTableState());
+    NewTableConfiguration ntcOnline = new NewTableConfiguration();
+    assertEquals(InitialTableState.ONLINE, ntcOnline.getInitialTableState());
+  }
+}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java b/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
index cda934c..47dcd0b 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
@@ -22,7 +22,9 @@ import static org.apache.accumulo.master.util.TableValidators.NOT_SYSTEM;
 import static org.apache.accumulo.master.util.TableValidators.VALID_ID;
 import static org.apache.accumulo.master.util.TableValidators.VALID_NAME;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.Base64;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -35,6 +37,7 @@ import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.NamespaceNotFoundException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
+import org.apache.accumulo.core.client.admin.InitialTableState;
 import org.apache.accumulo.core.client.admin.TimeType;
 import org.apache.accumulo.core.client.impl.CompactionStrategyConfigUtil;
 import org.apache.accumulo.core.client.impl.Namespace;
@@ -55,6 +58,7 @@ import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.trace.thrift.TInfo;
 import org.apache.accumulo.core.util.ByteBufferUtil;
 import org.apache.accumulo.core.util.Validator;
+import org.apache.accumulo.core.volume.Volume;
 import org.apache.accumulo.fate.ReadOnlyTStore.TStatus;
 import org.apache.accumulo.master.tableOps.CancelCompactions;
 import org.apache.accumulo.master.tableOps.ChangeTableState;
@@ -74,6 +78,9 @@ import org.apache.accumulo.master.tableOps.bulkVer2.PrepBulkImport;
 import org.apache.accumulo.server.client.ClientServiceHandler;
 import org.apache.accumulo.server.master.state.MergeInfo;
 import org.apache.accumulo.server.util.TablePropUtil;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import
org.slf4j.Logger; @@ -145,7 +152,22 @@ class FateServiceHandler implements FateService.Iface { TableOperation tableOp = TableOperation.CREATE; String tableName = validateTableNameArgument(arguments.get(0), tableOp, NOT_SYSTEM); TimeType timeType = TimeType.valueOf(ByteBufferUtil.toString(arguments.get(1))); - + InitialTableState initialTableState = InitialTableState + .valueOf(ByteBufferUtil.toString(arguments.get(2))); + int splitCount = Integer.parseInt(ByteBufferUtil.toString(arguments.get(3))); + String splitFile = null; + String splitDirsFile = null; + if (splitCount > 0) { + int SPLIT_OFFSET = 4; // offset where split data begins in arguments list + try { + splitFile = writeSplitsToFile(opid, arguments, splitCount, SPLIT_OFFSET); + splitDirsFile = createSplitDirsFile(opid); + } catch (IOException e) { + throw new ThriftTableOperationException(null, tableName, tableOp, + TableOperationExceptionType.OTHER, + "Exception thrown while writing splits to file system"); + } + } Namespace.ID namespaceId; try { @@ -160,8 +182,8 @@ class FateServiceHandler implements FateService.Iface { throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED); master.fate.seedTransaction(opid, - new TraceRepo<>( - new CreateTable(c.getPrincipal(), tableName, timeType, options, namespaceId)), + new TraceRepo<>(new CreateTable(c.getPrincipal(), tableName, timeType, options, + splitFile, splitCount, splitDirsFile, initialTableState, namespaceId)), autoCleanup); break; @@ -674,4 +696,69 @@ class FateServiceHandler implements FateService.Iface { TableOperationExceptionType.INVALID_NAME, why); } } + + /** + * Create a file on the file system to hold the splits to be created at table creation. + */ + private String writeSplitsToFile(final long opid, final List arguments, + final int splitCount, final int splitOffset) throws IOException { + String opidStr = String.format("%016x", opid); + String splitsPath = getSplitPath("/tmp/splits-" + opidStr); + removeAndCreateTempFile(splitsPath); + try (FSDataOutputStream stream = master.getOutputStream(splitsPath)) { + writeSplitsToFileSystem(stream, arguments, splitCount, splitOffset); + } catch (IOException e) { + log.error("Error in FateServiceHandler while writing splits for opid: " + opidStr + ": " + + e.getMessage()); + throw e; + } + return splitsPath; + } + + /** + * Always check for and delete the splits file if it exists to prevent issues in case of server + * failure and/or FateServiceHandler retries. + */ + private void removeAndCreateTempFile(String path) throws IOException { + FileSystem fs = master.getFileSystem().getDefaultVolume().getFileSystem(); + if (fs.exists(new Path(path))) + fs.delete(new Path(path), true); + fs.create(new Path(path)); + } + + /** + * Check for and delete the temp file if it exists to prevent issues in case of server failure + * and/or FateServiceHandler retries. Then create/recreate the file. + */ + private String createSplitDirsFile(final long opid) throws IOException { + String opidStr = String.format("%016x", opid); + String splitDirPath = getSplitPath("/tmp/splitDirs-" + opidStr); + removeAndCreateTempFile(splitDirPath); + return splitDirPath; + } + + /** + * Write the split values to a tmp directory with unique name. Given that it is not known if the + * supplied splits will be textual or binary, all splits will be encoded to enable proper handling + * of binary data. 
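Since split points may be binary, each one is written as a single Base64 line and decoded again on the read side (see Utils.getSortedSetFromFile further below). A minimal, self-contained sketch of that round trip, with an illustrative value:

    import java.util.Arrays;
    import java.util.Base64;

    class SplitLineRoundTrip {
      public static void main(String[] args) {
        byte[] split = {0x00, 0x41, (byte) 0xff};                 // a split value, possibly binary
        String line = Base64.getEncoder().encodeToString(split);  // one newline-free line per split
        byte[] decoded = Base64.getDecoder().decode(line);        // what the reader recovers
        System.out.println(Arrays.equals(split, decoded));        // prints: true
      }
    }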
+ */ + private void writeSplitsToFileSystem(final FSDataOutputStream stream, + final List arguments, final int splitCount, final int splitOffset) + throws IOException { + for (int i = splitOffset; i < splitCount + splitOffset; i++) { + byte[] splitBytes = ByteBufferUtil.toBytes(arguments.get(i)); + String encodedSplit = Base64.getEncoder().encodeToString(splitBytes); + stream.writeBytes(encodedSplit + '\n'); + } + } + + /** + * Get full path to location where initial splits are stored on file system. + */ + private String getSplitPath(String relPath) { + Volume defaultVolume = master.getFileSystem().getDefaultVolume(); + String uri = defaultVolume.getFileSystem().getUri().toString(); + String basePath = defaultVolume.getBasePath(); + return uri + basePath + relPath; + } } diff --git a/server/master/src/main/java/org/apache/accumulo/master/Master.java b/server/master/src/main/java/org/apache/accumulo/master/Master.java index 043e498..d422a31 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/Master.java +++ b/server/master/src/main/java/org/apache/accumulo/master/Master.java @@ -148,6 +148,9 @@ import org.apache.accumulo.server.zookeeper.ZooLock; import org.apache.accumulo.server.zookeeper.ZooReaderWriter; import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader; import org.apache.accumulo.start.classloader.vfs.ContextManager; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; @@ -167,7 +170,7 @@ import com.google.common.collect.Iterables; /** * The Master is responsible for assigning and balancing tablets to tablet servers. - * + *

* The master will also coordinate log recoveries and reports general status. */ public class Master @@ -1813,4 +1816,15 @@ public class Master public boolean isActiveService() { return masterInitialized.get(); } + + public FSDataOutputStream getOutputStream(final String path) throws IOException { + FileSystem fileSystem = fs.getDefaultVolume().getFileSystem(); + return fileSystem.create(new Path(path)); + } + + public FSDataInputStream getInputStream(final String path) throws IOException { + FileSystem fileSystem = fs.getDefaultVolume().getFileSystem(); + return fileSystem.open(new Path(path)); + } + } diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java index bd5be4b..5d0985d 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java @@ -16,17 +16,26 @@ */ package org.apache.accumulo.master.tableOps; +import java.io.IOException; +import java.util.SortedSet; +import java.util.TreeSet; + import org.apache.accumulo.core.Constants; import org.apache.accumulo.fate.Repo; import org.apache.accumulo.master.Master; import org.apache.accumulo.server.ServerConstants; import org.apache.accumulo.server.fs.VolumeChooserEnvironment; +import org.apache.accumulo.server.fs.VolumeManager; +import org.apache.accumulo.server.tablets.UniqueNameAllocator; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; class ChooseDir extends MasterRepo { private static final long serialVersionUID = 1L; - private TableInfo tableInfo; + private final TableInfo tableInfo; ChooseDir(TableInfo ti) { this.tableInfo = ti; @@ -41,6 +50,16 @@ class ChooseDir extends MasterRepo { public Repo call(long tid, Master master) throws Exception { // Constants.DEFAULT_TABLET_LOCATION has a leading slash prepended to it so we don't need to add // one here + + VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableInfo.tableId); + + String baseDir = master.getFileSystem().choose(chooserEnv, ServerConstants.getBaseUris()) + + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableInfo.tableId; + tableInfo.defaultTabletDir = baseDir + Constants.DEFAULT_TABLET_LOCATION; + + if (tableInfo.initialSplitSize > 0) { + createTableDirectoriesInfo(master, baseDir); + } VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableInfo.tableId, master.getContext()); tableInfo.dir = master.getFileSystem().choose(chooserEnv, @@ -51,6 +70,49 @@ class ChooseDir extends MasterRepo { @Override public void undo(long tid, Master master) throws Exception { + VolumeManager fs = master.getFileSystem(); + fs.deleteRecursively(new Path(tableInfo.splitDirsFile)); + } + /** + * Create unique table directory names that will be associated with split values. Then write these + * to the file system for later use during this FATE operation. + */ + private void createTableDirectoriesInfo(Master master, String baseDir) throws IOException { + SortedSet splits = Utils.getSortedSetFromFile(master.getInputStream(tableInfo.splitFile), + true); + SortedSet tabletDirectoryInfo = createTabletDirectoriesSet(splits.size(), baseDir); + writeTabletDirectoriesToFileSystem(master, tabletDirectoryInfo); } + + /** + * Create a set of unique table directories. These will be associated with splits in a follow-on + * FATE step. 
+ */ + private SortedSet createTabletDirectoriesSet(int num, String baseDir) { + String tabletDir; + UniqueNameAllocator namer = UniqueNameAllocator.getInstance(); + SortedSet splitDirs = new TreeSet<>(); + for (int i = 0; i < num; i++) { + tabletDir = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName(); + splitDirs.add(new Text(baseDir + "/" + new Path(tabletDir).getName())); + } + return splitDirs; + } + + /** + * Write the SortedSet of Tablet Directory names to the file system for use in the next phase of + * the FATE operation. + */ + private void writeTabletDirectoriesToFileSystem(Master master, SortedSet dirs) + throws IOException { + FileSystem fs = master.getFileSystem().getDefaultVolume().getFileSystem(); + if (fs.exists(new Path(tableInfo.splitDirsFile))) + fs.delete(new Path(tableInfo.splitDirsFile), true); + try (FSDataOutputStream stream = master.getOutputStream(tableInfo.splitDirsFile)) { + for (Text dir : dirs) + stream.writeBytes(dir + "\n"); + } + } + } diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java index 695f9be..f3d1a41 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java @@ -16,15 +16,19 @@ */ package org.apache.accumulo.master.tableOps; +import java.io.IOException; +import java.util.SortedSet; + import org.apache.accumulo.fate.Repo; import org.apache.accumulo.master.Master; import org.apache.accumulo.server.fs.VolumeManager; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; class CreateDir extends MasterRepo { private static final long serialVersionUID = 1L; - private TableInfo tableInfo; + private final TableInfo tableInfo; CreateDir(TableInfo ti) { this.tableInfo = ti; @@ -38,14 +42,37 @@ class CreateDir extends MasterRepo { @Override public Repo call(long tid, Master master) throws Exception { VolumeManager fs = master.getFileSystem(); - fs.mkdirs(new Path(tableInfo.dir)); + fs.mkdirs(new Path(tableInfo.defaultTabletDir)); + + // read in the splitDir info file and create a directory for each item + if (tableInfo.initialSplitSize > 0) { + SortedSet dirInfo = Utils + .getSortedSetFromFile(master.getInputStream(tableInfo.splitDirsFile), false); + createTabletDirectories(master.getFileSystem(), dirInfo); + } return new PopulateMetadata(tableInfo); } @Override public void undo(long tid, Master master) throws Exception { VolumeManager fs = master.getFileSystem(); - fs.deleteRecursively(new Path(tableInfo.dir)); + fs.deleteRecursively(new Path(tableInfo.defaultTabletDir)); + + if (tableInfo.initialSplitSize > 0) { + SortedSet dirInfo = Utils + .getSortedSetFromFile(master.getInputStream(tableInfo.splitDirsFile), false); + for (Text dirname : dirInfo) { + fs.deleteRecursively(new Path(dirname.toString())); + } + } + } + + private void createTabletDirectories(VolumeManager fs, SortedSet dirInfo) + throws IOException { + for (Text dir : dirInfo) { + if (!fs.mkdirs(new Path(dir.toString()))) + throw new IOException("Failed to create tablet directory: " + dir); + } } } diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java index 65aef99..baea523 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java +++ 
b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java @@ -18,6 +18,7 @@ package org.apache.accumulo.master.tableOps; import java.util.Map; +import org.apache.accumulo.core.client.admin.InitialTableState; import org.apache.accumulo.core.client.admin.TimeType; import org.apache.accumulo.core.client.impl.Namespace; import org.apache.accumulo.core.client.impl.Table; @@ -32,6 +33,7 @@ public class CreateTable extends MasterRepo { private TableInfo tableInfo; public CreateTable(String user, String tableName, TimeType timeType, Map props, + String splitFile, int splitCount, String splitDirsFile, InitialTableState initialTableState, Namespace.ID namespaceId) { tableInfo = new TableInfo(); tableInfo.tableName = tableName; @@ -39,6 +41,10 @@ public class CreateTable extends MasterRepo { tableInfo.user = user; tableInfo.props = props; tableInfo.namespaceId = namespaceId; + tableInfo.splitFile = splitFile; + tableInfo.initialSplitSize = splitCount; + tableInfo.initialTableState = initialTableState; + tableInfo.splitDirsFile = splitDirsFile; } @Override @@ -63,7 +69,6 @@ public class CreateTable extends MasterRepo { } finally { Utils.idLock.unlock(); } - } @Override diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java index a80d06e..6512521 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java @@ -16,16 +16,22 @@ */ package org.apache.accumulo.master.tableOps; +import java.io.IOException; + +import org.apache.accumulo.core.client.admin.InitialTableState; import org.apache.accumulo.core.master.state.tables.TableState; +import org.apache.accumulo.core.volume.Volume; import org.apache.accumulo.fate.Repo; import org.apache.accumulo.master.Master; -import org.slf4j.LoggerFactory; +import org.apache.accumulo.server.tables.TableManager; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; class FinishCreateTable extends MasterRepo { private static final long serialVersionUID = 1L; - private TableInfo tableInfo; + private final TableInfo tableInfo; public FinishCreateTable(TableInfo ti) { this.tableInfo = ti; @@ -38,19 +44,31 @@ class FinishCreateTable extends MasterRepo { @Override public Repo call(long tid, Master env) throws Exception { - env.getTableManager().transitionTableState(tableInfo.tableId, TableState.ONLINE); + + if (tableInfo.initialTableState == InitialTableState.OFFLINE) { + TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.OFFLINE); + } else { + TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE); + } Utils.unreserveNamespace(env, tableInfo.namespaceId, tid, false); Utils.unreserveTable(env, tableInfo.tableId, tid, true); env.getEventCoordinator().event("Created table %s ", tableInfo.tableName); - LoggerFactory.getLogger(FinishCreateTable.class) - .debug("Created table " + tableInfo.tableId + " " + tableInfo.tableName); - + if (tableInfo.initialSplitSize > 0) { + cleanupSplitFiles(env); + } return null; } + private void cleanupSplitFiles(Master env) throws IOException { + Volume defaultVolume = env.getFileSystem().getDefaultVolume(); + FileSystem fs = defaultVolume.getFileSystem(); + fs.delete(new Path(tableInfo.splitFile), true); + fs.delete(new Path(tableInfo.splitDirsFile), true); + } + @Override public 
String getReturn() { return tableInfo.tableId.canonicalID(); diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java index 1049770..cb51e9d 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java @@ -16,16 +16,33 @@ */ package org.apache.accumulo.master.tableOps; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.SortedSet; + +import org.apache.accumulo.core.client.BatchWriter; +import org.apache.accumulo.core.client.MutationsRejectedException; +import org.apache.accumulo.core.client.impl.Table; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.data.impl.KeyExtent; +import org.apache.accumulo.core.metadata.schema.MetadataSchema; import org.apache.accumulo.fate.Repo; import org.apache.accumulo.master.Master; import org.apache.accumulo.server.util.MetadataTableUtil; +import org.apache.accumulo.server.zookeeper.ZooLock; +import org.apache.hadoop.io.Text; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Iterables; class PopulateMetadata extends MasterRepo { private static final long serialVersionUID = 1L; - private TableInfo tableInfo; + private final TableInfo tableInfo; PopulateMetadata(TableInfo ti) { this.tableInfo = ti; @@ -39,11 +56,40 @@ class PopulateMetadata extends MasterRepo { @Override public Repo call(long tid, Master environment) throws Exception { KeyExtent extent = new KeyExtent(tableInfo.tableId, null, null); - MetadataTableUtil.addTablet(extent, tableInfo.dir, environment.getContext(), tableInfo.timeType, + MetadataTableUtil.addTablet(extent, tableInfo.defaultTabletDir, environment.getContext(), + tableInfo.timeType, environment.getMasterLock()); + if (tableInfo.initialSplitSize > 0) { + SortedSet splits = Utils + .getSortedSetFromFile(environment.getInputStream(tableInfo.splitFile), true); + SortedSet dirs = Utils + .getSortedSetFromFile(environment.getInputStream(tableInfo.splitDirsFile), false); + Map splitDirMap = createSplitDirectoryMap(splits, dirs); + try (BatchWriter bw = environment.getConnector().createBatchWriter("accumulo.metadata")) { + writeSplitsToMetadataTable(tableInfo.tableId, splits, splitDirMap, tableInfo.timeType, + environment.getMasterLock(), bw); + } + } return new FinishCreateTable(tableInfo); + } + private void writeSplitsToMetadataTable(Table.ID tableId, SortedSet splits, + Map data, char timeType, ZooLock lock, BatchWriter bw) + throws MutationsRejectedException { + Text prevSplit = null; + Value dirValue; + for (Text split : Iterables.concat(splits, Collections.singleton(null))) { + Mutation mut = new KeyExtent(tableId, split, prevSplit).getPrevRowUpdateMutation(); + dirValue = (split == null) ? 
new Value(tableInfo.defaultTabletDir) + : new Value(data.get(split)); + MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, dirValue); + MetadataSchema.TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mut, + new Value(timeType + "0")); + MetadataTableUtil.putLockID(lock, mut); + prevSplit = split; + bw.addMutation(mut); + } } @Override @@ -52,4 +98,17 @@ class PopulateMetadata extends MasterRepo { environment.getMasterLock()); } + /** + * Create a map containing an association between each split directory and a split value. + */ + private Map createSplitDirectoryMap(SortedSet splits, SortedSet dirs) { + Preconditions.checkArgument(splits.size() == dirs.size()); + Map data = new HashMap<>(); + Iterator s = splits.iterator(); + Iterator d = dirs.iterator(); + while (s.hasNext() && d.hasNext()) { + data.put(s.next(), d.next()); + } + return data; + } } diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java index 8854e4f..6a92820 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java @@ -19,6 +19,7 @@ package org.apache.accumulo.master.tableOps; import java.io.Serializable; import java.util.Map; +import org.apache.accumulo.core.client.admin.InitialTableState; import org.apache.accumulo.core.client.impl.Namespace; import org.apache.accumulo.core.client.impl.Table; @@ -32,7 +33,15 @@ class TableInfo implements Serializable { char timeType; String user; + // Record requested initial state at creation + InitialTableState initialTableState; + + // Track information related to initial split creation + int initialSplitSize; + String splitFile; + String splitDirsFile; + public Map props; - public String dir = null; + public String defaultTabletDir = null; } diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java index b8ebaed..bbf801a 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java @@ -18,8 +18,13 @@ package org.apache.accumulo.master.tableOps; import static java.nio.charset.StandardCharsets.UTF_8; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; import java.math.BigInteger; import java.util.Base64; +import java.util.SortedSet; +import java.util.TreeSet; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; @@ -40,6 +45,9 @@ import org.apache.accumulo.fate.zookeeper.ZooReservation; import org.apache.accumulo.master.Master; import org.apache.accumulo.server.ServerContext; import org.apache.accumulo.server.zookeeper.ZooQueueLock; +import org.apache.accumulo.server.zookeeper.ZooReaderWriter; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.io.Text; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -177,4 +185,24 @@ public class Utils { throw new AcceptableThriftTableOperationException(null, namespace, operation, TableOperationExceptionType.NAMESPACE_EXISTS, null); } + + /** + * Given an input stream and a flag indicating if the file info is base64 encoded or not, retrieve + * the data from a file on the file system. 
It is assumed that the file is textual and not binary + * data. + */ + static SortedSet getSortedSetFromFile(FSDataInputStream inputStream, boolean encoded) + throws IOException { + SortedSet data = new TreeSet<>(); + try (BufferedReader br = new BufferedReader(new InputStreamReader(inputStream))) { + String line; + while ((line = br.readLine()) != null) { + if (encoded) + data.add(new Text(Base64.getDecoder().decode(line))); + else + data.add(new Text(line)); + } + } + return data; + } } diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java index 836ffbd..4ccead4 100644 --- a/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java +++ b/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; -import java.util.SortedSet; import java.util.TreeSet; import org.apache.accumulo.core.client.AccumuloException; @@ -63,11 +62,12 @@ public class CreateTableCommand extends Command { private Option createTableOptInitProp; private Option createTableOptLocalityProps; private Option createTableOptIteratorProps; + private Option createTableOptOffline; @Override public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws AccumuloException, AccumuloSecurityException, TableExistsException, - TableNotFoundException, IOException, ClassNotFoundException { + TableNotFoundException, IOException { final String testTableName = cl.getArgs()[0]; NewTableConfiguration ntc = new NewTableConfiguration(); @@ -82,18 +82,22 @@ public class CreateTableCommand extends Command { if (shellState.getConnector().tableOperations().exists(tableName)) { throw new TableExistsException(null, tableName, null); } - final SortedSet partitions = new TreeSet<>(); + final boolean decode = cl.hasOption(base64Opt.getOpt()); + // Prior to 2.0, if splits were provided at table creation the table was created separately + // and then the addSplits method was called. Starting with 2.0, the splits will be + // stored on the file system and created before the table is brought online. if (cl.hasOption(createTableOptSplit.getOpt())) { - partitions - .addAll(ShellUtil.scanFile(cl.getOptionValue(createTableOptSplit.getOpt()), decode)); + ntc = ntc.withSplits(new TreeSet<>( + ShellUtil.scanFile(cl.getOptionValue(createTableOptSplit.getOpt()), decode))); } else if (cl.hasOption(createTableOptCopySplits.getOpt())) { final String oldTable = cl.getOptionValue(createTableOptCopySplits.getOpt()); if (!shellState.getConnector().tableOperations().exists(oldTable)) { throw new TableNotFoundException(null, oldTable, null); } - partitions.addAll(shellState.getConnector().tableOperations().listSplits(oldTable)); + ntc = ntc.withSplits( + new TreeSet<>(shellState.getConnector().tableOperations().listSplits(oldTable))); } if (cl.hasOption(createTableOptCopyConfig.getOpt())) { @@ -120,12 +124,14 @@ public class CreateTableCommand extends Command { ntc = setLocalityForNewTable(cl, ntc); } - // create table + // set offline table creation property + if (cl.hasOption(createTableOptOffline.getOpt())) { + ntc = ntc.createOffline(); + } + + // create table. 
shellState.getConnector().tableOperations().create(tableName, ntc.setTimeType(timeType).setProperties(props)); - if (partitions.size() > 0) { - shellState.getConnector().tableOperations().addSplits(tableName, partitions); - } shellState.setTableName(tableName); // switch shell to new table context @@ -317,6 +323,8 @@ public class CreateTableCommand extends Command { createTableOptIteratorProps.setArgName("profile[:[all]|[scan[,]][minc[,]][majc]]"); createTableOptIteratorProps.setArgs(Option.UNLIMITED_VALUES); + createTableOptOffline = new Option("o", "offline", false, "create table in offline mode"); + // Splits and CopySplits are put in an optionsgroup to make them // mutually exclusive final OptionGroup splitOrCopySplit = new OptionGroup(); @@ -340,6 +348,7 @@ public class CreateTableCommand extends Command { o.addOption(createTableOptInitProp); o.addOption(createTableOptLocalityProps); o.addOption(createTableOptIteratorProps); + o.addOption(createTableOptOffline); return o; } diff --git a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java index 805ea53..917edd7 100644 --- a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java +++ b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java @@ -16,6 +16,7 @@ */ package org.apache.accumulo.test; +import static java.nio.file.Files.newBufferedReader; import static org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -24,6 +25,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.BufferedReader; +import java.io.BufferedWriter; import java.io.File; import java.io.FileReader; import java.io.IOException; @@ -31,9 +33,13 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.PrintWriter; import java.lang.reflect.Constructor; -import java.security.SecureRandom; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Iterator; @@ -42,9 +48,14 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Random; import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.UUID; import java.util.concurrent.TimeUnit; import org.apache.accumulo.core.Constants; +import org.apache.accumulo.core.client.AccumuloException; +import org.apache.accumulo.core.client.AccumuloSecurityException; import org.apache.accumulo.core.client.ClientInfo; import org.apache.accumulo.core.client.Connector; import org.apache.accumulo.core.client.IteratorSetting; @@ -67,6 +78,7 @@ import org.apache.accumulo.core.file.FileOperations; import org.apache.accumulo.core.file.FileSKVWriter; import org.apache.accumulo.core.metadata.MetadataTable; import org.apache.accumulo.core.security.Authorizations; +import org.apache.accumulo.core.util.TextUtil; import org.apache.accumulo.core.util.format.Formatter; import org.apache.accumulo.core.util.format.FormatterConfig; import org.apache.accumulo.harness.MiniClusterConfigurationCallback; @@ -107,7 +119,7 @@ public class ShellServerIT extends SharedMiniClusterBase { StringBuilder sb = new StringBuilder(); @Override - public void write(int b) throws IOException { + public void write(int b) { sb.append((char) (0xff & b)); } @@ 
-127,7 +139,7 @@ public class ShellServerIT extends SharedMiniClusterBase { private int offset = 0; @Override - public int read() throws IOException { + public int read() { if (offset == source.length()) return '\n'; else @@ -281,9 +293,11 @@ public class ShellServerIT extends SharedMiniClusterBase { SharedMiniClusterBase.startMiniClusterWithConfig(new ShellServerITConfigCallback()); rootPath = getMiniClusterDir().getAbsolutePath(); + String userDir = System.getProperty("user.dir"); + // history file is updated in $HOME System.setProperty("HOME", rootPath); - System.setProperty("hadoop.tmp.dir", System.getProperty("user.dir") + "/target/hadoop-tmp"); + System.setProperty("hadoop.tmp.dir", userDir + "/target/hadoop-tmp"); traceProcess = getCluster().exec(TraceServer.class); @@ -2284,4 +2298,472 @@ public class ShellServerIT extends SharedMiniClusterBase { ts.exec("createtable " + table + "-i ", false); ts.exec("deletetable -f " + tmpTable); } + + /** + * Verify that table can be created in offline status and then be brought online. + */ + @Test + public void testCreateTableOffline() throws IOException { + final String tableName = name.getMethodName() + "_table"; + ts.exec("createtable " + tableName + " -o", true); + String output = ts.exec("tables"); + Assert.assertTrue(output.contains(tableName)); + output = ts.exec("scan -t " + tableName, false, "is offline", true); + Assert.assertTrue(output.contains("TableOfflineException")); + ts.exec("table " + tableName, true); + ts.exec("online", true); + ts.exec("scan", true); + ts.exec("deletetable -f " + tableName, true); + } + + /** + * Use shell to create a table with a supplied file containing splits. + * + * The splits will be contained in a file, sorted and un-encoded with no repeats or blank lines. + */ + @Test + public void testCreateTableWithSplitsFile1() + throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException { + String splitsFile = null; + try { + splitsFile = System.getProperty("user.dir") + "/target/splitFile"; + generateSplitsFile(splitsFile, 1000, 12, false, false, true, false, false); + SortedSet expectedSplits = readSplitsFromFile(splitsFile, false); + final String tableName = name.getMethodName() + "_table"; + ts.exec("createtable " + tableName + " -sf " + splitsFile, true); + Collection createdSplits = getConnector().tableOperations().listSplits(tableName); + assertEquals(expectedSplits, new TreeSet<>(createdSplits)); + } finally { + Files.delete(Paths.get(splitsFile)); + } + } + + /** + * Use shell to create a table with a supplied file containing splits. + * + * The splits will be contained in a file, unsorted and un-encoded with no repeats or blank lines. + */ + @Test + public void testCreateTableWithSplitsFile2() + throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException { + String splitsFile = null; + try { + splitsFile = System.getProperty("user.dir") + "/target/splitFile"; + generateSplitsFile(splitsFile, 300, 12, false, false, false, false, false); + SortedSet expectedSplits = readSplitsFromFile(splitsFile, false); + final String tableName = name.getMethodName() + "_table"; + ts.exec("createtable " + tableName + " -sf " + splitsFile, true); + Collection createdSplits = getConnector().tableOperations().listSplits(tableName); + assertEquals(expectedSplits, new TreeSet<>(createdSplits)); + } finally { + Files.delete(Paths.get(splitsFile)); + } + } + + /** + * Use shell to create a table with a supplied file containing splits. 
+   *
+   * The splits will be contained in a file, sorted and encoded with no repeats or blank lines.
+   */
+  @Test
+  public void testCreateTableWithSplitsFile3()
+      throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException {
+    String splitsFile = null;
+    try {
+      splitsFile = System.getProperty("user.dir") + "/target/splitFile";
+      generateSplitsFile(splitsFile, 100, 23, false, true, true, false, false);
+      SortedSet<Text> expectedSplits = readSplitsFromFile(splitsFile, false);
+      final String tableName = name.getMethodName() + "_table";
+      ts.exec("createtable " + tableName + " -sf " + splitsFile, true);
+      Collection<Text> createdSplits = getConnector().tableOperations().listSplits(tableName);
+      assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+    } finally {
+      Files.delete(Paths.get(splitsFile));
+    }
+  }
+
+  /**
+   * Use shell to create a table with a supplied file containing splits.
+   *
+   * The splits will be contained in a file, sorted and un-encoded with a blank line and no repeats.
+   */
+  @Test
+  public void testCreateTableWithSplitsFile4()
+      throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException {
+    String splitsFile = null;
+    try {
+      splitsFile = System.getProperty("user.dir") + "/target/splitFile";
+      generateSplitsFile(splitsFile, 100, 31, false, false, true, true, false);
+      SortedSet<Text> expectedSplits = readSplitsFromFile(splitsFile, false);
+      final String tableName = name.getMethodName() + "_table";
+      ts.exec("createtable " + tableName + " -sf " + splitsFile, true);
+      Collection<Text> createdSplits = getConnector().tableOperations().listSplits(tableName);
+      assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+    } finally {
+      Files.delete(Paths.get(splitsFile));
+    }
+  }
+
+  /**
+   * Use shell to create a table with a supplied file containing splits.
+   *
+   * The splits will be contained in a file, sorted and un-encoded with repeats and no blank lines.
+   */
+  @Test
+  public void testCreateTableWithSplitsFile5()
+      throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException {
+    String splitsFile = null;
+    try {
+      splitsFile = System.getProperty("user.dir") + "/target/splitFile";
+      generateSplitsFile(splitsFile, 100, 32, false, false, true, false, true);
+      SortedSet<Text> expectedSplits = readSplitsFromFile(splitsFile, false);
+      final String tableName = name.getMethodName() + "_table";
+      ts.exec("createtable " + tableName + " -sf " + splitsFile, true);
+      Collection<Text> createdSplits = getConnector().tableOperations().listSplits(tableName);
+      assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+    } finally {
+      Files.delete(Paths.get(splitsFile));
+    }
+  }
+
+  /**
+   * Use shell to create a table with a supplied file containing splits.
+   *
+   * The splits will be contained in a file, unsorted and un-encoded with a blank line and repeats.
+ */ + @Test + public void testCreateTableWithSplitsFile6() + throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException { + String splitsFile = null; + try { + splitsFile = System.getProperty("user.dir") + "/target/splitFile"; + generateSplitsFile(splitsFile, 100, 12, false, false, false, true, true); + SortedSet expectedSplits = readSplitsFromFile(splitsFile, false); + final String tableName = name.getMethodName() + "_table"; + ts.exec("createtable " + tableName + " -sf " + splitsFile, true); + Collection createdSplits = getConnector().tableOperations().listSplits(tableName); + assertEquals(expectedSplits, new TreeSet<>(createdSplits)); + } finally { + Files.delete(Paths.get(splitsFile)); + } + } + + /** + * Use shell to create a table with a supplied file containing splits. + * + * The splits will be contained in a file, sorted and encoded with a blank line and repeats. + */ + @Test + public void testCreateTableWithSplitsFile7() + throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException { + String splitsFile = null; + try { + splitsFile = System.getProperty("user.dir") + "/target/splitFile"; + generateSplitsFile(splitsFile, 100, 12, false, false, true, true, true); + SortedSet expectedSplits = readSplitsFromFile(splitsFile, false); + final String tableName = name.getMethodName() + "_table"; + ts.exec("createtable " + tableName + " -sf " + splitsFile, true); + Collection createdSplits = getConnector().tableOperations().listSplits(tableName); + assertEquals(expectedSplits, new TreeSet<>(createdSplits)); + } finally { + Files.delete(Paths.get(splitsFile)); + } + } + + /** + * Use shell to create a table with a supplied file containing splits. + * + * The splits file will be empty. + */ + @Test(expected = org.apache.accumulo.core.client.TableNotFoundException.class) + public void testCreateTableWithEmptySplitFile() + throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException { + String splitsFile = null; + try { + splitsFile = System.getProperty("user.dir") + "/target/splitFile"; + generateSplitsFile(splitsFile, 0, 0, false, false, false, false, false); + SortedSet expectedSplits = readSplitsFromFile(splitsFile, false); + final String tableName = name.getMethodName() + "_table"; + ts.exec("createtable " + tableName + " -sf " + splitsFile, false); + Collection createdSplits = getConnector().tableOperations().listSplits(tableName); + assertEquals(expectedSplits, new TreeSet<>(createdSplits)); + } finally { + Files.delete(Paths.get(splitsFile)); + } + } + + /** + * Use shell to create a table that used splits from another table. + */ + @Test + public void testCreateTableWithCopySplitsFromOtherTable() + throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException { + // create a table and add some splits + final String tableName1 = name.getMethodName() + "_table1"; + ts.exec("createtable " + tableName1, true); + String output = ts.exec("tables", true); + Assert.assertTrue(output.contains(tableName1)); + ts.exec("table " + tableName1, true); + // add splits to this table using the addsplits command. 
+ List splits = new ArrayList<>(); + splits.add(new Text("ccccc")); + splits.add(new Text("fffff")); + splits.add(new Text("mmmmm")); + splits.add(new Text("sssss")); + ts.exec("addsplits " + splits.get(0) + " " + splits.get(1) + " " + splits.get(2) + " " + + splits.get(3), true); + // Now create a table that will used the previous tables splits and create them at table + // creation + final String tableName2 = name.getMethodName() + "_table2"; + ts.exec("createtable " + tableName2 + " --copy-splits " + tableName1, true); + ts.exec("table " + tableName1, true); + String tablesOutput = ts.exec("tables", true); + Assert.assertTrue(tablesOutput.contains(tableName2)); + Collection createdSplits = getConnector().tableOperations().listSplits(tableName2); + assertEquals(new TreeSet<>(splits), new TreeSet<>(createdSplits)); + ts.exec("deletetable -f " + tableName1, true); + ts.exec("deletetable -f " + tableName2, true); + } + + /** + * Use shell to create a table with a supplied file containing splits. + * + * The splits will be contained in a file, sorted and encoded with no repeats or blank lines. + */ + @Test + public void testCreateTableWithBinarySplitsFile1() + throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException { + String splitsFile = null; + try { + splitsFile = System.getProperty("user.dir") + "/target/splitFile"; + generateSplitsFile(splitsFile, 200, 12, true, true, true, false, false); + SortedSet expectedSplits = readSplitsFromFile(splitsFile, false); + final String tableName = name.getMethodName() + "_table"; + ts.exec("createtable " + tableName + " -sf " + splitsFile, true); + Collection createdSplits = getConnector().tableOperations().listSplits(tableName); + assertEquals(expectedSplits, new TreeSet<>(createdSplits)); + } finally { + Files.delete(Paths.get(splitsFile)); + } + } + + /** + * Use shell to create a table with a supplied file containing splits. + * + * The splits will be contained in a file, unsorted and encoded with no repeats or blank lines. + */ + @Test + public void testCreateTableWithBinarySplitsFile2() + throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException { + String splitsFile = null; + try { + splitsFile = System.getProperty("user.dir") + "/target/splitFile"; + generateSplitsFile(splitsFile, 300, 12, true, true, false, false, false); + SortedSet expectedSplits = readSplitsFromFile(splitsFile, false); + final String tableName = name.getMethodName() + "_table"; + ts.exec("createtable " + tableName + " -sf " + splitsFile, true); + Collection createdSplits = getConnector().tableOperations().listSplits(tableName); + assertEquals(expectedSplits, new TreeSet<>(createdSplits)); + } finally { + Files.delete(Paths.get(splitsFile)); + } + } + + /** + * Use shell to create a table with a supplied file containing splits. + * + * The splits will be contained in a file, sorted and encoded with no repeats or blank lines. 
+   */
+  @Test
+  public void testCreateTableWithBinarySplitsFile3()
+      throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException {
+    String splitsFile = null;
+    try {
+      splitsFile = System.getProperty("user.dir") + "/target/splitFile";
+      generateSplitsFile(splitsFile, 100, 23, true, true, true, false, false);
+      SortedSet<Text> expectedSplits = readSplitsFromFile(splitsFile, false);
+      final String tableName = name.getMethodName() + "_table";
+      ts.exec("createtable " + tableName + " -sf " + splitsFile, true);
+      Collection<Text> createdSplits = getConnector().tableOperations().listSplits(tableName);
+      assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+    } finally {
+      Files.delete(Paths.get(splitsFile));
+    }
+  }
+
+  /**
+   * Use shell to create a table with a supplied file containing splits.
+   *
+   * The splits will be contained in a file, sorted and encoded with a blank line and no repeats.
+   */
+  @Test
+  public void testCreateTableWithBinarySplitsFile4()
+      throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException {
+    String splitsFile = null;
+    try {
+      splitsFile = System.getProperty("user.dir") + "/target/splitFile";
+      generateSplitsFile(splitsFile, 100, 31, true, true, true, true, false);
+      SortedSet<Text> expectedSplits = readSplitsFromFile(splitsFile, false);
+      final String tableName = name.getMethodName() + "_table";
+      ts.exec("createtable " + tableName + " -sf " + splitsFile, true);
+      Collection<Text> createdSplits = getConnector().tableOperations().listSplits(tableName);
+      assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+    } finally {
+      Files.delete(Paths.get(splitsFile));
+    }
+  }
+
+  /**
+   * Use shell to create a table with a supplied file containing splits.
+   *
+   * The splits will be contained in a file, sorted and encoded with no blank lines and repeats.
+   */
+  @Test
+  public void testCreateTableWithBinarySplitsFile5()
+      throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException {
+    String splitsFile = null;
+    try {
+      splitsFile = System.getProperty("user.dir") + "/target/splitFile";
+      generateSplitsFile(splitsFile, 100, 32, true, true, true, false, true);
+      SortedSet<Text> expectedSplits = readSplitsFromFile(splitsFile, false);
+      final String tableName = name.getMethodName() + "_table";
+      ts.exec("createtable " + tableName + " -sf " + splitsFile, true);
+      Collection<Text> createdSplits = getConnector().tableOperations().listSplits(tableName);
+      assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+    } finally {
+      Files.delete(Paths.get(splitsFile));
+    }
+  }
+
+  /**
+   * Use shell to create a table with a supplied file containing splits.
+   *
+   * The splits will be contained in a file, unsorted and encoded with a blank line and repeats.
+   */
+  @Test
+  public void testCreateTableWithBinarySplitsFile6()
+      throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException {
+    String splitsFile = null;
+    try {
+      splitsFile = System.getProperty("user.dir") + "/target/splitFile";
+      generateSplitsFile(splitsFile, 100, 12, true, true, false, true, true);
+      SortedSet<Text> expectedSplits = readSplitsFromFile(splitsFile, false);
+      final String tableName = name.getMethodName() + "_table";
+      ts.exec("createtable " + tableName + " -sf " + splitsFile, true);
+      Collection<Text> createdSplits = getConnector().tableOperations().listSplits(tableName);
+      assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+    } finally {
+      Files.delete(Paths.get(splitsFile));
+    }
+  }
+
+  /**
+   * Use shell to create a table with a supplied file containing splits.
+   *
+   * The splits will be contained in a file, sorted and encoded with a blank line and repeats.
+   */
+  @Test
+  public void testCreateTableWithBinarySplitsFile7()
+      throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException {
+    String splitsFile = null;
+    try {
+      splitsFile = System.getProperty("user.dir") + "/target/splitFile";
+      generateSplitsFile(splitsFile, 100, 12, true, true, true, true, true);
+      SortedSet<Text> expectedSplits = readSplitsFromFile(splitsFile, false);
+      final String tableName = name.getMethodName() + "_table";
+      ts.exec("createtable " + tableName + " -sf " + splitsFile, true);
+      Collection<Text> createdSplits = getConnector().tableOperations().listSplits(tableName);
+      assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+    } finally {
+      Files.delete(Paths.get(splitsFile));
+    }
+  }
+
+  private SortedSet<Text> readSplitsFromFile(final String splitsFile, boolean decode)
+      throws IOException {
+    SortedSet<Text> splits = new TreeSet<>();
+    try (BufferedReader reader = newBufferedReader(Paths.get(splitsFile))) {
+      String split;
+      while ((split = reader.readLine()) != null) {
+        Text unencodedString = decode(split, decode);
+        if (unencodedString != null)
+          splits.add(unencodedString);
+      }
+    }
+    return splits;
+  }
+
+  private void generateSplitsFile(final String splitsFile, final int numItems, final int len,
+      final boolean binarySplits, final boolean encoded, final boolean sort,
+      final boolean addBlankLine, final boolean repeat) throws IOException {
+
+    java.nio.file.Path splitsPath = java.nio.file.Paths.get(splitsFile);
+    int insertAt = (len % 2 == 0) ? len / 2 : (len + 1) / 2;
+    Collection<Text> sortedSplits = null;
+    Collection<Text> randomSplits = null;
+
+    if (binarySplits)
+      randomSplits = generateBinarySplits(numItems, len);
+    else
+      randomSplits = generateNonBinarySplits(numItems, len);
+
+    if (sort)
+      sortedSplits = new TreeSet<>(randomSplits);
+
+    try (BufferedWriter writer = Files.newBufferedWriter(splitsPath, Charset.forName("UTF-8"))) {
+      int cnt = 0;
+      Collection<Text> splits;
+      if (sort)
+        splits = sortedSplits;
+      else
+        splits = randomSplits;
+
+      for (Text text : splits) {
+        if (addBlankLine && cnt++ == insertAt)
+          writer.write('\n');
+        writer.write(encode(text, encoded) + '\n');
+        if (repeat)
+          writer.write(encode(text, encoded) + '\n');
+      }
+    }
+  }
+
+  private Collection<Text> generateNonBinarySplits(final int numItems, final int len) {
+    Set<Text> splits = new HashSet<>();
+    for (int i = 0; i < numItems; i++) {
+      splits.add(getRandomText(len));
+    }
+    return splits;
+  }
+
+  private Collection<Text> generateBinarySplits(final int numItems, final int len) {
+    Set<Text> splits = new HashSet<>();
+    Random rand = new Random();
+    for (int i = 0; i < numItems; i++) {
+      byte[] split = new byte[len];
+      rand.nextBytes(split);
+      splits.add(new Text(split));
+    }
+    return splits;
+  }
+
+  private Text getRandomText(final int len) {
+    int desiredLen = len;
+    if (len > 32)
+      desiredLen = 32;
+    return new Text(
+        String.valueOf(UUID.randomUUID()).replaceAll("-", "").substring(0, desiredLen - 1));
+  }
+
+  private static String encode(final Text text, final boolean encode) {
+    if (StringUtils.isBlank(text.toString()))
+      return null;
+    return encode ? Base64.getEncoder().encodeToString(TextUtil.getBytes(text)) : text.toString();
+  }
+
+  private Text decode(final String text, final boolean decode) {
+    if (StringUtils.isBlank(text))
+      return null;
+    return decode ? new Text(Base64.getDecoder().decode(text)) : new Text(text);
+  }
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CreateInitialSplitsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CreateInitialSplitsIT.java
new file mode 100644
index 0000000..d796873
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CreateInitialSplitsIT.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Base64;
+import java.util.Collection;
+import java.util.Random;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.UUID;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.util.TextUtil;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.Before;
+import org.junit.Test;
+
+@SuppressWarnings("SpellCheckingInspection")
+public class CreateInitialSplitsIT extends AccumuloClusterHarness {
+
+  private Connector connector;
+  private String tableName;
+
+  @Override
+  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration conf) {
+    cfg.setMemory(ServerType.TABLET_SERVER, 128 * 4, MemoryUnit.MEGABYTE);
+
+    // use raw local file system
+    conf.set("fs.file.impl", RawLocalFileSystem.class.getName());
+  }
+
+  @Override
+  protected int defaultTimeoutSeconds() {
+    return 2 * 60;
+  }
+
+  @Before
+  public void setupInitialSplits() {
+    connector = getConnector();
+  }
+
+  /**
+   * Verify normal table creation did not get broken.
+   */
+  @Test
+  public void testCreateTableWithNoSplits()
+      throws TableExistsException, AccumuloSecurityException, AccumuloException {
+    tableName = getUniqueNames(1)[0];
+    connector.tableOperations().create(tableName);
+    assertTrue(connector.tableOperations().exists(tableName));
+  }
+
+  /**
+   * Create initial splits by providing splits from a Java Collection.
+   */
+  @Test
+  public void testCreateInitialSplits() throws TableExistsException, AccumuloSecurityException,
+      AccumuloException, TableNotFoundException {
+    tableName = getUniqueNames(1)[0];
+    SortedSet<Text> expectedSplits = generateNonBinarySplits(3000, 32);
+    NewTableConfiguration ntc = new NewTableConfiguration().withSplits(expectedSplits);
+    assertFalse(connector.tableOperations().exists(tableName));
+    connector.tableOperations().create(tableName, ntc);
+    assertTrue(connector.tableOperations().exists(tableName));
+    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName);
+    assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+  }
+
+  @Test
+  public void testCreateInitialSplitsWithEncodedSplits() throws TableExistsException,
+      AccumuloSecurityException, AccumuloException, TableNotFoundException {
+    tableName = getUniqueNames(1)[0];
+    SortedSet<Text> expectedSplits = generateNonBinarySplits(3000, 32, true);
+    NewTableConfiguration ntc = new NewTableConfiguration().withSplits(expectedSplits);
+    assertFalse(connector.tableOperations().exists(tableName));
+    connector.tableOperations().create(tableName, ntc);
+    assertTrue(connector.tableOperations().exists(tableName));
+    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName);
+    assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+  }
+
+  /**
+   * Test that binary split data is handled properly.
+   */
+  @Test
+  public void testCreateInitialBinarySplits() throws TableExistsException,
+      AccumuloSecurityException, AccumuloException, TableNotFoundException {
+    tableName = getUniqueNames(1)[0];
+    SortedSet<Text> expectedSplits = generateBinarySplits(1000, 16);
+    NewTableConfiguration ntc = new NewTableConfiguration().withSplits(expectedSplits);
+    assertFalse(connector.tableOperations().exists(tableName));
+    connector.tableOperations().create(tableName, ntc);
+    assertTrue(connector.tableOperations().exists(tableName));
+    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName);
+    assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+  }
+
+  @Test
+  public void testCreateInitialBinarySplitsWithEncodedSplits() throws TableExistsException,
+      AccumuloSecurityException, AccumuloException, TableNotFoundException {
+    tableName = getUniqueNames(1)[0];
+    SortedSet<Text> expectedSplits = generateBinarySplits(1000, 16, true);
+    NewTableConfiguration ntc = new NewTableConfiguration().withSplits(expectedSplits);
+    assertFalse(connector.tableOperations().exists(tableName));
+    connector.tableOperations().create(tableName, ntc);
+    assertTrue(connector.tableOperations().exists(tableName));
+    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName);
+    assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+  }
+
+  /**
+   * Create splits based upon splits from another table.
+   */
+  @Test
+  public void testCreateInitialSplitsCopiedFromAnotherTable() throws TableExistsException,
+      AccumuloSecurityException, AccumuloException, TableNotFoundException {
+    // create first table with some splits. Do it the older way just for test purposes to verify
+    // older method was not affected.
+    tableName = getUniqueNames(1)[0];
+    NewTableConfiguration ntc = new NewTableConfiguration();
+    connector.tableOperations().create(tableName, ntc);
+    assertTrue(connector.tableOperations().exists(tableName));
+    SortedSet<Text> splits = new TreeSet<>();
+    splits.add(new Text("ccccc"));
+    splits.add(new Text("mmmmm"));
+    splits.add(new Text("ttttt"));
+    connector.tableOperations().addSplits(tableName, splits);
+    // now create another table using the splits from this table
+    Collection<Text> otherSplits = connector.tableOperations().listSplits(tableName);
+    assertEquals(splits, new TreeSet<>(otherSplits));
+    String tableName2 = getUniqueNames(2)[1];
+    NewTableConfiguration ntc2 = new NewTableConfiguration();
+    ntc2.withSplits(new TreeSet<>(otherSplits));
+    assertFalse(connector.tableOperations().exists(tableName2));
+    connector.tableOperations().create(tableName2, ntc2);
+    assertTrue(connector.tableOperations().exists(tableName2));
+    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName2);
+    assertEquals(splits, new TreeSet<>(createdSplits));
+  }
+
+  /**
+   * Write some data to multiple tablets, verify the data, compact the table, verify the data
+   * again, then delete the table.
+   */
+  @Test
+  public void testMultipleOperationsFunctionality() throws TableExistsException,
+      AccumuloSecurityException, AccumuloException, TableNotFoundException {
+    // Write data to multiple tablets
+    tableName = getUniqueNames(1)[0];
+    SortedSet<Text> expectedSplits = generateNonBinarySplits(1000, 32);
+    NewTableConfiguration ntc = new NewTableConfiguration().withSplits(expectedSplits);
+    assertFalse(connector.tableOperations().exists(tableName));
+    connector.tableOperations().create(tableName, ntc);
+    assertTrue(connector.tableOperations().exists(tableName));
+    // verify data
+    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName);
+    assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+    connector.tableOperations().flush(tableName);
+    // compact data
+    connector.tableOperations().compact(tableName, null, null, true, true);
+    // verify data
+    createdSplits = connector.tableOperations().listSplits(tableName);
+    assertEquals(expectedSplits, new TreeSet<>(createdSplits));
+    // delete table
+    connector.tableOperations().delete(tableName);
+    assertFalse(connector.tableOperations().exists(tableName));
+  }
+
+  // @Test
+  // public void testCreateSplitsViaShellCommands() {
+  // // See ShellServerIT for IT tests using shell commands.
+  // }
+
+  private SortedSet<Text> generateNonBinarySplits(final int numItems, final int len) {
+    return generateNonBinarySplits(numItems, len, false);
+  }
+
+  private SortedSet<Text> generateNonBinarySplits(final int numItems, final int len,
+      final boolean useB64) {
+    SortedSet<Text> splits = new TreeSet<>();
+    for (int i = 0; i < numItems; i++) {
+      splits.add(encode(getRandomText(len), useB64));
+    }
+    return splits;
+  }
+
+  private SortedSet<Text> generateBinarySplits(final int numItems, final int len) {
+    return generateBinarySplits(numItems, len, false);
+  }
+
+  private SortedSet<Text> generateBinarySplits(final int numItems, final int len,
+      final boolean useB64) {
+    SortedSet<Text> splits = new TreeSet<>();
+    Random rand = new Random();
+    for (int i = 0; i < numItems; i++) {
+      byte[] split = new byte[len];
+      rand.nextBytes(split);
+      splits.add(encode(new Text(split), useB64));
+    }
+    return splits;
+  }
+
+  private Text encode(final Text text, final boolean encode) {
+    if (text == null) {
+      return null;
+    }
+    return encode ? new Text(Base64.getEncoder().encodeToString(TextUtil.getBytes(text))) : text;
+  }
+
+  private Text getRandomText(final int len) {
+    int desiredLen = len;
+    if (len > 32)
+      desiredLen = 32;
+    return new Text(
+        String.valueOf(UUID.randomUUID()).replaceAll("-", "").substring(0, desiredLen - 1));
+  }
+}
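
For context, the client-side API these ITs exercise comes down to a few lines. Below is a minimal sketch, assuming a Connector has already been obtained from the cluster (the table name "exampleTable" and the helper class are illustrative placeholders, not part of this patch):

    import java.util.SortedSet;
    import java.util.TreeSet;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.admin.NewTableConfiguration;
    import org.apache.hadoop.io.Text;

    public class PreSplitTableExample {
      // Creates a table with three split points supplied at creation time, so the
      // tablets exist as soon as create() returns, rather than being added
      // afterwards with addSplits().
      static void createPreSplitTable(Connector connector) throws Exception {
        SortedSet<Text> splits = new TreeSet<>();
        splits.add(new Text("ccccc"));
        splits.add(new Text("mmmmm"));
        splits.add(new Text("ttttt"));
        NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
        connector.tableOperations().create("exampleTable", ntc);
      }
    }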
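
The corresponding shell usage covered by the ShellServerIT cases above, with placeholder file path and table names:

    createtable exampleTable -sf /path/to/splitsFile          # read split points from a file, one per line
    createtable exampleTable2 --copy-splits exampleTable      # copy split points from an existing table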