accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [8/8] accumulo git commit: Merge branch '1.7'
Date Tue, 05 Jan 2016 22:32:38 GMT
Merge branch '1.7'


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/7bbf7ade
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/7bbf7ade
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/7bbf7ade

Branch: refs/heads/master
Commit: 7bbf7ade1bf2947ad866b56c9b9cd0dd48619caa
Parents: dee8d7a 0e9f208
Author: Christopher Tubbs <ctubbsii@apache.org>
Authored: Tue Jan 5 17:13:36 2016 -0500
Committer: Christopher Tubbs <ctubbsii@apache.org>
Committed: Tue Jan 5 17:13:36 2016 -0500

----------------------------------------------------------------------
 .../accumulo/core/file/blockfile/impl/CachableBlockFile.java     | 4 ++--
 .../accumulo/core/iterators/system/SynchronizedIterator.java     | 2 +-
 pom.xml                                                          | 4 ++--
 .../org/apache/accumulo/shell/commands/NamespacesCommand.java    | 2 +-
 .../java/org/apache/accumulo/shell/commands/TablesCommand.java   | 2 +-
 .../org/apache/accumulo/test/ArbitraryTablePropertiesIT.java     | 2 +-
 .../apache/accumulo/test/CreateTableWithNewTableConfigIT.java    | 2 +-
 test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java | 2 +-
 8 files changed, 10 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
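
Only three of the eight files have their hunks expanded in this message; the remaining changes are listed in the diffstat above but elided below. The visible hunks all make the same one-line cleanup, removing a stray semicolon that left an empty statement after an overridden method body. A minimal before/after sketch of that pattern (illustrative only, not copied from the commit):

    // before: the trailing ';' after the method body is a redundant empty statement
    @Override
    protected int defaultTimeoutSeconds() {
      return 30;
    };

    // after: the stray semicolon is dropped
    @Override
    protected int defaultTimeoutSeconds() {
      return 30;
    }
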


http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bbf7ade/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bbf7ade/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bbf7ade/test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
index 213ab59,0000000..44124e4
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
@@@ -1,198 -1,0 +1,198 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test;
 +
 +import java.util.Map.Entry;
 +
 +import org.apache.accumulo.cluster.ClusterUser;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.security.TablePermission;
 +import org.apache.accumulo.harness.SharedMiniClusterBase;
 +import org.junit.Assert;
 +import org.junit.Test;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +public class ArbitraryTablePropertiesIT extends SharedMiniClusterBase {
 +  private static final Logger log = LoggerFactory.getLogger(ArbitraryTablePropertiesIT.class);
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 30;
-   };
++  }
 +
 +  // Tests set, get, and remove of arbitrary table properties using the root account
 +  @Test
 +  public void setGetRemoveTablePropertyRoot() throws Exception {
 +    log.debug("Starting setGetRemoveTablePropertyRoot test ------------------------");
 +
 +    // make a table
 +    final String tableName = getUniqueNames(1)[0];
 +    final Connector conn = getConnector();
 +    conn.tableOperations().create(tableName);
 +
 +    // Set variables for the property name to use and the initial value
 +    String propertyName = "table.custom.description";
 +    String description1 = "Description";
 +
 +    // Make sure the property name is valid
 +    Assert.assertTrue(Property.isValidPropertyKey(propertyName));
 +    // Set the property to the desired value
 +    conn.tableOperations().setProperty(tableName, propertyName, description1);
 +
 +    // Loop through properties to make sure the new property is added to the list
 +    int count = 0;
 +    for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
 +      if (property.getKey().equals(propertyName) && property.getValue().equals(description1))
 +        count++;
 +    }
 +    Assert.assertEquals(count, 1);
 +
 +    // Set the property as something different
 +    String description2 = "set second";
 +    conn.tableOperations().setProperty(tableName, propertyName, description2);
 +
 +    // Loop through properties to make sure the updated value is in the list
 +    count = 0;
 +    for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
 +      if (property.getKey().equals(propertyName) && property.getValue().equals(description2))
 +        count++;
 +    }
 +    Assert.assertEquals(count, 1);
 +
 +    // Remove the property and make sure there is no longer a value associated with it
 +    conn.tableOperations().removeProperty(tableName, propertyName);
 +
 +    // Loop through properties to make sure the property is no longer in the list
 +    count = 0;
 +    for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
 +      if (property.getKey().equals(propertyName))
 +        count++;
 +    }
 +    Assert.assertEquals(count, 0);
 +  }
 +
 +  // Tests set, get, and remove of user added arbitrary properties using a non-root account with permissions to alter tables
 +  @Test
 +  public void userSetGetRemoveTablePropertyWithPermission() throws Exception {
 +    log.debug("Starting userSetGetRemoveTablePropertyWithPermission test ------------------------");
 +
 +    // Make a test username and password
 +    ClusterUser user = getUser(0);
 +    String testUser = user.getPrincipal();
 +    AuthenticationToken testToken = user.getToken();
 +
 +    // As the root user, create the table
 +    // Create a test user and grant that user permission to alter the table
 +    final String tableName = getUniqueNames(1)[0];
 +    final Connector c = getConnector();
 +    c.securityOperations().createLocalUser(testUser, (testToken instanceof PasswordToken ? (PasswordToken) testToken : null));
 +    c.tableOperations().create(tableName);
 +    c.securityOperations().grantTablePermission(testUser, tableName, TablePermission.ALTER_TABLE);
 +
 +    // Set variables for the property name to use and the initial value
 +    String propertyName = "table.custom.description";
 +    String description1 = "Description";
 +
 +    // Make sure the property name is valid
 +    Assert.assertTrue(Property.isValidPropertyKey(propertyName));
 +
 +    // Getting a fresh token will ensure we're logged in as this user (if necessary)
 +    Connector testConn = c.getInstance().getConnector(testUser, user.getToken());
 +    // Set the property to the desired value
 +    testConn.tableOperations().setProperty(tableName, propertyName, description1);
 +
 +    // Loop through properties to make sure the new property is added to the list
 +    int count = 0;
 +    for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
 +      if (property.getKey().equals(propertyName) && property.getValue().equals(description1))
 +        count++;
 +    }
 +    Assert.assertEquals(count, 1);
 +
 +    // Set the property as something different
 +    String description2 = "set second";
 +    testConn.tableOperations().setProperty(tableName, propertyName, description2);
 +
 +    // Loop through properties to make sure the updated value is in the list
 +    count = 0;
 +    for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
 +      if (property.getKey().equals(propertyName) && property.getValue().equals(description2))
 +        count++;
 +    }
 +    Assert.assertEquals(count, 1);
 +
 +    // Remove the property and make sure there is no longer a value associated with it
 +    testConn.tableOperations().removeProperty(tableName, propertyName);
 +
 +    // Loop through properties to make sure the property is no longer in the list
 +    count = 0;
 +    for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
 +      if (property.getKey().equals(propertyName))
 +        count++;
 +    }
 +    Assert.assertEquals(count, 0);
 +
 +  }
 +
 +  // Tests set and get of user added arbitrary properties using a non-root account without permissions to alter tables
 +  @Test
 +  public void userSetGetTablePropertyWithoutPermission() throws Exception {
 +    log.debug("Starting userSetGetTablePropertyWithoutPermission test ------------------------");
 +
 +    // Make a test username and password
 +    ClusterUser user = getUser(1);
 +    String testUser = user.getPrincipal();
 +    AuthenticationToken testToken = user.getToken();
 +
 +    // As the root user, create the table
 +    // Create a test user, but do not grant that user permission to alter the table
 +    final String tableName = getUniqueNames(1)[0];
 +    final Connector c = getConnector();
 +    c.securityOperations().createLocalUser(testUser, (testToken instanceof PasswordToken ? (PasswordToken) testToken : null));
 +    c.tableOperations().create(tableName);
 +
 +    // Set variables for the property name to use and the initial value
 +    String propertyName = "table.custom.description";
 +    String description1 = "Description";
 +
 +    // Make sure the property name is valid
 +    Assert.assertTrue(Property.isValidPropertyKey(propertyName));
 +
 +    // Getting a fresh token will ensure we're logged in as this user (if necessary)
 +    Connector testConn = c.getInstance().getConnector(testUser, user.getToken());
 +
 +    // Try to set the property to the desired value.
 +    // If able to set it, the test fails, since permission was never granted
 +    try {
 +      testConn.tableOperations().setProperty(tableName, propertyName, description1);
 +      Assert.fail("Was able to set property without permissions");
 +    } catch (AccumuloSecurityException e) {}
 +
 +    // Loop through properties to make sure the new property is not added to the list
 +    int count = 0;
 +    for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
 +      if (property.getKey().equals(propertyName))
 +        count++;
 +    }
 +    Assert.assertEquals(count, 0);
 +  }
 +}
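
The test above exercises the full lifecycle of an arbitrary "table.custom.*" property. A condensed sketch of that lifecycle, reusing only the calls shown in the diff (assumes an existing Connector conn and table name, as in the test):

    // set, read back, and remove a custom table property (names mirror the test above)
    conn.tableOperations().setProperty(tableName, "table.custom.description", "Description");
    for (Entry<String,String> prop : conn.tableOperations().getProperties(tableName)) {
      if (prop.getKey().equals("table.custom.description")) {
        // the value set above should be visible here
      }
    }
    conn.tableOperations().removeProperty(tableName, "table.custom.description");
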

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bbf7ade/test/src/main/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java
index b80bcb7,0000000..1aa27d5
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java
@@@ -1,193 -1,0 +1,193 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test;
 +
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 +import org.apache.accumulo.core.client.admin.TimeType;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.harness.SharedMiniClusterBase;
 +import org.junit.Assert;
 +import org.junit.Test;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import com.google.common.collect.Iterators;
 +
 +/**
 + *
 + */
 +public class CreateTableWithNewTableConfigIT extends SharedMiniClusterBase {
 +  static private final Logger log = LoggerFactory.getLogger(CreateTableWithNewTableConfigIT.class);
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 30;
-   };
++  }
 +
 +  public int numProperties(Connector connector, String tableName) throws AccumuloException, TableNotFoundException {
 +    return Iterators.size(connector.tableOperations().getProperties(tableName).iterator());
 +  }
 +
 +  public int compareProperties(Connector connector, String tableNameOrig, String tableName, String changedProp) throws AccumuloException,
 +      TableNotFoundException {
 +    boolean inNew = false;
 +    int countOrig = 0;
 +    for (Entry<String,String> orig : connector.tableOperations().getProperties(tableNameOrig)) {
 +      countOrig++;
 +      for (Entry<String,String> entry : connector.tableOperations().getProperties(tableName)) {
 +        if (entry.equals(orig)) {
 +          inNew = true;
 +          break;
 +        } else if (entry.getKey().equals(orig.getKey()) && !entry.getKey().equals(changedProp))
 +          Assert.fail("Property " + orig.getKey() + " has different value than deprecated method");
 +      }
 +      if (!inNew)
 +        Assert.fail("Original property missing after using the new create method");
 +    }
 +    return countOrig;
 +  }
 +
 +  public boolean checkTimeType(Connector connector, String tableName, TimeType expectedTimeType) throws TableNotFoundException {
 +    final Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
 +    String tableID = connector.tableOperations().tableIdMap().get(tableName) + "<";
 +    for (Entry<Key,Value> entry : scanner) {
 +      Key k = entry.getKey();
 +
 +      if (k.getRow().toString().equals(tableID) && k.getColumnQualifier().toString().equals(ServerColumnFamily.TIME_COLUMN.getColumnQualifier().toString())) {
 +        if (expectedTimeType == TimeType.MILLIS && entry.getValue().toString().charAt(0) == 'M')
 +          return true;
 +        if (expectedTimeType == TimeType.LOGICAL && entry.getValue().toString().charAt(0) == 'L')
 +          return true;
 +      }
 +    }
 +    return false;
 +  }
 +
 +  @SuppressWarnings("deprecation")
 +  @Test
 +  public void tableNameOnly() throws Exception {
 +    log.info("Starting tableNameOnly");
 +
 +    // Create a table with the initial properties
 +    Connector connector = getConnector();
 +    String tableName = getUniqueNames(2)[0];
 +    connector.tableOperations().create(tableName, new NewTableConfiguration());
 +
 +    String tableNameOrig = "original";
 +    connector.tableOperations().create(tableNameOrig, true);
 +
 +    int countNew = numProperties(connector, tableName);
 +    int countOrig = compareProperties(connector, tableNameOrig, tableName, null);
 +
 +    Assert.assertEquals("Extra properties using the new create method", countOrig, countNew);
 +    Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, TimeType.MILLIS));
 +  }
 +
 +  @SuppressWarnings("deprecation")
 +  @Test
 +  public void tableNameAndLimitVersion() throws Exception {
 +    log.info("Starting tableNameAndLimitVersion");
 +
 +    // Create a table with the initial properties
 +    Connector connector = getConnector();
 +    String tableName = getUniqueNames(2)[0];
 +    boolean limitVersion = false;
 +    connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
 +
 +    String tableNameOrig = "originalWithLimitVersion";
 +    connector.tableOperations().create(tableNameOrig, limitVersion);
 +
 +    int countNew = numProperties(connector, tableName);
 +    int countOrig = compareProperties(connector, tableNameOrig, tableName, null);
 +
 +    Assert.assertEquals("Extra properties using the new create method", countOrig, countNew);
 +    Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, TimeType.MILLIS));
 +  }
 +
 +  @SuppressWarnings("deprecation")
 +  @Test
 +  public void tableNameLimitVersionAndTimeType() throws Exception {
 +    log.info("Starting tableNameLimitVersionAndTimeType");
 +
 +    // Create a table with the initial properties
 +    Connector connector = getConnector();
 +    String tableName = getUniqueNames(2)[0];
 +    boolean limitVersion = false;
 +    TimeType tt = TimeType.LOGICAL;
 +    connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators().setTimeType(tt));
 +
 +    String tableNameOrig = "originalWithLimitVersionAndTimeType";
 +    connector.tableOperations().create(tableNameOrig, limitVersion, tt);
 +
 +    int countNew = numProperties(connector, tableName);
 +    int countOrig = compareProperties(connector, tableNameOrig, tableName, null);
 +
 +    Assert.assertEquals("Extra properties using the new create method", countOrig, countNew);
 +    Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, tt));
 +  }
 +
 +  @SuppressWarnings("deprecation")
 +  @Test
 +  public void addCustomPropAndChangeExisting() throws Exception {
 +    log.info("Starting addCustomPropAndChangeExisting");
 +
 +    // Create and populate initial properties map for creating table 1
 +    Map<String,String> properties = new HashMap<String,String>();
 +    String propertyName = Property.TABLE_SPLIT_THRESHOLD.getKey();
 +    String volume = "10K";
 +    properties.put(propertyName, volume);
 +
 +    String propertyName2 = "table.custom.testProp";
 +    String volume2 = "Test property";
 +    properties.put(propertyName2, volume2);
 +
 +    // Create a table with the initial properties
 +    Connector connector = getConnector();
 +    String tableName = getUniqueNames(2)[0];
 +    connector.tableOperations().create(tableName, new NewTableConfiguration().setProperties(properties));
 +
 +    String tableNameOrig = "originalWithTableName";
 +    connector.tableOperations().create(tableNameOrig, true);
 +
 +    int countNew = numProperties(connector, tableName);
 +    int countOrig = compareProperties(connector, tableNameOrig, tableName, propertyName);
 +
 +    for (Entry<String,String> entry : connector.tableOperations().getProperties(tableName)) {
 +      if (entry.getKey().equals(Property.TABLE_SPLIT_THRESHOLD.getKey()))
 +        Assert.assertTrue("TABLE_SPLIT_THRESHOLD has been changed", entry.getValue().equals("10K"));
 +      if (entry.getKey().equals("table.custom.testProp"))
 +        Assert.assertTrue("table.custom.testProp has been changed", entry.getValue().equals("Test property"));
 +    }
 +
 +    Assert.assertEquals("Extra properties using the new create method", countOrig + 1, countNew);
 +    Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, TimeType.MILLIS));
 +
 +  }
 +}
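
The tests above compare tables created through the deprecated create(...) overloads against the NewTableConfiguration builder. A condensed sketch of the builder-based path they exercise (assumes an existing Connector conn and table name, as in the tests):

    // equivalent of the deprecated create(tableName, false, TimeType.LOGICAL) overload
    NewTableConfiguration ntc = new NewTableConfiguration()
        .withoutDefaultIterators()
        .setTimeType(TimeType.LOGICAL);
    conn.tableOperations().create(tableName, ntc);
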

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bbf7ade/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
index c2dee9f,0000000..19e4a73
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
@@@ -1,392 -1,0 +1,392 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.io.File;
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.accumulo.server.fs.PerTableVolumeChooser;
 +import org.apache.accumulo.server.fs.PreferredVolumeChooser;
 +import org.apache.accumulo.server.fs.RandomVolumeChooser;
 +import org.apache.accumulo.test.functional.ConfigurableMacBase;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.fs.RawLocalFileSystem;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +/**
 + *
 + */
 +public class VolumeChooserIT extends ConfigurableMacBase {
 +
 +  private static final Text EMPTY = new Text();
 +  private static final Value EMPTY_VALUE = new Value(new byte[] {});
 +  private File volDirBase;
 +  private Path v1, v2, v3, v4;
 +  private String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
 +  private String namespace1;
 +  private String namespace2;
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 30;
-   };
++  }
 +
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    // Get 2 tablet servers
 +    cfg.setNumTservers(2);
 +    namespace1 = "ns_" + getUniqueNames(2)[0];
 +    namespace2 = "ns_" + getUniqueNames(2)[1];
 +
 +    // Set the general volume chooser to the PerTableVolumeChooser so that different choosers can be specified
 +    Map<String,String> siteConfig = new HashMap<String,String>();
 +    siteConfig.put(Property.GENERAL_VOLUME_CHOOSER.getKey(), PerTableVolumeChooser.class.getName());
 +    cfg.setSiteConfig(siteConfig);
 +
 +    // Set up 4 different volume paths
 +    File baseDir = cfg.getDir();
 +    volDirBase = new File(baseDir, "volumes");
 +    File v1f = new File(volDirBase, "v1");
 +    File v2f = new File(volDirBase, "v2");
 +    File v3f = new File(volDirBase, "v3");
 +    File v4f = new File(volDirBase, "v4");
 +    v1 = new Path("file://" + v1f.getAbsolutePath());
 +    v2 = new Path("file://" + v2f.getAbsolutePath());
 +    v3 = new Path("file://" + v3f.getAbsolutePath());
 +    v4 = new Path("file://" + v4f.getAbsolutePath());
 +
 +    // Only add volumes 1, 2, and 4 to the list of instance volumes to have one volume that isn't in the options list when they are choosing
 +    cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString() + "," + v4.toString());
 +
 +    // use raw local file system so walogs sync and flush will work
 +    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
 +
 +    super.configure(cfg, hadoopCoreSite);
 +
 +  }
 +
 +  public void addSplits(Connector connector, String tableName) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
 +    // Add 10 splits to the table
 +    SortedSet<Text> partitions = new TreeSet<Text>();
 +    for (String s : "b,e,g,j,l,o,q,t,v,y".split(","))
 +      partitions.add(new Text(s));
 +    connector.tableOperations().addSplits(tableName, partitions);
 +  }
 +
 +  public void writeAndReadData(Connector connector, String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    // Write some data to the table
 +    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
 +    for (String s : rows) {
 +      Mutation m = new Mutation(new Text(s));
 +      m.put(EMPTY, EMPTY, EMPTY_VALUE);
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +
 +    // Write the data to disk, read it back
 +    connector.tableOperations().flush(tableName, null, null, true);
 +    Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
 +    int i = 0;
 +    for (Entry<Key,Value> entry : scanner) {
 +      assertEquals("Data read is not data written", rows[i++], entry.getKey().getRow().toString());
 +    }
 +  }
 +
 +  public void verifyVolumes(Connector connector, String tableName, Range tableRange, String vol) throws TableNotFoundException {
 +    // Verify the new files are written to the Volumes specified
 +    ArrayList<String> volumes = new ArrayList<String>();
 +    for (String s : vol.split(","))
 +      volumes.add(s);
 +
 +    Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
 +    scanner.setRange(tableRange);
 +    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
 +    int fileCount = 0;
 +    for (Entry<Key,Value> entry : scanner) {
 +      boolean inVolume = false;
 +      for (String volume : volumes) {
 +        if (entry.getKey().getColumnQualifier().toString().contains(volume))
 +          inVolume = true;
 +      }
 +      assertTrue("Data not written to the correct volumes", inVolume);
 +      fileCount++;
 +    }
 +    assertEquals("Wrong number of files", 11, fileCount);
 +  }
 +
 +  // Test that uses two tables with 10 split points each. They each use the PreferredVolumeChooser to choose volumes.
 +  @Test
 +  public void twoTablesPreferredVolumeChooser() throws Exception {
 +    log.info("Starting twoTablesPreferredVolumeChooser");
 +
 +    // Create namespace
 +    Connector connector = getConnector();
 +    connector.namespaceOperations().create(namespace1);
 +
 +    // Set properties on the namespace
 +    String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
 +    String volume = PreferredVolumeChooser.class.getName();
 +    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
 +
 +    propertyName = "table.custom.preferredVolumes";
 +    volume = v2.toString();
 +    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
 +
 +    // Create table1 on namespace1
 +    String tableName = namespace1 + ".1";
 +    connector.tableOperations().create(tableName);
 +    String tableID = connector.tableOperations().tableIdMap().get(tableName);
 +
 +    // Add 10 splits to the table
 +    addSplits(connector, tableName);
 +    // Write some data to the table
 +    writeAndReadData(connector, tableName);
 +    // Verify the new files are written to the Volumes specified
 +    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), volume);
 +
 +    connector.namespaceOperations().create(namespace2);
 +
 +    // Set properties on the namespace
 +    propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
 +    volume = PreferredVolumeChooser.class.getName();
 +    connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
 +
 +    propertyName = "table.custom.preferredVolumes";
 +    volume = v1.toString();
 +    connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
 +
 +    // Create table2 on namespace2
 +    String tableName2 = namespace2 + ".1";
 +
 +    connector.tableOperations().create(tableName2);
 +    String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
 +
 +    // Add 10 splits to the table
 +    addSplits(connector, tableName2);
 +    // Write some data to the table
 +    writeAndReadData(connector, tableName2);
 +    // Verify the new files are written to the Volumes specified
 +    verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), volume);
 +  }
 +
 +  // Test that uses two tables with 10 split points each. They each use the RandomVolumeChooser to choose volumes.
 +  @Test
 +  public void twoTablesRandomVolumeChooser() throws Exception {
 +    log.info("Starting twoTablesRandomVolumeChooser()");
 +
 +    // Create namespace
 +    Connector connector = getConnector();
 +    connector.namespaceOperations().create(namespace1);
 +
 +    // Set properties on the namespace
 +    String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
 +    String volume = RandomVolumeChooser.class.getName();
 +    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
 +
 +    // Create table1 on namespace1
 +    String tableName = namespace1 + ".1";
 +    connector.tableOperations().create(tableName);
 +    String tableID = connector.tableOperations().tableIdMap().get(tableName);
 +
 +    // Add 10 splits to the table
 +    addSplits(connector, tableName);
 +    // Write some data to the table
 +    writeAndReadData(connector, tableName);
 +    // Verify the new files are written to the Volumes specified
 +
 +    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
 +
 +    connector.namespaceOperations().create(namespace2);
 +
 +    // Set properties on the namespace
 +    propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
 +    volume = RandomVolumeChooser.class.getName();
 +    connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
 +
 +    // Create table2 on namespace2
 +    String tableName2 = namespace2 + ".1";
 +    connector.tableOperations().create(tableName2);
 +    String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
 +
 +    // Add 10 splits to the table
 +    addSplits(connector, tableName2);
 +    // Write some data to the table
 +    writeAndReadData(connector, tableName2);
 +    // Verify the new files are written to the Volumes specified
 +    verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), v1.toString() + "," + v2.toString() + "," + v4.toString());
 +  }
 +
 +  // Test that uses two tables with 10 split points each. The first uses the RandomVolumeChooser and the second uses the
 +  // PreferredVolumeChooser to choose volumes.
 +  @Test
 +  public void twoTablesDiffChoosers() throws Exception {
 +    log.info("Starting twoTablesDiffChoosers");
 +
 +    // Create namespace
 +    Connector connector = getConnector();
 +    connector.namespaceOperations().create(namespace1);
 +
 +    // Set properties on the namespace
 +    String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
 +    String volume = RandomVolumeChooser.class.getName();
 +    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
 +
 +    // Create table1 on namespace1
 +    String tableName = namespace1 + ".1";
 +    connector.tableOperations().create(tableName);
 +    String tableID = connector.tableOperations().tableIdMap().get(tableName);
 +
 +    // Add 10 splits to the table
 +    addSplits(connector, tableName);
 +    // Write some data to the table
 +    writeAndReadData(connector, tableName);
 +    // Verify the new files are written to the Volumes specified
 +
 +    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
 +
 +    connector.namespaceOperations().create(namespace2);
 +
 +    // Set properties on the namespace
 +    propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
 +    volume = PreferredVolumeChooser.class.getName();
 +    connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
 +
 +    propertyName = "table.custom.preferredVolumes";
 +    volume = v1.toString();
 +    connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
 +
 +    // Create table2 on namespace2
 +    String tableName2 = namespace2 + ".1";
 +    connector.tableOperations().create(tableName2);
 +    String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
 +
 +    // Add 10 splits to the table
 +    addSplits(connector, tableName2);
 +    // Write some data to the table
 +    writeAndReadData(connector, tableName2);
 +    // Verify the new files are written to the Volumes specified
 +    verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), volume);
 +  }
 +
 +  // Test that uses one table with 10 split points. It uses the PreferredVolumeChooser, but no preferred volume is specified. This means that the volume
 +  // is chosen randomly from all instance volumes.
 +  @Test
 +  public void missingVolumePreferredVolumeChooser() throws Exception {
 +    log.info("Starting missingVolumePreferredVolumeChooser");
 +
 +    // Create namespace
 +    Connector connector = getConnector();
 +    connector.namespaceOperations().create(namespace1);
 +
 +    // Set properties on the namespace
 +    String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
 +    String volume = PreferredVolumeChooser.class.getName();
 +    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
 +
 +    // Create table1 on namespace1
 +    String tableName = namespace1 + ".1";
 +    connector.tableOperations().create(tableName);
 +    String tableID = connector.tableOperations().tableIdMap().get(tableName);
 +
 +    // Add 10 splits to the table
 +    addSplits(connector, tableName);
 +    // Write some data to the table
 +    writeAndReadData(connector, tableName);
 +    // Verify the new files are written to the Volumes specified
 +    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
 +  }
 +
 +  // Test that uses one table with 10 split points. It uses the PreferredVolumeChooser, but the preferred volume is not an instance volume. This means that the
 +  // volume is chosen randomly from all instance volumes.
 +  @Test
 +  public void notInstancePreferredVolumeChooser() throws Exception {
 +    log.info("Starting notInstancePreferredVolumeChooser");
 +
 +    // Create namespace
 +    Connector connector = getConnector();
 +    connector.namespaceOperations().create(namespace1);
 +
 +    // Set properties on the namespace
 +    String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
 +    String volume = PreferredVolumeChooser.class.getName();
 +    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
 +
 +    propertyName = "table.custom.preferredVolumes";
 +    volume = v3.toString();
 +    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
 +
 +    // Create table1 on namespace1
 +    String tableName = namespace1 + ".1";
 +    connector.tableOperations().create(tableName);
 +    String tableID = connector.tableOperations().tableIdMap().get(tableName);
 +
 +    // Add 10 splits to the table
 +    addSplits(connector, tableName);
 +    // Write some data to the table
 +    writeAndReadData(connector, tableName);
 +    // Verify the new files are written to the Volumes specified
 +    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
 +  }
 +
 +  // Test that uses one table with 10 split points. It does not specify a chooser, so the volume is chosen randomly from all instance volumes.
 +  @Test
 +  public void chooserNotSpecified() throws Exception {
 +    log.info("Starting chooserNotSpecified");
 +
 +    // Create a table
 +    Connector connector = getConnector();
 +    String tableName = getUniqueNames(2)[0];
 +    connector.tableOperations().create(tableName);
 +    String tableID = connector.tableOperations().tableIdMap().get(tableName);
 +
 +    // Add 10 splits to the table
 +    addSplits(connector, tableName);
 +    // Write some data to the table
 +    writeAndReadData(connector, tableName);
 +
 +    // Verify the new files are written to the Volumes specified
 +    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
 +  }
 +
 +}
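
The volume chooser tests above all follow the same configuration pattern: point a namespace at a chooser implementation and, for the PreferredVolumeChooser, name the preferred volumes through a custom property. A condensed sketch of that pattern, reusing the properties and classes from the diff (assumes an existing Connector conn, a created namespace, and an instance volume path v2, as in the tests):

    // route tables in this namespace to the preferred volume v2
    conn.namespaceOperations().setProperty(namespace1, Property.TABLE_VOLUME_CHOOSER.getKey(),
        PreferredVolumeChooser.class.getName());
    conn.namespaceOperations().setProperty(namespace1, "table.custom.preferredVolumes", v2.toString());
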

