accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject [38/50] [abbrv] Merge branch '1.5' into 1.6
Date Sat, 01 Nov 2014 04:57:32 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/test/java/org/apache/accumulo/core/data/ValueTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/data/ValueTest.java
index cd26743,0000000..8022c14
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/data/ValueTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/data/ValueTest.java
@@@ -1,210 -1,0 +1,211 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.data;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.io.ByteArrayInputStream;
 +import java.io.ByteArrayOutputStream;
 +import java.io.DataInputStream;
 +import java.io.DataOutputStream;
 +import java.nio.ByteBuffer;
- import java.nio.charset.Charset;
 +import java.util.List;
 +
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import static org.easymock.EasyMock.*;
 +import static org.junit.Assert.*;
 +
 +public class ValueTest {
 +  private static final byte[] toBytes(String s) {
-     return s.getBytes(Charset.forName("UTF-8"));
++    return s.getBytes(UTF_8);
 +  }
 +
 +  private static final byte[] DATA = toBytes("data");
 +  private static final ByteBuffer DATABUFF = ByteBuffer.allocate(DATA.length);
 +  static {
 +    DATABUFF.put(DATA);
 +  }
 +
 +  @Before
 +  public void setUp() throws Exception {
 +    DATABUFF.rewind();
 +  }
 +
 +  @Test
 +  public void testDefault() {
 +    Value v = new Value();
 +    assertEquals(0, v.get().length);
 +  }
 +
 +  @Test(expected=NullPointerException.class)
 +  public void testNullBytesConstructor() {
 +    new Value((byte[]) null);
 +  }
 +
 +  @Test(expected=NullPointerException.class)
 +  public void testNullCopyConstructor() {
 +    new Value((Value) null);
 +  }
 +
 +  @Test(expected=NullPointerException.class)
 +  public void testNullByteBufferConstructor() {
 +    new Value((ByteBuffer)null);
 +  }
 +
 +  @Test(expected=NullPointerException.class)
 +  public void testNullSet() {
 +    Value v = new Value();
 +    v.set(null);
 +  }
 +
 +  @Test
 +  public void testByteArray() {
 +    Value v = new Value(DATA);
 +    assertArrayEquals(DATA, v.get());
 +    assertSame(DATA, v.get());
 +  }
 +
 +  @Test
 +  public void testByteArrayCopy() {
 +    Value v = new Value(DATA, true);
 +    assertArrayEquals(DATA, v.get());
 +    assertNotSame(DATA, v.get());
 +  }
 +
 +  @Test
 +  public void testByteBuffer() {
 +    Value v = new Value(DATABUFF);
 +    assertArrayEquals(DATA, v.get());
 +  }
 +
 +  @Test
 +  public void testByteBufferCopy() {
 +    @SuppressWarnings("deprecation")
 +    Value v = new Value(DATABUFF, true);
 +    assertArrayEquals(DATA, v.get());
 +  }
 +
 +  @Test
 +  public void testValueCopy() {
 +    Value ov = createMock(Value.class);
 +    expect(ov.get()).andReturn(DATA);
 +    expect(ov.getSize()).andReturn(4);
 +    replay(ov);
 +    Value v = new Value(ov);
 +    assertArrayEquals(DATA, v.get());
 +  }
 +
 +  @Test
 +  public void testByteArrayOffsetLength() {
 +    Value v = new Value(DATA, 0, 4);
 +    assertArrayEquals(DATA, v.get());
 +  }
 +
 +  @Test
 +  public void testSet() {
 +    Value v = new Value();
 +    v.set(DATA);
 +    assertArrayEquals(DATA, v.get());
 +    assertSame(DATA, v.get());
 +  }
 +
 +  @Test
 +  public void testCopy() {
 +    Value v = new Value();
 +    v.copy(DATA);
 +    assertArrayEquals(DATA, v.get());
 +    assertNotSame(DATA, v.get());
 +  }
 +
 +  @Test
 +  public void testGetSize() {
 +    Value v = new Value(DATA);
 +    assertEquals(DATA.length, v.getSize());
 +  }
 +
 +  @Test
 +  public void testGetSizeDefault() {
 +    Value v = new Value();
 +    assertEquals(0, v.getSize());
 +  }
 +
 +  @Test
 +  public void testWriteRead() throws Exception {
 +    Value v = new Value(DATA);
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    DataOutputStream dos = new DataOutputStream(baos);
 +    v.write(dos);
 +    dos.close();
 +    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
 +    DataInputStream dis = new DataInputStream(bais);
 +    Value v2 = new Value();
 +    v2.readFields(dis);
 +    dis.close();
 +    assertArrayEquals(DATA, v2.get());
 +  }
 +
 +  @Test
 +  public void testHashCode() {
 +    Value v1 = new Value(DATA);
 +    Value v2 = new Value(DATA);
 +    assertEquals(v1.hashCode(), v2.hashCode());
 +  }
 +
 +  @Test
 +  public void testCompareTo() {
 +    Value v1 = new Value(DATA);
 +    Value v2 = new Value(toBytes("datb"));
 +    assertTrue(v1.compareTo(v2) < 0);
 +    assertTrue(v2.compareTo(v1) > 0);
 +    Value v1a = new Value(DATA);
 +    assertTrue(v1.compareTo(v1a) == 0);
 +    Value v3 = new Value(toBytes("datc"));
 +    assertTrue(v2.compareTo(v3) < 0);
 +    assertTrue(v1.compareTo(v3) < 0);
 +  }
 +
 +  @Test
 +  public void testEquals() {
 +    Value v1 = new Value(DATA);
 +    assertTrue(v1.equals(v1));
 +    Value v2 = new Value(DATA);
 +    assertTrue(v1.equals(v2));
 +    assertTrue(v2.equals(v1));
 +    Value v3 = new Value(toBytes("datb"));
 +    assertFalse(v1.equals(v3));
 +  }
 +
 +  @Test
 +  public void testToArray() {
 +    List<byte[]> l = new java.util.ArrayList<byte[]>();
 +    byte[] one = toBytes("one");
 +    byte[] two = toBytes("two");
 +    byte[] three = toBytes("three");
 +    l.add(one);
 +    l.add(two);
 +    l.add(three);
 +
 +    byte[][] a = Value.toArray(l);
 +    assertEquals(3, a.length);
 +    assertArrayEquals(one, a[0]);
 +    assertArrayEquals(two, a[1]);
 +    assertArrayEquals(three, a[2]);
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
index 931ff41,6c4e814..2577e48
--- a/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
@@@ -16,19 -16,11 +16,19 @@@
   */
  package org.apache.accumulo.core.security;
  
++import static com.google.common.base.Charsets.UTF_8;
  import static org.apache.accumulo.core.security.ColumnVisibility.quote;
  import static org.junit.Assert.assertArrayEquals;
  import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
  import static org.junit.Assert.fail;
  
 +import java.util.Comparator;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.security.ColumnVisibility.Node;
 +import org.apache.accumulo.core.security.ColumnVisibility.NodeComparator;
 +import org.apache.accumulo.core.security.ColumnVisibility.NodeType;
 +import org.apache.hadoop.io.Text;
  import org.junit.Test;
  
  public class ColumnVisibilityTest {
@@@ -166,83 -152,4 +166,83 @@@
      cv = new ColumnVisibility(quote("五"));
      assertEquals("[\"五\"]", cv.toString());
    }
 +
 +  @Test
 +  public void testParseTree() {
 +    Node node = parse("(W)|(U&V)");
 +    assertNode(node, NodeType.OR, 0, 9);
 +    assertNode(node.getChildren().get(0), NodeType.TERM, 1, 2);
 +    assertNode(node.getChildren().get(1), NodeType.AND, 5, 8);
 +  }
 +
 +  @Test
 +  public void testParseTreeWithNoChildren() {
 +    Node node = parse("ABC");
 +    assertNode(node, NodeType.TERM, 0, 3);
 +  }
 +
 +  @Test
 +  public void testParseTreeWithTwoChildren() {
 +    Node node = parse("ABC|DEF");
 +    assertNode(node, NodeType.OR, 0, 7);
 +    assertNode(node.getChildren().get(0), NodeType.TERM, 0, 3);
 +    assertNode(node.getChildren().get(1), NodeType.TERM, 4, 7);
 +  }
 +
 +  @Test
 +  public void testParseTreeWithParenthesesAndTwoChildren() {
 +    Node node = parse("(ABC|DEF)");
 +    assertNode(node, NodeType.OR, 1, 8);
 +    assertNode(node.getChildren().get(0), NodeType.TERM, 1, 4);
 +    assertNode(node.getChildren().get(1), NodeType.TERM, 5, 8);
 +  }
 +
 +  @Test
 +  public void testParseTreeWithParenthesizedChildren() {
 +    Node node = parse("ABC|(DEF&GHI)");
 +    assertNode(node, NodeType.OR, 0, 13);
 +    assertNode(node.getChildren().get(0), NodeType.TERM, 0, 3);
 +    assertNode(node.getChildren().get(1), NodeType.AND, 5, 12);
 +    assertNode(node.getChildren().get(1).children.get(0), NodeType.TERM, 5, 8);
 +    assertNode(node.getChildren().get(1).children.get(1), NodeType.TERM, 9, 12);
 +  }
 +
 +  @Test
 +  public void testParseTreeWithMoreParentheses() {
 +    Node node = parse("(W)|(U&V)");
 +    assertNode(node, NodeType.OR, 0, 9);
 +    assertNode(node.getChildren().get(0), NodeType.TERM, 1, 2);
 +    assertNode(node.getChildren().get(1), NodeType.AND, 5, 8);
 +    assertNode(node.getChildren().get(1).children.get(0), NodeType.TERM, 5, 6);
 +    assertNode(node.getChildren().get(1).children.get(1), NodeType.TERM, 7, 8);
 +  }
 +
 +  @Test
 +  public void testEmptyParseTreesAreEqual() {
 +    Comparator<Node> comparator = new NodeComparator(new byte[] {});
 +    Node empty = new ColumnVisibility().getParseTree();
 +    assertEquals(0, comparator.compare(empty, parse("")));
 +  }
 +
 +  @Test
 +  public void testParseTreesOrdering() {
-     byte[] expression = "(b&c&d)|((a|m)&y&z)|(e&f)".getBytes(Constants.UTF8);
++    byte[] expression = "(b&c&d)|((a|m)&y&z)|(e&f)".getBytes(UTF_8);
 +    byte[] flattened = new ColumnVisibility(expression).flatten();
 +
 +    // Convert to String for indexOf convenience
-     String flat = new String(flattened, Constants.UTF8);
++    String flat = new String(flattened, UTF_8);
 +    assertTrue("shortest expressions sort first", flat.indexOf('e') < flat.indexOf('|'));
 +    assertTrue("shortest children sort first", flat.indexOf('b') < flat.indexOf('a'));
 +  }
 +
 +  private Node parse(String s) {
 +    ColumnVisibility v = new ColumnVisibility(s);
 +    return v.getParseTree();
 +  }
 +
 +  private void assertNode(Node node, NodeType nodeType, int start, int end) {
 +    assertEquals(node.type, nodeType);
 +    assertEquals(start, node.start);
 +    assertEquals(end, node.end);
 +  }
  }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/test/java/org/apache/accumulo/core/security/CredentialsTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/security/CredentialsTest.java
index b925e4a,0000000..a4e9f02
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/security/CredentialsTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/security/CredentialsTest.java
@@@ -1,143 -1,0 +1,143 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.security;
 +
++import static com.google.common.base.Charsets.UTF_8;
 +import static org.junit.Assert.assertArrayEquals;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertTrue;
 +import static org.junit.Assert.fail;
 +
 +import javax.security.auth.DestroyFailedException;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.SecurityErrorCode;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 +import org.apache.accumulo.core.client.security.tokens.NullToken;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.security.thrift.TCredentials;
 +import org.junit.Test;
 +
 +/**
 + * 
 + */
 +public class CredentialsTest {
 +  
 +  @Test
 +  public void testToThrift() throws DestroyFailedException {
 +    // verify thrift serialization
 +    Credentials creds = new Credentials("test", new PasswordToken("testing"));
 +    TCredentials tCreds = creds.toThrift(new MockInstance());
 +    assertEquals("test", tCreds.getPrincipal());
 +    assertEquals(PasswordToken.class.getName(), tCreds.getTokenClassName());
 +    assertArrayEquals(AuthenticationTokenSerializer.serialize(new PasswordToken("testing")), tCreds.getToken());
 +    
 +    // verify that we can't serialize if it's destroyed
 +    creds.getToken().destroy();
 +    try {
 +      creds.toThrift(new MockInstance());
 +      fail();
 +    } catch (Exception e) {
 +      assertTrue(e instanceof RuntimeException);
 +      assertTrue(e.getCause() instanceof AccumuloSecurityException);
 +      assertTrue(AccumuloSecurityException.class.cast(e.getCause()).getSecurityErrorCode().equals(SecurityErrorCode.TOKEN_EXPIRED));
 +    }
 +  }
 +
 +  @Test
 +  public void roundtripThrift() throws DestroyFailedException {
 +    Credentials creds = new Credentials("test", new PasswordToken("testing"));
 +    TCredentials tCreds = creds.toThrift(new MockInstance());
 +    Credentials roundtrip = Credentials.fromThrift(tCreds);
 +    assertEquals("Roundtrip through thirft changed credentials equality", creds, roundtrip);
 +  }
 +  
 +  @Test
 +  public void testMockConnector() throws AccumuloException, DestroyFailedException, AccumuloSecurityException {
 +    Instance inst = new MockInstance();
 +    Connector rootConnector = inst.getConnector("root", new PasswordToken());
 +    PasswordToken testToken = new PasswordToken("testPass");
 +    rootConnector.securityOperations().createLocalUser("testUser", testToken);
 +    
 +    assertFalse(testToken.isDestroyed());
 +    testToken.destroy();
 +    assertTrue(testToken.isDestroyed());
 +    try {
 +      inst.getConnector("testUser", testToken);
 +      fail();
 +    } catch (AccumuloSecurityException e) {
 +      assertTrue(e.getSecurityErrorCode().equals(SecurityErrorCode.TOKEN_EXPIRED));
 +    }
 +  }
 +  
 +  @Test
 +  public void testEqualsAndHashCode() {
 +    Credentials nullNullCreds = new Credentials(null, null);
 +    Credentials abcNullCreds = new Credentials("abc", new NullToken());
 +    Credentials cbaNullCreds = new Credentials("cba", new NullToken());
 +    Credentials abcBlahCreds = new Credentials("abc", new PasswordToken("blah"));
 +    
 +    // check hash codes
 +    assertEquals(0, nullNullCreds.hashCode());
 +    assertEquals("abc".hashCode(), abcNullCreds.hashCode());
 +    assertEquals(abcNullCreds.hashCode(), abcBlahCreds.hashCode());
 +    assertFalse(abcNullCreds.hashCode() == cbaNullCreds.hashCode());
 +    
 +    // identity
 +    assertEquals(abcNullCreds, abcNullCreds);
 +    assertEquals(new Credentials("abc", new NullToken()), abcNullCreds);
 +    // equal, but different token constructors
-     assertEquals(new Credentials("abc", new PasswordToken("abc".getBytes(Constants.UTF8))), new Credentials("abc", new PasswordToken("abc")));
++    assertEquals(new Credentials("abc", new PasswordToken("abc".getBytes(UTF_8))), new Credentials("abc", new PasswordToken("abc")));
 +    // test not equals
 +    assertFalse(nullNullCreds.equals(abcBlahCreds));
 +    assertFalse(nullNullCreds.equals(abcNullCreds));
 +    assertFalse(abcNullCreds.equals(abcBlahCreds));
 +  }
 +  
 +  @Test
 +  public void testCredentialsSerialization() throws AccumuloSecurityException {
-     Credentials creds = new Credentials("a:b-c", new PasswordToken("d-e-f".getBytes(Constants.UTF8)));
++    Credentials creds = new Credentials("a:b-c", new PasswordToken("d-e-f".getBytes(UTF_8)));
 +    String serialized = creds.serialize();
 +    Credentials result = Credentials.deserialize(serialized);
 +    assertEquals(creds, result);
 +    assertEquals("a:b-c", result.getPrincipal());
 +    assertEquals(new PasswordToken("d-e-f"), result.getToken());
 +    
 +    Credentials nullNullCreds = new Credentials(null, null);
 +    serialized = nullNullCreds.serialize();
 +    result = Credentials.deserialize(serialized);
 +    assertEquals(null, result.getPrincipal());
 +    assertEquals(null, result.getToken());
 +  }
 +  
 +  @Test
 +  public void testToString() {
 +    Credentials creds = new Credentials(null, null);
 +    assertEquals(Credentials.class.getName() + ":null:null:<hidden>", creds.toString());
 +    creds = new Credentials("", new NullToken());
 +    assertEquals(Credentials.class.getName() + "::" + NullToken.class.getName() + ":<hidden>", creds.toString());
 +    creds = new Credentials("abc", null);
 +    assertEquals(Credentials.class.getName() + ":abc:null:<hidden>", creds.toString());
 +    creds = new Credentials("abc", new PasswordToken(""));
 +    assertEquals(Credentials.class.getName() + ":abc:" + PasswordToken.class.getName() + ":<hidden>", creds.toString());
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/test/java/org/apache/accumulo/core/security/VisibilityConstraintTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/security/VisibilityConstraintTest.java
index d31a788,0000000..523279a
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/security/VisibilityConstraintTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/security/VisibilityConstraintTest.java
@@@ -1,103 -1,0 +1,103 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.security;
 +
++import static com.google.common.base.Charsets.UTF_8;
 +import static org.easymock.EasyMock.createMock;
 +import static org.easymock.EasyMock.createNiceMock;
 +import static org.easymock.EasyMock.expect;
 +import static org.easymock.EasyMock.replay;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertNull;
 +
 +import java.util.Arrays;
 +import java.util.List;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.constraints.Constraint.Environment;
 +import org.apache.accumulo.core.data.ArrayByteSequence;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.junit.Before;
 +import org.junit.Ignore;
 +import org.junit.Test;
 +
 +public class VisibilityConstraintTest {
 +
 +  VisibilityConstraint vc;
 +  Environment env;
 +  Mutation mutation;
 +
 +  static final ColumnVisibility good = new ColumnVisibility("good");
 +  static final ColumnVisibility bad = new ColumnVisibility("bad");
 +
 +  static final String D = "don't care";
 +
 +  static final List<Short> ENOAUTH = Arrays.asList((short) 2);
 +
 +  @Before
 +  public void setUp() throws Exception {
 +    vc = new VisibilityConstraint();
 +    mutation = new Mutation("r");
 +
-     ArrayByteSequence bs = new ArrayByteSequence("good".getBytes(Constants.UTF8));
++    ArrayByteSequence bs = new ArrayByteSequence("good".getBytes(UTF_8));
 +
 +    AuthorizationContainer ac = createNiceMock(AuthorizationContainer.class);
 +    expect(ac.contains(bs)).andReturn(true);
 +    replay(ac);
 +
 +    env = createMock(Environment.class);
 +    expect(env.getAuthorizationsContainer()).andReturn(ac);
 +    replay(env);
 +  }
 +
 +  @Test
 +  public void testNoVisibility() {
 +    mutation.put(D, D, D);
 +    assertNull("authorized", vc.check(env, mutation));
 +  }
 +
 +  @Test
 +  public void testVisibilityNoAuth() {
 +    mutation.put(D, D, bad, D);
 +    assertEquals("unauthorized", ENOAUTH, vc.check(env, mutation));
 +  }
 +
 +  @Test
 +  public void testGoodVisibilityAuth() {
 +    mutation.put(D, D, good, D);
 +    assertNull("authorized", vc.check(env, mutation));
 +  }
 +
 +  @Test
 +  public void testCachedVisibilities() {
 +    mutation.put(D, D, good, "v");
 +    mutation.put(D, D, good, "v2");
 +    assertNull("authorized", vc.check(env, mutation));
 +  }
 +
 +  @Test
 +  public void testMixedVisibilities() {
 +    mutation.put(D, D, bad, D);
 +    mutation.put(D, D, good, D);
 +    assertEquals("unauthorized", ENOAUTH, vc.check(env, mutation));
 +  }
 +
 +  @Test
 +  @Ignore
 +  public void testMalformedVisibility() {
 +    // TODO: ACCUMULO-1006 Should test for returning error code 1, but not sure how since ColumnVisibility won't let us construct a bad one in the first place
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/test/java/org/apache/accumulo/core/security/crypto/BlockedIOStreamTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/security/crypto/BlockedIOStreamTest.java
index ea64ac7,0000000..9078ef9
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/security/crypto/BlockedIOStreamTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/security/crypto/BlockedIOStreamTest.java
@@@ -1,162 -1,0 +1,162 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.security.crypto;
 +
++import static com.google.common.base.Charsets.UTF_8;
 +import static org.junit.Assert.assertArrayEquals;
 +import static org.junit.Assert.assertEquals;
 +
 +import java.io.ByteArrayInputStream;
 +import java.io.ByteArrayOutputStream;
 +import java.io.DataInputStream;
 +import java.io.IOException;
 +import java.util.Arrays;
 +import java.util.Random;
 +
- import org.apache.accumulo.core.Constants;
 +import org.junit.Test;
 +
 +public class BlockedIOStreamTest {
 +  @Test
 +  public void testLargeBlockIO() throws IOException {
 +    writeRead(1024, 2048);
 +  }
 +
 +  private void writeRead(int blockSize, int expectedSize) throws IOException {
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    BlockedOutputStream blockOut = new BlockedOutputStream(baos, blockSize, 1);
 +
 +    String contentString = "My Blocked Content String";
-     byte[] content = contentString.getBytes(Constants.UTF8);
++    byte[] content = contentString.getBytes(UTF_8);
 +    blockOut.write(content);
 +    blockOut.flush();
 +
 +    String contentString2 = "My Other Blocked Content String";
-     byte[] content2 = contentString2.getBytes(Constants.UTF8);
++    byte[] content2 = contentString2.getBytes(UTF_8);
 +    blockOut.write(content2);
 +    blockOut.flush();
 +
 +    blockOut.close();
 +    byte[] written = baos.toByteArray();
 +    assertEquals(expectedSize, written.length);
 +
 +    ByteArrayInputStream biis = new ByteArrayInputStream(written);
 +    BlockedInputStream blockIn = new BlockedInputStream(biis, blockSize, blockSize);
 +    DataInputStream dIn = new DataInputStream(blockIn);
 +
 +    dIn.readFully(content, 0, content.length);
-     String readContentString = new String(content, Constants.UTF8);
++    String readContentString = new String(content, UTF_8);
 +
 +    assertEquals(contentString, readContentString);
 +
 +    dIn.readFully(content2, 0, content2.length);
-     String readContentString2 = new String(content2, Constants.UTF8);
++    String readContentString2 = new String(content2, UTF_8);
 +
 +    assertEquals(contentString2, readContentString2);
 +
 +    blockIn.close();
 +  }
 +
 +  @Test
 +  public void testSmallBufferBlockedIO() throws IOException {
 +    writeRead(16, (12 + 4) * (int) (Math.ceil(25.0 / 12) + Math.ceil(31.0 / 12)));
 +  }
 +
 +  @Test
 +  public void testSpillingOverOutputStream() throws IOException {
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    // buffer will be size 12
 +    BlockedOutputStream blockOut = new BlockedOutputStream(baos, 16, 16);
 +    Random r = new Random(22);
 +
 +    byte[] undersized = new byte[11];
 +    byte[] perfectSized = new byte[12];
 +    byte[] overSized = new byte[13];
 +    byte[] perfectlyOversized = new byte[13];
 +    byte filler = (byte) r.nextInt();
 +
 +    r.nextBytes(undersized);
 +    r.nextBytes(perfectSized);
 +    r.nextBytes(overSized);
 +    r.nextBytes(perfectlyOversized);
 +
 +    // 1 block
 +    blockOut.write(undersized);
 +    blockOut.write(filler);
 +    blockOut.flush();
 +
 +    // 2 blocks
 +    blockOut.write(perfectSized);
 +    blockOut.write(filler);
 +    blockOut.flush();
 +
 +    // 2 blocks
 +    blockOut.write(overSized);
 +    blockOut.write(filler);
 +    blockOut.flush();
 +
 +    // 3 blocks
 +    blockOut.write(undersized);
 +    blockOut.write(perfectlyOversized);
 +    blockOut.write(filler);
 +    blockOut.flush();
 +
 +    blockOut.close();
 +    assertEquals(16 * 8, baos.toByteArray().length);
 +  }
 +
 +  @Test
 +  public void testGiantWrite() throws IOException {
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    int blockSize = 16;
 +    // buffer will be size 12
 +    BlockedOutputStream blockOut = new BlockedOutputStream(baos, blockSize, blockSize);
 +    Random r = new Random(22);
 +
 +    int size = 1024 * 1024 * 128;
 +    byte[] giant = new byte[size];
 +    byte[] pattern = new byte[1024];
 +
 +    r.nextBytes(pattern);
 +
 +    for (int i = 0; i < size / 1024; i++) {
 +      System.arraycopy(pattern, 0, giant, i * 1024, 1024);
 +    }
 +
 +    blockOut.write(giant);
 +    blockOut.flush();
 +
 +    blockOut.close();
 +    baos.close();
 +
 +    int blocks = (int) Math.ceil(size / (blockSize - 4.0));
 +    byte[] byteStream = baos.toByteArray();
 +
 +    assertEquals(blocks * 16, byteStream.length);
 +
 +    DataInputStream blockIn = new DataInputStream(new BlockedInputStream(new ByteArrayInputStream(byteStream), blockSize, blockSize));
 +    Arrays.fill(giant, (byte) 0);
 +    blockIn.readFully(giant, 0, size);
 +    blockIn.close();
 +
 +    for (int i = 0; i < size / 1024; i++) {
 +      byte[] readChunk = new byte[1024];
 +      System.arraycopy(giant, i * 1024, readChunk, 0, 1024);
 +      assertArrayEquals(pattern, readChunk);
 +    }
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/test/java/org/apache/accumulo/core/util/format/DeleterFormatterTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/util/format/DeleterFormatterTest.java
index 36def05,0000000..a2e4f7a
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/util/format/DeleterFormatterTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/util/format/DeleterFormatterTest.java
@@@ -1,176 -1,0 +1,176 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.util.format;
 +
- import static org.apache.accumulo.core.Constants.UTF8;
++import static com.google.common.base.Charsets.UTF_8;
 +import static org.easymock.EasyMock.anyObject;
 +import static org.easymock.EasyMock.createMock;
 +import static org.easymock.EasyMock.createNiceMock;
 +import static org.easymock.EasyMock.expect;
 +import static org.easymock.EasyMock.expectLastCall;
 +import static org.easymock.EasyMock.replay;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNull;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.io.ByteArrayInputStream;
 +import java.io.ByteArrayOutputStream;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.util.Collections;
 +import java.util.Map;
 +import java.util.TreeMap;
 +
 +import jline.UnsupportedTerminal;
 +import jline.console.ConsoleReader;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.MutationsRejectedException;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.util.shell.Shell;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +public class DeleterFormatterTest {
 +  DeleterFormatter formatter;
 +  Map<Key,Value> data;
 +  BatchWriter writer;
 +  BatchWriter exceptionWriter;
 +  Shell shellState;
 +
 +  ByteArrayOutputStream baos;
 +  ConsoleReader reader;
 +
 +  SettableInputStream input;
 +
 +  class SettableInputStream extends InputStream {
 +    ByteArrayInputStream bais;
 +
 +    @Override
 +    public int read() throws IOException {
 +      return bais.read();
 +    }
 +
 +    public void set(String in) {
-       bais = new ByteArrayInputStream(in.getBytes(UTF8));
++      bais = new ByteArrayInputStream(in.getBytes(UTF_8));
 +    }
 +  };
 +
 +  @Before
 +  public void setUp() throws IOException, MutationsRejectedException {
 +    input = new SettableInputStream();
 +    baos = new ByteArrayOutputStream();
 +
 +    MutationsRejectedException mre = createMock(MutationsRejectedException.class);
 +
 +    writer = createNiceMock(BatchWriter.class);
 +    exceptionWriter = createNiceMock(BatchWriter.class);
 +    exceptionWriter.close();
 +    expectLastCall().andThrow(mre);
 +    exceptionWriter.addMutation(anyObject(Mutation.class));
 +    expectLastCall().andThrow(mre);
 +
 +    shellState = createNiceMock(Shell.class);
 +
 +    reader = new ConsoleReader(input, baos, new UnsupportedTerminal());
 +    expect(shellState.getReader()).andReturn(reader).anyTimes();
 +
 +    replay(writer, exceptionWriter, shellState);
 +
 +    data = new TreeMap<Key,Value>();
-     data.put(new Key("r", "cf", "cq"), new Value("value".getBytes(UTF8)));
++    data.put(new Key("r", "cf", "cq"), new Value("value".getBytes(UTF_8)));
 +  }
 +
 +  @Test
 +  public void testEmpty() {
 +    formatter = new DeleterFormatter(writer, Collections.<Key,Value> emptyMap().entrySet(), true, shellState, true);
 +    assertFalse(formatter.hasNext());
 +  }
 +
 +  @Test
 +  public void testSingle() throws IOException {
 +    formatter = new DeleterFormatter(writer, data.entrySet(), true, shellState, true);
 +
 +    assertTrue(formatter.hasNext());
 +    assertNull(formatter.next());
 +
 +    verify("[DELETED]", " r ", "cf", "cq", "value");
 +  }
 +
 +  @Test
 +  public void testNo() throws IOException {
 +    input.set("no\n");
-     data.put(new Key("z"), new Value("v2".getBytes(UTF8)));
++    data.put(new Key("z"), new Value("v2".getBytes(UTF_8)));
 +    formatter = new DeleterFormatter(writer, data.entrySet(), true, shellState, false);
 +
 +    assertTrue(formatter.hasNext());
 +    assertNull(formatter.next());
 +
 +    verify("[SKIPPED]", " r ", "cf", "cq", "value");
 +
 +    assertTrue(formatter.hasNext());
 +  }
 +
 +  @Test
 +  public void testNoConfirmation() throws IOException {
 +    input.set("");
-     data.put(new Key("z"), new Value("v2".getBytes(UTF8)));
++    data.put(new Key("z"), new Value("v2".getBytes(UTF_8)));
 +    formatter = new DeleterFormatter(writer, data.entrySet(), true, shellState, false);
 +
 +    assertTrue(formatter.hasNext());
 +    assertNull(formatter.next());
 +
 +    verify("[SKIPPED]", " r ", "cf", "cq", "value");
 +
 +    assertFalse(formatter.hasNext());
 +  }
 +
 +  @Test
 +  public void testYes() throws IOException {
 +    input.set("y\nyes\n");
-     data.put(new Key("z"), new Value("v2".getBytes(UTF8)));
++    data.put(new Key("z"), new Value("v2".getBytes(UTF_8)));
 +    formatter = new DeleterFormatter(writer, data.entrySet(), true, shellState, false);
 +
 +    assertTrue(formatter.hasNext());
 +    assertNull(formatter.next());
 +    verify("[DELETED]", " r ", "cf", "cq", "value");
 +
 +    assertTrue(formatter.hasNext());
 +    assertNull(formatter.next());
 +    verify("[DELETED]", " z ", "v2");
 +  }
 +
 +  @Test
 +  public void testMutationException() {
 +    formatter = new DeleterFormatter(exceptionWriter, data.entrySet(), true, shellState, true);
 +
 +    assertTrue(formatter.hasNext());
 +    assertNull(formatter.next());
 +    assertFalse(formatter.hasNext());
 +  }
 +
 +  private void verify(String... chunks) throws IOException {
 +    reader.flush();
 +
 +    String output = baos.toString();
 +    for (String chunk : chunks) {
 +      assertTrue(output.contains(chunk));
 +    }
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/core/src/test/java/org/apache/accumulo/core/util/format/ShardedTableDistributionFormatterTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/util/format/ShardedTableDistributionFormatterTest.java
index 8fc68fd,0000000..eef06c3
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/util/format/ShardedTableDistributionFormatterTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/util/format/ShardedTableDistributionFormatterTest.java
@@@ -1,90 -1,0 +1,90 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.util.format;
 +
++import static com.google.common.base.Charsets.UTF_8;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.util.Arrays;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.TreeMap;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.commons.collections.CollectionUtils;
 +import org.apache.commons.collections.Predicate;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +public class ShardedTableDistributionFormatterTest {
 +  ShardedTableDistributionFormatter formatter;
 +
 +  Map<Key,Value> data;
 +
 +  @Before
 +  public void setUp() {
 +    data = new TreeMap<Key,Value>();
 +    formatter = new ShardedTableDistributionFormatter();
 +  }
 +
 +  @Test
 +  public void testInitialize() {
 +    data.put(new Key(), new Value());
 +    data.put(new Key("r", "~tab"), new Value());
 +    formatter.initialize(data.entrySet(), false);
 +
 +    assertTrue(formatter.hasNext());
 +    formatter.next();
 +    assertFalse(formatter.hasNext());
 +  }
 +
 +  @Test
 +  public void testAggregate() {
-     data.put(new Key("t", "~tab", "loc"), new Value("srv1".getBytes(Constants.UTF8)));
-     data.put(new Key("t;19700101", "~tab", "loc", 0), new Value("srv1".getBytes(Constants.UTF8)));
-     data.put(new Key("t;19700101", "~tab", "loc", 1), new Value("srv2".getBytes(Constants.UTF8)));
++    data.put(new Key("t", "~tab", "loc"), new Value("srv1".getBytes(UTF_8)));
++    data.put(new Key("t;19700101", "~tab", "loc", 0), new Value("srv1".getBytes(UTF_8)));
++    data.put(new Key("t;19700101", "~tab", "loc", 1), new Value("srv2".getBytes(UTF_8)));
 +
 +    formatter.initialize(data.entrySet(), false);
 +
 +    String[] resultLines = formatter.next().split("\n");
 +    List<String> results = Arrays.asList(resultLines).subList(2, 4);
 +
 +    assertTrue(CollectionUtils.exists(results, new AggregateReportChecker("NULL", 1)));
 +    assertTrue(CollectionUtils.exists(results, new AggregateReportChecker("19700101", 2)));
 +
 +    assertFalse(formatter.hasNext());
 +  }
 +
 +  private static class AggregateReportChecker implements Predicate {
 +    private String day;
 +    private int count;
 +
 +    AggregateReportChecker(String day, int count) {
 +      this.day = day;
 +      this.count = count;
 +    }
 +
 +    @Override
 +    public boolean evaluate(Object arg) {
 +      String resLine = (String) arg;
 +      return resLine.startsWith(this.day) && resLine.endsWith("" + this.count);
 +    }
 +
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
----------------------------------------------------------------------
diff --cc fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
index 1f8c32d,e411d22..af31cf2
--- a/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
@@@ -16,11 -16,10 +16,12 @@@
   */
  package org.apache.accumulo.fate;
  
- import java.nio.charset.Charset;
+ import static com.google.common.base.Charsets.UTF_8;
+ 
  import java.util.ArrayList;
  import java.util.Collections;
 +import java.util.EnumSet;
 +import java.util.Formatter;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;
@@@ -37,34 -35,8 +38,33 @@@ import org.apache.zookeeper.KeeperExcep
   * A utility to administer FATE operations
   */
  public class AdminUtil<T> {
-   private static final Charset UTF8 = Charset.forName("UTF-8");
    
 +  private boolean exitOnError = false;
 +  
 +  /**
 +   * Default constructor
 +   */
 +  public AdminUtil() {
 +    this(true);
 +  }
 +  
 +  /**
 +   * Constructor
 +   * 
 +   * @param exitOnError
 +   *          <code>System.exit(1)</code> on error if true
 +   */
 +  public AdminUtil(boolean exitOnError) {
 +    super();
 +    this.exitOnError = exitOnError;
 +  }
 +  
    public void print(ReadOnlyTStore<T> zs, IZooReaderWriter zk, String lockPath) throws KeeperException, InterruptedException {
 +    print(zs, zk, lockPath, new Formatter(System.out), null, null);
 +  }
 +  
 +  public void print(ReadOnlyTStore<T> zs, IZooReaderWriter zk, String lockPath, Formatter fmt, Set<Long> filterTxid, EnumSet<TStatus> filterStatus)
 +      throws KeeperException, InterruptedException {
      Map<Long,List<String>> heldLocks = new HashMap<Long,List<String>>();
      Map<Long,List<String>> waitingLocks = new HashMap<Long,List<String>>();
      

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java
----------------------------------------------------------------------
diff --cc fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java
index b6890c6,3ef6b0a..ff35364
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java
@@@ -32,8 -33,7 +33,7 @@@ import org.apache.zookeeper.ZooKeeper
  import org.apache.zookeeper.ZooKeeper.States;
  
  public class ZooSession {
-   private static final Charset UTF8 = Charset.forName("UTF-8");
 -  
 +
    public static class ZooSessionShutdownException extends RuntimeException {
  
      private static final long serialVersionUID = 1L;
@@@ -46,19 -46,18 +46,19 @@@
      public ZooSessionInfo(ZooKeeper zooKeeper, ZooWatcher watcher) {
        this.zooKeeper = zooKeeper;
      }
 -    
 +
      ZooKeeper zooKeeper;
    }
 -  
 +
    private static Map<String,ZooSessionInfo> sessions = new HashMap<String,ZooSessionInfo>();
 -  
 +
    private static String sessionKey(String keepers, int timeout, String scheme, byte[] auth) {
-     return keepers + ":" + timeout + ":" + (scheme == null ? "" : scheme) + ":" + (auth == null ? "" : new String(auth, UTF8));
+     return keepers + ":" + timeout + ":" + (scheme == null ? "" : scheme) + ":" + (auth == null ? "" : new String(auth, UTF_8));
    }
 -  
 +
    private static class ZooWatcher implements Watcher {
 -    
 +
 +    @Override
      public void process(WatchedEvent event) {
        if (event.getState() == KeeperState.Expired) {
          log.debug("Session expired, state of current session : " + event.getState());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
----------------------------------------------------------------------
diff --cc minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
index d5dc1e9,0000000..8216441
mode 100644,000000..100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
@@@ -1,851 -1,0 +1,853 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.minicluster.impl;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.io.BufferedReader;
 +import java.io.BufferedWriter;
 +import java.io.File;
 +import java.io.FileFilter;
 +import java.io.FileWriter;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.InputStreamReader;
 +import java.net.InetSocketAddress;
 +import java.net.Socket;
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +import java.net.URL;
 +import java.net.URLClassLoader;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.Properties;
 +import java.util.Set;
 +import java.util.concurrent.Callable;
 +import java.util.concurrent.ExecutionException;
 +import java.util.concurrent.ExecutorService;
 +import java.util.concurrent.Executors;
 +import java.util.concurrent.FutureTask;
 +import java.util.concurrent.TimeUnit;
 +import java.util.concurrent.TimeoutException;
 +
 +import org.apache.accumulo.cluster.AccumuloCluster;
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.ClientConfiguration;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.ZooKeeperInstance;
 +import org.apache.accumulo.core.client.impl.MasterClient;
 +import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.conf.ConfigurationCopy;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.master.thrift.MasterClientService;
 +import org.apache.accumulo.core.master.thrift.MasterGoalState;
 +import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
 +import org.apache.accumulo.core.util.CachedConfiguration;
 +import org.apache.accumulo.core.util.Daemon;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.core.util.StringUtil;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 +import org.apache.accumulo.gc.SimpleGarbageCollector;
 +import org.apache.accumulo.master.Master;
 +import org.apache.accumulo.master.state.SetGoalState;
 +import org.apache.accumulo.minicluster.ServerType;
 +import org.apache.accumulo.server.Accumulo;
 +import org.apache.accumulo.server.fs.VolumeManager;
 +import org.apache.accumulo.server.fs.VolumeManagerImpl;
 +import org.apache.accumulo.server.init.Initialize;
 +import org.apache.accumulo.server.security.SystemCredentials;
 +import org.apache.accumulo.server.util.AccumuloStatus;
 +import org.apache.accumulo.server.util.PortUtils;
 +import org.apache.accumulo.server.util.time.SimpleTimer;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory;
 +import org.apache.accumulo.start.Main;
 +import org.apache.accumulo.start.classloader.vfs.MiniDFSUtil;
 +import org.apache.accumulo.trace.instrument.Tracer;
 +import org.apache.accumulo.tserver.TabletServer;
 +import org.apache.commons.configuration.MapConfiguration;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.vfs2.FileObject;
 +import org.apache.commons.vfs2.impl.VFSClassLoader;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.CommonConfigurationKeys;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.hdfs.DFSConfigKeys;
 +import org.apache.hadoop.hdfs.MiniDFSCluster;
 +import org.apache.thrift.TException;
 +import org.apache.zookeeper.KeeperException;
 +import org.apache.zookeeper.data.Stat;
 +import org.apache.zookeeper.server.ZooKeeperServerMain;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import com.google.common.base.Predicate;
 +import com.google.common.collect.Maps;
 +
 +/**
 + * A utility class that will create Zookeeper and Accumulo processes that write all of their data to a single local directory. This class makes it easy to test
 + * code against a real Accumulo instance. It's much more accurate for testing than {@link org.apache.accumulo.core.client.mock.MockAccumulo}, but much slower.
 + *
 + * @since 1.6.0
 + */
 +public class MiniAccumuloClusterImpl implements AccumuloCluster {
 +  private static final Logger log = LoggerFactory.getLogger(MiniAccumuloClusterImpl.class);
 +
 +  public static class LogWriter extends Daemon {
 +    private BufferedReader in;
 +    private BufferedWriter out;
 +
 +    public LogWriter(InputStream stream, File logFile) throws IOException {
 +      this.in = new BufferedReader(new InputStreamReader(stream));
 +      out = new BufferedWriter(new FileWriter(logFile));
 +
 +      SimpleTimer.getInstance().schedule(new Runnable() {
 +        @Override
 +        public void run() {
 +          try {
 +            flush();
 +          } catch (IOException e) {
 +            e.printStackTrace();
 +          }
 +        }
 +      }, 1000, 1000);
 +    }
 +
 +    public synchronized void flush() throws IOException {
 +      if (out != null)
 +        out.flush();
 +    }
 +
 +    @Override
 +    public void run() {
 +      String line;
 +
 +      try {
 +        while ((line = in.readLine()) != null) {
 +          out.append(line);
 +          out.append("\n");
 +        }
 +
 +        synchronized (this) {
 +          out.close();
 +          out = null;
 +          in.close();
 +        }
 +
 +      } catch (IOException e) {}
 +    }
 +  }
 +
 +  private boolean initialized = false;
 +  private Process zooKeeperProcess = null;
 +  private Process masterProcess = null;
 +  private Process gcProcess = null;
 +  private List<Process> tabletServerProcesses = Collections.synchronizedList(new ArrayList<Process>());
 +
 +  private Set<Pair<ServerType,Integer>> debugPorts = new HashSet<Pair<ServerType,Integer>>();
 +
 +  private File zooCfgFile;
 +  private String dfsUri;
 +
 +  public List<LogWriter> getLogWriters() {
 +    return logWriters;
 +  }
 +
 +  private List<LogWriter> logWriters = new ArrayList<MiniAccumuloClusterImpl.LogWriter>();
 +
 +  private MiniAccumuloConfigImpl config;
 +  private MiniDFSCluster miniDFS = null;
 +  private List<Process> cleanup = new ArrayList<Process>();
 +
 +  private ExecutorService executor;
 +
 +  public Process exec(Class<?> clazz, String... args) throws IOException {
 +    return exec(clazz, null, args);
 +  }
 +
 +  public Process exec(Class<?> clazz, List<String> jvmArgs, String... args) throws IOException {
 +    ArrayList<String> jvmArgs2 = new ArrayList<String>(1 + (jvmArgs == null ? 0 : jvmArgs.size()));
 +    jvmArgs2.add("-Xmx" + config.getDefaultMemory());
 +    if (jvmArgs != null)
 +      jvmArgs2.addAll(jvmArgs);
 +    Process proc = _exec(clazz, jvmArgs2, args);
 +    cleanup.add(proc);
 +    return proc;
 +  }
 +
 +  private boolean containsSiteFile(File f) {
 +    return f.isDirectory() && f.listFiles(new FileFilter() {
 +
 +      @Override
 +      public boolean accept(File pathname) {
 +        return pathname.getName().endsWith("site.xml");
 +      }
 +    }).length > 0;
 +  }
 +
 +  private void append(StringBuilder classpathBuilder, URL url) throws URISyntaxException {
 +    File file = new File(url.toURI());
 +    // do not include dirs containing hadoop or accumulo site files
 +    if (!containsSiteFile(file))
 +      classpathBuilder.append(File.pathSeparator).append(file.getAbsolutePath());
 +  }
 +
 +  private String getClasspath() throws IOException {
 +
 +    try {
 +      ArrayList<ClassLoader> classloaders = new ArrayList<ClassLoader>();
 +
 +      ClassLoader cl = this.getClass().getClassLoader();
 +
 +      while (cl != null) {
 +        classloaders.add(cl);
 +        cl = cl.getParent();
 +      }
 +
 +      Collections.reverse(classloaders);
 +
 +      StringBuilder classpathBuilder = new StringBuilder();
 +      classpathBuilder.append(config.getConfDir().getAbsolutePath());
 +
 +      if (config.getHadoopConfDir() != null)
 +        classpathBuilder.append(File.pathSeparator).append(config.getHadoopConfDir().getAbsolutePath());
 +
 +      if (config.getClasspathItems() == null) {
 +
 +        // assume 0 is the system classloader and skip it
 +        for (int i = 1; i < classloaders.size(); i++) {
 +          ClassLoader classLoader = classloaders.get(i);
 +
 +          if (classLoader instanceof URLClassLoader) {
 +
 +            URLClassLoader ucl = (URLClassLoader) classLoader;
 +
 +            for (URL u : ucl.getURLs()) {
 +              append(classpathBuilder, u);
 +            }
 +
 +          } else if (classLoader instanceof VFSClassLoader) {
 +
 +            VFSClassLoader vcl = (VFSClassLoader) classLoader;
 +            for (FileObject f : vcl.getFileObjects()) {
 +              append(classpathBuilder, f.getURL());
 +            }
 +          } else {
 +            throw new IllegalArgumentException("Unknown classloader type : " + classLoader.getClass().getName());
 +          }
 +        }
 +      } else {
 +        for (String s : config.getClasspathItems())
 +          classpathBuilder.append(File.pathSeparator).append(s);
 +      }
 +
 +      return classpathBuilder.toString();
 +
 +    } catch (URISyntaxException e) {
 +      throw new IOException(e);
 +    }
 +  }
 +
 +  private Process _exec(Class<?> clazz, List<String> extraJvmOpts, String... args) throws IOException {
 +    String javaHome = System.getProperty("java.home");
 +    String javaBin = javaHome + File.separator + "bin" + File.separator + "java";
 +    String classpath = getClasspath();
 +
 +    String className = clazz.getName();
 +
 +    ArrayList<String> argList = new ArrayList<String>();
 +    argList.addAll(Arrays.asList(javaBin, "-Dproc=" + clazz.getSimpleName(), "-cp", classpath));
 +    argList.addAll(extraJvmOpts);
 +    for (Entry<String,String> sysProp : config.getSystemProperties().entrySet()) {
 +      argList.add(String.format("-D%s=%s", sysProp.getKey(), sysProp.getValue()));
 +    }
 +    argList.addAll(Arrays.asList("-XX:+UseConcMarkSweepGC", "-XX:CMSInitiatingOccupancyFraction=75", "-Dapple.awt.UIElement=true", Main.class.getName(),
 +        className));
 +    argList.addAll(Arrays.asList(args));
 +
 +    ProcessBuilder builder = new ProcessBuilder(argList);
 +
 +    builder.environment().put("ACCUMULO_HOME", config.getDir().getAbsolutePath());
 +    builder.environment().put("ACCUMULO_LOG_DIR", config.getLogDir().getAbsolutePath());
 +    builder.environment().put("ACCUMULO_CLIENT_CONF_PATH", config.getClientConfFile().getAbsolutePath());
 +    String ldLibraryPath = StringUtil.join(Arrays.asList(config.getNativeLibPaths()), File.pathSeparator);
 +    builder.environment().put("LD_LIBRARY_PATH", ldLibraryPath);
 +    builder.environment().put("DYLD_LIBRARY_PATH", ldLibraryPath);
 +
 +    // if we're running under accumulo.start, we forward these env vars
 +    String env = System.getenv("HADOOP_PREFIX");
 +    if (env != null)
 +      builder.environment().put("HADOOP_PREFIX", env);
 +    env = System.getenv("ZOOKEEPER_HOME");
 +    if (env != null)
 +      builder.environment().put("ZOOKEEPER_HOME", env);
 +    builder.environment().put("ACCUMULO_CONF_DIR", config.getConfDir().getAbsolutePath());
 +    // hadoop-2.2 puts error messages in the logs if this is not set
 +    builder.environment().put("HADOOP_HOME", config.getDir().getAbsolutePath());
 +    if (config.getHadoopConfDir() != null)
 +      builder.environment().put("HADOOP_CONF_DIR", config.getHadoopConfDir().getAbsolutePath());
 +
 +    Process process = builder.start();
 +
 +    LogWriter lw;
 +    lw = new LogWriter(process.getErrorStream(), new File(config.getLogDir(), clazz.getSimpleName() + "_" + process.hashCode() + ".err"));
 +    logWriters.add(lw);
 +    lw.start();
 +    lw = new LogWriter(process.getInputStream(), new File(config.getLogDir(), clazz.getSimpleName() + "_" + process.hashCode() + ".out"));
 +    logWriters.add(lw);
 +    lw.start();
 +
 +    return process;
 +  }
 +
 +  private Process _exec(Class<?> clazz, ServerType serverType, String... args) throws IOException {
 +
 +    List<String> jvmOpts = new ArrayList<String>();
 +    jvmOpts.add("-Xmx" + config.getMemory(serverType));
 +
 +    if (config.isJDWPEnabled()) {
 +      Integer port = PortUtils.getRandomFreePort();
 +      jvmOpts.addAll(buildRemoteDebugParams(port));
 +      debugPorts.add(new Pair<ServerType,Integer>(serverType, port));
 +    }
 +    return _exec(clazz, jvmOpts, args);
 +  }
 +
 +  /**
 +   *
 +   * @param dir
 +   *          An empty or nonexistent temp directory that Accumulo and Zookeeper can store data in. Creating the directory is left to the user. Java 7, Guava,
 +   *          and JUnit provide methods for creating temporary directories.
 +   * @param rootPassword
 +   *          Initial root password for instance.
 +   */
 +  public MiniAccumuloClusterImpl(File dir, String rootPassword) throws IOException {
 +    this(new MiniAccumuloConfigImpl(dir, rootPassword));
 +  }
 +
 +  /**
 +   * @param config
 +   *          initial configuration
 +   */
 +  @SuppressWarnings("deprecation")
 +  public MiniAccumuloClusterImpl(MiniAccumuloConfigImpl config) throws IOException {
 +
 +    this.config = config.initialize();
 +
 +    config.getConfDir().mkdirs();
 +    config.getLogDir().mkdirs();
 +    config.getLibDir().mkdirs();
 +    config.getLibExtDir().mkdirs();
 +
 +    if (!config.useExistingInstance()) {
 +      config.getZooKeeperDir().mkdirs();
 +      config.getWalogDir().mkdirs();
 +      config.getAccumuloDir().mkdirs();
 +    }
 +
 +    if (config.useMiniDFS()) {
 +      File nn = new File(config.getAccumuloDir(), "nn");
 +      nn.mkdirs();
 +      File dn = new File(config.getAccumuloDir(), "dn");
 +      dn.mkdirs();
 +      File dfs = new File(config.getAccumuloDir(), "dfs");
 +      dfs.mkdirs();
 +      Configuration conf = new Configuration();
 +      conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
 +      conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
 +      conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
 +      conf.set("dfs.support.append", "true");
 +      conf.set("dfs.datanode.synconclose", "true");
 +      conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
 +      String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
 +      miniDFS = new MiniDFSCluster(conf, 1, true, null);
 +      if (oldTestBuildData == null)
 +        System.clearProperty("test.build.data");
 +      else
 +        System.setProperty("test.build.data", oldTestBuildData);
 +      miniDFS.waitClusterUp();
 +      InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
 +      dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
 +      File coreFile = new File(config.getConfDir(), "core-site.xml");
 +      writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
 +      File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
 +      writeConfig(hdfsFile, conf);
 +
 +      Map<String,String> siteConfig = config.getSiteConfig();
 +      siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
 +      siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
 +      config.setSiteConfig(siteConfig);
 +    } else if (config.useExistingInstance()) {
 +      dfsUri = CachedConfiguration.getInstance().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
 +    } else {
 +      dfsUri = "file://";
 +    }
 +
 +    File clientConfFile = config.getClientConfFile();
 +    // Write only the properties that correspond to ClientConfiguration properties
 +    writeConfigProperties(clientConfFile, Maps.filterEntries(config.getSiteConfig(), new Predicate<Entry<String,String>>() {
 +      @Override
 +      public boolean apply(Entry<String,String> v) {
 +        return ClientConfiguration.ClientProperty.getPropertyByKey(v.getKey()) != null;
 +      }
 +    }));
 +
 +    File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
 +    writeConfig(siteFile, config.getSiteConfig().entrySet());
 +
 +    if (!config.useExistingInstance()) {
 +      zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
 +      FileWriter fileWriter = new FileWriter(zooCfgFile);
 +
 +      // zookeeper uses Properties to read its config, so use that to write in order to properly escape things like Windows paths
 +      Properties zooCfg = new Properties();
 +      zooCfg.setProperty("tickTime", "2000");
 +      zooCfg.setProperty("initLimit", "10");
 +      zooCfg.setProperty("syncLimit", "5");
 +      zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
 +      zooCfg.setProperty("maxClientCnxns", "1000");
 +      zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
 +      zooCfg.store(fileWriter, null);
 +
 +      fileWriter.close();
 +    }
 +
 +    // disable audit logging for mini....
 +    InputStream auditStream = this.getClass().getResourceAsStream("/auditLog.xml");
 +
 +    if (auditStream != null) {
 +      FileUtils.copyInputStreamToFile(auditStream, new File(config.getConfDir(), "auditLog.xml"));
 +    }
 +  }
 +
 +  /**
 +   * Writes the given settings to {@code file} as a Hadoop-style XML configuration
 +   * ({@code <property><name>..</name><value>..</value></property>} entries).
 +   *
 +   * @param file destination file; overwritten if it exists
 +   * @param settings key/value pairs to emit, one property element per entry
 +   * @throws IOException if the file cannot be written
 +   */
 +  private void writeConfig(File file, Iterable<Map.Entry<String,String>> settings) throws IOException {
 +    FileWriter fileWriter = new FileWriter(file);
 +    try {
 +      fileWriter.append("<configuration>\n");
 +      for (Entry<String,String> entry : settings) {
 +        // Escape XML special characters in both key and value; previously only the
 +        // value was escaped, which would emit malformed XML for a key containing & < >.
 +        String key = escapeXmlText(entry.getKey());
 +        String value = escapeXmlText(entry.getValue());
 +        fileWriter.append("<property><name>" + key + "</name><value>" + value + "</value></property>\n");
 +      }
 +      fileWriter.append("</configuration>\n");
 +    } finally {
 +      // Close unconditionally so a failed append does not leak the file handle.
 +      fileWriter.close();
 +    }
 +  }
 +
 +  // Minimal XML text-content escaping; '&' must be replaced first.
 +  private static String escapeXmlText(String s) {
 +    return s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;");
 +  }
 +
 +  /**
 +   * Writes the given settings to {@code file} in simple {@code key=value} lines
 +   * (Java properties style). NOTE(review): keys and values are not escaped, so this
 +   * assumes they contain no newlines or '=' characters — confirm against callers.
 +   *
 +   * @param file destination file; overwritten if it exists
 +   * @param settings key/value pairs to emit, one per line
 +   * @throws IOException if the file cannot be written
 +   */
 +  private void writeConfigProperties(File file, Map<String,String> settings) throws IOException {
 +    FileWriter fileWriter = new FileWriter(file);
 +    try {
 +      for (Entry<String,String> entry : settings.entrySet())
 +        fileWriter.append(entry.getKey() + "=" + entry.getValue() + "\n");
 +    } finally {
 +      // Close unconditionally so a failed append does not leak the file handle.
 +      fileWriter.close();
 +    }
 +  }
 +
 +  /**
 +   * Starts Accumulo and Zookeeper processes. Can only be called once.
 +   * For an existing instance, attaches to it instead of spawning new processes.
 +   *
 +   * NOTE(review): the declared IllegalStateException is not visibly thrown by this
 +   * implementation — confirm whether _exec/exec enforce it.
 +   *
 +   * @throws IllegalStateException
 +   *           if already started
 +   */
 +  @Override
 +  public synchronized void start() throws IOException, InterruptedException {
 +    if (config.useExistingInstance()) {
 +      // Attach to an already-initialized instance: resolve its instance id from
 +      // the filesystem, then map that id back to an instance name via zookeeper.
 +      Configuration acuConf = config.getAccumuloConfiguration();
 +      Configuration hadoopConf = config.getHadoopConfiguration();
 +
 +      ConfigurationCopy cc = new ConfigurationCopy(acuConf);
 +      VolumeManager fs;
 +      try {
 +        fs = VolumeManagerImpl.get(cc, hadoopConf);
 +      } catch (IOException e) {
 +        throw new RuntimeException(e);
 +      }
 +      Path instanceIdPath = Accumulo.getAccumuloInstanceIdPath(fs);
 +
 +      String instanceIdFromFile = ZooUtil.getInstanceIDFromHdfs(instanceIdPath, cc, hadoopConf);
 +      IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter(cc.get(Property.INSTANCE_ZK_HOST),
 +          (int) cc.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT), cc.get(Property.INSTANCE_SECRET));
 +
 +      String rootPath = ZooUtil.getRoot(instanceIdFromFile);
 +
 +      // Scan all registered instance names for the one whose stored id matches.
 +      String instanceName = null;
 +      try {
 +        for (String name : zrw.getChildren(Constants.ZROOT + Constants.ZINSTANCES)) {
 +          String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + name;
 +          byte[] bytes = zrw.getData(instanceNamePath, new Stat());
 -           String iid = new String(bytes, Constants.UTF8);
++          String iid = new String(bytes, UTF_8);
 +          if (iid.equals(instanceIdFromFile)) {
 +            instanceName = name;
 +          }
 +        }
 +      } catch (KeeperException e) {
 +        throw new RuntimeException("Unable to read instance name from zookeeper.", e);
 +      }
 +      if (instanceName == null)
 +        throw new RuntimeException("Unable to read instance name from zookeeper.");
 +
 +      config.setInstanceName(instanceName);
 +      // Refuse to attach to a running instance; two clusters on one instance would conflict.
 +      if (!AccumuloStatus.isAccumuloOffline(zrw, rootPath))
 +        throw new RuntimeException("The Accumulo instance being used is already running. Aborting.");
 +    } else {
 +      if (!initialized) {
 +        // Best-effort cleanup in case the JVM exits without stop() being called.
 +        Runtime.getRuntime().addShutdownHook(new Thread() {
 +          @Override
 +          public void run() {
 +            try {
 +              MiniAccumuloClusterImpl.this.stop();
 +            } catch (IOException e) {
 +              e.printStackTrace();
 +            } catch (InterruptedException e) {
 +              e.printStackTrace();
 +            }
 +          }
 +        });
 +      }
 +
 +      if (zooKeeperProcess == null) {
 +        zooKeeperProcess = _exec(ZooKeeperServerMain.class, ServerType.ZOOKEEPER, zooCfgFile.getAbsolutePath());
 +      }
 +
 +      if (!initialized) {
 +        // sleep a little bit to let zookeeper come up before calling init, seems to work better
 +        long startTime = System.currentTimeMillis();
 +        while (true) {
 +          Socket s = null;
 +          try {
 +            // Poll zookeeper's four-letter "ruok" health command until it answers "imok".
 +            s = new Socket("localhost", config.getZooKeeperPort());
 +            s.getOutputStream().write("ruok\n".getBytes());
 +            s.getOutputStream().flush();
 +            byte buffer[] = new byte[100];
 +            int n = s.getInputStream().read(buffer);
 +            if (n >= 4 && new String(buffer, 0, 4).equals("imok"))
 +              break;
 +          } catch (Exception e) {
 +            // Connection failures are expected while zookeeper is still starting;
 +            // only give up once the configured startup window has elapsed.
 +            if (System.currentTimeMillis() - startTime >= config.getZooKeeperStartupTime()) {
 +              throw new ZooKeeperBindException("Zookeeper did not start within " + (config.getZooKeeperStartupTime() / 1000) + " seconds. Check the logs in "
 +                  + config.getLogDir() + " for errors.  Last exception: " + e);
 +            }
 +          } finally {
 +            if (s != null)
 +              s.close();
 +          }
 +        }
 +        Process initProcess = exec(Initialize.class, "--instance-name", config.getInstanceName(), "--password", config.getRootPassword());
 +        int ret = initProcess.waitFor();
 +        if (ret != 0) {
 +          throw new RuntimeException("Initialize process returned " + ret + ". Check the logs in " + config.getLogDir() + " for errors.");
 +        }
 +        initialized = true;
 +      }
 +    }
 +    
 +    log.info("Starting MAC against instance {} and zookeeper(s) {}.", config.getInstanceName(), config.getZooKeepers());
 +    
 +    // Top up the tablet server pool to the configured count (idempotent on restart).
 +    synchronized (tabletServerProcesses) {
 +      for (int i = tabletServerProcesses.size(); i < config.getNumTservers(); i++) {
 +        tabletServerProcesses.add(_exec(TabletServer.class, ServerType.TABLET_SERVER));
 +      }
 +    }
 +    int ret = 0;
 +    // Retry SetGoalState a few times; it may fail until the servers are reachable.
 +    for (int i = 0; i < 5; i++) {
 +      ret = exec(Main.class, SetGoalState.class.getName(), MasterGoalState.NORMAL.toString()).waitFor();
 +      if (ret == 0)
 +        break;
 +      UtilWaitThread.sleep(1000);
 +    }
 +    if (ret != 0) {
 +      throw new RuntimeException("Could not set master goal state, process returned " + ret + ". Check the logs in " + config.getLogDir() + " for errors.");
 +    }
 +    if (masterProcess == null) {
 +      masterProcess = _exec(Master.class, ServerType.MASTER);
 +    }
 +
 +    if (gcProcess == null) {
 +      gcProcess = _exec(SimpleGarbageCollector.class, ServerType.GARBAGE_COLLECTOR);
 +    }
 +
 +    if (null == executor) {
 +      // Single-thread executor used later by stop() to bound per-process shutdown waits.
 +      executor = Executors.newSingleThreadExecutor();
 +    }
 +  }
 +
 +  /**
 +   * Builds JVM arguments that open a JDWP remote-debugging socket on the given port
 +   * without suspending the target process at startup.
 +   *
 +   * @param port TCP port the debug agent should listen on
 +   * @return the two JVM arguments to append to the server command line
 +   */
 +  private List<String> buildRemoteDebugParams(int port) {
 +    // Arrays.asList is varargs; the explicit String[] allocation was redundant.
 +    return Arrays.asList("-Xdebug", String.format("-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=%d", port));
 +  }
 +
 +  /**
 +   * Returns the set of (server type, port) pairs allocated for remote debugging.
 +   *
 +   * @return generated remote debug ports if in debug mode; empty otherwise.
 +   * @since 1.6.0
 +   */
 +  public Set<Pair<ServerType,Integer>> getDebugPorts() {
 +    return debugPorts;
 +  }
 +
 +  /** Wraps each raw Process in a ProcessReference, preserving argument order. */
 +  List<ProcessReference> references(Process... procs) {
 +    List<ProcessReference> refs = new ArrayList<ProcessReference>(procs.length);
 +    for (int i = 0; i < procs.length; i++) {
 +      refs.add(new ProcessReference(procs[i]));
 +    }
 +    return refs;
 +  }
 +
 +  /**
 +   * Returns a snapshot of the currently tracked server processes, keyed by server type.
 +   * The garbage collector entry is only present when a GC process is running.
 +   */
 +  public Map<ServerType,Collection<ProcessReference>> getProcesses() {
 +    Map<ServerType,Collection<ProcessReference>> procs = new HashMap<ServerType,Collection<ProcessReference>>();
 +    procs.put(ServerType.MASTER, references(masterProcess));
 +    procs.put(ServerType.TABLET_SERVER, references(tabletServerProcesses.toArray(new Process[0])));
 +    procs.put(ServerType.ZOOKEEPER, references(zooKeeperProcess));
 +    if (gcProcess != null) {
 +      procs.put(ServerType.GARBAGE_COLLECTOR, references(gcProcess));
 +    }
 +    return procs;
 +  }
 +
 +  /**
 +   * Kills a single managed server process and removes it from the cluster's bookkeeping,
 +   * blocking until the process has exited.
 +   *
 +   * @param type
 +   *          which kind of server the reference belongs to
 +   * @param proc
 +   *          reference to the process to kill
 +   * @throws ProcessNotFoundException
 +   *           if the reference does not match a tracked process of the given type
 +   */
 +  public void killProcess(ServerType type, ProcessReference proc) throws ProcessNotFoundException, InterruptedException {
 +    boolean found = false;
 +    switch (type) {
 +      case MASTER:
 +        if (proc.equals(masterProcess)) {
 +          masterProcess.destroy();
 +          masterProcess.waitFor();
 +          masterProcess = null;
 +          found = true;
 +        }
 +        break;
 +      case TABLET_SERVER:
 +        synchronized (tabletServerProcesses) {
 +          for (Process tserver : tabletServerProcesses) {
 +            if (proc.equals(tserver)) {
 +              // remove-then-break is safe here: the loop's iterator is never
 +              // advanced after the removal, so no ConcurrentModificationException
 +              tabletServerProcesses.remove(tserver);
 +              tserver.destroy();
 +              tserver.waitFor();
 +              found = true;
 +              break;
 +            }
 +          }
 +        }
 +        break;
 +      case ZOOKEEPER:
 +        if (proc.equals(zooKeeperProcess)) {
 +          zooKeeperProcess.destroy();
 +          zooKeeperProcess.waitFor();
 +          zooKeeperProcess = null;
 +          found = true;
 +        }
 +        break;
 +      case GARBAGE_COLLECTOR:
 +        if (proc.equals(gcProcess)) {
 +          gcProcess.destroy();
 +          gcProcess.waitFor();
 +          gcProcess = null;
 +          found = true;
 +        }
 +        break;
 +    }
 +    if (!found)
 +      throw new ProcessNotFoundException();
 +  }
 +
 +  /**
 +   * @return Accumulo instance name, as held by this cluster's configuration
 +   */
 +  @Override
 +  public String getInstanceName() {
 +    return config.getInstanceName();
 +  }
 +
 +  /**
 +   * @return zookeeper connection string, as held by this cluster's configuration
 +   */
 +  @Override
 +  public String getZooKeepers() {
 +    return config.getZooKeepers();
 +  }
 +
 +  /**
 +   * Stops Accumulo and Zookeeper processes. If stop is not called, there is a shutdown hook that is setup to kill the processes. However its probably best to
 +   * call stop in a finally block as soon as possible.
 +   * Safe to call more than once; subsequent calls are no-ops.
 +   */
 +  @Override
 +  public synchronized void stop() throws IOException, InterruptedException {
 +    if (null == executor) {
 +      // keep repeated calls to stop() from failing
 +      return;
 +    }
 +
 +    // Flush buffered log output before killing children so nothing is lost.
 +    for (LogWriter lw : logWriters) {
 +      lw.flush();
 +    }
 +
 +    // Stop in order: GC, master, tablet servers, then zookeeper last.
 +    // Each process gets 30 seconds before we log a warning and move on.
 +    if (gcProcess != null) {
 +      try {
 +        stopProcessWithTimeout(gcProcess, 30, TimeUnit.SECONDS);
 +      } catch (ExecutionException e) {
 +        log.warn("GarbageCollector did not fully stop after 30 seconds", e);
 +      } catch (TimeoutException e) {
 +        log.warn("GarbageCollector did not fully stop after 30 seconds", e);
 +      }
 +    }
 +    if (masterProcess != null) {
 +      try {
 +        stopProcessWithTimeout(masterProcess, 30, TimeUnit.SECONDS);
 +      } catch (ExecutionException e) {
 +        log.warn("Master did not fully stop after 30 seconds", e);
 +      } catch (TimeoutException e) {
 +        log.warn("Master did not fully stop after 30 seconds", e);
 +      }
 +    }
 +    if (tabletServerProcesses != null) {
 +      synchronized (tabletServerProcesses) {
 +        for (Process tserver : tabletServerProcesses) {
 +          try {
 +            stopProcessWithTimeout(tserver, 30, TimeUnit.SECONDS);
 +          } catch (ExecutionException e) {
 +            log.warn("TabletServer did not fully stop after 30 seconds", e);
 +          } catch (TimeoutException e) {
 +            log.warn("TabletServer did not fully stop after 30 seconds", e);
 +          }
 +        }
 +      }
 +    }
 +    if (zooKeeperProcess != null) {
 +      try {
 +        stopProcessWithTimeout(zooKeeperProcess, 30, TimeUnit.SECONDS);
 +      } catch (ExecutionException e) {
 +        log.warn("ZooKeeper did not fully stop after 30 seconds", e);
 +      } catch (TimeoutException e) {
 +        log.warn("ZooKeeper did not fully stop after 30 seconds", e);
 +      }
 +    }
 +
 +    // Clear bookkeeping so a later start() spawns fresh processes.
 +    zooKeeperProcess = null;
 +    masterProcess = null;
 +    gcProcess = null;
 +    tabletServerProcesses.clear();
 +
 +    // ACCUMULO-2985 stop the ExecutorService after we finished using it to stop accumulo procs
 +    if (null != executor) {
 +      List<Runnable> tasksRemaining = executor.shutdownNow();
 +
 +      // the single thread executor shouldn't have any pending tasks, but check anyways
 +      if (!tasksRemaining.isEmpty()) {
 +        log.warn("Unexpectedly had " + tasksRemaining.size() + " task(s) remaining in threadpool for execution when being stopped");
 +      }
 +
 +      // null executor is the "stopped" marker checked at the top of this method
 +      executor = null;
 +    }
 +
 +    if (config.useMiniDFS() && miniDFS != null)
 +      miniDFS.shutdown();
 +    // Destroy any auxiliary processes registered for cleanup and wait for them to exit.
 +    for (Process p : cleanup) {
 +      p.destroy();
 +      p.waitFor();
 +    }
 +    miniDFS = null;
 +  }
 +
 +  /**
 +   * Returns the live configuration object backing this cluster.
 +   *
 +   * @since 1.6.0
 +   */
 +  @Override
 +  public MiniAccumuloConfigImpl getConfig() {
 +    return config;
 +  }
 +
 +  /**
 +   * Utility method to get a connector to the MAC.
 +   * The password is wrapped in a PasswordToken before authenticating.
 +   *
 +   * @since 1.6.0
 +   */
 +  @Override
 +  public Connector getConnector(String user, String passwd) throws AccumuloException, AccumuloSecurityException {
 +    Instance instance = new ZooKeeperInstance(getClientConfig());
 +    return instance.getConnector(user, new PasswordToken(passwd));
 +  }
 +
 +  // Builds a ClientConfiguration from the MAC's site config, pinned to this
 +  // cluster's instance name and zookeeper hosts.
 +  @Override
 +  public ClientConfiguration getClientConfig() {
 +    return new ClientConfiguration(Arrays.asList(new MapConfiguration(config.getSiteConfig()))).withInstance(this.getInstanceName()).withZkHosts(
 +        this.getZooKeepers());
 +  }
 +
 +  /**
 +   * Returns a Hadoop FileSystem for the URI this cluster is using (MiniDFS,
 +   * an existing instance's default FS, or the local file:// filesystem).
 +   * Any URI or I/O failure is rethrown wrapped in a RuntimeException.
 +   */
 +  public FileSystem getFileSystem() {
 +    try {
 +      return FileSystem.get(new URI(dfsUri), new Configuration());
 +    } catch (Exception e) {
 +      throw new RuntimeException(e);
 +    }
 +  }
 +
 +  // Visible for testing
 +  // Replaces the executor that stop() uses to bound process-shutdown waits.
 +  protected void setShutdownExecutor(ExecutorService svc) {
 +    this.executor = svc;
 +  }
 +
 +  // Visible for testing
 +  // Null when the cluster is stopped (see stop()); non-null after start().
 +  protected ExecutorService getShutdownExecutor() {
 +    return executor;
 +  }
 +
 +  /**
 +   * Destroys the given process and waits for it to exit, bounding the wait by running
 +   * the destroy/wait sequence on the shutdown executor and applying a timeout to its future.
 +   *
 +   * @return the process's exit value
 +   */
 +  private int stopProcessWithTimeout(final Process proc, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
 +    Callable<Integer> stopper = new Callable<Integer>() {
 +      @Override
 +      public Integer call() throws InterruptedException {
 +        proc.destroy();
 +        return proc.waitFor();
 +      }
 +    };
 +    FutureTask<Integer> stopTask = new FutureTask<Integer>(stopper);
 +    executor.execute(stopTask);
 +    return stopTask.get(timeout, unit);
 +  }
 +
 +  /**
 +   * Get programmatic interface to information available in a normal monitor. XXX the returned structure won't contain information about the metadata table
 +   * until there is data in it. e.g. if you want to see the metadata table you should create a table.
 +   * 
 +   * @since 1.6.1
 +   */
 +  public MasterMonitorInfo getMasterMonitorInfo() throws AccumuloException, AccumuloSecurityException {
 +    MasterClientService.Iface client = null;
 +    MasterMonitorInfo stats = null;
 +    try {
 +      // Connect as the system user and fetch the master's monitoring stats over thrift.
 +      Instance instance = new ZooKeeperInstance(getClientConfig());
 +      client = MasterClient.getConnectionWithRetry(instance);
 +      stats = client.getMasterStats(Tracer.traceInfo(), SystemCredentials.get(instance).toThrift(instance));
 +    } catch (ThriftSecurityException exception) {
 +      throw new AccumuloSecurityException(exception);
 +    } catch (TException exception) {
 +      throw new AccumuloException(exception);
 +    } finally {
 +      // Always return the thrift connection to the pool, even on failure.
 +      if (client != null) {
 +        MasterClient.close(client);
 +      }
 +    }
 +    return stats;
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
----------------------------------------------------------------------
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index be29dbb,48e2906..30da0cba
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@@ -16,6 -16,6 +16,8 @@@
   */
  package org.apache.accumulo.proxy;
  
++import static com.google.common.base.Charsets.UTF_8;
++
  import java.nio.ByteBuffer;
  import java.util.ArrayList;
  import java.util.Collection;
@@@ -194,13 -172,11 +195,13 @@@ public class ProxyServer implements Acc
    }
    
    protected Connector getConnector(ByteBuffer login) throws Exception {
-     String[] pair = new String(login.array(), login.position(), login.remaining(), Constants.UTF8).split(",", 2);
 -    TCredentials user = CredentialHelper.fromByteArray(ByteBufferUtil.toBytes(login));
 -    if (user == null)
 -      throw new org.apache.accumulo.proxy.thrift.AccumuloSecurityException("unknown user");
 -    Connector connector = instance.getConnector(user.getPrincipal(), CredentialHelper.extractToken(user));
 -    return connector;
++    String[] pair = new String(login.array(), login.position(), login.remaining(), UTF_8).split(",", 2);
 +    if (instance.getInstanceID().equals(pair[0])) {
 +      Credentials creds = Credentials.deserialize(pair[1]);
 +      return instance.getConnector(creds.getPrincipal(), creds.getToken());
 +    } else {
 +      throw new org.apache.accumulo.core.client.AccumuloSecurityException(pair[0], org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.INVALID_INSTANCEID);
 +    }
    }
    
    private void handleAccumuloException(AccumuloException e) throws org.apache.accumulo.proxy.thrift.TableNotFoundException,
@@@ -1489,7 -1432,8 +1490,7 @@@
    public ByteBuffer login(String principal, Map<String,String> loginProperties) throws org.apache.accumulo.proxy.thrift.AccumuloSecurityException, TException {
      try {
        AuthenticationToken token = getToken(principal, loginProperties);
-       ByteBuffer login = ByteBuffer.wrap((instance.getInstanceID() + "," + new Credentials(principal, token).serialize()).getBytes(Constants.UTF8));
 -      TCredentials credential = CredentialHelper.create(principal, token, instance.getInstanceID());
 -      ByteBuffer login = ByteBuffer.wrap(CredentialHelper.asByteArray(credential));
++      ByteBuffer login = ByteBuffer.wrap((instance.getInstanceID() + "," + new Credentials(principal, token).serialize()).getBytes(UTF_8));
        getConnector(login); // check to make sure user exists
        return login;
      } catch (AccumuloSecurityException e) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/proxy/src/main/java/org/apache/accumulo/proxy/TestProxyClient.java
----------------------------------------------------------------------


Mime
View raw message