accumulo-notifications mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From "ASF GitHub Bot (JIRA)" <>
Subject [jira] [Commented] (ACCUMULO-4229) BatchWriter Locator cache out-of-sync when shared with tserver
Date Sun, 24 Apr 2016 23:24:12 GMT


ASF GitHub Bot commented on ACCUMULO-4229:

Github user joshelser commented on a diff in the pull request:
    --- Diff: test/src/main/java/org/apache/accumulo/test/ ---
    @@ -0,0 +1,256 @@
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.accumulo.test;
    +import org.apache.accumulo.core.client.AccumuloException;
    +import org.apache.accumulo.core.client.AccumuloSecurityException;
    +import org.apache.accumulo.core.client.BatchWriter;
    +import org.apache.accumulo.core.client.BatchWriterConfig;
    +import org.apache.accumulo.core.client.ClientConfiguration;
    +import org.apache.accumulo.core.client.Connector;
    +import org.apache.accumulo.core.client.Instance;
    +import org.apache.accumulo.core.client.IteratorSetting;
    +import org.apache.accumulo.core.client.MutationsRejectedException;
    +import org.apache.accumulo.core.client.TableNotFoundException;
    +import org.apache.accumulo.core.client.TimedOutException;
    +import org.apache.accumulo.core.client.ZooKeeperInstance;
    +import org.apache.accumulo.core.client.impl.TabletLocator;
    +import org.apache.accumulo.core.iterators.IteratorEnvironment;
    +import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
    +import org.apache.accumulo.core.iterators.WrappingIterator;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +import java.util.Collection;
    +import java.util.Map;
    +import java.util.SortedSet;
    +import java.util.TreeSet;
    +import java.util.concurrent.TimeUnit;
    + * Iterator that opens a BatchWriter and writes to another table.
    + * <p>
    + * For each entry passed to this iterator, this writes a certain number of entries with
the same key to another table and passes the entry downstream of this
    + * iterator with its value replaced by either "{@value SUCCESS_STRING}" or a description
of what failed. Success counts as all entries writing to the result
    + * table within a timeout period. Failure counts as one of the entries taking longer
than the timeout period.
    + * <p>
    + * Configure this iterator by calling the static {@link #iteratorSetting} method.
    + */
    +public class BatchWriterIterator extends WrappingIterator {
  private static final Logger log = LoggerFactory.getLogger(BatchWriterIterator.class);

  // Behavior knobs; defaults below are overwritten from iterator options in parseOptions().
  private int sleepAfterFirstWrite = 0;
  private int numEntriesToWritePerEntry = 10;
  private long batchWriterTimeout = 0;
  private long batchWriterMaxMemory = 0;
  private boolean clearCacheAfterFirstWrite = false;
  private boolean splitAfterFirstWrite = false;

  // Iterator-option keys for the tuning knobs above (same names as the fields).
  public static final String OPT_sleepAfterFirstWrite = "sleepAfterFirstWrite", OPT_numEntriesToWritePerEntry = "numEntriesToWritePerEntry",
      OPT_batchWriterTimeout = "batchWriterTimeout", OPT_batchWriterMaxMemory = "batchWriterMaxMemory",
      OPT_clearCacheAfterFirstWrite = "clearCacheAfterFirstWrite", OPT_splitAfterFirstWrite = "splitAfterFirstWrite";

  // Connection settings, parsed from iterator options in parseOptions().
  private String instanceName;
  private String tableName;
  private String zookeeperHost;
  private int zookeeperTimeout = -1;
  private String username;
  private AuthenticationToken auth = null;

  // Iterator-option keys for the connection settings.
  public static final String ZOOKEEPERHOST = "zookeeperHost", INSTANCENAME = "instanceName", TABLENAME = "tableName", USERNAME = "username",
      ZOOKEEPERTIMEOUT = "zookeeperTimeout", AUTHENTICATION_TOKEN = "authenticationToken", // base64 encoding of token
      AUTHENTICATION_TOKEN_CLASS = "authenticationTokenClass"; // class of token

  // Writer state; the writer and connector are created in initBatchWriter().
  private BatchWriter batchWriter;
  private boolean firstWrite = true;
  private Value topValue = null;
  private Connector connector;

  public static final String SUCCESS_STRING = "success";
  // NOTE(review): getBytes() with no charset uses the platform default encoding;
  // consider StandardCharsets.UTF_8 for portability — confirm before changing.
  public static final Value SUCCESS_VALUE = new Value(SUCCESS_STRING.getBytes());
    +  public static IteratorSetting iteratorSetting(int priority, int sleepAfterFirstWrite,
long batchWriterTimeout, long batchWriterMaxMemory,
    +      int numEntriesToWrite, String tableName, Connector connector, AuthenticationToken
token, boolean clearCacheAfterFirstWrite, boolean splitAfterFirstWrite) {
    +    return iteratorSetting(priority, sleepAfterFirstWrite, batchWriterTimeout, batchWriterMaxMemory,
numEntriesToWrite, tableName, connector.getInstance()
    +        .getZooKeepers(), connector.getInstance().getInstanceName(), connector.getInstance().getZooKeepersSessionTimeOut(),
connector.whoami(), token,
    +        clearCacheAfterFirstWrite, splitAfterFirstWrite);
    +  }
    +  public static IteratorSetting iteratorSetting(int priority, int sleepAfterFirstWrite,
long batchWriterTimeout, long batchWriterMaxMemory,
    +      int numEntriesToWrite, String tableName, String zookeeperHost, String instanceName,
int zookeeperTimeout, String username, AuthenticationToken token,
    +      boolean clearCacheAfterFirstWrite, boolean splitAfterFirstWrite) {
    +    IteratorSetting itset = new IteratorSetting(priority, BatchWriterIterator.class);
    +    itset.addOption(OPT_sleepAfterFirstWrite, Integer.toString(sleepAfterFirstWrite));
    +    itset.addOption(OPT_numEntriesToWritePerEntry, Integer.toString(numEntriesToWrite));
    +    itset.addOption(OPT_batchWriterTimeout, Long.toString(batchWriterTimeout));
    +    itset.addOption(OPT_batchWriterMaxMemory, Long.toString(batchWriterMaxMemory));
    +    itset.addOption(OPT_clearCacheAfterFirstWrite, Boolean.toString(clearCacheAfterFirstWrite));
    +    itset.addOption(OPT_splitAfterFirstWrite, Boolean.toString(splitAfterFirstWrite));
    +    itset.addOption(TABLENAME, tableName);
    +    itset.addOption(ZOOKEEPERHOST, zookeeperHost);
    +    itset.addOption(ZOOKEEPERTIMEOUT, Integer.toString(zookeeperTimeout));
    +    itset.addOption(INSTANCENAME, instanceName);
    +    itset.addOption(USERNAME, username);
    +    itset.addOption(AUTHENTICATION_TOKEN_CLASS, token.getClass().getName());
    +    itset.addOption(AUTHENTICATION_TOKEN, SerializationUtil.serializeWritableBase64(token));
    +    return itset;
    +  }
  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
    // Standard iterator lifecycle: initialize the wrapped source first, then read this
    // iterator's configuration and eagerly open the BatchWriter connection so any
    // connection failure surfaces at init time rather than on the first entry.
    super.init(source, options, env);
    parseOptions(options);
    initBatchWriter();
  }
    +  private void parseOptions(Map<String,String> options) {
    +    if (options.containsKey(OPT_numEntriesToWritePerEntry))
    +      numEntriesToWritePerEntry = Integer.parseInt(options.get(OPT_numEntriesToWritePerEntry));
    +    if (options.containsKey(OPT_sleepAfterFirstWrite))
    +      sleepAfterFirstWrite = Integer.parseInt(options.get(OPT_sleepAfterFirstWrite));
    +    if (options.containsKey(OPT_batchWriterTimeout))
    +      batchWriterTimeout = Long.parseLong(options.get(OPT_batchWriterTimeout));
    +    if (options.containsKey(OPT_batchWriterMaxMemory))
    +      batchWriterMaxMemory = Long.parseLong(options.get(OPT_batchWriterMaxMemory));
    +    if (options.containsKey(OPT_clearCacheAfterFirstWrite))
    +      clearCacheAfterFirstWrite = Boolean.parseBoolean(options.get(OPT_clearCacheAfterFirstWrite));
    +    if (options.containsKey(OPT_splitAfterFirstWrite))
    +      splitAfterFirstWrite = Boolean.parseBoolean(options.get(OPT_splitAfterFirstWrite));
    +    instanceName = options.get(INSTANCENAME);
    +    tableName = options.get(TABLENAME);
    +    zookeeperHost = options.get(ZOOKEEPERHOST);
    +    zookeeperTimeout = Integer.parseInt(options.get(ZOOKEEPERTIMEOUT));
    +    username = options.get(USERNAME);
    +    String authClass = options.get(AUTHENTICATION_TOKEN_CLASS);
    +    String authString = options.get(AUTHENTICATION_TOKEN);
    +    auth = SerializationUtil.subclassNewInstance(authClass, AuthenticationToken.class);
    +    SerializationUtil.deserializeWritableBase64(auth, authString);
    +  }
    +  private void initBatchWriter() {
    +    ClientConfiguration cc = ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zookeeperHost).withZkTimeout(zookeeperTimeout);
    +    Instance instance = new ZooKeeperInstance(cc);
    +    try {
    +      connector = instance.getConnector(username, auth);
    +    } catch (AccumuloException e) {
    --- End diff --
    `catch (Exception e)` here too in lieu of multi-catch

> BatchWriter Locator cache out-of-sync when shared with tserver
> --------------------------------------------------------------
>                 Key: ACCUMULO-4229
>                 URL:
>             Project: Accumulo
>          Issue Type: Bug
>          Components: client
>    Affects Versions: 1.6.5, 1.7.1
>            Reporter: Dylan Hutchison
>            Assignee: Dylan Hutchison
> BatchWriters that run a long time have write rates that sometimes mysteriously decrease
after the table it is writing to goes through a major compaction or a split.  The decrease
can be as bad as reducing throughput to 0.
> This was first mentioned in this [email thread|]
for major compactions.  
> I discovered this in this [email thread|]
for splits.  See the thread for some log messages.
> I turned on TRACE logs and I think I pinned it down: the TabletLocator cached by a BatchWriter
gets out of sync with the static cache of TabletLocators.
> # The TabletServerBatchWriter caches a TabletLocator from the static collection of TabletLocators
when it starts writing.  Suppose it is writing to tablet T1.
> # The TabletServerBatchWriter uses its locally cached TabletLocator inside its `binMutations`
method for its entire lifespan; this cache is never refreshed or updated to sync up with the
static collection of TabletLocators.
> # Every hour, the static collection of TabletLocators clears itself.  The next call to
get a TabletLocator from the static collection allocates a new TabletLocator.  Unfortunately,
the TabletServerBatchWriter does not reflect this change and continues to use the old, locally
cached TabletLocator.
> # Tablet T1 splits into T2 and T3, which closes T1.  As such, it no longer exists and
the tablet server that receives the entries meant to go to T1 all fail to write because T1
is closed.
> # The TabletServerBatchWriter receives the response from the tablet server that all entries
failed to write.  It invalidates the cache of the *new* TabletLocator obtained from the static
collection of TabletLocators.  The old TabletLocator that is cached locally does not get invalidated.
> # The TabletServerBatchWriter re-queues the failed entries and tries to write them to
the same closed tablet T1, because it is still looking up tablets using the old TabletLocator.
> This behavior subsumes the circumstances William wrote about in the thread he mentioned.
 The problem would occur as a result of either splits or major compactions.  It would only
stop the BatchWriter if its entire memory filled up with writes to the same tablet that was
closed as a result of a majc or split; otherwise it would just slow down the BatchWriter by
failing to write some number of entries with every RPC.
> There are a few solutions we can think of.  
> # Not have the MutationWriter inside the TabletServerBatchWriter locally cache TabletLocators.
 I suspect this was done for performance reasons, so it's probably not a good solution. 
> # Have all the MutationWriters clear their cache at the same time the static TabletLocator
cache clears.  We could store a reference to the Map that each MutationWriter has inside a
static synchronized WeakHashMap.  The only time the weak map needs to be accessed is:
> ## When a MutationWriter is constructed (from constructing a TabletServerBatchWriter),
add its new local TabletLocator cache to the weak map.
> ## When the static TabletLocator cache is cleared, also clear every map in the weak map.
> # Another solution is to make the invalidate calls on the local TabletLocator cache rather
than the global static one.  If we go this route we should double check the idea to make sure
it does not impact the correctness of any other pieces of code that use the cache. 
> # Perhaps the simplest solution is to put an extra Boolean variable inside the Locators
indicating whether they are valid.  When they are cleared, their Boolean variables are set to
false.  Before a client uses a locator from cache, it checks its Boolean indicator.
> The TimeoutTabletLocator does not help when no timeout is set on the BatchWriter (the
default behavior).

This message was sent by Atlassian JIRA

View raw message