accumulo-commits mailing list archives

From els...@apache.org
Subject [09/32] git commit: Merge branch '1.5.2-SNAPSHOT' into 1.6.1-SNAPSHOT
Date Wed, 28 May 2014 20:51:42 GMT
Merge branch '1.5.2-SNAPSHOT' into 1.6.1-SNAPSHOT

Conflicts:
	core/src/main/java/org/apache/accumulo/core/conf/Property.java
	server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/2bf3ebb3
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/2bf3ebb3
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/2bf3ebb3

Branch: refs/heads/ACCUMULO-378
Commit: 2bf3ebb3db07a0c6b57e311037e467a147faefa0
Parents: adaf146 f654e7f
Author: Josh Elser <elserj@apache.org>
Authored: Fri May 23 15:50:49 2014 -0400
Committer: Josh Elser <elserj@apache.org>
Committed: Fri May 23 15:50:49 2014 -0400

----------------------------------------------------------------------
 .../main/java/org/apache/accumulo/core/conf/Property.java |  5 ++++-
 .../java/org/apache/accumulo/tserver/log/DfsLogger.java   | 10 ++++++----
 2 files changed, 10 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
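
The substance of this merge is the new tserver.wal.sync.method property and the matching change in DfsLogger.open(): the WAL sync java.lang.reflect.Method is now looked up by its configured name, with a fallback to the legacy Hadoop "sync" method for older Hadoop versions. A minimal standalone sketch of that lookup pattern, assuming a hypothetical SyncMethodResolver helper (not part of this commit):

import java.lang.reflect.Method;

class SyncMethodResolver {
  // Resolve the configured sync method (e.g. "hsync") on the stream class,
  // falling back to the legacy Hadoop "sync" method when it is absent.
  static Method resolveSyncMethod(Class<?> streamClass, String configured) {
    try {
      // Preferred: e.g. hsync, which syncs the data to disk on the datanodes.
      return streamClass.getMethod(configured);
    } catch (NoSuchMethodException primary) {
      try {
        // Fallback: legacy sync, which only sends the data to the datanodes.
        return streamClass.getMethod("sync");
      } catch (NoSuchMethodException fallback) {
        // Neither method exists; this Hadoop version cannot be supported.
        throw new RuntimeException(primary);
      }
    }
  }
}

The resolved Method is then invoked reflectively (sync.invoke(logFile) in the diff below), so the same code runs against Hadoop versions that predate the hsync/hflush API.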


http://git-wip-us.apache.org/repos/asf/accumulo/blob/2bf3ebb3/core/src/main/java/org/apache/accumulo/core/conf/Property.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 47d1817,3d82046..2ade366
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@@ -262,16 -201,19 +262,19 @@@ public enum Property 
   TSERV_SORT_BUFFER_SIZE("tserver.sort.buffer.size", "200M", PropertyType.MEMORY, "The amount of memory to use when sorting logs during recovery."),
   TSERV_ARCHIVE_WALOGS("tserver.archive.walogs", "false", PropertyType.BOOLEAN, "Keep copies of the WALOGs for debugging purposes"),
   TSERV_WORKQ_THREADS("tserver.workq.threads", "2", PropertyType.COUNT,
 -      "The number of threads for the distributed workq.  These threads are used for copying failed bulk files."),
 +      "The number of threads for the distributed work queue. These threads are used for copying failed bulk files."),
   TSERV_WAL_SYNC("tserver.wal.sync", "true", PropertyType.BOOLEAN,
       "Use the SYNC_BLOCK create flag to sync WAL writes to disk. Prevents problems recovering from sudden system resets."),
- 
+   TSERV_WAL_SYNC_METHOD("tserver.wal.sync.method", "hsync", PropertyType.STRING, "The method to invoke when sync'ing WALs. HSync will provide " +
+       "resiliency in the face of unexpected power outages, at the cost of speed. If method is not available, the legacy 'sync' method " +
+       "will be used to ensure backwards compatibility with older Hadoop versions"),
 -  
++ 
   // properties that are specific to logger server behavior
   LOGGER_PREFIX("logger.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the write-ahead logger servers"),
 -  LOGGER_DIR("logger.dir.walog", "walogs", PropertyType.PATH,
 -      "The property only needs to be set if upgrading from 1.4 which used to store write-ahead logs on the local filesystem. In 1.5 write-ahead logs are "
 -          + "stored in DFS.  When 1.5 is started for the first time it will copy any 1.4 write ahead logs into DFS.  It is possible to specify a "
 -          + "comma-separated list of directories."),
 +  LOGGER_DIR("logger.dir.walog", "walogs", PropertyType.PATH, "This property is only needed if Accumulo was upgraded from a 1.4 or earlier version. "
 +      + "In the upgrade to 1.5 this property is used to copy any earlier write ahead logs into DFS. "
 +      + "In 1.6+, this property is used by the LocalWALRecovery utility in the event that something went wrong with that earlier upgrade. "
 +      + "It is possible to specify a comma-separated list of directories."),
 
   // accumulo garbage collector properties
   GC_PREFIX("gc.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the accumulo garbage collector."),
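
For context on TSERV_WAL_SYNC_METHOD above: in the Hadoop 2 Syncable API, hsync() forces the data onto disk at each datanode, while hflush() (the successor of the deprecated sync()) only guarantees the data has reached the datanodes. A hedged illustration of the difference the property selects between (the durably helper is hypothetical, not Accumulo code):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;

class WalSyncExample {
  // Illustrative only: the two durability levels the property chooses between.
  static void durably(FSDataOutputStream out, boolean requireDiskSync) throws IOException {
    if (requireDiskSync) {
      out.hsync();   // on disk at each datanode; survives sudden power loss
    } else {
      out.hflush();  // visible to readers; may still sit in OS buffers
    }
  }
}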

http://git-wip-us.apache.org/repos/asf/accumulo/blob/2bf3ebb3/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
----------------------------------------------------------------------
diff --cc server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
index 61c1059,0000000..71dccc0
mode 100644,000000..100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
@@@ -1,557 -1,0 +1,559 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.tserver.log;
 +
 +import static org.apache.accumulo.tserver.logger.LogEvents.COMPACTION_FINISH;
 +import static org.apache.accumulo.tserver.logger.LogEvents.COMPACTION_START;
 +import static org.apache.accumulo.tserver.logger.LogEvents.DEFINE_TABLET;
 +import static org.apache.accumulo.tserver.logger.LogEvents.MANY_MUTATIONS;
 +import static org.apache.accumulo.tserver.logger.LogEvents.OPEN;
 +
 +import java.io.DataInputStream;
 +import java.io.DataOutputStream;
 +import java.io.IOException;
 +import java.io.OutputStream;
 +import java.lang.reflect.InvocationTargetException;
 +import java.lang.reflect.Method;
 +import java.nio.channels.ClosedChannelException;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +import java.util.UUID;
 +import java.util.concurrent.CountDownLatch;
 +import java.util.concurrent.LinkedBlockingQueue;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.security.crypto.CryptoModule;
 +import org.apache.accumulo.core.security.crypto.CryptoModuleFactory;
 +import org.apache.accumulo.core.security.crypto.CryptoModuleParameters;
 +import org.apache.accumulo.core.security.crypto.DefaultCryptoModule;
 +import org.apache.accumulo.core.security.crypto.NoFlushOutputStream;
 +import org.apache.accumulo.core.util.Daemon;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.core.util.StringUtil;
 +import org.apache.accumulo.server.ServerConstants;
 +import org.apache.accumulo.server.fs.VolumeManager;
 +import org.apache.accumulo.server.master.state.TServerInstance;
 +import org.apache.accumulo.tserver.TabletMutations;
 +import org.apache.accumulo.tserver.logger.LogFileKey;
 +import org.apache.accumulo.tserver.logger.LogFileValue;
 +import org.apache.hadoop.fs.FSDataInputStream;
 +import org.apache.hadoop.fs.FSDataOutputStream;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.log4j.Logger;
 +
 +/**
 + * Wrap a connection to a logger.
 + * 
 + */
 +public class DfsLogger {
 +  // Package private so that LogSorter can find this
 +  static final String LOG_FILE_HEADER_V2 = "--- Log File Header (v2) ---";
 +  static final String LOG_FILE_HEADER_V3 = "--- Log File Header (v3) ---";
 +
 +  private static Logger log = Logger.getLogger(DfsLogger.class);
 +
 +  public static class LogClosedException extends IOException {
 +    private static final long serialVersionUID = 1L;
 +
 +    public LogClosedException() {
 +      super("LogClosed");
 +    }
 +  }
 +
 +  public static class DFSLoggerInputStreams {
 +
 +    private FSDataInputStream originalInput;
 +    private DataInputStream decryptingInputStream;
 +
 +    public DFSLoggerInputStreams(FSDataInputStream originalInput, DataInputStream decryptingInputStream) {
 +      this.originalInput = originalInput;
 +      this.decryptingInputStream = decryptingInputStream;
 +    }
 +
 +    public FSDataInputStream getOriginalInput() {
 +      return originalInput;
 +    }
 +
 +    public void setOriginalInput(FSDataInputStream originalInput) {
 +      this.originalInput = originalInput;
 +    }
 +
 +    public DataInputStream getDecryptingInputStream() {
 +      return decryptingInputStream;
 +    }
 +
 +    public void setDecryptingInputStream(DataInputStream decryptingInputStream) {
 +      this.decryptingInputStream = decryptingInputStream;
 +    }
 +  }
 +
 +  public interface ServerResources {
 +    AccumuloConfiguration getConfiguration();
 +
 +    VolumeManager getFileSystem();
 +
 +    Set<TServerInstance> getCurrentTServers();
 +  }
 +
 +  private final LinkedBlockingQueue<DfsLogger.LogWork> workQueue = new LinkedBlockingQueue<DfsLogger.LogWork>();
 +
 +  private final Object closeLock = new Object();
 +
 +  private static final DfsLogger.LogWork CLOSED_MARKER = new DfsLogger.LogWork(null);
 +
 +  private static final LogFileValue EMPTY = new LogFileValue();
 +
 +  private boolean closed = false;
 +
 +  private class LogSyncingTask implements Runnable {
 +
 +    @Override
 +    public void run() {
 +      ArrayList<DfsLogger.LogWork> work = new ArrayList<DfsLogger.LogWork>();
 +      while (true) {
 +        work.clear();
 +
 +        try {
 +          work.add(workQueue.take());
 +        } catch (InterruptedException ex) {
 +          continue;
 +        }
 +        workQueue.drainTo(work);
 +
 +        synchronized (closeLock) {
 +          if (!closed) {
 +            try {
 +              sync.invoke(logFile);
 +            } catch (Exception ex) {
 +              log.warn("Exception syncing " + ex);
 +              for (DfsLogger.LogWork logWork : work) {
 +                logWork.exception = ex;
 +              }
 +            }
 +          } else {
 +            for (DfsLogger.LogWork logWork : work) {
 +              logWork.exception = new LogClosedException();
 +            }
 +          }
 +        }
 +
 +        boolean sawClosedMarker = false;
 +        for (DfsLogger.LogWork logWork : work)
 +          if (logWork == CLOSED_MARKER)
 +            sawClosedMarker = true;
 +          else
 +            logWork.latch.countDown();
 +
 +        if (sawClosedMarker) {
 +          synchronized (closeLock) {
 +            closeLock.notifyAll();
 +          }
 +          break;
 +        }
 +      }
 +    }
 +  }
 +
 +  static class LogWork {
 +    CountDownLatch latch;
 +    volatile Exception exception;
 +
 +    public LogWork(CountDownLatch latch) {
 +      this.latch = latch;
 +    }
 +  }
 +
 +  public static class LoggerOperation {
 +    private final LogWork work;
 +
 +    public LoggerOperation(LogWork work) {
 +      this.work = work;
 +    }
 +
 +    public void await() throws IOException {
 +      try {
 +        work.latch.await();
 +      } catch (InterruptedException e) {
 +        throw new RuntimeException(e);
 +      }
 +
 +      if (work.exception != null) {
 +        if (work.exception instanceof IOException)
 +          throw (IOException) work.exception;
 +        else if (work.exception instanceof RuntimeException)
 +          throw (RuntimeException) work.exception;
 +        else
 +          throw new RuntimeException(work.exception);
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public boolean equals(Object obj) {
 +    // filename is unique
 +    if (obj == null)
 +      return false;
 +    if (obj instanceof DfsLogger)
 +      return getFileName().equals(((DfsLogger) obj).getFileName());
 +    return false;
 +  }
 +
 +  @Override
 +  public int hashCode() {
 +    // filename is unique
 +    return getFileName().hashCode();
 +  }
 +
 +  private final ServerResources conf;
 +  private FSDataOutputStream logFile;
 +  private DataOutputStream encryptingLogFile = null;
 +  private Method sync;
 +  private String logPath;
 +
 +  public DfsLogger(ServerResources conf) throws IOException {
 +    this.conf = conf;
 +  }
 +
 +  public DfsLogger(ServerResources conf, String filename) throws IOException {
 +    this.conf = conf;
 +    this.logPath = filename;
 +  }
 +
 +  public static DFSLoggerInputStreams readHeaderAndReturnStream(VolumeManager fs, Path path, AccumuloConfiguration conf) throws IOException {
 +    FSDataInputStream input = fs.open(path);
 +    DataInputStream decryptingInput = null;
 +
 +    byte[] magic = DfsLogger.LOG_FILE_HEADER_V3.getBytes();
 +    byte[] magicBuffer = new byte[magic.length];
 +    input.readFully(magicBuffer);
 +    if (Arrays.equals(magicBuffer, magic)) {
 +      // additional parameters it needs from the underlying stream.
 +      String cryptoModuleClassname = input.readUTF();
 +      CryptoModule cryptoModule = CryptoModuleFactory.getCryptoModule(cryptoModuleClassname);
 +
 +      // Create the parameters and set the input stream into those parameters
 +      CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
 +      params.setEncryptedInputStream(input);
 +
 +      // Create the plaintext input stream from the encrypted one
 +      params = cryptoModule.getDecryptingInputStream(params);
 +
 +      if (params.getPlaintextInputStream() instanceof DataInputStream) {
 +        decryptingInput = (DataInputStream) params.getPlaintextInputStream();
 +      } else {
 +        decryptingInput = new DataInputStream(params.getPlaintextInputStream());
 +      }
 +    } else {
 +      input.seek(0);
 +      byte[] magicV2 = DfsLogger.LOG_FILE_HEADER_V2.getBytes();
 +      byte[] magicBufferV2 = new byte[magicV2.length];
 +      input.readFully(magicBufferV2);
 +
 +      if (Arrays.equals(magicBufferV2, magicV2)) {
 +        // Log files from 1.5 dump their options in raw to the logger files. Since we don't know the class
 +        // that needs to read those files, we can make a couple of basic assumptions. Either it's going to be
 +        // the NullCryptoModule (no crypto) or the DefaultCryptoModule.
 +
 +        // If it's null, we won't have any parameters whatsoever. First, let's attempt to read
 +        // parameters
 +        Map<String,String> opts = new HashMap<String,String>();
 +        int count = input.readInt();
 +        for (int i = 0; i < count; i++) {
 +          String key = input.readUTF();
 +          String value = input.readUTF();
 +          opts.put(key, value);
 +        }
 +
 +        if (opts.size() == 0) {
 +          // NullCryptoModule, we're done
 +          decryptingInput = input;
 +        } else {
 +
 +          // The DefaultCryptoModule will want to read the parameters from the underlying file, so we will put the file back to that spot.
 +          org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule = org.apache.accumulo.core.security.crypto.CryptoModuleFactory
 +              .getCryptoModule(DefaultCryptoModule.class.getName());
 +
 +          CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
 +
 +          input.seek(0);
 +          input.readFully(magicBufferV2);
 +          params.setEncryptedInputStream(input);
 +
 +          params = cryptoModule.getDecryptingInputStream(params);
 +          if (params.getPlaintextInputStream() instanceof DataInputStream) {
 +            decryptingInput = (DataInputStream) params.getPlaintextInputStream();
 +          } else {
 +            decryptingInput = new DataInputStream(params.getPlaintextInputStream());
 +          }
 +        }
 +
 +      } else {
 +
 +        input.seek(0);
 +        decryptingInput = input;
 +      }
 +
 +    }
 +    return new DFSLoggerInputStreams(input, decryptingInput);
 +  }
 +
 +  public synchronized void open(String address) throws IOException {
 +    String filename = UUID.randomUUID().toString();
 +    String logger = StringUtil.join(Arrays.asList(address.split(":")), "+");
 +
 +    log.debug("DfsLogger.open() begin");
 +    VolumeManager fs = conf.getFileSystem();
 +
 +    logPath = fs.choose(ServerConstants.getWalDirs()) + "/" + logger + "/" + filename;
 +    try {
 +      short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
 +      if (replication == 0)
 +        replication = fs.getDefaultReplication(new Path(logPath));
 +      long blockSize = conf.getConfiguration().getMemoryInBytes(Property.TSERV_WAL_BLOCKSIZE);
 +      if (blockSize == 0)
 +        blockSize = (long) (conf.getConfiguration().getMemoryInBytes(Property.TSERV_WALOG_MAX_SIZE) * 1.1);
 +      if (conf.getConfiguration().getBoolean(Property.TSERV_WAL_SYNC))
 +        logFile = fs.createSyncable(new Path(logPath), 0, replication, blockSize);
 +      else
 +        logFile = fs.create(new Path(logPath), true, 0, replication, blockSize);
 +
++      String syncMethod = conf.getConfiguration().get(Property.TSERV_WAL_SYNC_METHOD);
 +      try {
 +        NoSuchMethodException e = null;
 +        try {
-           // sync: send data to datanodes
-           sync = logFile.getClass().getMethod("sync");
++          // hsync: send data to datanodes and sync the data to disk
++          sync = logFile.getClass().getMethod(syncMethod);
 +        } catch (NoSuchMethodException ex) {
++          log.warn("Could not find configured " + syncMethod + " method, trying to fall back to old Hadoop sync method", ex);
 +          e = ex;
 +        }
 +        try {
-           // hsync: send data to datanodes and sync the data to disk
-           sync = logFile.getClass().getMethod("hsync");
++          // sync: send data to datanodes
++          sync = logFile.getClass().getMethod("sync");
 +          e = null;
 +        } catch (NoSuchMethodException ex) {}
 +        if (e != null)
 +          throw new RuntimeException(e);
 +      } catch (Exception e) {
 +        throw new RuntimeException(e);
 +      }
 +
 +      // Initialize the crypto operations.
 +      org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule = org.apache.accumulo.core.security.crypto.CryptoModuleFactory.getCryptoModule(conf
 +          .getConfiguration().get(Property.CRYPTO_MODULE_CLASS));
 +
 +      // Initialize the log file with a header and the crypto params used to set up this log file.
 +      logFile.write(LOG_FILE_HEADER_V3.getBytes(Constants.UTF8));
 +
 +      CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf.getConfiguration());
 +
 +      NoFlushOutputStream nfos = new NoFlushOutputStream(logFile);
 +      params.setPlaintextOutputStream(nfos);
 +
 +      // In order to bootstrap the reading of this file later, we have to record the CryptoModule that was used to encipher it here,
 +      // so that that crypto module can re-read its own parameters.
 +
 +      logFile.writeUTF(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));
 +
 +      params = cryptoModule.getEncryptingOutputStream(params);
 +      OutputStream encipheringOutputStream = params.getEncryptedOutputStream();
 +
 +      // If the module just kicks back our original stream, then just use it, don't wrap it in
 +      // another data OutputStream.
 +      if (encipheringOutputStream == nfos) {
 +        log.debug("No enciphering, using raw output stream");
 +        encryptingLogFile = nfos;
 +      } else {
 +        log.debug("Enciphering found, wrapping in DataOutputStream");
 +        encryptingLogFile = new DataOutputStream(encipheringOutputStream);
 +      }
 +
 +      LogFileKey key = new LogFileKey();
 +      key.event = OPEN;
 +      key.tserverSession = filename;
 +      key.filename = filename;
 +      write(key, EMPTY);
 +      sync.invoke(logFile);
 +      log.debug("Got new write-ahead log: " + this);
 +    } catch (Exception ex) {
 +      if (logFile != null)
 +        logFile.close();
 +      logFile = null;
 +      encryptingLogFile = null;
 +      throw new IOException(ex);
 +    }
 +
 +    Thread t = new Daemon(new LogSyncingTask());
 +    t.setName("Accumulo WALog thread " + toString());
 +    t.start();
 +  }
 +
 +  @Override
 +  public String toString() {
 +    String fileName = getFileName();
 +    if (fileName.contains(":"))
 +      return getLogger() + "/" + getFileName();
 +    return fileName;
 +  }
 +
 +  public String getFileName() {
 +    return logPath.toString();
 +  }
 +
 +  public void close() throws IOException {
 +
 +    synchronized (closeLock) {
 +      if (closed)
 +        return;
 +      // after closed is set to true, nothing else should be added to the queue
 +      // CLOSED_MARKER should be the last thing on the queue, therefore when the
 +      // background thread sees the marker and exits there should be nothing else
 +      // to process... so nothing should be left waiting for the background
 +      // thread to do work
 +      closed = true;
 +      workQueue.add(CLOSED_MARKER);
 +      while (!workQueue.isEmpty())
 +        try {
 +          closeLock.wait();
 +        } catch (InterruptedException e) {
 +          log.info("Interrupted");
 +        }
 +    }
 +
 +    if (encryptingLogFile != null)
 +      try {
 +        logFile.close();
 +      } catch (IOException ex) {
 +        log.error(ex);
 +        throw new LogClosedException();
 +      }
 +  }
 +
 +  public synchronized void defineTablet(int seq, int tid, KeyExtent tablet) throws IOException {
 +    // write this log to the METADATA table
 +    final LogFileKey key = new LogFileKey();
 +    key.event = DEFINE_TABLET;
 +    key.seq = seq;
 +    key.tid = tid;
 +    key.tablet = tablet;
 +    try {
 +      write(key, EMPTY);
 +      sync.invoke(logFile);
 +    } catch (IllegalArgumentException e) {
 +      log.error("Signature of sync method changed. Accumulo is likely incompatible with this version of Hadoop.");
 +      throw new RuntimeException(e);
 +    } catch (IllegalAccessException e) {
 +      log.error("Could not invoke sync method due to permission error.");
 +      throw new RuntimeException(e);
 +    } catch (InvocationTargetException e) {
 +      Throwable cause = e.getCause();
 +      if (cause instanceof IOException) {
 +        throw (IOException) cause;
 +      } else if (cause instanceof RuntimeException) {
 +        throw (RuntimeException) cause;
 +      } else if (cause instanceof Error) {
 +        throw (Error) cause;
 +      } else {
 +        // Cause is null, or some other checked exception that was added later.
 +        throw new RuntimeException(e);
 +      }
 +    }
 +  }
 +
 +  private synchronized void write(LogFileKey key, LogFileValue value) throws IOException {
 +    key.write(encryptingLogFile);
 +    value.write(encryptingLogFile);
 +    encryptingLogFile.flush();
 +  }
 +
 +  public LoggerOperation log(int seq, int tid, Mutation mutation) throws IOException {
 +    return logManyTablets(Collections.singletonList(new TabletMutations(tid, seq, Collections.singletonList(mutation))));
 +  }
 +
 +  private LoggerOperation logFileData(List<Pair<LogFileKey,LogFileValue>> keys) throws IOException {
 +    DfsLogger.LogWork work = new DfsLogger.LogWork(new CountDownLatch(1));
 +    synchronized (DfsLogger.this) {
 +      try {
 +        for (Pair<LogFileKey,LogFileValue> pair : keys) {
 +          write(pair.getFirst(), pair.getSecond());
 +        }
 +      } catch (ClosedChannelException ex) {
 +        throw new LogClosedException();
 +      } catch (Exception e) {
 +        log.error(e, e);
 +        work.exception = e;
 +      }
 +    }
 +
 +    synchronized (closeLock) {
 +      // use a different lock for close check so that adding to work queue does not need
 +      // to wait on walog I/O operations
 +
 +      if (closed)
 +        throw new LogClosedException();
 +      workQueue.add(work);
 +    }
 +
 +    return new LoggerOperation(work);
 +  }
 +
 +  public LoggerOperation logManyTablets(List<TabletMutations> mutations) throws IOException {
 +    List<Pair<LogFileKey,LogFileValue>> data = new ArrayList<Pair<LogFileKey,LogFileValue>>();
 +    for (TabletMutations tabletMutations : mutations) {
 +      LogFileKey key = new LogFileKey();
 +      key.event = MANY_MUTATIONS;
 +      key.seq = tabletMutations.getSeq();
 +      key.tid = tabletMutations.getTid();
 +      LogFileValue value = new LogFileValue();
 +      value.mutations = tabletMutations.getMutations();
 +      data.add(new Pair<LogFileKey,LogFileValue>(key, value));
 +    }
 +    return logFileData(data);
 +  }
 +
 +  public LoggerOperation minorCompactionFinished(int seq, int tid, String fqfn) throws IOException {
 +    LogFileKey key = new LogFileKey();
 +    key.event = COMPACTION_FINISH;
 +    key.seq = seq;
 +    key.tid = tid;
 +    return logFileData(Collections.singletonList(new Pair<LogFileKey,LogFileValue>(key, EMPTY)));
 +  }
 +
 +  public LoggerOperation minorCompactionStarted(int seq, int tid, String fqfn) throws IOException {
 +    LogFileKey key = new LogFileKey();
 +    key.event = COMPACTION_START;
 +    key.seq = seq;
 +    key.tid = tid;
 +    key.filename = fqfn;
 +    return logFileData(Collections.singletonList(new Pair<LogFileKey,LogFileValue>(key, EMPTY)));
 +  }
 +
 +  public String getLogger() {
 +    String parts[] = logPath.split("/");
 +    return StringUtil.join(Arrays.asList(parts[parts.length - 2].split("[+]")), ":");
 +  }
 +
 +}
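
A closing note on the unchanged machinery above: LogSyncingTask implements a group-commit loop, where concurrent writers enqueue a latch-carrying LogWork, a single background thread drains the queue, performs one sync covering the whole batch, and then releases every waiter. A stripped-down sketch of that pattern (class and method names hypothetical):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;

class GroupCommitter implements Runnable {
  private final LinkedBlockingQueue<CountDownLatch> queue = new LinkedBlockingQueue<CountDownLatch>();

  // Called by writers after appending: returns once a sync covering the write has run.
  void awaitSync() throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(1);
    queue.put(latch);
    latch.await();
  }

  public void run() {
    List<CountDownLatch> batch = new ArrayList<CountDownLatch>();
    while (true) {
      batch.clear();
      try {
        batch.add(queue.take());  // block until at least one writer is waiting
      } catch (InterruptedException ex) {
        continue;
      }
      queue.drainTo(batch);       // pick up everyone who queued in the meantime
      syncOnce();                 // one sync amortized across the whole batch
      for (CountDownLatch latch : batch)
        latch.countDown();        // release all writers covered by that sync
    }
  }

  private void syncOnce() {
    // stand-in for sync.invoke(logFile) in DfsLogger
  }
}

This is why DfsLogger can sustain many concurrent mutation streams with relatively few expensive hsync calls: each drain of the queue turns N pending writers into a single filesystem sync.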

