accumulo-dev mailing list archives

From keith-turner <...@git.apache.org>
Subject [GitHub] accumulo pull request #106: ACCUMULO-4153: Update the getCodec method to no ...
Date Wed, 01 Jun 2016 19:55:37 GMT
Github user keith-turner commented on a diff in the pull request:

    https://github.com/apache/accumulo/pull/106#discussion_r65430644
  
    --- Diff: core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Compression.java ---
    @@ -244,44 +361,58 @@ public synchronized OutputStream createCompressionStream(OutputStream downStream
             } else {
               bos1 = downStream;
             }
    -        conf.setInt("io.compression.codec.snappy.buffersize", 64 * 1024);
    +        // use the default codec
             CompressionOutputStream cos = snappyCodec.createOutputStream(bos1, compressor);
             BufferedOutputStream bos2 = new BufferedOutputStream(new FinishOnFlushCompressionStream(cos), DATA_OBUF_SIZE);
             return bos2;
           }
     
           @Override
    -      public synchronized InputStream createDecompressionStream(InputStream downStream, Decompressor decompressor, int downStreamBufferSize) throws IOException {
    +      public InputStream createDecompressionStream(InputStream downStream, Decompressor decompressor, int downStreamBufferSize) throws IOException {
             if (!isSupported()) {
              throw new IOException("SNAPPY codec class not specified. Did you forget to set property " + CONF_SNAPPY_CLASS + "?");
             }
    -        if (downStreamBufferSize > 0) {
    -          conf.setInt("io.file.buffer.size", downStreamBufferSize);
    +
    +        CompressionCodec decomCodec = snappyCodec;
    +        // if we're not using the same buffer size, we'll pull the codec from the loading cache
    +        if (DEFAULT_BUFFER_SIZE != downStreamBufferSize) {
    +          Entry<Algorithm,Integer> sizeOpt = Maps.immutableEntry(SNAPPY, downStreamBufferSize);
    +          try {
    +            decomCodec = codecCache.get(sizeOpt);
    +          } catch (ExecutionException e) {
    +            throw new IOException(e);
    +          }
             }
    -        CompressionInputStream cis = snappyCodec.createInputStream(downStream, decompressor);
    +
    +        CompressionInputStream cis = decomCodec.createInputStream(downStream, decompressor);
             BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
             return bis2;
           }
     
           @Override
    -      public synchronized boolean isSupported() {
    -        if (!checked) {
    -          checked = true;
    -          String extClazz = (conf.get(CONF_SNAPPY_CLASS) == null ? System.getProperty(CONF_SNAPPY_CLASS) : null);
    -          String clazz = (extClazz != null) ? extClazz : defaultClazz;
    -          try {
    -            LOG.info("Trying to load snappy codec class: " + clazz);
    -            snappyCodec = (CompressionCodec) ReflectionUtils.newInstance(Class.forName(clazz), conf);
    -          } catch (ClassNotFoundException e) {
    -            // that is okay
    -          }
    -        }
    +      public boolean isSupported() {
    +
    --- End diff --
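    The change above stops mutating the shared Configuration on every call (the removed
    "io.compression.codec.snappy.buffersize" and "io.file.buffer.size" writes) and instead resolves a
    codec from a loading cache keyed by (algorithm, buffer size), falling back to the default codec
    when the default buffer size is requested. A minimal, self-contained sketch of that pattern,
    assuming Guava's CacheBuilder/CacheLoader and a hypothetical createCodec factory (illustrative
    names, not the actual Compression.java wiring):

        import java.io.IOException;
        import java.util.Map.Entry;
        import java.util.concurrent.ExecutionException;

        import com.google.common.cache.CacheBuilder;
        import com.google.common.cache.CacheLoader;
        import com.google.common.cache.LoadingCache;
        import com.google.common.collect.Maps;

        public class CodecCacheSketch {

          // stand-ins for the real Algorithm enum and Hadoop's CompressionCodec
          enum Algorithm { SNAPPY, GZ }
          interface CompressionCodec {}

          // codecs keyed by (algorithm, buffer size): each distinct buffer size gets its
          // own codec instance, so no shared Configuration is mutated per call
          private final LoadingCache<Entry<Algorithm,Integer>,CompressionCodec> codecCache =
              CacheBuilder.newBuilder().maximumSize(25)
                  .build(new CacheLoader<Entry<Algorithm,Integer>,CompressionCodec>() {
                    @Override
                    public CompressionCodec load(Entry<Algorithm,Integer> key) {
                      return createCodec(key.getKey(), key.getValue());
                    }
                  });

          public CompressionCodec codecFor(Algorithm algo, int bufferSize) throws IOException {
            try {
              return codecCache.get(Maps.immutableEntry(algo, bufferSize));
            } catch (ExecutionException e) {
              throw new IOException(e);
            }
          }

          // hypothetical factory; the real code would build a Hadoop codec configured
          // with the requested buffer size
          private CompressionCodec createCodec(Algorithm algo, int bufferSize) {
            return new CompressionCodec() {};
          }
        }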
    
    since this is no longer synchronized... wondering if snappyCodec should be volatile
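
    For background on that question: once isSupported() and createDecompressionStream() drop the
    synchronized keyword, the lazily loaded codec field needs safe publication so that a codec
    constructed by one thread is visible to unsynchronized readers. A minimal sketch of one way to
    do that with a volatile field (illustrative names, not the actual patch):

        public class LazyCodecHolder {

          interface CompressionCodec {}   // stand-in for Hadoop's CompressionCodec

          // volatile: writes made while loading are visible to readers that never lock
          private volatile CompressionCodec snappyCodec;
          private volatile boolean checked;

          public boolean isSupported() {
            if (!checked) {
              synchronized (this) {             // attempt the load at most once
                if (!checked) {
                  snappyCodec = tryLoadCodec(); // may stay null if the class is absent
                  checked = true;               // written after the codec, so a reader that
                                                // observes checked == true also sees the codec
                }
              }
            }
            return snappyCodec != null;
          }

          // hypothetical reflective load; returns null when the codec class is not on the classpath
          private CompressionCodec tryLoadCodec() {
            return null;
          }
        }

    Whether the patch keeps a checked flag or initializes codecs eagerly is up to the PR; the point
    is that unsynchronized readers need a volatile (or safely published) field to see a fully
    constructed codec.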


