tephra-dev mailing list archives

From poornachandra <...@git.apache.org>
Subject [GitHub] incubator-tephra pull request #67: TEPHRA-272 Add HBase 2.0 compatibility mo...
Date Tue, 09 Jan 2018 13:39:50 GMT
Github user poornachandra commented on a diff in the pull request:

    https://github.com/apache/incubator-tephra/pull/67#discussion_r160405760
  
    --- Diff: tephra-hbase-compat-2.0/src/test/java/org/apache/tephra/hbase/coprocessor/TransactionProcessorTest.java
---
    @@ -0,0 +1,677 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *      http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.tephra.hbase.coprocessor;
    +
    +import com.google.common.collect.ImmutableSortedMap;
    +import com.google.common.collect.Lists;
    +import com.google.common.collect.Maps;
    +
    +import it.unimi.dsi.fastutil.longs.LongArrayList;
    +
    +import org.apache.hadoop.conf.Configuration;
    +import org.apache.hadoop.fs.FileSystem;
    +import org.apache.hadoop.fs.Path;
    +import org.apache.hadoop.hbase.Cell;
    +import org.apache.hadoop.hbase.CellUtil;
    +import org.apache.hadoop.hbase.ChoreService;
    +import org.apache.hadoop.hbase.HBaseConfiguration;
    +import org.apache.hadoop.hbase.HColumnDescriptor;
    +import org.apache.hadoop.hbase.HConstants;
    +import org.apache.hadoop.hbase.HRegionInfo;
    +import org.apache.hadoop.hbase.HTableDescriptor;
    +import org.apache.hadoop.hbase.KeyValue;
    +import org.apache.hadoop.hbase.MockRegionServerServices;
    +import org.apache.hadoop.hbase.ServerName;
    +import org.apache.hadoop.hbase.TableName;
    +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    +import org.apache.hadoop.hbase.client.Delete;
    +import org.apache.hadoop.hbase.client.Put;
    +import org.apache.hadoop.hbase.client.Scan;
    +import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    +import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
    +import org.apache.hadoop.hbase.regionserver.ChunkCreator;
    +import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
    +import org.apache.hadoop.hbase.regionserver.HRegion;
    +import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
    +import org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl;
    +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
    +import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
    +import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
    +import org.apache.hadoop.hbase.regionserver.RegionScanner;
    +import org.apache.hadoop.hbase.regionserver.ScanType;
    +import org.apache.hadoop.hbase.util.Bytes;
    +import org.apache.hadoop.hbase.util.FSUtils;
    +import org.apache.hadoop.hbase.wal.WAL;
    +import org.apache.hadoop.hbase.wal.WALFactory;
    +import org.apache.hadoop.hdfs.MiniDFSCluster;
    +import org.apache.tephra.Transaction;
    +import org.apache.tephra.TransactionManager;
    +import org.apache.tephra.TxConstants;
    +import org.apache.tephra.coprocessor.TransactionStateCache;
    +import org.apache.tephra.coprocessor.TransactionStateCacheSupplier;
    +import org.apache.tephra.manager.InvalidTxList;
    +import org.apache.tephra.metrics.TxMetricsCollector;
    +import org.apache.tephra.persist.HDFSTransactionStateStorage;
    +import org.apache.tephra.persist.TransactionSnapshot;
    +import org.apache.tephra.persist.TransactionVisibilityState;
    +import org.apache.tephra.snapshot.DefaultSnapshotCodec;
    +import org.apache.tephra.snapshot.SnapshotCodecProvider;
    +import org.apache.tephra.util.TxUtils;
    +import org.junit.AfterClass;
    +import org.junit.Assert;
    +import org.junit.BeforeClass;
    +import org.junit.ClassRule;
    +import org.junit.Test;
    +import org.junit.rules.TemporaryFolder;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
    +import java.io.IOException;
    +import java.lang.management.ManagementFactory;
    +import java.net.InetAddress;
    +import java.util.ArrayList;
    +import java.util.Collections;
    +import java.util.HashMap;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.TreeMap;
    +import java.util.concurrent.TimeUnit;
    +
    +import static org.junit.Assert.assertArrayEquals;
    +import static org.junit.Assert.assertEquals;
    +import static org.junit.Assert.assertFalse;
    +import static org.junit.Assert.assertNotNull;
    +import static org.junit.Assert.assertTrue;
    +
    +/**
    + * Tests filtering of invalid transaction data by the {@link TransactionProcessor} coprocessor.
    + */
    +public class TransactionProcessorTest {
    +  private static final Logger LOG = LoggerFactory.getLogger(TransactionProcessorTest.class);
    +  protected static ChunkCreator chunkCreator;
    +  // 8 versions, 1 hour apart, latest is current ts.
    +  private static final long[] V;
    +
    +  static {
    +    long now = System.currentTimeMillis();
    +    V = new long[9];
    +    for (int i = 0; i < V.length; i++) {
    +      V[i] = (now - TimeUnit.HOURS.toMillis(8 - i)) * TxConstants.MAX_TX_PER_MS;
    +    }
    +  }
    +
    +  @ClassRule
    +  public static TemporaryFolder tmpFolder = new TemporaryFolder();
    +  private static MiniDFSCluster dfsCluster;
    +  private static Configuration conf;
    +  private static LongArrayList invalidSet = new LongArrayList(new long[]{V[3], V[5], V[7]});
    +  private static TransactionVisibilityState txVisibilityState;
    +
    +  @BeforeClass
    +  public static void setupBeforeClass() throws Exception {
    +    Configuration hConf = new Configuration();
    +    String rootDir = tmpFolder.newFolder().getAbsolutePath();
    +    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, rootDir);
    +    hConf.set(HConstants.HBASE_DIR, rootDir + "/hbase");
    +
    +    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    +    dfsCluster.waitActive();
    +    conf = HBaseConfiguration.create(dfsCluster.getFileSystem().getConf());
    +
    +    conf.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
    +    conf.unset(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES);
    +    String localTestDir = tmpFolder.newFolder().getAbsolutePath();
    +    conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, localTestDir);
    +    conf.set(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES, DefaultSnapshotCodec.class.getName());
    +
    +    // write an initial transaction snapshot
    +    InvalidTxList invalidTxList = new InvalidTxList();
    +    invalidTxList.addAll(invalidSet);
    +    TransactionSnapshot txSnapshot = TransactionSnapshot.copyFrom(
    +        System.currentTimeMillis(), V[6] - 1, V[7], invalidTxList,
    +        // this will set visibility upper bound to V[6]
    +        Maps.newTreeMap(ImmutableSortedMap.of(V[6], new TransactionManager.InProgressTx(
    +          V[6] - 1, Long.MAX_VALUE, TransactionManager.InProgressType.SHORT))),
    +        new HashMap<Long, TransactionManager.ChangeSet>(), new TreeMap<Long, TransactionManager.ChangeSet>());
    +    txVisibilityState = new TransactionSnapshot(txSnapshot.getTimestamp(), txSnapshot.getReadPointer(),
    +                                                txSnapshot.getWritePointer(), txSnapshot.getInvalid(),
    +                                                txSnapshot.getInProgress());
    +    HDFSTransactionStateStorage tmpStorage =
    +      new HDFSTransactionStateStorage(conf, new SnapshotCodecProvider(conf), new TxMetricsCollector());
    +    tmpStorage.startAndWait();
    +    tmpStorage.writeSnapshot(txSnapshot);
    +    tmpStorage.stopAndWait();
    +    long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage()
    +            .getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));
    +    chunkCreator = ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false,
    +            globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null);
    +    assertNotNull(chunkCreator);
    +  }
    +
    +  @AfterClass
    +  public static void shutdownAfterClass() throws Exception {
    +    dfsCluster.shutdown();
    +  }
    +
    +  @Test
    +  public void testDataJanitorRegionScanner() throws Exception {
    +    String tableName = "TestRegionScanner";
    +    byte[] familyBytes = Bytes.toBytes("f");
    +    byte[] columnBytes = Bytes.toBytes("c");
    +    HRegion region = createRegion(tableName, familyBytes, TimeUnit.HOURS.toMillis(3));
    +    try {
    +      region.initialize();
    +      TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    +      LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));
    +
    +      for (int i = 1; i <= 8; i++) {
    +        for (int k = 1; k <= i; k++) {
    +          Put p = new Put(Bytes.toBytes(i));
    +          p.addColumn(familyBytes, columnBytes, V[k], Bytes.toBytes(V[k]));
    +          region.put(p);
    +        }
    +      }
    +
    +      List<Cell> results = Lists.newArrayList();
    +
    +      // force a flush to clear the data
    +      // during flush, the coprocessor should drop all KeyValues with timestamps in the invalid set
    +
    +      LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    +      FlushResultImpl flushResult = region.flushcache(true, false, new FlushLifeCycleTracker() { });
    +      Assert.assertTrue("Unexpected flush result: " + flushResult, flushResult.isFlushSucceeded());
    +
    +      // now a normal scan should only return the valid rows
    +      // do not use a filter here to test that cleanup works on flush
    +      Scan scan = new Scan();
    +      scan.setMaxVersions(10);
    +      RegionScanner regionScanner = region.getScanner(scan);
    +
    +      // first returned value should be "4" with version "4"
    +      results.clear();
    +      assertTrue(regionScanner.next(results));
    +      assertKeyValueMatches(results, 4, new long[]{V[4]});
    +
    +      results.clear();
    +      assertTrue(regionScanner.next(results));
    +      assertKeyValueMatches(results, 5, new long[] {V[4]});
    +
    +      results.clear();
    +      assertTrue(regionScanner.next(results));
    +      assertKeyValueMatches(results, 6, new long[]{V[6], V[4]});
    +
    +      results.clear();
    +      assertTrue(regionScanner.next(results));
    +      assertKeyValueMatches(results, 7, new long[]{V[6], V[4]});
    +
    +      results.clear();
    +      assertFalse(regionScanner.next(results));
    +      assertKeyValueMatches(results, 8, new long[] {V[8], V[6], V[4]});
    +    } finally {
    +      //region.close();
    --- End diff --
    
    Is there a reason for commenting out the `region.close()` here and elsewhere in this file?
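    
    For reference, the pattern I'd expect is to close the region in the `finally` block so its resources are released even when an assertion fails. A minimal sketch (illustrative only, not part of the patch, reusing this test's `region` variable):
    
        } finally {
          // Close the region so memstore and store file resources are
          // released even when an assertion above fails.
          region.close();
        }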


---
