ignite-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From agoncha...@apache.org
Subject [05/11] ignite git commit: IGNITE-5267 - Moved WAL system properties to WAL configuration
Date Thu, 08 Jun 2017 16:11:23 GMT
http://git-wip-us.apache.org/repos/asf/ignite/blob/cd4d0400/modules/pds/src/test/java/org/apache/ignite/cache/database/db/file/WalRecoveryTxLogicalRecordsTest.java
----------------------------------------------------------------------
diff --git a/modules/pds/src/test/java/org/apache/ignite/cache/database/db/file/WalRecoveryTxLogicalRecordsTest.java b/modules/pds/src/test/java/org/apache/ignite/cache/database/db/file/WalRecoveryTxLogicalRecordsTest.java
deleted file mode 100644
index fa53723..0000000
--- a/modules/pds/src/test/java/org/apache/ignite/cache/database/db/file/WalRecoveryTxLogicalRecordsTest.java
+++ /dev/null
@@ -1,951 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.database.db.file;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReferenceArray;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.IgniteDataStreamer;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.CacheAtomicityMode;
-import org.apache.ignite.cache.CacheRebalanceMode;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
-import org.apache.ignite.cache.query.QueryCursor;
-import org.apache.ignite.cache.query.SqlFieldsQuery;
-import org.apache.ignite.cache.query.annotations.QuerySqlField;
-import org.apache.ignite.configuration.BinaryConfiguration;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.configuration.MemoryConfiguration;
-import org.apache.ignite.configuration.PersistentStoreConfiguration;
-import org.apache.ignite.internal.IgniteEx;
-import org.apache.ignite.internal.pagemem.PageIdAllocator;
-import org.apache.ignite.internal.pagemem.store.PageStore;
-import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
-import org.apache.ignite.internal.processors.cache.GridCacheContext;
-import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager;
-import org.apache.ignite.internal.processors.cache.IgniteRebalanceIterator;
-import org.apache.ignite.internal.processors.cache.database.CacheDataRow;
-import org.apache.ignite.internal.processors.cache.database.GridCacheDatabaseSharedManager;
-import org.apache.ignite.internal.processors.cache.database.file.FilePageStoreManager;
-import org.apache.ignite.internal.processors.cache.database.freelist.FreeListImpl;
-import org.apache.ignite.internal.processors.cache.database.freelist.PagesList;
-import org.apache.ignite.internal.processors.cache.database.tree.reuse.ReuseListImpl;
-import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.T2;
-import org.apache.ignite.internal.util.typedef.internal.CU;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.testframework.GridTestUtils;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-import org.apache.ignite.transactions.Transaction;
-import org.junit.Assert;
-
-/**
- *
- */
-public class WalRecoveryTxLogicalRecordsTest extends GridCommonAbstractTest {
-    /** Cache name. */
-    private static final String CACHE_NAME = "cache";
-
-    /** Cache 2 name. */
-    private static final String CACHE2_NAME = "cache2";
-
-    /** */
-    public static final int PARTS = 32;
-
-    /** */
-    public static final int WAL_HIST_SIZE = 30;
-
-    /** */
-    private int pageSize = 4 * 1024;
-
-    /** */
-    private CacheConfiguration extraCcfg;
-
-    /** */
-    private Long checkpointFreq;
-
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(gridName);
-
-        CacheConfiguration<Integer, IndexedValue> ccfg = new CacheConfiguration<>(CACHE_NAME);
-
-        ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);
-        ccfg.setRebalanceMode(CacheRebalanceMode.SYNC);
-        ccfg.setAffinity(new RendezvousAffinityFunction(false, PARTS));
-        ccfg.setIndexedTypes(Integer.class, IndexedValue.class);
-
-        if (extraCcfg != null)
-            cfg.setCacheConfiguration(ccfg, new CacheConfiguration<>(extraCcfg));
-        else
-            cfg.setCacheConfiguration(ccfg);
-
-        MemoryConfiguration dbCfg = new MemoryConfiguration();
-
-        dbCfg.setPageSize(pageSize);
-
-        cfg.setMemoryConfiguration(dbCfg);
-
-        PersistentStoreConfiguration pCfg = new PersistentStoreConfiguration();
-
-        pCfg.setWalHistorySize(WAL_HIST_SIZE);
-
-        if (checkpointFreq != null)
-            pCfg.setCheckpointingFrequency(checkpointFreq);
-
-        cfg.setPersistentStoreConfiguration(pCfg);
-
-        cfg.setMarshaller(null);
-
-        BinaryConfiguration binCfg = new BinaryConfiguration();
-
-        binCfg.setCompactFooter(false);
-
-        cfg.setBinaryConfiguration(binCfg);
-
-        return cfg;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false));
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false));
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testWalTxSimple() throws Exception {
-        Ignite ignite = startGrid();
-
-        try {
-            GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
-                .cache().context().database();
-
-            dbMgr.enableCheckpoints(false).get();
-
-            IgniteCache<Integer, IndexedValue> cache = ignite.cache(CACHE_NAME);
-
-            int txCnt = 100;
-
-            int keysPerTx = 10;
-
-            for (int i = 0; i < txCnt; i++) {
-                try (Transaction tx = ignite.transactions().txStart()) {
-                    for (int j = 0; j < keysPerTx; j++) {
-                        int k = i * keysPerTx + j;
-
-                        cache.put(k, new IndexedValue(k));
-                    }
-
-                    tx.commit();
-                }
-            }
-
-            for (int i = 0; i < txCnt; i++) {
-                for (int j = 0; j < keysPerTx; j++) {
-                    int k = i * keysPerTx + j;
-
-                    assertEquals(k, cache.get(k).value());
-                }
-            }
-
-            stopGrid();
-
-            ignite = startGrid();
-
-            cache = ignite.cache(CACHE_NAME);
-
-            for (int i = 0; i < txCnt; i++) {
-                for (int j = 0; j < keysPerTx; j++) {
-                    int k = i * keysPerTx + j;
-
-                    assertEquals(k, cache.get(k).value());
-                }
-            }
-
-            for (int i = 0; i < txCnt; i++) {
-                for (int j = 0; j < keysPerTx; j++) {
-                    int k = i * keysPerTx + j;
-
-                    QueryCursor<List<?>> cur = cache.query(
-                        new SqlFieldsQuery("select sVal from IndexedValue where iVal=?").setArgs(k));
-
-                    List<List<?>> vals = cur.getAll();
-
-                    assertEquals(vals.size(), 1);
-                    assertEquals("string-" + k, vals.get(0).get(0));
-                }
-            }
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testWalRecoveryRemoves() throws Exception {
-        Ignite ignite = startGrid();
-
-        try {
-            GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
-                .cache().context().database();
-
-            IgniteCache<Integer, IndexedValue> cache = ignite.cache(CACHE_NAME);
-
-            int txCnt = 100;
-
-            int keysPerTx = 10;
-
-            for (int i = 0; i < txCnt; i++) {
-                try (Transaction tx = ignite.transactions().txStart()) {
-                    for (int j = 0; j < keysPerTx; j++) {
-                        int k = i * keysPerTx + j;
-
-                        cache.put(k, new IndexedValue(k));
-                    }
-
-                    tx.commit();
-                }
-            }
-
-            for (int i = 0; i < txCnt; i++) {
-                for (int j = 0; j < keysPerTx; j++) {
-                    int k = i * keysPerTx + j;
-
-                    assertEquals(k, cache.get(k).value());
-                }
-            }
-
-            dbMgr.waitForCheckpoint("test");
-            dbMgr.enableCheckpoints(false).get();
-
-            for (int i = 0; i < txCnt / 2; i++) {
-                try (Transaction tx = ignite.transactions().txStart()) {
-                    for (int j = 0; j < keysPerTx; j++) {
-                        int k = i * keysPerTx + j;
-
-                        cache.remove(k);
-                    }
-
-                    tx.commit();
-                }
-            }
-
-            stopGrid();
-
-            ignite = startGrid();
-
-            cache = ignite.cache(CACHE_NAME);
-
-            for (int i = 0; i < txCnt; i++) {
-                for (int j = 0; j < keysPerTx; j++) {
-                    int k = i * keysPerTx + j;
-
-                    QueryCursor<List<?>> cur = cache.query(
-                        new SqlFieldsQuery("select sVal from IndexedValue where iVal=?").setArgs(k));
-
-                    List<List<?>> vals = cur.getAll();
-
-                    if (i < txCnt / 2) {
-                        assertNull(cache.get(k));
-                        assertTrue(F.isEmpty(vals));
-                    }
-                    else {
-                        assertEquals(k, cache.get(k).value());
-
-                        assertEquals(1, vals.size());
-                        assertEquals("string-" + k, vals.get(0).get(0));
-                    }
-                }
-            }
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testRebalanceIterator() throws Exception {
-        extraCcfg = new CacheConfiguration(CACHE2_NAME);
-        extraCcfg.setAffinity(new RendezvousAffinityFunction(false, PARTS));
-
-        Ignite ignite = startGrid();
-
-        try {
-            GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
-                .cache().context().database();
-
-            dbMgr.waitForCheckpoint("test");
-
-            // This number depends on wal history size.
-            int entries = 25;
-
-            IgniteCache<Integer, Integer> cache = ignite.cache(CACHE_NAME);
-            IgniteCache<Integer, Integer> cache2 = ignite.cache(CACHE2_NAME);
-
-            for (int i = 0; i < entries; i++) {
-                // Put to partition 0.
-                cache.put(i * PARTS, i * PARTS);
-
-                // Put to partition 1.
-                cache.put(i * PARTS + 1, i * PARTS + 1);
-
-                // Put to another cache.
-                cache2.put(i, i);
-
-                dbMgr.waitForCheckpoint("test");
-            }
-
-            for (int i = 0; i < entries; i++) {
-                assertEquals((Integer)(i * PARTS), cache.get(i * PARTS));
-                assertEquals((Integer)(i * PARTS + 1), cache.get(i * PARTS + 1));
-                assertEquals((Integer)(i), cache2.get(i));
-            }
-
-            GridCacheContext<Object, Object> cctx = ((IgniteEx)ignite).context().cache().cache(CACHE_NAME).context();
-            IgniteCacheOffheapManager offh = cctx.offheap();
-            AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();
-
-            for (int i = 0; i < entries; i++) {
-                try (IgniteRebalanceIterator it = offh.rebalanceIterator(0, topVer, (long)i)) {
-                    assertTrue("Not historical for iteration: " + i, it.historical());
-
-                    assertNotNull(it);
-
-                    for (int j = i; j < entries; j++) {
-                        assertTrue("i=" + i + ", j=" + j, it.hasNextX());
-
-                        CacheDataRow row = it.next();
-
-                        assertEquals(j * PARTS, (int)row.key().value(cctx.cacheObjectContext(), false));
-                        assertEquals(j * PARTS, (int)row.value().value(cctx.cacheObjectContext(), false));
-                    }
-
-                    assertFalse(it.hasNext());
-                }
-
-                try (IgniteRebalanceIterator it = offh.rebalanceIterator(1, topVer, (long)i)) {
-                    assertNotNull(it);
-
-                    assertTrue("Not historical for iteration: " + i, it.historical());
-
-                    for (int j = i; j < entries; j++) {
-                        assertTrue(it.hasNextX());
-
-                        CacheDataRow row = it.next();
-
-                        assertEquals(j * PARTS + 1, (int)row.key().value(cctx.cacheObjectContext(), false));
-                        assertEquals(j * PARTS + 1, (int)row.value().value(cctx.cacheObjectContext(), false));
-                    }
-
-                    assertFalse(it.hasNext());
-                }
-            }
-
-            stopAllGrids();
-
-            // Check that iterator is valid after restart.
-            ignite = startGrid();
-
-            cctx = ((IgniteEx)ignite).context().cache().cache(CACHE_NAME).context();
-            offh = cctx.offheap();
-            topVer = cctx.affinity().affinityTopologyVersion();
-
-            for (int i = 0; i < entries; i++) {
-                long start = System.currentTimeMillis();
-
-                try (IgniteRebalanceIterator it = offh.rebalanceIterator(0, topVer, (long)i)) {
-                    long end = System.currentTimeMillis();
-
-                    info("Time to get iterator: " + (end - start));
-
-                    assertTrue("Not historical for iteration: " + i, it.historical());
-
-                    assertNotNull(it);
-
-                    start = System.currentTimeMillis();
-
-                    for (int j = i; j < entries; j++) {
-                        assertTrue("i=" + i + ", j=" + j, it.hasNextX());
-
-                        CacheDataRow row = it.next();
-
-                        assertEquals(j * PARTS, (int)row.key().value(cctx.cacheObjectContext(), false));
-                        assertEquals(j * PARTS, (int)row.value().value(cctx.cacheObjectContext(), false));
-                    }
-
-                    end = System.currentTimeMillis();
-
-                    info("Time to iterate: " + (end - start));
-
-                    assertFalse(it.hasNext());
-                }
-
-                try (IgniteRebalanceIterator it = offh.rebalanceIterator(1, topVer, (long)i)) {
-                    assertNotNull(it);
-
-                    assertTrue("Not historical for iteration: " + i, it.historical());
-
-                    for (int j = i; j < entries; j++) {
-                        assertTrue(it.hasNextX());
-
-                        CacheDataRow row = it.next();
-
-                        assertEquals(j * PARTS + 1, (int)row.key().value(cctx.cacheObjectContext(), false));
-                        assertEquals(j * PARTS + 1, (int)row.value().value(cctx.cacheObjectContext(), false));
-                    }
-
-                    assertFalse(it.hasNext());
-                }
-            }
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testCheckpointHistory() throws Exception {
-        Ignite ignite = startGrid();
-
-        try {
-            GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
-                .cache().context().database();
-
-            dbMgr.waitForCheckpoint("test");
-
-            // This number depends on wal history size.
-            int entries = WAL_HIST_SIZE * 2;
-
-            IgniteCache<Integer, Integer> cache = ignite.cache(CACHE_NAME);
-
-            for (int i = 0; i < entries; i++) {
-                // Put to partition 0.
-                cache.put(i * PARTS, i * PARTS);
-
-                // Put to partition 1.
-                cache.put(i * PARTS + 1, i * PARTS + 1);
-
-                dbMgr.waitForCheckpoint("test");
-            }
-
-            GridCacheDatabaseSharedManager.CheckpointHistory hist = dbMgr.checkpointHistory();
-
-            assertTrue(hist.checkpoints().size() <= WAL_HIST_SIZE);
-
-            File cpDir = dbMgr.checkpointDirectory();
-
-            File[] cpFiles = cpDir.listFiles();
-
-            assertTrue(cpFiles.length <= WAL_HIST_SIZE * 2);
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testWalAfterPreloading() throws Exception {
-        Ignite ignite = startGrid();
-
-        try {
-            GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
-                .cache().context().database();
-
-            dbMgr.enableCheckpoints(false).get();
-
-            int entries = 100;
-
-            try (IgniteDataStreamer<Integer, Integer> streamer = ignite.dataStreamer(CACHE_NAME)) {
-                for (int i = 0; i < entries; i++)
-                    streamer.addData(i, i);
-            }
-
-            IgniteCache<Integer, Integer> cache = ignite.cache(CACHE_NAME);
-
-            for (int i = 0; i < entries; i++)
-                assertEquals(new Integer(i), cache.get(i));
-
-            stopGrid();
-
-            ignite = startGrid();
-
-            cache = ignite.cache(CACHE_NAME);
-
-            for (int i = 0; i < entries; i++)
-                assertEquals(new Integer(i), cache.get(i));
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testRecoveryRandomPutRemove() throws Exception {
-        try {
-            pageSize = 1024;
-
-            extraCcfg = new CacheConfiguration(CACHE2_NAME);
-            extraCcfg.setAffinity(new RendezvousAffinityFunction(false, PARTS));
-
-            Ignite ignite = startGrid(0);
-
-            GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
-                .cache().context().database();
-
-            dbMgr.enableCheckpoints(false).get();
-
-            IgniteCache<Integer, IndexedValue> cache1 = ignite.cache(CACHE_NAME);
-            IgniteCache<Object, Object> cache2 = ignite.cache(CACHE2_NAME);
-
-            final int KEYS1 = 100;
-
-            for (int i = 0; i < KEYS1; i++)
-                cache1.put(i, new IndexedValue(i));
-
-            for (int i = 0; i < KEYS1; i++) {
-                if (i % 2 == 0)
-                    cache1.remove(i);
-            }
-
-            ThreadLocalRandom rnd = ThreadLocalRandom.current();
-
-            for (int i = 0; i < KEYS1; i++) {
-                cache2.put(i, new byte[rnd.nextInt(512)]);
-
-                if (rnd.nextBoolean())
-                    cache2.put(i, new byte[rnd.nextInt(512)]);
-
-                if (rnd.nextBoolean())
-                    cache2.remove(i);
-            }
-
-            ignite.close();
-
-            ignite = startGrid(0);
-
-            ignite.cache(CACHE_NAME).put(1, new IndexedValue(0));
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testRecoveryNoPageLost1() throws Exception {
-        recoveryNoPageLost(false);
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testRecoveryNoPageLost2() throws Exception {
-        recoveryNoPageLost(true);
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testRecoveryNoPageLost3() throws Exception {
-        try {
-            pageSize = 1024;
-            checkpointFreq = 100L;
-            extraCcfg = new CacheConfiguration(CACHE2_NAME);
-            extraCcfg.setAffinity(new RendezvousAffinityFunction(false, 32));
-
-            List<Integer> pages = null;
-
-            for (int iter = 0; iter < 5; iter++) {
-                log.info("Start node: " + iter);
-
-                Ignite ignite = startGrid(0);
-
-                if (pages != null) {
-                    List<Integer> curPags = allocatedPages(ignite, CACHE2_NAME);
-
-                    assertEquals("Iter = " + iter, pages, curPags);
-                }
-
-                final IgniteCache<Integer, Object> cache = ignite.cache(CACHE2_NAME);
-
-                final int ops = ThreadLocalRandom.current().nextInt(10) + 10;
-
-                GridTestUtils.runMultiThreaded(new Callable<Void>() {
-                    @Override public Void call() throws Exception {
-                        ThreadLocalRandom rnd = ThreadLocalRandom.current();
-
-                        for (int i = 0; i < ops; i++) {
-                            Integer key = rnd.nextInt(1000);
-
-                            cache.put(key, new byte[rnd.nextInt(512)]);
-
-                            if (rnd.nextBoolean())
-                                cache.remove(key);
-                        }
-
-                        return null;
-                    }
-                }, 10, "update");
-
-                pages = allocatedPages(ignite, CACHE2_NAME);
-
-                Ignition.stop(ignite.name(), false); //will make checkpoint
-            }
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @param checkpoint Checkpoint enable flag.
-     * @throws Exception If failed.
-     */
-    private void recoveryNoPageLost(boolean checkpoint) throws Exception {
-        try {
-            pageSize = 1024;
-            extraCcfg = new CacheConfiguration(CACHE2_NAME);
-            extraCcfg.setAffinity(new RendezvousAffinityFunction(false, 32));
-
-            List<Integer> pages = null;
-
-            AtomicInteger cnt = new AtomicInteger();
-
-            for (int iter = 0; iter < 5; iter++) {
-                log.info("Start node: " + iter);
-
-                Ignite ignite = startGrid(0);
-
-                GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
-                    .cache().context().database();
-
-                if (!checkpoint)
-                    dbMgr.enableCheckpoints(false).get();
-
-                if (pages != null) {
-                    List<Integer> curPags = allocatedPages(ignite, CACHE2_NAME);
-
-                    assertEquals(pages, curPags);
-                }
-
-                IgniteCache<Integer, Object> cache = ignite.cache(CACHE2_NAME);
-
-                for (int i = 0; i < 128; i++)
-                    cache.put(cnt.incrementAndGet(), new byte[256 + iter * 100]);
-
-                pages = allocatedPages(ignite, CACHE2_NAME);
-
-                ignite.close();
-            }
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @param ignite Node.
-     * @param cacheName Cache name.
-     * @return Allocated pages per-store.
-     * @throws Exception If failed.
-     */
-    private List<Integer> allocatedPages(Ignite ignite, String cacheName) throws Exception {
-        FilePageStoreManager storeMgr =
-            (FilePageStoreManager)((IgniteEx)ignite).context().cache().context().pageStore();
-
-        int parts = ignite.affinity(cacheName).partitions();
-
-        List<Integer> res = new ArrayList<>(parts);
-
-        for (int p = 0; p < parts; p++) {
-            PageStore store = storeMgr.getStore(CU.cacheId(cacheName), p);
-
-            store.sync();
-
-            res.add(store.pages());
-        }
-
-        PageStore store = storeMgr.getStore(CU.cacheId(cacheName), PageIdAllocator.INDEX_PARTITION);
-
-        store.sync();
-
-        res.add(store.pages());
-
-        return res;
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testFreeListRecovery() throws Exception {
-        try {
-            pageSize = 1024;
-            extraCcfg = new CacheConfiguration(CACHE2_NAME);
-
-            Ignite ignite = startGrid(0);
-
-            IgniteCache<Integer, IndexedValue> cache1 = ignite.cache(CACHE_NAME);
-            IgniteCache<Object, Object> cache2 = ignite.cache(CACHE2_NAME);
-
-            final int KEYS1 = 2048;
-
-            for (int i = 0; i < KEYS1; i++)
-                cache1.put(i, new IndexedValue(i));
-
-            for (int i = 0; i < KEYS1; i++) {
-                if (i % 2 == 0)
-                    cache1.remove(i);
-            }
-
-            ThreadLocalRandom rnd = ThreadLocalRandom.current();
-
-            for (int i = 0; i < KEYS1; i++) {
-                cache2.put(i, new byte[rnd.nextInt(512)]);
-
-                if (rnd.nextBoolean())
-                    cache2.put(i, new byte[rnd.nextInt(512)]);
-
-                if (rnd.nextBoolean())
-                    cache2.remove(i);
-            }
-
-            Map<Integer, T2<Map<Integer, long[]>, int[]>> cache1_1 = getFreeListData(ignite, CACHE_NAME);
-            Map<Integer, T2<Map<Integer, long[]>, int[]>> cache2_1 = getFreeListData(ignite, CACHE2_NAME);
-            T2<long[], Integer> rl1_1 = getReuseListData(ignite, CACHE_NAME);
-            T2<long[], Integer> rl2_1 = getReuseListData(ignite, CACHE2_NAME);
-
-            ignite.close();
-
-            ignite = startGrid(0);
-
-            cache1 = ignite.cache(CACHE_NAME);
-            cache2 = ignite.cache(CACHE2_NAME);
-
-            for (int i = 0; i < KEYS1; i++) {
-                cache1.get(i);
-                cache2.get(i);
-            }
-
-            Map<Integer, T2<Map<Integer, long[]>, int[]>> cache1_2 = getFreeListData(ignite, CACHE_NAME);
-            Map<Integer, T2<Map<Integer, long[]>, int[]>> cache2_2 = getFreeListData(ignite, CACHE2_NAME);
-            T2<long[], Integer> rl1_2 = getReuseListData(ignite, CACHE_NAME);
-            T2<long[], Integer> rl2_2 = getReuseListData(ignite, CACHE2_NAME);
-
-            checkEquals(cache1_1, cache1_2);
-            checkEquals(cache2_1, cache2_2);
-            checkEquals(rl1_1, rl1_2);
-            checkEquals(rl2_1, rl2_2);
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @param ignite Node.
-     * @param cacheName Cache name.
-     * @return Cache reuse list data.
-     */
-    private T2<long[], Integer> getReuseListData(Ignite ignite, String cacheName) {
-        GridCacheContext ctx = ((IgniteEx)ignite).context().cache().cache(cacheName).context();
-
-        ReuseListImpl reuseList = GridTestUtils.getFieldValue(ctx.offheap(), "reuseList");
-        PagesList.Stripe[] bucket = GridTestUtils.getFieldValue(reuseList, "bucket");
-
-        long[] ids = null;
-
-        if (bucket != null) {
-            ids = new long[bucket.length];
-
-            for (int i = 0; i < bucket.length; i++)
-                ids[i] = bucket[i].tailId;
-        }
-
-//        AtomicIntegerArray cnts = GridTestUtils.getFieldValue(reuseList, PagesList.class, "cnts");
-//        assertEquals(1, cnts.length());
-
-        return new T2<>(ids, 0);
-    }
-
-    /**
-     * @param rl1 Data 1 (before stop).
-     * @param rl2 Data 2 (after restore).
-     */
-    private void checkEquals(T2<long[], Integer> rl1, T2<long[], Integer> rl2) {
-        Assert.assertArrayEquals(rl1.get1(), rl2.get1());
-        assertEquals(rl1.get2(), rl2.get2());
-    }
-
-    /**
-     * @param partsLists1 Data 1 (before stop).
-     * @param partsLists2 Data 2 (after restore).
-     */
-    private void checkEquals(Map<Integer, T2<Map<Integer, long[]>, int[]>> partsLists1,
-        Map<Integer, T2<Map<Integer, long[]>, int[]>> partsLists2) {
-        assertEquals(partsLists1.size(), partsLists2.size());
-
-        for (Integer part : partsLists1.keySet()) {
-            T2<Map<Integer, long[]>, int[]> t1 = partsLists1.get(part);
-            T2<Map<Integer, long[]>, int[]> t2 = partsLists2.get(part);
-
-            Map<Integer, long[]> m1 = t1.get1();
-            Map<Integer, long[]> m2 = t2.get1();
-
-            assertEquals(m1.size(), m2.size());
-
-            for (Integer bucket : m1.keySet()) {
-                long tails1[] = m1.get(bucket);
-                long tails2[] = m2.get(bucket);
-
-                Assert.assertArrayEquals(tails1, tails2);
-            }
-
-            Assert.assertArrayEquals("Wrong counts [part=" + part + ']', t1.get2(), t2.get2());
-        }
-    }
-
-    /**
-     * @param ignite Node.
-     * @param cacheName Cache name.
-     * @return Cache free lists data.
-     */
-    private Map<Integer, T2<Map<Integer, long[]>, int[]>> getFreeListData(Ignite ignite, String cacheName) {
-        GridCacheContext ctx = ((IgniteEx)ignite).context().cache().cache(cacheName).context();
-
-        List<GridDhtLocalPartition> parts = ctx.topology().localPartitions();
-
-        assertTrue(!parts.isEmpty());
-        assertEquals(ctx.affinity().partitions(), parts.size());
-
-        Map<Integer, T2<Map<Integer, long[]>, int[]>> res = new HashMap<>();
-
-        boolean foundNonEmpty = false;
-        boolean foundTails = false;
-
-        for (GridDhtLocalPartition part : parts) {
-            FreeListImpl freeList = GridTestUtils.getFieldValue(part.dataStore(), "freeList");
-
-            if (freeList == null)
-                // Lazy store.
-                continue;
-
-            AtomicReferenceArray<PagesList.Stripe[]> buckets = GridTestUtils.getFieldValue(freeList,
-                FreeListImpl.class, "buckets");
-            //AtomicIntegerArray cnts = GridTestUtils.getFieldValue(freeList, PagesList.class, "cnts");
-
-            assertNotNull(buckets);
-            //assertNotNull(cnts);
-            assertTrue(buckets.length() > 0);
-            //assertEquals(cnts.length(), buckets.length());
-
-            Map<Integer, long[]> tailsPerBucket = new HashMap<>();
-
-            for (int i = 0; i < buckets.length(); i++) {
-                PagesList.Stripe[] tails = buckets.get(i);
-
-                long ids[] = null;
-
-                if (tails != null) {
-                    ids = new long[tails.length];
-
-                    for (int j = 0; j < tails.length; j++)
-                        ids[j] = tails[j].tailId;
-                }
-
-                tailsPerBucket.put(i, ids);
-
-                    if (tails != null) {
-                        assertTrue(tails.length > 0);
-
-                        foundTails = true;
-                    }
-                }
-
-//            int[] cntsPerBucket = new int[cnts.length()];
-//
-//            for (int i = 0; i < cnts.length(); i++) {
-//                cntsPerBucket[i] = cnts.get(i);
-//
-//                if (cntsPerBucket[i] > 0)
-//                    foundNonEmpty = true;
-//            }
-
-            res.put(part.id(), new T2<>(tailsPerBucket, (int[])null));
-        }
-
-        //assertTrue(foundNonEmpty);
-        assertTrue(foundTails);
-
-        return res;
-    }
-
-    /**
-     *
-     */
-    private static class IndexedValue {
-        /** */
-        @QuerySqlField(index = true)
-        private int iVal;
-
-        /** */
-        @QuerySqlField
-        private String sVal;
-
-        /**
-         * @param iVal Indexed value.
-         */
-        private IndexedValue(int iVal) {
-            this.iVal = iVal;
-            sVal = "string-" + iVal;
-        }
-
-        /**
-         * @return Value.
-         */
-        private int value() {
-            return iVal;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/ignite/blob/cd4d0400/modules/pds/src/test/java/org/apache/ignite/cache/database/db/file/ignitePdsCheckpointSimulationTest.java
----------------------------------------------------------------------
diff --git a/modules/pds/src/test/java/org/apache/ignite/cache/database/db/file/ignitePdsCheckpointSimulationTest.java b/modules/pds/src/test/java/org/apache/ignite/cache/database/db/file/ignitePdsCheckpointSimulationTest.java
new file mode 100644
index 0000000..b30162d
--- /dev/null
+++ b/modules/pds/src/test/java/org/apache/ignite/cache/database/db/file/ignitePdsCheckpointSimulationTest.java
@@ -0,0 +1,1000 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.cache.database.db.file;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.nio.ByteOrder;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.CacheRebalanceMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.MemoryConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.PersistentStoreConfiguration;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.pagemem.FullPageId;
+import org.apache.ignite.internal.pagemem.PageIdAllocator;
+import org.apache.ignite.internal.pagemem.PageMemory;
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.database.GridCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.database.pagemem.PageMemoryEx;
+import org.apache.ignite.internal.processors.cache.database.pagemem.PageMemoryImpl;
+import org.apache.ignite.internal.processors.cache.database.tree.io.DataPageIO;
+import org.apache.ignite.internal.processors.cache.database.tree.io.PageIO;
+import org.apache.ignite.internal.processors.cache.database.tree.io.TrackingPageIO;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.pagemem.wal.WALIterator;
+import org.apache.ignite.internal.pagemem.wal.WALPointer;
+import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord;
+import org.apache.ignite.internal.pagemem.wal.record.DataEntry;
+import org.apache.ignite.internal.pagemem.wal.record.DataRecord;
+import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot;
+import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
+import org.apache.ignite.internal.processors.cache.CacheObject;
+import org.apache.ignite.internal.processors.cache.CacheObjectContext;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.GridCacheOperation;
+import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ *
+ */
+public class ignitePdsCheckpointSimulationTest extends GridCommonAbstractTest {
+    /** Shared IP finder so restarted grids rejoin the same discovery topology. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** Number of data pages allocated by the checkpoint simulation. */
+    private static final int TOTAL_PAGES = 1000;
+
+    /** Enables per-page trace output in the simulation threads. */
+    private static final boolean VERBOSE = false;
+
+    /** Cache name. */
+    private final String cacheName = "cache";
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        CacheConfiguration ccfg = new CacheConfiguration(cacheName);
+
+        // No rebalancing: after a restart the node must recover state from its own store/WAL only.
+        ccfg.setRebalanceMode(CacheRebalanceMode.NONE);
+
+        cfg.setCacheConfiguration(ccfg);
+
+        MemoryConfiguration dbCfg = new MemoryConfiguration();
+
+        cfg.setMemoryConfiguration(dbCfg);
+
+        // Full page records are required: the tests below replay PageSnapshot records from the WAL.
+        cfg.setPersistentStoreConfiguration(
+            new PersistentStoreConfiguration()
+                .setCheckpointingFrequency(500)
+                .setWalMode(WALMode.LOG_ONLY)
+                .setAlwaysWriteFullPages(true)
+        );
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(discoSpi);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        stopAllGrids();
+
+        // Start every test from an empty persistence directory.
+        deleteWorkFiles();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        // Clean up persistence files so a failed test cannot poison the next one.
+        deleteWorkFiles();
+    }
+
+    /**
+     * Runs the multi-threaded checkpoint simulation ({@code runCheckpointing}), then restarts the
+     * grid and verifies that the recovered page memory contents match both the recorded per-page
+     * states and the page snapshots logged to the WAL.
+     *
+     * @throws Exception if failed.
+     */
+    public void testCheckpointSimulationMultiThreaded() throws Exception {
+        IgniteEx ig = startGrid(0);
+
+        GridCacheSharedContext<Object, Object> shared = ig.context().cache().context();
+
+        GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)shared.database();
+
+        IgnitePageStoreManager pageStore = shared.pageStore();
+
+        U.sleep(1_000);
+
+        // Disable integrated checkpoint thread.
+        dbMgr.enableCheckpoints(false).get();
+
+        // Must put something in partition 0 in order to initialize meta page.
+        // Otherwise we will violate page store integrity rules.
+        ig.cache(cacheName).put(0, 0);
+
+        PageMemory mem = shared.database().memoryPolicy(null).pageMemory();
+
+        IgniteBiTuple<Map<FullPageId, Integer>, WALPointer> res;
+
+        try {
+            res = runCheckpointing(ig, (PageMemoryImpl)mem, pageStore, shared.wal(),
+                shared.cache().cache(cacheName).context().cacheId());
+        }
+        catch (Throwable th) {
+            log().error("Error while running checkpointing", th);
+
+            throw th;
+        }
+        finally {
+            // Re-enable checkpoints so the node can shut down cleanly.
+            dbMgr.enableCheckpoints(true).get();
+
+            stopAllGrids(false);
+        }
+
+        // Restart and verify the recovered state against the simulation results.
+        ig = startGrid(0);
+
+        shared = ig.context().cache().context();
+
+        dbMgr = (GridCacheDatabaseSharedManager)shared.database();
+
+        dbMgr.enableCheckpoints(false).get();
+
+        mem = shared.database().memoryPolicy(null).pageMemory();
+
+        verifyReads(res.get1(), mem, res.get2(), shared.wal());
+    }
+
+    /**
+     * Verifies that pages written right after allocation are logged to the WAL as full page
+     * snapshots and that those snapshots can be replayed after a restart.
+     *
+     * @throws Exception if failed.
+     */
+    public void testGetForInitialWrite() throws Exception {
+        IgniteEx ig = startGrid(0);
+
+        GridCacheSharedContext<Object, Object> shared = ig.context().cache().context();
+
+        int cacheId = shared.cache().cache(cacheName).context().cacheId();
+
+        GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)shared.database();
+
+        // Disable integrated checkpoint thread. Wait for the returned future, as the other call
+        // sites in this class do; otherwise the checkpointer may still run while pages are written.
+        dbMgr.enableCheckpoints(false).get();
+
+        PageMemory mem = shared.database().memoryPolicy(null).pageMemory();
+
+        IgniteWriteAheadLogManager wal = shared.wal();
+
+        WALPointer start = wal.log(new CheckpointRecord(null, false));
+
+        final FullPageId[] initWrites = new FullPageId[10];
+
+        ig.context().cache().context().database().checkpointReadLock();
+
+        try {
+            for (int i = 0; i < initWrites.length; i++)
+                initWrites[i] = new FullPageId(mem.allocatePage(cacheId, 0, PageIdAllocator.FLAG_DATA), cacheId);
+
+            // Check getForInitialWrite methods.
+            for (FullPageId fullId : initWrites) {
+                long page = mem.acquirePage(fullId.cacheId(), fullId.pageId());
+
+                try {
+                    long pageAddr = mem.writeLock(fullId.cacheId(), fullId.pageId(), page);
+
+                    try {
+                        DataPageIO.VERSIONS.latest().initNewPage(pageAddr, fullId.pageId(), mem.pageSize());
+
+                        // Fill the payload area with a recognizable pattern.
+                        for (int i = PageIO.COMMON_HEADER_END; i < mem.pageSize(); i++)
+                            PageUtils.putByte(pageAddr, i, (byte)0xAB);
+                    }
+                    finally {
+                        mem.writeUnlock(fullId.cacheId(), fullId.pageId(), page, null, true);
+                    }
+                }
+                finally {
+                    mem.releasePage(fullId.cacheId(), fullId.pageId(), page);
+                }
+            }
+
+            wal.fsync(null);
+        }
+        finally {
+            ig.context().cache().context().database().checkpointReadUnlock();
+
+            stopAllGrids(false);
+        }
+
+        // Restart and replay the WAL from the checkpoint marker.
+        ig = startGrid(0);
+
+        shared = ig.context().cache().context();
+
+        dbMgr = (GridCacheDatabaseSharedManager)shared.database();
+
+        dbMgr.enableCheckpoints(false).get();
+
+        wal = shared.wal();
+
+        try (WALIterator it = wal.replay(start)) {
+            it.nextX();
+
+            for (FullPageId initialWrite : initWrites) {
+                IgniteBiTuple<WALPointer, WALRecord> tup = it.nextX();
+
+                assertTrue(String.valueOf(tup.get2()), tup.get2() instanceof PageSnapshot);
+
+                PageSnapshot snap = (PageSnapshot)tup.get2();
+
+                FullPageId actual = snap.fullPageId();
+
+                // There are extra tracking pages, skip them.
+                if (TrackingPageIO.VERSIONS.latest().trackingPageFor(actual.pageId(), mem.pageSize()) == actual.pageId()) {
+                    tup = it.nextX();
+
+                    assertTrue(tup.get2() instanceof PageSnapshot);
+
+                    actual = ((PageSnapshot)tup.get2()).fullPageId();
+                }
+
+                assertEquals(initialWrite, actual);
+            }
+        }
+    }
+
+    /**
+     * Checks that data records logged between a checkpoint-start and checkpoint-end marker survive
+     * a restart and are replayed from the WAL with identical keys, values, operations, versions
+     * and partition counters.
+     *
+     * @throws Exception if failed.
+     */
+    public void testDataWalEntries() throws Exception {
+        IgniteEx ig = startGrid(0);
+
+        GridCacheSharedContext<Object, Object> sharedCtx = ig.context().cache().context();
+        GridCacheContext<Object, Object> cctx = sharedCtx.cache().cache(cacheName).context();
+
+        GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)sharedCtx.database();
+        IgniteWriteAheadLogManager wal = sharedCtx.wal();
+
+        assertTrue(wal.isAlwaysWriteFullPages());
+
+        // Wait for checkpoints to be actually disabled before logging test records
+        // (other call sites in this class call get() on the returned future).
+        db.enableCheckpoints(false).get();
+
+        final int cnt = 10;
+
+        List<DataEntry> entries = new ArrayList<>(cnt);
+
+        for (int i = 0; i < cnt; i++) {
+            // Alternate updates and deletes so both operation kinds get replayed.
+            GridCacheOperation op = i % 2 == 0 ? GridCacheOperation.UPDATE : GridCacheOperation.DELETE;
+
+            KeyCacheObject key = cctx.toCacheKeyObject(i);
+
+            CacheObject val = null;
+
+            if (op != GridCacheOperation.DELETE)
+                val = cctx.toCacheObject("value-" + i);
+
+            entries.add(new DataEntry(cctx.cacheId(), key, val, op, null, cctx.versions().next(), 0L,
+                cctx.affinity().partition(i), i));
+        }
+
+        UUID cpId = UUID.randomUUID();
+
+        WALPointer start = wal.log(new CheckpointRecord(cpId, null, false));
+
+        wal.fsync(start);
+
+        for (DataEntry entry : entries)
+            wal.log(new DataRecord(entry));
+
+        WALPointer end = wal.log(new CheckpointRecord(cpId, start, true));
+
+        wal.fsync(end);
+
+        // Data will not be written to the page store.
+        stopAllGrids();
+
+        ig = startGrid(0);
+
+        sharedCtx = ig.context().cache().context();
+        cctx = sharedCtx.cache().cache(cacheName).context();
+
+        db = (GridCacheDatabaseSharedManager)sharedCtx.database();
+        wal = sharedCtx.wal();
+
+        db.enableCheckpoints(false).get();
+
+        try (WALIterator it = wal.replay(start)) {
+            IgniteBiTuple<WALPointer, WALRecord> tup = it.nextX();
+
+            assert tup.get2() instanceof CheckpointRecord;
+
+            assertEquals(start, tup.get1());
+
+            CheckpointRecord cpRec = (CheckpointRecord)tup.get2();
+
+            assertEquals(cpId, cpRec.checkpointId());
+            assertNull(cpRec.checkpointMark());
+            assertFalse(cpRec.end());
+
+            int idx = 0;
+            CacheObjectContext coctx = cctx.cacheObjectContext();
+
+            // Every logged entry must come back as a single-entry data record, in order.
+            while (idx < entries.size()) {
+                tup = it.nextX();
+
+                assert tup.get2() instanceof DataRecord;
+
+                DataRecord dataRec = (DataRecord)tup.get2();
+
+                DataEntry entry = entries.get(idx);
+
+                assertEquals(1, dataRec.writeEntries().size());
+
+                DataEntry readEntry = dataRec.writeEntries().get(0);
+
+                assertEquals(entry.cacheId(), readEntry.cacheId());
+                assertEquals(entry.key().<Integer>value(coctx, true), readEntry.key().<Integer>value(coctx, true));
+                assertEquals(entry.op(), readEntry.op());
+
+                if (entry.op() == GridCacheOperation.UPDATE)
+                    assertEquals(entry.value().value(coctx, true), readEntry.value().value(coctx, true));
+                else
+                    assertNull(entry.value());
+
+                assertEquals(entry.writeVersion(), readEntry.writeVersion());
+                assertEquals(entry.nearXidVersion(), readEntry.nearXidVersion());
+                assertEquals(entry.partitionCounter(), readEntry.partitionCounter());
+
+                idx++;
+            }
+
+            tup = it.nextX();
+
+            assert tup.get2() instanceof CheckpointRecord;
+
+            assertEquals(end, tup.get1());
+
+            cpRec = (CheckpointRecord)tup.get2();
+
+            assertEquals(cpId, cpRec.checkpointId());
+            assertEquals(start, cpRec.checkpointMark());
+            assertTrue(cpRec.end());
+        }
+    }
+
+    /**
+     * Checks that page snapshots written between checkpoint markers are replayed from the WAL
+     * after a restart, in allocation order (skipping interleaved tracking pages).
+     *
+     * @throws Exception if failed.
+     */
+    public void testPageWalEntries() throws Exception {
+        IgniteEx ig = startGrid(0);
+
+        GridCacheSharedContext<Object, Object> sharedCtx = ig.context().cache().context();
+        int cacheId = sharedCtx.cache().cache(cacheName).context().cacheId();
+
+        GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)sharedCtx.database();
+        PageMemory pageMem = sharedCtx.database().memoryPolicy(null).pageMemory();
+        IgniteWriteAheadLogManager wal = sharedCtx.wal();
+
+        db.enableCheckpoints(false).get();
+
+        int pageCnt = 100;
+
+        List<FullPageId> pageIds = new ArrayList<>();
+
+        for (int i = 0; i < pageCnt; i++) {
+            db.checkpointReadLock();
+
+            try {
+                pageIds.add(new FullPageId(pageMem.allocatePage(cacheId, PageIdAllocator.INDEX_PARTITION,
+                    PageIdAllocator.FLAG_IDX), cacheId));
+            }
+            finally {
+                db.checkpointReadUnlock();
+            }
+        }
+
+        UUID cpId = UUID.randomUUID();
+
+        WALPointer start = wal.log(new CheckpointRecord(cpId, null, false));
+
+        wal.fsync(start);
+
+        ig.context().cache().context().database().checkpointReadLock();
+
+        try {
+            for (FullPageId pageId : pageIds)
+                writePageData(pageId, pageMem);
+        }
+        finally {
+            ig.context().cache().context().database().checkpointReadUnlock();
+        }
+
+        WALPointer end = wal.log(new CheckpointRecord(cpId, start, true));
+
+        wal.fsync(end);
+
+        // Data will not be written to the page store.
+        stopAllGrids();
+
+        ig = startGrid(0);
+
+        sharedCtx = ig.context().cache().context();
+
+        db = (GridCacheDatabaseSharedManager)sharedCtx.database();
+        wal = sharedCtx.wal();
+
+        // Wait for the returned future, consistent with the enableCheckpoints(false).get() call
+        // above; otherwise the checkpointer may still run during replay verification.
+        db.enableCheckpoints(false).get();
+
+        try (WALIterator it = wal.replay(start)) {
+            IgniteBiTuple<WALPointer, WALRecord> tup = it.nextX();
+
+            assert tup.get2() instanceof CheckpointRecord : tup.get2();
+
+            assertEquals(start, tup.get1());
+
+            CheckpointRecord cpRec = (CheckpointRecord)tup.get2();
+
+            assertEquals(cpId, cpRec.checkpointId());
+            assertNull(cpRec.checkpointMark());
+            assertFalse(cpRec.end());
+
+            int idx = 0;
+
+            while (idx < pageIds.size()) {
+                tup = it.nextX();
+
+                assert tup.get2() instanceof PageSnapshot : tup.get2().getClass();
+
+                PageSnapshot snap = (PageSnapshot)tup.get2();
+
+                // There are extra tracking pages, skip them.
+                if (TrackingPageIO.VERSIONS.latest().trackingPageFor(snap.fullPageId().pageId(), pageMem.pageSize()) == snap.fullPageId().pageId()) {
+                    tup = it.nextX();
+
+                    assertTrue(tup.get2() instanceof PageSnapshot);
+
+                    snap = (PageSnapshot)tup.get2();
+                }
+
+                assertEquals(pageIds.get(idx), snap.fullPageId());
+
+                idx++;
+            }
+
+            tup = it.nextX();
+
+            assert tup.get2() instanceof CheckpointRecord;
+
+            assertEquals(end, tup.get1());
+
+            cpRec = (CheckpointRecord)tup.get2();
+
+            assertEquals(cpId, cpRec.checkpointId());
+            assertEquals(start, cpRec.checkpointMark());
+            assertTrue(cpRec.end());
+        }
+    }
+
+    /**
+     * Checks page dirty-flag transitions around a checkpoint: pages are dirty after allocation,
+     * getForCheckpoint() observes the pre-checkpoint contents, and pages written during the
+     * checkpoint end up dirty again once it finishes.
+     *
+     * @throws Exception if failed.
+     */
+    public void testDirtyFlag() throws Exception {
+        IgniteEx ig = startGrid(0);
+
+        GridCacheSharedContext<Object, Object> shared = ig.context().cache().context();
+
+        int cacheId = shared.cache().cache(cacheName).context().cacheId();
+
+        GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)shared.database();
+
+        // Disable integrated checkpoint thread. Wait for the future, consistent with the other
+        // call sites in this class; otherwise the checkpointer may still interfere with the flags.
+        dbMgr.enableCheckpoints(false).get();
+
+        PageMemoryEx mem = (PageMemoryEx) dbMgr.memoryPolicy(null).pageMemory();
+
+        ig.context().cache().context().database().checkpointReadLock();
+
+        FullPageId[] pageIds = new FullPageId[100];
+
+        try {
+            for (int i = 0; i < pageIds.length; i++)
+                pageIds[i] = new FullPageId(mem.allocatePage(cacheId, 0, PageIdAllocator.FLAG_DATA), cacheId);
+
+            for (FullPageId fullId : pageIds) {
+                long page = mem.acquirePage(fullId.cacheId(), fullId.pageId());
+
+                try {
+                    assertTrue(mem.isDirty(fullId.cacheId(), fullId.pageId(), page)); // Page is dirty right after allocation.
+
+                    long pageAddr = mem.writeLock(fullId.cacheId(), fullId.pageId(), page);
+
+                    PageIO.setPageId(pageAddr, fullId.pageId());
+
+                    try {
+                        assertTrue(mem.isDirty(fullId.cacheId(), fullId.pageId(), page));
+                    }
+                    finally {
+                        mem.writeUnlock(fullId.cacheId(), fullId.pageId(), page, null, true);
+                    }
+
+                    assertTrue(mem.isDirty(fullId.cacheId(), fullId.pageId(), page));
+                }
+                finally {
+                    mem.releasePage(fullId.cacheId(), fullId.pageId(), page);
+                }
+            }
+        }
+        finally {
+            ig.context().cache().context().database().checkpointReadUnlock();
+        }
+
+        Collection<FullPageId> cpPages = mem.beginCheckpoint();
+
+        ig.context().cache().context().database().checkpointReadLock();
+
+        try {
+            for (FullPageId fullId : pageIds) {
+                assertTrue(cpPages.contains(fullId));
+
+                ByteBuffer buf = ByteBuffer.allocate(mem.pageSize());
+
+                long page = mem.acquirePage(fullId.cacheId(), fullId.pageId());
+
+                try {
+                    assertTrue(mem.isDirty(fullId.cacheId(), fullId.pageId(), page));
+
+                    long pageAddr = mem.writeLock(fullId.cacheId(), fullId.pageId(), page);
+
+                    try {
+                        // Write-locking a checkpointed page resets the flag for the new version.
+                        assertFalse(mem.isDirty(fullId.cacheId(), fullId.pageId(), page));
+
+                        for (int i = PageIO.COMMON_HEADER_END; i < mem.pageSize(); i++)
+                            PageUtils.putByte(pageAddr, i, (byte)1);
+                    }
+                    finally {
+                        mem.writeUnlock(fullId.cacheId(), fullId.pageId(), page, null, true);
+                    }
+
+                    assertTrue(mem.isDirty(fullId.cacheId(), fullId.pageId(), page));
+
+                    buf.rewind();
+
+                    mem.getForCheckpoint(fullId, buf, null);
+
+                    buf.position(PageIO.COMMON_HEADER_END);
+
+                    // The checkpoint copy must hold the pre-write contents, not the bytes above.
+                    while (buf.hasRemaining())
+                        assertEquals((byte)0, buf.get());
+                }
+                finally {
+                    mem.releasePage(fullId.cacheId(), fullId.pageId(), page);
+                }
+            }
+        }
+        finally {
+            ig.context().cache().context().database().checkpointReadUnlock();
+        }
+
+        mem.finishCheckpoint();
+
+        // Pages written during the checkpoint must remain dirty after it finishes.
+        for (FullPageId fullId : pageIds) {
+            long page = mem.acquirePage(fullId.cacheId(), fullId.pageId());
+
+            try {
+                assertTrue(mem.isDirty(fullId.cacheId(), fullId.pageId(), page));
+            }
+            finally {
+                mem.releasePage(fullId.cacheId(), fullId.pageId(), page);
+            }
+        }
+    }
+
+    /**
+     * Initializes the given page as a data page and fills its payload area with random bytes.
+     *
+     * @param fullId Page to write.
+     * @param mem Page memory the page belongs to.
+     * @throws Exception if failed.
+     */
+    private void writePageData(FullPageId fullId, PageMemory mem) throws Exception {
+        long page = mem.acquirePage(fullId.cacheId(), fullId.pageId());
+        try {
+            long pageAddr = mem.writeLock(fullId.cacheId(), fullId.pageId(), page);
+
+            try {
+                DataPageIO.VERSIONS.latest().initNewPage(pageAddr, fullId.pageId(), mem.pageSize());
+
+                ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                // Randomize everything past the common header; the header itself must stay valid.
+                for (int i = PageIO.COMMON_HEADER_END; i < mem.pageSize(); i++)
+                    PageUtils.putByte(pageAddr, i, (byte)rnd.nextInt(255));
+            }
+            finally {
+                mem.writeUnlock(fullId.cacheId(), fullId.pageId(), page, null, true);
+            }
+        }
+        finally {
+            mem.releasePage(fullId.cacheId(), fullId.pageId(), page);
+        }
+    }
+
+    /**
+     * Replays page snapshots from the WAL starting at {@code start} (up to the matching
+     * checkpoint-end record) and checks that, for every page with a recorded state, both the page
+     * memory contents and the last WAL snapshot hold that state byte.
+     *
+     * @param res Result map to verify: page id -> last written state byte, or -1 if never written.
+     * @param mem Memory.
+     * @param start WAL pointer of the checkpoint-start record to replay from.
+     * @param wal WAL manager used for replay.
+     * @throws Exception If verification fails.
+     */
+    private void verifyReads(
+        Map<FullPageId, Integer> res,
+        PageMemory mem,
+        WALPointer start,
+        IgniteWriteAheadLogManager wal
+    ) throws Exception {
+        Map<FullPageId, byte[]> replay = new HashMap<>();
+
+        try (WALIterator it = wal.replay(start)) {
+            IgniteBiTuple<WALPointer, WALRecord> tup = it.nextX();
+
+            assertTrue("Invalid record: " + tup, tup.get2() instanceof CheckpointRecord);
+
+            CheckpointRecord cpRec = (CheckpointRecord)tup.get2();
+
+            // Collect the last snapshot per page until the matching checkpoint-end record.
+            while (it.hasNextX()) {
+                tup = it.nextX();
+
+                WALRecord rec = tup.get2();
+
+                if (rec instanceof CheckpointRecord) {
+                    CheckpointRecord end = (CheckpointRecord)rec;
+
+                    // Found the finish mark.
+                    if (end.checkpointId().equals(cpRec.checkpointId()) && end.end())
+                        break;
+                }
+                else if (rec instanceof PageSnapshot) {
+                    PageSnapshot page = (PageSnapshot)rec;
+
+                    replay.put(page.fullPageId(), page.pageData());
+                }
+            }
+        }
+
+        // Check read-through from the file store.
+        for (Map.Entry<FullPageId, Integer> entry : res.entrySet()) {
+            FullPageId fullId = entry.getKey();
+            int state = entry.getValue();
+
+            if (state == -1) {
+                info("Page was never written: " + fullId);
+
+                continue;
+            }
+
+            byte[] walData = replay.get(fullId);
+
+            assertNotNull("Missing WAL record for a written page: " + fullId, walData);
+
+            long page = mem.acquirePage(fullId.cacheId(), fullId.pageId());
+            try {
+                long pageAddr = mem.readLock(fullId.cacheId(), fullId.pageId(), page);
+
+                try {
+                    // Every payload byte must equal the recorded state, both in memory and in WAL.
+                    for (int i = PageIO.COMMON_HEADER_END; i < mem.pageSize(); i++) {
+                        assertEquals("Invalid state [pageId=" + fullId + ", pos=" + i + ']',
+                            state & 0xFF, PageUtils.getByte(pageAddr, i) & 0xFF);
+
+                        assertEquals("Invalid WAL state [pageId=" + fullId + ", pos=" + i + ']',
+                            state & 0xFF, walData[i] & 0xFF);
+                    }
+                }
+                finally {
+                    mem.readUnlock(fullId.cacheId(), fullId.pageId(), page);
+                }
+            }
+            finally {
+                mem.releasePage(fullId.cacheId(), fullId.pageId(), page);
+            }
+        }
+    }
+
+    /**
+     * @param mem Memory to use.
+     * @param storeMgr Store manager.
+     * @param cacheId Cache ID.
+     * @return Result map of random operations.
+     * @throws Exception If failure occurred.
+     */
+    private IgniteBiTuple<Map<FullPageId, Integer>, WALPointer> runCheckpointing(
+        final IgniteEx ig,
+        final PageMemoryImpl mem,
+        final IgnitePageStoreManager storeMgr,
+        final IgniteWriteAheadLogManager wal,
+        final int cacheId
+    ) throws Exception {
+        final ConcurrentMap<FullPageId, Integer> resMap = new ConcurrentHashMap<>();
+
+        final FullPageId pages[] = new FullPageId[TOTAL_PAGES];
+
+        Set<FullPageId> allocated = new HashSet<>();
+
+        for (int i = 0; i < TOTAL_PAGES; i++) {
+            FullPageId fullId = new FullPageId(mem.allocatePage(cacheId, 0, PageIdAllocator.FLAG_DATA), cacheId);
+
+            resMap.put(fullId, -1);
+
+            pages[i] = fullId;
+
+            allocated.add(fullId);
+        }
+
+        final AtomicBoolean run = new AtomicBoolean(true);
+
+        // Simulate transaction lock.
+        final ReadWriteLock updLock = new ReentrantReadWriteLock();
+
+        // Mark the start position.
+        CheckpointRecord cpRec = new CheckpointRecord(null, false);
+
+        WALPointer start = wal.log(cpRec);
+
+        wal.fsync(start);
+
+        IgniteInternalFuture<Long> updFut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                while (true) {
+                    FullPageId fullId = pages[ThreadLocalRandom.current().nextInt(TOTAL_PAGES)];
+
+                    updLock.readLock().lock();
+
+                    try {
+                        if (!run.get())
+                            return null;
+
+                        ig.context().cache().context().database().checkpointReadLock();
+
+                        try {
+                            long page = mem.acquirePage(fullId.cacheId(), fullId.pageId());
+
+                            try {
+                                long pageAddr = mem.writeLock(fullId.cacheId(), fullId.pageId(), page);
+
+                                PageIO.setPageId(pageAddr, fullId.pageId());
+
+                                try {
+                                    int state = resMap.get(fullId);
+
+                                    if (state != -1) {
+                                        if (VERBOSE)
+                                            info("Verify page [fullId=" + fullId + ", state=" + state +
+                                                ", buf=" + pageAddr +
+                                                ", bhc=" + U.hexLong(System.identityHashCode(pageAddr)) +
+                                                ", page=" + U.hexLong(System.identityHashCode(page)) + ']');
+
+                                        for (int i = PageIO.COMMON_HEADER_END; i < mem.pageSize(); i++)
+                                            assertEquals("Verify page failed [fullId=" + fullId +
+                                                    ", i=" + i +
+                                                    ", state=" + state +
+                                                    ", buf=" + pageAddr +
+                                                    ", bhc=" + U.hexLong(System.identityHashCode(pageAddr)) +
+                                                    ", page=" + U.hexLong(System.identityHashCode(page)) + ']',
+                                                state & 0xFF, PageUtils.getByte(pageAddr, i) & 0xFF);
+                                    }
+
+                                    state = (state + 1) & 0xFF;
+
+                                    if (VERBOSE)
+                                        info("Write page [fullId=" + fullId + ", state=" + state +
+                                            ", buf=" + pageAddr +
+                                            ", bhc=" + U.hexLong(System.identityHashCode(pageAddr)) +
+                                            ", page=" + U.hexLong(System.identityHashCode(page)) + ']');
+
+                                    for (int i = PageIO.COMMON_HEADER_END; i < mem.pageSize(); i++)
+                                        PageUtils.putByte(pageAddr, i, (byte)state);
+
+                                    resMap.put(fullId, state);
+                                }
+                                finally {
+                                    mem.writeUnlock(fullId.cacheId(), fullId.pageId(),page, null,true);
+                                }
+                            }
+                            finally {
+                                mem.releasePage(fullId.cacheId(), fullId.pageId(),page);}
+                            }
+                            finally {
+                                ig.context().cache().context().database().checkpointReadUnlock();
+                            }
+                        }
+                        finally {
+                            updLock.readLock().unlock();
+                        }
+                    }
+                }
+            }, 8, "update-thread");
+
+        int checkpoints = 20;
+
+        while (checkpoints > 0) {
+            Map<FullPageId, Integer> snapshot = null;
+
+            Collection<FullPageId> pageIds;
+
+            updLock.writeLock().lock();
+
+            try {
+                snapshot = new HashMap<>(resMap);
+
+                pageIds = mem.beginCheckpoint();
+
+                checkpoints--;
+
+                if (checkpoints == 0)
+                    // No more writes should be done at this point.
+                    run.set(false);
+
+                info("Acquired pages for checkpoint: " + pageIds.size());
+            }
+            finally {
+                updLock.writeLock().unlock();
+            }
+
+            boolean ok = false;
+
+            try {
+                ByteBuffer tmpBuf = ByteBuffer.allocate(mem.pageSize());
+
+                tmpBuf.order(ByteOrder.nativeOrder());
+
+                long begin = System.currentTimeMillis();
+
+                long cp = 0;
+
+                long write = 0;
+
+                for (FullPageId fullId : pageIds) {
+                    long cpStart = System.nanoTime();
+
+                    Integer tag = mem.getForCheckpoint(fullId, tmpBuf, null);
+
+                    if (tag == null)
+                        continue;
+
+                    long cpEnd = System.nanoTime();
+
+                    cp += cpEnd - cpStart;
+
+                    Integer state = snapshot.get(fullId);
+
+                    if (allocated.contains(fullId) && state != -1) {
+                        tmpBuf.rewind();
+
+                        Integer first = null;
+
+                        for (int i = PageIO.COMMON_HEADER_END; i < mem.pageSize(); i++) {
+                            int val = tmpBuf.get(i) & 0xFF;
+
+                            if (first == null)
+                                first = val;
+
+                            // Avoid string concat.
+                            if (first != val)
+                                assertEquals("Corrupted buffer at position [pageId=" + fullId + ", pos=" + i + ']',
+                                    (int)first, val);
+
+                            // Avoid string concat.
+                            if (state != val)
+                                assertEquals("Invalid value at position [pageId=" + fullId + ", pos=" + i + ']',
+                                    (int)state, val);
+                        }
+                    }
+
+                    tmpBuf.rewind();
+
+                    long writeStart = System.nanoTime();
+
+                    storeMgr.write(cacheId, fullId.pageId(), tmpBuf, tag);
+
+                    long writeEnd = System.nanoTime();
+
+                    write += writeEnd - writeStart;
+
+                    tmpBuf.rewind();
+                }
+
+                long syncStart = System.currentTimeMillis();
+
+                storeMgr.sync(cacheId, 0);
+
+                long end = System.currentTimeMillis();
+
+                info("Written pages in " + (end - begin) + "ms, copy took " + (cp / 1_000_000) + "ms, " +
+                    "write took " + (write / 1_000_000) + "ms, sync took " + (end - syncStart) + "ms");
+
+                ok = true;
+            }
+            finally {
+                info("Finishing checkpoint...");
+
+                mem.finishCheckpoint();
+
+                info("Finished checkpoint");
+
+                if (!ok) {
+                    info("Cancelling updates...");
+
+                    run.set(false);
+
+                    updFut.get();
+                }
+            }
+
+            if (checkpoints != 0)
+                Thread.sleep(2_000);
+        }
+
+        info("checkpoints=" + checkpoints + ", done=" + updFut.isDone());
+
+        updFut.get();
+
+        // Mark the end.
+        wal.fsync(wal.log(new CheckpointRecord(cpRec.checkpointId(), start, true)));
+
+        assertEquals(0, mem.activePagesCount());
+
+        for (FullPageId fullId : pages) {
+
+            long page = mem.acquirePage(fullId.cacheId(), fullId.pageId());
+
+            try {
+                assertFalse("Page has a temp heap copy after the last checkpoint: [cacheId=" +
+                    fullId.cacheId() + ", pageId=" + fullId.pageId() + "]", mem.hasTempCopy(page));
+
+                assertFalse("Page is dirty after the last checkpoint: [cacheId=" +
+                    fullId.cacheId() + ", pageId=" + fullId.pageId() + "]", mem.isDirty(fullId.cacheId(), fullId.pageId(), page));
+            }
+            finally {
+                mem.releasePage(fullId.cacheId(), fullId.pageId(), page);
+            }
+        }
+
+        return F.t((Map<FullPageId, Integer>)resMap, start);
+    }
+
+    /**
+     * Removes the persistence work files: recursively deletes the "db" folder
+     * under the default Ignite work directory so the test starts from an empty store.
+     *
+     * @throws IgniteCheckedException If the work directory could not be resolved.
+     */
+    private void deleteWorkFiles() throws IgniteCheckedException {
+        deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false));
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/cd4d0400/modules/pds/src/test/java/org/apache/ignite/cache/database/db/wal/IgnitePdsWalTlbTest.java
----------------------------------------------------------------------
diff --git a/modules/pds/src/test/java/org/apache/ignite/cache/database/db/wal/IgnitePdsWalTlbTest.java b/modules/pds/src/test/java/org/apache/ignite/cache/database/db/wal/IgnitePdsWalTlbTest.java
new file mode 100644
index 0000000..03b1384
--- /dev/null
+++ b/modules/pds/src/test/java/org/apache/ignite/cache/database/db/wal/IgnitePdsWalTlbTest.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.cache.database.db.wal;
+
+import javax.cache.CacheException;
+import org.apache.ignite.IgniteDataStreamer;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.MemoryConfiguration;
+import org.apache.ignite.configuration.MemoryPolicyConfiguration;
+import org.apache.ignite.configuration.PersistentStoreConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.configuration.PersistentStoreConfiguration.DFLT_CHECKPOINTING_PAGE_BUFFER_SIZE;
+
+/**
+ *
+ */
+public class IgnitePdsWalTlbTest extends GridCommonAbstractTest {
+    /** Ip finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** Cache name. */
+    private static final String CACHE_NAME = "cache";
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>(CACHE_NAME);
+
+        cfg.setCacheConfiguration(ccfg);
+
+        MemoryConfiguration memCfg = new MemoryConfiguration();
+
+        MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration();
+
+        memPlcCfg.setName("dfltMemPlc");
+        memPlcCfg.setInitialSize(100 * 1024 * 1024);
+        memPlcCfg.setMaxSize(100 * 1024 * 1024);
+
+        memCfg.setMemoryPolicies(memPlcCfg);
+        memCfg.setDefaultMemoryPolicyName("dfltMemPlc");
+
+        cfg.setMemoryConfiguration(memCfg);
+
+        // WAL TLB size is deliberately huge (640 MB) and larger than the checkpoint
+        // page buffer, so WAL buffer allocation is expected to fail under load.
+        cfg.setPersistentStoreConfiguration(
+            new PersistentStoreConfiguration()
+                .setCheckpointingPageBufferSize(DFLT_CHECKPOINTING_PAGE_BUFFER_SIZE + 1)
+                .setTlbSize(640000000)
+        );
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(IP_FINDER);
+
+        // Grid whose name ends with "1" (the second grid) runs in client mode.
+        if (gridName.endsWith("1"))
+            cfg.setClientMode(true);
+
+        cfg.setDiscoverySpi(discoSpi);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long getTestTimeout() {
+        return 30_000;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false));
+
+        stopAllGrids();
+
+        startGrids(2);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false));
+    }
+
+    /**
+     * Streams entries through the client node (grid 1, see {@link #getConfiguration})
+     * and verifies that the oversized WAL TLB causes updates to fail with
+     * {@link CacheException} instead of completing successfully.
+     *
+     * @throws Exception if failed.
+     */
+    public void testWalDirectOutOfMemory() throws Exception {
+        IgniteEx ig = grid(1);
+
+        boolean oomRaised = false;
+
+        // try-with-resources: the streamer is AutoCloseable and was previously leaked
+        // on the success path; close() failures are caught by the same catch block.
+        try (IgniteDataStreamer<Integer, Integer> streamer = ig.dataStreamer(CACHE_NAME)) {
+            for (int i = 0; i < 100_000; i++) {
+                streamer.addData(i, 1);
+
+                if (i > 0 && i % 10_000 == 0)
+                    info("Done put: " + i);
+            }
+        }
+        catch (CacheException ignore) {
+            // Expected outcome: WAL TLB allocation failure surfaces as CacheException.
+            oomRaised = true;
+        }
+        finally {
+            assertTrue("Cache updates unexpectedly succeeded without WAL TLB failure.", oomRaised);
+
+            stopAllGrids();
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/cd4d0400/modules/pds/src/test/java/org/apache/ignite/cache/database/db/wal/IgniteWalDirectoriesConfigurationTest.java
----------------------------------------------------------------------
diff --git a/modules/pds/src/test/java/org/apache/ignite/cache/database/db/wal/IgniteWalDirectoriesConfigurationTest.java b/modules/pds/src/test/java/org/apache/ignite/cache/database/db/wal/IgniteWalDirectoriesConfigurationTest.java
new file mode 100644
index 0000000..4cca401
--- /dev/null
+++ b/modules/pds/src/test/java/org/apache/ignite/cache/database/db/wal/IgniteWalDirectoriesConfigurationTest.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.cache.database.db.wal;
+
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.PersistentStoreConfiguration;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ *
+ */
+public class IgniteWalDirectoriesConfigurationTest extends GridCommonAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        PersistentStoreConfiguration pCfg = new PersistentStoreConfiguration();
+
+        pCfg.setWalStorePath("test/db/wal");
+
+        cfg.setPersistentStoreConfiguration(pCfg);
+
+        return cfg;
+    }
+
+    /**
+     *
+     */
+    public void testPartialWalConfigurationNotAllowed() {
+        try {
+            startGrid();
+        }
+        catch (Exception ignore) {
+            return;
+        }
+
+        fail("Node successfully started with incorrect configuration, exception is expected.");
+    }
+}


Mime
View raw message