From: eshu11@apache.org
To: commits@geode.apache.org
Reply-To: dev@geode.apache.org
Subject: geode git commit: GEODE-2535: added a boolean flag to track if the value of a RecoveredEntry is in memory or on disk
Date: Wed, 22 Mar 2017 18:51:04 +0000 (UTC)

Repository: geode
Updated Branches:
  refs/heads/develop b06fffd70 -> 5ed0b4d18


GEODE-2535: added a boolean flag to track if the value of a RecoveredEntry is in memory or on disk

Update the in-VM/on-disk stats based on the boolean flag. Avoid negating the keyId by using the boolean flag instead.
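The essence of the change is that RecoveredEntry now carries an explicit flag instead of encoding "the value is still on disk" as a negative keyId. The sketch below is an illustration only; RecoveredEntrySketch is a hypothetical stand-in, not the real DiskEntry.RecoveredEntry class from the patch, but it shows the constructor chaining and the flag that callers branch on.

/**
 * Illustrative sketch only (hypothetical name, not org.apache.geode.internal.cache
 * .DiskEntry.RecoveredEntry). An explicit valueRecovered flag replaces the old
 * "negative keyId means the value is still on disk" convention; the public
 * constructors chain to a private one that sets the flag.
 */
public class RecoveredEntrySketch {

  private final long keyId;              // stays positive in both cases
  private final Object value;            // null when the value was left on disk
  private final boolean valueRecovered;  // true when the value is in memory

  /** Key-only recovery: the value is still lying in the oplog on disk. */
  public RecoveredEntrySketch(long keyId) {
    this(keyId, null, false);
  }

  /** Key and value recovery: the value was read back into memory. */
  public RecoveredEntrySketch(long keyId, Object value) {
    this(keyId, value, true);
  }

  private RecoveredEntrySketch(long keyId, Object value, boolean valueRecovered) {
    this.keyId = keyId;
    this.value = value;
    this.valueRecovered = valueRecovered;
  }

  public long getKeyId() {
    return keyId;
  }

  public Object getValue() {
    return value;
  }

  /** Callers branch on this flag instead of checking getKeyId() < 0. */
  public boolean getValueRecovered() {
    return valueRecovered;
  }
}

Because the in-memory/on-disk state no longer rides on the sign of the keyId, the patch can also delete DiskId.isKeyIdNegative() and the Math.abs()/negation fix-ups in Oplog and DiskStoreImpl, as the diff below shows.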
Project: http://git-wip-us.apache.org/repos/asf/geode/repo Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/5ed0b4d1 Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/5ed0b4d1 Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/5ed0b4d1 Branch: refs/heads/develop Commit: 5ed0b4d18b33b2cc414da43ac6136b79f2351210 Parents: b06fffd Author: eshu Authored: Wed Mar 22 11:46:25 2017 -0700 Committer: eshu Committed: Wed Mar 22 11:46:25 2017 -0700 ---------------------------------------------------------------------- .../geode/internal/cache/AbstractRegionMap.java | 4 +- .../apache/geode/internal/cache/DiskEntry.java | 209 ++++++++----------- .../org/apache/geode/internal/cache/DiskId.java | 22 -- .../geode/internal/cache/DiskStoreImpl.java | 3 - .../org/apache/geode/internal/cache/Oplog.java | 5 +- .../cache/DiskRegRecoveryJUnitTest.java | 50 ++++- .../geode/internal/cache/OplogJUnitTest.java | 4 +- 7 files changed, 132 insertions(+), 165 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/geode/blob/5ed0b4d1/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java index e3e87ea..eaababa 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java @@ -719,8 +719,8 @@ public abstract class AbstractRegionMap implements RegionMap { _getOwner().calculateRegionEntryValueSize(re)); } } else { - DiskEntry.Helper.updateRecoveredEntry((PlaceHolderDiskRegion) _getOwnerObject(), - (DiskEntry) re, value, (RegionEntryContext) _getOwnerObject()); + value.applyToDiskEntry((PlaceHolderDiskRegion) _getOwnerObject(), (DiskEntry) re, + (RegionEntryContext) _getOwnerObject()); } } catch (RegionClearedException rce) { throw new IllegalStateException( http://git-wip-us.apache.org/repos/asf/geode/blob/5ed0b4d1/geode-core/src/main/java/org/apache/geode/internal/cache/DiskEntry.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskEntry.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskEntry.java index d00f7a0..bf7c4d2 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskEntry.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskEntry.java @@ -455,7 +455,7 @@ public interface DiskEntry extends RegionEntry { did.setKeyId(re.getRecoveredKeyId()); did.setUserBits(re.getUserBits()); did.setValueLength(re.getValueLength()); - if (re.getRecoveredKeyId() < 0) { + if (!re.getValueRecovered()) { updateStats(drv, r, 0/* InVM */, 1/* OnDisk */, did.getValueLength()); } else { entry.setValueWithContext(drv, @@ -899,63 +899,7 @@ public interface DiskEntry extends RegionEntry { } } } else if (newValue instanceof RecoveredEntry) { - // Now that oplog creates are immediately put in cache - // a later oplog modify will get us here - RecoveredEntry re = (RecoveredEntry) newValue; - long oldKeyId = did.getKeyId(); - Object oldValueAsToken = entry.getValueAsToken(); - long oldOplogId = did.getOplogId(); - long newOplogId = re.getOplogId(); - if (newOplogId != oldOplogId) { - did.setOplogId(newOplogId); - 
re.setOplogId(oldOplogId); // so caller knows oldoplog id - } - did.setOffsetInOplog(re.getOffsetInOplog()); - // id already set - did.setUserBits(re.getUserBits()); - oldValueLength = did.getValueLength(); - did.setValueLength(re.getValueLength()); - - if (re.getRecoveredKeyId() < 0) { - if (!entry.isValueNull()) { - entry.handleValueOverflow(region); - entry.setValueWithContext(region, null); // fixes bug 41119 - } - } else { - entry.setValueWithContext(region, - entry.prepareValueForCache(region, re.getValue(), false)); - } - - if (re.getRecoveredKeyId() < 0) { // recovering an entry whose new value is on disk - if (oldKeyId >= 0) { // the entry's old value is in vm - // TODO: oldKeyId == 0 is the ILLEGAL id; what does that indicate? - int inVM = -1; - if (Token.isInvalidOrRemoved(oldValueAsToken)) { // but tokens are never in vm - inVM = 0; - } - updateStats(dr, region, inVM, 1/* OnDisk */, did.getValueLength()); - } else { // the entry's old value is also on disk - int valueLenDelta = -oldValueLength; // but it is no longer - valueLenDelta += did.getValueLength(); // new one is now on disk - updateStats(dr, region, 0, 0, valueLenDelta); - } - } else { // recovering an entry whose new value is in vm - int inVM = 1; - if (Token.isInvalidOrRemoved(re.getValue())) { // but tokens never in vm - inVM = 0; - } - if (oldKeyId < 0) { // the entry's old value is on disk - updateStats(dr, region, inVM, -1/* OnDisk */, -oldValueLength); - } else { // the entry's old value was in the vm - if (inVM == 1 && Token.isInvalidOrRemoved(oldValueAsToken)) { - // the old state was not in vm and not on disk. But now we are in vm. - updateStats(dr, region, 1, 0, 0); - } else if (inVM == 0 && !Token.isInvalidOrRemoved(oldValueAsToken)) { - // the old state was in vm and not on disk. But now we are not in vm. - updateStats(dr, region, -1, 0, 0); - } - } - } + ((RecoveredEntry) newValue).applyToDiskEntry(entry, region, dr, did); } else { // The new value in the entry needs to be set after the disk writing // has succeeded. 
@@ -1054,67 +998,6 @@ public interface DiskEntry extends RegionEntry { return result; } - public static void updateRecoveredEntry(PlaceHolderDiskRegion drv, DiskEntry entry, - RecoveredEntry newValue, RegionEntryContext context) { - if (newValue == null) { - throw new NullPointerException( - LocalizedStrings.DiskEntry_ENTRYS_VALUE_SHOULD_NOT_BE_NULL.toLocalizedString()); - } - DiskId did = entry.getDiskId(); - synchronized (did) { - Object oldValueAsToken = entry.getValueAsToken(); - boolean oldValueWasNull = oldValueAsToken == null; - int oldValueLength = did.getValueLength(); - // Now that oplog creates are immediately put in cache - // a later oplog modify will get us here - long oldOplogId = did.getOplogId(); - long newOplogId = newValue.getOplogId(); - if (newOplogId != oldOplogId) { - did.setOplogId(newOplogId); - newValue.setOplogId(oldOplogId); // so caller knows oldoplog id - } - did.setOffsetInOplog(newValue.getOffsetInOplog()); - // id already set - did.setUserBits(newValue.getUserBits()); - did.setValueLength(newValue.getValueLength()); - if (newValue.getRecoveredKeyId() >= 0) { - entry.setValueWithContext(context, - entry.prepareValueForCache(drv, newValue.getValue(), false)); - int inVM = 1; - if (Token.isInvalidOrRemoved(newValue.getValue())) { // but tokens never in vm - inVM = 0; - } - if (oldValueWasNull) { // the entry's old value is on disk - updateStats(drv, null, inVM, -1/* OnDisk */, -oldValueLength); - } else { // the entry's old value was in the vm - if (inVM == 1 && Token.isInvalidOrRemoved(oldValueAsToken)) { - // the old state was not in vm and not on disk. But now we are in vm. - updateStats(drv, null, 1, 0, 0); - } else if (inVM == 0 && !Token.isInvalidOrRemoved(oldValueAsToken)) { - // the old state was in vm and not on disk. But now we are not in vm. - updateStats(drv, null, -1, 0, 0); - } - } - } else { - if (!oldValueWasNull) { - entry.handleValueOverflow(context); - entry.setValueWithContext(context, null); // fixes bug 41119 - } - if (!oldValueWasNull) { // the entry's old value is in vm - int inVM = -1; - if (Token.isInvalidOrRemoved(oldValueAsToken)) { // but tokens are never in vm - inVM = 0; - } - updateStats(drv, null, inVM, 1/* OnDisk */, did.getValueLength()); - } else { // the entry's old value is also on disk - int valueLenDelta = -oldValueLength; // but it is no longer - valueLenDelta += did.getValueLength(); // new one is now on disk - updateStats(drv, null, 0, 0, valueLenDelta); - } - } - } - } - public static Object getValueInVMOrDiskWithoutFaultIn(DiskEntry entry, LocalRegion region) { Object result = OffHeapHelper.copyAndReleaseIfNeeded(getValueOffHeapOrDiskWithoutFaultIn(entry, region)); @@ -1282,9 +1165,6 @@ public interface DiskEntry extends RegionEntry { // must have been destroyed value = null; } else { - if (did.isKeyIdNegative()) { - did.setKeyId(-did.getKeyId()); - } // if a bucket region then create a CachedDeserializable here instead of object value = dr.getRaw(did); // fix bug 40192 if (value instanceof BytesAndBits) { @@ -1688,9 +1568,7 @@ public interface DiskEntry extends RegionEntry { } AsyncDiskEntry result = null; - // Asif: This will convert the -ve OplogKeyId to positive as part of fixing // Bug # 39989 - // GEODE-2535 fix should address the original #39989 negative keyId issue. 
did.unmarkForWriting(); // System.out.println("DEBUG: removeFromDisk doing remove(" + id + ")"); @@ -1786,6 +1664,9 @@ public interface DiskEntry extends RegionEntry { private long oplogId; private VersionTag tag; + /** whether the entry value has been faulted in after recovery. */ + private final boolean valueRecovered; + /** * Only for this constructor, the value is not loaded into the region & it is lying on the * oplogs. Since Oplogs rely on DiskId to furnish user bits so as to correctly interpret bytes, @@ -1793,17 +1674,23 @@ public interface DiskEntry extends RegionEntry { */ public RecoveredEntry(long keyId, long oplogId, long offsetInOplog, byte userBits, int valueLength) { - this(-keyId, oplogId, offsetInOplog, userBits, valueLength, null); + this(keyId, oplogId, offsetInOplog, userBits, valueLength, null, false); } public RecoveredEntry(long keyId, long oplogId, long offsetInOplog, byte userBits, int valueLength, Object value) { + this(keyId, oplogId, offsetInOplog, userBits, valueLength, value, true); + } + + public RecoveredEntry(long keyId, long oplogId, long offsetInOplog, byte userBits, + int valueLength, Object value, boolean valueRecovered) { this.recoveredKeyId = keyId; this.value = value; this.oplogId = oplogId; this.offsetInOplog = offsetInOplog; this.userBits = EntryBits.setRecoveredFromDisk(userBits, true); this.valueLength = valueLength; + this.valueRecovered = valueRecovered; } /** @@ -1847,6 +1734,10 @@ public interface DiskEntry extends RegionEntry { this.oplogId = v; } + public boolean getValueRecovered() { + return this.valueRecovered; + } + public VersionTag getVersionTag() { return this.tag; } @@ -1854,5 +1745,73 @@ public interface DiskEntry extends RegionEntry { public void setVersionTag(VersionTag tag) { this.tag = tag; } + + public void applyToDiskEntry(PlaceHolderDiskRegion drv, DiskEntry entry, + RegionEntryContext context) { + DiskId did = entry.getDiskId(); + synchronized (did) { + applyToDiskEntry(entry, context, drv, did); + } + } + + public void applyToDiskEntry(DiskEntry entry, RegionEntryContext region, AbstractDiskRegion dr, + DiskId did) { + int oldValueLength; + // Now that oplog creates are immediately put in cache + // a later oplog modify will get us here + Object oldValueAsToken = entry.getValueAsToken(); + boolean oldValueWasNull = oldValueAsToken == null; + long oldOplogId = did.getOplogId(); + long newOplogId = getOplogId(); + if (newOplogId != oldOplogId) { + did.setOplogId(newOplogId); + setOplogId(oldOplogId); // so caller knows oldoplog id + } + did.setOffsetInOplog(getOffsetInOplog()); + // id already set + did.setUserBits(getUserBits()); + oldValueLength = did.getValueLength(); + did.setValueLength(getValueLength()); + + if (!getValueRecovered()) { + if (!oldValueWasNull) { + entry.handleValueOverflow(region); + entry.setValueWithContext(region, null); // fixes bug 41119 + } + } else { + entry.setValueWithContext(region, entry.prepareValueForCache(region, getValue(), false)); + } + + if (!getValueRecovered()) { // recovering an entry whose new value is on disk + if (!oldValueWasNull) { // the entry's old value is in vm + // TODO: oldKeyId == 0 is the ILLEGAL id; what does that indicate? 
+ int inVM = -1; + if (Token.isInvalidOrRemoved(oldValueAsToken)) { // but tokens are never in vm + inVM = 0; + } + Helper.updateStats(dr, region, inVM, 1/* OnDisk */, did.getValueLength()); + } else { // the entry's old value is also on disk + int valueLenDelta = -oldValueLength; // but it is no longer + valueLenDelta += did.getValueLength(); // new one is now on disk + Helper.updateStats(dr, region, 0, 0, valueLenDelta); + } + } else { // recovering an entry whose new value is in vm + int inVM = 1; + if (Token.isInvalidOrRemoved(getValue())) { // but tokens never in vm + inVM = 0; + } + if (oldValueWasNull) { // the entry's old value is on disk + Helper.updateStats(dr, region, inVM, -1/* OnDisk */, -oldValueLength); + } else { // the entry's old value was in the vm + if (inVM == 1 && Token.isInvalidOrRemoved(oldValueAsToken)) { + // the old state was not in vm and not on disk. But now we are in vm. + Helper.updateStats(dr, region, 1, 0, 0); + } else if (inVM == 0 && !Token.isInvalidOrRemoved(oldValueAsToken)) { + // the old state was in vm and not on disk. But now we are not in vm. + Helper.updateStats(dr, region, -1, 0, 0); + } + } + } + } } } http://git-wip-us.apache.org/repos/asf/geode/blob/5ed0b4d1/geode-core/src/main/java/org/apache/geode/internal/cache/DiskId.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskId.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskId.java index 8d2c675..fba88f1 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskId.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskId.java @@ -92,8 +92,6 @@ public abstract class DiskId { */ abstract void setOffsetInOplog(long offsetInOplog); - abstract boolean isKeyIdNegative(); - abstract void markForWriting(); abstract void unmarkForWriting(); @@ -409,11 +407,6 @@ public abstract class DiskId { } @Override - boolean isKeyIdNegative() { - return false; - } - - @Override void markForWriting() { this.valueLength |= 0x80000000; } @@ -483,11 +476,6 @@ public abstract class DiskId { } @Override - boolean isKeyIdNegative() { - return false; - } - - @Override void markForWriting() { this.valueLength |= 0x80000000; } @@ -552,11 +540,6 @@ public abstract class DiskId { } @Override - boolean isKeyIdNegative() { - return this.keyId < 0; - } - - @Override void markForWriting() { throw new IllegalStateException("Should not be used for persistent region"); } @@ -630,11 +613,6 @@ public abstract class DiskId { } @Override - boolean isKeyIdNegative() { - return this.keyId < 0; - } - - @Override void markForWriting() { throw new IllegalStateException("Should not be used for persistent region"); } http://git-wip-us.apache.org/repos/asf/geode/blob/5ed0b4d1/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java index 642eed3..b97f428 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java @@ -703,9 +703,6 @@ public class DiskStoreImpl implements DiskStore { throws RegionClearedException { DiskRegion dr = region.getDiskRegion(); DiskId id = entry.getDiskId(); - if (dr.isBackup() && id.getKeyId() < 0) { - 
id.setKeyId(-id.getKeyId()); - } long start = async ? this.stats.startFlush() : this.stats.startWrite(); if (!async) { dr.getStats().startWrite(); http://git-wip-us.apache.org/repos/asf/geode/blob/5ed0b4d1/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java b/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java index 27f41a2..f6b4023 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java @@ -3659,7 +3659,6 @@ public final class Oplog implements CompactableOplog, Flushable { // This allows us to not encode the oplogEntryId explicitly in the // record long createOplogEntryId = getOplogSet().newOplogEntryId(); - long old_kid = id.getKeyId(); id.setKeyId(createOplogEntryId); // startPosForSynchOp = this.crf.currSize; @@ -3989,7 +3988,7 @@ public final class Oplog implements CompactableOplog, Flushable { return; } userBits = di.getUserBits(); - oplogKeyId = Math.abs(di.getKeyId()); + oplogKeyId = di.getKeyId(); valueOffset = di.getOffsetInOplog(); valueLength = di.getValueLength(); deKey = de.getKey(); @@ -6812,7 +6811,7 @@ public final class Oplog implements CompactableOplog, Flushable { Assert.fail("Attempting to write an entry with keyId=0 to oplog. Entry key=" + entry.getKey() + " diskId=" + entry.getDiskId() + " region=" + dr); } - long delta = calcDelta(abs(keyId), this.opCode); + long delta = calcDelta(keyId, this.opCode); this.deltaIdBytesLength = bytesNeeded(delta); this.size += this.deltaIdBytesLength; this.opCode += this.deltaIdBytesLength - 1; http://git-wip-us.apache.org/repos/asf/geode/blob/5ed0b4d1/geode-core/src/test/java/org/apache/geode/internal/cache/DiskRegRecoveryJUnitTest.java ---------------------------------------------------------------------- diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/DiskRegRecoveryJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/DiskRegRecoveryJUnitTest.java index 3fd091b..dea84b5 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/DiskRegRecoveryJUnitTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/DiskRegRecoveryJUnitTest.java @@ -858,27 +858,26 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase { b = EntryBits.setRecoveredFromDisk(b, true); b = EntryBits.setWithVersions(b, true); - // TODO darrel: need to wait for async recovery so keyIds will not be - // negative. - // TODO darrel: the current impl of KRF always has the keyId negative - // until a real get is done. + // TODO need to wait for async recovery for (int i = 0; i < 3; ++i) { DiskEntry de = (DiskEntry) rgn.basicGetEntry("" + i); DiskId did = de.getDiskId(); - // assertTrue(did.getKeyId() < 0); + assertTrue(did.getKeyId() > 0); assertEquals(1, did.getOplogId()); assertTrue(did.getOffsetInOplog() > -1); assertEquals(b, did.getUserBits()); assertTrue(did.getValueLength() > 0); assertEquals("" + i, rgn.get("" + i)); assertTrue(did.getKeyId() > 0); + assertFalse(de.isValueNull()); } // this last oplog does not have a krf because this disk store has not - // been closed. So its keyIds are > 0. + // been closed. So its value should be in memory now. 
for (int i = 3; i < 6; ++i) { DiskEntry de = (DiskEntry) rgn.basicGetEntry("" + i); DiskId did = de.getDiskId(); assertTrue(did.getKeyId() > 0); + assertFalse(de.isValueNull()); assertEquals(2, did.getOplogId()); assertTrue(did.getOffsetInOplog() > -1); assertEquals(b, did.getUserBits()); @@ -1010,23 +1009,27 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase { for (int i = 0; i < 3; ++i) { DiskEntry de = (DiskEntry) rgn.basicGetEntry("" + i); DiskId did = de.getDiskId(); - assertTrue(did.getKeyId() < 0); + assertTrue(did.getKeyId() > 0); + assertTrue(de.isValueNull()); assertEquals(1, did.getOplogId()); assertTrue(did.getOffsetInOplog() > -1); assertEquals(b, did.getUserBits()); assertTrue(did.getValueLength() > 0); assertEquals("" + i, rgn.get("" + i)); + assertFalse(de.isValueNull()); assertTrue(did.getKeyId() > 0); } for (int i = 3; i < 6; ++i) { DiskEntry de = (DiskEntry) rgn.basicGetEntry("" + i); DiskId did = de.getDiskId(); - assertTrue(did.getKeyId() < 0); + assertTrue(de.isValueNull()); + assertTrue(did.getKeyId() > 0); assertEquals(2, did.getOplogId()); assertTrue(did.getOffsetInOplog() > -1); assertEquals(b, did.getUserBits()); assertTrue(did.getValueLength() > 0); assertEquals("" + i, rgn.get("" + i)); + assertFalse(de.isValueNull()); assertTrue(did.getKeyId() > 0); } // ((LocalRegion)region).getDiskStore().forceCompaction(); @@ -1316,6 +1319,7 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase { assertEquals(0, dr.getNumOverflowOnDisk()); region.put(new Integer(1), new Integer(1)); + region.put(new Integer(1), new Integer(2)); region.localInvalidate(new Integer(1)); region.close(); region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL); @@ -1355,6 +1359,35 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase { assertEquals(0, dr.getNumEntriesInVM()); assertEquals(0, dr.getNumOverflowOnDisk()); + region.clear(); + assertEquals(0, dr.getNumEntriesInVM()); + assertEquals(0, dr.getNumOverflowOnDisk()); + + region.create(new Integer(1), null); + region.put(new Integer(1), new Integer(2)); + region.destroy(new Integer(1)); + region.close(); // No KRF generated, force multiple reads from CRF for same key + region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL); + dr = ((LocalRegion) region).getDiskRegion(); + // entry destroyed so not inVM or onDisk + assertEquals(0, dr.getNumEntriesInVM()); + assertEquals(0, dr.getNumOverflowOnDisk()); + + region.clear(); + assertEquals(0, dr.getNumEntriesInVM()); + assertEquals(0, dr.getNumOverflowOnDisk()); + + region.create(new Integer(1), null); + region.put(new Integer(1), new Integer(2)); + region.destroy(new Integer(1)); + region.put(new Integer(1), new Integer(2)); // recreate + region.invalidate(new Integer(1)); + region.close(); // No KRF generated, force multiple reads from CRF for same key + region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL); + dr = ((LocalRegion) region).getDiskRegion(); + // entry invalidated so not inVM or onDisk + assertEquals(0, dr.getNumEntriesInVM()); + assertEquals(0, dr.getNumOverflowOnDisk()); } finally { if (oldValue != null) { System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, oldValue); @@ -1379,5 +1412,6 @@ public class DiskRegRecoveryJUnitTest extends DiskRegionTestingBase { public void testVerifyStatsNoValues() { basicVerifyStats(false); } + } 
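The getNumEntriesInVM()/getNumOverflowOnDisk() assertions above are driven by the stat transitions that the patch consolidates into RecoveredEntry.applyToDiskEntry(). The following is a hedged, distilled restatement of that logic; RecoveryStatsTransition and its compute() signature are hypothetical, and the real code calls Helper.updateStats() directly and tests Token.isInvalidOrRemoved() where this sketch takes pre-computed booleans.

/**
 * Hedged sketch of the stats bookkeeping consolidated into
 * RecoveredEntry.applyToDiskEntry(). Class name and signature are hypothetical.
 */
public final class RecoveryStatsTransition {

  public final int inVMDelta;
  public final int onDiskDelta;
  public final long valueBytesDelta;

  private RecoveryStatsTransition(int inVMDelta, int onDiskDelta, long valueBytesDelta) {
    this.inVMDelta = inVMDelta;
    this.onDiskDelta = onDiskDelta;
    this.valueBytesDelta = valueBytesDelta;
  }

  public static RecoveryStatsTransition compute(boolean valueRecovered, boolean oldValueWasNull,
      boolean oldValueWasToken, boolean newValueIsToken, int oldValueLength, int newValueLength) {
    if (!valueRecovered) {
      // The recovered value stays on disk.
      if (!oldValueWasNull) {
        // The old value was in the VM; INVALID/REMOVED tokens never count as in-VM.
        int inVM = oldValueWasToken ? 0 : -1;
        return new RecoveryStatsTransition(inVM, +1, newValueLength);
      }
      // The old value was also on disk; only the byte count changes.
      return new RecoveryStatsTransition(0, 0, (long) newValueLength - oldValueLength);
    }
    // The recovered value is in the VM (unless it is an INVALID/REMOVED token).
    int inVM = newValueIsToken ? 0 : 1;
    if (oldValueWasNull) {
      // The old value was on disk and no longer is.
      return new RecoveryStatsTransition(inVM, -1, -oldValueLength);
    }
    // The old value was already in the VM: adjust only if token-ness flipped.
    if (inVM == 1 && oldValueWasToken) {
      return new RecoveryStatsTransition(+1, 0, 0);
    }
    if (inVM == 0 && !oldValueWasToken) {
      return new RecoveryStatsTransition(-1, 0, 0);
    }
    return new RecoveryStatsTransition(0, 0, 0);
  }
}

For example, recovering an entry whose new value stays on disk while its old value was a plain in-VM value yields the deltas (inVM -1, onDisk +1, +newValueLength bytes), matching the updateStats(dr, region, -1, 1, did.getValueLength()) call in the patch.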
http://git-wip-us.apache.org/repos/asf/geode/blob/5ed0b4d1/geode-core/src/test/java/org/apache/geode/internal/cache/OplogJUnitTest.java ---------------------------------------------------------------------- diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/OplogJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/OplogJUnitTest.java index 7549ea7..02e0a82 100755 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/OplogJUnitTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/OplogJUnitTest.java @@ -1075,10 +1075,10 @@ public class OplogJUnitTest extends DiskRegionTestingBase { case OP_MODIFY: // @todo how do a know if the key needed to be serialized? - size += 1 + 4 + val.length + Oplog.bytesNeeded(Oplog.abs(opKey)); + size += 1 + 4 + val.length + Oplog.bytesNeeded(opKey); break; case OP_DEL: - size += Oplog.bytesNeeded(Oplog.abs(opKey)); + size += Oplog.bytesNeeded(opKey); break; } return size;
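
Finally, the Oplog and OplogJUnitTest changes follow from the same invariant: with the flag carrying the recovery state, oplog key ids are written and read as positive values, so the Math.abs()/Oplog.abs() fix-ups are no longer needed. Below is a hedged sketch of that simplification; OplogKeyIdEncoder and its size estimate are illustrative assumptions, not the real Oplog.calcDelta()/bytesNeeded() implementation.

/**
 * Hedged sketch (hypothetical class, not the real Oplog code): once key ids are
 * always positive, the delta between consecutive oplog key ids can be computed
 * directly, without abs() fix-ups.
 */
public final class OplogKeyIdEncoder {

  private long previousKeyId;

  /** Delta to encode for the next record's key id; no abs() needed. */
  public long nextDelta(long keyId) {
    if (keyId == 0) {
      // Mirrors the assertion kept by the patch: keyId=0 must never be written to an oplog.
      throw new IllegalStateException("Attempting to write an entry with keyId=0 to oplog");
    }
    long delta = keyId - previousKeyId;
    previousKeyId = keyId;
    return delta;
  }

  /**
   * Rough variable-length size estimate for a signed delta (an assumption; the abs()
   * here only sizes the delta and is unrelated to the removed keyId negation).
   */
  public static int bytesNeeded(long delta) {
    long magnitude = Math.abs(delta);
    if (magnitude <= Byte.MAX_VALUE) {
      return 1;
    }
    if (magnitude <= Short.MAX_VALUE) {
      return 2;
    }
    if (magnitude <= Integer.MAX_VALUE) {
      return 4;
    }
    return 8;
  }
}

This mirrors the test change above, where the expected record size is now computed with Oplog.bytesNeeded(opKey) rather than Oplog.bytesNeeded(Oplog.abs(opKey)).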