geode-commits mailing list archives

From: sai_boorlaga...@apache.org
Subject: incubator-geode git commit: refactored to make logic simpler
Date: Tue, 24 May 2016 21:13:32 GMT
Repository: incubator-geode
Updated Branches:
  refs/heads/feature/GEODE-93 df6209894 -> a62ef26df


refactored to make logic simpler


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/a62ef26d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/a62ef26d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/a62ef26d

Branch: refs/heads/feature/GEODE-93
Commit: a62ef26df7664ebd02afa4d0d867fb230d748f4a
Parents: df62098
Author: Sai Boorlagadda <sboorlagadda@pivotal.io>
Authored: Tue May 24 12:53:35 2016 -0700
Committer: Sai Boorlagadda <sboorlagadda@pivotal.io>
Committed: Tue May 24 12:53:35 2016 -0700

----------------------------------------------------------------------
 .../gemfire/internal/cache/DiskEntry.java       |  43 ++++----
 .../gemfire/internal/cache/DiskRegionStats.java |   4 -
 .../gemfire/internal/cache/LocalRegion.java     |   4 -
 .../cache/PartitionedRegionStatsJUnitTest.java  | 108 ++-----------------
 4 files changed, 30 insertions(+), 129 deletions(-)
----------------------------------------------------------------------
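
For readers skimming the diff: the DiskEntry.java change below replaces four flat oldValue/newValue comparisons with a single branch keyed on whether the new value is a tombstone, and drops the temporary System.out tracing. The following is a minimal, self-contained model (not Geode code; the class, record, and method names are illustrative) of how the three DiskRegionStats counters move in the refactored branch. The real code also mirrors most of these deltas into bucket-level stats via incrementBucketStats, except when a live value is replaced by a tombstone.

    // Illustrative sketch only: TOMBSTONE stands in for Token.TOMBSTONE, and
    // Delta records the region-level counter movements shown in the diff below.
    final class DiskEntryStatsSketch {

      static final Object TOMBSTONE = new Object();

      record Delta(long entriesInVM, long overflowOnDisk, long overflowBytesOnDisk) {}

      // Hypothetical helper: which way the counters move when an entry's value
      // changes from oldValue to newValue (oldValueLength = bytes it used on disk).
      static Delta deltaForUpdate(Object oldValue, Object newValue, int oldValueLength) {
        if (newValue == TOMBSTONE) {
          return (oldValue == null)
              ? new Delta(0, -1, -oldValueLength)   // value was only on disk
              : new Delta(-1, 0, 0);                // live in-VM value became a tombstone
        }
        if (oldValue == null) {
          return new Delta(+1, -1, -oldValueLength); // value moved from disk into the VM
        }
        if (oldValue == TOMBSTONE) {
          return new Delta(+1, 0, 0);                // tombstone replaced by a live value
        }
        return new Delta(0, 0, 0);                   // in-VM update, no accounting change
      }

      public static void main(String[] args) {
        // e.g. faulting a 32-byte value back in from disk
        System.out.println(deltaForUpdate(null, "value", 32));
        // prints: Delta[entriesInVM=1, overflowOnDisk=-1, overflowBytesOnDisk=-32]
      }
    }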


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a62ef26d/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
index 7fa0c42..50a4a4f 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
@@ -1070,26 +1070,24 @@ public interface DiskEntry extends RegionEntry {
           boolean wasEvicted = le.testEvicted();
           le.unsetEvicted();
           if (!Token.isRemovedFromDisk(newValue)) {
-            System.out.println("OldValue:" + oldValue + ", NewValue:" + newValue);
-            if (oldValue == null && newValue == Token.TOMBSTONE) {
-              //dr.incNumEntriesInVM(1L);
-              dr.incNumOverflowOnDisk(-1L);
-              dr.incNumOverflowBytesOnDisk(-oldValueLength);
-              incrementBucketStats(region, 0/*InVM*/, -1/*OnDisk*/, -oldValueLength);
-            } else if (oldValue == null && newValue != Token.TOMBSTONE) {
-              dr.incNumEntriesInVM(1L);
-              dr.incNumOverflowOnDisk(-1L);
-              dr.incNumOverflowBytesOnDisk(-oldValueLength);
-              incrementBucketStats(region, 1/*InVM*/, -1/*OnDisk*/, -oldValueLength);
-            } else if(oldValue != null && newValue == Token.TOMBSTONE) {
-              dr.incNumEntriesInVM(-1L);
-              //if(dr.isBackup()) {
-              //  dr.incNumOverflowBytesOnDisk(-oldValueLength);
-              //  incrementBucketStats(region, -1/*InVM*/, 0/*OnDisk*/, -oldValueLength);
-              //}
-            } else if(oldValue == Token.TOMBSTONE && newValue != Token.TOMBSTONE) {
-              dr.incNumEntriesInVM(1L);
-              incrementBucketStats(region, 1/*InVM*/, 0/*OnDisk*/, 0/*overflowBytesOnDisk*/);
+            if(newValue == Token.TOMBSTONE) {
+              if (oldValue == null) {
+                dr.incNumOverflowOnDisk(-1L);
+                dr.incNumOverflowBytesOnDisk(-oldValueLength);
+                incrementBucketStats(region, 0/*InVM*/, -1/*OnDisk*/, -oldValueLength);
+              } else {
+                dr.incNumEntriesInVM(-1L);
+              }
+            } else {
+              if (oldValue == null) {
+                dr.incNumEntriesInVM(1L);
+                dr.incNumOverflowOnDisk(-1L);
+                dr.incNumOverflowBytesOnDisk(-oldValueLength);
+                incrementBucketStats(region, 1/*InVM*/, -1/*OnDisk*/, -oldValueLength);
+              } else if(oldValue == Token.TOMBSTONE) {
+                dr.incNumEntriesInVM(1L);
+                incrementBucketStats(region, 1/*InVM*/, 0/*OnDisk*/, 0/*overflowBytesOnDisk*/);
+              }
             }
           }
         }
@@ -1491,7 +1489,6 @@ public interface DiskEntry extends RegionEntry {
                                              int entriesInVmDelta,
                                              int overflowOnDiskDelta,
                                              int overflowBytesOnDiskDelta) {
-      System.out.println(">>>>>>> incrementBucketStats:: entriesInVmDelta:" + entriesInVmDelta + ", overflowOnDiskDelta:" + overflowOnDiskDelta + ", overflowBytesOnDiskDelta:" + overflowBytesOnDiskDelta);
       if (owner instanceof BucketRegion) {
         ((BucketRegion)owner).incNumEntriesInVM(entriesInVmDelta);
         ((BucketRegion)owner).incNumOverflowOnDisk(overflowOnDiskDelta);
@@ -1562,7 +1559,6 @@ public interface DiskEntry extends RegionEntry {
             // and now we are faulting it out
           }
         }
-        System.out.println("overflowToDisk::: entry:" + entry.getKey() + ", wasAlreadyPendingAsync:" + wasAlreadyPendingAsync + ", scheduledAsyncHere:" + scheduledAsyncHere);
         boolean movedValueToDisk = false; // added for bug 41849
         
         // If async then if it does not need to be written (because it already was)
@@ -1580,7 +1576,6 @@ public interface DiskEntry extends RegionEntry {
           }finally {
             entry.afterValueOverflow(region);
           }
-          System.out.println("overflowToDisk::: entry:" + entry.getKey() + " is set to null");
           movedValueToDisk = true;
           change = ((LRUClockNode)entry).updateEntrySize(ccHelper);
         }
@@ -1784,7 +1779,7 @@ public interface DiskEntry extends RegionEntry {
                 dr.incNumEntriesInVM(-1);
                 dr.incNumOverflowOnDisk(1L);
                 dr.incNumOverflowBytesOnDisk(did.getValueLength());
-                incrementBucketStats(region, 0/*InVM*/, 1/*OnDisk*/,
+                incrementBucketStats(region, -1/*InVM*/, 1/*OnDisk*/,
                                      did.getValueLength());
                 try {
                  entry.handleValueOverflow(region);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a62ef26d/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskRegionStats.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskRegionStats.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskRegionStats.java
index 0db01cd..75cdf21 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskRegionStats.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskRegionStats.java
@@ -22,7 +22,6 @@ import com.gemstone.gemfire.StatisticsFactory;
 import com.gemstone.gemfire.StatisticsType;
 import com.gemstone.gemfire.StatisticsTypeFactory;
 import com.gemstone.gemfire.internal.StatisticsTypeFactoryImpl;
-import com.gemstone.gemfire.internal.logging.LogService;
 
 /**
  * GemFire statistics about a {@link DiskRegion}.
@@ -236,7 +235,6 @@ public class DiskRegionStats {
    * overflowed to disk by a given amount.
    */
   public void incNumOverflowOnDisk(long delta) {
-    LogService.getLogger().info(">>>>>>>>>>> DiskRegionStats::incNumOverflowOnDisk:::" + getNumOverflowOnDisk() + ",delta=" + delta);
     this.stats.incLong(numOverflowOnDiskId, delta);
   }
 
@@ -245,7 +243,6 @@ public class DiskRegionStats {
    * overflowed to disk by a given amount.
    */
   public void incNumEntriesInVM(long delta) {
-    LogService.getLogger().info(">>>>>>>>>>> DiskRegionStats::incNumEntriesInVM:::" + getNumEntriesInVM() + ",delta=" + delta);
     this.stats.incLong(numEntriesInVMId, delta);
   }
   
@@ -254,7 +251,6 @@ public class DiskRegionStats {
    * overflowed to disk by a given amount.
    */
   public void incNumOverflowBytesOnDisk(long delta) {
-    LogService.getLogger().info(">>>>>>>>>>> DiskRegionStats::incNumOverflowBytesOnDisk:::" + getNumOverflowBytesOnDisk() + ",delta=" + delta);
     this.stats.incLong(numOverflowBytesOnDiskId, delta);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a62ef26d/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
index 286fef8..d4cbbe8 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
@@ -3438,10 +3438,6 @@ public class LocalRegion extends AbstractRegion
     //Fix for 45204 - don't include the tombstones in
     //any of our entry count stats.
     this.cachePerfStats.incEntryCount(-delta);
-    //if(getDiskRegion() != null) {
-    //  getDiskRegion().incNumEntriesInVM(-delta);
-    //}
-    //DiskEntry.Helper.incrementBucketStats(this, -delta/*InVM*/, 0/*OnDisk*/, 0);
   }
   
   public int getTombstoneCount() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/a62ef26d/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsJUnitTest.java
index 768ad8d..82e3489 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsJUnitTest.java
@@ -332,11 +332,11 @@ public class PartitionedRegionStatsJUnitTest
 
     
     int numEntries = 0;
-    System.out.println(">>>>> put(0)");
+    
     pr.put(0, 0);
     numEntries++;
     pr.getDiskStore().flush();
-    System.out.println(">>>>>> Flush");
+    
     long singleEntryMemSize = stats.getLong("dataStoreBytesInUse");
     assertEquals(1 , stats.getInt("dataStoreEntryCount"));
     assertEquals(0 , diskStats.getNumOverflowBytesOnDisk());
@@ -358,17 +358,6 @@ public class PartitionedRegionStatsJUnitTest
     assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
     
     assertTrue(entryOverflowSize > 0);
-    System.out.println("Total entries in VM:::::::");
-    countEntriesInMem(pr);
-    System.out.println(">>>>>>>>>>> GET(0)");
-    pr.get(1);
-    assertEquals(singleEntryMemSize, stats.getLong("dataStoreBytesInUse"));
-    assertEquals(2 , stats.getInt("dataStoreEntryCount"));
-    assertEquals(1 , diskStats.getNumEntriesInVM());
-    assertEquals(1 , diskStats.getNumOverflowOnDisk());
-    assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
-    assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
-    
     
     for(; numEntries < pr.getTotalNumberOfBuckets() * 5; numEntries++) {
       pr.put(numEntries, numEntries);
@@ -397,14 +386,13 @@ public class PartitionedRegionStatsJUnitTest
     assertEquals((numEntries -1) , diskStats.getNumOverflowOnDisk());
     assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
     assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
-
+    
     //Get some entries to trigger evictions
     for(int i = 0; i < numEntries / 2; i++) {
-      countEntriesInMem(pr);
-      System.out.println(">>>>>>>>>> Get(" + i + ") <<<<<<<<<<<<<<<<<<<<<<<<");
       pr.get(i);
     }
     pr.getDiskStore().flush();
+    
     assertEquals(singleEntryMemSize, stats.getLong("dataStoreBytesInUse"));
     assertEquals(numEntries , stats.getInt("dataStoreEntryCount"));
     assertEquals((numEntries -1) * entryOverflowSize, diskStats.getNumOverflowBytesOnDisk());
@@ -440,61 +428,16 @@ public class PartitionedRegionStatsJUnitTest
     assertEquals((numEntries -1) , diskStats.getNumOverflowOnDisk());
     assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
     assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
-    int entriesInMem = 1;
-
+    
    //Put get put - seems to leave entry in memory?
-    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
-        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
-        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
-        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
-    System.out.println(">>>>>>>>>>> PUT(update): 10");
     pr.put(10, 11);
-    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
-        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
-        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
-        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());    
-    System.out.println(">>>>>>>>>>> DELETE: 10");
-    pr.remove(10);
-    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
-        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
-        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
-        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
-    System.out.println(">>>>>>>>>>> PUT(update): 10");
+    pr.get(10);
     pr.put(10, 12);
     
     pr.getDiskStore().flush();
     
-    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
-        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
-        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
-        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
-    assertEquals(singleEntryMemSize * entriesInMem, stats.getLong("dataStoreBytesInUse"));
-    assertEquals(numEntries , stats.getInt("dataStoreEntryCount"));
-    assertEquals((numEntries - entriesInMem) * entryOverflowSize, diskStats.getNumOverflowBytesOnDisk());
-    assertEquals(entriesInMem , diskStats.getNumEntriesInVM());
-    assertEquals((numEntries - entriesInMem) , diskStats.getNumOverflowOnDisk());
-    assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
-    assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
-    
-    
-    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
-        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
-        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
-        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());    
-    System.out.println(">>>>>>>>>>> DELETE: 10");
-    pr.remove(10);
-    numEntries--;
-    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
-        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
-        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
-        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
-    
-    pr.getDiskStore().flush();
+    int entriesInMem = 1;
     
-    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
-        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
-        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
-        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
     assertEquals(singleEntryMemSize * entriesInMem, stats.getLong("dataStoreBytesInUse"));
     assertEquals(numEntries , stats.getInt("dataStoreEntryCount"));
     assertEquals((numEntries - entriesInMem) * entryOverflowSize, diskStats.getNumOverflowBytesOnDisk());
@@ -502,32 +445,22 @@ public class PartitionedRegionStatsJUnitTest
     assertEquals((numEntries - entriesInMem) , diskStats.getNumOverflowOnDisk());
     assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
     assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
+    
     //Do some random operations
-    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
-        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
-        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
-        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
+
     System.out.println("----Doing random operations");
     Random rand = new Random(12345L);
     for(int i =0; i < 1000; i++) {
-      System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
-          ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
-          ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
-          ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk() +
-          ", getDiskBytesFromBucketStats - " + getDiskBytes(pr));
       int key = rand.nextInt(numEntries);
       int op = rand.nextInt(3);
       switch(op) {
         case 0:
-          System.out.println(">>>>>>>>>>> PUT(update): " + key);
           pr.put(key, rand.nextInt());
           break;
         case 1:
-          System.out.println(">>>>>>>>>>> GET: " + key);
           pr.get(key);
           break;
         case 2:
-          System.out.println(">>>>>>>>>>> REMOVE: " + key);
           pr.remove(key);
           break;
       }
@@ -536,11 +469,7 @@ public class PartitionedRegionStatsJUnitTest
     pr.getDiskStore().flush();
     
     System.out.println("----Done with random operations");
-    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
-        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
-        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
-        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
-    
+
     numEntries = pr.entryCount();
         
     assertEquals(singleEntryMemSize * entriesInMem, stats.getLong("dataStoreBytesInUse"));
@@ -552,23 +481,8 @@ public class PartitionedRegionStatsJUnitTest
     assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
   }
 
-  private int countEntriesInMem(PartitionedRegion pr) {
-    int entriesInMem = 0;
-    for(BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
-      for(RegionEntry entry : br.entries.regionEntries()) {
-        if(entry._getValue() != null && !Token.isRemoved(entry._getValue())) {
-          System.out.println("Still in memory " + entry.getKey());
-          entriesInMem++;
-        }
-      }
-    }
-    
-    System.out.println("EntriesInMem = " + entriesInMem);
-    return entriesInMem;
-  }
-
   private Object getDiskBytes(PartitionedRegion pr) {
-Set<BucketRegion> brs = pr.getDataStore().getAllLocalBucketRegions();
+    Set<BucketRegion> brs = pr.getDataStore().getAllLocalBucketRegions();
     
     long bytes = 0;
     for(Iterator<BucketRegion> itr = brs.iterator(); itr.hasNext(); ) {


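On the test side, with the countEntriesInMem helper and the console tracing removed, PartitionedRegionStatsJUnitTest is left asserting after each flush that every entry is counted exactly once, either in the VM or overflowed to disk, and that the on-disk byte count is one fixed-size record per overflowed entry. A self-contained sketch of that invariant follows; the class name, method name, and numbers are illustrative and are not taken from the test.

    // Illustrative sketch: condenses the bookkeeping the JUnit test asserts after each flush.
    final class OverflowStatsInvariant {

      static boolean holds(long dataStoreEntryCount,
                           long numEntriesInVM,
                           long numOverflowOnDisk,
                           long numOverflowBytesOnDisk,
                           long entryOverflowSize) {
        // Each entry is either in the VM or overflowed, never both or neither,
        // and overflow bytes grow by a fixed amount per overflowed entry.
        return dataStoreEntryCount == numEntriesInVM + numOverflowOnDisk
            && numOverflowBytesOnDisk == numOverflowOnDisk * entryOverflowSize;
      }

      public static void main(String[] args) {
        // Hypothetical numbers: 100 entries, LRU keeps 1 in memory, 40 bytes per on-disk record.
        System.out.println(holds(100, 1, 99, 99 * 40L, 40L)); // true
      }
    }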