Return-Path: 
X-Original-To: archive-asf-public-internal@cust-asf2.ponee.io
Delivered-To: archive-asf-public-internal@cust-asf2.ponee.io
Received: from cust-asf.ponee.io (cust-asf.ponee.io [163.172.22.183]) by cust-asf2.ponee.io (Postfix) with ESMTP id 4D0D4200D11 for ; Mon, 2 Oct 2017 17:13:40 +0200 (CEST)
Received: by cust-asf.ponee.io (Postfix) id 4B57E160BDB; Mon, 2 Oct 2017 15:13:40 +0000 (UTC)
Delivered-To: archive-asf-public@cust-asf.ponee.io
Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by cust-asf.ponee.io (Postfix) with SMTP id 436A61609DE for ; Mon, 2 Oct 2017 17:13:37 +0200 (CEST)
Received: (qmail 30004 invoked by uid 500); 2 Oct 2017 15:13:31 -0000
Mailing-List: contact commits-help@hbase.apache.org; run by ezmlm
Precedence: bulk
List-Help: 
List-Unsubscribe: 
List-Post: 
List-Id: 
Reply-To: dev@hbase.apache.org
Delivered-To: mailing list commits@hbase.apache.org
Received: (qmail 29318 invoked by uid 99); 2 Oct 2017 15:13:31 -0000
Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Mon, 02 Oct 2017 15:13:31 +0000
Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id CD058F5C30; Mon, 2 Oct 2017 15:13:30 +0000 (UTC)
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
From: git-site-role@apache.org
To: commits@hbase.apache.org
Date: Mon, 02 Oct 2017 15:13:41 -0000
Message-Id: <500e806ed2e04b0e93dbe318e41e945b@git.apache.org>
In-Reply-To: <99a49c8b60554e8fa040d02ce0bee5b3@git.apache.org>
References: <99a49c8b60554e8fa040d02ce0bee5b3@git.apache.org>
X-Mailer: ASF-Git Admin Mailer
Subject: [12/51] [partial] hbase-site git commit: Published site at .
archived-at: Mon, 02 Oct 2017 15:13:40 -0000

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
index ce2e1a7..8596033 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
@@ -115,10 +115,10 @@
 107 this.flushHandlers = new FlushHandler[handlerCount];
 108 LOG.info("globalMemStoreLimit="
 109 + TraditionalBinaryPrefix
-110 .long2String(this.server.getRegionServerAccounting().getGlobalMemstoreLimit(), "", 1)
+110 .long2String(this.server.getRegionServerAccounting().getGlobalMemStoreLimit(), "", 1)
 111 + ", globalMemStoreLimitLowMark="
 112 + TraditionalBinaryPrefix.long2String(
-113 this.server.getRegionServerAccounting().getGlobalMemstoreLimitLowMark(), "", 1)
+113 this.server.getRegionServerAccounting().getGlobalMemStoreLimitLowMark(), "", 1)
 114 + ", Offheap="
 115 + (this.server.getRegionServerAccounting().isOffheap()));
 116 }
@@ -144,12 +144,12 @@
 136 while (!flushedOne) {
 137 // Find the biggest region that doesn't have too many storefiles
 138 // (might be null!)
-139 Region bestFlushableRegion = getBiggestMemstoreRegion(regionsBySize, excludedRegions, true);
+139 Region bestFlushableRegion = getBiggestMemStoreRegion(regionsBySize, excludedRegions, true);
 140 // Find the biggest region, total, even if it might have too many flushes.
-141 Region bestAnyRegion = getBiggestMemstoreRegion(
+141 Region bestAnyRegion = getBiggestMemStoreRegion(
 142 regionsBySize, excludedRegions, false);
 143 // Find the biggest region that is a secondary region
-144 Region bestRegionReplica = getBiggestMemstoreOfRegionReplica(regionsBySize,
+144 Region bestRegionReplica = getBiggestMemStoreOfRegionReplica(regionsBySize,
 145 excludedRegions);
 146
 147 if (bestAnyRegion == null && bestRegionReplica == null) {
@@ -159,7 +159,7 @@
 151
 152 Region regionToFlush;
 153 if (bestFlushableRegion != null &&
-154 bestAnyRegion.getMemstoreSize() > 2 * bestFlushableRegion.getMemstoreSize()) {
+154 bestAnyRegion.getMemStoreSize() > 2 * bestFlushableRegion.getMemStoreSize()) {
 155 // Even if it's not supposed to be flushed, pick a region if it's more than twice
 156 // as big as the best flushable one - otherwise when we're under pressure we make
 157 // lots of little flushes and cause lots of compactions, etc, which just makes
@@ -168,9 +168,9 @@
 160 LOG.debug("Under global heap pressure: " + "Region "
 161 + bestAnyRegion.getRegionInfo().getRegionNameAsString()
 162 + " has too many " + "store files, but is "
-163 + TraditionalBinaryPrefix.long2String(bestAnyRegion.getMemstoreSize(), "", 1)
+163 + TraditionalBinaryPrefix.long2String(bestAnyRegion.getMemStoreSize(), "", 1)
 164 + " vs best flushable region's "
-165 + TraditionalBinaryPrefix.long2String(bestFlushableRegion.getMemstoreSize(), "", 1)
+165 + TraditionalBinaryPrefix.long2String(bestFlushableRegion.getMemStoreSize(), "", 1)
 166 + ". Choosing the bigger.");
 167 }
 168 regionToFlush = bestAnyRegion;
@@ -183,20 +183,20 @@
 175 }
 176
 177 Preconditions.checkState(
-178 (regionToFlush != null && regionToFlush.getMemstoreSize() > 0) ||
-179 (bestRegionReplica != null && bestRegionReplica.getMemstoreSize() > 0));
+178 (regionToFlush != null && regionToFlush.getMemStoreSize() > 0) ||
+179 (bestRegionReplica != null && bestRegionReplica.getMemStoreSize() > 0));
 180
 181 if (regionToFlush == null ||
 182 (bestRegionReplica != null &&
 183 ServerRegionReplicaUtil.isRegionReplicaStoreFileRefreshEnabled(conf) &&
-184 (bestRegionReplica.getMemstoreSize()
-185 > secondaryMultiplier * regionToFlush.getMemstoreSize()))) {
+184 (bestRegionReplica.getMemStoreSize()
+185 > secondaryMultiplier * regionToFlush.getMemStoreSize()))) {
 186 LOG.info("Refreshing storefiles of region " + bestRegionReplica
 187 + " due to global heap pressure. Total memstore datasize="
 188 + StringUtils
-189 .humanReadableInt(server.getRegionServerAccounting().getGlobalMemstoreDataSize())
+189 .humanReadableInt(server.getRegionServerAccounting().getGlobalMemStoreDataSize())
 190 + " memstore heap size=" + StringUtils.humanReadableInt(
-191 server.getRegionServerAccounting().getGlobalMemstoreHeapSize()));
+191 server.getRegionServerAccounting().getGlobalMemStoreHeapSize()));
 192 flushedOne = refreshStoreFilesAndReclaimMemory(bestRegionReplica);
 193 if (!flushedOne) {
 194 LOG.info("Excluding secondary region " + bestRegionReplica +
@@ -206,9 +206,9 @@
 198 } else {
 199 LOG.info("Flush of region " + regionToFlush + " due to global heap pressure. "
 200 + "Total Memstore size="
-201 + humanReadableInt(server.getRegionServerAccounting().getGlobalMemstoreDataSize())
+201 + humanReadableInt(server.getRegionServerAccounting().getGlobalMemStoreDataSize())
 202 + ", Region memstore size="
-203 + humanReadableInt(regionToFlush.getMemstoreSize()));
+203 + humanReadableInt(regionToFlush.getMemStoreSize()));
 204 flushedOne = flushRegion(regionToFlush, true, false);
 205
 206 if (!flushedOne) {
@@ -239,7 +239,7 @@
 231 if (type != FlushType.NORMAL) {
 232 LOG.debug("Flush thread woke up because memory above low water="
 233 + TraditionalBinaryPrefix.long2String(
-234 server.getRegionServerAccounting().getGlobalMemstoreLimitLowMark(), "", 1));
+234 server.getRegionServerAccounting().getGlobalMemStoreLimitLowMark(), "", 1));
 235 // For offheap memstore, even if the lower water mark was breached due to heap overhead
 236 // we still select the regions based on the region's memstore data size.
 237 // TODO : If we want to decide based on heap over head it can be done without tracking
@@ -291,7 +291,7 @@
 283 }
 284 }
 285
-286 private Region getBiggestMemstoreRegion(
+286 private Region getBiggestMemStoreRegion(
 287 SortedMap<Long, Region> regionsBySize,
 288 Set<Region> excludedRegions,
 289 boolean checkStoreFileCount) {
@@ -315,7 +315,7 @@
 307 return null;
 308 }
 309
-310 private Region getBiggestMemstoreOfRegionReplica(SortedMap<Long, Region> regionsBySize,
+310 private Region getBiggestMemStoreOfRegionReplica(SortedMap<Long, Region> regionsBySize,
 311 Set<Region> excludedRegions) {
 312 synchronized (regionsInQueue) {
 313 for (Region region : regionsBySize.values()) {
@@ -596,19 +596,19 @@
 588 startTime = EnvironmentEdgeManager.currentTime();
 589 if (!server.getRegionServerAccounting().isOffheap()) {
 590 logMsg("global memstore heapsize",
-591 server.getRegionServerAccounting().getGlobalMemstoreHeapSize(),
-592 server.getRegionServerAccounting().getGlobalMemstoreLimit());
+591 server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
+592 server.getRegionServerAccounting().getGlobalMemStoreLimit());
 593 } else {
 594 switch (flushType) {
 595 case ABOVE_OFFHEAP_HIGHER_MARK:
 596 logMsg("the global offheap memstore datasize",
-597 server.getRegionServerAccounting().getGlobalMemstoreDataSize(),
-598 server.getRegionServerAccounting().getGlobalMemstoreLimit());
+597 server.getRegionServerAccounting().getGlobalMemStoreDataSize(),
+598 server.getRegionServerAccounting().getGlobalMemStoreLimit());
 599 break;
 600 case ABOVE_ONHEAP_HIGHER_MARK:
 601 logMsg("global memstore heapsize",
-602 server.getRegionServerAccounting().getGlobalMemstoreHeapSize(),
-603 server.getRegionServerAccounting().getGlobalOnHeapMemstoreLimit());
+602 server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
+603 server.getRegionServerAccounting().getGlobalOnHeapMemStoreLimit());
 604 break;
 605 default:
 606 break;
@@ -699,8 +699,8 @@
 691 * @param globalMemStoreSize
 692 */
 693 @Override
-694 public void setGlobalMemstoreLimit(long globalMemStoreSize) {
-695 this.server.getRegionServerAccounting().setGlobalMemstoreLimits(globalMemStoreSize);
+694 public void setGlobalMemStoreLimit(long globalMemStoreSize) {
+695 this.server.getRegionServerAccounting().setGlobalMemStoreLimits(globalMemStoreSize);
 696 reclaimMemStoreMemory();
 697 }
 698

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
index ce2e1a7..8596033 100644

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
index ce2e1a7..8596033 100644

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html
index ce2e1a7..8596033 100644

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
index ce2e1a7..8596033 100644