harmony-commits mailing list archives

From x..@apache.org
Subject svn commit: r644602 - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ gen/ los/ mark_compact/ verify/
Date Fri, 04 Apr 2008 04:16:30 GMT
Author: xli
Date: Thu Apr  3 21:16:12 2008
New Revision: 644602

URL: http://svn.apache.org/viewvc?rev=644602&view=rev
Log:
HARMONY-5688 : [drlvm][gc] Map/Unmap based LOS. This patch uses virtual memory (commit/decommit) support for LOS space adjustment.
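
For context, the approach in this patch is roughly: reserve a larger virtual address range up front (the non-boundary-sharing path added in gen.cpp below reserves max_heap_size+max_heap_size), then grow or shrink the large object space (LOS) by committing or decommitting pages at its end, instead of sliding the LOS/MOS boundary inside a single committed region. Below is a minimal sketch of that commit/decommit pattern only; vm_commit_mem and vm_decommit_mem are the port-layer calls used in the diff, but the declarations, the Lspace_Sketch struct, and the grow/shrink helpers are illustrative stand-ins, not part of the patch.

/* Sketch only (not part of this commit). Declarations are assumed here so the
 * sketch is self-contained; the real prototypes live in the DRLVM port layer
 * and may differ. */
typedef unsigned int  Boolean;
typedef unsigned long POINTER_SIZE_INT;
extern Boolean vm_commit_mem(void* start, POINTER_SIZE_INT size);
extern Boolean vm_decommit_mem(void* start, POINTER_SIZE_INT size);

struct Lspace_Sketch {
  void* heap_start;                      /* start of the reserved LOS range   */
  void* heap_end;                        /* current end of the committed part */
  POINTER_SIZE_INT committed_heap_size;  /* bytes currently committed         */
};

/* Grow LOS by trans_size bytes: commit the next part of the already reserved
 * range and move heap_end up. */
static void los_sketch_grow(Lspace_Sketch* los, POINTER_SIZE_INT trans_size)
{
  vm_commit_mem(los->heap_end, trans_size);
  los->heap_end = (void*)((POINTER_SIZE_INT)los->heap_end + trans_size);
  los->committed_heap_size += trans_size;
}

/* Shrink LOS by trans_size bytes: decommit the tail pages back to the OS and
 * move heap_end down. */
static void los_sketch_shrink(Lspace_Sketch* los, POINTER_SIZE_INT trans_size)
{
  void* new_end = (void*)((POINTER_SIZE_INT)los->heap_end - trans_size);
  vm_decommit_mem(new_end, trans_size);
  los->heap_end = new_end;
  los->committed_heap_size -= trans_size;
}

In the patch itself, these two moves correspond to the TRANS_FROM_MOS_TO_LOS and TRANS_FROM_LOS_TO_MOS branches added in lspace_alloc_collect.cpp for the case where LOS_ADJUST_BOUNDARY is FALSE.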

Modified:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.cpp?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.cpp Thu Apr  3 21:16:12 2008
@@ -76,7 +76,6 @@
   void* new_base = (void*)&(space->blocks[space->num_managed_blocks - block_dec_count]);
  
   void* decommit_base = (void*)round_down_to_size((POINTER_SIZE_INT)new_base, SPACE_ALLOC_UNIT);
-  
   assert( ((Block_Header*)decommit_base)->block_idx >= space->free_block_idx);
   
   void* old_end = (void*)&space->blocks[space->num_managed_blocks];
@@ -85,7 +84,8 @@
   
   Boolean result = vm_decommit_mem(decommit_base, decommit_size);
   assert(result == TRUE);
-  
+
+  space->heap_end = decommit_base;
   space->committed_heap_size = (POINTER_SIZE_INT)decommit_base - (POINTER_SIZE_INT)space->heap_start;
   space->num_managed_blocks = (unsigned int)(space->committed_heap_size >> GC_BLOCK_SHIFT_COUNT);
   

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Thu Apr  3 21:16:12 2008
@@ -480,7 +480,6 @@
 extern Boolean NOS_PARTIAL_FORWARD;
 
 //#define STATIC_NOS_MAPPING
-
 #ifdef STATIC_NOS_MAPPING
 
   //#define NOS_BOUNDARY ((void*)0x2ea20000)  //this is for 512M
@@ -491,7 +490,8 @@
 #else /* STATIC_NOS_MAPPING */
 
         extern void* nos_boundary;
-
+    extern Boolean share_los_boundary;
+    extern Boolean LOS_ADJUST_BOUNDARY;
 #endif /* STATIC_NOS_MAPPING */
 
 void gc_init_collector_alloc(GC* gc, Collector* collector);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp Thu Apr  3 21:16:12 2008
@@ -54,6 +54,7 @@
 POINTER_SIZE_INT min_heap_size_bytes = 16 * MB;
 POINTER_SIZE_INT max_heap_size_bytes = 0;
 
+Boolean share_los_boundary = FALSE;
 
 unsigned int GC_PROP;
 
@@ -345,6 +346,11 @@
     destroy_property_value(value);
   }
 
+  if (is_property_set("gc.share_los_boundary", VM_PROPERTIES) == 1){
+    share_los_boundary = get_boolean_property("gc.share_los_boundary");     
+  }
+
+
   if (is_property_set("gc.concurrent_gc", VM_PROPERTIES) == 1){
     Boolean use_all_concurrent_phase= get_boolean_property("gc.concurrent_gc");
     if(use_all_concurrent_phase){
@@ -377,7 +383,7 @@
       USE_CONCURRENT_GC = TRUE;
     }
   }
-
+ 
   char* concurrent_algo = NULL;
   
   if (is_property_set("gc.concurrent_algorithm", VM_PROPERTIES) == 1) {
@@ -418,5 +424,6 @@
 
   return gc;
 }
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp Thu Apr  3 21:16:12 2008
@@ -90,7 +90,6 @@
   *By default, we will use allocation speed computed in minor collection. */
 //#define SPACE_TUNE_BY_MAJOR_SPEED
 
-
 /* The tuning size computing before marking is not precise. We only estimate the probable direction of space tuning.
   * If this function decide to set TRANS_NOTHING, then we just call the normal marking function.
   * Else, we call the marking function for space tuning.  */
@@ -253,8 +252,13 @@
       max_tuning_size = max_tune_for_min_non_los;
 
     /*Round up to satisfy LOS alloc demand.*/
-    tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
-    max_tuning_size = round_down_to_size(max_tuning_size, GC_BLOCK_SIZE_BYTES);
+    if(LOS_ADJUST_BOUNDARY)  {
+      tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
+      max_tuning_size = round_down_to_size(max_tuning_size, GC_BLOCK_SIZE_BYTES);
+    }else {
+      tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
+      max_tuning_size = round_down_to_size(max_tuning_size, SPACE_ALLOC_UNIT);
+    }
 
     /*If the tuning size is too large, we did nothing and wait for the OOM of JVM*/
     /*Fixme: if the heap size is not mx, we can extend the whole heap size*/
@@ -274,7 +278,7 @@
         tuner->kind = TRANS_NOTHING;
       }else{
         /*We have tuner->tuning_size > max_tuning_size up there.*/
-        extend_heap_size = tuner->tuning_size - max_tuning_size;
+        extend_heap_size = tuner->tuning_size - max_tuning_size;    
         blocked_space_extend(fspace, (unsigned int)extend_heap_size);
         gc->committed_heap_size += extend_heap_size;
         tuner->kind = TRANS_FROM_MOS_TO_LOS;
@@ -358,7 +362,7 @@
       tuner->reverse = 1;
     }
   }
-
+  
   return;  
 }
 
@@ -372,9 +376,15 @@
 
   POINTER_SIZE_INT max_tuning_size = 0;  
   POINTER_SIZE_INT non_los_size = mspace->committed_heap_size + fspace->committed_heap_size;
-
-  gc_compute_live_object_size_after_marking(gc, non_los_size);
-
+  if(LOS_ADJUST_BOUNDARY) 
+    gc_compute_live_object_size_after_marking(gc, non_los_size);
+  else {
+    unsigned int collector_num = gc->num_active_collectors;
+    POINTER_SIZE_INT reserve_size = collector_num <<(GC_BLOCK_SHIFT_COUNT+2);
+    los_live_obj_size = (POINTER_SIZE_INT) lspace->last_surviving_size + reserve_size;
+    non_los_live_obj_size = ((POINTER_SIZE_INT)(mspace->free_block_idx-mspace->first_block_idx)<<GC_BLOCK_SHIFT_COUNT)+reserve_size;
+    non_los_live_obj_size = round_up_to_size(non_los_live_obj_size, SPACE_ALLOC_UNIT); 
+  }
   check_tuning_size(gc);
   
   /*We should assure that the non_los area is no less than min_none_los_size_bytes*/
@@ -396,7 +406,11 @@
       if( tuner->tuning_size > max_tuning_size)
         tuner->tuning_size = max_tuning_size;
       /*Round down so as not to break max_tuning_size*/
-      tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
+      if(LOS_ADJUST_BOUNDARY)
+        tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
+      else
+        tuner->tuning_size = round_down_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
+
        /*If tuning size is zero, we should reset kind to NOTHING, in case that gc_init_block_for_collectors relink the block list.*/
       if(tuner->tuning_size == 0)  tuner->kind = TRANS_NOTHING;
     }else{ 
@@ -414,7 +428,12 @@
       if(tuner->tuning_size > max_tuning_size) 
         tuner->tuning_size = max_tuning_size;
       /*Round down so as not to break max_tuning_size*/
-      tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
+
+      if (LOS_ADJUST_BOUNDARY)
+        tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
+      else
+        tuner->tuning_size = round_down_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
+
       if(tuner->tuning_size == 0)  tuner->kind = TRANS_NOTHING;
     }else{
       /* this is possible because of the reservation in gc_compute_live_object_size_after_marking*/
       
@@ -428,7 +447,6 @@
   POINTER_SIZE_INT failure_size = lspace_get_failure_size((Lspace*)lspace);  
  if( (tuner->kind == TRANS_FROM_MOS_TO_LOS) && (!tuner->reverse) && (tuner->tuning_size > failure_size) )
     doforce = FALSE;
-
   if( (tuner->force_tune) && (doforce) )
     compute_space_tune_size_for_force_tune(gc, max_tune_for_min_non_los);
 
@@ -537,6 +555,7 @@
   STD_FREE(tuner->interim_blocks);
   return;
 }
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp Thu Apr  3 21:16:12 2008
@@ -46,6 +46,8 @@
 
 Boolean JVMTI_HEAP_ITERATION = true;
 
+Boolean LOS_ADJUST_BOUNDARY = FALSE;
+
 GC* gc_gen_create()
 {
   GC* gc = (GC*)STD_MALLOC(sizeof(GC_Gen));  
@@ -170,19 +172,33 @@
   }
   physical_start = reserved_base;
   
-#else /* NON_STATIC_NOS_MAPPING */
+#else  /* NON_STATIC_NOS_MAPPING */
+
+  LOS_ADJUST_BOUNDARY = share_los_boundary;
 
+  if(large_page_hint) 
+    LOS_ADJUST_BOUNDARY = TRUE;
+  
   reserved_base = NULL;
-  if(large_page_hint){
-    reserved_base = alloc_large_pages(max_heap_size, large_page_hint);
-    if(reserved_base){
-      WARN2("gc.base","GC use large pages.");
-    } else {
-      free(large_page_hint);
-      large_page_hint = NULL;
-      WARN2("gc.base","GC use small pages.");
+
+  if(!LOS_ADJUST_BOUNDARY) {
+     reserved_base = vm_reserve_mem(NULL, max_heap_size+max_heap_size + SPACE_ALLOC_UNIT);
+     if(!reserved_base) 
+       LOS_ADJUST_BOUNDARY= TRUE;
+   }
+  
+  if (LOS_ADJUST_BOUNDARY)  {
+    reserved_base = NULL;
+    if(large_page_hint){
+      reserved_base = alloc_large_pages(max_heap_size, large_page_hint);
+      if(reserved_base){
+        WARN2("gc.base","GC use large pages.");
+      } else {
+        free(large_page_hint);
+        large_page_hint = NULL;
+        WARN2("gc.base","GC use small pages.");
+      }
     }
-  }
   
   if(reserved_base == NULL){
     if(max_heap_size < min_heap_size){
@@ -214,7 +230,50 @@
   /* Determine intial nos_boundary while NOS is not statically mapped */
   nos_base = (void*)((POINTER_SIZE_INT)reserved_base + mos_commit_size + los_size);
   nos_boundary = nos_base;
+  } else { /*LOS_ADJUST_BOUNDARY else*/
+   /*Large page not enabled at present for non LOS_ADJUST_BOUNDARY */
+#if 0  /* large page */
+    if(large_page_hint){
+      reserved_base = alloc_large_pages(max_heap_size+max_heap_size, large_page_hint);
+      if(reserved_base){
+      WARN2("gc.base","GC use large pages.");
+    } else {
+      free(large_page_hint);
+      large_page_hint = NULL;
+      WARN2("gc.base","GC use small pages.");
+    }
+  }
+  
+  if(reserved_base == NULL){
+    if(max_heap_size < min_heap_size){
+      DIE2("gc.base","Max heap size is smaller than min heap size. Please choose other values.");
+      exit(0);
+    }
 
+    unsigned int max_size_reduced = 0;
+    reserved_base = vm_reserve_mem(NULL, max_heap_size+max_heap_size + SPACE_ALLOC_UNIT);
+    while( !reserved_base ){
+      max_size_reduced += SPACE_ALLOC_UNIT;
+      max_heap_size -= SPACE_ALLOC_UNIT;
+      reserved_base = vm_reserve_mem(NULL, max_heap_size + max_heap_size + SPACE_ALLOC_UNIT);
+    }
+    
+    if(max_size_reduced){
+      DIE2("gc.base","Max heap size: can't be reserved. The max size can be reserved is "<< max_heap_size/MB<<" MB. ");
+      exit(0);
+    }
+#endif  /* large page */
+    physical_start = reserved_base;
+        
+    reserved_base = (void*)round_up_to_size((POINTER_SIZE_INT)reserved_base, SPACE_ALLOC_UNIT);
+    assert(!((POINTER_SIZE_INT)reserved_base % SPACE_ALLOC_UNIT));
+    
+    reserved_end = (void*)((POINTER_SIZE_INT)reserved_base + max_heap_size +max_heap_size );
+  
+    /* Determine intial nos_boundary while NOS is not statically mapped */
+    nos_base = (void*)((POINTER_SIZE_INT)reserved_base +max_heap_size+ mos_commit_size);
+    nos_boundary = nos_base;
+  }
 #endif  /* STATIC_NOS_MAPPING else */
 
   HEAP_BASE = (POINTER_SIZE_INT)reserved_base;
@@ -225,7 +284,10 @@
 #ifdef STATIC_NOS_MAPPING
   gc_gen->reserved_heap_size = los_mos_reserve_size + nos_reserve_size;
 #else
-  gc_gen->reserved_heap_size = max_heap_size;
+  if (LOS_ADJUST_BOUNDARY)
+    gc_gen->reserved_heap_size = max_heap_size;
+  else
+    gc_gen->reserved_heap_size = max_heap_size+max_heap_size;
 #endif
  /* Commented out for that the frontmost reserved mem size in los is not counted in los' committed size.
    * gc_gen->committed_heap_size = min_heap_size;
@@ -238,10 +300,13 @@
 
   max_heap_size_bytes = max_heap_size;
   min_heap_size_bytes = min_heap_size;
-  
+
   gc_los_initialize(gc_gen, reserved_base, los_size);
-  gc_mos_initialize(gc_gen, (void*)((POINTER_SIZE_INT)reserved_base + los_size), mos_reserve_size, mos_commit_size);
-  gc_nos_initialize(gc_gen, nos_base, nos_reserve_size, nos_commit_size);
+  if(LOS_ADJUST_BOUNDARY)
+    gc_mos_initialize(gc_gen, (void*)((POINTER_SIZE_INT)reserved_base + los_size), mos_reserve_size, mos_commit_size);
+  else
+    gc_mos_initialize(gc_gen, (void*)((POINTER_SIZE_INT)reserved_base + max_heap_size), mos_reserve_size, mos_commit_size);
+   gc_nos_initialize(gc_gen, nos_base, nos_reserve_size, nos_commit_size);
   
   gc_gen->committed_heap_size = space_committed_size(gc_get_nos(gc_gen))
                                                 + space_committed_size(gc_get_mos(gc_gen))
@@ -750,6 +815,7 @@
 #ifdef GC_GEN_STATS
       gc->stats->num_minor_collections++;
 #endif
+      if(LOS_ADJUST_BOUNDARY) gc->tuner->kind=TRANS_NOTHING;
       los_collection(los);
     }
     

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp Thu Apr  3 21:16:12 2008
@@ -317,10 +317,17 @@
 #ifdef STATIC_NOS_MAPPING
     total_size = max_heap_size_bytes - space_committed_size(los);
 #else
-    POINTER_SIZE_INT curr_heap_commit_end = 
-                              (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_BASE + gc->committed_heap_size;
-    assert(curr_heap_commit_end > (POINTER_SIZE_INT)mos->heap_start);
-    total_size = curr_heap_commit_end - (POINTER_SIZE_INT)mos->heap_start;
+    POINTER_SIZE_INT curr_heap_commit_end;
+   
+    if(LOS_ADJUST_BOUNDARY) {
+      curr_heap_commit_end=(POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_BASE + gc->committed_heap_size;
+      assert(curr_heap_commit_end > (POINTER_SIZE_INT)mos->heap_start);
+      total_size = curr_heap_commit_end - (POINTER_SIZE_INT)mos->heap_start;
+    }else {/*LOS_ADJUST_BOUNDARY else */
+      curr_heap_commit_end =  (nos->committed_heap_size)? (POINTER_SIZE_INT) nos->heap_start + nos->committed_heap_size:
+               (POINTER_SIZE_INT) mos->heap_start+mos->committed_heap_size;
+      total_size = curr_heap_commit_end - (POINTER_SIZE_INT) mos->heap_start;
+    }
 #endif
   assert(total_size >= used_mos_size);
   POINTER_SIZE_INT total_free = total_size - used_mos_size;
@@ -411,8 +418,11 @@
   }
 
   /* below are ajustment */  
-  POINTER_SIZE_INT curr_heap_commit_end = 
-                             (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_BASE + gc->committed_heap_size;
+  POINTER_SIZE_INT curr_heap_commit_end;
+  if(LOS_ADJUST_BOUNDARY)
+    curr_heap_commit_end = (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_BASE + gc->committed_heap_size;
+  else
+    curr_heap_commit_end = (POINTER_SIZE_INT)nos->heap_start + nos->committed_heap_size;
   
   void* new_nos_boundary = (void*)(curr_heap_commit_end - new_nos_size);
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp Thu Apr  3 21:16:12 2008
@@ -143,7 +143,7 @@
 {
   Collector** collector = gc->collectors;
   GC_Gen_Collector_Stats* stats;
-  for (unsigned int i=0; i<gc->num_active_collectors; i++){
+  for (unsigned int i=0; i<gc->num_collectors; i++){
     stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
     memset(stats, 0, sizeof(GC_Gen_Collector_Stats));
   }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h Thu Apr  3 21:16:12 2008
@@ -110,9 +110,9 @@
 
     while(!reach_heap_end){
         //FIXME: This while shoudl be if, try it!
-        while(!*((POINTER_SIZE_INT*)next_area_start)){
+        while((next_area_start< (POINTER_SIZE_INT)lspace->heap_end)&&!*((POINTER_SIZE_INT*)next_area_start)){
             assert(((Free_Area*)next_area_start)->size);
-            next_area_start += ((Free_Area*)next_area_start)->size;
+            next_area_start += ((Free_Area*)next_area_start)->size;            
         }
         if(next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
             //If there is a living object at this addr, return it, and update iterate_index
@@ -154,12 +154,12 @@
     POINTER_SIZE_INT next_area_start = (POINTER_SIZE_INT)lspace->heap_start + (*iterate_index) * KB;
     BOOLEAN reach_heap_end = 0;
     unsigned int hash_extend_size = 0;
-
-    while(!reach_heap_end){
-        //FIXME: This while shoudl be if, try it!
-        while(!*((POINTER_SIZE_INT*)next_area_start)){
+   
+while(!reach_heap_end){        
+   //FIXME: This while shoudl be if, try it!
+        while((next_area_start<(POINTER_SIZE_INT)lspace->heap_end) && !*((POINTER_SIZE_INT*)next_area_start)){
             assert(((Free_Area*)next_area_start)->size);
-            next_area_start += ((Free_Area*)next_area_start)->size;
+            next_area_start += ((Free_Area*)next_area_start)->size;    
         }
         if(next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
             //If there is a living object at this addr, return it, and update iterate_index

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp Thu Apr  3 21:16:12 2008
@@ -295,8 +295,16 @@
 void lspace_sliding_compact(Collector* collector, Lspace* lspace)
 {
   unsigned int iterate_index = 0;
-  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
-  Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj); 
+  Partial_Reveal_Object* p_obj; 
+  POINTER_SIZE_INT last_one=(POINTER_SIZE_INT) lspace->heap_start;
+  
+  
+  p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
+  if(!LOS_ADJUST_BOUNDARY)
+    lspace->last_surviving_size=0;
+  
+  if(!p_obj) return;
+  
 
   while( p_obj ){
     assert( obj_is_marked_in_vt(p_obj));
@@ -312,6 +320,7 @@
 #endif
     Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
     POINTER_SIZE_INT target_obj_end = (POINTER_SIZE_INT)p_target_obj + obj_size;
+    last_one = target_obj_end;
     if( p_obj != p_target_obj){
       memmove(p_target_obj, p_obj, obj_size);
     }
@@ -319,6 +328,9 @@
     p_obj = lspace_get_next_marked_object(lspace, &iterate_index);  
   }
 
+ if(!LOS_ADJUST_BOUNDARY)
+   lspace->last_surviving_size = ALIGN_UP_TO_KILO(last_one) - (POINTER_SIZE_INT) lspace->heap_start;
+
   return;
 }
 
@@ -329,36 +341,85 @@
     POINTER_SIZE_INT trans_size = tuner->tuning_size;
     POINTER_SIZE_INT new_fa_size = 0;
     assert(!(trans_size%GC_BLOCK_SIZE_BYTES));
-    
+    Mspace * mos=(Mspace*)((GC_Gen*)gc)->mos;
+    Fspace *nos = (Fspace*)((GC_Gen*)gc)->nos;
+
     /* Reset the pool first because its info is useless now. */
     free_area_pool_reset(lspace->free_pool);
 
+    /*Lspace collection in major collection must move object*/
+     
+    assert(lspace->move_object);
+
     switch(tuner->kind){
       case TRANS_FROM_MOS_TO_LOS:{
-        /*Lspace collection in major collection must move object*/
-        assert(lspace->move_object);
         //debug_minor_sweep
-        Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
-        lspace->heap_end = (void*)mos_first_block;
-        assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
+        if(LOS_ADJUST_BOUNDARY ) {
+          Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
+          lspace->heap_end = (void*)mos_first_block;
+          assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
+        }else{
+          vm_commit_mem(lspace->heap_end, trans_size);
+          lspace->heap_end= (void*)((POINTER_SIZE_INT)lspace->heap_end + trans_size);
  
+          //fixme: need to add decommit in NOS
+          if(trans_size < nos->committed_heap_size) {
+            nos->free_block_idx=nos->first_block_idx;
+            blocked_space_shrink((Blocked_Space*)nos, trans_size);
+          } else {
+            POINTER_SIZE_INT mos_free_size= blocked_space_free_mem_size((Blocked_Space*)mos);
+            void *uncommit_base=(void*)((POINTER_SIZE_INT)nos->heap_end-trans_size);
+            vm_decommit_mem(uncommit_base,trans_size);
+            unsigned int reduced_mos_size = trans_size - nos->committed_heap_size;
+            unsigned int size=round_down_to_size(mos_free_size-reduced_mos_size,SPACE_ALLOC_UNIT);
+            unsigned int nos_size=(unsigned int )(size*nos->survive_ratio/(nos->survive_ratio+mos->survive_ratio));
+            if(nos_size<GC_BLOCK_SIZE_BYTES)  nos_size=GC_BLOCK_SIZE_BYTES;
+            nos_size=round_up_to_size(nos_size, GC_BLOCK_SIZE_BYTES);
+            mos->num_managed_blocks-=((nos_size+reduced_mos_size)>>GC_BLOCK_SHIFT_COUNT);
+            mos->num_used_blocks = mos->free_block_idx-mos->first_block_idx;
+            mos->num_total_blocks=mos->num_managed_blocks;
+            mos->ceiling_block_idx-=((nos_size+reduced_mos_size)>>GC_BLOCK_SHIFT_COUNT);
+            assert(mos->num_used_blocks<=mos->num_managed_blocks);
+            void *start_address=(void*)&(mos->blocks[mos->num_managed_blocks]);
+            assert(start_address< uncommit_base);
+            mos->heap_end = start_address;
+            mos->committed_heap_size = (POINTER_SIZE_INT) start_address - (POINTER_SIZE_INT) mos->heap_start;
+            nos->heap_start = start_address;
+            nos->heap_end = uncommit_base;
+            nos->committed_heap_size=nos->reserved_heap_size = (POINTER_SIZE_INT)uncommit_base- (POINTER_SIZE_INT) start_address;
+            nos->num_total_blocks=nos->num_managed_blocks=nos_size>>GC_BLOCK_SHIFT_COUNT;
+            nos->free_block_idx=nos->first_block_idx=GC_BLOCK_INDEX_FROM(gc->heap_start,start_address);
+            nos->ceiling_block_idx=nos->first_block_idx+nos->num_managed_blocks-1;
+            nos->num_used_blocks = 0;
+            space_init_blocks((Blocked_Space*)nos);
+          }
+        }
         new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
         Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
         if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
         lspace->committed_heap_size += trans_size;
+        
         break;
       }
       case TRANS_FROM_LOS_TO_MOS:{
         assert(lspace->move_object);
-        assert(tuner->tuning_size);
-        Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
-        assert( (POINTER_SIZE_INT)lspace->heap_end - trans_size == (POINTER_SIZE_INT)mos_first_block );
-        lspace->heap_end = (void*)mos_first_block;
+        if(LOS_ADJUST_BOUNDARY ){
+          Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
+          assert( (POINTER_SIZE_INT)lspace->heap_end - trans_size == (POINTER_SIZE_INT)mos_first_block );
+              lspace->heap_end = (void*)mos_first_block;
+        }else{
+          void *p=(void*)((POINTER_SIZE_INT)lspace->heap_end - trans_size);
+          vm_decommit_mem(p, trans_size);
+          lspace->heap_end=p;
+          //fixme: need to add decommit in NOS
+          blocked_space_extend((Blocked_Space*)((GC_Gen*) gc)->nos, trans_size);
+        }
         lspace->committed_heap_size -= trans_size;
        /*LOS_Shrink: We don't have to scan lspace to build free pool when slide compact LOS*/
        assert((POINTER_SIZE_INT)lspace->scompact_fa_end > (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size);
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start - tuner->tuning_size;
         Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
+
         break;
       }
       default:{
@@ -479,6 +540,7 @@
   TRACE2("gc.process", "GC: end of lspace sweep algo ...\n");
   return;
 }
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp Thu Apr  3 21:16:12 2008
@@ -260,18 +260,23 @@
   mspace->num_collections++;
 
   GC* gc = mspace->gc;  
-
+  Transform_Kind kind= gc->tuner->kind;
+ 
   /* init the pool before starting multiple collectors */
 
   pool_iterator_init(gc->metadata->gc_rootset_pool);
 
   //For_LOS_extend
-  if(gc->tuner->kind != TRANS_NOTHING){
-    major_set_compact_slide();
-  }else if (collect_is_fallback()){
-    major_set_compact_slide();
-  }else{
-    major_set_compact_move();    
+  if(LOS_ADJUST_BOUNDARY){
+    if(gc->tuner->kind != TRANS_NOTHING){
+      major_set_compact_slide();
+    }else if (collect_is_fallback()){
+      major_set_compact_slide();
+    }else{
+      major_set_compact_move();    
+    }
+  }else {
+    gc->tuner->kind = TRANS_NOTHING;
   }
 
   if(major_is_compact_slide()){
@@ -291,8 +296,14 @@
     exit(0);
   }
 
+  if((!LOS_ADJUST_BOUNDARY)&&(kind != TRANS_NOTHING) ) {
+    gc->tuner->kind = kind;
+    gc_compute_space_tune_size_after_marking(gc);
+  }
+  
   return;  
 } 
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp Thu Apr  3 21:16:12 2008
@@ -193,6 +193,7 @@
   Blocked_Space* nos = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
   
   unsigned int num_active_collectors = gc->num_active_collectors;
+  Boolean is_fallback = collect_is_fallback();
   
   /* Pass 1: **************************************************
      mark all live objects in heap, and save all the slots that 
@@ -201,8 +202,8 @@
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: mark live objects in heap ...");
 
   unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
-
-  if(!collect_is_fallback())
+  
+  if(!is_fallback)
        mark_scan_heap(collector);  
   else
        mark_scan_heap_for_fallback(collector);
@@ -222,6 +223,13 @@
     }
 #endif
     gc_identify_dead_weak_roots(gc);
+
+#ifndef LOS_ADJUST_BOUNDARY
+#ifdef USE_32BITS_HASHCODE
+    if(is_fallback)
+      fallback_clear_fwd_obj_oi_init(collector);
+#endif
+#endif
     debug_num_compact_blocks = 0;
     /* let other collectors go */
     num_marking_collectors++; 
@@ -236,6 +244,10 @@
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: move object and set the forwarding offset table ...");
 
   atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1);
+#ifdef USE_32BITS_HASHCODE
+  if(is_fallback)
+    fallback_clear_fwd_obj_oi(collector);
+#endif
 
   mspace_move_objects(collector, mspace);   
   
@@ -280,7 +292,7 @@
     lspace_fix_repointed_refs(collector, lspace);   
     gc_fix_rootset(collector, FALSE);
     if(lspace->move_object)  lspace_sliding_compact(collector, lspace);    
-    
+
     num_fixing_collectors++; 
   }
   while(num_fixing_collectors != num_active_collectors + 1);
@@ -309,6 +321,7 @@
      while(num_extending_collectors != num_active_collectors);  
   }
 
+ 
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4");
 
   /* Leftover: **************************************************

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp?rev=644602&r1=644601&r2=644602&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp Thu Apr  3 21:16:12 2008
@@ -337,7 +337,7 @@
 inline Partial_Reveal_Object* lspace_get_next_object( Space* lspace, POINTER_SIZE_INT* & next_area_start){
   POINTER_SIZE_INT* ret_obj = NULL;
   
-  while(!*next_area_start && (POINTER_SIZE_INT)next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
+  while(((POINTER_SIZE_INT)next_area_start < (POINTER_SIZE_INT)lspace->heap_end)&&!*next_area_start ){
     next_area_start =(POINTER_SIZE_INT*)((POINTER_SIZE_INT)next_area_start + ((Free_Area*)next_area_start)->size);
   }
   if((POINTER_SIZE_INT)next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
@@ -440,6 +440,7 @@
   heap_verifier->live_obj_scanner = verifier_scan_live_objects;
   heap_verifier->all_obj_scanner   = verifier_scan_all_objects;
 }
+
 
 
 


