harmony-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From x..@apache.org
Subject svn commit: r559382 [2/2] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ los/ mark_compact/ mark_sweep/ thread/ trace_forward/ verify/
Date Wed, 25 Jul 2007 10:02:11 GMT
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp Wed Jul 25 03:02:07 2007
@@ -49,7 +49,7 @@
 
   lspace->gc = gc;
   /*LOS_Shrink:*/
-  lspace->move_object = FALSE;
+  lspace->move_object = 0;
 
   /*Treat with free area buddies*/
   lspace->free_pool = (Free_Area_Pool*)STD_MALLOC(sizeof(Free_Area_Pool));
@@ -61,7 +61,12 @@
   lspace->num_collections = 0;
   lspace->time_collections = 0;
   lspace->survive_ratio = 0.5f;
-
+  lspace->last_alloced_size = 0;
+  lspace->accumu_alloced_size = 0;  
+  lspace->total_alloced_size = 0;
+  lspace->last_surviving_size = 0;
+  lspace->period_surviving_size = 0;
+  
   gc_set_los((GC_Gen*)gc, (Space*)lspace);
   p_global_lspace_move_obj = &(lspace->move_object);
   los_boundary = lspace->heap_end;
@@ -104,15 +109,18 @@
 
 void lspace_collection(Lspace* lspace)
 {
-  /* heap is marked already, we need only sweep here. */
   lspace->num_collections ++;
-  lspace_reset_after_collection(lspace); 
-  /*When sliding compacting lspace, we don't need to sweep it anymore.
-  What's more, the assumption that the first word of one KB must be zero when iterating 
-  lspace in that function lspace_get_next_marked_object is not true*/  
-  if(!lspace->move_object) lspace_sweep(lspace);
-  else lspace->surviving_size = (POINTER_SIZE_INT)lspace->scompact_fa_start - (POINTER_SIZE_INT)lspace->heap_start;
-  lspace->move_object = FALSE;
+
+  if(!lspace->move_object){
+    lspace_reset_for_sweep(lspace);
+    lspace_sweep(lspace);   
+  }else{
+    /* The real action of LOS sliding compaction is done together with MOS compaction. */
+    lspace_reset_for_slide(lspace); 
+    /* When sliding compacting lspace, we don't need to sweep it anymore.
+      * What's more, the assumption that the first word of one KB must be zero when iterating 
+      * lspace in that function lspace_get_next_marked_object is not true */  
+  }
   return;
 }
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h Wed Jul 25 03:02:07 2007
@@ -48,10 +48,18 @@
   GC* gc;
   /*LOS_Shrink:This field stands for sliding compact to lspace */
   Boolean move_object;
-  /*For_statistic: size allocated science last time collect los, ie. last major*/
-  volatile POINTER_SIZE_INT alloced_size;
-  /*For_statistic: size survived after lspace_sweep*/  
-  POINTER_SIZE_INT surviving_size;
+
+  /* Size allocated since last minor collection. */
+  volatile uint64 last_alloced_size;
+  /* Size allocated since last major collection. */
+  uint64 accumu_alloced_size;
+  /* Total size allocated since VM starts. */
+  uint64 total_alloced_size;
+
+  /* Size survived from last collection. */
+  uint64 last_surviving_size;
+  /* Size survived after a certain period. */
+  uint64 period_surviving_size;  
   /* END of Space --> */
 
   Free_Area_Pool* free_pool;
@@ -72,8 +80,7 @@
 void* lspace_try_alloc(Lspace* lspace, POINTER_SIZE_INT alloc_size);
 void lspace_sliding_compact(Collector* collector, Lspace* lspace);
 void lspace_compute_object_target(Collector* collector, Lspace* lspace);
-void lspace_sweep(Lspace* lspace);
-void lspace_reset_after_collection(Lspace* lspace);
+void lspace_reset_for_slide(Lspace* lspace);
 void lspace_collection(Lspace* lspace);
 
 inline POINTER_SIZE_INT lspace_free_memory_size(Lspace* lspace){ /* FIXME:: */ return 0; }
@@ -125,5 +132,50 @@
 void lspace_fix_repointed_refs(Collector* collector, Lspace* lspace);
 
 POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace);
+
+inline Partial_Reveal_Object* lspace_get_next_marked_object_by_oi( Lspace* lspace, unsigned int* iterate_index)
+{
+    POINTER_SIZE_INT next_area_start = (POINTER_SIZE_INT)lspace->heap_start + (*iterate_index) * KB;
+    BOOLEAN reach_heap_end = 0;
+    unsigned int hash_extend_size = 0;
+
+    while(!reach_heap_end){
+        //FIXME: This while should be if, try it!
+        while(!*((POINTER_SIZE_INT*)next_area_start)){
+            assert(((Free_Area*)next_area_start)->size);
+            next_area_start += ((Free_Area*)next_area_start)->size;
+        }
+        if(next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
+            //If there is a living object at this addr, return it, and update iterate_index
+
+#ifdef USE_32BITS_HASHCODE
+            hash_extend_size  = (hashcode_is_attached((Partial_Reveal_Object*)next_area_start))?GC_OBJECT_ALIGNMENT:0;
+#endif
+
+            if(obj_is_marked_in_oi((Partial_Reveal_Object*)next_area_start)){
+                POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start) + hash_extend_size);
+                *iterate_index = (unsigned int)((next_area_start + obj_size - (POINTER_SIZE_INT)lspace->heap_start) >> BIT_SHIFT_TO_KILO);
+                return (Partial_Reveal_Object*)next_area_start;
+            //If this is a dead object, go on to find a living one.
+            }else{
+                POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)+ hash_extend_size);
+                next_area_start += obj_size;
+            }
+        }else{
+            reach_heap_end = 1;
+        } 
+    }
+    return NULL;
+
+}
+
+inline static Partial_Reveal_Object* lspace_get_first_marked_object_by_oi(Lspace* lspace, unsigned int* mark_bit_idx)
+{
+    return lspace_get_next_marked_object_by_oi(lspace, mark_bit_idx);
+}
+
+void lspace_reset_for_sweep(Lspace* lspace);
+void lspace_sweep(Lspace* lspace);
+
 
 #endif /*_LSPACE_H_ */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp Wed Jul 25 03:02:07 2007
@@ -160,10 +160,10 @@
           p_result = free_pool_former_lists_atomic_take_area_piece(pool, list_hint, alloc_size);
           if(p_result){
               memset(p_result, 0, alloc_size);
-              POINTER_SIZE_INT vold = lspace->alloced_size;
-              POINTER_SIZE_INT vnew = vold + alloc_size;
-              while( vold != atomic_casptrsz(&lspace->alloced_size, vnew, vold) ){                      
-                  vold = lspace->alloced_size;
+              uint64 vold = lspace->last_alloced_size;
+              uint64 vnew = vold + alloc_size;
+              while( vold != port_atomic_cas64(&lspace->last_alloced_size, vnew, vold) ){                      
+                  vold = lspace->last_alloced_size;
                   vnew = vold + alloc_size;
               }
               return p_result;
@@ -179,10 +179,10 @@
           p_result = free_pool_last_list_atomic_take_area_piece(pool, alloc_size);
           if(p_result){
               memset(p_result, 0, alloc_size);
-              POINTER_SIZE_INT vold = lspace->alloced_size;
-              POINTER_SIZE_INT vnew = vold + alloc_size;
-              while( vold != atomic_casptrsz(&lspace->alloced_size, vnew, vold) ){                      
-                  vold = lspace->alloced_size;
+              uint64 vold = lspace->last_alloced_size;
+              uint64 vnew = vold + alloc_size;
+              while( vold != port_atomic_cas64(&lspace->last_alloced_size, vnew, vold) ){                      
+                  vold = lspace->last_alloced_size;
                   vnew = vold + alloc_size;
               }
               return p_result;
@@ -308,7 +308,7 @@
   return;
 }
 
-void lspace_reset_after_collection(Lspace* lspace)
+void lspace_reset_for_slide(Lspace* lspace)
 {
     GC* gc = lspace->gc;
     Space_Tuner* tuner = gc->tuner;
@@ -321,22 +321,15 @@
 
     switch(tuner->kind){
       case TRANS_FROM_MOS_TO_LOS:{
-        if(lspace->move_object){
-          assert(tuner->force_tune);
-          Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks;
-          lspace->heap_end = (void*)mos_first_block;
-          assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
-          new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
-          Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
-          if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
-        }else{
-          void* origin_end = lspace->heap_end;
-          lspace->heap_end = (void*)(((GC_Gen*)gc)->mos->blocks);
-          /*The assumption that the first word of one KB must be zero when iterating lspace in 
-          that function lspace_get_next_marked_object is not true*/
-          Free_Area* trans_fa = free_area_new(origin_end, trans_size);
-          if(trans_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, trans_fa);
-        }
+        /*Lspace collection in major collection must move object*/
+        assert(lspace->move_object);
+        //debug_minor_sweep
+        Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks;
+        lspace->heap_end = (void*)mos_first_block;
+        assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
+        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
+        Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
+        if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
         lspace->committed_heap_size += trans_size;
         break;
       }
@@ -355,47 +348,51 @@
         break;
       }
       default:{
-        if(lspace->move_object){
-          assert(tuner->kind == TRANS_NOTHING);
-          assert(!tuner->tuning_size);
-          new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
-          Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
-          if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
-        }
+        assert(lspace->move_object);
+        assert(tuner->kind == TRANS_NOTHING);
+        assert(!tuner->tuning_size);
+        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
+        Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
+        if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
         break;
       }
     }
 
-    /*For_statistic los information.*/
-    lspace->alloced_size = 0;    
-    lspace->surviving_size = 0;
+//    lspace->accumu_alloced_size = 0;    
+//    lspace->last_alloced_size = 0;        
+    lspace->period_surviving_size = (POINTER_SIZE_INT)lspace->scompact_fa_start - (POINTER_SIZE_INT)lspace->heap_start;
+    lspace->survive_ratio = (float)lspace->accumu_alloced_size / (float)lspace->committed_heap_size;
 
     los_boundary = lspace->heap_end;
 }
 
+
+void lspace_reset_for_sweep(Lspace* lspace)
+{
+//  lspace->last_alloced_size = 0;    
+  lspace->last_surviving_size = 0;
+}
+
 void lspace_sweep(Lspace* lspace)
 {
   unsigned int mark_bit_idx = 0;
   POINTER_SIZE_INT cur_size = 0;
   void *cur_area_start, *cur_area_end;
 
-  /*If it is TRANS_FROM_MOS_TO_LOS now, we must clear the fa alread added in lspace_reset_after_collection*/
   free_area_pool_reset(lspace->free_pool);
 
   Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start;
-  Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx);
+  Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object_by_oi(lspace, &mark_bit_idx);
   if(p_next_obj){
-    obj_unmark_in_vt(p_next_obj);
-    /* we need this because, in hybrid situation of gen_mode and non_gen_mode, LOS will only be marked
-       in non_gen_mode, and not reset in gen_mode. When it switches back from gen_mode to non_gen_mode,
-       the last time marked object is thought to be already marked and not scanned for this cycle. */
+//    obj_unmark_in_vt(p_next_obj);
+    /*Fixme: This might not be necessary, for there is a bit clearing operation in forward_object->obj_mark_in_oi*/
     obj_clear_dual_bits_in_oi(p_next_obj);
     /*For_statistic: sum up the size of suvived large objects, useful to deciede los extention.*/
-unsigned int obj_size = vm_object_size(p_next_obj);
+    unsigned int obj_size = vm_object_size(p_next_obj);
 #ifdef USE_32BITS_HASHCODE
     obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
 #endif
-    lspace->surviving_size += ALIGN_UP_TO_KILO(obj_size);    
+    lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size);    
   }
 
   cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj);
@@ -416,16 +413,16 @@
     /* successfully create an area */
 
     p_prev_obj = p_next_obj;
-    p_next_obj = lspace_get_next_marked_object(lspace, &mark_bit_idx);
+    p_next_obj = lspace_get_next_marked_object_by_oi(lspace, &mark_bit_idx);
     if(p_next_obj){
-      obj_unmark_in_vt(p_next_obj);
+//      obj_unmark_in_vt(p_next_obj);
       obj_clear_dual_bits_in_oi(p_next_obj);
       /*For_statistic: sum up the size of suvived large objects, useful to deciede los extention.*/
       unsigned int obj_size = vm_object_size(p_next_obj);
 #ifdef USE_32BITS_HASHCODE
       obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
 #endif
-      lspace->surviving_size += ALIGN_UP_TO_KILO(obj_size);
+      lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size);
     }
 
 #ifdef USE_32BITS_HASHCODE
@@ -449,10 +446,6 @@
    mark_bit_idx = 0;
    assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx));
 
-  /*Update survive ratio here. If we tune LOS this time, the ratio is computed by the new committed size.*/
-  /*Fixme: We should keep the surviving size of last time, and set denominator to last_survive + current_alloc*/
-  lspace->survive_ratio = (float)lspace->surviving_size / (float)lspace->committed_heap_size;
-
   return;
-
 }
+

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp Wed Jul 25 03:02:07 2007
@@ -35,6 +35,7 @@
 {
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
   assert(p_obj);
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
   
   if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
     assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
@@ -208,4 +209,5 @@
   fspace_block_iterate_init((Fspace*)((GC_Gen*)collector->gc)->nos);
 }
 #endif
+
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp Wed Jul 25 03:02:07 2007
@@ -71,6 +71,12 @@
   mspace->num_collections = 0;
   mspace->time_collections = 0;
   mspace->survive_ratio = 0.2f;
+  mspace->last_alloced_size = 0;
+  mspace->accumu_alloced_size = 0;  
+  mspace->total_alloced_size = 0;
+  mspace->last_surviving_size = 0;
+  mspace->period_surviving_size = 0;
+  
 
   mspace->move_object = TRUE;
   mspace->gc = gc;
@@ -165,5 +171,6 @@
 {
     return mspace->expected_threshold_ratio;
 }
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h Wed Jul 25 03:02:07 2007
@@ -36,10 +36,18 @@
   unsigned int collect_algorithm;
   GC* gc;
   Boolean move_object;
-  /*Size allocted after last collection.*/
-  POINTER_SIZE_INT alloced_size;
-  /*For_statistic: size survived after major*/  
-  POINTER_SIZE_INT surviving_size;
+
+  /* Size allocated since last minor collection. */
+  volatile uint64 last_alloced_size;
+  /* Size allocated since last major collection. */
+  uint64 accumu_alloced_size;
+  /* Total size allocated since VM starts. */
+  uint64 total_alloced_size;
+
+  /* Size survived from last collection. */
+  uint64 last_surviving_size;
+  /* Size survived after a certain period. */
+  uint64 period_surviving_size;  
   /* END of Space --> */
     
   Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp Wed Jul 25 03:02:07 2007
@@ -36,10 +36,9 @@
     }
     /* ok, got one */
     Block_Header* alloc_block = (Block_Header*)&(mspace->blocks[allocated_idx - mspace->first_block_idx]);
-    mspace->alloced_size += GC_BLOCK_SIZE_BYTES;
 
     allocator_init_free_block(allocator, alloc_block);
-    
+
     return TRUE;
   }
 
@@ -74,4 +73,5 @@
     
   return p_return;
 }
+
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp Wed Jul 25 03:02:07 2007
@@ -54,6 +54,8 @@
   }
 }
 
+
+Space* gc_get_nos(GC_Gen* gc);
 void mspace_reset_after_compaction(Mspace* mspace)
 {
   unsigned int old_num_used = mspace->num_used_blocks;
@@ -79,9 +81,8 @@
   }
   mspace->num_used_blocks = new_num_used;
   /*For_statistic mos infomation*/
-  mspace->surviving_size = new_num_used * GC_BLOCK_SIZE_BYTES;
-  mspace->alloced_size = 0;
-  
+  mspace->period_surviving_size = new_num_used * GC_BLOCK_SIZE_BYTES;
+ 
   /* we should clear the remaining blocks which are set to be BLOCK_COMPACTED or BLOCK_TARGET */
   for(; i < mspace->num_managed_blocks; i++){
     Block_Header* block = (Block_Header*)&(blocks[i]);
@@ -335,6 +336,7 @@
 
   return;  
 } 
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp Wed Jul 25 03:02:07 2007
@@ -105,6 +105,7 @@
 
       /* current sector is done, let's move it. */
       POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr;
+      assert((sector_distance % GC_OBJECT_ALIGNMENT) == 0);
       curr_block->table[curr_sector] = sector_distance;
 
       memmove(dest_sector_addr, src_sector_addr, curr_sector_size);
@@ -151,6 +152,7 @@
 static volatile unsigned int num_marking_collectors = 0;
 static volatile unsigned int num_fixing_collectors = 0;
 static volatile unsigned int num_moving_collectors = 0;
+static volatile unsigned int num_restoring_collectors = 0;
 static volatile unsigned int num_extending_collectors = 0;
 
 void move_compact_mspace(Collector* collector) 
@@ -201,6 +203,7 @@
   old_num = atomic_inc32(&num_moving_collectors);
   if( ++old_num == num_active_collectors ){
     /* single thread world */
+    if(lspace->move_object) lspace_compute_object_target(collector, lspace);    
     gc->collect_result = gc_collection_result(gc);
     if(!gc->collect_result){
       num_moving_collectors++; 
@@ -225,9 +228,21 @@
     /* last collector's world here */
     lspace_fix_repointed_refs(collector, lspace);   
     gc_fix_rootset(collector);
+    if(lspace->move_object)  lspace_sliding_compact(collector, lspace);    
     num_fixing_collectors++; 
   }
   while(num_fixing_collectors != num_active_collectors + 1);
+
+
+  /* Pass 4: **************************************************
+     restore obj_info                                         */
+  atomic_cas32( &num_restoring_collectors, 0, num_active_collectors);
+  
+  collector_restore_obj_info(collector);
+
+  atomic_inc32(&num_restoring_collectors);
+
+  while(num_restoring_collectors != num_active_collectors);
 
    /* Dealing with out of memory in mspace */  
   if(mspace->free_block_idx > fspace->first_block_idx){    

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp Wed Jul 25 03:02:07 2007
@@ -368,6 +368,7 @@
 
        unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj);
       if(p_obj != p_target_obj){
+        assert((((POINTER_SIZE_INT)p_target_obj) % GC_OBJECT_ALIGNMENT) == 0);
         memmove(p_target_obj, p_obj, obj_size);
       }
       set_obj_info(p_target_obj, 0);
@@ -466,7 +467,6 @@
   old_num = atomic_inc32(&num_repointing_collectors);
   /*last collector's world here*/
   if( ++old_num == num_active_collectors ){
-    /*LOS_Shrink: but lspace->move_object could be set individually without shrinking LOS.*/
     if(lspace->move_object) lspace_compute_object_target(collector, lspace);
     gc->collect_result = gc_collection_result(gc);
     if(!gc->collect_result){

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp Wed Jul 25 03:02:07 2007
@@ -16,9 +16,9 @@
 
 #include "sspace.h"
 #include "sspace_chunk.h"
-#include "../gen/gen.h"
-#include "../common/gc_space.h"
 #include "sspace_verify.h"
+#include "gc_ms.h"
+#include "../gen/gen.h"
 
 struct GC_Gen;
 
@@ -56,8 +56,13 @@
   sspace->gc = gc;
   
   sspace_init_chunks(sspace);
-  
-  gc_set_pos((GC_Gen*)gc, (Space*)sspace);
+
+#ifdef ONLY_SSPACE_IN_HEAP
+  gc_ms_set_sspace((GC_MS*)gc, sspace);
+#else
+  gc_set_mos((GC_Gen*)gc, (Space*)sspace);
+#endif
+
 #ifdef SSPACE_VERIFY
   sspace_verify_init(gc);
 #endif
@@ -73,13 +78,66 @@
   STD_FREE(sspace);
 }
 
-void mutator_init_small_chunks(Mutator *mutator)
+void allocator_init_local_chunks(Allocator *allocator)
 {
-  unsigned int size = sizeof(Chunk_Header*) * (SMALL_LOCAL_CHUNK_NUM + MEDIUM_LOCAL_CHUNK_NUM);
-  Chunk_Header **chunks = (Chunk_Header**)STD_MALLOC(size);
-  memset(chunks, 0, size);
-  mutator->small_chunks = chunks;
-  mutator->medium_chunks = chunks + SMALL_LOCAL_CHUNK_NUM;
+  Sspace *sspace = gc_get_sspace(allocator->gc);
+  Size_Segment **size_segs = sspace->size_segments;
+  
+  /* Alloc mem for size segments (Chunk_Header**) */
+  unsigned int seg_size = sizeof(Chunk_Header**) * SIZE_SEGMENT_NUM;
+  Chunk_Header ***local_chunks = (Chunk_Header***)STD_MALLOC(seg_size);
+  memset(local_chunks, 0, seg_size);
+  
+  /* Alloc mem for local chunk pointers */
+  unsigned int chunk_ptr_size = 0;
+  for(unsigned int i = SIZE_SEGMENT_NUM; i--;){
+    if(size_segs[i]->local_alloc){
+      chunk_ptr_size += size_segs[i]->chunk_num;
+    }
+  }
+  chunk_ptr_size *= sizeof(Chunk_Header*);
+  Chunk_Header **chunk_ptrs = (Chunk_Header**)STD_MALLOC(chunk_ptr_size);
+  memset(chunk_ptrs, 0, chunk_ptr_size);
+  
+  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
+    if(size_segs[i]->local_alloc){
+      local_chunks[i] = chunk_ptrs;
+      chunk_ptrs += size_segs[i]->chunk_num;
+    }
+  }
+  
+  allocator->local_chunks = local_chunks;
+}
+
+void allocactor_destruct_local_chunks(Allocator *allocator)
+{
+  Sspace *sspace = gc_get_sspace(allocator->gc);
+  Size_Segment **size_segs = sspace->size_segments;
+  Chunk_Header ***local_chunks = allocator->local_chunks;
+  Chunk_Header **chunk_ptrs = NULL;
+  unsigned int chunk_ptr_num = 0;
+  
+  /* Find local chunk pointers' head and their number */
+  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
+    if(size_segs[i]->local_alloc){
+      chunk_ptr_num += size_segs[i]->chunk_num;
+      assert(local_chunks[i]);
+      if(!chunk_ptrs)
+        chunk_ptrs = local_chunks[i];
+    }
+  }
+  
+  /* Put local pfc to the according pools */
+  for(unsigned int i = 0; i < chunk_ptr_num; ++i){
+    if(chunk_ptrs[i])
+      sspace_put_pfc(sspace, chunk_ptrs[i]);
+  }
+  
+  /* Free mem for local chunk pointers */
+  STD_FREE(chunk_ptrs);
+  
+  /* Free mem for size segments (Chunk_Header**) */
+  STD_FREE(local_chunks);
 }
 
 extern void mark_sweep_sspace(Collector *collector);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h Wed Jul 25 03:02:07 2007
@@ -26,8 +26,7 @@
  * The sweep space accomodates objects collected by mark-sweep
  */
 
-#define ONLY_SSPACE_IN_HEAP
-
+struct Size_Segment;
 struct Free_Chunk_List;
 
 typedef struct Sspace {
@@ -42,15 +41,23 @@
   unsigned int collect_algorithm;
   GC *gc;
   Boolean move_object;
-  /* Size allocted after last collection. Not available in fspace now. */
-  unsigned int alloced_size;
-  /* For_statistic: not available now for fspace */
-  unsigned int surviving_size;
+
+  /* Size allocated since last minor collection. */
+  volatile POINTER_SIZE_INT last_alloced_size;
+  /* Size allocated since last major collection. */
+  volatile POINTER_SIZE_INT accumu_alloced_size;
+  /* Total size allocated since VM starts. */
+  volatile POINTER_SIZE_INT total_alloced_size;
+
+  /* Size survived from last collection. */
+  POINTER_SIZE_INT last_surviving_size;
+  /* Size survived after a certain period. */
+  POINTER_SIZE_INT period_surviving_size;  
+
   /* END of Space --> */
   
-  Pool **small_pfc_pools;
-  Pool **medium_pfc_pools;
-  Pool **large_pfc_pools;
+  Size_Segment **size_segments;
+  Pool ***pfc_pools;
   Free_Chunk_List *aligned_free_chunk_lists;
   Free_Chunk_List *unaligned_free_chunk_lists;
   Free_Chunk_List *hyper_free_chunk_list;
@@ -59,16 +66,24 @@
 void sspace_initialize(GC *gc, void *start, unsigned int sspace_size, unsigned int commit_size);
 void sspace_destruct(Sspace *sspace);
 
-void *sspace_fast_alloc(unsigned size, Allocator *allocator);
+void *sspace_thread_local_alloc(unsigned size, Allocator *allocator);
 void *sspace_alloc(unsigned size, Allocator *allocator);
 
 void sspace_reset_for_allocation(Sspace *sspace);
 
 void sspace_collection(Sspace *sspace);
 
-void mutator_init_small_chunks(Mutator *mutator);
+void allocator_init_local_chunks(Allocator *allocator);
+void allocactor_destruct_local_chunks(Allocator *allocator);
 void collector_init_free_chunk_list(Collector *collector);
 
 POINTER_SIZE_INT sspace_free_memory_size(Sspace *sspace);
+
+
+#ifndef ONLY_SSPACE_IN_HEAP
+#define gc_get_sspace(gc) ((Sspace*)gc_get_mos((GC_Gen*)(gc)))
+#else
+#define gc_get_sspace(gc) (gc_ms_get_sspace((GC_MS*)(gc)));
+#endif
 
 #endif // _SWEEP_SPACE_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp Wed Jul 25 03:02:07 2007
@@ -17,6 +17,7 @@
 #include "sspace.h"
 #include "sspace_chunk.h"
 #include "sspace_mark_sweep.h"
+#include "gc_ms.h"
 #include "../gen/gen.h"
 
 static Boolean slot_is_alloc_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
@@ -105,9 +106,9 @@
 }
 
 
-/* 1. No need of synchronization. This is a mutator local chunk no matter it is a small or medium obj chunk.
+/* 1. No need of synchronization. This is an allocator local chunk no matter it is a small or medium obj chunk.
  * 2. If this chunk runs out of space, clear the chunk pointer.
- *    So it is important to give an argument which is a local chunk pointer of a mutator while invoking this func.
+ *    So it is important to give an argument which is a local chunk pointer of an allocator while invoking this func.
  */
 static void *alloc_in_chunk(Chunk_Header* &chunk)
 {
@@ -132,143 +133,107 @@
 }
 
 /* alloc small without-fin object in sspace without getting new free chunk */
-void *sspace_fast_alloc(unsigned size, Allocator *allocator)
+void *sspace_thread_local_alloc(unsigned size, Allocator *allocator)
 {
   if(size > SUPER_OBJ_THRESHOLD) return NULL;
   
-  if(size <= MEDIUM_OBJ_THRESHOLD){  /* small object */
-    size = SMALL_SIZE_ROUNDUP(size);
-    Chunk_Header **small_chunks = ((Mutator*)allocator)->small_chunks;
-    unsigned int index = SMALL_SIZE_TO_INDEX(size);
-    
-    if(!small_chunks[index]){
-      Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
-      Chunk_Header *chunk = sspace_get_small_pfc(sspace, index);
-      //if(!chunk)
-        //chunk = sspace_steal_small_pfc(sspace, index);
-      if(!chunk) return NULL;
-      small_chunks[index] = chunk;
-    }
-    return alloc_in_chunk(small_chunks[index]);
-  } else if(size <= LARGE_OBJ_THRESHOLD){  /* medium object */
-    size = MEDIUM_SIZE_ROUNDUP(size);
-    Chunk_Header **medium_chunks = ((Mutator*)allocator)->medium_chunks;
-    unsigned int index = MEDIUM_SIZE_TO_INDEX(size);
-    
-    if(!medium_chunks[index]){
-      Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
-      Chunk_Header *chunk = sspace_get_medium_pfc(sspace, index);
-      //if(!chunk)
-        //chunk = sspace_steal_medium_pfc(sspace, index);
+  Sspace *sspace = gc_get_sspace(allocator->gc);
+  void *p_obj = NULL;
+  
+  unsigned int seg_index = 0;
+  Size_Segment *size_seg = sspace->size_segments[0];
+  
+  for(; seg_index < SIZE_SEGMENT_NUM; ++seg_index, ++size_seg)
+    if(size <= size_seg->size_max) break;
+  assert(seg_index < SIZE_SEGMENT_NUM);
+  
+  size = NORMAL_SIZE_ROUNDUP(size, size_seg);
+  unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_seg);
+  Boolean local_alloc = size_seg->local_alloc;
+  Chunk_Header *chunk = NULL;
+  
+  if(local_alloc){
+    Chunk_Header **chunks = allocator->local_chunks[seg_index];
+    chunk = chunks[index];
+    if(!chunk){
+      chunk = sspace_get_pfc(sspace, seg_index, index);
+      //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index);
       if(!chunk) return NULL;
-      medium_chunks[index] = chunk;
+      chunk->status |= CHUNK_IN_USE;
+      chunks[index] = chunk;
     }
-    return alloc_in_chunk(medium_chunks[index]);
-  } else {  /* large object */
-    assert(size <= SUPER_OBJ_THRESHOLD);
-    size = LARGE_SIZE_ROUNDUP(size);
-    unsigned int index = LARGE_SIZE_TO_INDEX(size);
-    Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
-    Chunk_Header *chunk = sspace_get_large_pfc(sspace, index);
-    //if(!chunk)
-      //chunk = sspace_steal_large_pfc(sspace, index);
+    p_obj = alloc_in_chunk(chunks[index]);
+  } else {
+    chunk = sspace_get_pfc(sspace, seg_index, index);
+    //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index);
     if(!chunk) return NULL;
-    void *p_obj = alloc_in_chunk(chunk);
+    p_obj = alloc_in_chunk(chunk);
     if(chunk)
-      sspace_put_large_pfc(sspace, chunk, index);
-    return p_obj;
+      sspace_put_pfc(sspace, chunk);
   }
-}
+  
+  assert(p_obj);
+
+#ifdef SSPACE_ALLOC_INFO
+  sspace_alloc_info(size);
+#endif
+#ifdef SSPACE_VERIFY
+  sspace_verify_alloc(p_obj, size);
+#endif
 
-static void *alloc_small_obj(unsigned size, Allocator *allocator)
+  return p_obj;
+}
+static void *sspace_alloc_normal_obj(Sspace *sspace, unsigned size, Allocator *allocator)
 {
-  assert(size <= MEDIUM_OBJ_THRESHOLD);
-  assert(!(size & SMALL_GRANULARITY_LOW_MASK));
+  void *p_obj = NULL;
   
-  Chunk_Header **small_chunks = ((Mutator*)allocator)->small_chunks;
-  unsigned int index = SMALL_SIZE_TO_INDEX(size);
-  if(!small_chunks[index]){
-    Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
-    Chunk_Header *chunk = sspace_get_small_pfc(sspace, index);
-    //if(!chunk)
-      //chunk = sspace_steal_small_pfc(sspace, index);
-    if(!chunk){
-      chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace);
-      if(chunk){
-        normal_chunk_init(chunk, size);
-      } else {
-        /*chunk = sspace_steal_small_pfc(sspace, index);
-        if(!chunk)*/ return NULL;
-      }
-    }
-    chunk->status |= CHUNK_IN_USE | CHUNK_NORMAL;
-    small_chunks[index] = chunk;
-  }
+  unsigned int seg_index = 0;
+  Size_Segment *size_seg = sspace->size_segments[0];
   
-  return alloc_in_chunk(small_chunks[index]);
-}
-
-static void *alloc_medium_obj(unsigned size, Allocator *allocator)
-{
-  assert((size > MEDIUM_OBJ_THRESHOLD) && (size <= LARGE_OBJ_THRESHOLD));
-  assert(!(size & MEDIUM_GRANULARITY_LOW_MASK));
+  for(; seg_index < SIZE_SEGMENT_NUM; ++seg_index, ++size_seg)
+    if(size <= size_seg->size_max) break;
+  assert(seg_index < SIZE_SEGMENT_NUM);
+  
+  size = NORMAL_SIZE_ROUNDUP(size, size_seg);
+  unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_seg);
+  Boolean local_alloc = size_seg->local_alloc;
+  Chunk_Header *chunk = NULL;
   
-  Chunk_Header **medium_chunks = ((Mutator*)allocator)->medium_chunks;
-  unsigned int index = MEDIUM_SIZE_TO_INDEX(size);
-  if(!medium_chunks[index]){
-    Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
-    Chunk_Header *chunk = sspace_get_medium_pfc(sspace, index);
-    //if(!chunk)
-      //chunk = sspace_steal_medium_pfc(sspace, index);
+  if(local_alloc){
+    Chunk_Header **chunks = allocator->local_chunks[seg_index];
+    chunk = chunks[index];
     if(!chunk){
-      chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace);
-      if(chunk){
-        normal_chunk_init(chunk, size);
-      } else {
-        /*chunk = sspace_steal_medium_pfc(sspace, index);
-        if(!chunk) */return NULL;
+      chunk = sspace_get_pfc(sspace, seg_index, index);
+      if(!chunk){
+        chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace);
+        if(chunk) normal_chunk_init(chunk, size);
       }
+      //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index);
+      if(!chunk) return NULL;
+      chunk->status |= CHUNK_IN_USE;
+      chunks[index] = chunk;
     }
-    chunk->status |= CHUNK_IN_USE | CHUNK_NORMAL;
-    medium_chunks[index] = chunk;
-  }
-  
-  return alloc_in_chunk(medium_chunks[index]);
-}
-
-/* FIXME:: this is a simple version. It may return NULL while there are still pfc in pool put by other mutators */
-static void *alloc_large_obj(unsigned size, Allocator *allocator)
-{
-  assert((size > LARGE_OBJ_THRESHOLD) && (size <= SUPER_OBJ_THRESHOLD));
-  assert(!(size & LARGE_GRANULARITY_LOW_MASK));
-  
-  Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
-  unsigned int index = LARGE_SIZE_TO_INDEX(size);
-  Chunk_Header *chunk = sspace_get_large_pfc(sspace, index);
-  //if(!chunk)
-    //chunk = sspace_steal_large_pfc(sspace, index);
-  if(!chunk){
-    chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace);
-    if(chunk){
-      normal_chunk_init(chunk, size);
-    } else {
-      /*chunk = sspace_steal_large_pfc(sspace, index);
-      if(!chunk)*/ return NULL;
+    p_obj = alloc_in_chunk(chunks[index]);
+  } else {
+    chunk = sspace_get_pfc(sspace, seg_index, index);
+    if(!chunk){
+      chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace);
+      if(chunk) normal_chunk_init(chunk, size);
     }
+    //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index);
+    if(!chunk) return NULL;
+    p_obj = alloc_in_chunk(chunk);
+    if(chunk)
+      sspace_put_pfc(sspace, chunk);
   }
-  chunk->status |= CHUNK_NORMAL;
   
-  void *p_obj = alloc_in_chunk(chunk);
-  if(chunk)
-    sspace_put_large_pfc(sspace, chunk, index);
   return p_obj;
 }
 
-static void *alloc_super_obj(unsigned size, Allocator *allocator)
+static void *sspace_alloc_super_obj(Sspace *sspace, unsigned size, Allocator *allocator)
 {
   assert(size > SUPER_OBJ_THRESHOLD);
-  
-  Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
+
   unsigned int chunk_size = SUPER_SIZE_ROUNDUP(size);
   assert(chunk_size > SUPER_OBJ_THRESHOLD);
   assert(!(chunk_size & CHUNK_GRANULARITY_LOW_MASK));
@@ -281,7 +246,6 @@
   
   if(!chunk) return NULL;
   abnormal_chunk_init(chunk, chunk_size, size);
-  chunk->status = CHUNK_IN_USE | CHUNK_ABNORMAL;
   chunk->table[0] = cur_alloc_color;
   set_super_obj_mask(chunk->base);
   assert(get_obj_info_raw((Partial_Reveal_Object*)chunk->base) & SUPER_OBJ_MASK);
@@ -291,14 +255,22 @@
 
 static void *sspace_try_alloc(unsigned size, Allocator *allocator)
 {
-  if(size <= MEDIUM_OBJ_THRESHOLD)
-    return alloc_small_obj(SMALL_SIZE_ROUNDUP(size), allocator);
-  else if(size <= LARGE_OBJ_THRESHOLD)
-    return alloc_medium_obj(MEDIUM_SIZE_ROUNDUP(size), allocator);
-  else if(size <= SUPER_OBJ_THRESHOLD)
-    return alloc_large_obj(LARGE_SIZE_ROUNDUP(size), allocator);
+  Sspace *sspace = gc_get_sspace(allocator->gc);
+  void *p_obj = NULL;
+  
+  if(size <= SUPER_OBJ_THRESHOLD)
+    p_obj = sspace_alloc_normal_obj(sspace, size, allocator);
   else
-    return alloc_super_obj(size, allocator);
+    p_obj = sspace_alloc_super_obj(sspace, size, allocator);
+
+#ifdef SSPACE_ALLOC_INFO
+  if(p_obj) sspace_alloc_info(size);
+#endif
+#ifdef SSPACE_VERIFY
+  if(p_obj) sspace_verify_alloc(p_obj, size);
+#endif
+
+  return p_obj;
 }
 
 /* FIXME:: the collection should be seperated from the alloation */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp Wed Jul 25 03:02:07 2007
@@ -16,63 +16,68 @@
 
 #include "sspace_chunk.h"
 
-/* PFC stands for partially free chunk */
-#define SMALL_PFC_POOL_NUM    SMALL_LOCAL_CHUNK_NUM
-#define MEDIUM_PFC_POOL_NUM   MEDIUM_LOCAL_CHUNK_NUM
-#define LARGE_PFC_POOL_NUM    ((SUPER_OBJ_THRESHOLD - LARGE_OBJ_THRESHOLD) >> LARGE_GRANULARITY_BITS)
 #define NUM_ALIGNED_FREE_CHUNK_BUCKET   (HYPER_OBJ_THRESHOLD >> NORMAL_CHUNK_SHIFT_COUNT)
 #define NUM_UNALIGNED_FREE_CHUNK_BUCKET (HYPER_OBJ_THRESHOLD >> CHUNK_GRANULARITY_BITS)
 
-
 /* PFC stands for partially free chunk */
-static Pool  *small_pfc_pools[SMALL_PFC_POOL_NUM];
-static Pool  *medium_pfc_pools[MEDIUM_PFC_POOL_NUM];
-static Pool  *large_pfc_pools[LARGE_PFC_POOL_NUM];
+static Size_Segment *size_segments[SIZE_SEGMENT_NUM];
+static Pool **pfc_pools[SIZE_SEGMENT_NUM];
+static Boolean  *pfc_steal_flags[SIZE_SEGMENT_NUM];
+
 static Free_Chunk_List  aligned_free_chunk_lists[NUM_ALIGNED_FREE_CHUNK_BUCKET];
 static Free_Chunk_List  unaligned_free_chunk_lists[NUM_UNALIGNED_FREE_CHUNK_BUCKET];
 static Free_Chunk_List  hyper_free_chunk_list;
 
-static Boolean  small_pfc_steal_flags[SMALL_PFC_POOL_NUM];
-static Boolean  medium_pfc_steal_flags[MEDIUM_PFC_POOL_NUM];
-static Boolean  large_pfc_steal_flags[LARGE_PFC_POOL_NUM];
+
+static void init_size_segment(Size_Segment *seg, unsigned int size_min, unsigned int size_max, unsigned int gran_shift_bits, Boolean local_alloc)
+{
+  seg->size_min = size_min;
+  seg->size_max = size_max;
+  seg->local_alloc = local_alloc;
+  seg->chunk_num = (seg->size_max - seg->size_min) >> gran_shift_bits;
+  seg->gran_shift_bits = gran_shift_bits;
+  seg->granularity = (POINTER_SIZE_INT)(1 << gran_shift_bits);
+  seg->gran_low_mask = seg->granularity - 1;
+  seg->gran_high_mask = ~seg->gran_low_mask;
+}
 
 void sspace_init_chunks(Sspace *sspace)
 {
-  unsigned int i;
+  unsigned int i, j;
   
-  /* Init small obj partially free chunk pools */
-  for(i=SMALL_PFC_POOL_NUM; i--;){
-    small_pfc_steal_flags[i] = FALSE;
-    small_pfc_pools[i] = sync_pool_create();
-  }
-  
-  /* Init medium obj partially free chunk pools */
-  for(i=MEDIUM_PFC_POOL_NUM; i--;){
-    medium_pfc_steal_flags[i] = FALSE;
-    medium_pfc_pools[i] = sync_pool_create();
-  }
-  
-  /* Init large obj partially free chunk pools */
-  for(i=LARGE_PFC_POOL_NUM; i--;){
-    large_pfc_steal_flags[i] = FALSE;
-    large_pfc_pools[i] = sync_pool_create();
+  /* Init size segments */
+  Size_Segment *size_seg_start = (Size_Segment*)STD_MALLOC(sizeof(Size_Segment) * SIZE_SEGMENT_NUM);
+  for(i = SIZE_SEGMENT_NUM; i--;){
+    size_segments[i] = size_seg_start + i;
+    size_segments[i]->seg_index = i;
+  }
+  init_size_segment(size_segments[0], 0, MEDIUM_OBJ_THRESHOLD, SMALL_GRANULARITY_BITS, SMALL_IS_LOCAL_ALLOC);
+  init_size_segment(size_segments[1], MEDIUM_OBJ_THRESHOLD, LARGE_OBJ_THRESHOLD, MEDIUM_GRANULARITY_BITS, MEDIUM_IS_LOCAL_ALLOC);
+  init_size_segment(size_segments[2], LARGE_OBJ_THRESHOLD, SUPER_OBJ_THRESHOLD, LARGE_GRANULARITY_BITS, LARGE_IS_LOCAL_ALLOC);
+  
+  /* Init partially free chunk pools */
+  for(i = SIZE_SEGMENT_NUM; i--;){
+    pfc_pools[i] = (Pool**)STD_MALLOC(sizeof(Pool*) * size_segments[i]->chunk_num);
+    pfc_steal_flags[i] = (Boolean*)STD_MALLOC(sizeof(Boolean) * size_segments[i]->chunk_num);
+    for(j=size_segments[i]->chunk_num; j--;){
+      pfc_pools[i][j] = sync_pool_create();
+      pfc_steal_flags[i][j] = FALSE;
+    }
   }
   
   /* Init aligned free chunk lists */
-  for(i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+  for(i = NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
     free_chunk_list_init(&aligned_free_chunk_lists[i]);
   
   /* Init nonaligned free chunk lists */
-  for(i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+  for(i = NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
     free_chunk_list_init(&unaligned_free_chunk_lists[i]);
   
   /* Init super free chunk lists */
   free_chunk_list_init(&hyper_free_chunk_list);
-    
-  /* Init Sspace struct's chunk fields */
-  sspace->small_pfc_pools = small_pfc_pools;
-  sspace->medium_pfc_pools = medium_pfc_pools;
-  sspace->large_pfc_pools = large_pfc_pools;
+  
+  sspace->size_segments = size_segments;
+  sspace->pfc_pools = pfc_pools;
   sspace->aligned_free_chunk_lists = aligned_free_chunk_lists;
   sspace->unaligned_free_chunk_lists = unaligned_free_chunk_lists;
   sspace->hyper_free_chunk_list = &hyper_free_chunk_list;
@@ -85,7 +90,7 @@
   sspace_put_free_chunk(sspace, free_chunk);
 }
 
-static void pfc_pool_set_steal_flag(Pool *pool, unsigned int steal_threshold, unsigned int &steal_flag)
+static void pfc_pool_set_steal_flag(Pool *pool, unsigned int steal_threshold, Boolean &steal_flag)
 {
   Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pool);
   while(chunk){
@@ -105,29 +110,16 @@
 
 void sspace_clear_chunk_list(GC *gc)
 {
-  unsigned int i;
+  unsigned int i, j;
   unsigned int collector_num = gc->num_collectors;
-  unsigned int steal_threshold;
+  unsigned int steal_threshold = collector_num << PFC_STEAL_THRESHOLD;
   
-  steal_threshold = collector_num << SMALL_PFC_STEAL_THRESHOLD;
-  for(i=SMALL_PFC_POOL_NUM; i--;){
-    Pool *pool = small_pfc_pools[i];
-    pfc_pool_set_steal_flag(pool, steal_threshold, small_pfc_steal_flags[i]);
-    empty_pool(pool);
-  }
-  
-  steal_threshold = collector_num << MEDIUM_PFC_STEAL_THRESHOLD;
-  for(i=MEDIUM_PFC_POOL_NUM; i--;){
-    Pool *pool = medium_pfc_pools[i];
-    pfc_pool_set_steal_flag(pool, steal_threshold, medium_pfc_steal_flags[i]);
-    empty_pool(pool);
-  }
-  
-  steal_threshold = collector_num << LARGE_PFC_STEAL_THRESHOLD;
-  for(i=LARGE_PFC_POOL_NUM; i--;){
-    Pool *pool = large_pfc_pools[i];
-    pfc_pool_set_steal_flag(pool, steal_threshold, large_pfc_steal_flags[i]);
-    empty_pool(pool);
+  for(i = SIZE_SEGMENT_NUM; i--;){
+    for(j = size_segments[i]->chunk_num; j--;){
+      Pool *pool = pfc_pools[i][j];
+      pfc_pool_set_steal_flag(pool, steal_threshold, pfc_steal_flags[i][j]);
+      empty_pool(pool);
+    }
   }
   
   for(i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
@@ -141,12 +133,17 @@
   /* release small obj chunks of each mutator */
   Mutator *mutator = gc->mutator_list;
   while(mutator){
-    Chunk_Header **chunks = mutator->small_chunks;
-    for(i=SMALL_LOCAL_CHUNK_NUM; i--;)
-      chunks[i] = NULL;
-    chunks = mutator->medium_chunks;
-    for(i=MEDIUM_LOCAL_CHUNK_NUM; i--;)
-      chunks[i] = NULL;
+    Chunk_Header ***local_chunks = mutator->local_chunks;
+    for(i = SIZE_SEGMENT_NUM; i--;){
+      if(!size_segments[i]->local_alloc){
+        assert(!local_chunks[i]);
+        continue;
+      }
+      Chunk_Header **chunks = local_chunks[i];
+      assert(chunks);
+      for(j = size_segments[i]->chunk_num; j--;)
+        chunks[j] = NULL;
+    }
     mutator = mutator->next;
   }
 }
@@ -377,38 +374,15 @@
 
 #define min_value(x, y) (((x) < (y)) ? (x) : (y))
 
-Chunk_Header *sspace_steal_small_pfc(Sspace *sspace, unsigned int index)
+Chunk_Header *sspace_steal_pfc(Sspace *sspace, unsigned int seg_index, unsigned int index)
 {
+  Size_Segment *size_seg = sspace->size_segments[seg_index];
   Chunk_Header *pfc = NULL;
-  unsigned int max_index = min_value(index + SMALL_PFC_STEAL_NUM + 1, SMALL_PFC_POOL_NUM);
+  unsigned int max_index = min_value(index + PFC_STEAL_NUM + 1, size_seg->chunk_num);
   ++index;
   for(; index < max_index; ++index){
-    if(!small_pfc_steal_flags[index]) continue;
-    pfc = sspace_get_small_pfc(sspace, index);
-    if(pfc) return pfc;
-  }
-  return NULL;
-}
-Chunk_Header *sspace_steal_medium_pfc(Sspace *sspace, unsigned int index)
-{
-  Chunk_Header *pfc = NULL;
-  unsigned int max_index = min_value(index + MEDIUM_PFC_STEAL_NUM + 1, MEDIUM_PFC_POOL_NUM);
-  ++index;
-  for(; index < max_index; ++index){
-    if(!medium_pfc_steal_flags[index]) continue;
-    pfc = sspace_get_medium_pfc(sspace, index);
-    if(pfc) return pfc;
-  }
-  return NULL;
-}
-Chunk_Header *sspace_steal_large_pfc(Sspace *sspace, unsigned int index)
-{
-  Chunk_Header *pfc = NULL;
-  unsigned int max_index = min_value(index + LARGE_PFC_STEAL_NUM + 1, LARGE_PFC_POOL_NUM);
-  ++index;
-  for(; index < max_index; ++index){
-    if(!large_pfc_steal_flags[index]) continue;
-    pfc = sspace_get_large_pfc(sspace, index);
+    if(!pfc_steal_flags[seg_index][index]) continue;
+    pfc = sspace_get_pfc(sspace, seg_index, index);
     if(pfc) return pfc;
   }
   return NULL;
@@ -479,42 +453,27 @@
   return live_num;
 }
 
-enum Obj_Type {
-  SMALL_OBJ,
-  MEDIUM_OBJ,
-  LARGE_OBJ
-};
-static unsigned int index_to_size(unsigned int index, Obj_Type type)
+static void pfc_pools_info(Sspace *sspace, Boolean before_gc)
 {
-  if(type == SMALL_OBJ)
-    return SMALL_INDEX_TO_SIZE(index);
-  if(type == MEDIUM_OBJ)
-    return MEDIUM_INDEX_TO_SIZE(index);
-  assert(type == LARGE_OBJ);
-  return LARGE_INDEX_TO_SIZE(index);
-}
-
-static void pfc_pools_info(Sspace *sspace, Pool **pools, unsigned int pool_num, Obj_Type type, Boolean before_gc)
-{
-  unsigned int index;
-  
-  for(index = 0; index < pool_num; ++index){
-    Pool *pool = pools[index];
-    Chunk_Header *chunk = NULL;
-    unsigned int chunk_counter = 0;
-    unsigned int slot_num = 0;
-    unsigned int live_num = 0;
-    pool_iterator_init(pool);
-    while(chunk = (Chunk_Header*)pool_iterator_next(pool)){
-      ++chunk_counter;
-      slot_num += chunk->slot_num;
-      live_num += pfc_info(chunk, before_gc);
-    }
-    if(slot_num){
-      printf("Size: %x\tchunk num: %d\tlive obj: %d\ttotal obj: %d\tLive Ratio: %f\n", index_to_size(index, type), chunk_counter, live_num, slot_num, (float)live_num/slot_num);
-      assert(live_num < slot_num);
-      free_mem_size += index_to_size(index, type) * (slot_num-live_num);
-      assert(free_mem_size < sspace->committed_heap_size);
+  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
+    for(unsigned int j = 0; j < size_segments[i]->chunk_num; ++j){
+      Pool *pool = pfc_pools[i][j];
+      Chunk_Header *chunk = NULL;
+      unsigned int chunk_counter = 0;
+      unsigned int slot_num = 0;
+      unsigned int live_num = 0;
+      pool_iterator_init(pool);
+      while(chunk = (Chunk_Header*)pool_iterator_next(pool)){
+        ++chunk_counter;
+        slot_num += chunk->slot_num;
+        live_num += pfc_info(chunk, before_gc);
+      }
+      if(slot_num){
+        printf("Size: %x\tchunk num: %d\tlive obj: %d\ttotal obj: %d\tLive Ratio: %f\n", NORMAL_INDEX_TO_SIZE(j, size_segments[i]), chunk_counter, live_num, slot_num, (float)live_num/slot_num);
+        assert(live_num < slot_num);
+        free_mem_size += NORMAL_INDEX_TO_SIZE(j, size_segments[i]) * (slot_num-live_num);
+        assert(free_mem_size < sspace->committed_heap_size);
+      }
     }
   }
 }
@@ -554,14 +513,8 @@
 {
   if(!before_gc) return;
   
-  printf("\n\nSMALL PFC INFO:\n\n");
-  pfc_pools_info(sspace, small_pfc_pools, SMALL_PFC_POOL_NUM, SMALL_OBJ, before_gc);
-  
-  printf("\n\nMEDIUM PFC INFO:\n\n");
-  pfc_pools_info(sspace, medium_pfc_pools, MEDIUM_PFC_POOL_NUM, MEDIUM_OBJ, before_gc);
-  
-  printf("\n\nLARGE PFC INFO:\n\n");
-  pfc_pools_info(sspace, large_pfc_pools, LARGE_PFC_POOL_NUM, LARGE_OBJ, before_gc);
+  printf("\n\nPFC INFO:\n\n");
+  pfc_pools_info(sspace, before_gc);
   
   printf("\n\nALIGNED FREE CHUNK INFO:\n\n");
   free_lists_info(sspace, aligned_free_chunk_lists, NUM_ALIGNED_FREE_CHUNK_BUCKET, ALIGNED_CHUNK);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h Wed Jul 25 03:02:07 2007
@@ -154,7 +154,7 @@
   assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + NORMAL_CHUNK_SIZE_BYTES);
   
   chunk->next = NULL;
-  chunk->status = CHUNK_NEED_ZEROING;
+  chunk->status = CHUNK_NORMAL | CHUNK_NEED_ZEROING;
   chunk->slot_size = slot_size;
   chunk->slot_num = NORMAL_CHUNK_SLOT_NUM(chunk);
   chunk->slot_index = 0;
@@ -170,7 +170,7 @@
   assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + chunk_size);
   
   chunk->next = NULL;
-  chunk->status = CHUNK_NIL;
+  chunk->status = CHUNK_IN_USE | CHUNK_ABNORMAL;
   chunk->slot_size = obj_size;
   chunk->slot_num = 1;
   chunk->slot_index = 0;
@@ -194,130 +194,69 @@
 #define LARGE_GRANULARITY_BITS  7
 #define CHUNK_GRANULARITY_BITS  10
 
-#define SMALL_GRANULARITY       (1 << SMALL_GRANULARITY_BITS)
-#define MEDIUM_GRANULARITY      (1 << MEDIUM_GRANULARITY_BITS)
-#define LARGE_GRANULARITY       (1 << LARGE_GRANULARITY_BITS)
 #define CHUNK_GRANULARITY       (1 << CHUNK_GRANULARITY_BITS)
-
-#define SMALL_GRANULARITY_LOW_MASK    ((POINTER_SIZE_INT)(SMALL_GRANULARITY-1))
-#define SMALL_GRANULARITY_HIGH_MASK   (~SMALL_GRANULARITY_LOW_MASK)
-#define MEDIUM_GRANULARITY_LOW_MASK   ((POINTER_SIZE_INT)(MEDIUM_GRANULARITY-1))
-#define MEDIUM_GRANULARITY_HIGH_MASK  (~MEDIUM_GRANULARITY_LOW_MASK)
-#define LARGE_GRANULARITY_LOW_MASK    ((POINTER_SIZE_INT)(LARGE_GRANULARITY-1))
-#define LARGE_GRANULARITY_HIGH_MASK   (~LARGE_GRANULARITY_LOW_MASK)
 #define CHUNK_GRANULARITY_LOW_MASK    ((POINTER_SIZE_INT)(CHUNK_GRANULARITY-1))
 #define CHUNK_GRANULARITY_HIGH_MASK   (~CHUNK_GRANULARITY_LOW_MASK)
 
-#define SMALL_LOCAL_CHUNK_NUM   (MEDIUM_OBJ_THRESHOLD >> SMALL_GRANULARITY_BITS)
-#define MEDIUM_LOCAL_CHUNK_NUM  ((LARGE_OBJ_THRESHOLD - MEDIUM_OBJ_THRESHOLD) >> MEDIUM_GRANULARITY_BITS)
+#define SMALL_IS_LOCAL_ALLOC   TRUE
+#define MEDIUM_IS_LOCAL_ALLOC  TRUE
+#define LARGE_IS_LOCAL_ALLOC  FALSE
 
-#define SMALL_SIZE_ROUNDUP(size)    (size)
-#define MEDIUM_SIZE_ROUNDUP(size)   (((size) + MEDIUM_GRANULARITY-1) & MEDIUM_GRANULARITY_HIGH_MASK)
-#define LARGE_SIZE_ROUNDUP(size)    (((size) + LARGE_GRANULARITY-1) & LARGE_GRANULARITY_HIGH_MASK)
+#define NORMAL_SIZE_ROUNDUP(size, seg)  (((size) + (seg)->granularity-1) & (seg)->gran_high_mask)
 #define SUPER_OBJ_TOTAL_SIZE(size)  (sizeof(Chunk_Header) + (size))
 #define SUPER_SIZE_ROUNDUP(size)    ((SUPER_OBJ_TOTAL_SIZE(size) + CHUNK_GRANULARITY-1) & CHUNK_GRANULARITY_HIGH_MASK)
 
-#define SMALL_SIZE_TO_INDEX(size)   (((size) >> SMALL_GRANULARITY_BITS) - 1)
-#define MEDIUM_SIZE_TO_INDEX(size)  ((((size)-MEDIUM_OBJ_THRESHOLD) >> MEDIUM_GRANULARITY_BITS) - 1)
-#define LARGE_SIZE_TO_INDEX(size)   ((((size)-LARGE_OBJ_THRESHOLD) >> LARGE_GRANULARITY_BITS) - 1)
+#define NORMAL_SIZE_TO_INDEX(size, seg) ((((size)-(seg)->size_min) >> (seg)->gran_shift_bits) - 1)
 #define ALIGNED_CHUNK_SIZE_TO_INDEX(size)     (((size) >> NORMAL_CHUNK_SHIFT_COUNT) - 1)
 #define UNALIGNED_CHUNK_SIZE_TO_INDEX(size)   (((size) >> CHUNK_GRANULARITY_BITS) - 1)
 
-#define SMALL_INDEX_TO_SIZE(index)  (((index) + 1) << SMALL_GRANULARITY_BITS)
-#define MEDIUM_INDEX_TO_SIZE(index) ((((index) + 1) << MEDIUM_GRANULARITY_BITS) + MEDIUM_OBJ_THRESHOLD)
-#define LARGE_INDEX_TO_SIZE(index)  ((((index) + 1) << LARGE_GRANULARITY_BITS) + LARGE_OBJ_THRESHOLD)
+#define NORMAL_INDEX_TO_SIZE(index, seg)  ((((index) + 1) << (seg)->gran_shift_bits) + (seg)->size_min)
 #define ALIGNED_CHUNK_INDEX_TO_SIZE(index)    (((index) + 1) << NORMAL_CHUNK_SHIFT_COUNT)
 #define UNALIGNED_CHUNK_INDEX_TO_SIZE(index)  (((index) + 1) << CHUNK_GRANULARITY_BITS)
 
-#define SMALL_PFC_STEAL_NUM   3
-#define MEDIUM_PFC_STEAL_NUM  3
-#define LARGE_PFC_STEAL_NUM   3
 
-#define SMALL_PFC_STEAL_THRESHOLD   3
-#define MEDIUM_PFC_STEAL_THRESHOLD  3
-#define LARGE_PFC_STEAL_THRESHOLD   3
+#define PFC_STEAL_NUM   3
+#define PFC_STEAL_THRESHOLD   3
 
+#define SIZE_SEGMENT_NUM  3
+typedef struct Size_Segment {
+  unsigned int size_min;
+  unsigned int size_max;
+  unsigned int seg_index;
+  Boolean local_alloc;
+  unsigned int chunk_num;
+  unsigned int gran_shift_bits;
+  POINTER_SIZE_INT granularity;
+  POINTER_SIZE_INT gran_low_mask;
+  POINTER_SIZE_INT gran_high_mask;
+} Size_Segment;
 
-inline Chunk_Header *sspace_get_small_pfc(Sspace *sspace, unsigned int index)
-{
-  Pool *pfc_pool = sspace->small_pfc_pools[index];
-  Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
-  assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
-  return chunk;
-}
-inline void sspace_put_small_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index)
-{
-  assert(chunk);
-  
-  Pool *pfc_pool = sspace->small_pfc_pools[index];
-  pool_put_entry(pfc_pool, chunk);
-}
-
-inline Chunk_Header *sspace_get_medium_pfc(Sspace *sspace, unsigned int index)
-{
-  Pool *pfc_pool = sspace->medium_pfc_pools[index];
-  Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
-  assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
-  return chunk;
-}
-inline void sspace_put_medium_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index)
-{
-  assert(chunk);
-  
-  Pool *pfc_pool = sspace->medium_pfc_pools[index];
-  pool_put_entry(pfc_pool, chunk);
-}
 
-inline Chunk_Header *sspace_get_large_pfc(Sspace *sspace, unsigned int index)
+inline Chunk_Header *sspace_get_pfc(Sspace *sspace, unsigned int seg_index, unsigned int index)
 {
-  Pool *pfc_pool = sspace->large_pfc_pools[index];
+  Pool *pfc_pool = sspace->pfc_pools[seg_index][index];
   Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
   assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
   return chunk;
 }
-inline void sspace_put_large_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index)
-{
-  assert(chunk);
-  
-  Pool *pfc_pool = sspace->large_pfc_pools[index];
-  pool_put_entry(pfc_pool, chunk);
-}
-
-/*
-inline Chunk_Header *sspace_get_pfc(Sspace *sspace, unsigned int size)
-{
-  assert(size <= SUPER_OBJ_THRESHOLD);
-  
-  if(size > LARGE_OBJ_THRESHOLD)
-    return sspace_get_large_pfc(sspace, size);
-  else if(size > MEDIUM_OBJ_THRESHOLD)
-    return sspace_get_medium_pfc(sspace, size);
-  return sspace_get_small_pfc(sspace, size);
-}
-*/
 
-inline void sspace_put_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int size)
+inline void sspace_put_pfc(Sspace *sspace, Chunk_Header *chunk)
 {
-  assert(size <= SUPER_OBJ_THRESHOLD);
+  unsigned int size = chunk->slot_size;
+  assert(chunk && (size <= SUPER_OBJ_THRESHOLD));
   
+  Size_Segment **size_segs = sspace->size_segments;
   chunk->status = CHUNK_NORMAL | CHUNK_NEED_ZEROING;
-  unsigned int index;
   
-  if(size > LARGE_OBJ_THRESHOLD){
-    assert(!(size & LARGE_GRANULARITY_LOW_MASK));
-    assert((size > LARGE_OBJ_THRESHOLD) && (size <= SUPER_OBJ_THRESHOLD));
-    index = LARGE_SIZE_TO_INDEX(size);
-    sspace_put_large_pfc(sspace, chunk, index);
-  } else if(size > MEDIUM_OBJ_THRESHOLD){
-    assert(!(size & MEDIUM_GRANULARITY_LOW_MASK));
-    assert((size > MEDIUM_OBJ_THRESHOLD) && (size <= LARGE_OBJ_THRESHOLD));
-    index = MEDIUM_SIZE_TO_INDEX(size);
-    sspace_put_medium_pfc(sspace, chunk, index);
-  } else {
-    assert(!(size & SMALL_GRANULARITY_LOW_MASK));
-    assert(size <= MEDIUM_OBJ_THRESHOLD);
-    index = SMALL_SIZE_TO_INDEX(size);
-    sspace_put_small_pfc(sspace, chunk, index);
+  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
+    if(size <= size_segs[i]->size_max){
+      assert(!(size & size_segs[i]->gran_low_mask));
+      assert(size > size_segs[i]->size_min);
+      unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_segs[i]);
+      Pool *pfc_pool = sspace->pfc_pools[i][index];
+      pool_put_entry(pfc_pool, chunk);
+      return;
+    }
   }
 }
 
@@ -328,9 +267,7 @@
 extern Free_Chunk *sspace_get_normal_free_chunk(Sspace *sspace);
 extern Free_Chunk *sspace_get_abnormal_free_chunk(Sspace *sspace, unsigned int chunk_size);
 extern Free_Chunk *sspace_get_hyper_free_chunk(Sspace *sspace, unsigned int chunk_size, Boolean is_normal_chunk);
-extern Chunk_Header *sspace_steal_small_pfc(Sspace *sspace, unsigned int index);
-extern Chunk_Header *sspace_steal_medium_pfc(Sspace *sspace, unsigned int index);
-extern Chunk_Header *sspace_steal_large_pfc(Sspace *sspace, unsigned int index);
+extern Chunk_Header *sspace_steal_pfc(Sspace *sspace, unsigned int seg_index, unsigned int index);
 
 extern void zeroing_free_chunk(Free_Chunk *chunk);
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp Wed Jul 25 03:02:07 2007
@@ -17,6 +17,18 @@
 #include "sspace_mark_sweep.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
+Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj)
+{
+  unsigned int index_in_word;
+  volatile POINTER_SIZE_INT *p_color_word = get_color_word_in_table(obj, index_in_word);
+  assert(p_color_word);
+  
+  POINTER_SIZE_INT color_word = *p_color_word;
+  POINTER_SIZE_INT mark_color = cur_mark_color << index_in_word;
+  
+  return (color_word & mark_color) != 0;
+}
+
 static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
 {
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
@@ -31,6 +43,7 @@
 
 static FORCE_INLINE void scan_object(Collector *collector, Partial_Reveal_Object *p_obj)
 {
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
   if(!object_has_ref_field(p_obj)) return;
   
   REF *p_ref;
@@ -173,4 +186,9 @@
   collector->trace_stack = NULL;
   
   return;
+}
+
+void trace_obj_in_ms_marking(Collector *collector, void *p_obj)
+{
+  trace_object(collector, (Partial_Reveal_Object *)p_obj);
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp Wed Jul 25 03:02:07 2007
@@ -16,6 +16,7 @@
 
 #include "sspace_mark_sweep.h"
 #include "sspace_verify.h"
+#include "gc_ms.h"
 #include "../gen/gen.h"
 #include "../thread/collector.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
@@ -64,7 +65,7 @@
 void mark_sweep_sspace(Collector *collector)
 {
   GC *gc = collector->gc;
-  Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc);
+  Sspace *sspace = gc_get_sspace(gc);
   
   unsigned int num_active_collectors = gc->num_active_collectors;
   

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp Wed Jul 25 03:02:07 2007
@@ -171,7 +171,7 @@
     //zeroing_free_areas_in_pfc((Chunk_Header*)chunk, live_num);
 #endif
     chunk_pad_last_index_word((Chunk_Header*)chunk, mark_mask_in_table);
-    sspace_put_pfc(sspace, chunk, chunk->slot_size);
+    sspace_put_pfc(sspace, chunk);
   }
   /* the rest: chunks with free rate < 0.1. we don't use them */
 #ifdef SSPACE_VERIFY

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp Wed Jul 25 03:02:07 2007
@@ -47,8 +47,12 @@
 void sspace_verify_init(GC *gc)
 {
   gc_in_verify = gc;
-  
+
+#ifndef ONLY_SSPACE_IN_HEAP
   POINTER_SIZE_INT heap_size = gc_gen_total_memory_size((GC_Gen*)gc);
+#else
+  POINTER_SIZE_INT heap_size = gc_ms_total_memory_size((GC_MS*)gc);
+#endif
   card_num = heap_size >> VERIFY_CARD_SIZE_BYTES_SHIFT;
   POINTER_SIZE_INT cards_size = sizeof(Verify_Card) * card_num;
   
@@ -268,7 +272,7 @@
   
   clear_verify_cards();
   
-  Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc);
+  Sspace *sspace = gc_get_sspace(gc);
   Chunk_Header *chunk = (Chunk_Header*)space_heap_start((Space*)sspace);
   Chunk_Header *sspace_ceiling = (Chunk_Header*)space_heap_end((Space*)sspace);
   POINTER_SIZE_INT total_live_obj = 0;
@@ -314,7 +318,7 @@
 /*
 void sspace_verify_super_obj(GC *gc)
 {
-  Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc);
+  Sspace *sspace = gc_get_sspace(gc);
   Chunk_Header *chunk = (Chunk_Header*)space_heap_start((Space*)sspace);
   Chunk_Header *sspace_ceiling = (Chunk_Header*)space_heap_end((Space*)sspace);
   

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp Wed Jul 25 03:02:07 2007
@@ -24,6 +24,7 @@
 #include "../mark_compact/mspace.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 #include "../common/space_tuner.h"
+#include "../mark_sweep/sspace.h"
 
 unsigned int MINOR_COLLECTORS = 0;
 unsigned int MAJOR_COLLECTORS = 0;
@@ -99,11 +100,14 @@
   collector_reset_weakref_sets(collector);
 #endif
 
+#ifndef ONLY_SSPACE_IN_HEAP
   /*For LOS_Shrink and LOS_Extend*/
   if(collector->gc->tuner->kind != TRANS_NOTHING){
     collector->non_los_live_obj_size = 0;
     collector->los_live_obj_size = 0;
   }
+#endif
+
   collector->result = TRUE;
   return;
 }
@@ -238,11 +242,19 @@
 
 struct GC_Gen;
 unsigned int gc_get_processor_num(GC_Gen*);
+#ifdef ONLY_SSPACE_IN_HEAP
+struct GC_MS;
+unsigned int gc_ms_get_processor_num(GC_MS *gc);
+#endif
 
 void collector_initialize(GC* gc)
 {
   //FIXME::
+#ifndef ONLY_SSPACE_IN_HEAP
   unsigned int num_processors = gc_get_processor_num((GC_Gen*)gc);
+#else
+  unsigned int num_processors = gc_ms_get_processor_num((GC_MS*)gc);
+#endif
   
   unsigned int nthreads = max( max( MAJOR_COLLECTORS, MINOR_COLLECTORS), max(NUM_COLLECTORS, num_processors)); 
 
@@ -259,6 +271,10 @@
     collector->thread_handle = (VmThreadHandle)(POINTER_SIZE_INT)i;
     collector->gc = gc;
     collector_init_thread(collector);
+
+#ifdef ONLY_SSPACE_IN_HEAP
+    collector_init_free_chunk_list(collector);
+#endif
     
     gc->collectors[i] = collector;
   }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h Wed Jul 25 03:02:07 2007
@@ -26,6 +26,7 @@
 
 struct Block_Header;
 struct Stealable_Stack;
+struct Chunk_Header;
 struct Free_Chunk_List;
 
 #define NORMAL_SIZE_SEGMENT_GRANULARITY_BITS  8
@@ -40,6 +41,7 @@
   void *ceiling;
   void *end;
   void *alloc_block;
+  Chunk_Header ***local_chunks;
   Space* alloc_space;
   GC* gc;
   VmThreadHandle thread_handle;   /* This thread; */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h Wed Jul 25 03:02:07 2007
@@ -68,6 +68,7 @@
     return NULL;
   }
 
+assert((((POINTER_SIZE_INT)p_targ_obj) % GC_OBJECT_ALIGNMENT) == 0);
 #ifdef USE_32BITS_HASHCODE
   if(obj_is_set_hashcode){
     memcpy(p_targ_obj, p_obj, size-GC_OBJECT_ALIGNMENT);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h Wed Jul 25 03:02:07 2007
@@ -47,6 +47,7 @@
   void *ceiling;
   void* end;
   Block *alloc_block;
+  Chunk_Header ***local_chunks;
   Space* alloc_space;
   GC   *gc;
   VmThreadHandle thread_handle;   /* This thread; */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp Wed Jul 25 03:02:07 2007
@@ -20,6 +20,7 @@
 
 #include "mutator.h"
 #include "../trace_forward/fspace.h"
+#include "../mark_sweep/sspace.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
 struct GC_Gen;
@@ -41,7 +42,11 @@
     mutator->obj_with_fin = finref_get_free_block(gc);
   else
     mutator->obj_with_fin = NULL;
-       
+
+#ifdef ONLY_SSPACE_IN_HEAP
+  allocator_init_local_chunks((Allocator*)mutator);
+#endif
+  
   lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 
   mutator->next = (Mutator *)gc->mutator_list;
@@ -63,6 +68,10 @@
 
   alloc_context_reset((Allocator*)mutator);
 
+#ifdef ONLY_SSPACE_IN_HEAP
+  allocactor_destruct_local_chunks((Allocator*)mutator);
+#endif
+
   if(gc_is_gen_mode()){ /* put back the remset when a mutator exits */
     pool_put_entry(gc->metadata->mutator_remset_pool, mutator->rem_set);
     mutator->rem_set = NULL;
@@ -114,4 +123,5 @@
   }  
   return;
 }
+
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h Wed Jul 25 03:02:07 2007
@@ -32,6 +32,7 @@
   void* ceiling;
   void* end;
   void* alloc_block;
+  Chunk_Header ***local_chunks;
   Space* alloc_space;
   GC* gc;
   VmThreadHandle thread_handle;   /* This thread; */
@@ -39,8 +40,6 @@
   
   Vector_Block* rem_set;
   Vector_Block* obj_with_fin;
-  Chunk_Header **small_chunks;
-  Chunk_Header **medium_chunks;
   Mutator* next;  /* The gc info area associated with the next active thread. */
 } Mutator;
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp Wed Jul 25 03:02:07 2007
@@ -19,9 +19,8 @@
  */
 
 #include "gc_thread.h"
-
 #include "../gen/gen.h"
-
+#include "../mark_sweep/gc_ms.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
 //#define GC_OBJ_SIZE_STATISTIC
@@ -73,14 +72,19 @@
   gc_alloc_statistic_obj_distrubution(size);
 #endif
 
+#ifndef ONLY_SSPACE_IN_HEAP
   if ( size > GC_OBJ_SIZE_THRESHOLD )
     p_obj = (Managed_Object_Handle)los_alloc(size, allocator);
-  else{
+  else
     p_obj = (Managed_Object_Handle)nos_alloc(size, allocator);
-  }
-  
+#else
+  p_obj = (Managed_Object_Handle)gc_ms_alloc(size, allocator);
+#endif
+
   if( p_obj == NULL )
     return NULL;
+
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
     
   obj_set_vt((Partial_Reveal_Object*)p_obj, (VT)ah);
   
@@ -111,9 +115,14 @@
  
   /* Try to allocate an object from the current Thread Local Block */
   Managed_Object_Handle p_obj;
+#ifndef ONLY_SSPACE_IN_HEAP
   p_obj = (Managed_Object_Handle)thread_local_alloc(size, allocator);
+#else
+  p_obj = (Managed_Object_Handle)gc_ms_fast_alloc(size, allocator);
+#endif
   if(p_obj == NULL) return NULL;
-   
+
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
   obj_set_vt((Partial_Reveal_Object*)p_obj, (VT)ah);
   
   return p_obj;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp Wed Jul 25 03:02:07 2007
@@ -19,6 +19,7 @@
  */
 
 #include "fspace.h"
+#include "../gen/gen.h"
 
 Boolean NOS_PARTIAL_FORWARD = FALSE;
 
@@ -75,6 +76,11 @@
   fspace->num_collections = 0;
   fspace->time_collections = 0;
   fspace->survive_ratio = 0.2f;
+  fspace->last_alloced_size = 0;
+  fspace->accumu_alloced_size = 0;  
+  fspace->total_alloced_size = 0;
+  fspace->last_surviving_size = 0;
+  fspace->period_surviving_size = 0;
   
   fspace->gc = gc;
   gc_set_nos((GC_Gen*)gc, (Space*)fspace);
@@ -98,7 +104,7 @@
   fspace_destruct_blocks(fspace);
   STD_FREE(fspace);   
 }
- 
+
 void fspace_reset_for_allocation(Fspace* fspace)
 { 
   unsigned int first_idx = fspace->first_block_idx;
@@ -106,7 +112,7 @@
   unsigned int marked_last_idx = 0;
   Boolean is_major_collection = !gc_match_kind(fspace->gc, MINOR_COLLECTION);
   Boolean gen_mode = gc_is_gen_mode();
-
+  
   if(  is_major_collection || 
          NOS_PARTIAL_FORWARD == FALSE || !gen_mode)            
   {
@@ -176,14 +182,13 @@
 
 void collector_execute_task(GC* gc, TaskType task_func, Space* space);
 
-#include "../gen/gen.h"
 unsigned int mspace_free_block_idx;
 
 /* world is stopped when starting fspace_collection */      
 void fspace_collection(Fspace *fspace)
 {
   fspace->num_collections++;  
-  
+
   GC* gc = fspace->gc;
   mspace_free_block_idx = ((GC_Gen*)gc)->mos->free_block_idx;
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp Wed Jul 25 03:02:07 2007
@@ -38,7 +38,7 @@
     Block_Header* alloc_block = (Block_Header*)&(fspace->blocks[allocated_idx - fspace->first_block_idx]);
     
     allocator_init_free_block(allocator, alloc_block);
-            
+
     return TRUE;
   }
 
@@ -84,5 +84,6 @@
   return p_return;
   
 }
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp Wed Jul 25 03:02:07 2007
@@ -46,6 +46,7 @@
 
 static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) 
 {
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
   if (!object_has_ref_field(p_obj)) return;
     
   REF *p_ref;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp Wed Jul 25 03:02:07 2007
@@ -37,6 +37,7 @@
 
 static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) 
 {
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
   if (!object_has_ref_field_before_scan(p_obj)) return;
     
   REF *p_ref;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_metadata.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_metadata.cpp Wed Jul 25 03:02:07 2007
@@ -68,7 +68,9 @@
   heap_verifier_metadata->new_objects_pool  = sync_pool_create();  
   heap_verifier_metadata->hashcode_pool_before_gc = sync_pool_create();
   heap_verifier_metadata->hashcode_pool_after_gc = sync_pool_create();
-  
+  heap_verifier_metadata->obj_with_fin_pool= sync_pool_create();
+  heap_verifier_metadata->finalizable_obj_pool= sync_pool_create();
+
   verifier_metadata = heap_verifier_metadata;
   heap_verifier->heap_verifier_metadata = heap_verifier_metadata;
   return;
@@ -90,6 +92,9 @@
   sync_pool_destruct(metadata->new_objects_pool);  
   sync_pool_destruct(metadata->hashcode_pool_before_gc);
   sync_pool_destruct(metadata->hashcode_pool_after_gc);
+  
+  sync_pool_destruct(metadata->obj_with_fin_pool);
+  sync_pool_destruct(metadata->finalizable_obj_pool);
 
   for(unsigned int i=0; i<metadata->num_alloc_segs; i++){
     assert(metadata->segments[i]);
@@ -160,9 +165,14 @@
   }
 }
 
-Pool* verifier_copy_pool_reverse_order(Pool* source_pool)
+void verifier_remove_pool(Pool* working_pool, Pool* free_pool, Boolean is_vector_stack)
+{
+  verifier_clear_pool(working_pool, free_pool, is_vector_stack);
+  sync_pool_destruct(working_pool);
+}
+
+void verifier_copy_pool_reverse_order(Pool* dest_pool, Pool* source_pool)
 {
-  Pool* dest_pool = sync_pool_create();
   pool_iterator_init(source_pool);
   Vector_Block* dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
   
@@ -176,5 +186,33 @@
     pool_put_entry(dest_pool, dest_set);
     dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
   }
-  return dest_pool;
+  return ;
+}
+
+/*copy dest pool to source pool, ignore NULL slot*/
+void verifier_copy_pool(Pool* dest_pool, Pool* source_pool)
+{
+  Pool* temp_pool = sync_pool_create();
+  
+  Vector_Block* dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
+  pool_iterator_init(source_pool);
+  while(Vector_Block *source_set = pool_iterator_next(source_pool)){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(source_set);
+    while( !vector_block_iterator_end(source_set, iter)){
+      assert(!vector_block_is_full(dest_set));
+      if(*iter)  vector_block_add_entry(dest_set, *iter);
+      iter = vector_block_iterator_advance(source_set, iter);
+    }
+    pool_put_entry(temp_pool, dest_set);
+    dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
+  }
+  
+  dest_set = NULL;
+  pool_iterator_init(temp_pool);
+  while(dest_set = pool_iterator_next(temp_pool)){
+    pool_put_entry(dest_pool, dest_set);
+  }
+  
+  sync_pool_destruct(temp_pool);
+  return;
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_metadata.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_metadata.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_metadata.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_metadata.h Wed Jul 25 03:02:07 2007
@@ -46,6 +46,9 @@
   Pool* hashcode_pool_after_gc;
 
   Pool* new_objects_pool;
+
+  Pool* obj_with_fin_pool;
+  Pool* finalizable_obj_pool;
 } Heap_Verifier_Metadata;
 
 extern Heap_Verifier_Metadata* verifier_metadata;
@@ -56,7 +59,10 @@
 Vector_Block* gc_verifier_metadata_extend(Pool* pool, Boolean is_set_pool);
 
 void verifier_clear_pool(Pool* working_pool, Pool* free_pool, Boolean is_vector_stack);
-Pool* verifier_copy_pool_reverse_order(Pool* source_pool);
+void verifier_remove_pool(Pool* working_pool, Pool* free_pool, Boolean is_vector_stack);
+void verifier_copy_pool_reverse_order(Pool* dest_pool, Pool* source_pool);
+void verifier_copy_pool(Pool* dest_pool, Pool* source_pool);
+
 
 inline Vector_Block* verifier_free_set_pool_get_entry(Pool* free_pool)
 {

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp Wed Jul 25 03:02:07 2007
@@ -171,6 +171,7 @@
       /*p_obj can be NULL , When GC happened, the obj in Finalize objs list will be clear.*/
       //assert(p_obj != NULL);  
       if(p_obj == NULL) continue;
+      if(heap_verifier->gc_is_gen_mode && heap_verifier->is_before_gc && !obj_belongs_to_nos(p_obj)) continue;
       verifier_tracestack_push(p_obj, gc_verifier->trace_stack);
     } 
     obj_set = pool_iterator_next(obj_set_pool);
@@ -203,21 +204,23 @@
 void verifier_scan_resurrect_objects(Heap_Verifier* heap_verifier)
 {
   GC_Gen* gc    =  (GC_Gen*)heap_verifier->gc;
+  Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
   verifier_update_info_before_resurrect(heap_verifier);
 #ifndef BUILD_IN_REFERENT
   heap_verifier->gc_verifier->is_tracing_resurrect_obj = TRUE;
   if(heap_verifier->is_before_gc){
-    verifier_trace_objsets(heap_verifier, gc->finref_metadata->obj_with_fin_pool);
+    verifier_copy_pool(verifier_metadata->obj_with_fin_pool, gc->finref_metadata->obj_with_fin_pool);
+    verifier_trace_objsets(heap_verifier, verifier_metadata->obj_with_fin_pool);
   }else{
 	  if(!heap_verifier->gc_verifier->is_before_fallback_collection){
       verify_live_finalizable_obj(heap_verifier, gc->finref_metadata->obj_with_fin_pool);
-      Pool* finalizable_obj_pool = verifier_copy_pool_reverse_order(gc->finref_metadata->finalizable_obj_pool);
-      verifier_trace_objsets(heap_verifier, finalizable_obj_pool);
-      verifier_clear_pool(finalizable_obj_pool, heap_verifier->heap_verifier_metadata->free_set_pool, FALSE);
-      sync_pool_destruct(finalizable_obj_pool);
+      verifier_copy_pool_reverse_order(verifier_metadata->finalizable_obj_pool, gc->finref_metadata->finalizable_obj_pool);
+      verifier_trace_objsets(heap_verifier, verifier_metadata->finalizable_obj_pool);
+      verifier_clear_pool(verifier_metadata->finalizable_obj_pool, heap_verifier->heap_verifier_metadata->free_set_pool, FALSE);
     }else{
-      verifier_trace_objsets(heap_verifier, gc->finref_metadata->obj_with_fin_pool);	
+      verifier_trace_objsets(heap_verifier, verifier_metadata->obj_with_fin_pool );	
     }
+    verifier_clear_pool(verifier_metadata->obj_with_fin_pool, heap_verifier->heap_verifier_metadata->free_set_pool, FALSE);
   }
   heap_verifier->gc_verifier->is_tracing_resurrect_obj = FALSE;
   verifier_update_info_after_resurrect(heap_verifier);
@@ -225,10 +228,14 @@
 }
 
 void verifier_scan_unreachable_objects(Heap_Verifier* heap_verifier);
-
+void verifier_scan_prepare()
+{ 
+  verifier_reset_hash_distance(); 
+}
 void verifier_scan_live_objects(Heap_Verifier* heap_verifier)
 {
   Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
+  verifier_scan_prepare();
   verifier_trace_rootsets(heap_verifier, verifier_metadata->root_set_pool);
   verifier_scan_resurrect_objects(heap_verifier);
   verifier_scan_unreachable_objects(heap_verifier);
@@ -413,6 +420,7 @@
   heap_verifier->live_obj_scanner = verifier_scan_live_objects;
   heap_verifier->all_obj_scanner   = verifier_scan_all_objects;
 }
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp Wed Jul 25 03:02:07 2007
@@ -73,6 +73,7 @@
 
 void verify_live_finalizable_obj(Heap_Verifier* heap_verifier, Pool* live_finalizable_objs_pool)
 {
+  if(heap_verifier->gc_is_gen_mode) return;
   pool_iterator_init(live_finalizable_objs_pool);
   Vector_Block* live_fin_objs = pool_iterator_next(live_finalizable_objs_pool);
   while(live_fin_objs){
@@ -255,19 +256,21 @@
   return obj_hash_info;
 }
 #else 
+#define GCGEN_HASH_MASK 0x1fc
 inline Object_Hashcode_Inform* verifier_copy_hashcode(Partial_Reveal_Object* p_obj, Heap_Verifier* heap_verifier, Boolean is_before_gc)  
 {
   hash_obj_distance ++;
   
-  if(!hashcode_is_set(p_obj))  return NULL;
+  Obj_Info_Type info = get_obj_info_raw(p_obj);
+
+  int hash = info & GCGEN_HASH_MASK;
+
+  if(!hash)  return NULL;
 
   GC_Verifier* gc_verifier = heap_verifier->gc_verifier;  
   if(is_before_gc) gc_verifier->num_hash_before_gc++;
   else gc_verifier->num_hash_after_gc++;
 
-  Obj_Info_Type info = get_obj_info_raw(p_obj);
-
-  int hash = info & GCGEN_HASH_MASK;
   unsigned int size = sizeof(Object_Hashcode_Inform);
   Object_Hashcode_Inform* obj_hash_info = (Object_Hashcode_Inform*) STD_MALLOC(size);
   assert(obj_hash_info);
@@ -525,6 +528,9 @@
   verify_gc_reset(heap_verifier);  
   verifier_set_fallback_collection(heap_verifier->gc_verifier, FALSE);  
 }
+
+void verifier_reset_hash_distance()
+{ hash_obj_distance = 0;}
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.h Wed Jul 25 03:02:07 2007
@@ -85,7 +85,7 @@
 
 void verify_gc_effect(Heap_Verifier* heap_verifier);
 
-
+void verifier_reset_hash_distance();
 
 inline unsigned int verifier_get_gc_collect_kind(GC_Verifier* gc_verifier)
 {  return gc_verifier->gc_collect_kind;  }
@@ -96,4 +96,5 @@
 {  gc_verifier->is_before_fallback_collection = is_before_fallback;  }
 
 #endif
+
 



Mime
View raw message