harmony-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From x..@apache.org
Subject svn commit: r597313 [1/2] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ jni/ mark_compact/ mark_sweep/ thread/ trace_forward/ utils/ verify/
Date Thu, 22 Nov 2007 06:40:40 GMT
Author: xli
Date: Wed Nov 21 22:40:31 2007
New Revision: 597313

URL: http://svn.apache.org/viewvc?rev=597313&view=rev
Log:
 HARMONY-4325 : [drlvm][gc] Tick project concurrent GC improvement

Modified:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/object_status.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_fallback_mark.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_pool.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/vector_block.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h Wed Nov 21 22:40:31 2007
@@ -74,13 +74,12 @@
   }
 
   /* scan non-array object */
-  int *offset_scanner = init_object_scanner(p_obj);
-  while (true) {
-    REF* p_ref = (REF*)offset_get_ref(offset_scanner, p_obj);
-    if (p_ref == NULL) break; /* terminating ref slot */
-  
-    slot_fix(p_ref);
-    offset_scanner = offset_next_ref(offset_scanner);
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+            
+  for(unsigned int i=0; i<num_refs; i++){
+     REF* p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);        
+     slot_fix(p_ref);  
   }
 
   return;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Wed Nov 21 22:40:31 2007
@@ -371,6 +371,8 @@
     gc_finish_concurrent_mark(gc);
   }
   
+  gc->in_collection = TRUE;
+  
   /* this has to be done after all mutators are suspended */
   gc_reset_mutator_context(gc);
   
@@ -382,15 +384,8 @@
   gc_ms_reclaim_heap((GC_MS*)gc);
 #endif
 
-  /* FIXME:: clear root set here to support verify. */
-#ifdef COMPRESS_REFERENCE
-  gc_set_pool_clear(gc->metadata->gc_uncompressed_rootset_pool);
-#endif
-
   gc_reset_interior_pointer_table();
   
-  gc_metadata_verify(gc, FALSE);
-  
   collection_end_time = time_now(); 
 
 #ifndef USE_MARK_SWEEP_GC
@@ -411,6 +406,11 @@
     gc_start_concurrent_mark(gc);
 #endif
 
+  /* Clear rootset pools here rather than in each collection algorithm */
+  gc_clear_rootset(gc);
+  
+  gc_metadata_verify(gc, FALSE);
+  
   if(!IGNORE_FINREF ){
     INFO2("gc.process", "GC: finref process after collection ...\n");
     gc_put_finref_to_vm(gc);
@@ -431,6 +431,8 @@
   if(USE_CONCURRENT_GC) gc_update_collection_scheduler(gc, mutator_time, mark_time);
   
   vm_reclaim_native_objs();
+  gc->in_collection = FALSE;
+  
   vm_resume_threads_after();
   assert(hythread_is_suspend_enabled());
   hythread_set_suspend_disable(disable_count);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Wed Nov 21 22:40:31 2007
@@ -224,19 +224,6 @@
   return iterator+1;
 }
 
-/* original design */
-inline int *init_object_scanner (Partial_Reveal_Object *obj) 
-{
-  GC_VTable_Info *gcvt = obj_get_gcvt(obj);  
-  return gcvt->gc_ref_offset_array;
-}
-
-inline void *offset_get_ref(int *offset, Partial_Reveal_Object *obj) 
-{    return (*offset == 0)? NULL: (void*)((Byte*) obj + *offset); }
-
-inline int *offset_next_ref (int *offset) 
-{  return offset + 1; }
-
 /****************************************/
 
 inline Boolean obj_is_marked_in_vt(Partial_Reveal_Object *obj) 
@@ -402,13 +389,13 @@
 struct Collection_Scheduler;
 
 typedef struct GC{
-//heap allocation bases for a segmented heap
-  void* alloc_heap_start[3];
+  void* physical_start;
   void* heap_start;
   void* heap_end;
   POINTER_SIZE_INT reserved_heap_size;
   POINTER_SIZE_INT committed_heap_size;
   unsigned int num_collections;
+  Boolean in_collection;
   int64 time_collections;
   float survive_ratio;
   
@@ -439,7 +426,7 @@
   
   /* FIXME:: this is wrong! root_set belongs to mutator */
   Vector_Block* root_set;
-  Vector_Block* weak_root_set;
+  Vector_Block* weakroot_set;
   Vector_Block* uncompressed_root_set;
 
   Space_Tuner* tuner;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.h Wed Nov 21 22:40:31 2007
@@ -28,7 +28,6 @@
 #ifndef FORCE_INLINE
 #define FORCE_INLINE inline
 #endif
-
 /* CONST_MARK_BIT is used in mark_scan in vt, no matter MARK_BIT_FLIPPING used or not. 
    MARK_BIT_FLIPPING is used in oi for marking and forwarding in non-gen nursery forwarding
    (the marking is for those objects not in nos.)

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp Wed Nov 21 22:40:31 2007
@@ -77,7 +77,7 @@
   gc_metadata.collector_remset_pool = sync_pool_create();
   gc_metadata.collector_repset_pool = sync_pool_create();
   gc_metadata.dirty_obj_snaptshot_pool = sync_pool_create();
-  gc_metadata.weak_roots_pool = sync_pool_create();
+  gc_metadata.weakroot_pool = sync_pool_create();
 #ifdef USE_32BITS_HASHCODE  
   gc_metadata.collector_hashcode_pool = sync_pool_create();
 #endif
@@ -100,7 +100,7 @@
   sync_pool_destruct(metadata->collector_remset_pool);
   sync_pool_destruct(metadata->collector_repset_pool);
   sync_pool_destruct(metadata->dirty_obj_snaptshot_pool);
-  sync_pool_destruct(metadata->weak_roots_pool);
+  sync_pool_destruct(metadata->weakroot_pool);
 #ifdef USE_32BITS_HASHCODE  
   sync_pool_destruct(metadata->collector_hashcode_pool);
 #endif
@@ -168,7 +168,7 @@
 
 extern Boolean IS_MOVE_COMPACT;
 
-static void gc_update_repointed_sets(GC* gc, Pool* pool)
+static void gc_update_repointed_sets(GC* gc, Pool* pool, Boolean double_fix)
 {
   GC_Metadata* metadata = gc->metadata;
   
@@ -200,8 +200,16 @@
            * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted,
            * since those which can be scanned in MOS & NOS must have been set fw bit in oi.
            */
-          assert(address_belongs_to_gc_heap(obj_get_fw_in_oi(p_obj), gc));
-          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+          p_obj = obj_get_fw_in_oi(p_obj);
+          assert(address_belongs_to_gc_heap(p_obj, gc));
+          /* Only major collection in MS Gen GC might need double_fix.
+           * Double fixing happens when both forwarding and compaction happen.
+           */
+          if(double_fix && obj_is_fw_in_oi(p_obj)){
+            p_obj = obj_get_fw_in_oi(p_obj);
+            assert(address_belongs_to_gc_heap(p_obj, gc));
+          }
+          write_slot(p_ref, p_obj);
         }
       }
     }
@@ -211,22 +219,18 @@
   return;
 }
 
-void gc_fix_rootset(Collector* collector)
-{  
-  GC* gc = collector->gc;  
-  GC_Metadata* metadata = gc->metadata;
+void gc_fix_rootset(Collector* collector, Boolean double_fix)
+{
+  GC* gc = collector->gc;
 
-  gc_update_weak_roots_pool(gc);
+  gc_update_weak_roots(gc, double_fix);
 
   /* MINOR_COLLECTION doesn't need rootset update, but need reset */
   if( !gc_match_kind(gc, MINOR_COLLECTION)){
-    gc_update_repointed_sets(gc, metadata->gc_rootset_pool);
+    gc_update_repointed_sets(gc, gc->metadata->gc_rootset_pool, double_fix);
 #ifndef BUILD_IN_REFERENT
-    gc_update_finref_repointed_refs(gc);
+    gc_update_finref_repointed_refs(gc, double_fix);
 #endif
-  } else {
-    gc_set_pool_clear(metadata->gc_rootset_pool);
-    gc_set_pool_clear(metadata->weak_roots_pool);
   }
 
 #ifdef COMPRESS_REFERENCE
@@ -263,11 +267,11 @@
      only after we know we are not going to fallback. */
     // gc->root_set = NULL;
 
-  if(vector_block_is_empty(gc->weak_root_set))
-    pool_put_entry(free_set_pool, gc->weak_root_set);
+  if(vector_block_is_empty(gc->weakroot_set))
+    pool_put_entry(free_set_pool, gc->weakroot_set);
   else
-    pool_put_entry(metadata->weak_roots_pool, gc->weak_root_set);
-  gc->weak_root_set = NULL;
+    pool_put_entry(metadata->weakroot_pool, gc->weakroot_set);
+  gc->weakroot_set = NULL;
   
   if(!gc_is_gen_mode()) return;
 
@@ -338,10 +342,10 @@
   gc->root_set = free_set_pool_get_entry(&gc_metadata);
   assert(vector_block_is_empty(gc->root_set));
 
-  assert(pool_is_empty(gc_metadata.weak_roots_pool));
-  assert(gc->weak_root_set == NULL);
-  gc->weak_root_set = free_set_pool_get_entry(&gc_metadata);
-  assert(vector_block_is_empty(gc->weak_root_set));
+  assert(pool_is_empty(gc_metadata.weakroot_pool));
+  assert(gc->weakroot_set == NULL);
+  gc->weakroot_set = free_set_pool_get_entry(&gc_metadata);
+  assert(vector_block_is_empty(gc->weakroot_set));
 
 #ifdef COMPRESS_REFERENCE
   assert(pool_is_empty(gc_metadata.gc_uncompressed_rootset_pool));
@@ -357,6 +361,7 @@
 {
   gc_reset_interior_pointer_table();
   gc_set_pool_clear(gc->metadata->gc_rootset_pool);
+  gc_set_pool_clear(gc->metadata->weakroot_pool);
 #ifdef COMPRESS_REFERENCE
   gc_set_pool_clear(gc->metadata->gc_uncompressed_rootset_pool);
 #endif
@@ -462,6 +467,7 @@
 
   
 }
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h Wed Nov 21 22:40:31 2007
@@ -44,7 +44,7 @@
   Pool* mutator_remset_pool; /* list of remsets generated by app during execution */
   Pool* collector_remset_pool; /* list of remsets generated by gc during collection */
   Pool* collector_repset_pool; /* list of repointed ref slot sets */
-  Pool* weak_roots_pool; /* list of short weak roots */
+  Pool* weakroot_pool; /* list of short weak roots */
 #ifdef USE_32BITS_HASHCODE
   Pool* collector_hashcode_pool;
 #endif
@@ -62,12 +62,12 @@
 void gc_set_rootset(GC* gc);
 void gc_reset_rootset(GC* gc);
 void gc_clear_rootset(GC* gc);
-void gc_fix_rootset(Collector* collector);
+void gc_fix_rootset(Collector* collector, Boolean double_fix);
 void gc_clear_remset(GC* gc);
 void gc_reset_snaptshot(GC* gc);
 
-void identify_dead_weak_roots(GC *gc, Pool *pool);
-void gc_update_weak_roots_pool(GC *gc);
+void gc_identify_dead_weak_roots(GC *gc);
+void gc_update_weak_roots(GC *gc, Boolean double_fix);
 
 void gc_clear_remset(GC* gc);
 
@@ -122,7 +122,7 @@
   return block;
 }
 
-inline void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_ref)
+inline void mutator_remset_add_entry(Mutator* mutator, REF* p_ref)
 {
   assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); 
 
@@ -214,14 +214,14 @@
   //assert(is_short_weak == FALSE); //Currently no need for short_weak_roots
   assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address()); 
   
-  Vector_Block* weak_root_set = gc->weak_root_set;
-  vector_block_add_entry(weak_root_set, (POINTER_SIZE_INT)p_ref);
+  Vector_Block* weakroot_set = gc->weakroot_set;
+  vector_block_add_entry(weakroot_set, (POINTER_SIZE_INT)p_ref);
   
-  if( !vector_block_is_full(weak_root_set)) return;
+  if( !vector_block_is_full(weakroot_set)) return;
 
-  pool_put_entry(gc_metadata.weak_roots_pool, weak_root_set);
-  gc->weak_root_set = free_set_pool_get_entry(&gc_metadata);  
-  assert(gc->weak_root_set);
+  pool_put_entry(gc_metadata.weakroot_pool, weakroot_set);
+  gc->weakroot_set = free_set_pool_get_entry(&gc_metadata);  
+  assert(gc->weakroot_set);
 }
 
 #ifdef COMPRESS_REFERENCE

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h Wed Nov 21 22:40:31 2007
@@ -108,10 +108,14 @@
 
 inline int vm_create_thread(int (*func)(void*), void *data)
 { 
-  hythread_t ret_thread = (hythread_t)STD_CALLOC(1, hythread_get_struct_size());;
+  hythread_t ret_thread = (hythread_t)STD_CALLOC(1,hythread_get_struct_size());
   assert(ret_thread);
-  return (int)hythread_create_ex(ret_thread, get_gc_thread_group(), 0, 0, NULL,
-                    (hythread_entrypoint_t)func, data);
+  
+  UDATA stacksize = 0;
+  UDATA priority = 5;
+  
+  return (int)hythread_create_ex(ret_thread, get_gc_thread_group(), stacksize, priority, NULL,
+                              (hythread_entrypoint_t)func, data);
 }
 
 inline void *atomic_casptr(volatile void **mem, void *with, const void *cmp) 
@@ -187,8 +191,8 @@
   if(result == 0) result = TRUE;
   else result = FALSE;  
 #endif /* ifdef _WINDOWS_ else */
-//assert that memory was released
-  assert(result);
+
+  assert(result); /* expect that memory was really released */
   return result;
 }
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h Wed Nov 21 22:40:31 2007
@@ -65,7 +65,7 @@
 struct Allocator;
 typedef void *(*Space_Alloc_Func)(unsigned, Allocator *);
 
-inline POINTER_SIZE_INT space_committed_size(Space* space){ return space->committed_heap_size;}
+inline POINTER_SIZE_INT space_committed_size(Space* space){ return space ? space->committed_heap_size : 0; }
 inline void* space_heap_start(Space* space){ return space->heap_start; }
 inline void* space_heap_end(Space* space){ return space->heap_end; }
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp Wed Nov 21 22:40:31 2007
@@ -72,8 +72,7 @@
 
   }else{ /* scan non-array object */
     
-    unsigned int num_refs = object_ref_field_num(p_obj);
-    
+    unsigned int num_refs = object_ref_field_num(p_obj);    
     int* ref_iterator = object_ref_iterator_init(p_obj);
     
     for(unsigned int i=0; i<num_refs; i++){  

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/object_status.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/object_status.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/object_status.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/object_status.h Wed Nov 21 22:40:31 2007
@@ -22,6 +22,7 @@
 #include "../mark_sweep/gc_ms.h"
 #include "../mark_sweep/sspace_mark_sweep.h"
 
+
 inline Boolean obj_is_dead_in_gen_minor_gc(Partial_Reveal_Object *p_obj)
 {
   /*

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp Wed Nov 21 22:40:31 2007
@@ -19,10 +19,12 @@
 #include "gc_metadata.h"
 #include "object_status.h"
 
-void identify_dead_weak_roots(GC *gc, Pool *pool)
+void gc_identify_dead_weak_roots(GC *gc)
 {
-  pool_iterator_init(pool);
-  while(Vector_Block *block = pool_iterator_next(pool)){
+  Pool *weakroot_pool = gc->metadata->weakroot_pool;
+  
+  pool_iterator_init(weakroot_pool);
+  while(Vector_Block *block = pool_iterator_next(weakroot_pool)){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
       Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)*iter;
@@ -49,31 +51,42 @@
 extern Boolean IS_MOVE_COMPACT;
 
 /* parameter pointer_addr_in_pool means it is p_ref or p_obj in pool */
-void gc_update_weak_roots_pool(GC *gc)
+void gc_update_weak_roots(GC *gc, Boolean double_fix)
 {
   GC_Metadata* metadata = gc->metadata;
-  Pool *pool = metadata->weak_roots_pool;
+  Pool *weakroot_pool = metadata->weakroot_pool;
   Partial_Reveal_Object** p_ref;
   Partial_Reveal_Object *p_obj;
   
-  pool_iterator_init(pool);
-  while(Vector_Block *repset = pool_iterator_next(pool)){
+  pool_iterator_init(weakroot_pool);
+  while(Vector_Block *repset = pool_iterator_next(weakroot_pool)){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(repset);
     for(; !vector_block_iterator_end(repset,iter); iter = vector_block_iterator_advance(repset,iter)){
       p_ref = (Partial_Reveal_Object**)*iter;
       p_obj = *p_ref;
-      if(!p_obj){  // reference has been cleared
+      if(!p_obj || !obj_need_move(gc, p_obj)){  // reference has been cleared or not moved
         continue;
       }
 
-      if(obj_need_move(gc, p_obj))  {
-        if(!IS_MOVE_COMPACT){
-          assert((POINTER_SIZE_INT)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS);
-          *p_ref = obj_get_fw_in_oi(p_obj);
-        } else {
-          assert(space_of_addr(gc, (void*)p_obj)->move_object);
-          *p_ref = ref_to_obj_ptr(obj_get_fw_in_table(p_obj));
+      if(IS_MOVE_COMPACT){
+        assert(space_of_addr(gc, p_obj)->move_object);
+        *p_ref = ref_to_obj_ptr(obj_get_fw_in_table(p_obj));
+      } else if(gc_match_kind(gc, MS_COMPACT_COLLECTION) || gc_get_mos((GC_Gen*)gc)->collect_algorithm==MAJOR_MARK_SWEEP){
+        if(obj_is_fw_in_oi(p_obj)){
+          p_obj = obj_get_fw_in_oi(p_obj);
+          /* Only major collection in MS Gen GC might need double_fix.
+           * Double fixing happens when both forwarding and compaction happen.
+           */
+          if(double_fix && obj_is_fw_in_oi(p_obj)){
+            assert(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP);
+            p_obj = obj_get_fw_in_oi(p_obj);
+            assert(address_belongs_to_gc_heap(p_obj, gc));
+          }
+          *p_ref = p_obj;
         }
+      } else {
+        assert(obj_is_fw_in_oi(p_obj));
+        *p_ref = obj_get_fw_in_oi(p_obj);
       }
     }
   }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp Wed Nov 21 22:40:31 2007
@@ -112,6 +112,7 @@
 extern void trace_obj_in_nongen_fw(Collector *collector, void *p_ref);
 extern void trace_obj_in_normal_marking(Collector *collector, void *p_obj);
 extern void trace_obj_in_fallback_marking(Collector *collector, void *p_ref);
+extern void trace_obj_in_ms_fallback_marking(Collector *collector, void *p_ref);
 extern void trace_obj_in_space_tune_marking(Collector *collector, void *p_obj);
 extern void trace_obj_in_ms_marking(Collector *collector, void *p_obj);
 
@@ -151,11 +152,14 @@
       }
     } else if(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP){
       trace_object = trace_obj_in_ms_marking;
-    } else {  
+    } else {
       trace_object = trace_obj_in_normal_marking;
     }
   } else if(gc_match_kind(gc, FALLBACK_COLLECTION)){
-    trace_object = trace_obj_in_fallback_marking;
+    if(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP)
+      trace_object = trace_obj_in_ms_fallback_marking;
+    else
+      trace_object = trace_obj_in_fallback_marking;
   } else {
     assert(gc_match_kind(gc, MARK_SWEEP_GC));
     p_ref_or_obj = p_obj;
@@ -674,7 +678,7 @@
  * 1. ms with compaction
  * 2. ms as a mos collection algorithm
  */
-static inline void moving_mark_sweep_update_ref(GC *gc, REF *p_ref)
+static inline void moving_mark_sweep_update_ref(GC *gc, REF *p_ref, Boolean double_fix)
 {
   /* There are only two kinds of p_ref being added into finref_repset_pool:
    * 1. p_ref is in a vector block from one finref pool;
@@ -687,6 +691,14 @@
     Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
     if(obj_is_fw_in_oi(p_old_ref)){
       Partial_Reveal_Object *p_new_ref = obj_get_fw_in_oi(p_old_ref);
+      /* Only major collection in MS Gen GC might need double_fix.
+       * Double fixing happens when both forwarding and compaction happen.
+       */
+      if(double_fix && obj_is_fw_in_oi(p_new_ref)){
+        assert(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP);
+        p_new_ref = obj_get_fw_in_oi(p_new_ref);
+        assert(address_belongs_to_gc_heap(p_new_ref, gc));
+      }
       p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
     }
   }
@@ -697,13 +709,22 @@
    * so this assertion will fail.
    * But for sure p_obj here must be an one needing moving.
    */
-  write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+  p_obj = obj_get_fw_in_oi(p_obj);
+  /* Only major collection in MS Gen GC might need double_fix.
+   * Double fixing happens when both forwarding and compaction happen.
+   */
+  if(double_fix && obj_is_fw_in_oi(p_obj)){
+    assert(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP);
+    p_obj = obj_get_fw_in_oi(p_obj);
+    assert(address_belongs_to_gc_heap(p_obj, gc));
+  }
+  write_slot(p_ref, p_obj);
 }
 
 extern Boolean IS_MOVE_COMPACT;
 
 /* parameter pointer_addr_in_pool means it is p_ref or p_obj in pool */
-static void nondestructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool)
+static void nondestructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool, Boolean double_fix)
 {
   Finref_Metadata *metadata = gc->finref_metadata;
   REF *p_ref;
@@ -725,7 +746,7 @@
         move_compaction_update_ref(gc, p_ref);
       } else if(gc_match_kind(gc, MS_COMPACT_COLLECTION) || gc_get_mos((GC_Gen*)gc)->collect_algorithm==MAJOR_MARK_SWEEP){
         if(obj_is_fw_in_oi(p_obj))
-          moving_mark_sweep_update_ref(gc, p_ref);
+          moving_mark_sweep_update_ref(gc, p_ref, double_fix);
       } else {
         assert((obj_is_marked_in_vt(p_obj) && obj_is_fw_in_oi(p_obj)));
         write_slot(p_ref , obj_get_fw_in_oi(p_obj));
@@ -735,7 +756,7 @@
   }
 }
 
-void gc_update_finref_repointed_refs(GC *gc)
+void gc_update_finref_repointed_refs(GC *gc, Boolean double_fix)
 {
   assert(!gc_match_kind(gc, MINOR_COLLECTION));
   
@@ -743,10 +764,10 @@
   Pool *repset_pool = metadata->repset_pool;
   Pool *fallback_ref_pool = metadata->fallback_ref_pool;
   
-  nondestructively_fix_finref_pool(gc, repset_pool, TRUE);
+  nondestructively_fix_finref_pool(gc, repset_pool, TRUE, double_fix);
   if(!pool_is_empty(fallback_ref_pool)){
     assert(IS_FALLBACK_COMPACTION);
-    nondestructively_fix_finref_pool(gc, fallback_ref_pool, FALSE);
+    nondestructively_fix_finref_pool(gc, fallback_ref_pool, FALSE, double_fix);
   }
 }
 
@@ -782,4 +803,6 @@
   finref_copy_pool(finalizable_obj_pool, finalizable_obj_pool_copy, gc);
   finref_copy_pool_to_rootset(gc, finalizable_obj_pool_copy);
 }
+
+
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h Wed Nov 21 22:40:31 2007
@@ -93,7 +93,7 @@
 extern void gc_put_finref_to_vm(GC *gc);
 extern void put_all_fin_on_exit(GC *gc);
 
-extern void gc_update_finref_repointed_refs(GC *gc);
+extern void gc_update_finref_repointed_refs(GC *gc, Boolean double_fix);
 extern void gc_activate_finref_threads(GC *gc);
 
 void gc_copy_finaliable_obj_to_rootset(GC *gc);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp Wed Nov 21 22:40:31 2007
@@ -39,7 +39,7 @@
     Mutator *mutator = (Mutator *)gc_get_tls();
     assert( addr_belongs_to_nos(p_target) && !addr_belongs_to_nos(p_slot)); 
             
-    mutator_remset_add_entry(mutator, (Partial_Reveal_Object**)p_slot);
+    mutator_remset_add_entry(mutator, (REF*)p_slot);
   }
   return;
 }
@@ -51,7 +51,7 @@
 
   Mutator *mutator = (Mutator *)gc_get_tls();
   
-  Partial_Reveal_Object **p_slot; 
+  REF* p_slot; 
   /* scan array object */
   if (object_is_array((Partial_Reveal_Object*)p_object)) {
     Partial_Reveal_Object* array = (Partial_Reveal_Object*)p_object;
@@ -59,8 +59,8 @@
     
     int32 array_length = vector_get_length((Vector_Handle) array);
     for (int i = 0; i < array_length; i++) {
-      p_slot = (Partial_Reveal_Object **)vector_get_element_address_ref((Vector_Handle) array, i);
-      if( *p_slot != NULL && addr_belongs_to_nos(*p_slot)){
+      p_slot = (REF*)vector_get_element_address_ref((Vector_Handle) array, i);
+      if( read_slot(p_slot) != NULL && addr_belongs_to_nos(read_slot(p_slot))){
         mutator_remset_add_entry(mutator, p_slot);
       }
     }   
@@ -69,14 +69,14 @@
 
   /* scan non-array object */
   Partial_Reveal_Object* p_obj =  (Partial_Reveal_Object*)p_object;   
-  int *offset_scanner = init_object_scanner(p_obj);
-  while (true) {
-    p_slot = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
-    if (p_slot == NULL) break;  
-    if( addr_belongs_to_nos(*p_slot)){
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+            
+  for(unsigned int i=0; i<num_refs; i++){
+    p_slot = object_ref_iterator_get(ref_iterator+i, p_obj);        
+    if( addr_belongs_to_nos(read_slot(p_slot))){
       mutator_remset_add_entry(mutator, p_slot);
     }
-    offset_scanner = offset_next_ref(offset_scanner);
   }
 
   return;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp Wed Nov 21 22:40:31 2007
@@ -79,7 +79,7 @@
 
 void *alloc_large_pages(size_t size, const char *hint);
 
-void gc_gen_initial_verbose_info(GC_Gen *gc);
+void gc_gen_init_verbose(GC_Gen *gc);
 void gc_gen_initialize(GC_Gen *gc_gen, POINTER_SIZE_INT min_heap_size, POINTER_SIZE_INT max_heap_size)
 {
   TRACE2("gc.process", "GC: GC_Gen heap init ... \n");
@@ -123,8 +123,7 @@
   void *reserved_base;
   void *reserved_end;
   void *nos_base;
-  // allocation base
-  void* alloc_reserved_base;
+  void* physical_start;
 
 #ifdef STATIC_NOS_MAPPING
 
@@ -138,27 +137,21 @@
     DIE2("gc.base","Please not use static NOS mapping by undefining STATIC_NOS_MAPPING, or adjusting NOS_BOUNDARY value.");
     exit(0);
   }
-  //set the allocation heap base for the nos segment
-  gc_gen->alloc_heap_start[0] = nos_base;
   reserved_end = (void*)((POINTER_SIZE_INT)nos_base + nos_reserve_size);
   
   void *los_mos_base = (void*)((POINTER_SIZE_INT)nos_base - los_mos_reserve_size);
   assert(!((POINTER_SIZE_INT)los_mos_base % SPACE_ALLOC_UNIT));
-  
-  alloc_reserved_base = vm_reserve_mem(los_mos_base, los_mos_reserve_size);
-  
-  while( !alloc_reserved_base || alloc_reserved_base >= nos_base){
+  reserved_base = vm_reserve_mem(los_mos_base, los_mos_reserve_size);
+  while( !reserved_base || reserved_base >= nos_base){
     los_mos_base = (void*)((POINTER_SIZE_INT)los_mos_base - SPACE_ALLOC_UNIT);
     if(los_mos_base < RESERVE_BOTTOM){
       DIE2("gc.base","Static NOS mapping: Can't reserve memory at address"<<reserved_base<<" for specified size "<<los_mos_size);
       exit(0);      
     }
-	
-    alloc_reserved_base = vm_reserve_mem(los_mos_base, los_mos_reserve_size);
+    reserved_base = vm_reserve_mem(los_mos_base, los_mos_reserve_size);
   }
-	//set the allocation heap base for mos+los
-	gc_gen->alloc_heap_start[1] = alloc_reserved_base;
-	reserved_base = alloc_reserved_base;
+  physical_start = reserved_base;
+  
 #else /* NON_STATIC_NOS_MAPPING */
 
   reserved_base = NULL;
@@ -175,18 +168,15 @@
   
   if(reserved_base == NULL){
     Boolean max_size_reduced = FALSE;
-	
-
-    alloc_reserved_base = vm_reserve_mem(NULL, max_heap_size + SPACE_ALLOC_UNIT);
-	
-    while( !alloc_reserved_base ){
+    reserved_base = vm_reserve_mem(NULL, max_heap_size + SPACE_ALLOC_UNIT);
+    while( !reserved_base ){
       max_size_reduced = TRUE;
       max_heap_size -= SPACE_ALLOC_UNIT;
-	  
-      alloc_reserved_base = vm_reserve_mem(NULL, max_heap_size + SPACE_ALLOC_UNIT);
+      reserved_base = vm_reserve_mem(NULL, max_heap_size + SPACE_ALLOC_UNIT);
     }
-	//set the allocation heap base for the contiguous heap
-    gc_gen->alloc_heap_start[0] = alloc_reserved_base;
+    
+    physical_start = reserved_base;
+    
     if(max_size_reduced){
       WARN2("gc.base","Max heap size: can't be reserved, reduced to "<< max_heap_size/MB<<" MB according to virtual memory limitation.");
     }
@@ -195,8 +185,7 @@
       DIE2("gc.base","Heap size: invalid, please reimput a smaller \"ms\" paramenter!");
       exit(0);
     }
-	
-    reserved_base = (void*)round_up_to_size((POINTER_SIZE_INT)alloc_reserved_base, SPACE_ALLOC_UNIT);
+    reserved_base = (void*)round_up_to_size((POINTER_SIZE_INT)reserved_base, SPACE_ALLOC_UNIT);
     assert(!((POINTER_SIZE_INT)reserved_base % SPACE_ALLOC_UNIT));
   }
   
@@ -210,10 +199,9 @@
 
   HEAP_NULL = (POINTER_SIZE_INT)reserved_base;
   
+  gc_gen->physical_start = physical_start;
   gc_gen->heap_start = reserved_base;
   gc_gen->heap_end = reserved_end;
-  
-  
 #ifdef STATIC_NOS_MAPPING
   gc_gen->reserved_heap_size = los_mos_reserve_size + nos_reserve_size;
 #else
@@ -255,63 +243,35 @@
   gc_gen_stats_initialize(gc_gen);
 #endif
 
-  gc_gen_initial_verbose_info(gc_gen);
+  gc_gen_init_verbose(gc_gen);
   return;
 }
 
 void gc_gen_destruct(GC_Gen *gc_gen)
 {
   TRACE2("gc.process", "GC: GC_Gen heap destruct ......");
-
-  POINTER_SIZE_INT unmap_size, mos_los_size;
-
-  unmap_size = 0;
-  mos_los_size = 0;
-  Space *nos = gc_gen->nos;
-  gc_nos_destruct(gc_gen);
-
-  // if nos is statically mapped, unmap nos
-  #ifdef STATIC_NOS_MAPPING
-	vm_unmap_mem(gc_gen->alloc_heap_start[0], space_committed_size(nos));
-  #else
-	unmap_size = space_committed_size(nos);
-  #endif
-  gc_gen->nos = NULL;
   
-  Space *mos = gc_gen->mos;
+  gc_nos_destruct(gc_gen);
   gc_mos_destruct(gc_gen);
-  #ifndef STATIC_NOS_MAPPING
-	unmap_size = unmap_size + space_committed_size(mos);
-  #else
-	mos_los_size = space_committed_size(mos);
-  #endif
-
-  gc_gen->mos = NULL;
-  
+  POINTER_SIZE_INT los_size = 0;
   if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
-    Space *los = gc_gen->los;
+    los_size = space_committed_size((Space*)gc_gen->los);
     gc_los_destruct(gc_gen);
-	//if nos is static mapped, unmap nos+los 
-	#ifdef STATIC_NOS_MAPPING
-		mos_los_size = mos_los_size + space_committed_size(los);
-		vm_unmap_mem(gc_gen->alloc_heap_start[1], mos_los_size);
-	#else
-		unmap_size = unmap_size + space_committed_size(los);
-	#endif
-    gc_gen->los = NULL;
-  }
-  else
-  {
-    // unmap the mos
-	#ifdef STATIC_NOS_MAPPING
-		vm_unmap_mem(gc_gen->alloc_heap_start[1], mos_los_size);
-	#endif
   }
 
- //unmap whole heap if nos is not statically mapped
 #ifndef STATIC_NOS_MAPPING
-  vm_unmap_mem(gc_gen->alloc_heap_start[0], unmap_size);
-#endif
+  /* Without static mapping, the heap is released as a whole. 
+     We cannot use reserved_heap_size because perhaps only part of it is committed.  */
+  vm_unmap_mem(gc_gen->physical_start, gc_gen_total_memory_size(gc_gen));
+
+#else  /* otherwise, release the spaces separately */
+
+  int mos_size = space_committed_size((Space*)gc_gen->mos);
+  int nos_size = space_committed_size((Space*)gc_gen->nos);
+  vm_unmap_mem(gc_gen->physical_start, los_size + mos_size);  /* los+mos */
+  vm_unmap_mem(nos_boundary, nos_size);  /* nos */
+
+#endif /* !STATIC_NOS_MAPPING */
 
 #ifdef GC_GEN_STATS
   gc_gen_stats_destruct(gc_gen);
@@ -459,7 +419,7 @@
     
     }else if(!strcmp(major_algo, "MAJOR_MARK_SWEEP")){
       MAJOR_ALGO = MAJOR_MARK_SWEEP;
-    
+      is_collector_local_alloc = FALSE;
     }else{
      WARN2("gc.base","\nWarning: GC algorithm setting incorrect. Will use default value.\n");
       
@@ -969,7 +929,7 @@
     <<"\nGC: NOS size: "<<verbose_print_size(gc->nos->committed_heap_size)<<", free size:"<<verbose_print_size(blocked_space_free_mem_size((Blocked_Space*)gc->nos))<<"\n");
 }
 
-inline void gc_gen_initial_verbose_info(GC_Gen *gc)
+inline void gc_gen_init_verbose(GC_Gen *gc)
 {
   INFO2("gc.base","GC_Gen initial:"
     <<"\nmax heap size: "<<verbose_print_size(max_heap_size_bytes)
@@ -993,7 +953,7 @@
   INFO2("gc.base", "GC: All Collection info: "
     <<"\nGC: total nos alloc obj size: "<<verbose_print_size(stats->total_size_nos_alloc)
     <<"\nGC: total los alloc obj num: "<<stats->obj_num_los_alloc
-    <<"\nGC: total nos alloc obj size:"<<verbose_print_size(stats->total_size_los_alloc)
+    <<"\nGC: total los alloc obj size:"<<verbose_print_size(stats->total_size_los_alloc)
     <<"\nGC: total collection num: "<<gc->num_collections
     <<"\nGC: minor collection num: "<<stats->num_minor_collections
     <<"\nGC: major collection num: "<<stats->num_major_collections
@@ -1001,6 +961,5 @@
     <<"\nGC: total appliction execution time: "<<stats->total_mutator_time<<"\n");
 #endif
 }
-
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h Wed Nov 21 22:40:31 2007
@@ -57,13 +57,13 @@
 
 typedef struct GC_Gen {
   /* <-- First couple of fields overloaded as GC */
-  // heap allocation bases for segmented heap
-  void* alloc_heap_start[3];
+  void* physical_start;
   void* heap_start;
   void* heap_end;
   POINTER_SIZE_INT reserved_heap_size;
   POINTER_SIZE_INT committed_heap_size;
   unsigned int num_collections;
+  Boolean in_collection;
   int64 time_collections;
   float survive_ratio;  
   
@@ -94,7 +94,7 @@
 
   /* FIXME:: this is wrong! root_set belongs to mutator */
   Vector_Block* root_set;
-  Vector_Block* weak_root_set;
+  Vector_Block* weakroot_set;
   Vector_Block* uncompressed_root_set;
   
   //For_LOS_extend
@@ -134,7 +134,7 @@
 void gc_gen_destruct(GC_Gen *gc);
 void gc_gen_collection_verbose_info(GC_Gen *gc, int64 pause_time, int64 mutator_time);
 void gc_gen_space_verbose_info(GC_Gen *gc);
-void gc_gen_initial_verbose_info(GC_Gen *gc);
+void gc_gen_init_verbose(GC_Gen *gc);
 void gc_gen_wrapup_verbose(GC_Gen* gc);
                         
 inline POINTER_SIZE_INT gc_gen_free_memory_size(GC_Gen* gc)
@@ -201,7 +201,6 @@
 extern Boolean GEN_NONGEN_SWITCH ;
 
 #endif /* ifndef _GC_GEN_H_ */
-
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp Wed Nov 21 22:40:31 2007
@@ -25,7 +25,7 @@
 
 #define NOS_COPY_RESERVE_DELTA (GC_BLOCK_SIZE_BYTES<<1)
 /*Tune this number in case that MOS could be too small, so as to avoid or put off fall back.*/
-#define GC_MOS_MIN_EXTRA_REMAIN_SIZE (36*MB)
+#define MOS_EXTRA_REMAIN_SIZE_TO_ANTI_FALLBACK (36*MB)
 
 struct Mspace;
 void mspace_set_expected_threshold_ratio(Mspace* mspace, float threshold_ratio);
@@ -246,11 +246,11 @@
       if(fspace->num_collections != 1) assert(minor_surviving_size == mspace->last_alloced_size);
   
       float k = Tslow * fspace->num_collections/fspace->time_collections;
-      float m = ((float)minor_surviving_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ));
+      float m = ((float)minor_surviving_size)*1.0f/((float)(SMax - MOS_EXTRA_REMAIN_SIZE_TO_ANTI_FALLBACK ));
       float free_ratio_threshold = mini_free_ratio(k, m);
 
-      if(SMax > GC_MOS_MIN_EXTRA_REMAIN_SIZE)
-        free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE );
+      if(SMax > MOS_EXTRA_REMAIN_SIZE_TO_ANTI_FALLBACK )
+        free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * (SMax - MOS_EXTRA_REMAIN_SIZE_TO_ANTI_FALLBACK  ) + MOS_EXTRA_REMAIN_SIZE_TO_ANTI_FALLBACK  );
       else
         free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * SMax);
 
@@ -310,9 +310,22 @@
   /*If total free is smaller than one block, there is no room for us to adjust*/
   if(total_free < GC_BLOCK_SIZE_BYTES)  return FALSE;
 
-  /* predict NOS + NOS*ratio = total_free_size */
+  /*To reserve some MOS space to avoid the fallback situation. 
+   *But we need to ensure NOS has at least one block.
+   *We use the following formula here:
+   *NOS_SIZE + NOS_SIZE * anti_fallback_ratio + NOS_SIZE * survive_ratio = TOTAL_FREE*/
+  POINTER_SIZE_INT anti_fallback_size_in_mos;
+  float ratio_of_anti_fallback_size_to_nos = 0.25f;
   POINTER_SIZE_INT nos_reserve_size;
-  nos_reserve_size = (POINTER_SIZE_INT)(((float)total_free)/(1.0f + fspace->survive_ratio));
+  anti_fallback_size_in_mos = (POINTER_SIZE_INT)(((float)total_free * ratio_of_anti_fallback_size_to_nos)/(1.0f + ratio_of_anti_fallback_size_to_nos + fspace->survive_ratio));
+  if(anti_fallback_size_in_mos > MOS_EXTRA_REMAIN_SIZE_TO_ANTI_FALLBACK ){
+    /*If the computed anti_fallback_size_in_mos is too large, we reset it back to MOS_EXTRA_REMAIN_SIZE_TO_ANTI_FALLBACK.*/
+    anti_fallback_size_in_mos = MOS_EXTRA_REMAIN_SIZE_TO_ANTI_FALLBACK ;
+    /*Here, anti_fallback_size_in_mos must be smaller than TOTAL_FREE*/
+    nos_reserve_size = (POINTER_SIZE_INT)(((float)(total_free - anti_fallback_size_in_mos))/(1.0f + fspace->survive_ratio)); 
+  }else{
+    nos_reserve_size = (POINTER_SIZE_INT)(((float)total_free)/(1.0f + ratio_of_anti_fallback_size_to_nos + fspace->survive_ratio));
+  }
   /*NOS should not be zero, if there is only one block in non-los, i.e. in the former if sentence,
     *if total_free = GC_BLOCK_SIZE_BYTES, then the computed nos_reserve_size is between zero
     *and GC_BLOCK_SIZE_BYTES. In this case, we assign this block to NOS*/
@@ -321,16 +334,6 @@
 #ifdef STATIC_NOS_MAPPING
   if(nos_reserve_size > fspace->reserved_heap_size) nos_reserve_size = fspace->reserved_heap_size;
 #endif  
-  /*To reserve some MOS space to avoid fallback situation. 
-   *But we need ensure nos has at least one block */
-  POINTER_SIZE_INT reserve_in_mos = GC_MOS_MIN_EXTRA_REMAIN_SIZE;
-  while (reserve_in_mos >= GC_BLOCK_SIZE_BYTES){
-    if(nos_reserve_size >= reserve_in_mos + GC_BLOCK_SIZE_BYTES){
-      nos_reserve_size -= reserve_in_mos;    
-      break;
-    }
-    reserve_in_mos >>= 1;
-  }
 
   new_nos_size = round_down_to_size((POINTER_SIZE_INT)nos_reserve_size, GC_BLOCK_SIZE_BYTES); 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp Wed Nov 21 22:40:31 2007
@@ -37,9 +37,9 @@
     return (jint)tls_gc_offset;
 }
 
-JNIEXPORT jlong JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getNosBoundary(JNIEnv *e, jclass c)
+JNIEXPORT jobject JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getNosBoundary(JNIEnv *e, jclass c)
 {
-    return (jlong)nos_boundary;
+    return (jobject)nos_boundary;
 }
 
 JNIEXPORT jboolean JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getGenMode(JNIEnv *e, jclass c)

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp Wed Nov 21 22:40:31 2007
@@ -79,13 +79,12 @@
   }
 
   /* scan non-array object */
-  int *offset_scanner = init_object_scanner(p_obj);
-  while (true) {
-    REF *p_ref = (REF*)offset_get_ref(offset_scanner, p_obj);
-    if (p_ref == NULL) break; /* terminating ref slot */
-  
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+            
+  for(unsigned int i=0; i<num_refs; i++){
+    REF* p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);        
     scan_slot(collector, p_ref);
-    offset_scanner = offset_next_ref(offset_scanner);
   }
 
 #ifndef BUILD_IN_REFERENT
@@ -232,6 +231,9 @@
   fspace_block_iterate_init((Fspace*)((GC_Gen*)collector->gc)->nos);
 }
 #endif
+
+
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp Wed Nov 21 22:40:31 2007
@@ -161,15 +161,14 @@
   }
 
   /* scan non-array object */
-  int *offset_scanner = init_object_scanner(p_obj);
-  while (true) {
-    REF* p_ref = (REF*)offset_get_ref(offset_scanner, p_obj);
-    if (p_ref == NULL) break; /* terminating ref slot */
-  
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+            
+  for(unsigned int i=0; i<num_refs; i++){
+    REF* p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);        
     Partial_Reveal_Object*  p_element = read_slot(p_ref);
     if((p_element > start_address) && (p_element < end_address))
       write_slot(p_ref, (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_element - addr_diff));
-    offset_scanner = offset_next_ref(offset_scanner);
   }
 
   return;
@@ -242,9 +241,9 @@
 #endif
   
 #ifndef BUILD_IN_REFERENT
-  gc_update_finref_repointed_refs(gc);
+  gc_update_finref_repointed_refs(gc, FALSE);
 #endif
-  gc_reupdate_repointed_sets(gc, gc->metadata->weak_roots_pool, start_address, end_address, addr_diff);
+  gc_reupdate_repointed_sets(gc, gc->metadata->weakroot_pool, start_address, end_address, addr_diff);
 
   update_rootset_interior_pointer();
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp Wed Nov 21 22:40:31 2007
@@ -204,7 +204,7 @@
       gc_update_weakref_ignore_finref(gc);
     }
 #endif
-    identify_dead_weak_roots(gc, gc->metadata->weak_roots_pool);
+    gc_identify_dead_weak_roots(gc);
 
     /* let other collectors go */
     num_marking_collectors++; 
@@ -254,7 +254,7 @@
   if( ++old_num == num_active_collectors ){
     /* last collector's world here */
     lspace_fix_repointed_refs(collector, lspace);   
-    gc_fix_rootset(collector);
+    gc_fix_rootset(collector, FALSE);
     if(lspace->move_object)  lspace_sliding_compact(collector, lspace);    
     num_fixing_collectors++; 
   }
@@ -291,9 +291,6 @@
     TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]  finished");
     return;
   }
-
-  gc_set_pool_clear(gc->metadata->gc_rootset_pool);
-  gc_set_pool_clear(gc->metadata->weak_roots_pool);
   
   TRACE2("gc.process", "GC: collector[0]  finished");
   return;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp Wed Nov 21 22:40:31 2007
@@ -443,7 +443,7 @@
       gc_update_weakref_ignore_finref(gc);
     }
 #endif
-    identify_dead_weak_roots(gc, gc->metadata->weak_roots_pool);
+    gc_identify_dead_weak_roots(gc);
 
     if( gc->tuner->kind != TRANS_NOTHING ) gc_compute_space_tune_size_after_marking(gc);
     assert(!(gc->tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
@@ -504,7 +504,7 @@
   /*last collector's world here */
   if( ++old_num == num_active_collectors ){
     lspace_fix_repointed_refs(collector, lspace);
-    gc_fix_rootset(collector);
+    gc_fix_rootset(collector, FALSE);
     gc_init_block_for_sliding_compact(gc, mspace);
     /*LOS_Shrink: This operation moves objects in LOS, and should be part of Pass 4
       *lspace_sliding_compact is not binded with los shrink, we could slide compact los individually.
@@ -575,9 +575,6 @@
   
   /* Leftover: **************************************************
    */
-  
-  gc_set_pool_clear(gc->metadata->gc_rootset_pool);
-  gc_set_pool_clear(gc->metadata->weak_roots_pool);
   
   TRACE2("gc.process", "GC: collector[0]  finished");
   return;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp Wed Nov 21 22:40:31 2007
@@ -65,7 +65,11 @@
 {
   if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
   
-  sspace_collection(gc_ms_get_sspace(gc));
+  Sspace *sspace = gc_ms_get_sspace(gc);
+  
+  sspace_collection(sspace);
+  
+  sspace_reset_after_collection(sspace);
   
   if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h Wed Nov 21 22:40:31 2007
@@ -29,11 +29,13 @@
 
 typedef struct GC_MS {
   /* <-- First couple of fields overloaded as GC */
+  void* physical_start;
   void *heap_start;
   void *heap_end;
   POINTER_SIZE_INT reserved_heap_size;
   POINTER_SIZE_INT committed_heap_size;
   unsigned int num_collections;
+  Boolean in_collection;
   int64 time_collections;
   float survive_ratio;
   
@@ -64,6 +66,7 @@
   
   /* FIXME:: this is wrong! root_set belongs to mutator */
   Vector_Block *root_set;
+  Vector_Block *weakroot_set;
   Vector_Block *uncompressed_root_set;
   
   //For_LOS_extend

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp Wed Nov 21 22:40:31 2007
@@ -121,27 +121,6 @@
   allocator->local_chunks = local_chunks;
 }
 
-void allocator_clear_local_chunks(Allocator *allocator, Boolean reuse_pfc)
-{
-  Sspace *sspace = gc_get_sspace(allocator->gc);
-  Size_Segment **size_segs = sspace->size_segments;
-  Chunk_Header ***local_chunks = allocator->local_chunks;
-  
-  for(unsigned int i = SIZE_SEGMENT_NUM; i--;){
-    if(!size_segs[i]->local_alloc){
-      assert(!local_chunks[i]);
-      continue;
-    }
-    Chunk_Header **chunks = local_chunks[i];
-    assert(chunks);
-    for(unsigned int j = size_segs[i]->chunk_num; j--;){
-      if(chunks[j] && reuse_pfc)
-        sspace_put_pfc(sspace, chunks[j]);
-      chunks[j] = NULL;
-    }
-  }
-}
-
 void allocactor_destruct_local_chunks(Allocator *allocator)
 {
   Sspace *sspace = gc_get_sspace(allocator->gc);
@@ -173,12 +152,54 @@
   STD_FREE(local_chunks);
 }
 
+static void allocator_clear_local_chunks(Allocator *allocator)
+{
+  Sspace *sspace = gc_get_sspace(allocator->gc);
+  Size_Segment **size_segs = sspace->size_segments;
+  Chunk_Header ***local_chunks = allocator->local_chunks;
+  
+  for(unsigned int i = SIZE_SEGMENT_NUM; i--;){
+    if(!size_segs[i]->local_alloc){
+      assert(!local_chunks[i]);
+      continue;
+    }
+    Chunk_Header **chunks = local_chunks[i];
+    assert(chunks);
+    for(unsigned int j = size_segs[i]->chunk_num; j--;){
+      if(chunks[j])
+        sspace_put_pfc(sspace, chunks[j]);
+      chunks[j] = NULL;
+    }
+  }
+}
+
+static void gc_clear_mutator_local_chunks(GC *gc)
+{
+#ifdef USE_MARK_SWEEP_GC
+  /* release local chunks of each mutator in unique mark-sweep GC */
+  Mutator *mutator = gc->mutator_list;
+  while(mutator){
+    allocator_clear_local_chunks((Allocator*)mutator);
+    mutator = mutator->next;
+  }
+#endif
+}
+
+void gc_clear_collector_local_chunks(GC *gc)
+{
+  if(!gc_match_kind(gc, MAJOR_COLLECTION)) return;
+  /* release local chunks of each collector in gen GC */
+  for(unsigned int i = gc->num_collectors; i--;){
+    allocator_clear_local_chunks((Allocator*)gc->collectors[i]);
+  }
+}
+
 #ifdef USE_MARK_SWEEP_GC
 void sspace_set_space_statistic(Sspace *sspace)
 {
-  GC_MS* gc = (GC_MS*)sspace->gc;
+  GC_MS *gc = (GC_MS*)sspace->gc;
 
-  for(unsigned int i=0; i<gc->num_collectors; ++i){
+  for(unsigned int i = 0; i < gc->num_collectors; ++i){
     sspace->surviving_obj_num += gc->collectors[i]->live_obj_num;
     sspace->surviving_obj_size += gc->collectors[i]->live_obj_size;
   }
@@ -193,6 +214,9 @@
   GC *gc = sspace->gc;
   sspace->num_collections++;
   
+  gc_clear_mutator_local_chunks(gc);
+  gc_clear_collector_local_chunks(gc);
+  
 #ifdef SSPACE_ALLOC_INFO
   sspace_alloc_info_summary();
 #endif
@@ -207,7 +231,8 @@
   }
   if(sspace->need_compact || gc_match_kind(gc, MAJOR_COLLECTION))
     sspace->need_fix = TRUE;
-  //printf("\n\n>>>>>>>>%s>>>>>>>>>>>>\n\n", sspace->need_compact ? "SWEEP COMPACT" : "MARK SWEEP");
+
+  //printf("\n\n>>>>>>>>%s>>>>>>>>>>>>\n\n", sspace->need_compact ? "COMPACT" : "NO COMPACT");
 #ifdef SSPACE_VERIFY
   sspace_verify_before_collection(gc);
   sspace_verify_vtable_mark(gc);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h Wed Nov 21 22:40:31 2007
@@ -81,8 +81,6 @@
 void *sspace_thread_local_alloc(unsigned size, Allocator *allocator);
 void *sspace_alloc(unsigned size, Allocator *allocator);
 
-void sspace_reset_for_allocation(Sspace *sspace);
-
 void sspace_collection(Sspace *sspace);
 
 void allocator_init_local_chunks(Allocator *allocator);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp Wed Nov 21 22:40:31 2007
@@ -203,6 +203,8 @@
   p_obj = sspace_try_alloc(size, allocator);
   if(p_obj)  return p_obj;
   
+  if(allocator->gc->in_collection) return NULL;
+  
   vm_gc_lock_enum();
   /* after holding lock, try if other thread collected already */
   p_obj = sspace_try_alloc(size, allocator);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h Wed Nov 21 22:40:31 2007
@@ -37,20 +37,6 @@
   return (Boolean)(table[word_index] & (cur_alloc_color << index_in_word));
 }
 
-#ifdef _DEBUG
-static Boolean slot_is_free_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
-{
-  assert(!slot_is_alloc_in_table(table, slot_index));
-  
-  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
-  unsigned int word_index = color_bits_index / BITS_PER_WORD;
-  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
-  
-  return !(table[word_index] & cur_alloc_color << index_in_word);
-  
-}
-#endif
-
 inline unsigned int composed_slot_index(unsigned int word_index, unsigned int index_in_word)
 {
   unsigned int color_bits_index = word_index*BITS_PER_WORD + index_in_word;
@@ -177,9 +163,6 @@
   ++chunk->alloc_num;
   assert(chunk->base);
   void *p_obj = (void*)((POINTER_SIZE_INT)chunk->base + ((POINTER_SIZE_INT)chunk->slot_size * slot_index));
-#ifdef _DEBUG  
-  slot_is_free_in_table(table, slot_index);
-#endif
   alloc_slot_in_table(table, slot_index);
   if(chunk->status & CHUNK_NEED_ZEROING)
     memset(p_obj, 0, chunk->slot_size);
@@ -187,15 +170,15 @@
   sspace_verify_free_area((POINTER_SIZE_INT*)p_obj, chunk->slot_size);
 #endif
 
+  if(p_obj && gc_is_concurrent_mark_phase())
+    obj_mark_black_in_table((Partial_Reveal_Object*)p_obj, chunk->slot_size);
+
 #ifdef ENABLE_FRESH_CHUNK_ALLOC
   if(chunk->status & CHUNK_FRESH){
     ++slot_index;
     chunk->slot_index = (slot_index < chunk->slot_num) ? slot_index : MAX_SLOT_INDEX;
   } else
 #endif
-
-  if(p_obj && gc_is_concurrent_mark_phase()) obj_mark_black_in_table((Partial_Reveal_Object*)p_obj,chunk->slot_size);
-
     chunk->slot_index = next_free_slot_index_in_table(table, slot_index, chunk->slot_num);
   if(chunk->slot_index == MAX_SLOT_INDEX){
     chunk->status = CHUNK_USED | CHUNK_NORMAL;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp Wed Nov 21 22:40:31 2007
@@ -16,6 +16,7 @@
  */
 
 #include "sspace_chunk.h"
+#include "sspace_verify.h"
 
 #define NUM_ALIGNED_FREE_CHUNK_BUCKET   (HYPER_OBJ_THRESHOLD >> NORMAL_CHUNK_SHIFT_COUNT)
 #define NUM_UNALIGNED_FREE_CHUNK_BUCKET (HYPER_OBJ_THRESHOLD >> CHUNK_GRANULARITY_BITS)
@@ -124,29 +125,11 @@
     free_chunk_list_clear(&unaligned_free_chunk_lists[i]);
   
   free_chunk_list_clear(&hyper_free_chunk_list);
-
-#ifdef USE_MARK_SWEEP_GC
-  /* release local chunks of each mutator in unique mark-sweep GC */
-  Mutator *mutator = gc->mutator_list;
-  while(mutator){
-    allocator_clear_local_chunks((Allocator*)mutator, FALSE);
-    mutator = mutator->next;
-  }
-#endif
-}
-
-void gc_clear_collector_local_chunks(GC *gc)
-{
-  assert(gc_match_kind(gc, MAJOR_COLLECTION));
-  /* release local chunks of each collector in gen GC */
-  for(unsigned int i = gc->num_collectors; i--;){
-    allocator_clear_local_chunks((Allocator*)gc->collectors[i], TRUE);
-  }
 }
 
 /* Simply put the free chunk to the according list
  * Don't merge continuous free chunks
- * The merging job is taken by sweeping
+ * The merging job is executed in the merging phase
  */
 static void list_put_free_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
 {
@@ -164,6 +147,29 @@
   ++list->chunk_num;
   unlock(list->lock);
 }
+/* The difference between this and the normal variant above is that this function need not hold the list lock.
+ * It is intended for calls made from the chunk-partitioning functions.
+ * Please refer to the comments of sspace_get_hyper_free_chunk().
+ */
+static void list_put_hyper_free_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+  chunk->status = CHUNK_FREE;
+  chunk->prev = NULL;
+
+  /* lock(list->lock);
+   * the list lock must have been held like in getting a free chunk and partitioning it
+   * or needn't be held like in sspace initialization and the merging phase
+   */
+  chunk->next = list->head;
+  if(list->head)
+    list->head->prev = chunk;
+  list->head = chunk;
+  if(!list->tail)
+    list->tail = chunk;
+  assert(list->chunk_num < ~((unsigned int)0));
+  ++list->chunk_num;
+  //unlock(list->lock);
+}
 
 static Free_Chunk *free_list_get_head(Free_Chunk_List *list)
 {
@@ -189,14 +195,14 @@
   assert(!(chunk_size % CHUNK_GRANULARITY));
   
   if(chunk_size > HYPER_OBJ_THRESHOLD)
-    list_put_free_chunk(sspace->hyper_free_chunk_list, chunk);
+    list_put_hyper_free_chunk(sspace->hyper_free_chunk_list, chunk);
   else if(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK) && !(chunk_size & NORMAL_CHUNK_LOW_MASK))
     list_put_free_chunk(&sspace->aligned_free_chunk_lists[ALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
   else
     list_put_free_chunk(&sspace->unaligned_free_chunk_lists[UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
 }
 
-static Free_Chunk *partition_normal_free_chunk(Sspace *sspace, Free_Chunk *chunk)
+static inline Free_Chunk *partition_normal_free_chunk(Sspace *sspace, Free_Chunk *chunk)
 {
   assert(CHUNK_SIZE(chunk) > NORMAL_CHUNK_SIZE_BYTES);
   
@@ -224,14 +230,18 @@
  * the first one's size is chunk_size
  * the second will be inserted into free chunk list according to its size
  */
-static void partition_abnormal_free_chunk(Sspace *sspace,Free_Chunk *chunk, unsigned int chunk_size)
+static inline Free_Chunk *partition_abnormal_free_chunk(Sspace *sspace,Free_Chunk *chunk, unsigned int chunk_size)
 {
   assert(CHUNK_SIZE(chunk) > chunk_size);
   
-  Free_Chunk *back_chunk = (Free_Chunk*)((POINTER_SIZE_INT)chunk + chunk_size);
-  back_chunk->adj_next = chunk->adj_next;
-  chunk->adj_next = (Chunk_Header_Basic*)back_chunk;
-  sspace_put_free_chunk(sspace, back_chunk);
+  Free_Chunk *new_chunk = (Free_Chunk*)((POINTER_SIZE_INT)chunk->adj_next - chunk_size);
+  assert(chunk < new_chunk);
+  
+  new_chunk->adj_next = chunk->adj_next;
+  chunk->adj_next = (Chunk_Header_Basic*)new_chunk;
+  sspace_put_free_chunk(sspace, chunk);
+  new_chunk->status = CHUNK_FREE;
+  return new_chunk;
 }
 
 Free_Chunk *sspace_get_normal_free_chunk(Sspace *sspace)
@@ -303,7 +313,7 @@
       chunk = free_list_get_head(list);
     if(chunk){
       if(search_size > chunk_size)
-        partition_abnormal_free_chunk(sspace, chunk, chunk_size);
+        chunk = partition_abnormal_free_chunk(sspace, chunk, chunk_size);
       zeroing_free_chunk(chunk);
       return chunk;
     }
@@ -323,7 +333,7 @@
       chunk = free_list_get_head(list);
     if(chunk){
       if(index > UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size))
-        partition_abnormal_free_chunk(sspace, chunk, chunk_size);
+        chunk = partition_abnormal_free_chunk(sspace, chunk, chunk_size);
       zeroing_free_chunk(chunk);
       return chunk;
     }
@@ -340,6 +350,7 @@
   
   Free_Chunk_List *list = sspace->hyper_free_chunk_list;
   lock(list->lock);
+  
   Free_Chunk *prev_chunk = NULL;
   Free_Chunk *chunk = list->head;
   while(chunk){
@@ -358,16 +369,29 @@
     prev_chunk = chunk;
     chunk = chunk->next;
   }
-  unlock(list->lock);
+  
+  /* unlock(list->lock);
+   * We move this unlock to the end of this func for the following reason.
+ * A case might occur where two allocators are asking for a hyper chunk at the same time,
+ * and there is only one chunk in the list, which can satisfy the requirements of both of them.
+ * If allocator 1 gets the list lock first, it will take the unique chunk and release the lock here.
+ * Then, when allocator 2 holds the list lock after allocator 1 releases it,
+ * it will find there is no hyper chunk in the list and return NULL.
+ * In fact the unique hyper chunk is large enough:
+ * if allocator 1 chops off one piece and puts the rest back into the list, allocator 2 will be satisfied.
+ * So we would get wrong info here if we released the lock here, which makes us invoke GC much earlier than needed.
+   */
   
   if(chunk){
     if(is_normal_chunk)
       chunk = partition_normal_free_chunk(sspace, chunk);
     else if(CHUNK_SIZE(chunk) > chunk_size)
-      partition_abnormal_free_chunk(sspace, chunk, chunk_size);
+      chunk = partition_abnormal_free_chunk(sspace, chunk, chunk_size);
     if(!is_normal_chunk)
       zeroing_free_chunk(chunk);
   }
+  
+  unlock(list->lock);
   
   return chunk;
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h Wed Nov 21 22:40:31 2007
@@ -301,8 +301,9 @@
 inline void sspace_put_pfc(Sspace *sspace, Chunk_Header *chunk)
 {
   unsigned int size = chunk->slot_size;
-  assert(chunk->base && chunk->alloc_num);
   assert(chunk && (size <= SUPER_OBJ_THRESHOLD));
+  assert(chunk->base && chunk->alloc_num);
+  assert(chunk->alloc_num < chunk->slot_num);
   assert(chunk->slot_index < chunk->slot_num);
   
   Size_Segment **size_segs = sspace->size_segments;
@@ -319,6 +320,20 @@
   }
 }
 
+inline void sspace_rebuild_chunk_chain(Sspace *sspace)
+{
+  Chunk_Header_Basic *sspace_ceiling = (Chunk_Header_Basic*)space_heap_end((Space*)sspace);
+  Chunk_Header_Basic *prev_chunk = (Chunk_Header_Basic*)space_heap_start((Space*)sspace);
+  Chunk_Header_Basic *chunk = prev_chunk->adj_next;
+  prev_chunk->adj_prev = NULL;
+  
+  while(chunk < sspace_ceiling){
+    chunk->adj_prev = prev_chunk;
+    prev_chunk = chunk;
+    chunk = chunk->adj_next;
+  }
+}
+
 
 extern void sspace_init_chunks(Sspace *sspace);
 extern void sspace_clear_chunk_list(GC *gc);
@@ -337,7 +352,6 @@
 
 extern void zeroing_free_chunk(Free_Chunk *chunk);
 
-extern void allocator_clear_local_chunks(Allocator *allocator, Boolean reuse_pfc);
 extern void gc_clear_collector_local_chunks(GC *gc);
 
 extern void sspace_collect_free_chunks_to_list(Sspace *sspace, Free_Chunk_List *list);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp Wed Nov 21 22:40:31 2007
@@ -29,7 +29,7 @@
   float free_mem_ratio = (float)free_mem_size / sspace->committed_heap_size;
 
 #ifdef USE_MARK_SWEEP_GC
-  if((free_mem_ratio > SSPACE_COMPACT_RATIO) && (sspace->gc->cause != GC_CAUSE_RUNTIME_FORCE_GC)){
+  if(!gc_mark_is_concurrent() && (free_mem_ratio > SSPACE_COMPACT_RATIO) && (sspace->gc->cause != GC_CAUSE_RUNTIME_FORCE_GC)){
 #else
   if(gc_match_kind(sspace->gc, MAJOR_COLLECTION)){
 #endif
@@ -226,5 +226,6 @@
     }
   }
 }
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_fallback_mark.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_fallback_mark.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_fallback_mark.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_fallback_mark.cpp Wed Nov 21 22:40:31 2007
@@ -19,12 +19,32 @@
 #include "../finalizer_weakref/finalizer_weakref.h"
 
 static Sspace *sspace_in_fallback_marking;
-static FORCE_INLINE Boolean obj_mark(Partial_Reveal_Object *obj)
+
+
+static FORCE_INLINE Boolean obj_mark_black(Partial_Reveal_Object *obj)
 {
-  if(obj_belongs_to_space(obj, (Space*)sspace_in_fallback_marking))
-    return obj_mark_black_in_table(obj);
-  else
+  if(obj_belongs_to_space(obj, (Space*)sspace_in_fallback_marking)){
+    Boolean marked_by_self = obj_mark_black_in_table(obj);
+
+#ifndef USE_MARK_SWEEP_GC
+    /* When fallback happens, some objects in MOS have their fw bit set, which is actually their mark bit in the last minor gc.
+     * If we don't clear it, some objects that were not moved will be mistaken for having been moved in the coming fixing phase.
+     */
+    if(marked_by_self){
+      Obj_Info_Type oi = obj->obj_info;
+      Obj_Info_Type new_oi = oi & DUAL_MARKBITS_MASK;
+      while(new_oi != oi){
+        Obj_Info_Type temp = (Obj_Info_Type)atomic_cas32((volatile Obj_Info_Type*)get_obj_info_addr(obj), new_oi, oi);
+        if(temp == oi) break;
+        oi = obj->obj_info;
+        new_oi = oi & DUAL_MARKBITS_MASK;
+      }
+    }
+#endif
+    return marked_by_self;
+  } else {
     return obj_mark_in_vt(obj);
+  }
 }
 
 static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
@@ -47,7 +67,7 @@
     write_slot(p_ref, p_obj);
   }
   
-  if(!obj_mark(p_obj))
+  if(!obj_mark_black(p_obj))
     return;
   
   if(!object_has_ref_field(p_obj)) return;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp Wed Nov 21 22:40:31 2007
@@ -29,10 +29,28 @@
 
 static FORCE_INLINE Boolean obj_mark_black(Partial_Reveal_Object *obj)
 {
-  if(obj_belongs_to_space(obj, (Space*)sspace_in_marking))
-    return obj_mark_black_in_table(obj);
-  else
+  if(obj_belongs_to_space(obj, (Space*)sspace_in_marking)){
+    Boolean marked_by_self = obj_mark_black_in_table(obj);
+
+#ifndef USE_MARK_SWEEP_GC
+    /* When fallback happens, some objects in MOS have their fw bit set, which is actually their mark bit in the last minor gc.
+     * If we don't clear it, some objects that were not moved will be mistaken for having been moved in the coming fixing phase.
+     */
+    if(marked_by_self){
+      Obj_Info_Type oi = obj->obj_info;
+      Obj_Info_Type new_oi = oi & DUAL_MARKBITS_MASK;
+      while(new_oi != oi){
+        Obj_Info_Type temp = (Obj_Info_Type)atomic_cas32((volatile Obj_Info_Type*)get_obj_info_addr(obj), new_oi, oi);
+        if(temp == oi) break;
+        oi = obj->obj_info;
+        new_oi = oi & DUAL_MARKBITS_MASK;
+      }
+    }
+#endif
+    return marked_by_self;
+  } else {
     return obj_mark_in_vt(obj);
+  }
 }
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp Wed Nov 21 22:40:31 2007
@@ -33,6 +33,9 @@
 
 static Chunk_Header_Basic *volatile next_chunk_for_fixing;
 
+
+/******************** General interfaces for Mark-Sweep-Compact ***********************/
+
 static void ops_color_flip(void)
 {
   POINTER_SIZE_INT temp = cur_alloc_color;
@@ -71,30 +74,26 @@
 }
 
 
-static void sspace_init_chunk_for_ref_fixing(Sspace *sspace)
-{
-  next_chunk_for_fixing = (Chunk_Header_Basic*)space_heap_start((Space*)sspace);
-  next_chunk_for_fixing->adj_prev = NULL;
-}
+/******************** Interfaces for Forwarding ***********************/
 
 static void nos_init_block_for_forwarding(GC_Gen *gc_gen)
 { blocked_space_block_iterator_init((Blocked_Space*)gc_get_nos(gc_gen)); }
 
 static inline void block_forward_live_objects(Collector *collector, Sspace *sspace, Block_Header *cur_block)
 {
-  void *start_pos;
-  Partial_Reveal_Object *p_obj = block_get_first_marked_object(cur_block, &start_pos);
+  Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)cur_block->base;
+  Partial_Reveal_Object *block_end = (Partial_Reveal_Object*)cur_block->free;
   
-  while(p_obj ){
-    assert(obj_is_marked_in_vt(p_obj));
+  for(; p_obj < block_end; p_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj + vm_object_size(p_obj))){
+    if(!obj_is_marked_in_vt(p_obj)) continue;
+    
     obj_clear_dual_bits_in_vt(p_obj);
     Partial_Reveal_Object *p_target_obj = collector_forward_object(collector, p_obj); /* Could be implemented with a optimized function */
     if(!p_target_obj){
-      assert(collector->gc->collect_result == FALSE);
+      assert(collector->result == FALSE);
       printf("Out of mem in forwarding nos!\n");
       exit(0);
     }
-    p_obj = block_get_next_marked_object(cur_block, &start_pos);
   }
 }
 
@@ -110,7 +109,59 @@
   }
 }
 
-static void normal_chunk_fix_repointed_refs(Chunk_Header *chunk)
+
+/******************** Interfaces for Ref Fixing ***********************/
+
+static void sspace_init_chunk_for_ref_fixing(Sspace *sspace)
+{
+  next_chunk_for_fixing = (Chunk_Header_Basic*)space_heap_start((Space*)sspace);
+  next_chunk_for_fixing->adj_prev = NULL;
+}
+
+static inline void slot_double_fix(REF *p_ref)
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if(!p_obj) return;
+  
+  if(obj_is_fw_in_oi(p_obj)){
+    p_obj = obj_get_fw_in_oi(p_obj);
+    assert(p_obj);
+    if(obj_is_fw_in_oi(p_obj)){
+      p_obj = obj_get_fw_in_oi(p_obj);
+      assert(p_obj);
+    }
+    write_slot(p_ref, p_obj);
+  }
+}
+
+static inline void object_double_fix_ref_slots(Partial_Reveal_Object *p_obj)
+{
+  if(!object_has_ref_field(p_obj)) return;
+  
+  /* scan array object */
+  if(object_is_array(p_obj)){
+    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+    assert(!obj_is_primitive_array(p_obj));
+    
+    int32 array_length = array->array_len;
+    REF *p_refs = (REF*)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    for(int i = 0; i < array_length; i++){
+      slot_double_fix(p_refs + i);
+    }
+    return;
+  }
+  
+  /* scan non-array object */
+  unsigned int num_refs = object_ref_field_num(p_obj);    
+  int* ref_iterator = object_ref_iterator_init(p_obj);
+    
+  for(unsigned int i=0; i<num_refs; i++){  
+    REF * p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);  
+    slot_double_fix(p_ref);
+  }    
+}
+
+static void normal_chunk_fix_repointed_refs(Chunk_Header *chunk, Boolean double_fix)
 {
   /* Init field slot_index and depad the last index word in table for fixing */
   chunk->slot_index = 0;
@@ -128,7 +179,10 @@
     unsigned int slot_size = chunk->slot_size;
     Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)slot_index_to_addr(chunk, 0);
     for(unsigned int i = alloc_num; i--;){
-      object_fix_ref_slots(p_obj);
+      if(double_fix)
+        object_double_fix_ref_slots(p_obj);
+      else
+        object_fix_ref_slots(p_obj);
 #ifdef SSPACE_VERIFY
       sspace_verify_fix_in_compact();
 #endif
@@ -138,7 +192,10 @@
     while(alloc_num){
       Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(chunk);
       assert(p_obj);
-      object_fix_ref_slots(p_obj);
+      if(double_fix)
+        object_double_fix_ref_slots(p_obj);
+      else
+        object_fix_ref_slots(p_obj);
 #ifdef SSPACE_VERIFY
       sspace_verify_fix_in_compact();
 #endif
@@ -152,29 +209,34 @@
   }
 }
 
-static void abnormal_chunk_fix_repointed_refs(Chunk_Header *chunk)
+static void abnormal_chunk_fix_repointed_refs(Chunk_Header *chunk, Boolean double_fix)
 {
-  object_fix_ref_slots((Partial_Reveal_Object*)chunk->base);
+  if(double_fix)
+    object_double_fix_ref_slots((Partial_Reveal_Object*)chunk->base);
+  else
+    object_fix_ref_slots((Partial_Reveal_Object*)chunk->base);
 #ifdef SSPACE_VERIFY
   sspace_verify_fix_in_compact();
 #endif
 }
 
-static void sspace_fix_repointed_refs(Collector *collector, Sspace *sspace)
+static void sspace_fix_repointed_refs(Collector *collector, Sspace *sspace, Boolean double_fix)
 {
   Chunk_Header_Basic *chunk = sspace_grab_next_chunk(sspace, &next_chunk_for_fixing, TRUE);
   
   while(chunk){
     if(chunk->status & CHUNK_NORMAL)
-      normal_chunk_fix_repointed_refs((Chunk_Header*)chunk);
+      normal_chunk_fix_repointed_refs((Chunk_Header*)chunk, double_fix);
     else if(chunk->status & CHUNK_ABNORMAL)
-      abnormal_chunk_fix_repointed_refs((Chunk_Header*)chunk);
+      abnormal_chunk_fix_repointed_refs((Chunk_Header*)chunk, double_fix);
     
     chunk = sspace_grab_next_chunk(sspace, &next_chunk_for_fixing, TRUE);
   }
 }
 
 
+/******************** Main body of Mark-Sweep-Compact ***********************/
+
 static volatile unsigned int num_marking_collectors = 0;
 static volatile unsigned int num_sweeping_collectors = 0;
 static volatile unsigned int num_compacting_collectors = 0;
@@ -194,11 +256,13 @@
   /* Pass 1: **************************************************
      Mark all live objects in heap ****************************/
   atomic_cas32(&num_marking_collectors, 0, num_active_collectors+1);
-  
-  if(gc_match_kind(gc, FALLBACK_COLLECTION))
-    sspace_fallback_mark_scan(collector, sspace);
-  else
-    sspace_mark_scan(collector, sspace);
+
+  if(!gc_mark_is_concurrent()){  
+    if(gc_match_kind(gc, FALLBACK_COLLECTION))
+      sspace_fallback_mark_scan(collector, sspace);
+    else
+      sspace_mark_scan(collector, sspace);
+  }
   
   unsigned int old_num = atomic_inc32(&num_marking_collectors);
   if( ++old_num == num_active_collectors ){
@@ -214,7 +278,7 @@
       gc_update_weakref_ignore_finref(gc);
     }
 #endif
-    identify_dead_weak_roots(gc, gc->metadata->weak_roots_pool);
+    gc_identify_dead_weak_roots(gc);
     gc_init_chunk_for_sweep(gc, sspace);
     /* let other collectors go */
     num_marking_collectors++;
@@ -233,17 +297,14 @@
     sspace_sweep_time(FALSE, sspace->need_compact);
 #endif
     ops_color_flip();
-#ifdef SSPACE_CHUNK_INFO
-    sspace_chunks_info(sspace, TRUE);
-#endif
 #ifdef SSPACE_VERIFY
     sspace_verify_after_sweep(gc);
 #endif
 
-    sspace_merge_free_chunks(gc, sspace);
-    
-    if(gc_match_kind(gc, MAJOR_COLLECTION))
+    if(gc_match_kind(gc, MAJOR_COLLECTION)){
+      sspace_merge_free_chunks(gc, sspace);
       nos_init_block_for_forwarding((GC_Gen*)gc);
+    }
     if(sspace->need_compact)
       sspace_init_pfc_pool_iterator(sspace);
     if(sspace->need_fix)
@@ -256,12 +317,17 @@
   /* Optional Pass: *******************************************
      Forward live obj in nos to mos (sspace) ******************/
   if(gc_match_kind(gc, MAJOR_COLLECTION)){
-    atomic_cas32( &num_forwarding_collectors, 0, num_active_collectors);
+    atomic_cas32( &num_forwarding_collectors, 0, num_active_collectors+1);
     
     collector_forward_nos_to_sspace(collector, sspace);
     
-    atomic_inc32(&num_forwarding_collectors);
-    while(num_forwarding_collectors != num_active_collectors);
+    old_num = atomic_inc32(&num_forwarding_collectors);
+    if( ++old_num == num_active_collectors ){
+      gc_clear_collector_local_chunks(gc);
+      num_forwarding_collectors++;
+    }
+    
+    while(num_forwarding_collectors != num_active_collectors + 1);
   }
   
   /* Optional Pass: *******************************************
@@ -274,7 +340,8 @@
     /* If we need forward nos to mos, i.e. in major collection, an extra fixing phase after compaction is needed. */
     old_num = atomic_inc32(&num_compacting_collectors);
     if( ++old_num == num_active_collectors ){
-      sspace_remerge_free_chunks(gc, sspace);
+      if(gc_match_kind(gc, MAJOR_COLLECTION))
+        sspace_remerge_free_chunks(gc, sspace);
       /* let other collectors go */
       num_compacting_collectors++;
     }
@@ -286,7 +353,12 @@
   if(sspace->need_fix){
     atomic_cas32( &num_fixing_collectors, 0, num_active_collectors);
     
-    sspace_fix_repointed_refs(collector, sspace);
+    /* When we forwarded nos AND compacted sspace,
+     * we need to double-fix object slots,
+     * because some objects are forwarded from nos to mos and compacted into another chunk afterwards.
+     */
+    Boolean double_fix = gc_match_kind(gc, MAJOR_COLLECTION) && sspace->need_compact;
+    sspace_fix_repointed_refs(collector, sspace, double_fix);
     
     atomic_inc32(&num_fixing_collectors);
     while(num_fixing_collectors != num_active_collectors);
@@ -298,20 +370,19 @@
   /* Leftover: *************************************************/
   
   if(sspace->need_fix){
-    gc_fix_rootset(collector);
+    Boolean double_fix = gc_match_kind(gc, MAJOR_COLLECTION) && sspace->need_compact;
+    gc_fix_rootset(collector, double_fix);
 #ifdef SSPACE_TIME
     sspace_fix_time(FALSE);
 #endif
   }
   
-  //gc->root_set = NULL;  // FIXME:: should be placed to a more appopriate place
-  gc_set_pool_clear(gc->metadata->gc_rootset_pool);
+  if(!gc_match_kind(gc, MAJOR_COLLECTION))
+    sspace_merge_free_chunks(gc, sspace);
+
 #ifdef USE_MARK_SWEEP_GC
   sspace_set_space_statistic(sspace);
 #endif 
-
-  if(gc_match_kind(gc, MAJOR_COLLECTION))
-    gc_clear_collector_local_chunks(gc);
 
 #ifdef SSPACE_VERIFY
   sspace_verify_after_collection(gc);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h Wed Nov 21 22:40:31 2007
@@ -22,7 +22,7 @@
 #include "sspace_verify.h"
 
 #define PFC_REUSABLE_RATIO 0.1
-#define SSPACE_COMPACT_RATIO 0.15
+#define SSPACE_COMPACT_RATIO 0.06
 
 inline Boolean chunk_is_reusable(Chunk_Header *chunk)
 { return (float)(chunk->slot_num-chunk->alloc_num)/chunk->slot_num > PFC_REUSABLE_RATIO; }
@@ -316,20 +316,15 @@
     return FALSE;
 }
 
-inline Boolean obj_is_alloc_color_in_table(Partial_Reveal_Object *obj)
+inline Boolean obj_is_alloc_in_color_table(Partial_Reveal_Object *obj)
 {
   POINTER_SIZE_INT *p_color_word;
   unsigned int index_in_word;
   p_color_word = get_color_word_in_table(obj, index_in_word);
   POINTER_SIZE_INT current_word = *p_color_word;
-  POINTER_SIZE_INT obj_alloc_color_bit_in_word = cur_alloc_color<< index_in_word;
+  POINTER_SIZE_INT obj_alloc_color_bit_in_word = cur_alloc_color << index_in_word;
   
-
-  if(current_word & obj_alloc_color_bit_in_word)
-    return TRUE;
-  else
-    return FALSE;
-
+  return (Boolean)(current_word & obj_alloc_color_bit_in_word);
 }
 
 inline Boolean obj_need_take_snaptshot(Partial_Reveal_Object *obj)

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp Wed Nov 21 22:40:31 2007
@@ -253,6 +253,13 @@
   free_chunk_list.head = NULL;
   free_chunk_list.tail = NULL;
   
+  /* If a new chunk is partitioned from a bigger one in the forwarding phase,
+   * its adj_prev has not been set yet.
+   * And the adj_prev field of the chunk next to it will be wrong as well.
+   * So a rebuilding operation is needed here.
+   */
+  sspace_rebuild_chunk_chain(sspace);
+  
   /* Collect free chunks from sspace free chunk lists to one list */
   sspace_collect_free_chunks_to_list(sspace, &free_chunk_list);
   

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp Wed Nov 21 22:40:31 2007
@@ -30,6 +30,7 @@
 unsigned int MINOR_COLLECTORS = 0;
 unsigned int MAJOR_COLLECTORS = 0;
 static volatile unsigned int live_collector_num = 0;
+Boolean is_collector_local_alloc = TRUE;
 
 void collector_restore_obj_info(Collector* collector)
 {

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h?rev=597313&r1=597312&r2=597313&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h Wed Nov 21 22:40:31 2007
@@ -29,6 +29,8 @@
 struct Chunk_Header;
 struct Free_Chunk_List;
 
+extern Boolean is_collector_local_alloc;
+
 #define NORMAL_SIZE_SEGMENT_GRANULARITY_BITS  8
 #define NORMAL_SIZE_SEGMENT_GRANULARITY (1 << NORMAL_SIZE_SEGMENT_GRANULARITY_BITS)
 #define NORMAL_SIZE_SEGMENT_NUM (GC_OBJ_SIZE_THRESHOLD / NORMAL_SIZE_SEGMENT_GRANULARITY)



Mime
View raw message