harmony-commits mailing list archives

From x..@apache.org
Subject svn commit: r559382 [1/2] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ los/ mark_compact/ mark_sweep/ thread/ trace_forward/ verify/
Date Wed, 25 Jul 2007 10:02:11 GMT
Author: xli
Date: Wed Jul 25 03:02:07 2007
New Revision: 559382

URL: http://svn.apache.org/viewvc?view=rev&rev=559382
Log:
HARMONY-4531 : performance improvement patch and code refactoring. It also fixes a couple of bugs. (The main design change in this patch is to mark-sweep the LOS during minor collections and to always slide-compact the LOS during major collections.)
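[Editor's note] A minimal sketch of the design stated in the log: the large object space (LOS) is mark-swept on minor collections and always slide-compacted on major collections. Nothing below is taken from the patch; the names and stub bodies are illustrative assumptions only.

    // Illustrative only -- not code from this commit.
    enum CollectionKind { MINOR, MAJOR };

    void los_mark_sweep()    { /* mark live large objects, sweep dead ones in place */ }
    void los_slide_compact() { /* slide live large objects toward one end of the LOS */ }

    void collect_los(CollectionKind kind)
    {
      if (kind == MINOR)
        los_mark_sweep();      // frequent minor GCs never move large objects
      else
        los_slide_compact();   // every major GC defragments the LOS, so the
                               // tuner can move the LOS boundary reliably
    }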

Removed:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/los_extention_mark_scan.cpp
Modified:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_metadata.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_metadata.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.h

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h Wed Jul 25 03:02:07 2007
@@ -24,6 +24,7 @@
 #include "gc_common.h"
 #include "compressed_ref.h"
 extern Boolean IS_MOVE_COMPACT;
+extern void* los_boundary;
 
 inline void slot_fix(REF* p_ref)
 {
@@ -31,15 +32,21 @@
   if(!p_obj) return;
 
   if(IS_MOVE_COMPACT){
-    if(obj_is_moved(p_obj))
+    /* This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep. */
+    //if(obj_is_moved(p_obj)) 
+    /*Fixme: los_boundery ruined the modularity of gc_common.h*/
+    if(p_obj < los_boundary){
+      write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+    }else{
       *p_ref = obj_get_fw_in_table(p_obj);
+    }
   }else{
-    if(obj_is_fw_in_oi(p_obj) && obj_is_moved(p_obj)){
+    if(obj_is_fw_in_oi(p_obj)){
       /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens.
        * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time.
+       * This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.
        * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted,
-       * since those which can be scanned in MOS & NOS must have been set fw bit in oi.
-       */
+       * since those which can be scanned in MOS & NOS must have been set fw bit in oi.  */
       assert((POINTER_SIZE_INT)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS);
       write_slot(p_ref, obj_get_fw_in_oi(p_obj));
     }

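[Editor's note] With the LOS minor sweep in place, slot_fix above no longer tests obj_is_moved() under IS_MOVE_COMPACT; it dispatches on the object's address instead. A simplified, self-contained sketch of that dispatch, assuming the LOS sits at the low end of the heap (so addresses below los_boundary are LOS objects); the declarations are stand-ins for the real helpers, which take Partial_Reveal_Object*:

    extern void* los_boundary;               // first address above the large object space
    void* obj_get_fw_in_oi(void* p_obj);     // forwarding pointer stored in the object header
    void* obj_get_fw_in_table(void* p_obj);  // forwarding target computed from block offset tables

    void* new_location(void* p_obj)
    {
      // Under move-compact, LOS objects (below los_boundary) are slide-compacted and
      // carry their forwarding pointer in the object info word, while objects above
      // the boundary are relocated via the per-block offset tables.
      return (p_obj < los_boundary) ? obj_get_fw_in_oi(p_obj)
                                    : obj_get_fw_in_table(p_obj);
    }
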
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h Wed Jul 25 03:02:07 2007
@@ -77,7 +77,7 @@
 #define OFFSET_TABLE_SIZE_WORDS (OFFSET_TABLE_SIZE_BYTES >> BIT_SHIFT_TO_BYTES_PER_WORD)
 #define OBJECT_INDEX_TO_OFFSET_TABLE(p_obj)   (ADDRESS_OFFSET_IN_BLOCK_BODY(p_obj) >> SECTOR_SIZE_SHIFT_COUNT)
 
-#define GC_BLOCK_HEADER_SIZE_BYTES (OFFSET_TABLE_SIZE_BYTES + GC_BLOCK_HEADER_VARS_SIZE_BYTES)
+#define GC_BLOCK_HEADER_SIZE_BYTES ((OFFSET_TABLE_SIZE_BYTES + GC_BLOCK_HEADER_VARS_SIZE_BYTES  + GC_OBJECT_ALIGN_MASK ) & (~GC_OBJECT_ALIGN_MASK))
 #define GC_BLOCK_BODY_SIZE_BYTES (GC_BLOCK_SIZE_BYTES - GC_BLOCK_HEADER_SIZE_BYTES)
 #define GC_BLOCK_BODY(block) ((void*)((POINTER_SIZE_INT)(block) + GC_BLOCK_HEADER_SIZE_BYTES))
 /*LOS_Shrink: We have some fake block headers when trying to compute mos object target, 
@@ -353,5 +353,6 @@
 #endif
 
 #endif //#ifndef _BLOCK_H_
+
 
 

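[Editor's note] The new GC_BLOCK_HEADER_SIZE_BYTES rounds the header size up to the GC object alignment with the usual (x + mask) & ~mask idiom, so the block body always starts on an aligned address. A standalone illustration of the idiom (the constants here are made up, not the GC's):

    #include <cassert>
    #include <cstddef>

    // Round x up to the next multiple of align (align must be a power of two).
    std::size_t round_up(std::size_t x, std::size_t align)
    {
      const std::size_t mask = align - 1;
      return (x + mask) & ~mask;
    }

    int main()
    {
      assert(round_up(20, 8) == 24);  // e.g. a 20-byte raw header becomes 24 bytes
      assert(round_up(24, 8) == 24);  // already-aligned sizes are unchanged
      return 0;
    }
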
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Wed Jul 25 03:02:07 2007
@@ -23,6 +23,7 @@
 #include "../thread/mutator.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 #include "../gen/gen.h"
+#include "../mark_sweep/gc_ms.h"
 #include "../common/space_tuner.h"
 #include "interior_pointer.h"
 
@@ -279,7 +280,11 @@
   gc->cause = gc_cause;
   gc_decide_collection_kind((GC_Gen*)gc, gc_cause);
 
+  gc_gen_update_space_before_gc((GC_Gen*)gc);
+
+#ifndef ONLY_SSPACE_IN_HEAP
   gc_compute_space_tune_size_before_marking(gc, gc_cause);
+#endif
 
 #ifdef MARK_BIT_FLIPPING
   if(gc_match_kind(gc, MINOR_COLLECTION)) mark_bit_flip();
@@ -301,7 +306,11 @@
 
   if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
 
+#ifndef ONLY_SSPACE_IN_HEAP
   gc_gen_reclaim_heap((GC_Gen*)gc);
+#else
+  gc_ms_reclaim_heap((GC_MS*)gc);
+#endif
   
   gc_reset_interior_pointer_table();
 
@@ -310,9 +319,11 @@
   int64 pause_time = time_now() - start_time;  
   gc->time_collections += pause_time;
 
+#ifndef ONLY_SSPACE_IN_HEAP
   gc_adjust_heap_size(gc, pause_time);
 
   gc_gen_adapt((GC_Gen*)gc, pause_time);
+#endif
 
   if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
   
@@ -326,12 +337,15 @@
 #endif
   }
 
+#ifndef ONLY_SSPACE_IN_HEAP
   gc_space_tuner_reset(gc);
-
+  gc_gen_update_space_after_gc((GC_Gen*)gc);
   gc_assign_free_area_to_mutators(gc);
+#endif
 
   vm_reclaim_native_objs();  
   vm_resume_threads_after();
   return;
 }
+
 

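[Editor's note] The ONLY_SSPACE_IN_HEAP guards above select, at compile time, between the generational heap (GC_Gen) and an experimental heap made of a single mark-sweep space (GC_MS), skipping the space tuner and heap adaptation in the latter case. A sketch of the pattern, with stub types and bodies standing in for the real ones:

    // Illustrative only -- types and bodies are stand-ins, not the real GC structures.
    struct GC {};
    struct GC_Gen : GC {};
    struct GC_MS  : GC {};

    void gc_gen_reclaim_heap(GC_Gen*) { /* generational minor/major collection */ }
    void gc_ms_reclaim_heap(GC_MS*)   { /* single-space mark-sweep collection  */ }

    void reclaim_heap(GC* gc)
    {
    #ifndef ONLY_SSPACE_IN_HEAP
      gc_gen_reclaim_heap((GC_Gen*)gc);   // default build: generational heap
    #else
      gc_ms_reclaim_heap((GC_MS*)gc);     // mark-sweep-only heap
    #endif
    }
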
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Wed Jul 25 03:02:07 2007
@@ -72,6 +72,8 @@
 
 #define USE_32BITS_HASHCODE
 
+//#define ONLY_SSPACE_IN_HEAP
+
 typedef void (*TaskType)(void*);
 
 enum Collection_Algorithm{
@@ -100,7 +102,8 @@
   MINOR_COLLECTION = 0x1,
   MAJOR_COLLECTION = 0x2,
   FALLBACK_COLLECTION = 0x4,
-  EXTEND_COLLECTION = 0x8
+  EXTEND_COLLECTION = 0x8,
+  UNIQUE_SWEEP_COLLECTION = 0x10
 };
 
 extern Boolean IS_FALLBACK_COMPACTION;  /* only for mark/fw bits debugging purpose */

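[Editor's note] The collection kinds are single-bit flags, which is why later hunks can test several kinds at once, e.g. gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION). A self-contained sketch of that bitmask matching; gc_match_kind's real body is not shown in this commit, so match_kind below is an assumed equivalent:

    #include <cassert>

    enum Collection_Kind {
      MINOR_COLLECTION        = 0x1,
      MAJOR_COLLECTION        = 0x2,
      FALLBACK_COLLECTION     = 0x4,
      EXTEND_COLLECTION       = 0x8,
      UNIQUE_SWEEP_COLLECTION = 0x10   // new in this patch: the mark-sweep-only heap
    };

    bool match_kind(unsigned current_kind, unsigned wanted_kinds)
    {
      return (current_kind & wanted_kinds) != 0;   // any overlapping bit matches
    }

    int main()
    {
      assert(match_kind(FALLBACK_COLLECTION, MAJOR_COLLECTION | FALLBACK_COLLECTION));
      assert(!match_kind(MINOR_COLLECTION,   MAJOR_COLLECTION | FALLBACK_COLLECTION));
      return 0;
    }
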
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Wed Jul 25 03:02:07 2007
@@ -23,6 +23,7 @@
 #include "compressed_ref.h"
 
 #include "../gen/gen.h"
+#include "../mark_sweep/gc_ms.h"
 #include "interior_pointer.h"
 #include "../thread/collector.h"
 #include "../verify/verify_live_heap.h"
@@ -42,7 +43,13 @@
 int gc_init() 
 {      
   assert(p_global_gc == NULL);
-  GC* gc = (GC*)STD_MALLOC(sizeof(GC_Gen));
+
+#ifndef ONLY_SSPACE_IN_HEAP
+  unsigned int gc_struct_size = sizeof(GC_Gen);
+#else
+  unsigned int gc_struct_size = sizeof(GC_MS);
+#endif
+  GC* gc = (GC*)STD_MALLOC(gc_struct_size);
   assert(gc);
   memset(gc, 0, sizeof(GC));  
   p_global_gc = gc;
@@ -52,8 +59,15 @@
   gc_tls_init();
 
   gc_metadata_initialize(gc); /* root set and mark stack */
-  
+
+#ifndef ONLY_SSPACE_IN_HEAP
   gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes);
+#else
+  gc_ms_initialize((GC_MS*)gc, min_heap_size_bytes, max_heap_size_bytes);
+#endif
+  
+  set_native_finalizer_thread_flag(!IGNORE_FINREF);
+  set_native_ref_enqueue_thread_flag(!IGNORE_FINREF);
 
 #ifndef BUILD_IN_REFERENT
   gc_finref_metadata_initialize(gc);
@@ -69,7 +83,13 @@
 void gc_wrapup() 
 { 
   GC* gc =  p_global_gc;
+
+#ifndef ONLY_SSPACE_IN_HEAP
   gc_gen_destruct((GC_Gen*)gc);
+#else
+  gc_ms_destruct((GC_MS*)gc);
+#endif
+
   gc_metadata_destruct(gc); /* root set and mark stack */
 #ifndef BUILD_IN_REFERENT
   gc_finref_metadata_destruct(gc);
@@ -154,20 +174,32 @@
 void gc_thread_kill(void* gc_info)
 {  mutator_destruct(p_global_gc, gc_info);  }
 
-int64 gc_free_memory() 
+int64 gc_free_memory()
 {
+#ifndef ONLY_SSPACE_IN_HEAP
   return (int64)gc_gen_free_memory_size((GC_Gen*)p_global_gc);
+#else
+  return (int64)gc_ms_free_memory_size((GC_MS*)p_global_gc);
+#endif
 }
 
 /* java heap size.*/
 int64 gc_total_memory() 
 {
-  return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc)); 
+#ifndef ONLY_SSPACE_IN_HEAP
+  return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc));
+#else
+  return (int64)((POINTER_SIZE_INT)gc_ms_total_memory_size((GC_MS*)p_global_gc));
+#endif
 }
 
 int64 gc_max_memory() 
 {
-  return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc)); 
+#ifndef ONLY_SSPACE_IN_HEAP
+  return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc));
+#else
+  return (int64)((POINTER_SIZE_INT)gc_ms_total_memory_size((GC_MS*)p_global_gc));
+#endif
 }
 
 int64 gc_get_collection_count()
@@ -233,6 +265,10 @@
 #else //USE_32BITS_HASHCODE
 int32 gc_get_hashcode(Managed_Object_Handle p_object)
 {
+#ifdef ONLY_SSPACE_IN_HEAP
+  return (int32)p_object;
+#endif
+
   Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)p_object;
   if(!p_obj) return 0;
   assert(address_belongs_to_gc_heap(p_obj, p_global_gc));
@@ -288,7 +324,11 @@
     // data structures in not consistent for heap iteration
     if (!JVMTI_HEAP_ITERATION) return;
 
+#ifndef ONLY_SSPACE_IN_HEAP
     gc_gen_iterate_heap((GC_Gen *)p_global_gc);
+#else
+    gc_ms_iterate_heap((GC_MS*)p_global_gc);
+#endif
 }
 
 void gc_set_mutator_block_flag()

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp Wed Jul 25 03:02:07 2007
@@ -177,21 +177,26 @@
       iter = vector_block_iterator_advance(root_set,iter);
 
       Partial_Reveal_Object* p_obj = read_slot(p_ref);
-      if(IS_MOVE_COMPACT){
-        if(obj_is_moved(p_obj))
-          *p_ref = obj_get_fw_in_table(p_obj);
-      } else {
-        if( // obj_is_fw_in_oi(p_obj) && //NOTE:: we removed the minor_copy algorithm at the moment, so we don't need this check
-            obj_is_moved(p_obj)){
-          /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens.
-           * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time.
-           * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted,
-           * since those which can be scanned in MOS & NOS must have been set fw bit in oi.
-           */
-          assert(address_belongs_to_gc_heap(obj_get_fw_in_oi(p_obj), gc));
-          write_slot(p_ref , obj_get_fw_in_oi(p_obj));
+        if(IS_MOVE_COMPACT){
+        /*This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.*/
+        //if(obj_is_moved(p_obj)) 
+          /*Fixme: los_boundery ruined the modularity of gc_common.h*/
+          if(p_obj < los_boundary){
+            write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+          }else{
+            *p_ref = obj_get_fw_in_table(p_obj);
+          }
+        }else{
+          if(obj_is_fw_in_oi(p_obj)){
+            /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens.
+             * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time.
+             * This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.
+             * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted,
+             * since those which can be scanned in MOS & NOS must have been set fw bit in oi.  */
+            assert((POINTER_SIZE_INT)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS);
+            write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+          }
         }
-      }
     }
     root_set = pool_iterator_next(pool);
   } 
@@ -366,5 +371,6 @@
   
   return;  
 }
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h Wed Jul 25 03:02:07 2007
@@ -38,10 +38,18 @@
   unsigned int collect_algorithm;
   GC* gc;
   Boolean move_object;
-  /*Size allocted after last collection. */
-  POINTER_SIZE_INT alloced_size;
-  /*For_statistic*/  
-  POINTER_SIZE_INT surviving_size;
+
+  /* Size allocted since last minor collection. */
+  volatile uint64 last_alloced_size;
+  /* Size allocted since last major collection. */
+  uint64 accumu_alloced_size;
+  /* Total size allocated since VM starts. */
+  uint64 total_alloced_size;
+
+  /* Size survived from last collection. */
+  uint64 last_surviving_size;
+  /* Size survived after a certain period. */
+  uint64 period_surviving_size;  
 }Space;
 
 inline POINTER_SIZE_INT space_committed_size(Space* space){ return space->committed_heap_size;}
@@ -71,10 +79,19 @@
   unsigned int collect_algorithm;
   GC* gc;
   Boolean move_object;
-  /*Size allocted after last collection. */
-  POINTER_SIZE_INT alloced_size;
-  /*For_statistic*/  
-  POINTER_SIZE_INT surviving_size;
+
+  /* Size allocted since last minor collection. */
+  volatile uint64 last_alloced_size;
+  /* Size allocted since last major collection. */
+  uint64 accumu_alloced_size;
+  /* Total size allocated since VM starts. */
+  uint64 total_alloced_size;
+
+  /* Size survived from last collection. */
+  uint64 last_surviving_size;
+  /* Size survived after a certain period. */
+  uint64 period_surviving_size;  
+
   /* END of Space --> */
 
   Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */

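[Editor's note] The single alloced_size/surviving_size pair is replaced by counters at three time scales. The code that maintains them lives in gen.cpp, which is not shown in this excerpt, so the life cycle below is an assumption about the intended bookkeeping, not a quote:

    #include <cstdint>

    struct SpaceStats {
      volatile uint64_t last_alloced_size;   // bytes allocated since the last minor GC
      uint64_t accumu_alloced_size;          // bytes allocated since the last major GC
      uint64_t total_alloced_size;           // bytes allocated since VM start
      uint64_t last_surviving_size;          // bytes surviving the last collection
      uint64_t period_surviving_size;        // bytes surviving over a longer period
    };

    // Assumed roll-up at a minor collection: close the per-minor window.
    void on_minor_gc(SpaceStats& s, uint64_t survived)
    {
      s.accumu_alloced_size += s.last_alloced_size;
      s.total_alloced_size  += s.last_alloced_size;
      s.last_alloced_size    = 0;
      s.last_surviving_size  = survived;
    }

    // Assumed roll-up at a major collection: also close the accumulation period.
    void on_major_gc(SpaceStats& s, uint64_t survived)
    {
      on_minor_gc(s, survived);
      s.accumu_alloced_size   = 0;
      s.period_surviving_size = survived;
    }
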
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h Wed Jul 25 03:02:07 2007
@@ -64,7 +64,7 @@
 }
 
 inline int hashcode_gen(void* addr)
-{ return (int)(POINTER_SIZE_INT)addr; }
+{ return (int)(((POINTER_SIZE_INT)addr) >> 2); }
 
 typedef struct Hashcode_Buf{
   Seq_List* list;

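[Editor's note] Heap objects are at least 4-byte aligned, so the low two address bits are always zero; the new hashcode_gen shifts them out so generated hashcodes are no longer all multiples of 4 (which wastes buckets in power-of-two hash tables). A standalone illustration:

    #include <cassert>
    #include <cstdint>

    int hashcode_from_address(uintptr_t addr)
    {
      return (int)(addr >> 2);   // drop the always-zero alignment bits
    }

    int main()
    {
      // Two adjacent 4-byte-aligned addresses now map to consecutive hashcodes.
      assert(hashcode_from_address(0x1000) + 1 == hashcode_from_address(0x1004));
      return 0;
    }
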
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp Wed Jul 25 03:02:07 2007
@@ -37,6 +37,7 @@
 static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
 {
   vm_notify_obj_alive( (void *)p_obj);
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
   if( !object_has_ref_field(p_obj) ) return;
   
   REF *p_ref;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp Wed Jul 25 03:02:07 2007
@@ -29,39 +29,38 @@
 Space* gc_get_los(GC_Gen* gc);
 float mspace_get_expected_threshold_ratio(Mspace* mspace);
 POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace);
-    
+
+/* Calculate speed of allocation and waste memory of specific space respectively, 
+  * then decide whether to execute a space tuning according to the infomation.*/
 void gc_decide_space_tune(GC* gc, unsigned int cause)
 {
   Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
   Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);  
   Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);  
   Space_Tuner* tuner = gc->tuner;
-  //debug_adjust
-  assert(fspace->free_block_idx >= fspace->first_block_idx);
-  unsigned int nos_alloc_size = (fspace->free_block_idx - fspace->first_block_idx) * GC_BLOCK_SIZE_BYTES;
-  fspace->alloced_size = nos_alloc_size;
-  /*Fixme: LOS_Adaptive: There should be a condition here, that fspace->collection_num != 0*/
-  mspace->alloced_size += (unsigned int)((float)nos_alloc_size * fspace->survive_ratio);
-  /*For_statistic alloc speed: Speed could be represented by sum of alloced size.
-   *The right of this time los/mos alloc speed is the biggest.
-   */
-  tuner->speed_los = lspace->alloced_size;
-  tuner->speed_los = (tuner->speed_los + tuner->old_speed_los) >> 1;
-  tuner->speed_mos = mspace->alloced_size;
-  tuner->speed_mos = (tuner->speed_mos + tuner->old_speed_mos) >> 1;
-  
-  /*For_statistic wasted memory*/
-  POINTER_SIZE_INT curr_used_los = lspace->surviving_size + lspace->alloced_size;
-  POINTER_SIZE_INT curr_wast_los = 0;
+
+  tuner->speed_los = lspace->accumu_alloced_size;
+  tuner->speed_los = (tuner->speed_los + tuner->last_speed_los) >> 1;
+  /*The possible survivors from the newly allocated NOS should be counted into the speed of MOS*/
+  tuner->speed_mos = mspace->accumu_alloced_size +   (uint64)((float)fspace->last_alloced_size * fspace->survive_ratio);;
+  tuner->speed_mos = (tuner->speed_mos + tuner->last_speed_mos) >> 1;
+  tuner->speed_nos = fspace->accumu_alloced_size;
+  tuner->speed_nos = (tuner->speed_nos + tuner->last_speed_nos) >> 1;
+  
+  /*Statistic wasted memory*/
+  uint64 curr_used_los = lspace->last_surviving_size + lspace->last_alloced_size;
+  uint64 curr_wast_los = 0;
   if(gc->cause != GC_CAUSE_LOS_IS_FULL) curr_wast_los =  lspace->committed_heap_size - curr_used_los;
-  tuner->wast_los += curr_wast_los;
+  tuner->wast_los += (POINTER_SIZE_INT)curr_wast_los;
   
-  POINTER_SIZE_INT curr_used_mos = mspace->surviving_size + mspace->alloced_size;
+  uint64 curr_used_mos = 
+                                mspace->period_surviving_size + mspace->accumu_alloced_size + (uint64)(fspace->last_alloced_size * fspace->survive_ratio);
   float expected_mos_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace);
-  POINTER_SIZE_INT expected_mos = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * expected_mos_ratio);
-  POINTER_SIZE_INT curr_wast_mos = 0;
+  uint64 expected_mos = (uint64)((mspace->committed_heap_size + fspace->committed_heap_size) * expected_mos_ratio);
+  uint64 curr_wast_mos = 0;
   if(expected_mos > curr_used_mos) curr_wast_mos = expected_mos - curr_used_mos;
   tuner->wast_mos += curr_wast_mos;
+
   tuner->current_dw = ABS_DIFF(tuner->wast_mos, tuner->wast_los);
 
   /*For_statistic ds in heuristic*/
@@ -86,6 +85,14 @@
 extern POINTER_SIZE_INT min_los_size_bytes;
 extern POINTER_SIZE_INT min_none_los_size_bytes;
 
+/*Open this macro if we want to tune the space size according to allocation speed computed in major collection.
+  *By default, we will use allocation speed computed in minor collection. */
+//#define SPACE_TUNE_BY_MAJOR_SPEED
+
+
+/* The tuning size computing before marking is not precise. We only estimate the probable direction of space tuning.
+  * If this function decide to set TRANS_NOTHING, then we just call the normal marking function.
+  * Else, we call the marking function for space tuning.  */
 void gc_compute_space_tune_size_before_marking(GC* gc, unsigned int cause)
 {
   if(gc_match_kind(gc, MINOR_COLLECTION))  return;
@@ -93,39 +100,50 @@
   gc_decide_space_tune(gc, cause);
   
   Space_Tuner* tuner = gc->tuner;
-  if((tuner->speed_los == 0) && ( tuner->speed_mos == 0)) return;
+  assert((tuner->speed_los != 0) && ( tuner->speed_mos != 0)) ;
   if((!tuner->need_tune) && (!tuner->force_tune)) return;
   
   Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
   Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
   Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);
 
-  POINTER_SIZE_INT los_expect_surviving_sz = (POINTER_SIZE_INT)((float)(lspace->surviving_size + lspace->alloced_size) * lspace->survive_ratio);
+  POINTER_SIZE_INT los_expect_surviving_sz = (POINTER_SIZE_INT)((float)(lspace->last_surviving_size + lspace->last_alloced_size) * lspace->survive_ratio);
   POINTER_SIZE_INT los_expect_free_sz = ((lspace->committed_heap_size > los_expect_surviving_sz) ? 
                                                             (lspace->committed_heap_size - los_expect_surviving_sz) : 0);
-  POINTER_SIZE_INT mos_expect_survive_sz = (POINTER_SIZE_INT)((float)(mspace->surviving_size + mspace->alloced_size) * mspace->survive_ratio);
+  
+  POINTER_SIZE_INT mos_expect_survive_sz = (POINTER_SIZE_INT)((float)(mspace->period_surviving_size + mspace->accumu_alloced_size) * mspace->survive_ratio);
   float mos_expect_threshold_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace);
   POINTER_SIZE_INT mos_expect_threshold = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * mos_expect_threshold_ratio);
   POINTER_SIZE_INT mos_expect_free_sz = ((mos_expect_threshold > mos_expect_survive_sz)?
                                                             (mos_expect_threshold - mos_expect_survive_sz) : 0);
-  POINTER_SIZE_INT total_expect_free_sz = los_expect_free_sz + mos_expect_free_sz;
 
+  POINTER_SIZE_INT non_los_expect_surviving_sz = (POINTER_SIZE_INT)(mos_expect_survive_sz + fspace->last_alloced_size * fspace->survive_ratio);
+  POINTER_SIZE_INT non_los_committed_size = mspace->committed_heap_size + fspace->committed_heap_size;
+  POINTER_SIZE_INT non_los_expect_free_sz = (non_los_committed_size > non_los_expect_surviving_sz) ? (non_los_committed_size - non_los_expect_surviving_sz):(0) ;
+
+#ifdef SPACE_TUNE_BY_MAJOR_SPEED
+  /*Fixme: tuner->speed_los here should be computed by sliding compact LOS, to be implemented!*/
+  POINTER_SIZE_INT total_expect_free_sz = los_expect_free_sz + mos_expect_free_sz;
   float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los  + tuner->speed_mos);
   POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_expect_free_sz * new_los_ratio);
+#else
+  POINTER_SIZE_INT total_expect_free_sz = los_expect_free_sz + non_los_expect_free_sz;
+  float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los  + tuner->speed_nos);
+  POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_expect_free_sz * new_los_ratio);
+#endif
+
 
   /*LOS_Extend:*/
   if((new_free_los_sz > los_expect_free_sz) )
   { 
     tuner->kind = TRANS_FROM_MOS_TO_LOS;
     tuner->tuning_size = new_free_los_sz - los_expect_free_sz;
-    lspace->move_object = 0;
   }
   /*LOS_Shrink:*/
   else if(new_free_los_sz < los_expect_free_sz)
   {
     tuner->kind = TRANS_FROM_LOS_TO_MOS;
     tuner->tuning_size = los_expect_free_sz - new_free_los_sz;
-    lspace->move_object = 1;
   }
   /*Nothing*/
   else
@@ -137,7 +155,6 @@
   if( (!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size) ){
     tuner->kind = TRANS_NOTHING;
     tuner->tuning_size = 0;
-    lspace->move_object = 0;
   }
 
   /*If los or non-los is already the smallest size, there is no need to tune anymore.
@@ -148,14 +165,13 @@
     assert((lspace->committed_heap_size == min_los_size_bytes) || (fspace->committed_heap_size + mspace->committed_heap_size == min_none_los_size_bytes));
     tuner->kind = TRANS_NOTHING;
     tuner->tuning_size = 0;
-    lspace->move_object = 0;
   }
-  
+
+  /*If the strategy upward doesn't decide to extend los, but current GC is caused by los, force an extension here.*/
   if(tuner->force_tune){
     if(tuner->kind != TRANS_FROM_MOS_TO_LOS){
       tuner->kind = TRANS_FROM_MOS_TO_LOS;
       tuner->tuning_size = 0;
-      tuner->reverse_1 = 1;
     }
   }
 
@@ -167,6 +183,7 @@
 
 static POINTER_SIZE_INT non_los_live_obj_size;
 static  POINTER_SIZE_INT los_live_obj_size;
+/* Only when we call the special marking function for space tuning, we can get the accumulation of the sizes. */
 static void gc_compute_live_object_size_after_marking(GC* gc, POINTER_SIZE_INT non_los_size)
 {
   non_los_live_obj_size = 0;
@@ -204,6 +221,12 @@
 
 }
 
+/* If this GC is caused by a LOS allocation failure, we set the "force_tune" flag. 
+  * Attention1:  The space tuning strategy will extend or shrink LOS according to the wasted memory size and allocation speed.
+  * If the strategy decide to shrink or the size extended is not large enough to hold the failed object, we set the "doforce" flag in 
+  * function "gc_compute_space_tune_size_after_marking". And only if "force_tune" and "doforce" are both true, we decide the 
+  * size of extention by this function.
+  * Attention2: The total heap size might extend in this function. */
 static void compute_space_tune_size_for_force_tune(GC *gc, POINTER_SIZE_INT max_tune_for_min_non_los)
 {
   Space_Tuner* tuner = gc->tuner;
@@ -219,7 +242,6 @@
   if(lspace_free_size >= failure_size){
     tuner->tuning_size = 0;
     tuner->kind = TRANS_NOTHING;
-    lspace->move_object = 1;
   }else{
     tuner->tuning_size = failure_size -lspace_free_size;
     
@@ -250,25 +272,24 @@
       if(tuner->tuning_size > potential_max_tuning_size){
         tuner->tuning_size = 0;
         tuner->kind = TRANS_NOTHING;
-        lspace->move_object = 0;      
       }else{
         /*We have tuner->tuning_size > max_tuning_size up there.*/
         extend_heap_size = tuner->tuning_size - max_tuning_size;
         blocked_space_extend(fspace, (unsigned int)extend_heap_size);
         gc->committed_heap_size += extend_heap_size;
         tuner->kind = TRANS_FROM_MOS_TO_LOS;
-        lspace->move_object = 1;        
       }
-    } else{
+    }
+    else
+    {
       tuner->kind = TRANS_FROM_MOS_TO_LOS;
-      lspace->move_object = 1;
     }
   }
 
   return;
 }
 
-static void make_sure_tuning_size(GC* gc)
+static void check_tuning_size(GC* gc)
 {
   Space_Tuner* tuner = gc->tuner;
   Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
@@ -277,41 +298,48 @@
 
   POINTER_SIZE_INT los_free_sz =  ((lspace->committed_heap_size > los_live_obj_size) ? 
                                                    (lspace->committed_heap_size - los_live_obj_size) : 0);
+
+#ifdef SPACE_TUNE_BY_MAJOR_SPEED
   float mos_expect_threshold_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace);
   POINTER_SIZE_INT mos_expect_threshold = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * mos_expect_threshold_ratio);
   POINTER_SIZE_INT mos_free_sz = ((mos_expect_threshold > non_los_live_obj_size)?
                                                             (mos_expect_threshold - non_los_live_obj_size) : 0);
   POINTER_SIZE_INT total_free_sz = los_free_sz + mos_free_sz;
-
   float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los  + tuner->speed_mos);
   POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_free_sz * new_los_ratio);
+#else
+  POINTER_SIZE_INT non_los_committed_size = mspace->committed_heap_size + fspace->committed_heap_size;
+  POINTER_SIZE_INT non_los_free_sz = ((non_los_committed_size > non_los_live_obj_size)?
+                                                                (non_los_committed_size - non_los_live_obj_size):0);
+  POINTER_SIZE_INT total_free_sz = los_free_sz + non_los_free_sz;
+  float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los  + tuner->speed_nos);
+  POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_free_sz * new_los_ratio);
+#endif
 
   /*LOS_Extend:*/
   if((new_free_los_sz > los_free_sz) )
   { 
     tuner->kind = TRANS_FROM_MOS_TO_LOS;
     tuner->tuning_size = new_free_los_sz - los_free_sz;
-    lspace->move_object = 0; //This is necessary, because the flag might be set by gc_compute_space_tune_size_before_marking.
   }
   /*LOS_Shrink:*/
   else if(new_free_los_sz < los_free_sz)
   {
     tuner->kind = TRANS_FROM_LOS_TO_MOS;
     tuner->tuning_size = los_free_sz - new_free_los_sz;
-    lspace->move_object = 1;
   }
   /*Nothing*/
   else
   {
     tuner->tuning_size = 0;
-    tuner->kind = TRANS_NOTHING;//This is necessary, because the original value of kind might not be NOTHING.
+    /*This is necessary, because the original value of kind might not be NOTHING. */
+    tuner->kind = TRANS_NOTHING;
   }
 
   /*If not force tune, and the tuning size is too small, tuner will not take effect.*/
   if( (!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size) ){
     tuner->kind = TRANS_NOTHING;
     tuner->tuning_size = 0;
-    lspace->move_object = 0;
   }
 
   /*If los or non-los is already the smallest size, there is no need to tune anymore.
@@ -322,19 +350,19 @@
     assert((lspace->committed_heap_size == min_los_size_bytes) || (fspace->committed_heap_size + mspace->committed_heap_size == min_none_los_size_bytes));
     tuner->kind = TRANS_NOTHING;
     tuner->tuning_size = 0;
-    lspace->move_object = 0;
   }
   
   if(tuner->force_tune){
     if(tuner->kind != TRANS_FROM_MOS_TO_LOS){
       tuner->kind = TRANS_FROM_MOS_TO_LOS;
-      tuner->reverse_2 = 1;
+      tuner->reverse = 1;
     }
   }
 
   return;  
 }
 
+/* This is the real function that decide tuning_size, because we have know the total size of living objects after "mark_scan_heap_for_space_tune". */
 void gc_compute_space_tune_size_after_marking(GC *gc)
 {
   Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
@@ -347,7 +375,7 @@
 
   gc_compute_live_object_size_after_marking(gc, non_los_size);
 
-  make_sure_tuning_size(gc);
+  check_tuning_size(gc);
   
   /*We should assure that the non_los area is no less than min_none_los_size_bytes*/
   POINTER_SIZE_INT max_tune_for_min_non_los = 0;
@@ -369,15 +397,11 @@
         tuner->tuning_size = max_tuning_size;
       /*Round down so as not to break max_tuning_size*/
       tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
-      if(tuner->tuning_size == 0){
-        //If tuning size is zero, we should reset kind to NOTHING, in case that gc_init_block_for_collectors relink the block list.
-        tuner->kind = TRANS_NOTHING;
-        lspace->move_object = 0;
-      }
+       /*If tuning size is zero, we should reset kind to NOTHING, in case that gc_init_block_for_collectors relink the block list.*/
+      if(tuner->tuning_size == 0)  tuner->kind = TRANS_NOTHING;
     }else{ 
       tuner->tuning_size = 0;
       tuner->kind = TRANS_NOTHING;
-      lspace->move_object = 0;
     }
   }
   /*Not force tune, LOS Shrink*/
@@ -391,28 +415,22 @@
         tuner->tuning_size = max_tuning_size;
       /*Round down so as not to break max_tuning_size*/
       tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
-      if(tuner->tuning_size == 0){
-        tuner->kind = TRANS_NOTHING;
-        lspace->move_object = 0;
-      }
+      if(tuner->tuning_size == 0)  tuner->kind = TRANS_NOTHING;
     }else{
       /* this is possible because of the reservation in gc_compute_live_object_size_after_marking*/        
       tuner->tuning_size = 0;
       tuner->kind = TRANS_NOTHING;
-      lspace->move_object = 0;
     }
   }
 
   /*If the tuning strategy give a bigger tuning_size than failure size, we just follow the strategy and set noforce.*/
   Boolean doforce = TRUE;
   POINTER_SIZE_INT failure_size = lspace_get_failure_size((Lspace*)lspace);  
-  if( (tuner->kind == TRANS_FROM_MOS_TO_LOS) && (!tuner->reverse_2) && (tuner->tuning_size > failure_size) )
+  if( (tuner->kind == TRANS_FROM_MOS_TO_LOS) && (!tuner->reverse) && (tuner->tuning_size > failure_size) )
     doforce = FALSE;
 
-  /*If force tune*/
-  if( (tuner->force_tune) && (doforce) ){
+  if( (tuner->force_tune) && (doforce) )
     compute_space_tune_size_for_force_tune(gc, max_tune_for_min_non_los);
-  }
 
   return;
   
@@ -428,10 +446,12 @@
     tuner->need_tune = FALSE;
     tuner->force_tune = FALSE;
 
-    tuner->old_speed_los = tuner->speed_los;
-    tuner->old_speed_mos = tuner->speed_mos;
+    tuner->last_speed_los = tuner->speed_los;
+    tuner->last_speed_mos = tuner->speed_mos;
+    tuner->last_speed_nos = tuner->speed_nos;
     tuner->speed_los = 0;
     tuner->speed_mos = 0;
+    tuner->speed_nos = 0;    
 
     tuner->current_dw  = 0;
     tuner->current_ds = 0;
@@ -444,8 +464,7 @@
       tuner->wast_mos = 0;
     }
     tuner->kind = TRANS_NOTHING;    
-    tuner->reverse_1 = 0;
-    tuner->reverse_2 = 0;
+    tuner->reverse = 0;
   }
   
   return;  
@@ -518,5 +537,6 @@
   STD_FREE(tuner->interim_blocks);
   return;
 }
+
 
 

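[Editor's note] A worked example of the arithmetic the tuner uses on its default path (SPACE_TUNE_BY_MAJOR_SPEED not defined): free space is split between LOS and non-LOS in proportion to their allocation speeds, and the LOS is extended or shrunk by the difference from its current free size. The numbers below are made up purely for illustration:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      uint64_t speed_los = 8u  << 20;     // ~8 MB allocated in LOS since the last major GC
      uint64_t speed_nos = 56u << 20;     // ~56 MB allocated in NOS over the same period

      uint64_t los_free     = 20u << 20;  // expected free bytes in LOS after GC
      uint64_t non_los_free = 60u << 20;  // expected free bytes in MOS+NOS after GC
      uint64_t total_free   = los_free + non_los_free;

      // LOS gets free space in proportion to its share of the allocation rate.
      double   new_los_ratio = (double)speed_los / (double)(speed_los + speed_nos);
      uint64_t new_free_los  = (uint64_t)(total_free * new_los_ratio);  // 80 MB * 1/8 = 10 MB

      if (new_free_los > los_free)
        printf("extend LOS by %llu bytes\n", (unsigned long long)(new_free_los - los_free));
      else
        printf("shrink LOS by %llu bytes\n", (unsigned long long)(los_free - new_free_los));
      return 0;
    }
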
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h Wed Jul 25 03:02:07 2007
@@ -38,33 +38,33 @@
 
 typedef struct Space_Tuner{
     Transform_Kind kind;
-    /*Fixme: These flags is set if the los tuning status changes in the process of tuning, remove one of them*/
-    Boolean reverse_1;
-    Boolean reverse_2;
+    /*This flags is set if the tuning direction changes in the process of tuning*/
+    Boolean reverse;
     
     POINTER_SIZE_INT tuning_size;
     /*Used for LOS_Shrink*/
     Block_Header* interim_blocks;
     /*This flag is set when tuning strategy decide to tune los size.
-      *i.e. wasted memory is greater than wast_threshold.
-      */
+      *i.e. wasted memory is greater than wast_threshold.  */
     Boolean need_tune;
     /*This flag is set if gc is caused by los alloc failure.*/
     Boolean force_tune;
     
-    /*LOS alloc speed since last major*/
-    POINTER_SIZE_INT speed_los;
-    POINTER_SIZE_INT old_speed_los;
-    /*MOS alloc speed since last major*/
-    POINTER_SIZE_INT speed_mos;
-    POINTER_SIZE_INT old_speed_mos;
-    
+    uint64 speed_los;
+    uint64 last_speed_los;
+
+    uint64 speed_mos;
+    uint64 last_speed_mos;
+
+    uint64 speed_nos;
+    uint64 last_speed_nos;
+        
     /*Total wasted memory of los science last los variation*/
-    POINTER_SIZE_INT wast_los;
+    uint64 wast_los;
     /*Total wasted memory of mos science last los variation*/
-    POINTER_SIZE_INT wast_mos;
+    uint64 wast_mos;
 
-    POINTER_SIZE_INT current_dw;
+    uint64 current_dw;
     /*NOS survive size of last minor, this could be the least meaningful space unit when talking about tuning.*/
     POINTER_SIZE_INT current_ds;
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp Wed Jul 25 03:02:07 2007
@@ -26,6 +26,7 @@
 #include "../trace_forward/fspace.h"
 #include "../los/lspace.h"
 #include "../gen/gen.h"
+#include "../mark_sweep/gc_ms.h"
 #include "../common/space_tuner.h"
 
 Boolean IGNORE_FINREF = FALSE;
@@ -35,8 +36,10 @@
 static inline Boolean obj_is_dead_in_gen_minor_gc(Partial_Reveal_Object *p_obj)
 {
   /*
-   * The first condition is for supporting switch between nongen and gen minor collection
-   * With this kind of switch dead objects in MOS & LOS may be set the mark or fw bit in oi
+   * The first condition is for supporting switch between nongen and gen minor collection.
+   * With this kind of switch dead objects in MOS & LOS may be set the mark or fw bit in oi.
+   * The second condition is for supporting partially forwarding NOS.
+   * In partially forwarding situation live objects in the non-forwarding half NOS will only be marked but not forwarded.
    */
   return obj_belongs_to_nos(p_obj) && !obj_is_marked_or_fw_in_oi(p_obj);
 }
@@ -51,10 +54,23 @@
 {
   return !obj_is_marked_in_vt(p_obj);
 }
-// clear the two least significant bits of p_obj first
+
+#ifdef ONLY_SSPACE_IN_HEAP
+extern Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj);
+static inline Boolean obj_is_dead_in_unique_sweep_gc(Partial_Reveal_Object * p_obj)
+{
+  return !obj_is_marked_in_table(p_obj);
+}
+#endif
+
 static inline Boolean gc_obj_is_dead(GC *gc, Partial_Reveal_Object *p_obj)
 {
   assert(p_obj);
+
+#ifdef ONLY_SSPACE_IN_HEAP
+  return obj_is_dead_in_unique_sweep_gc(p_obj);
+#endif
+
   if(gc_match_kind(gc, MINOR_COLLECTION)){
     if(gc_is_gen_mode())
       return obj_is_dead_in_gen_minor_gc(p_obj);
@@ -68,11 +84,16 @@
 static inline Boolean fspace_obj_to_be_forwarded(Partial_Reveal_Object *p_obj)
 {
   if(!obj_belongs_to_nos(p_obj)) return FALSE;
-  return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
+  return forward_first_half ? (p_obj < object_forwarding_boundary) : (p_obj>=object_forwarding_boundary);
 }
 static inline Boolean obj_need_move(GC *gc, Partial_Reveal_Object *p_obj)
 {
   assert(!gc_obj_is_dead(gc, p_obj));
+
+#ifdef ONLY_SSPACE_IN_HEAP
+  Sspace *sspace = gc_ms_get_sspace((GC_MS*)gc);
+  return sspace->move_object;
+#endif
   
   if(gc_is_gen_mode() && gc_match_kind(gc, MINOR_COLLECTION))
     return fspace_obj_to_be_forwarded(p_obj);
@@ -85,22 +106,23 @@
 {
   finref_reset_repset(gc);
   pool_iterator_init(pool);
-  while(Vector_Block *block = pool_iterator_next(pool)){
+  Vector_Block *block = pool_iterator_next(pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      REF* p_ref = (REF*)iter;
+      REF *p_ref = (REF*)iter;
       Partial_Reveal_Object* p_obj = read_slot(p_ref);
       if(*p_ref && obj_need_move(gc, p_obj))
         finref_repset_add_entry(gc, p_ref);
     }
+    block = pool_iterator_next(pool);
   }
   finref_put_repset(gc);
 }
 
-static inline void fallback_update_fw_ref(REF* p_ref)
+static inline void fallback_update_fw_ref(REF *p_ref)
 {
-  if(!IS_FALLBACK_COMPACTION)
-    return;
+  assert(IS_FALLBACK_COMPACTION);
   
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
   if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
@@ -120,11 +142,12 @@
   
   gc_reset_finalizable_objects(gc);
   pool_iterator_init(obj_with_fin_pool);
-  while(Vector_Block *block = pool_iterator_next(obj_with_fin_pool)){
+  Vector_Block *block = pool_iterator_next(obj_with_fin_pool);
+  while(block){
     unsigned int block_has_ref = 0;
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      REF* p_ref = (REF *)iter;
+      REF *p_ref = (REF*)iter;
       if(IS_FALLBACK_COMPACTION)
         fallback_update_fw_ref(p_ref);  // in case that this collection is FALLBACK_COLLECTION
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
@@ -143,10 +166,12 @@
     }
     if(!block_has_ref)
       vector_block_clear(block);
+    
+    block = pool_iterator_next(obj_with_fin_pool);
   }
   gc_put_finalizable_objects(gc);
   
-  if(!gc_match_kind(gc, MINOR_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
     finref_add_repset_from_pool(gc, obj_with_fin_pool);
 }
 
@@ -155,15 +180,16 @@
 extern void trace_obj_in_normal_marking(Collector *collector, void *p_obj);
 extern void trace_obj_in_fallback_marking(Collector *collector, void *p_ref);
 extern void trace_obj_in_space_tune_marking(Collector *collector, void *p_obj);
+extern void trace_obj_in_ms_marking(Collector *collector, void *p_obj);
 
 
 typedef void (* Trace_Object_Func)(Collector *collector, void *p_ref_or_obj);
-// clear the two least significant bits of p_obj first
-// add p_ref to repset
-static inline void resurrect_obj_tree(Collector *collector, REF* p_ref)
+
+// Resurrect the obj tree whose root is the obj which p_ref points to
+static inline void resurrect_obj_tree(Collector *collector, REF *p_ref)
 {
   GC *gc = collector->gc;
-  GC_Metadata* metadata = gc->metadata;
+  GC_Metadata *metadata = gc->metadata;
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
   assert(p_obj && gc_obj_is_dead(gc, p_obj));
   
@@ -182,7 +208,7 @@
       trace_object = trace_obj_in_space_tune_marking;
       unsigned int obj_size = vm_object_size(p_obj);
 #ifdef USE_32BITS_HASHCODE
-      obj_size += (hashcode_is_set(p_obj))?GC_OBJECT_ALIGNMENT:0;
+      obj_size += hashcode_is_set(p_obj) ? GC_OBJECT_ALIGNMENT : 0;
 #endif
       if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){
         collector->non_los_live_obj_size += obj_size;
@@ -194,9 +220,12 @@
       trace_object = trace_obj_in_normal_marking;
     }
     obj_mark_in_vt(p_obj);
-  } else {
-    assert(gc_match_kind(gc, FALLBACK_COLLECTION));
+  } else if(gc_match_kind(gc, FALLBACK_COLLECTION)){
     trace_object = trace_obj_in_fallback_marking;
+  } else {
+    assert(gc_match_kind(gc, UNIQUE_SWEEP_COLLECTION));
+    p_ref_or_obj = p_obj;
+    trace_object = trace_obj_in_ms_marking;
   }
   
   collector->trace_stack = free_task_pool_get_entry(metadata);
@@ -208,11 +237,12 @@
   while(task_block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
     while(!vector_block_iterator_end(task_block, iter)){
-      void* p_ref_or_obj = (void *)*iter;
+      void *p_ref_or_obj = (void*)*iter;
       assert((gc_match_kind(gc, MINOR_COLLECTION | FALLBACK_COLLECTION) && *(Partial_Reveal_Object **)p_ref_or_obj)
-              || (gc_match_kind(gc, MAJOR_COLLECTION) && p_ref_or_obj));
+              || (gc_match_kind(gc, MAJOR_COLLECTION) && p_ref_or_obj)
+              || (gc_match_kind(gc, UNIQUE_SWEEP_COLLECTION) && p_ref_or_obj));
       trace_object(collector, p_ref_or_obj);
-      if(collector->result == FALSE)  break; /* force return */
+      if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
       
       iter = vector_block_iterator_advance(task_block, iter);
     }
@@ -244,13 +274,15 @@
   
   DURING_RESURRECTION = TRUE;
   
-  if(!gc_match_kind(gc, MINOR_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
     finref_reset_repset(gc);
+  
   pool_iterator_init(finalizable_obj_pool);
-  while(Vector_Block *block = pool_iterator_next(finalizable_obj_pool)){
+  Vector_Block *block = pool_iterator_next(finalizable_obj_pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      REF* p_ref = (REF *)iter;
+      REF *p_ref = (REF*)iter;
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
       assert(p_obj);
       
@@ -258,7 +290,7 @@
        * Because it is outside heap, we can't update in ref fixing.
        * In minor collection p_ref of the root dead obj is automatically updated while tracing.
        */
-      if(!gc_match_kind(gc, MINOR_COLLECTION))
+      if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
         finref_repset_add_entry(gc, p_ref);
       
       /* Perhaps obj has been resurrected by previous resurrections */
@@ -275,9 +307,13 @@
         return; /* force return */
       }
     }
+    
+    block = pool_iterator_next(finalizable_obj_pool);
   }
-  if(!gc_match_kind(gc, MINOR_COLLECTION))
+  
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
     finref_put_repset(gc);
+  
   metadata->pending_finalizers = TRUE;
   
   DURING_RESURRECTION = FALSE;
@@ -287,16 +323,17 @@
 
 static void identify_dead_refs(GC *gc, Pool *pool)
 {
-  if(!gc_match_kind(gc, MINOR_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
     finref_reset_repset(gc);
   pool_iterator_init(pool);
-  while(Vector_Block *block = pool_iterator_next(pool)){
+  Vector_Block *block = pool_iterator_next(pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      REF* p_ref = (REF*)iter;
+      REF *p_ref = (REF*)iter;
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
       assert(p_obj);
-      REF* p_referent_field = obj_get_referent_field(p_obj);
+      REF *p_referent_field = obj_get_referent_field(p_obj);
       if(IS_FALLBACK_COMPACTION)
         fallback_update_fw_ref(p_referent_field);
       Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
@@ -306,20 +343,24 @@
         continue;
       }
       if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
-        if(obj_need_move(gc, p_referent))
+        if(obj_need_move(gc, p_referent)){
           if(gc_match_kind(gc, MINOR_COLLECTION)){
             assert(obj_is_fw_in_oi(p_referent));
             write_slot(p_referent_field, (obj_get_fw_in_oi(p_referent)));
           } else {
             finref_repset_add_entry(gc, p_referent_field);
           }
+        }
         *p_ref = (REF)NULL;
         continue;
       }
       *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */
     }
+    
+    block = pool_iterator_next(pool);
   }
-  if(!gc_match_kind(gc, MINOR_COLLECTION)){
+  
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)){
     finref_put_repset(gc);
     finref_add_repset_from_pool(gc, pool);
   }
@@ -347,7 +388,7 @@
 
 /*
  * The reason why we don't use identify_dead_refs() to implement this function is
- * that we will differentiate phanref from softref & weakref in the future.
+ * that we will differentiate phanref from weakref in the future.
  */
 static void identify_dead_phanrefs(Collector *collector)
 {
@@ -355,17 +396,18 @@
   Finref_Metadata *metadata = gc->finref_metadata;
   Pool *phanref_pool = metadata->phanref_pool;
   
-  if(!gc_match_kind(gc, MINOR_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
     finref_reset_repset(gc);
 //  collector_reset_repset(collector);
   pool_iterator_init(phanref_pool);
-  while(Vector_Block *block = pool_iterator_next(phanref_pool)){
+  Vector_Block *block = pool_iterator_next(phanref_pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
       Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
       Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref);
       assert(p_obj);
-      REF* p_referent_field = obj_get_referent_field(p_obj);
+      REF *p_referent_field = obj_get_referent_field(p_obj);
       if(IS_FALLBACK_COMPACTION)
       fallback_update_fw_ref(p_referent_field);
       Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
@@ -376,7 +418,7 @@
       }
       if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
         if(obj_need_move(gc, p_referent))
-           if(gc_match_kind(gc, MINOR_COLLECTION)){
+          if(gc_match_kind(gc, MINOR_COLLECTION)){
             assert(obj_is_fw_in_oi(p_referent));
             write_slot(p_referent_field, (obj_get_fw_in_oi(p_referent)));
           } else {
@@ -394,9 +436,10 @@
        * resurrect_obj_tree(collector, p_referent_field);
        */
     }
+    block = pool_iterator_next(phanref_pool);
   }
 //  collector_put_repset(collector);
-  if(!gc_match_kind(gc, MINOR_COLLECTION)){
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)){
     finref_put_repset(gc);
     finref_add_repset_from_pool(gc, phanref_pool);
   }
@@ -407,7 +450,8 @@
   Pool *finalizable_obj_pool = gc->finref_metadata->finalizable_obj_pool;
   Pool *free_pool = gc->finref_metadata->free_pool;
   
-  while(Vector_Block *block = pool_get_entry(finalizable_obj_pool)){
+  Vector_Block *block = pool_get_entry(finalizable_obj_pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     while(!vector_block_iterator_end(block, iter)){
       assert(*iter);
@@ -417,6 +461,7 @@
     }
     vector_block_clear(block);
     pool_put_entry(free_pool, block);
+    block = pool_get_entry(finalizable_obj_pool);
   }
 }
 
@@ -424,7 +469,8 @@
 {
   Pool *free_pool = gc->finref_metadata->free_pool;
   
-  while(Vector_Block *block = pool_get_entry(ref_pool)){
+  Vector_Block *block = pool_get_entry(ref_pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     while(!vector_block_iterator_end(block, iter)){
       Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter);
@@ -434,6 +480,7 @@
     }
     vector_block_clear(block);
     pool_put_entry(free_pool, block);
+    block = pool_get_entry(ref_pool);
   }
 }
 
@@ -453,8 +500,11 @@
   put_dead_weak_refs_to_vm(gc, metadata->weakref_pool);
   put_dead_weak_refs_to_vm(gc, metadata->phanref_pool);
   
-  if(/*IS_FALLBACK_COMPACTION && */!pool_is_empty(metadata->fallback_ref_pool))
+  /* This is a major collection after resurrection fallback */
+  if(!pool_is_empty(metadata->fallback_ref_pool)){
     put_dead_weak_refs_to_vm(gc, metadata->fallback_ref_pool);
+  }
+  
   metadata->pending_weakrefs = TRUE;
 }
 
@@ -464,15 +514,13 @@
   Finref_Metadata *metadata = gc->finref_metadata;
   Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
   Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
-  Vector_Block *obj_with_fin_block = pool_get_entry(obj_with_fin_pool);
-  assert(obj_with_fin_block);
-  
-  Boolean pending_finalizers = FALSE;
-  
-  while(Vector_Block *block = pool_get_entry(finalizable_obj_pool)){
+  Vector_Block *obj_with_fin_block = finref_get_free_block(gc);
+    
+  Vector_Block *block = pool_get_entry(finalizable_obj_pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      REF* p_ref = (REF*)iter;
+      REF *p_ref = (REF*)iter;
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
       assert(p_obj);
       /* Perhaps obj has been resurrected by previous resurrections */
@@ -482,12 +530,14 @@
           p_obj = read_slot(p_ref);
         }
       }
-      gc_add_finalizer(gc, obj_with_fin_block, p_obj);  // Perhaps p_obj has been forwarded, so we use *p_ref rather than p_obj
+      /* Perhaps obj_with_fin_block has been allocated with a new free block if it is full */
+      obj_with_fin_block = gc_add_finalizer(gc, obj_with_fin_block, p_obj);
     }
+    block = pool_get_entry(finalizable_obj_pool);
   }
   
   pool_put_entry(obj_with_fin_pool, obj_with_fin_block);
-  metadata->pending_finalizers = pending_finalizers;
+  metadata->pending_finalizers = FALSE;
 }
 
 static void dead_weak_refs_fallback(GC *gc, Pool *ref_pool)
@@ -497,22 +547,26 @@
   Pool *fallback_ref_pool = metadata->fallback_ref_pool;
   
   Vector_Block *fallback_ref_block = finref_get_free_block(gc);
-  while(Vector_Block *block = pool_get_entry(ref_pool)){
+  Vector_Block *block = pool_get_entry(ref_pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     while(!vector_block_iterator_end(block, iter)){
       Partial_Reveal_Object *p_obj = read_slot((REF*)iter);
+      /* finref_add_fallback_ref may return a new free block if fallback_ref_block is full */
       if(p_obj)
-        finref_add_fallback_ref(gc, fallback_ref_block, p_obj);
+        fallback_ref_block = finref_add_fallback_ref(gc, fallback_ref_block, p_obj);
       iter = vector_block_iterator_advance(block, iter);
     }
     vector_block_clear(block);
     pool_put_entry(free_pool, block);
+    block = pool_get_entry(ref_pool);
   }
   
   pool_put_entry(fallback_ref_pool, fallback_ref_block);
 }
 
-/* Record softrefs and weakrefs whose referents are dead.
+/* Record softrefs and weakrefs whose referents are dead
+ * so that we can update their addresses and pass them to the VM.
  * In fallback collection these refs will not be considered for enqueueing again,
  * since their referent fields have been cleared by identify_dead_refs().
  */
@@ -523,17 +577,21 @@
   if(!softref_pool_is_empty(gc) || !weakref_pool_is_empty(gc))
     metadata->pending_weakrefs = TRUE;
   
+  /* fallback_ref_pool is only used during resurrection fallback, so it must be empty here */
+  assert(pool_is_empty(metadata->fallback_ref_pool));
+  
   dead_weak_refs_fallback(gc, metadata->softref_pool);
   dead_weak_refs_fallback(gc, metadata->weakref_pool);
   
   gc_clear_weakref_pools(gc);
 }
 
+/* Deal with resurrection fallback */
 static void resurrection_fallback_handler(GC *gc)
 {
   Finref_Metadata *metadata = gc->finref_metadata;
   
-  /* Repset pool should be empty, because we don't add anthing to this pool in Minor Collection. */
+  /* Repset pool should be empty, because we don't add anything to this pool in Minor Collection. */
   assert(pool_is_empty(metadata->repset_pool));
   
   finalizable_objs_fallback(gc);
@@ -571,6 +629,7 @@
   gc_set_weakref_sets(gc);
   gc_clear_weakref_pools(gc);
 }
+
 void gc_put_finref_to_vm(GC *gc)
 {
   put_dead_refs_to_vm(gc);
@@ -582,6 +641,7 @@
   Pool *obj_with_fin_pool = gc->finref_metadata->obj_with_fin_pool;
   Pool *free_pool = gc->finref_metadata->free_pool;
   
+  /* Because we are manipulating obj_with_fin_pool, the GC lock must be held in case a GC happens */
   vm_gc_lock_enum();
   /* FIXME: holding gc lock is not enough, perhaps there are mutators that are allocating objects with finalizer
    * could be fixed as this:
@@ -591,7 +651,9 @@
   lock(gc->mutator_list_lock);
   gc_set_obj_with_fin(gc);
   unlock(gc->mutator_list_lock);
-  while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){
+  
+  Vector_Block *block = pool_get_entry(obj_with_fin_pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     while(!vector_block_iterator_end(block, iter)){
       Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter);
@@ -601,22 +663,25 @@
     }
     vector_block_clear(block);
     pool_put_entry(free_pool, block);
+    block = pool_get_entry(obj_with_fin_pool);
   }
+  
   vm_gc_unlock_enum();
 }
 
 static void update_referent_field_ignore_finref(GC *gc, Pool *pool)
 {
-  while(Vector_Block *block = pool_get_entry(pool)){
+  Vector_Block *block = pool_get_entry(pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      REF* p_ref = (REF*)iter;
+      REF *p_ref = (REF*)iter;
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
       assert(p_obj);
-      REF* p_referent_field = obj_get_referent_field(p_obj);
+      REF *p_referent_field = obj_get_referent_field(p_obj);
       if(IS_FALLBACK_COMPACTION)
         fallback_update_fw_ref(p_referent_field);
-      Partial_Reveal_Object* p_referent = read_slot(p_referent_field);
+      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
       
       if(!p_referent){  // referent field has been cleared
         *p_ref = (REF)NULL;
@@ -635,6 +700,7 @@
       }
       *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */
     }
+    block = pool_get_entry(pool);
   }
 }
 
@@ -642,27 +708,40 @@
 {
   Finref_Metadata *metadata = gc->finref_metadata;
   
-  if(!gc_match_kind(gc, MINOR_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
     finref_reset_repset(gc);
   update_referent_field_ignore_finref(gc, metadata->softref_pool);
   update_referent_field_ignore_finref(gc, metadata->weakref_pool);
   update_referent_field_ignore_finref(gc, metadata->phanref_pool);
-  if(!gc_match_kind(gc, MINOR_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
     finref_put_repset(gc);
 }
 
-static void move_compaction_update_ref(GC *gc, REF* p_ref)
-{
-  /* If p_ref belongs to heap, it must be a referent field pointer */
-  if(address_belongs_to_gc_heap((void *)p_ref, gc) && (space_of_addr(gc, p_ref))->move_object){
+extern void* los_boundary;
+/* Move compaction needs special treatment when updating the referent field */
+static inline void move_compaction_update_ref(GC *gc, REF *p_ref)
+{
+  /* There are only two kinds of p_ref being added into finref_repset_pool:
+   * 1. p_ref is in a vector block from one finref pool;
+   * 2. p_ref is a referent field.
+   * So if p_ref belongs to heap, it must be a referent field pointer.
+ * Resurrected objects, other than the tree roots, need not be recorded in finref_repset_pool.
+   */
+//  if(address_belongs_to_gc_heap(p_ref, gc) && !address_belongs_to_space(p_ref, gc_get_los((GC_Gen*)gc))){ 
+// && space_of_addr(gc, p_ref)->move_object //comment this out because all spaces are movable in major collection.
+  if(address_belongs_to_gc_heap(p_ref, gc) && (p_ref >= los_boundary)){
     unsigned int offset = get_gc_referent_offset();
-    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object *)((POINTER_SIZE_INT)p_ref - offset);
+    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
     Partial_Reveal_Object *p_new_ref = ref_to_obj_ptr(obj_get_fw_in_table(p_old_ref));
     p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
   }
-  Partial_Reveal_Object* p_obj = read_slot(p_ref);
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
   assert(space_of_addr(gc, (void*)p_obj)->move_object);
-  *p_ref = obj_get_fw_in_table(p_obj);
+//  if(obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc)))
+  if(p_obj < los_boundary)
+    write_slot(p_ref , obj_get_fw_in_oi(p_obj));
+  else
+    *p_ref = obj_get_fw_in_table(p_obj);
 }
 
 extern Boolean IS_MOVE_COMPACT;
@@ -671,7 +750,7 @@
 static void destructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool)
 {
   Finref_Metadata *metadata = gc->finref_metadata;
-  REF* p_ref;
+  REF *p_ref;
   Partial_Reveal_Object *p_obj;
   
   /* NOTE:: this is destructive to the root sets. */
@@ -703,12 +782,13 @@
 static void nondestructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool)
 {
   Finref_Metadata *metadata = gc->finref_metadata;
-  REF* p_ref;
+  REF *p_ref;
   Partial_Reveal_Object *p_obj;
   
   /* NOTE:: this is nondestructive to the root sets. */
   pool_iterator_init(pool);
-  while(Vector_Block *repset = pool_iterator_next(pool)){
+  Vector_Block *repset = pool_iterator_next(pool);
+  while(repset){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(repset);
     for(; !vector_block_iterator_end(repset,iter); iter = vector_block_iterator_advance(repset,iter)){
       if(pointer_addr_in_pool)
@@ -725,6 +805,7 @@
         move_compaction_update_ref(gc, p_ref);
       }
     }
+    repset = pool_iterator_next(pool);
   }
 }
 
@@ -732,13 +813,15 @@
 {
   assert(!gc_match_kind(gc, MINOR_COLLECTION));
   
-  Finref_Metadata* metadata = gc->finref_metadata;
+  Finref_Metadata *metadata = gc->finref_metadata;
   Pool *repset_pool = metadata->repset_pool;
   Pool *fallback_ref_pool = metadata->fallback_ref_pool;
   
   destructively_fix_finref_pool(gc, repset_pool, TRUE);
-  if(IS_FALLBACK_COMPACTION && !pool_is_empty(fallback_ref_pool))
+  if(!pool_is_empty(fallback_ref_pool)){
+    assert(IS_FALLBACK_COMPACTION);
     nondestructively_fix_finref_pool(gc, fallback_ref_pool, FALSE);
+  }
 }
 
 void gc_activate_finref_threads(GC *gc)
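
A recurring change in finalizer_weakref.cpp above is rewriting "while(Vector_Block *block = pool_get_entry(pool))" into an explicit fetch before the loop plus a re-fetch at the bottom, so the block variable is no longer declared inside the loop condition. The following sketch shows that drain-the-pool shape in isolation; the Pool and Vector_Block types here are simplified stand-ins for illustration, not the gc_gen metadata structures.

    #include <cstddef>
    #include <cstdio>
    #include <deque>
    #include <vector>

    typedef std::vector<long> Vector_Block;    // stand-in: one block of entries
    typedef std::deque<Vector_Block*> Pool;    // stand-in: a pool of blocks

    static Vector_Block *pool_get_entry(Pool *pool)
    {
      if(pool->empty()) return NULL;
      Vector_Block *block = pool->front();
      pool->pop_front();
      return block;
    }

    static void pool_put_entry(Pool *pool, Vector_Block *block){ pool->push_back(block); }
    static void vector_block_clear(Vector_Block *block){ block->clear(); }

    /* The rewritten loop shape: fetch one block before the loop, process its
     * entries, recycle it to the free pool, then re-fetch at the bottom. */
    static void drain_pool(Pool *work_pool, Pool *free_pool)
    {
      Vector_Block *block = pool_get_entry(work_pool);
      while(block){
        for(size_t i = 0; i < block->size(); i++)
          printf("entry %ld\n", (*block)[i]);
        vector_block_clear(block);
        pool_put_entry(free_pool, block);
        block = pool_get_entry(work_pool);   /* mirrors the added re-fetch lines */
      }
    }

    int main()
    {
      Pool work_pool, free_pool;
      Vector_Block b1, b2;
      b1.push_back(1); b1.push_back(2); b2.push_back(3);
      pool_put_entry(&work_pool, &b1);
      pool_put_entry(&work_pool, &b2);
      drain_pool(&work_pool, &free_pool);
      return 0;
    }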

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h Wed Jul 25 03:02:07 2007
@@ -47,7 +47,7 @@
  * }
  */
 
-inline REF* obj_get_referent_field(Partial_Reveal_Object *p_obj)
+inline REF *obj_get_referent_field(Partial_Reveal_Object *p_obj)
 {
   assert(p_obj);
   return (REF*)(( Byte*)p_obj+get_gc_referent_offset());
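
obj_get_referent_field() locates the referent slot by adding a byte offset, cached once per VM lifetime as gc_referent_offset, to the object pointer. Below is a self-contained sketch of that offset arithmetic; the object layout is invented and only stands in for Partial_Reveal_Object.

    #include <cassert>
    #include <cstddef>

    typedef void *REF;
    typedef unsigned char Byte;

    /* Invented layout; the real Partial_Reveal_Object is defined elsewhere. */
    struct Partial_Reveal_Object { void *vt; REF referent; };

    static unsigned int get_gc_referent_offset(void)
    { return (unsigned int)offsetof(Partial_Reveal_Object, referent); }

    inline REF *obj_get_referent_field(Partial_Reveal_Object *p_obj)
    {
      assert(p_obj);
      return (REF*)((Byte*)p_obj + get_gc_referent_offset());
    }

    int main()
    {
      Partial_Reveal_Object obj = {0, 0};
      *obj_get_referent_field(&obj) = (REF)&obj;   /* write through the computed slot */
      assert(obj.referent == (REF)&obj);
      return 0;
    }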

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp Wed Jul 25 03:02:07 2007
@@ -118,8 +118,8 @@
 }
 
 
-/* called when there is no Vector_Block in finref_metadata->free_pool
- * extend the pool by a segment
+/* Extend the free pool by a segment;
+ * called when there is no vector block left in finref_metadata->free_pool
  */
 Vector_Block *finref_metadata_extend(void)
 {
@@ -157,13 +157,6 @@
   return block;
 }
 
-/* called when GC completes and there is no Vector_Block in the last five pools of gc->finref_metadata
- * shrink the free pool by half
- */
-static void finref_metadata_shrink(GC *gc)
-{
-}
-
 
 /* reset obj_with_fin vector block of each mutator */
 static void gc_reset_obj_with_fin(GC *gc)
@@ -216,7 +209,7 @@
   unsigned int num_active_collectors = gc->num_active_collectors;
   for(unsigned int i = 0; i < num_active_collectors; i++)
   {
-    Collector* collector = gc->collectors[i];
+    Collector *collector = gc->collectors[i];
     pool_put_entry(metadata->softref_pool, collector->softref_set);
     pool_put_entry(metadata->weakref_pool, collector->weakref_set);
     pool_put_entry(metadata->phanref_pool, collector->phanref_set);
@@ -241,7 +234,9 @@
   assert(metadata->finalizable_obj_set == NULL);
   assert(metadata->repset == NULL);
   
-  while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){
+  /* Return empty blocks in obj_with_fin_pool to free_pool and move non-empty ones to finalizable_obj_pool */
+  Vector_Block *block = pool_get_entry(obj_with_fin_pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     if(vector_block_iterator_end(block, iter)){
       vector_block_clear(block);
@@ -249,6 +244,7 @@
     } else {
       pool_put_entry(finalizable_obj_pool, block);
     }
+    block = pool_get_entry(obj_with_fin_pool);
   }
   assert(pool_is_empty(obj_with_fin_pool));
   metadata->obj_with_fin_pool = finalizable_obj_pool;
@@ -258,59 +254,60 @@
 }
 
 
-static inline void finref_metadata_add_entry(GC *gc, Vector_Block* &vector_block_in_use, Pool *pool, POINTER_SIZE_INT value)
+static inline Vector_Block *finref_metadata_add_entry(GC *gc, Vector_Block *vector_block_in_use, Pool *pool, POINTER_SIZE_INT value)
 {
   assert(vector_block_in_use);
+  assert(!vector_block_is_full(vector_block_in_use));
   assert(value);
-
-  Vector_Block* block = vector_block_in_use;
-  vector_block_add_entry(block, value);
   
-  if(!vector_block_is_full(block)) return;
+  vector_block_add_entry(vector_block_in_use, value);
   
-  pool_put_entry(pool, block);
-  vector_block_in_use = finref_get_free_block(gc);
+  if(!vector_block_is_full(vector_block_in_use))
+    return vector_block_in_use;
+  
+  pool_put_entry(pool, vector_block_in_use);
+  return finref_get_free_block(gc);
 }
 
 void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *p_obj)
 {
   GC *gc = mutator->gc;
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, mutator->obj_with_fin, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
+  mutator->obj_with_fin = finref_metadata_add_entry(gc, mutator->obj_with_fin, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
 }
 
 /* This function is only used by resurrection fallback */
-void gc_add_finalizer(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *p_obj)
+Vector_Block *gc_add_finalizer(GC *gc, Vector_Block *vector_block_in_use, Partial_Reveal_Object *p_obj)
 {
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, vector_block_in_use, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
+  return finref_metadata_add_entry(gc, vector_block_in_use, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
 }
 
 void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *p_obj)
 {
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, metadata->finalizable_obj_set, metadata->finalizable_obj_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
+  metadata->finalizable_obj_set = finref_metadata_add_entry(gc, metadata->finalizable_obj_set, metadata->finalizable_obj_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
 }
 
 void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref)
 {
   GC *gc = collector->gc;
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, collector->softref_set, metadata->softref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
+  collector->softref_set = finref_metadata_add_entry(gc, collector->softref_set, metadata->softref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
 }
 
 void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref)
 {
   GC *gc = collector->gc;
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, collector->weakref_set, metadata->weakref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
+  collector->weakref_set = finref_metadata_add_entry(gc, collector->weakref_set, metadata->weakref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
 }
 
 void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref)
 {
   GC *gc = collector->gc;
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, collector->phanref_set, metadata->phanref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
+  collector->phanref_set = finref_metadata_add_entry(gc, collector->phanref_set, metadata->phanref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
 }
 
 void finref_repset_add_entry(GC *gc, REF* p_ref)
@@ -318,29 +315,32 @@
   assert(*p_ref);
   assert(read_slot(p_ref));
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, metadata->repset, metadata->repset_pool, (POINTER_SIZE_INT)p_ref);
+  metadata->repset = finref_metadata_add_entry(gc, metadata->repset, metadata->repset_pool, (POINTER_SIZE_INT)p_ref);
 }
 
 /* This function is only used by resurrection fallback */
-void finref_add_fallback_ref(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *p_obj)
+Vector_Block *finref_add_fallback_ref(GC *gc, Vector_Block *vector_block_in_use, Partial_Reveal_Object *p_obj)
 {
   assert(p_obj);
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, vector_block_in_use, metadata->fallback_ref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
+  return finref_metadata_add_entry(gc, vector_block_in_use, metadata->fallback_ref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
 }
 
-static inline Boolean pool_has_no_ref(Pool *pool)
+static Boolean pool_has_no_ref(Pool *pool)
 {
   if(pool_is_empty(pool))
     return TRUE;
+  
   pool_iterator_init(pool);
-  while(Vector_Block *block = pool_iterator_next(pool)){
+  Vector_Block *block = pool_iterator_next(pool);
+  while(block){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     while(!vector_block_iterator_end(block, iter)){
       if(*iter)
         return FALSE;
       iter = vector_block_iterator_advance(block, iter);
     }
+    block = pool_iterator_next(pool);
   }
   return TRUE;
 }
@@ -375,7 +375,7 @@
   return pool_has_no_ref(gc->finref_metadata->repset_pool);
 }
 
-static inline void finref_metadata_clear_pool(Pool *pool)
+static void finref_metadata_clear_pool(Pool *pool)
 {
   while(Vector_Block* block = pool_get_entry(pool))
   {
@@ -389,9 +389,4 @@
   finref_metadata_clear_pool(gc->finref_metadata->softref_pool);
   finref_metadata_clear_pool(gc->finref_metadata->weakref_pool);
   finref_metadata_clear_pool(gc->finref_metadata->phanref_pool);
-}
-
-void gc_clear_finref_repset_pool(GC *gc)
-{
-  finref_metadata_clear_pool(gc->finref_metadata->repset_pool);
 }
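
The signature change to finref_metadata_add_entry (and to gc_add_finalizer and finref_add_fallback_ref) replaces the Vector_Block*& reference parameter with a returned block pointer: when the block in use fills up, it is handed to its destination pool and a fresh free block is returned, so every caller now reassigns its cached block. A minimal sketch of that contract, using an invented four-entry block rather than the real vector block layout:

    #include <cassert>
    #include <cstddef>

    struct Vector_Block { long entries[4]; size_t count; };

    static Vector_Block *get_free_block()        /* stand-in for finref_get_free_block(gc) */
    { return new Vector_Block(); }

    static void put_full_block(Vector_Block*)    /* stand-in for pool_put_entry(pool, block) */
    {}

    static bool vector_block_is_full(Vector_Block *b){ return b->count == 4; }
    static void vector_block_add_entry(Vector_Block *b, long v){ b->entries[b->count++] = v; }

    /* Return the block the caller should keep using after the add. */
    static Vector_Block *add_entry(Vector_Block *block_in_use, long value)
    {
      assert(block_in_use && !vector_block_is_full(block_in_use));
      vector_block_add_entry(block_in_use, value);
      if(!vector_block_is_full(block_in_use))
        return block_in_use;
      put_full_block(block_in_use);              /* full: hand it to the destination pool */
      return get_free_block();                   /* and continue with a fresh one */
    }

    int main()
    {
      Vector_Block *cached = get_free_block();
      for(long v = 1; v <= 10; v++)
        cached = add_entry(cached, v);           /* callers must reassign, as in the patch */
      assert(cached->count == 2);                /* 10 entries = two full blocks plus 2 */
      return 0;
    }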

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h Wed Jul 25 03:02:07 2007
@@ -34,8 +34,8 @@
   
   Pool *free_pool;                              // list of free buffers for the five pools below
   
-  Pool *obj_with_fin_pool;                      // list of objects that have finalizer;
-                                                // these objects are added in when they are allocated
+  Pool *obj_with_fin_pool;                      // list of objects that have finalizers
+                                                // these objects are added as they are being allocated
   Pool *finalizable_obj_pool;                   // temporary buffer for finalizable objects identified during one single GC
   
   Pool *softref_pool;                           // temporary buffer for soft references identified during one single GC
@@ -46,13 +46,13 @@
   
   Pool *fallback_ref_pool;                      // temporary buffer for weakref needing to be put to vm when resurrection fallback happens
   
-  Vector_Block *finalizable_obj_set;            // buffer for finalizable_objects_pool
+  Vector_Block *finalizable_obj_set;            // buffer for finalizable_obj_pool
   Vector_Block *repset;                         // buffer for repset_pool
   
   Boolean pending_finalizers;                   // there are objects waiting to be finalized
   Boolean pending_weakrefs;                     // there are weak references waiting to be enqueued
   
-  unsigned int gc_referent_offset;              // the referent field's offset in Reference Class
+  unsigned int gc_referent_offset;              // the referent field's offset in the Reference class; it is constant during the VM's lifetime
 }Finref_Metadata;
 
 extern unsigned int get_gc_referent_offset(void);
@@ -68,13 +68,13 @@
 extern void gc_reset_finref_metadata(GC *gc);
 
 extern void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref);
-extern void gc_add_finalizer(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *ref);
+extern Vector_Block *gc_add_finalizer(GC *gc, Vector_Block *vector_block_in_use, Partial_Reveal_Object *ref);
 extern void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref);
 extern void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref);
 extern void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref);
 extern void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref);
 extern void finref_repset_add_entry(GC *gc, REF* ref);
-extern void finref_add_fallback_ref(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *p_ref);
+extern Vector_Block *finref_add_fallback_ref(GC *gc, Vector_Block *vector_block_in_use, Partial_Reveal_Object *p_ref);
 
 extern Boolean obj_with_fin_pool_is_empty(GC *gc);
 extern Boolean finalizable_obj_pool_is_empty(GC *gc);
@@ -86,6 +86,7 @@
 extern void gc_clear_weakref_pools(GC *gc);
 
 extern Vector_Block *finref_metadata_extend(void);
+/* Every place requesting a free vector block in finref should call this function */
 inline Vector_Block *finref_get_free_block(GC *gc)
 {
   Vector_Block *block = pool_get_entry(gc->finref_metadata->free_pool);
@@ -104,7 +105,8 @@
   Finref_Metadata *metadata = gc->finref_metadata;
   
   assert(!metadata->finalizable_obj_set);
-  metadata->finalizable_obj_set = pool_get_entry(metadata->free_pool);
+  metadata->finalizable_obj_set = finref_get_free_block(gc);
+  assert(metadata->finalizable_obj_set);
 }
 /* called after loop of recording finalizable objects */
 inline void gc_put_finalizable_objects(GC *gc)
@@ -121,7 +123,8 @@
   Finref_Metadata *metadata = gc->finref_metadata;
   
   assert(!metadata->repset);
-  metadata->repset = pool_get_entry(metadata->free_pool);
+  metadata->repset = finref_get_free_block(gc);
+  assert(metadata->repset);
 }
 /* called after loop of recording repointed reference */
 inline void finref_put_repset(GC *gc)
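
The new asserts after finref_get_free_block() rely on the helper never returning NULL: it first tries free_pool and, when that is empty, presumably falls back to finref_metadata_extend() to grow the metadata by a segment. A rough sketch of that get-or-extend behaviour, with simplified structures assumed for illustration:

    #include <cassert>
    #include <cstddef>
    #include <deque>

    struct Vector_Block { /* entry storage omitted in this sketch */ };
    typedef std::deque<Vector_Block*> Pool;

    static Vector_Block *pool_get_entry(Pool &pool)
    {
      if(pool.empty()) return NULL;
      Vector_Block *block = pool.front();
      pool.pop_front();
      return block;
    }

    /* Stand-in for finref_metadata_extend(): grow the free pool by a segment
     * of blocks and hand one back. */
    static Vector_Block *metadata_extend(Pool &free_pool)
    {
      for(int i = 0; i < 7; i++) free_pool.push_back(new Vector_Block());
      return new Vector_Block();
    }

    static Vector_Block *get_free_block(Pool &free_pool)
    {
      Vector_Block *block = pool_get_entry(free_pool);
      if(block) return block;
      return metadata_extend(free_pool);   /* never NULL, which makes the asserts safe */
    }

    int main()
    {
      Pool free_pool;                      /* deliberately empty */
      Vector_Block *repset = get_free_block(free_pool);
      assert(repset);                      /* mirrors assert(metadata->repset) above */
      return 0;
    }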

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp Wed Jul 25 03:02:07 2007
@@ -25,6 +25,7 @@
 #include "../verify/verify_live_heap.h"
 #include "../common/space_tuner.h"
 #include "../common/compressed_ref.h"
+
 #ifdef USE_32BITS_HASHCODE
 #include "../common/hashcode.h"
 #endif
@@ -231,9 +232,6 @@
                                 space_committed_size((Space*)gc_gen->mos) +
                                 space_committed_size((Space*)gc_gen->los);
   
-  set_native_finalizer_thread_flag(!IGNORE_FINREF);
-  set_native_ref_enqueue_thread_flag(!IGNORE_FINREF);
-  
   return;
 }
 
@@ -244,8 +242,8 @@
   Space* los = (Space*)gc_gen->los;
 
   POINTER_SIZE_INT nos_size = space_committed_size(nos);
-  POINTER_SIZE_INT mos_size = space_committed_size(nos);
-  POINTER_SIZE_INT los_size = space_committed_size(nos);
+  POINTER_SIZE_INT mos_size = space_committed_size(mos);
+  POINTER_SIZE_INT los_size = space_committed_size(los);
 
   void* nos_start = nos->heap_start;
   void* mos_start = mos->heap_start;
@@ -270,11 +268,10 @@
 Space* gc_get_nos(GC_Gen* gc){ return (Space*)gc->nos;}
 Space* gc_get_mos(GC_Gen* gc){ return (Space*)gc->mos;}
 Space* gc_get_los(GC_Gen* gc){ return (Space*)gc->los;}
-Space* gc_get_pos(GC_Gen* gc) { return NULL; }
+
 void gc_set_nos(GC_Gen* gc, Space* nos){ gc->nos = (Fspace*)nos;}
 void gc_set_mos(GC_Gen* gc, Space* mos){ gc->mos = (Mspace*)mos;}
 void gc_set_los(GC_Gen* gc, Space* los){ gc->los = (Lspace*)los;}
-void gc_set_pos(GC_Gen* gc, Space* pos) {}
 
 void* mos_alloc(unsigned size, Allocator *allocator){return mspace_alloc(size, allocator);}
 void* nos_alloc(unsigned size, Allocator *allocator){return fspace_alloc(size, allocator);}
@@ -296,6 +293,9 @@
   else
     gc->collect_kind = MINOR_COLLECTION;
 
+#ifdef ONLY_SSPACE_IN_HEAP
+  gc->collect_kind = UNIQUE_SWEEP_COLLECTION;
+#endif
   return;
 }
 
@@ -372,15 +372,14 @@
   Fspace* nos = gc->nos;
   Lspace* los = gc->los;
   /*We can not tolerate gc->survive_ratio be greater than threshold twice continuously.
-   *Or, we must adjust heap size
-   */
+   *Or, we must adjust heap size */
   static unsigned int tolerate = 0;
 
   POINTER_SIZE_INT heap_total_size = los->committed_heap_size + mos->committed_heap_size + nos->committed_heap_size;
   assert(heap_total_size == gc->committed_heap_size);
 
-  assert(nos->surviving_size == 0);  
-  POINTER_SIZE_INT heap_surviving_size = mos->surviving_size + los->surviving_size; 
+  assert(nos->last_surviving_size == 0);  
+  POINTER_SIZE_INT heap_surviving_size = (POINTER_SIZE_INT)(mos->period_surviving_size + los->period_surviving_size);
   assert(heap_total_size > heap_surviving_size);
 
   float heap_survive_ratio = (float)heap_surviving_size / (float)heap_total_size;
@@ -428,7 +427,8 @@
 }
 
 Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it. */
-
+static unsigned int mspace_num_used_blocks_before_minor;
+static unsigned int mspace_num_used_blocks_after_minor;
 void gc_gen_reclaim_heap(GC_Gen* gc)
 { 
   if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
@@ -442,13 +442,28 @@
   
   if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){
     /* FIXME:: move_object is only useful for nongen_slide_copy */
-    gc->mos->move_object = FALSE;
+    gc->mos->move_object = 0;
+    /* This is for computing mspace->last_alloced_size */
+
+    mspace_num_used_blocks_before_minor = mspace->free_block_idx - mspace->first_block_idx;
     fspace_collection(gc->nos);
-    gc->mos->move_object = TRUE;      
+    mspace_num_used_blocks_after_minor = mspace->free_block_idx - mspace->first_block_idx;
+    assert( mspace_num_used_blocks_before_minor <= mspace_num_used_blocks_after_minor );
+    mspace->last_alloced_size = GC_BLOCK_SIZE_BYTES * ( mspace_num_used_blocks_after_minor - mspace_num_used_blocks_before_minor );
+
+    /*If the current minor collection failed, i.e. a fallback will happen, we should not do the minor sweep of LOS*/
+    if(gc->collect_result != FALSE)
+      lspace_collection(gc->los);
+
+    gc->mos->move_object = 1;      
   }else{
     /* process mos and nos together in one compaction */
+    gc->los->move_object = 1;
+
     mspace_collection(gc->mos); /* fspace collection is included */
     lspace_collection(gc->los);
+
+    gc->los->move_object = 0;
   }
 
   if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){
@@ -463,10 +478,12 @@
     gc->collect_kind = FALLBACK_COLLECTION;    
 
     if(verify_live_heap) event_gc_collect_kind_changed((GC*)gc);
-
+    
+    gc->los->move_object = 1;
     mspace_collection(gc->mos); /* fspace collection is included */
     lspace_collection(gc->los);
-    
+    gc->los->move_object = 0;    
+
     IS_FALLBACK_COMPACTION = FALSE;
   }
   
@@ -478,13 +495,47 @@
   
   if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
 
-  /*Fixme: clear root set here to support verify.*/
+  /* FIXME:: clear root set here to support verify. */
 #ifdef COMPRESS_REFERENCE
   gc_set_pool_clear(gc->metadata->gc_uncompressed_rootset_pool);
 #endif
-
   assert(!gc->los->move_object);
   return;
+}
+
+void gc_gen_update_space_before_gc(GC_Gen *gc)
+{
+  /* Update before every GC to avoid the atomic operation in every fspace_alloc_block */
+  assert( gc->nos->free_block_idx >= gc->nos->first_block_idx );
+  gc->nos->last_alloced_size = GC_BLOCK_SIZE_BYTES * ( gc->nos->free_block_idx - gc->nos->first_block_idx );
+
+  gc->nos->accumu_alloced_size += gc->nos->last_alloced_size;
+  gc->los->accumu_alloced_size += gc->los->last_alloced_size;
+}
+
+void gc_gen_update_space_after_gc(GC_Gen *gc)
+{
+  /* Minor collection; this could also be done every n minor collections, using fspace->num_collections to decide. */
+  if (gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+    gc->mos->accumu_alloced_size += gc->mos->last_alloced_size;
+    /* Resetting the last_alloced_size of mos and nos is not necessary, because they are not accumulated.
+     * But los->last_alloced_size must be reset, because it is accumulated. */
+    gc->los->last_alloced_size = 0;
+  /* Major collection; this could also be done every n major collections, using mspace->num_collections to decide. */
+  }else{
+    gc->mos->total_alloced_size += gc->mos->accumu_alloced_size;
+    gc->mos->last_alloced_size = 0;
+    gc->mos->accumu_alloced_size = 0;
+
+    gc->nos->total_alloced_size += gc->nos->accumu_alloced_size;
+    gc->nos->last_alloced_size = 0;
+    gc->nos->accumu_alloced_size = 0;
+
+    gc->los->total_alloced_size += gc->los->accumu_alloced_size;
+    gc->los->last_alloced_size = 0;
+    gc->los->accumu_alloced_size = 0;
+    
+  }
 }
 
 void gc_gen_iterate_heap(GC_Gen *gc)
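
The reclaim-heap changes derive mspace->last_alloced_size from the number of mature-space blocks consumed by the minor collection, and the new gc_gen_update_space_before_gc/after_gc hooks roll that figure into accumulated and total counters. The arithmetic is shown below on made-up numbers and a stripped-down Space struct; the field names follow the patch, everything else is an assumption:

    #include <cassert>
    #include <cstdio>

    #define GC_BLOCK_SIZE_BYTES (32 * 1024)   /* assumed block size, for illustration only */

    struct Space {
      unsigned int first_block_idx;
      unsigned int free_block_idx;
      unsigned long last_alloced_size;
      unsigned long accumu_alloced_size;
      unsigned long total_alloced_size;
    };

    static unsigned int used_blocks(const Space *s)
    { return s->free_block_idx - s->first_block_idx; }

    int main()
    {
      Space mos = {0, 10, 0, 0, 0};

      unsigned int before = used_blocks(&mos);      /* before fspace_collection() */
      mos.free_block_idx = 13;                      /* pretend survivors consumed 3 blocks */
      unsigned int after = used_blocks(&mos);       /* after fspace_collection() */
      assert(before <= after);
      mos.last_alloced_size = GC_BLOCK_SIZE_BYTES * (unsigned long)(after - before);

      /* Minor case of gc_gen_update_space_after_gc: accumulate mos allocation. */
      mos.accumu_alloced_size += mos.last_alloced_size;

      /* Major case: fold the accumulated figure into the running total and reset. */
      mos.total_alloced_size += mos.accumu_alloced_size;
      mos.accumu_alloced_size = 0;
      mos.last_alloced_size = 0;

      printf("mos total alloced: %lu bytes\n", mos.total_alloced_size);
      return 0;
    }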

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h Wed Jul 25 03:02:07 2007
@@ -155,11 +155,10 @@
 Space* gc_get_nos(GC_Gen* gc);
 Space* gc_get_mos(GC_Gen* gc);
 Space* gc_get_los(GC_Gen* gc);
-Space* gc_get_pos(GC_Gen* gc);
+
 void gc_set_nos(GC_Gen* gc, Space* nos);
 void gc_set_mos(GC_Gen* gc, Space* mos);
 void gc_set_los(GC_Gen* gc, Space* los);
-void gc_set_pos(GC_Gen* gc, Space* pos);
 
 unsigned int gc_get_processor_num(GC_Gen* gc);
 
@@ -173,7 +172,10 @@
 void gc_gen_assign_free_area_to_mutators(GC_Gen* gc);
 
 void gc_gen_adjust_heap_size(GC_Gen* gc, int64 pause_time);
-  
+
+void gc_gen_update_space_before_gc(GC_Gen* gc);
+void gc_gen_update_space_after_gc(GC_Gen* gc);
+
 void gc_gen_mode_adapt_init(GC_Gen *gc);
 
 void gc_gen_iterate_heap(GC_Gen *gc);
@@ -181,5 +183,6 @@
 extern Boolean GEN_NONGEN_SWITCH ;
 
 #endif /* ifndef _GC_GEN_H_ */
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp?view=diff&rev=559382&r1=559381&r2=559382
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp Wed Jul 25 03:02:07 2007
@@ -196,10 +196,11 @@
 
   POINTER_SIZE_INT mos_free_size = space_free_memory_size(mspace);
   POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace);
+  assert(nos_free_size == space_committed_size((Space*)fspace));
   POINTER_SIZE_INT total_free_size = mos_free_size  + nos_free_size;
   if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)) gc->force_gen_mode = FALSE;
   if(!gc->force_gen_mode){
-    /*For major collection:*/
+    /*Major collection:*/
     if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)){
       mspace->time_collections += pause_time;
   
@@ -212,8 +213,7 @@
       
       /*If major is caused by LOS, or collection kind is EXTEND_COLLECTION, all survive ratio is not updated.*/
       if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (!gc_match_kind((GC*)gc, EXTEND_COLLECTION))){
-        POINTER_SIZE_INT major_surviving_size = space_committed_size((Space*)mspace) - mos_free_size;
-        survive_ratio = (float)major_surviving_size/(float)space_committed_size((Space*)mspace);
+        survive_ratio = (float)mspace->period_surviving_size/(float)mspace->committed_heap_size;
         mspace->survive_ratio = survive_ratio;
       }
       /*If there is no minor collection at all, we must give mspace expected threshold a reasonable value.*/
@@ -223,11 +223,14 @@
         *a conservative and reasonable number to avoid next fall back.
         *In fallback compaction, the survive_ratio of mspace must be 1.*/
       if(gc_match_kind((GC*)gc, FALLBACK_COLLECTION)) fspace->survive_ratio = 1;
-    /*For minor collection:*/    
-    }else{
+
+    }
+    /*Minor collection:*/    
+    else
+    {
       /*Give a hint to mini_free_ratio. */
       if(fspace->num_collections == 1){
-        /*fixme: This is only set for tuning the first warehouse!*/
+        /*Fixme: This is only set for tuning the first warehouse!*/
         Tslow = pause_time / gc->survive_ratio;
         SMax = (POINTER_SIZE_INT)((float)(gc->committed_heap_size - gc->los->committed_heap_size) * ( 1 - gc->survive_ratio ));
         last_total_free_size = gc->committed_heap_size - gc->los->committed_heap_size;
@@ -237,6 +240,9 @@
       POINTER_SIZE_INT free_size_threshold;
 
       POINTER_SIZE_INT minor_surviving_size = last_total_free_size - total_free_size;
+      /*If the first GC is caused by LOS, mspace->last_alloced_size should be smaller than this minor_surviving_size,
+       *because last_total_free_size is not accurate.*/
+      if(fspace->num_collections != 1) assert(minor_surviving_size == mspace->last_alloced_size);
   
       float k = Tslow * fspace->num_collections/fspace->time_collections;
       float m = ((float)minor_surviving_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ));
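
Two of the measurements changed in gen_adapt.cpp can be checked by hand: the major-collection survive ratio is now period_surviving_size over the mature space's committed size, and the minor-collection surviving size is the drop in free memory, which after the first collection should equal mspace->last_alloced_size. A small numeric sketch with all values invented:

    #include <cassert>
    #include <cstdio>

    int main()
    {
      /* Major collection: survive ratio from the space's own surviving size. */
      unsigned long period_surviving_size = 24ul << 20;    /* 24 MB survived (invented) */
      unsigned long committed_heap_size   = 128ul << 20;   /* 128 MB mspace (invented) */
      float survive_ratio = (float)period_surviving_size / (float)committed_heap_size;
      printf("mspace survive ratio: %.3f\n", survive_ratio);

      /* Minor collection: surviving bytes equal the drop in free memory and
       * should match what was newly allocated in mspace. */
      unsigned long last_total_free_size = 96ul << 20;
      unsigned long total_free_size      = 90ul << 20;
      unsigned long minor_surviving_size = last_total_free_size - total_free_size;
      unsigned long mspace_last_alloced_size = 6ul << 20;  /* invented to match */
      assert(minor_surviving_size == mspace_last_alloced_size);
      return 0;
    }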


