harmony-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From x..@apache.org
Subject svn commit: r637062 [2/3] - in /harmony/enhanced/drlvm/trunk: make/vm/ vm/gc_gen/src/common/ vm/gc_gen/src/finalizer_weakref/ vm/gc_gen/src/gen/ vm/gc_gen/src/jni/ vm/gc_gen/src/los/ vm/gc_gen/src/mark_compact/ vm/gc_gen/src/mark_sweep/ vm/gc_gen/src/m...
Date Fri, 14 Mar 2008 11:21:32 GMT
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp Fri Mar 14 04:21:27 2008
@@ -32,9 +32,9 @@
       if(!p_obj){  // reference has been cleared
         continue;
       }
-      if(IS_FALLBACK_COMPACTION) {
+      assert(p_obj->vt_raw);
+      if(collect_is_fallback()) {
           if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
-             //this is unreachable for VTable->jlc(p_obj), but needed by general weak roots
              assert(!obj_is_marked_in_vt(p_obj));
              assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
              p_obj = obj_get_fw_in_oi(p_obj);
@@ -48,8 +48,6 @@
   }
 }
 
-extern Boolean IS_MOVE_COMPACT;
-
 /* parameter pointer_addr_in_pool means it is p_ref or p_obj in pool */
 void gc_update_weak_roots(GC *gc, Boolean double_fix)
 {
@@ -67,29 +65,33 @@
       if(!p_obj || !obj_need_move(gc, p_obj)){  // reference has been cleared or not moved
         continue;
       }
+      /* The following code assumes p_obj's space is movable, so mark-sweep is not considered below. */
+      if( collect_is_compact_move()){ /* move-compact uses offset table */
+        if( gc_has_los() && p_obj < los_boundary){
+            p_obj = obj_get_fw_in_oi(p_obj);
+        }else{ /* this is the case with unique move_compact */
+            p_obj = obj_get_fw_in_table(p_obj);
+        }
 
-      if(IS_MOVE_COMPACT){
-        assert(space_of_addr(gc, p_obj)->move_object);
-        *p_ref = obj_get_fw_in_table(p_obj);
-      } else if(gc_match_kind(gc, MC_COLLECTION)){
-        *p_ref = obj_get_fw_in_table(p_obj);
-      } else if(gc_match_kind(gc, MS_COMPACT_COLLECTION) || gc_get_mos((GC_Gen*)gc)->collect_algorithm==MAJOR_MARK_SWEEP){
+      } else if(collect_is_ms_compact()){ 
+        /* ms-compact does not move all live objects, and sometimes needs double-fix */
         if(obj_is_fw_in_oi(p_obj)){
           p_obj = obj_get_fw_in_oi(p_obj);
           /* Only major collection in MS Gen GC might need double_fix.
            * Double fixing happens when both forwarding and compaction happen.
            */
           if(double_fix && obj_is_fw_in_oi(p_obj)){
-            assert(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP);
+            assert(major_is_marksweep());
             p_obj = obj_get_fw_in_oi(p_obj);
             assert(address_belongs_to_gc_heap(p_obj, gc));
           }
-          *p_ref = p_obj;
         }
-      } else {
+      } else { /* minor collection or slide major compaction */
         assert(obj_is_fw_in_oi(p_obj));
-        *p_ref = obj_get_fw_in_oi(p_obj);
+        p_obj = obj_get_fw_in_oi(p_obj);
       }
+      
+      *p_ref = p_obj;
     }
   }
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp Fri Mar 14 04:21:27 2008
@@ -56,7 +56,7 @@
 
 static inline void fallback_update_fw_ref(REF *p_ref)
 {
-  assert(IS_FALLBACK_COMPACTION);
+  assert(collect_is_fallback());
   
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
   if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
@@ -82,8 +82,8 @@
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
       REF *p_ref = (REF*)iter;
-      if(IS_FALLBACK_COMPACTION)
-        fallback_update_fw_ref(p_ref);  // in case that this collection is FALLBACK_COLLECTION
+      if(collect_is_fallback())
+        fallback_update_fw_ref(p_ref);  // in case that this collection is ALGO_MAJOR_FALLBACK
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
       if(!p_obj)
         continue;
@@ -91,7 +91,7 @@
         gc_add_finalizable_obj(gc, p_obj);
         *p_ref = (REF)NULL;
       } else {
-        if(gc_match_kind(gc, MINOR_COLLECTION) && obj_need_move(gc, p_obj)){
+        if(collect_is_minor() && obj_need_move(gc, p_obj)){
           assert(obj_is_fw_in_oi(p_obj));
           write_slot(p_ref, obj_get_fw_in_oi(p_obj));
         }
@@ -105,7 +105,7 @@
   }
   gc_put_finalizable_objects(gc);
   
-  if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+  if(collect_need_update_repset())
     finref_add_repset_from_pool(gc, obj_with_fin_pool);
 }
 
@@ -135,23 +135,23 @@
   Trace_Object_Func trace_object;
   
   /* set trace_object() function */
-  if(gc_match_kind(gc, MINOR_COLLECTION)){
-    switch( MINOR_ALGO ){
-    case MINOR_NONGEN_FORWARD_POOL:
-      trace_object = trace_obj_in_nongen_fw;
-      break;
-    case MINOR_GEN_FORWARD_POOL:
-      trace_object = trace_obj_in_gen_fw;
-      break;
-    case MINOR_NONGEN_SEMISPACE_POOL:
-      trace_object = trace_obj_in_nongen_ss;
-      break;
-    case MINOR_GEN_SEMISPACE_POOL:
-      trace_object = trace_obj_in_gen_ss;
-      break;
-    default: assert(0);
+  if(collect_is_minor()){
+    if(gc_is_gen_mode()){
+      if(minor_is_forward())
+        trace_object = trace_obj_in_gen_fw;
+      else if(minor_is_semispace())
+        trace_object = trace_obj_in_gen_ss;
+      else 
+        assert(0);
+    }else{
+      if(minor_is_forward())
+        trace_object = trace_obj_in_nongen_fw;
+      else if(minor_is_semispace())
+        trace_object = trace_obj_in_nongen_ss;
+      else 
+        assert(0);
     }
-  } else if(gc_match_kind(gc, NORMAL_MAJOR_COLLECTION)){
+  } else if(collect_is_major_normal()){
     p_ref_or_obj = p_obj;
     if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){
       trace_object = trace_obj_in_space_tune_marking;
@@ -165,18 +165,18 @@
       } else {
         collector->los_live_obj_size += round_up_to_size(obj_size, KB); 
       }
-    } else if(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP){
+    } else if(major_is_marksweep()){
       trace_object = trace_obj_in_ms_marking;
     } else {
       trace_object = trace_obj_in_normal_marking;
     }
-  } else if(gc_match_kind(gc, FALLBACK_COLLECTION)){
-    if(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP)
+  } else if(collect_is_fallback()){
+    if(major_is_marksweep())
       trace_object = trace_obj_in_ms_fallback_marking;
     else
       trace_object = trace_obj_in_fallback_marking;
   } else {
-    assert(gc_match_kind(gc, MARK_SWEEP_GC));
+    assert(major_is_marksweep());
     p_ref_or_obj = p_obj;
     if(!gc_mark_is_concurrent())
       trace_object = trace_obj_in_ms_marking;
@@ -194,8 +194,8 @@
     POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
     while(!vector_block_iterator_end(task_block, iter)){
       void *p_ref_or_obj = (void*)*iter;
-      assert((gc_match_either_kind(gc, MINOR_COLLECTION|FALLBACK_COLLECTION) && *(Partial_Reveal_Object **)p_ref_or_obj)
-              || (gc_match_either_kind(gc, NORMAL_MAJOR_COLLECTION|MS_COLLECTION|MS_COMPACT_COLLECTION) && p_ref_or_obj));
+      assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj)
+              || ((collect_is_major_normal()||major_is_marksweep()) && p_ref_or_obj));
       trace_object(collector, p_ref_or_obj);
       if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
       
@@ -240,7 +240,7 @@
       
       /* Perhaps obj has been resurrected by previous resurrections */
       if(!gc_obj_is_dead(gc, p_obj)){
-        if(gc_match_kind(gc, MINOR_COLLECTION) && obj_need_move(gc, p_obj))
+        if(collect_is_minor() && obj_need_move(gc, p_obj))
           write_slot(p_ref, obj_get_fw_in_oi(p_obj));
         continue;
       }
@@ -248,7 +248,7 @@
       resurrect_obj_tree(collector, p_ref);
       if(collector->result == FALSE){
         /* Resurrection fallback happens */
-        assert(gc_match_kind(gc, MINOR_COLLECTION));
+        assert(collect_is_minor());
         return; /* force return */
       }
     }
@@ -260,7 +260,7 @@
    * Because it is outside heap, we can't update it in ref fixing.
    * In minor collection p_ref of the root dead obj is automatically updated while tracing.
    */
-  if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+  if(collect_need_update_repset())
     finref_add_repset_from_pool(gc, finalizable_obj_pool);
   metadata->pending_finalizers = TRUE;
   
@@ -271,7 +271,7 @@
 
 static void identify_dead_refs(GC *gc, Pool *pool)
 {
-  if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+  if(collect_need_update_repset())
     finref_reset_repset(gc);
 
   pool_iterator_init(pool);
@@ -283,7 +283,7 @@
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
       assert(p_obj);
       REF *p_referent_field = obj_get_referent_field(p_obj);
-      if(IS_FALLBACK_COMPACTION)
+      if(collect_is_fallback())
         fallback_update_fw_ref(p_referent_field);
         
       Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
@@ -297,7 +297,7 @@
       }
       if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
         if(obj_need_move(gc, p_referent)){
-          if(gc_match_kind(gc, MINOR_COLLECTION)){
+          if(collect_is_minor()){
             assert(obj_is_fw_in_oi(p_referent));
             Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
             write_slot(p_referent_field, p_new_referent);
@@ -308,7 +308,7 @@
               if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                 collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 
 
-          } else if(!gc_match_kind(gc, MS_COLLECTION)){
+          } else{ // if(collect_move_object()){ the condition is redundant because obj_need_move already checks 
             finref_repset_add_entry(gc, p_referent_field);
           }
         }
@@ -324,7 +324,7 @@
     block = pool_iterator_next(pool);
   }
   
-  if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION)){
+  if(collect_need_update_repset()){
     finref_put_repset(gc);
     finref_add_repset_from_pool(gc, pool);
   }
@@ -333,7 +333,7 @@
 static void identify_dead_softrefs(Collector *collector)
 {
   GC *gc = collector->gc;
-  if(gc_match_kind(gc, MINOR_COLLECTION)){
+  if(collect_is_minor()){
     assert(softref_pool_is_empty(gc));
     return;
   }
@@ -360,7 +360,7 @@
   Finref_Metadata *metadata = gc->finref_metadata;
   Pool *phanref_pool = metadata->phanref_pool;
   
-  if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+  if(collect_need_update_repset())
     finref_reset_repset(gc);
 //  collector_reset_repset(collector);
   pool_iterator_init(phanref_pool);
@@ -372,17 +372,17 @@
       Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref);
       assert(p_obj);
       REF *p_referent_field = obj_get_referent_field(p_obj);
-      if(IS_FALLBACK_COMPACTION)
-      fallback_update_fw_ref(p_referent_field);
-      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
-      
+      if(collect_is_fallback())
+        fallback_update_fw_ref(p_referent_field);
+
+      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
       if(!p_referent){  // referent field has been cleared
         *p_ref = NULL;
         continue;
       }
       if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
-        if(obj_need_move(gc, p_referent))
-          if(gc_match_kind(gc, MINOR_COLLECTION)){
+        if(obj_need_move(gc, p_referent)){
+          if(collect_is_minor()){
             assert(obj_is_fw_in_oi(p_referent));
             Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
             write_slot(p_referent_field, p_new_referent);
@@ -390,9 +390,10 @@
               if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                 collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 
 
-          } else if(!gc_match_kind(gc, MS_COLLECTION)){
+          } else{ // the collect_move_object() check would be redundant here because obj_need_move already performs it
             finref_repset_add_entry(gc, p_referent_field);
           }
+        }
         *p_ref = (REF)NULL;
         continue;
       }
@@ -408,7 +409,7 @@
     block = pool_iterator_next(phanref_pool);
   }
 //  collector_put_repset(collector);
-  if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION)){
+  if(collect_need_update_repset()){
     finref_put_repset(gc);
     finref_add_repset_from_pool(gc, phanref_pool);
   }
@@ -588,7 +589,7 @@
   resurrect_finalizable_objects(collector);
   gc->collect_result = gc_collection_result(gc);
   if(!gc->collect_result){
-    assert(gc_match_kind(gc, MINOR_COLLECTION));
+    assert(collect_is_minor());
     resurrection_fallback_handler(gc);
     return;
   }
@@ -650,17 +651,17 @@
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
       assert(p_obj);
       REF *p_referent_field = obj_get_referent_field(p_obj);
-      if(IS_FALLBACK_COMPACTION)
+      if(collect_is_fallback())
         fallback_update_fw_ref(p_referent_field);
-      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
-      
+        
+      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
       if(!p_referent){  // referent field has been cleared
         *p_ref = (REF)NULL;
         continue;
       }
       if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
         if(obj_need_move(gc, p_referent))
-          if(gc_match_kind(gc, MINOR_COLLECTION)){
+          if(collect_is_minor()){
             assert(obj_is_fw_in_oi(p_referent));
             Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
             write_slot(p_referent_field, p_new_referent);
@@ -684,14 +685,14 @@
 {
   Finref_Metadata *metadata = gc->finref_metadata;
   
-  if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+  if(collect_need_update_repset())
     finref_reset_repset(gc);
-  if(!gc_match_kind(gc, MS_COLLECTION)){
+  if(collect_move_object()){
     update_referent_field_ignore_finref(gc, metadata->softref_pool);
     update_referent_field_ignore_finref(gc, metadata->weakref_pool);
     update_referent_field_ignore_finref(gc, metadata->phanref_pool);
   }
-  if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+  if(collect_need_update_repset())
     finref_put_repset(gc);
 }
 
@@ -743,7 +744,7 @@
        * Double fixing happens when both forwarding and compaction happen.
        */
       if(double_fix && obj_is_fw_in_oi(p_new_ref)){
-        assert(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP);
+        assert(major_is_marksweep());
         p_new_ref = obj_get_fw_in_oi(p_new_ref);
         assert(address_belongs_to_gc_heap(p_new_ref, gc));
       }
@@ -762,16 +763,14 @@
    * Double fixing happens when both forwarding and compaction happen.
    */
   if(double_fix && obj_is_fw_in_oi(p_obj)){
-    assert(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP);
+    assert(major_is_marksweep());
     p_obj = obj_get_fw_in_oi(p_obj);
     assert(address_belongs_to_gc_heap(p_obj, gc));
   }
   write_slot(p_ref, p_obj);
 }
 
-extern Boolean IS_MOVE_COMPACT;
-
-/* parameter pointer_addr_in_pool means it is p_ref or p_obj in pool */
+/* only called in non-minor collection. parameter pointer_addr_in_pool means it is p_ref or p_obj in pool*/
 static void nondestructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool, Boolean double_fix)
 {
   Finref_Metadata *metadata = gc->finref_metadata;
@@ -790,12 +789,12 @@
         p_ref = (REF*)iter;
       p_obj = read_slot(p_ref);
       
-      if(IS_MOVE_COMPACT){
+      if(collect_is_compact_move()){ /* include both unique move-compact and major move-compact */
         move_compaction_update_ref(gc, p_ref);
-      } else if(gc_match_kind(gc, MS_COMPACT_COLLECTION) || gc_get_mos((GC_Gen*)gc)->collect_algorithm==MAJOR_MARK_SWEEP){
+      } else if(collect_is_ms_compact()){
         if(obj_is_fw_in_oi(p_obj))
           moving_mark_sweep_update_ref(gc, p_ref, double_fix);
-      } else {
+      } else { /* major slide compact */
         assert((obj_is_marked_in_vt(p_obj) && obj_is_fw_in_oi(p_obj)));
         write_slot(p_ref , obj_get_fw_in_oi(p_obj));
       }
@@ -806,7 +805,7 @@
 
 void gc_update_finref_repointed_refs(GC *gc, Boolean double_fix)
 {
-  assert(!gc_match_kind(gc, MINOR_COLLECTION));
+  assert(!collect_is_minor());
   
   Finref_Metadata *metadata = gc->finref_metadata;
   Pool *repset_pool = metadata->repset_pool;
@@ -814,7 +813,7 @@
   
   nondestructively_fix_finref_pool(gc, repset_pool, TRUE, double_fix);
   if(!pool_is_empty(fallback_ref_pool)){
-    assert(IS_FALLBACK_COMPACTION);
+    assert(collect_is_fallback());
     nondestructively_fix_finref_pool(gc, fallback_ref_pool, FALSE, double_fix);
   }
 }
@@ -851,6 +850,8 @@
   finref_copy_pool(finalizable_obj_pool, finalizable_obj_pool_copy, gc);
   finref_copy_pool_to_rootset(gc, finalizable_obj_pool_copy);
 }
+
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h Fri Mar 14 04:21:27 2008
@@ -71,7 +71,7 @@
   }
   switch(type){
     case SOFT_REFERENCE :
-      if(gc_match_kind(collector->gc, MINOR_COLLECTION))
+      if(collect_is_minor())
         scan_slot(collector, p_referent_field);
       else
         collector_add_softref(collector, p_obj);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp Fri Mar 14 04:21:27 2008
@@ -426,6 +426,11 @@
   finref_metadata_clear_pool(gc->finref_metadata->phanref_pool);
 }
 
+void gc_clear_finref_repset_pool(GC* gc)
+{
+  finref_metadata_clear_pool(gc->finref_metadata->repset_pool);
+}
+
 Boolean finref_copy_pool(Pool *src_pool, Pool *dest_pool, GC *gc)
 {
   Vector_Block *dest_block = finref_get_free_block(gc);
@@ -440,4 +445,6 @@
   }
  return TRUE;
 }
+
+
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h Fri Mar 14 04:21:27 2008
@@ -91,6 +91,7 @@
 
 
 extern void gc_clear_weakref_pools(GC *gc);
+extern void gc_clear_finref_repset_pool(GC* gc);
 
 extern Vector_Block *finref_metadata_extend(void);
 /* Every place requesting a free vector block in finref should call this function */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp Fri Mar 14 04:21:27 2008
@@ -42,15 +42,30 @@
 POINTER_SIZE_INT MIN_NOS_SIZE = 0;
 POINTER_SIZE_INT MAX_NOS_SIZE = 0;
 
-/* should clean up */
-unsigned int MINOR_ALGO = 0;
-unsigned int MAJOR_ALGO = 0;
-
 Boolean GEN_NONGEN_SWITCH = FALSE;
 
 Boolean JVMTI_HEAP_ITERATION = true;
 
-Boolean gen_mode;
+GC* gc_gen_create()
+{
+  GC* gc = (GC*)STD_MALLOC(sizeof(GC_Gen));  
+  assert(gc);
+  memset(gc, 0, sizeof(GC_Gen));
+  return gc;
+}
+
+void gc_set_gen_mode(Boolean status)
+{
+  if(status){
+    gc_set_gen_flag(); 
+    gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_REF);
+  }else{
+    gc_clear_gen_flag();
+    gc_set_barrier_function(WRITE_BARRIER_REM_NIL);
+  }
+ 
+  HelperClass_set_GenMode(status);   
+}
 
 #ifndef STATIC_NOS_MAPPING
 void* nos_boundary;
@@ -96,7 +111,7 @@
   determine_min_nos_size(gc_gen, min_heap_size);
   
   POINTER_SIZE_INT los_size = 0;
-  if(MAJOR_ALGO == MAJOR_MARK_SWEEP)
+  if(major_is_marksweep())
     min_los_size_bytes = 0;
   else
     los_size = determine_los_size(min_heap_size);
@@ -202,7 +217,7 @@
 
 #endif  /* STATIC_NOS_MAPPING else */
 
-  HEAP_NULL = (POINTER_SIZE_INT)reserved_base;
+  HEAP_BASE = (POINTER_SIZE_INT)reserved_base;
   
   gc_gen->physical_start = physical_start;
   gc_gen->heap_start = reserved_base;
@@ -218,7 +233,7 @@
   gc_gen->num_collections = 0;
   gc_gen->time_collections = 0;
   gc_gen->blocks = (Block*)reserved_base;
-  gc_gen->force_major_collect = FALSE;
+  gc_gen->next_collect_force_major = FALSE;
   gc_gen->force_gen_mode = FALSE;
 
   max_heap_size_bytes = max_heap_size;
@@ -232,7 +247,7 @@
                                                 + space_committed_size(gc_get_mos(gc_gen))
                                                 + space_committed_size(gc_get_los(gc_gen));
   
-  if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
+  if(!major_is_marksweep()){
     Blocked_Space *nos = (Blocked_Space*)gc_get_nos(gc_gen);
     Blocked_Space *mos = (Blocked_Space*)gc_get_mos(gc_gen);
     /* Connect mos and nos, so that they can be compacted as one space */
@@ -264,7 +279,7 @@
   gc_nos_destruct(gc_gen);
   gc_mos_destruct(gc_gen);
 
-  if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
+  if(!major_is_marksweep()){
     los_size = (int)space_committed_size((Space*)gc_gen->los);
     gc_los_destruct(gc_gen);
   }
@@ -305,7 +320,7 @@
 void gc_nos_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT nos_size, POINTER_SIZE_INT commit_size)
 {
   Space *nos;
-  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+  if(minor_is_semispace()){
     nos = (Space*)sspace_initialize((GC*)gc, start, nos_size, commit_size);
     nos_alloc = sspace_alloc;
   }else{
@@ -314,12 +329,11 @@
   }
   
   gc_set_nos(gc, nos);
-  nos->collect_algorithm = MINOR_ALGO;
 }
 
 void gc_nos_destruct(GC_Gen *gc)
 { 
-  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL)
+  if(minor_is_semispace())
     sspace_destruct((Sspace*)gc->nos);
   else
     fspace_destruct((Fspace*)gc->nos); 
@@ -328,7 +342,7 @@
 void gc_mos_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT mos_size, POINTER_SIZE_INT commit_size)
 {
   Space *mos;
-  if(MAJOR_ALGO == MAJOR_MARK_SWEEP){
+  if(major_is_marksweep()){
     mos = (Space*)wspace_initialize((GC*)gc, start, mos_size, commit_size);
     mos_alloc = wspace_alloc;
   } else {
@@ -336,12 +350,11 @@
     mos_alloc = mspace_alloc;
   }
   gc_set_mos(gc, mos);
-  mos->collect_algorithm = MAJOR_ALGO;
 }
 
 void gc_mos_destruct(GC_Gen *gc)
 {
-  if(MAJOR_ALGO == MAJOR_MARK_SWEEP)
+  if(major_is_marksweep())
     wspace_destruct((Wspace*)gc->mos);
   else
     mspace_destruct((Mspace*)gc->mos);
@@ -350,7 +363,7 @@
 void gc_los_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT los_size)
 {
   Space *los;
-  if(MAJOR_ALGO == MAJOR_MARK_SWEEP){
+  if(major_is_marksweep()){
     assert(los_size == 0);
     los = NULL;
     los_alloc = wspace_alloc;
@@ -363,7 +376,7 @@
 
 void gc_los_destruct(GC_Gen *gc)
 {
-  if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+  if(!major_is_marksweep())
     lspace_destruct((Lspace*)gc->los);
 }
 
@@ -372,102 +385,86 @@
 Boolean IGNORE_VTABLE_TRACING = FALSE;
 Boolean TRACE_JLC_VIA_VTABLE = FALSE;
 
-unsigned int gc_next_collection_kind(GC_Gen* gc)
+void gc_gen_decide_collection_kind(GC_Gen* gc, unsigned int cause)
 {
-  if(gc->force_major_collect || FORCE_FULL_COMPACT)
-    return MAJOR_COLLECTION;
+  if(gc->next_collect_force_major || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT)
+    collect_set_major_normal();
   else
-    return MINOR_COLLECTION;
-}
-
-
-void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause)
-{
-  /* this is for debugging. */
-  gc->last_collect_kind = gc->collect_kind;
-#if defined(USE_MARK_SWEEP_GC)
-  gc->collect_kind = MS_COLLECTION;
-#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
-  gc->collect_kind = MC_COLLECTION;
-#else
-  if(gc->force_major_collect || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT)
-    gc->collect_kind = NORMAL_MAJOR_COLLECTION;
-  else
-    gc->collect_kind = MINOR_COLLECTION;
+    collect_set_minor();
     
-  if(IGNORE_VTABLE_TRACING || (gc->collect_kind == MINOR_COLLECTION))
+  if(IGNORE_VTABLE_TRACING || collect_is_minor())
     TRACE_JLC_VIA_VTABLE = FALSE;
   else
     TRACE_JLC_VIA_VTABLE = TRUE;
-#endif
+
   return;
 }
 
-void gc_decide_collection_algorithm(GC_Gen* gc, char* minor_algo, char* major_algo)
+GC* gc_gen_decide_collection_algo(char* minor_algo, char* major_algo, Boolean has_los)
 {
-  if(!minor_algo){
-    MINOR_ALGO = MINOR_NONGEN_FORWARD_POOL;      
-    gc_disable_gen_mode();
+  GC_PROP = ALGO_POOL_SHARE | ALGO_DEPTH_FIRST;
   
-  }else{
+  /* set default GC properties for generational GC */
+  GC_PROP |= ALGO_HAS_NOS;   
+  
+  /* default is has LOS */
+  GC_PROP |= ALGO_HAS_LOS;
+  
+  Boolean use_default = FALSE;
+
+  if(minor_algo){
     string_to_upper(minor_algo);
      
-    if(!strcmp(minor_algo, "MINOR_NONGEN_FORWARD_POOL")){  
-      MINOR_ALGO = MINOR_NONGEN_FORWARD_POOL;
-      gc_disable_gen_mode();
-      
-    }else if(!strcmp(minor_algo, "MINOR_GEN_FORWARD_POOL")){
-      MINOR_ALGO = MINOR_GEN_FORWARD_POOL;
-      gc_enable_gen_mode();
-    
-    }else if(!strcmp(minor_algo, "MINOR_NONGEN_SEMISPACE_POOL")){
-      MINOR_ALGO = MINOR_NONGEN_SEMISPACE_POOL;
-      gc_disable_gen_mode();
-    
-    }else if(!strcmp(minor_algo, "MINOR_GEN_SEMISPACE_POOL")){
-      MINOR_ALGO = MINOR_GEN_SEMISPACE_POOL;
-      gc_enable_gen_mode();
-
+    if(!strcmp(minor_algo, "PARTIAL_FORWARD")){  
+      GC_PROP |= ALGO_COPY_FORWARD;
+    
+    }else if(!strcmp(minor_algo, "SEMI_SPACE")){
+      GC_PROP |= ALGO_COPY_SEMISPACE;
+    
     }else {
       WARN2("gc.base","\nWarning: GC algorithm setting incorrect. Will use default value.\n");
-      MINOR_ALGO = MINOR_NONGEN_FORWARD_POOL;      
-      gc_disable_gen_mode();    
+      use_default = TRUE;
     }
   }
+  if(!minor_algo || use_default)
+    GC_PROP |= ALGO_COPY_FORWARD;
   
-  if(!major_algo){
-    MAJOR_ALGO = MAJOR_COMPACT_MOVE;
-    
-  }else{
+
+  use_default = FALSE;
+
+  if(major_algo){
     string_to_upper(major_algo);
 
-    if(!strcmp(major_algo, "MAJOR_COMPACT_SLIDE")){
-      MAJOR_ALGO = MAJOR_COMPACT_SLIDE;
+    if(!strcmp(major_algo, "SLIDE_COMPACT")){
+      GC_PROP |= ALGO_COMPACT_SLIDE;
       
-    }else if(!strcmp(major_algo, "MAJOR_COMPACT_MOVE")){
-      MAJOR_ALGO = MAJOR_COMPACT_MOVE;
+    }else if(!strcmp(major_algo, "MOVE_COMPACT")){
+      GC_PROP |= ALGO_COMPACT_MOVE;
+
+    }else if(!strcmp(major_algo, "MARK_SWEEP")){
+      GC_PROP |= ALGO_MARKSWEEP;
     
-    }else if(!strcmp(major_algo, "MAJOR_MARK_SWEEP")){
-      MAJOR_ALGO = MAJOR_MARK_SWEEP;
-      is_collector_local_alloc = FALSE;
     }else{
      WARN2("gc.base","\nWarning: GC algorithm setting incorrect. Will use default value.\n");
-      MAJOR_ALGO = MAJOR_COMPACT_MOVE;
-      
+     use_default = TRUE; 
     }
   }
   
-  return;
-  
+  if(!major_algo || use_default)
+      GC_PROP |= ALGO_COMPACT_MOVE;
+
+  GC* gc = gc_gen_create();
+
+  return gc; 
 }
 
-static Boolean nos_alloc_block(Space* space, Allocator* allocator)
+static Boolean nos_alloc_block(Space* nos, Allocator* allocator)
 {
   Boolean result;
-  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL)
-    result = sspace_alloc_block((Sspace*)space, allocator); 
+  if(minor_is_semispace())
+    result = sspace_alloc_block((Sspace*)nos, allocator); 
   else
-    result = fspace_alloc_block((Fspace*)space, allocator);   
+    result = fspace_alloc_block((Fspace*)nos, allocator);   
  
   return result;   
 }
@@ -495,9 +492,9 @@
 
 static void gc_gen_adjust_heap_size(GC_Gen* gc)
 {
-  assert(gc_match_kind((GC*)gc, MAJOR_COLLECTION));
+  assert(collect_is_major());
   
-  if(gc->committed_heap_size == max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL) return;
+  if(gc->committed_heap_size == max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_BASE) return;
   
   Mspace* mos = (Mspace*)gc->mos;
   Blocked_Space* nos = (Blocked_Space*)gc->nos;
@@ -534,8 +531,8 @@
 
   if(new_heap_total_size <= heap_total_size) return;
   /*If there is only small piece of area left not committed, we just merge it into the heap at once*/
-  if(new_heap_total_size + (max_heap_size_bytes >> 5) > max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL) 
-    new_heap_total_size = max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
+  if(new_heap_total_size + (max_heap_size_bytes >> 5) > max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_BASE) 
+    new_heap_total_size = max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_BASE;
 
   adjust_size = new_heap_total_size - heap_total_size;
   assert( !(adjust_size % SPACE_ALLOC_UNIT) );
@@ -600,7 +597,7 @@
 
 static inline void nos_collection(Space *nos)
 { 
-  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL)
+  if(minor_is_semispace())
     sspace_collection((Sspace*)nos); 
   else
     fspace_collection((Fspace*)nos); 
@@ -608,7 +605,7 @@
 
 static inline void mos_collection(Space *mos)
 {
-  if(MAJOR_ALGO == MAJOR_MARK_SWEEP)
+  if(major_is_marksweep())
     wspace_collection((Wspace*)mos);
   else
     mspace_collection((Mspace*)mos);
@@ -616,7 +613,7 @@
 
 static inline void los_collection(Space *los)
 {
-  if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+  if(!major_is_marksweep())
     lspace_collection((Lspace*)los);
 }
 
@@ -636,7 +633,7 @@
   mos->num_used_blocks = mos_used_space_size((Space*)mos)>> GC_BLOCK_SHIFT_COUNT;
   
   if(los){
-    assert(MAJOR_ALGO != MAJOR_MARK_SWEEP);
+    assert(!major_is_marksweep());
     los->accumu_alloced_size += los->last_alloced_size;
   }
 }
@@ -648,12 +645,12 @@
   Space *los = gc_get_los(gc);
   
   /* Minor collection, but also can be every n minor collections, use fspace->num_collections to identify. */
-  if (gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+  if (collect_is_minor()){
     mos->accumu_alloced_size += mos->last_alloced_size;
     /* The alloced_size reset operation of mos and nos is not necessary, because they are not accumulated.
      * But los->last_alloced_size must be reset, because it is accumulated. */
     if(los){
-      assert(MAJOR_ALGO != MAJOR_MARK_SWEEP);
+      assert(!major_is_marksweep());
       los->last_alloced_size = 0;
     }
   /* Major collection, but also can be every n major collections, use mspace->num_collections to identify. */
@@ -667,7 +664,7 @@
     nos->accumu_alloced_size = 0;
     
     if(los){
-      assert(MAJOR_ALGO != MAJOR_MARK_SWEEP);
+      assert(!major_is_marksweep());
       los->total_alloced_size += los->accumu_alloced_size;
       los->last_alloced_size = 0;
       los->accumu_alloced_size = 0;
@@ -677,7 +674,7 @@
  
 static void nos_reset_after_collection(Space *nos)
 {
-  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL)
+  if(minor_is_semispace())
     sspace_reset_after_collection((Sspace*)nos);
   else
     fspace_reset_after_collection((Fspace*)nos);
@@ -685,19 +682,18 @@
 
 static void nos_prepare_for_collection(Space *nos)
 {
-  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL)
+  if(minor_is_semispace())
     sspace_prepare_for_collection((Sspace*)nos);
 }
 
 static void mos_reset_after_collection(Space *mos)
 {
-  if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+  if(!major_is_marksweep())
     mspace_reset_after_collection((Mspace*)mos);
   else
     wspace_reset_after_collection((Wspace*)mos);
 }
 
-Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it. */
 void gc_gen_stats_verbose(GC_Gen* gc);
 
 void gc_gen_reclaim_heap(GC_Gen *gc, int64 gc_start_time)
@@ -709,10 +705,10 @@
   Space *los = gc->los;
   
   
-  if(verify_live_heap && (MAJOR_ALGO != MAJOR_MARK_SWEEP))
+  if(verify_live_heap && (!major_is_marksweep()))
     gc_verify_heap((GC*)gc, TRUE);
   
-  if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
+  if(!major_is_marksweep()){
     gc_gen_update_space_info_before_gc(gc);
     gc_compute_space_tune_size_before_marking((GC*)gc);
   }
@@ -725,7 +721,7 @@
 
   nos_prepare_for_collection(nos);
 
-  if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+  if(collect_is_minor()){
 
     INFO2("gc.process", "GC: start minor collection ...\n");
 
@@ -734,7 +730,7 @@
     
     /* This is for compute mos->last_alloced_size */
     unsigned int mos_used_blocks_before_minor, mos_used_blocks_after_minor; /* only used for non MAJOR_MARK_SWEEP collection */
-    if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+    if(!major_is_marksweep())
       mos_used_blocks_before_minor = ((Blocked_Space*)mos)->free_block_idx - ((Blocked_Space*)mos)->first_block_idx;
     
     nos_collection(nos);
@@ -743,7 +739,7 @@
     gc_gen_collector_stats_verbose_minor_collection(gc);
 #endif
 
-    if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
+    if(!major_is_marksweep()){
       mos_used_blocks_after_minor = ((Blocked_Space*)mos)->free_block_idx - ((Blocked_Space*)mos)->first_block_idx;
       assert( mos_used_blocks_before_minor <= mos_used_blocks_after_minor );
       ((Blocked_Space*)mos)->last_alloced_size = GC_BLOCK_SIZE_BYTES * ( mos_used_blocks_after_minor - mos_used_blocks_before_minor );
@@ -765,13 +761,13 @@
 
     INFO2("gc.process", "GC: start major collection ...\n");
 
-    if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+    if(!major_is_marksweep())
       los->move_object = TRUE;
     
     mos_collection(mos); /* collect mos and nos  together */
     los_collection(los);
     
-    if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+    if(!major_is_marksweep())
       los->move_object = FALSE;
 
 #ifdef GC_GEN_STATS
@@ -782,19 +778,18 @@
     INFO2("gc.process", "GC: end of major collection ...\n");
   }
   
-  if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+  if(gc->collect_result == FALSE && collect_is_minor()){
     
     INFO2("gc.process", "GC: Minor collection failed, transform to fallback collection ...");
         
     /* runout mos in minor collection */
-    if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
+    if(!major_is_marksweep()){
       assert(((Blocked_Space*)mos)->free_block_idx == ((Blocked_Space*)mos)->ceiling_block_idx + 1);
       ((Blocked_Space*)mos)->num_used_blocks = ((Blocked_Space*)mos)->num_managed_blocks;
     }
     
-    IS_FALLBACK_COMPACTION = TRUE;
     gc_reset_collect_result((GC*)gc);
-    gc->collect_kind = FALLBACK_COLLECTION;
+    GC_PROP |= ALGO_MAJOR_FALLBACK;
 
 #ifdef GC_GEN_STATS
     /*since stats is changed in minor collection, we need to reset stats before fallback collection*/
@@ -805,19 +800,17 @@
     if(gc_is_gen_mode()) 
       gc_clear_remset((GC*)gc);
 
-    if(verify_live_heap && (MAJOR_ALGO != MAJOR_MARK_SWEEP))
+    if(verify_live_heap && (!major_is_marksweep()))
       event_gc_collect_kind_changed((GC*)gc);
     
-    if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+    if(!major_is_marksweep())
       los->move_object = TRUE;
 
     mos_collection(mos); /* collect both mos and nos */
     los_collection(los);
-    if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+    if(!major_is_marksweep())
       los->move_object = FALSE;
     
-    IS_FALLBACK_COMPACTION = FALSE;
-
 #ifdef GC_GEN_STATS
     gc->stats->num_fallback_collections++;
     gc_gen_collector_stats_verbose_major_collection(gc);
@@ -834,20 +827,20 @@
   }
   
   nos_reset_after_collection(nos);
-  if(gc_match_kind((GC*)gc, MAJOR_COLLECTION))
+  if(collect_is_major())
     mos_reset_after_collection(mos);
   
-  if(verify_live_heap && (MAJOR_ALGO != MAJOR_MARK_SWEEP))
+  if(verify_live_heap && (!major_is_marksweep()))
     gc_verify_heap((GC*)gc, FALSE);
   
-  assert(MAJOR_ALGO == MAJOR_MARK_SWEEP || !los->move_object);
+  assert(major_is_marksweep() || !los->move_object);
 
   int64 pause_time = time_now() - gc_start_time;
   gc->time_collections += pause_time;
   
-  if(MAJOR_ALGO != MAJOR_MARK_SWEEP){ /* adaptations here */
+  if(!major_is_marksweep()){ /* adaptations here */
     
-    if(gc_match_kind((GC*)gc, MAJOR_COLLECTION))
+    if(collect_is_major())
       gc_gen_adjust_heap_size(gc);  /* adjust committed GC heap size */
       
     gc_gen_adapt(gc, pause_time); /* 1. decide next collection kind; 2. adjust nos_boundary */
@@ -944,24 +937,24 @@
     <<"\nGC: GC id: GC["<<gc->num_collections<<"]"
     <<"\nGC: current collection num: "<<gc->num_collections);
 
-  switch(gc->collect_kind) {
-  case MINOR_COLLECTION:
+  if( collect_is_minor()) {
     INFO2("gc.collect","GC: collection type: minor");
 #ifdef GC_GEN_STATS
     INFO2("gc.collect","GC: current minor collection num: "<<gc->stats->num_minor_collections);
 #endif
-    break;
-  case NORMAL_MAJOR_COLLECTION:
+  }else if( collect_is_major_normal() ){
     INFO2("gc.collect","GC: collection type: normal major");
 #ifdef GC_GEN_STATS
     INFO2("gc.collect","GC: current normal major collection num: "<<gc->stats->num_major_collections);
 #endif
-    break;
-  case FALLBACK_COLLECTION:
+
+  }else if( collect_is_fallback() ){
     INFO2("gc.collect","GC: collection type: fallback");
 #ifdef GC_GEN_STATS
     INFO2("gc.collect","GC: current fallback collection num: "<<gc->stats->num_fallback_collections);
 #endif
+  }else{
+    assert(0);  
   }
 
   switch(gc->cause) {
@@ -973,6 +966,9 @@
     break;
   case GC_CAUSE_RUNTIME_FORCE_GC:
     INFO2("gc.collect","GC: collection cause: runtime force gc");
+    break;
+  default:
+    assert(0);
   }
 
   INFO2("gc.collect","GC: pause time: "<<(pause_time>>10)<<"ms"
@@ -987,7 +983,7 @@
     <<"\nGC: LOS size: "<<verbose_print_size(gc->los->committed_heap_size)<<", free size:"<<verbose_print_size(lspace_free_memory_size((Lspace*)gc->los))
     <<"\nGC: MOS size: "<<verbose_print_size(gc->mos->committed_heap_size)<<", free size:"<<verbose_print_size(blocked_space_free_mem_size((Blocked_Space*)gc->mos)) << "\n");
 
-  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+  if(minor_is_semispace()){
     INFO2("gc.space", 
     	"GC: NOS size: "<<verbose_print_size(gc->nos->committed_heap_size)
     	<<", tospace size:"<<verbose_print_size(sspace_tospace_size((Sspace*)gc->nos))
@@ -1008,10 +1004,10 @@
     <<"\ninitial num collectors: "<<gc->num_collectors
     <<"\ninitial nos size: "<<verbose_print_size(gc->nos->committed_heap_size)
     <<"\nnos collection algo: "
-    <<((gc->nos->collect_algorithm==MINOR_NONGEN_SEMISPACE_POOL || gc->nos->collect_algorithm==MINOR_GEN_SEMISPACE_POOL)?"semi space":"partial forward")
+    <<(minor_is_semispace()?"semi space":"partial forward")
     <<"\ninitial mos size: "<<verbose_print_size(gc->mos->committed_heap_size)
     <<"\nmos collection algo: "
-    <<((gc->mos->collect_algorithm==MAJOR_COMPACT_MOVE)?"move compact":"slide compact")
+    <<(major_is_compact_move()?"move compact":"slide compact")
     <<"\ninitial los size: "<<verbose_print_size(gc->los->committed_heap_size)<<"\n");
 }
 
@@ -1035,14 +1031,14 @@
 /* init collector alloc_space */
 void gc_gen_init_collector_alloc(GC_Gen* gc, Collector* collector)
 {
-  if(MAJOR_ALGO == MAJOR_MARK_SWEEP){
+  if(major_is_marksweep()){
     allocator_init_local_chunks((Allocator*)collector);
     gc_init_collector_free_chunk_list(collector);
   }
 
   Allocator* allocator = (Allocator*)collector;
   
-  if( MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+  if( minor_is_semispace()){
     allocator->alloc_space = gc->nos; 
     /* init backup allocator */
     unsigned int size = sizeof(Allocator);
@@ -1057,14 +1053,14 @@
 void gc_gen_reset_collector_alloc(GC_Gen* gc, Collector* collector)
 {
   alloc_context_reset((Allocator*)collector);
-  if( MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+  if( minor_is_semispace()){
     alloc_context_reset(collector->backup_allocator);
   }      
 }
 
 void gc_gen_destruct_collector_alloc(GC_Gen* gc, Collector* collector)
 {
-  if( MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+  if( minor_is_semispace()){
     STD_FREE(collector->backup_allocator);  
   }
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h Fri Mar 14 04:21:27 2008
@@ -33,33 +33,7 @@
 struct GC_Gen_Stats;
 #endif
 
-extern Boolean gen_mode;
-
-inline Boolean gc_is_gen_mode()
-{  return gen_mode; }
-
-inline void gc_enable_gen_mode()
-{  
-  gen_mode = TRUE;
-  gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_REF);
-  HelperClass_set_GenMode(TRUE);
-}
-
-inline void gc_disable_gen_mode()
-{  
-  gen_mode = FALSE; 
-  gc_set_barrier_function(WRITE_BARRIER_REM_NIL);
-  HelperClass_set_GenMode(FALSE);
-}
-
-inline void gc_set_gen_mode(Boolean status)
-{
-  gen_mode = status; 
-  if(gen_mode) 
-    gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_REF);
-  HelperClass_set_GenMode(status);   
-}
-
+void gc_set_gen_mode(Boolean status);
 
 /* some globals */
 extern POINTER_SIZE_INT NOS_SIZE;
@@ -145,7 +119,7 @@
   Space *mos;
   Space *los;
       
-  Boolean force_major_collect;
+  Boolean next_collect_force_major;
   Gen_Mode_Adaptor* gen_mode_adaptor;
   Boolean force_gen_mode;
 
@@ -164,16 +138,6 @@
 void gc_gen_init_verbose(GC_Gen *gc);
 void gc_gen_wrapup_verbose(GC_Gen* gc);
                         
-inline POINTER_SIZE_INT gc_gen_free_memory_size(GC_Gen* gc)
-{  return blocked_space_free_mem_size((Blocked_Space*)gc->nos) +
-          blocked_space_free_mem_size((Blocked_Space*)gc->mos) +
-          lspace_free_memory_size((Lspace*)gc->los);  }
-                    
-inline POINTER_SIZE_INT gc_gen_total_memory_size(GC_Gen* gc)
-{  return space_committed_size((Space*)gc->nos) +
-          space_committed_size((Space*)gc->mos) +
-          lspace_committed_size((Lspace*)gc->los);  }
-
 /////////////////////////////////////////////////////////////////////////////////////////
 
 void gc_nos_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT nos_size, POINTER_SIZE_INT commit_size);
@@ -204,9 +168,8 @@
 void gc_set_mos(GC_Gen* gc, Space* mos);
 void gc_set_los(GC_Gen* gc, Space* los);
 
-void gc_decide_collection_algorithm(GC_Gen* gc, char* minor_algo, char* major_algo);
-void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause);
-unsigned int gc_next_collection_kind(GC_Gen* gc);
+GC* gc_gen_decide_collection_algo(char* minor_algo, char* major_algo, Boolean has_los);
+void gc_gen_decide_collection_kind(GC_Gen* gc, unsigned int cause);
 
 void gc_gen_adapt(GC_Gen* gc, int64 pause_time);
 
@@ -235,11 +198,23 @@
 POINTER_SIZE_INT mos_used_space_size(Space* mos);
 POINTER_SIZE_INT nos_used_space_size(Space* nos);
 
+inline POINTER_SIZE_INT gc_gen_free_memory_size(GC_Gen* gc)
+{  return nos_free_space_size((Space*)gc->nos) +
+          blocked_space_free_mem_size((Blocked_Space*)gc->mos) +
+          lspace_free_memory_size((Lspace*)gc->los);  }
+                    
+inline POINTER_SIZE_INT gc_gen_total_memory_size(GC_Gen* gc)
+{  return space_committed_size((Space*)gc->nos) +
+          space_committed_size((Space*)gc->mos) +
+          lspace_committed_size((Lspace*)gc->los);  }
+
 #ifndef STATIC_NOS_MAPPING
 void* nos_space_adjust(Space* space, void* new_nos_boundary, POINTER_SIZE_INT new_nos_size);
 #endif
 
 #endif /* ifndef _GC_GEN_H_ */
+
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp Fri Mar 14 04:21:27 2008
@@ -103,14 +103,14 @@
   POINTER_SIZE_INT nos_free_size = blocked_space_free_mem_size(nos);
   POINTER_SIZE_INT total_free_size = mos_free_size  + nos_free_size;
   
-  if(gc_match_kind((GC*)gc, MAJOR_COLLECTION)) {
+  if(collect_is_major()) {
     assert(!gc_is_gen_mode());
     
     if(gen_mode_adaptor->major_survive_ratio_threshold != 0 && mos->survive_ratio > gen_mode_adaptor->major_survive_ratio_threshold){    
       if(gen_mode_adaptor->major_repeat_count > MAX_MAJOR_REPEAT_COUNT ){
         gc->force_gen_mode = TRUE;
-        gc_enable_gen_mode();
-        gc->force_major_collect = FALSE;
+        gc_set_gen_mode(TRUE);
+        gc->next_collect_force_major = FALSE;
         return;
       }else{
         gen_mode_adaptor->major_repeat_count++;
@@ -121,7 +121,7 @@
     
   }else{
     /*compute throughput*/
-    if(gc->last_collect_kind != MINOR_COLLECTION){
+    if(!collect_last_is_minor((GC*)gc)){
       gen_mode_adaptor->nongen_minor_throughput = 1.0f;
     }
     if(gc->force_gen_mode){
@@ -141,7 +141,7 @@
    }
 
     if(gen_mode_adaptor->nongen_minor_throughput <=  gen_mode_adaptor->gen_minor_throughput ){
-      if( gc->last_collect_kind != MINOR_COLLECTION ){
+      if( !collect_last_is_minor((GC*)gc) ){
         gen_mode_adaptor->major_survive_ratio_threshold = mos->survive_ratio;
       }else if( !gc->force_gen_mode ){
         gc->force_gen_mode = TRUE;
@@ -149,18 +149,18 @@
       } 
     }
 
-    if(gc->force_major_collect && !gc->force_gen_mode){
-        gc->force_major_collect = FALSE;
+    if(gc->next_collect_force_major && !gc->force_gen_mode){
+        gc->next_collect_force_major = FALSE;
         gc->force_gen_mode = TRUE;
         gen_mode_adaptor->gen_mode_trial_count = 2;
-    }else if(gc->last_collect_kind != MINOR_COLLECTION && gc->force_gen_mode){
+    }else if( collect_last_is_minor((GC*)gc) && gc->force_gen_mode){
        gen_mode_adaptor->gen_mode_trial_count = MAX_INT32;
     }
 
     if(gc->force_gen_mode && (total_free_size <= ((float)min_nos_size_bytes) * 1.3 )){
         gc->force_gen_mode = FALSE;
-        gc_disable_gen_mode();
-        gc->force_major_collect = TRUE;
+        gc_set_gen_mode(FALSE);
+        gc->next_collect_force_major = TRUE;
         gen_mode_adaptor->gen_mode_trial_count = 0;
         return;
     }
@@ -170,17 +170,17 @@
 
       gen_mode_adaptor->gen_mode_trial_count --;
       if( gen_mode_adaptor->gen_mode_trial_count >= 0){
-        gc_enable_gen_mode();
+        gc_set_gen_mode(TRUE);
         return;
       }
           
       gc->force_gen_mode = FALSE;
-      gc->force_major_collect = TRUE;    
+      gc->next_collect_force_major = TRUE;    
       gen_mode_adaptor->gen_mode_trial_count = 0;
     }
   }
   
-  gc_disable_gen_mode();
+  gc_set_gen_mode(FALSE);
   return;
 }
 
@@ -206,31 +206,35 @@
   POINTER_SIZE_INT nos_free_size = space_committed_size(nos);  
 
   POINTER_SIZE_INT total_free_size = mos_free_size  + nos_free_size;
-  if(gc_match_kind((GC*)gc, MAJOR_COLLECTION)) gc->force_gen_mode = FALSE;
+  if(collect_is_major()) gc->force_gen_mode = FALSE;
   if(!gc->force_gen_mode){
     /*Major collection:*/
-    if(gc_match_kind((GC*)gc, MAJOR_COLLECTION)){
+    if(collect_is_major()){
       mos->time_collections += pause_time;
   
       Tslow = (float)pause_time;
       SMax = total_free_size;
       /*If fall back happens, and nos_boundary reaches heap_ceiling, then we force major.*/
       if( nos_free_size == 0)
-        gc->force_major_collect = TRUE;
-      else gc->force_major_collect = FALSE;
+        gc->next_collect_force_major = TRUE;
+      else gc->next_collect_force_major = FALSE;
       
-      /*If major is caused by LOS, or collection kind is EXTEND_COLLECTION, all survive ratio is not updated.*/
-      if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (!gc_match_kind((GC*)gc, EXTEND_COLLECTION))){
+      /*If major is caused by LOS, or collection kind is ALGO_MAJOR_EXTEND, all survive ratio is not updated.*/
+      extern Boolean mos_extended;
+      if((gc->cause != GC_CAUSE_LOS_IS_FULL) && !mos_extended ){
         survive_ratio = (float)mos->period_surviving_size/(float)mos->committed_heap_size;
         mos->survive_ratio = survive_ratio;
       }
+      /* why do I set it FALSE here? because here is the only place where it's used. */
+      mos_extended = FALSE;
+      
       /*If there is no minor collection at all, we must give mos expected threshold a reasonable value.*/
       if((gc->tuner->kind != TRANS_NOTHING) && (nos->num_collections == 0))
         mspace_set_expected_threshold_ratio((Mspace *)mos, 0.5f);
       /*If this major is caused by fall back compaction, we must give nos->survive_ratio 
         *a conservative and reasonable number to avoid next fall back.
         *In fallback compaction, the survive_ratio of mos must be 1.*/
-      if(gc_match_kind((GC*)gc, FALLBACK_COLLECTION)) nos->survive_ratio = 1;
+      if(collect_is_fallback()) nos->survive_ratio = 1;
 
     }
     /*Minor collection:*/    
@@ -250,7 +254,7 @@
       POINTER_SIZE_INT minor_surviving_size = last_total_free_size - total_free_size;
       /*If the first GC is caused by LOS, mos->last_alloced_size should be smaller than this minor_surviving_size
         *Because the last_total_free_size is not accurate.*/
-      extern unsigned int MINOR_ALGO;
+
       if(nos->num_collections != 1){
       	assert(minor_surviving_size == mos->last_alloced_size);
       }
@@ -267,8 +271,8 @@
       /* FIXME: if the total free size is lesser than threshold, the time point might be too late!
        * Have a try to test whether the backup solution is better for specjbb.
        */
-      //   if ((mos_free_size + nos_free_size + minor_surviving_size) < free_size_threshold) gc->force_major_collect = TRUE;  
-      if ((mos_free_size + nos_free_size)< free_size_threshold) gc->force_major_collect = TRUE;
+      //   if ((mos_free_size + nos_free_size + minor_surviving_size) < free_size_threshold) gc->next_collect_force_major = TRUE;  
+      if ((mos_free_size + nos_free_size)< free_size_threshold) gc->next_collect_force_major = TRUE;
   
       survive_ratio = (float)minor_surviving_size/(float)space_committed_size((Space*)nos);
       nos->survive_ratio = survive_ratio;
@@ -314,7 +318,7 @@
     total_size = max_heap_size_bytes - space_committed_size(los);
 #else
     POINTER_SIZE_INT curr_heap_commit_end = 
-                              (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size;
+                              (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_BASE + gc->committed_heap_size;
     assert(curr_heap_commit_end > (POINTER_SIZE_INT)mos->heap_start);
     total_size = curr_heap_commit_end - (POINTER_SIZE_INT)mos->heap_start;
 #endif
@@ -408,7 +412,7 @@
 
   /* below are ajustment */  
   POINTER_SIZE_INT curr_heap_commit_end = 
-                             (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size;
+                             (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_BASE + gc->committed_heap_size;
   
   void* new_nos_boundary = (void*)(curr_heap_commit_end - new_nos_size);
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp Fri Mar 14 04:21:27 2008
@@ -43,7 +43,7 @@
 {
   GC_Gen_Stats* stats = gc->stats;
 
-  if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+  if(collect_is_minor()){
     stats->nos_surviving_obj_num_minor = 0;
     stats->nos_surviving_obj_size_minor = 0;
     stats->los_suviving_obj_num = 0;
@@ -65,7 +65,7 @@
   GC_Gen_Collector_Stats* collector_stats;
   Boolean is_los_collected = gc_gen_stats->is_los_collected;
 
-  if(gc_match_kind((GC*)gc, MINOR_COLLECTION)) {
+  if(collect_is_minor()) {
 
     for (unsigned int i=0; i<gc->num_active_collectors; i++) {
       collector_stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
@@ -101,15 +101,16 @@
 {
   GC_Gen_Stats* stats = gc->stats;
   Boolean is_los_collected = stats->is_los_collected;
-  if (gc_match_kind((GC*)gc, MINOR_COLLECTION)){
-    TRACE2("gc.space", "GC: Fspace Collection stats: "
-      <<"\nGC: collection algo: "<<((stats->nos_collection_algo_minor==MINOR_NONGEN_FORWARD_POOL)?"nongen forward":"gen forward")
-      <<"\nGC: num surviving objs: "<<stats->nos_surviving_obj_num_minor
-      <<"\nGC: size surviving objs: "<<verbose_print_size(stats->nos_surviving_obj_size_minor)
-      <<"\nGC: surviving ratio: "<<(int)(stats->nos_surviving_ratio_minor*100)<<"%\n");
+  if (collect_is_minor()){
+    TRACE2("gc.space", "GC: NOS Collection stats: "
+      <<"\nGC: " << (gc_is_gen_mode()?"generational":"nongenerational")
+      <<"\nGC: collection algo: " << (minor_is_semispace()?"semi-space":"partial-forward")
+      <<"\nGC: num surviving objs: " << stats->nos_surviving_obj_num_minor
+      <<"\nGC: size surviving objs: " << verbose_print_size(stats->nos_surviving_obj_size_minor)
+      <<"\nGC: surviving ratio: " << (int)(stats->nos_surviving_ratio_minor*100) << "%\n");
   }else{
-    TRACE2("gc.space", "GC: Mspace Collection stats: "
-      <<"\nGC: collection algo: "<<((stats->nos_mos_collection_algo_major==MAJOR_COMPACT_SLIDE)?"slide compact":"move compact")
+    TRACE2("gc.space", "GC: MOS Collection stats: "
+      <<"\nGC: collection algo: " << (major_is_marksweep()?"mark-sweep":"slide compact")
       <<"\nGC: num surviving objs: "<<stats->nos_mos_suviving_obj_num_major
       <<"\nGC: size surviving objs: "<<verbose_print_size(stats->nos_mos_suviving_obj_size_major)
       <<"\nGC: surviving ratio: "<<(int)(stats->nos_mos_suviving_ratio_major*100)<<"%\n");
@@ -117,7 +118,7 @@
 
   if(stats->is_los_collected) { /*if los is collected, need to output los related info*/
     TRACE2("gc.space", "GC: Lspace Collection stats: "
-      <<"\nGC: collection algo: "<<((stats->los_collection_algo==MAJOR_COMPACT_SLIDE)?"slide compact":"mark sweep")
+      <<"\nGC: collection algo: "<<(collect_is_major()?"slide compact":"mark sweep")
       <<"\nGC: num surviving objs: "<<stats->los_suviving_obj_num
       <<"\nGC: size surviving objs: "<<verbose_print_size(stats->los_suviving_obj_size)
       <<"\nGC: surviving ratio: "<<(int)(stats->los_surviving_ratio*100)<<"%\n");

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_utils.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_utils.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_utils.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_utils.cpp Fri Mar 14 04:21:27 2008
@@ -24,9 +24,9 @@
 #ifndef STATIC_NOS_MAPPING
 void* nos_space_adjust(Space* nos, void* new_nos_boundary, POINTER_SIZE_INT new_nos_size)
 {
-  if(nos->collect_algorithm == MINOR_NONGEN_SEMISPACE_POOL || nos->collect_algorithm == MINOR_GEN_SEMISPACE_POOL)
+  if(minor_is_semispace())
     return sspace_heap_start_adjust((Sspace*)nos, new_nos_boundary, new_nos_size);
-  else if( nos->collect_algorithm == MINOR_NONGEN_FORWARD_POOL || nos->collect_algorithm == MINOR_GEN_FORWARD_POOL )
+  else if(minor_is_forward())
     return fspace_heap_start_adjust((Fspace*)nos, new_nos_boundary, new_nos_size);  
   
   assert(0);
@@ -37,7 +37,7 @@
 POINTER_SIZE_INT mos_free_space_size(Space* mos)
 {
   POINTER_SIZE_INT free_size = 0;
-  if( mos->collect_algorithm != MAJOR_MARK_SWEEP )
+  if( !major_is_marksweep())
     return mspace_free_space_size((Mspace*)mos);
 
   assert(0);
@@ -47,9 +47,9 @@
 POINTER_SIZE_INT nos_free_space_size(Space* nos)
 {
   POINTER_SIZE_INT free_size = 0;
-  if(nos->collect_algorithm == MINOR_NONGEN_SEMISPACE_POOL || nos->collect_algorithm == MINOR_GEN_SEMISPACE_POOL)
+  if(minor_is_semispace())
     return sspace_free_space_size((Sspace*)nos);
-  else if( nos->collect_algorithm == MINOR_NONGEN_FORWARD_POOL || nos->collect_algorithm == MINOR_GEN_FORWARD_POOL)
+  else if( minor_is_forward())
     return fspace_free_space_size((Fspace*)nos);
 
   assert(0);
@@ -60,7 +60,7 @@
 POINTER_SIZE_INT mos_used_space_size(Space* mos)
 {
   POINTER_SIZE_INT free_size = 0;
-  if( mos->collect_algorithm != MAJOR_MARK_SWEEP )
+  if( !major_is_marksweep() )
     return mspace_used_space_size((Mspace*)mos);
 
   assert(0);
@@ -70,9 +70,9 @@
 POINTER_SIZE_INT nos_used_space_size(Space* nos)
 {
   POINTER_SIZE_INT free_size = 0;
-  if(nos->collect_algorithm == MINOR_NONGEN_SEMISPACE_POOL || nos->collect_algorithm == MINOR_GEN_SEMISPACE_POOL)
+  if(minor_is_semispace())
     return sspace_used_space_size((Sspace*)nos);
-  else if( nos->collect_algorithm == MINOR_NONGEN_FORWARD_POOL || nos->collect_algorithm == MINOR_GEN_FORWARD_POOL)
+  else if( minor_is_forward())
     return fspace_used_space_size((Fspace*)nos);
 
   assert(0);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp Fri Mar 14 04:21:27 2008
@@ -116,7 +116,7 @@
 
 JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getLargeObjectSize(JNIEnv *, jclass) 
 {
-   return (jint) GC_OBJ_SIZE_THRESHOLD;
+   return (jint) GC_LOS_OBJ_SIZE_THRESHOLD;
 }
 
 #ifdef __cplusplus

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp Fri Mar 14 04:21:27 2008
@@ -50,10 +50,10 @@
   
   //"org.apache.harmony.drlvm.gc_gen.GCHelper" 
   jclass GCHelper = jni_env->FindClass("GCHelper");
-  jfieldID gen_mode = jni_env->GetStaticFieldID(GCHelper, "GEN_MODE", "Z");
-  assert(gen_mode);
+  jfieldID gen_mode_field = jni_env->GetStaticFieldID(GCHelper, "GEN_MODE", "Z");
+  assert(gen_mode_field);
   
-  jni_env->SetStaticBooleanField(GCHelper, gen_mode, status?JNI_TRUE:JNI_FALSE);
+  jni_env->SetStaticBooleanField(GCHelper, gen_mode_field, status?JNI_TRUE:JNI_FALSE);
   
   hythread_suspend_disable();
 */  

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.cpp Fri Mar 14 04:21:27 2008
@@ -41,7 +41,7 @@
 
 Free_Area* free_pool_find_size_area(Free_Area_Pool* pool, POINTER_SIZE_INT size)
 {
-  assert(size >= GC_OBJ_SIZE_THRESHOLD);
+  assert(size >= GC_LOS_OBJ_SIZE_THRESHOLD);
   
   size = ALIGN_UP_TO_KILO(size);
   unsigned int index = pool_list_index_with_size(size);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.h?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.h Fri Mar 14 04:21:27 2008
@@ -61,7 +61,7 @@
   area->next = area->prev = (Bidir_List*)area;
   area->size = size;
   
-  if( size < GC_OBJ_SIZE_THRESHOLD) return NULL;
+  if( size < GC_LOS_OBJ_SIZE_THRESHOLD) return NULL;
   else return area;
 }
 
@@ -92,7 +92,7 @@
 
 inline unsigned int pool_list_index_with_size(POINTER_SIZE_INT size)
 {
-  assert(size >= GC_OBJ_SIZE_THRESHOLD);
+  assert(size >= GC_LOS_OBJ_SIZE_THRESHOLD);
   
   unsigned int index;
   index = (unsigned int) (size >> BIT_SHIFT_TO_KILO);
@@ -102,7 +102,7 @@
 
 inline Free_Area* free_pool_add_area(Free_Area_Pool* pool, Free_Area* free_area)
 {
-  assert( free_area->size >= GC_OBJ_SIZE_THRESHOLD);
+  assert( free_area->size >= GC_LOS_OBJ_SIZE_THRESHOLD);
   
   unsigned int index = pool_list_index_with_size(free_area->size);
   bidir_list_add_item((Bidir_List*)&(pool->sized_area_list[index]), (Bidir_List*)free_area);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp Fri Mar 14 04:21:27 2008
@@ -42,10 +42,10 @@
     vm_commit_mem(reserved_base, lspace_size);
   memset(reserved_base, 0, lspace_size);
 
-  min_los_size_bytes -= LOS_HEAD_RESERVE_FOR_HEAP_NULL;
-  lspace->committed_heap_size = committed_size - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
-  lspace->reserved_heap_size = gc->reserved_heap_size - min_none_los_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
-  lspace->heap_start = (void*)((POINTER_SIZE_INT)reserved_base + LOS_HEAD_RESERVE_FOR_HEAP_NULL);
+  min_los_size_bytes -= LOS_HEAD_RESERVE_FOR_HEAP_BASE;
+  lspace->committed_heap_size = committed_size - LOS_HEAD_RESERVE_FOR_HEAP_BASE;
+  lspace->reserved_heap_size = gc->reserved_heap_size - min_none_los_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_BASE;
+  lspace->heap_start = (void*)((POINTER_SIZE_INT)reserved_base + LOS_HEAD_RESERVE_FOR_HEAP_BASE);
   lspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + committed_size);
 
   lspace->gc = gc;
@@ -130,4 +130,6 @@
 {
   return lspace->failure_size;
 }
+
+
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h Fri Mar 14 04:21:27 2008
@@ -29,11 +29,11 @@
 #include "../common/hashcode.h"
 #endif
 
-/*Fixme: This macro is for handling HEAP_NULL issues caused by JIT OPT*/
+/*Fixme: This macro is for handling HEAP_BASE issues caused by JIT OPT*/
 #ifdef COMPRESS_REFERENCE
-  #define LOS_HEAD_RESERVE_FOR_HEAP_NULL ( SPACE_ALLOC_UNIT )
+  #define LOS_HEAD_RESERVE_FOR_HEAP_BASE ( SPACE_ALLOC_UNIT )
 #else
-  #define LOS_HEAD_RESERVE_FOR_HEAP_NULL ( 0*KB )
+  #define LOS_HEAD_RESERVE_FOR_HEAP_BASE ( 0*KB )
 #endif
 
 typedef struct Lspace{

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp Fri Mar 14 04:21:27 2008
@@ -66,7 +66,7 @@
     /*if the list head is not NULL, it definitely satisfies the request. */   
     remain_size = free_area->size - alloc_size;
     assert(remain_size >= 0);
-    if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
+    if( remain_size >= GC_LOS_OBJ_SIZE_THRESHOLD){
         new_list_nr = pool_list_index_with_size(remain_size);
         p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
         if(new_list_nr == list_hint){
@@ -89,7 +89,7 @@
         free_pool_unlock_nr_list(pool, list_hint);
         p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
         if(remain_size > 0){
-            assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
+            assert((remain_size >= KB) && (remain_size < GC_LOS_OBJ_SIZE_THRESHOLD));
             free_area->size = remain_size;
         }
         return p_result;
@@ -118,7 +118,7 @@
     free_area = (Free_Area*)(head->next);
     while(  free_area != (Free_Area*)head ){
         remain_size = free_area->size - alloc_size;
-        if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
+        if( remain_size >= GC_LOS_OBJ_SIZE_THRESHOLD){
             new_list_nr = pool_list_index_with_size(remain_size);
             p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
             if(new_list_nr == MAX_LIST_INDEX){
@@ -141,7 +141,7 @@
             free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
             p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
             if(remain_size > 0){
-                assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
+                assert((remain_size >= KB) && (remain_size < GC_LOS_OBJ_SIZE_THRESHOLD));
                 free_area->size = remain_size;
             }
             return p_result;
@@ -343,7 +343,7 @@
         assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
         new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
         Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
-        if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
+        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
         lspace->committed_heap_size += trans_size;
         break;
       }
@@ -358,7 +358,7 @@
         assert((POINTER_SIZE_INT)lspace->scompact_fa_end > (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size);
         new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start - tuner->tuning_size;
         Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
-        if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
+        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
         break;
       }
       default:{
@@ -368,7 +368,7 @@
         new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
         if(new_fa_size == 0) break;
         Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
-        if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
+        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
         break;
       }
     }
@@ -479,5 +479,7 @@
   TRACE2("gc.process", "GC: end of lspace sweep algo ...\n");
   return;
 }
+
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp Fri Mar 14 04:21:27 2008
@@ -120,7 +120,7 @@
   GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
 #endif
   
-  assert(gc_match_kind(gc, FALLBACK_COLLECTION));
+  assert(collect_is_fallback());
 
   /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
   unsigned int num_active_collectors = gc->num_active_collectors;
@@ -138,7 +138,7 @@
       REF *p_ref = (REF *)*iter;
       iter = vector_block_iterator_advance(root_set,iter);
 
-      /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */
+      /* root ref can't be NULL (remset may have NULL ref entry, but this function is only for ALGO_MAJOR) */
       assert(*p_ref);
       
       collector_tracestack_push(collector, p_ref);
@@ -210,7 +210,7 @@
   GC* gc = collector->gc;
   Blocked_Space* space = (Blocked_Space*)((GC_Gen*)gc)->nos;
 
-  assert(gc_match_kind(gc, FALLBACK_COLLECTION));
+  assert(collect_is_fallback());
 
   unsigned int num_active_collectors = gc->num_active_collectors;
   atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
@@ -251,6 +251,8 @@
     
 }
 #endif
+
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp Fri Mar 14 04:21:27 2008
@@ -55,7 +55,7 @@
    
   /* All chunks of data requested need to be multiples of GC_OBJECT_ALIGNMENT */
   assert((size % GC_OBJECT_ALIGNMENT) == 0);
-  assert( size <= GC_OBJ_SIZE_THRESHOLD );
+  assert( size <= GC_LOS_OBJ_SIZE_THRESHOLD );
 
   /* check if collector local alloc block is ok. If not, grab a new block */
   p_return = thread_local_alloc(size, allocator);
@@ -71,5 +71,7 @@
     
   return p_return;
 }
+
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp Fri Mar 14 04:21:27 2008
@@ -22,8 +22,6 @@
 #include "mspace_collect_compact.h"
 
 
-Boolean IS_MOVE_COMPACT;
-
 struct GC_Gen;
 Space* gc_get_nos(GC_Gen* gc);
 
@@ -267,69 +265,35 @@
 
   pool_iterator_init(gc->metadata->gc_rootset_pool);
 
-  /* dual mark bits will consume two bits in obj info, that makes current 
-     header hashbits only 5 bits. That's not enough. We implement on-demend
-     hash field allocation in obj during moving. move_compact doesn't support it.
-     Dual mark bits is used for MINOR_NONGEN_FORWARD algorithm */
-
   //For_LOS_extend
   if(gc->tuner->kind != TRANS_NOTHING){
+    major_set_compact_slide();
+  }else if (collect_is_fallback()){
+    major_set_compact_slide();
+  }else{
+    major_set_compact_move();    
+  }
 
+  if(major_is_compact_slide()){
+  
     TRACE2("gc.process", "GC: slide compact algo start ... \n");
     collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
     TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
-
-#ifdef GC_GEN_STATS
-    gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE);
-#endif
-
-  }else if (gc_match_kind(gc, FALLBACK_COLLECTION)){
-
-    TRACE2("gc.process", "GC: slide compact algo start ... \n");
-    collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);  
-    TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
-
-#ifdef GC_GEN_STATS
-    gc_gen_stats_set_los_collected_flag((GC_Gen*)gc, true);
-    gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE);
-#endif
-    //IS_MOVE_COMPACT = TRUE;
-    //collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
-    //IS_MOVE_COMPACT = FALSE;
-  }else{
-
-    switch(mspace->collect_algorithm){
-      case MAJOR_COMPACT_SLIDE:
-        TRACE2("gc.process", "GC: slide compact algo start ... \n");
-        collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
-        TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
-#ifdef GC_GEN_STATS
-        gc_gen_stats_set_los_collected_flag((GC_Gen*)gc, true);
-        gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE);
-#endif
-        break;
-        
-      case MAJOR_COMPACT_MOVE:
-        IS_MOVE_COMPACT = TRUE;
-        
-        TRACE2("gc.process", "GC: move compact algo start ... \n");
-        collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
-        TRACE2("gc.process", "\nGC: end of move compact algo ... \n");
-        IS_MOVE_COMPACT = FALSE;
-#ifdef GC_GEN_STATS
-        gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_MOVE);
-#endif
-        break;
   
-      default:
-        DIE2("gc.collect", "The speficied major collection algorithm doesn't exist!");
-        exit(0);
-        break;
-    }
+  }else if( major_is_compact_move()){      
+    
+    TRACE2("gc.process", "GC: move compact algo start ... \n");
+    collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
+    TRACE2("gc.process", "\nGC: end of move compact algo ... \n");
 
-  }  
+  }else{
+    DIE2("gc.collect", "The specified major collection algorithm doesn't exist!");
+    exit(0);
+  }
 
   return;  
 } 
+
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h Fri Mar 14 04:21:27 2008
@@ -49,9 +49,9 @@
 void fallback_clear_fwd_obj_oi_init(Collector* collector);
 #endif
 
-extern Boolean IS_MOVE_COMPACT;
-
 #endif /* _MSPACE_COLLECT_COMPACT_H_ */
+
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp Fri Mar 14 04:21:27 2008
@@ -27,6 +27,8 @@
 static volatile Block *nos_first_free_block = NULL;
 static volatile Block *first_block_to_move = NULL;
 
+Boolean mos_extended = FALSE;
+
 static void set_first_and_end_block_to_move(Collector *collector, unsigned int mem_changed_size)
 {
   GC_Gen *gc_gen = (GC_Gen *)collector->gc;
@@ -205,7 +207,7 @@
 static void gc_reupdate_repointed_sets(GC* gc, Pool* pool, void *start_address, void *end_address, unsigned int addr_diff)
 {
   GC_Metadata *metadata = gc->metadata;
-  assert(gc_match_kind(gc, EXTEND_COLLECTION));
+  assert(mos_extended);
   
   pool_iterator_init(pool);
 
@@ -227,8 +229,8 @@
   GC *gc = collector->gc;  
   GC_Metadata *metadata = gc->metadata;
 
-  /* only for MAJOR_COLLECTION and FALLBACK_COLLECTION */
-  assert(gc_match_kind(gc, EXTEND_COLLECTION));
+  /* only for ALGO_MAJOR and ALGO_MAJOR_FALLBACK */
+  assert(mos_extended);
   
   gc_reupdate_repointed_sets(gc, metadata->gc_rootset_pool, start_address, end_address, addr_diff);
 
@@ -272,8 +274,8 @@
   Blocked_Space *mspace = (Blocked_Space *)gc_gen->mos;
   Blocked_Space *nspace = (Blocked_Space *)gc_gen->nos;
 
-  /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/
-  gc_gen->collect_kind |= EXTEND_COLLECTION;
+  /*For_LOS adaptive: when doing ALGO_MAJOR_EXTEND, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/
+  mos_extended = TRUE;
   
   unsigned int num_active_collectors = gc_gen->num_active_collectors;
   unsigned int old_num;
@@ -319,8 +321,8 @@
   Fspace *nspace = gc_gen->nos;
   Lspace *lspace = gc_gen->los;
 
-  /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/
-  gc_gen->collect_kind |= EXTEND_COLLECTION;
+  /*For_LOS adaptive: when doing ALGO_MAJOR_EXTEND, mspace->survive_ratio should not be updated in gc_decide_next_collect()*/
+  mos_extended = TRUE;
   
   unsigned int num_active_collectors = gc_gen->num_active_collectors;
   unsigned int old_num;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp Fri Mar 14 04:21:27 2008
@@ -46,7 +46,7 @@
   Block_Header *local_last_dest = dest_block;
 
   void* dest_sector_addr = dest_block->base;
-  Boolean is_fallback = gc_match_kind(collector->gc, FALLBACK_COLLECTION);
+  Boolean is_fallback = collect_is_fallback();
   
 #ifdef USE_32BITS_HASHCODE
   Hashcode_Buf* old_hashcode_buf = NULL;
@@ -83,10 +83,9 @@
     void* src_sector_addr = p_obj;
           
     while( p_obj ){
-
       debug_num_live_obj++;
       assert( obj_is_marked_in_vt(p_obj));
-      /* we don't check if it's set, since only remaining objs from last NOS partial collection need it. */
+      /* we don't check if it's set, since only non-forwarded objs from last NOS partial-forward collection need it. */
       obj_clear_dual_bits_in_oi(p_obj); 
 
 #ifdef GC_GEN_STATS
@@ -125,7 +124,7 @@
         
       assert(((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) <= block_end );
 
-      /* check if current sector has no more sector. If not, loop back. FIXME:: we should add a condition for block check */      
+      /* check if next live object is out of current sector. If not, loop back to continue within this sector. FIXME:: we should add a condition for block check (?) */      
       p_obj =  block_get_next_marked_object(curr_block, &start_pos);
       if ((p_obj != NULL) && (OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) == curr_sector))
         continue;
@@ -133,7 +132,7 @@
       /* current sector is done, let's move it. */
       POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr;
       assert((sector_distance % GC_OBJECT_ALIGNMENT) == 0);
-      /* if sector_distance is zero, we don't do anything. But since block stable is never cleaned, we have to set 0 to it. */
+      /* if sector_distance is zero, we don't do anything. But since block offset table is never cleaned, we have to set 0 to it. */
       curr_block->table[curr_sector] = sector_distance;
 
       if(sector_distance != 0) 
@@ -203,7 +202,7 @@
 
   unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
 
-  if(!gc_match_kind(gc, FALLBACK_COLLECTION))
+  if(!collect_is_fallback())
        mark_scan_heap(collector);  
   else
        mark_scan_heap_for_fallback(collector);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp Fri Mar 14 04:21:27 2008
@@ -158,7 +158,7 @@
 {
   Block_Header *curr_block = blocked_space_block_iterator_next((Blocked_Space*)mspace);
   
-  /* for MAJOR_COLLECTION, we must iterate over all compact blocks */
+  /* for ALGO_MAJOR, we must iterate over all compact blocks */
   while( curr_block){
     block_fix_ref_after_repointing(curr_block); 
     curr_block = blocked_space_block_iterator_next((Blocked_Space*)mspace);
@@ -420,7 +420,7 @@
 
   unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
 
-  if(gc_match_kind(gc, FALLBACK_COLLECTION))
+  if(collect_is_fallback())
     mark_scan_heap_for_fallback(collector);
   else if(gc->tuner->kind != TRANS_NOTHING)
     mark_scan_heap_for_space_tune(collector);
@@ -447,7 +447,7 @@
     gc_init_block_for_collectors(gc, mspace);
     
 #ifdef USE_32BITS_HASHCODE
-    if(gc_match_kind(gc, FALLBACK_COLLECTION))
+    if(collect_is_fallback())
       fallback_clear_fwd_obj_oi_init(collector);
 #endif
 
@@ -465,7 +465,7 @@
   atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1);
 
 #ifdef USE_32BITS_HASHCODE
-  if(gc_match_kind(gc, FALLBACK_COLLECTION))
+  if(collect_is_fallback())
     fallback_clear_fwd_obj_oi(collector);
 #endif
   mspace_compute_object_target(collector, mspace);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp Fri Mar 14 04:21:27 2008
@@ -155,7 +155,7 @@
       iter = vector_block_iterator_advance(root_set,iter);
 
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
-      /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */
+      /* root ref can't be NULL (remset may have NULL ref entry, but this function is only for ALGO_MAJOR) */
       assert(p_obj!=NULL);
       /* we have to mark the object before put it into marktask, because
          it is possible to have two slots containing a same object. They will

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp Fri Mar 14 04:21:27 2008
@@ -17,8 +17,6 @@
 
 #include "../common/gc_common.h"
 
-#ifdef USE_MARK_SWEEP_GC
-
 #include "gc_ms.h"
 #include "wspace_mark_sweep.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
@@ -29,6 +27,13 @@
 #include "../common/hashcode.h"
 #endif
 
+GC* gc_ms_create()
+{
+  GC* gc = (GC*)STD_MALLOC(sizeof(GC_MS));  
+  assert(gc);
+  memset(gc, 0, sizeof(GC_MS));
+  return gc;
+}
 
 void gc_ms_initialize(GC_MS *gc_ms, POINTER_SIZE_INT min_heap_size, POINTER_SIZE_INT max_heap_size)
 {
@@ -43,7 +48,7 @@
   wspace_base = vm_reserve_mem(0, max_heap_size);
   wspace_initialize((GC*)gc_ms, wspace_base, max_heap_size, max_heap_size);
   
-  HEAP_NULL = (POINTER_SIZE_INT)wspace_base;
+  HEAP_BASE = (POINTER_SIZE_INT)wspace_base;
   
   gc_ms->heap_start = wspace_base;
   gc_ms->heap_end = (void*)((POINTER_SIZE_INT)wspace_base + max_heap_size);
@@ -138,7 +143,8 @@
 void gc_ms_update_space_statistics(GC_MS* gc)
 {
   POINTER_SIZE_INT num_live_obj = 0;
-  POINTER_SIZE_INT size_live_obj = 0;
+  POINTER_SIZE_INT size_live_obj = 0;  
+  POINTER_SIZE_INT new_obj_size = 0;
   
   Space_Statistics* wspace_stat = gc->wspace->space_statistic;
 
@@ -151,14 +157,33 @@
     size_live_obj += collector->live_obj_size;
   }
 
+  lock(gc->mutator_list_lock);
+  Mutator* mutator = gc->mutator_list;
+  while (mutator) {
+    new_obj_size += mutator->new_obj_size;
+    mutator->new_obj_size = 0;
+    mutator = mutator->next;
+  }  
+  unlock(gc->mutator_list_lock);
+
+  wspace_stat->size_new_obj += new_obj_size;
+  
   wspace_stat->num_live_obj = num_live_obj;
   wspace_stat->size_live_obj = size_live_obj;  
   wspace_stat->last_size_free_space = wspace_stat->size_free_space;
-  wspace_stat->size_free_space = gc->committed_heap_size - size_live_obj;/*TODO:inaccurate value.*/
+  wspace_stat->size_free_space = gc->committed_heap_size - size_live_obj;/*TODO:inaccurate value.*/  
+  wspace_stat->space_utilization_ratio = (float)wspace_stat->size_new_obj / wspace_stat->last_size_free_space;
 }
 
-void gc_ms_iterate_heap(GC_MS *gc)
+void gc_ms_reset_space_statistics(GC_MS* gc)
 {
+  Space_Statistics* wspace_stat = gc->wspace->space_statistic;
+  wspace_stat->size_new_obj = 0;
+  wspace_stat->num_live_obj = 0;
+  wspace_stat->size_live_obj = 0;
+  wspace_stat->space_utilization_ratio = 0;
 }
 
-#endif // USE_MARK_SWEEP_GC
+void gc_ms_iterate_heap(GC_MS *gc)
+{
+}

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h?rev=637062&r1=637061&r2=637062&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h Fri Mar 14 04:21:27 2008
@@ -18,8 +18,6 @@
 #ifndef _GC_MS_H_
 #define _GC_MS_H_
 
-#ifdef USE_MARK_SWEEP_GC
-
 #include "wspace.h"
 
 
@@ -123,9 +121,6 @@
 void gc_ms_start_concurrent_sweep(GC_MS* gc, unsigned int num_collectors);
 void gc_ms_start_most_concurrent_mark(GC_MS* gc, unsigned int num_markers);
 void gc_ms_start_final_mark_after_concurrent(GC_MS* gc, unsigned int num_markers);
-
-
-
-#endif // USE_MARK_SWEEP_GC
+void gc_ms_reset_space_statistics(GC_MS* gc);
 
 #endif // _GC_MS_H_



Mime
View raw message