harmony-commits mailing list archives

From wjwashb...@apache.org
Subject svn commit: r500803 [2/3] - in /harmony/enhanced/drlvm/trunk: build/make/components/vm/ vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/ vm/gc_gen/src/common/ vm/gc_gen/src/finalizer_weakref/ vm/gc_gen/src/gen/ vm/gc_gen/src/jni/ vm/gc_gen/src/mark_c...
Date Sun, 28 Jan 2007 14:41:11 GMT
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp Sun Jan 28 06:41:08 2007
@@ -29,198 +29,106 @@
 
 Boolean IGNORE_FINREF = TRUE;
 
-/* reset obj_with_fin vector block of each mutator */
-void mutator_reset_obj_with_fin(Mutator *mutator)
-{
-  mutator->obj_with_fin = finref_get_free_block();
-}
 
-void gc_set_obj_with_fin(GC *gc)
+static inline Boolean obj_is_dead_in_gen_minor_gc(Partial_Reveal_Object *p_obj)
 {
-  Finref_Metadata *metadata = gc->finref_metadata;
-  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
-
-  /* put back last obj_with_fin block of each mutator */
-  Mutator *mutator = gc->mutator_list;
-  while(mutator){
-    pool_put_entry(obj_with_fin_pool, mutator->obj_with_fin);
-    mutator->obj_with_fin = NULL;
-    mutator = mutator->next;
-  }
-  return;
-}
-
-/* reset weak references vetctor block of each collector */
-void collector_reset_weakref_sets(Collector *collector)
-{
-  collector->softref_set = finref_get_free_block();
-  collector->weakref_set = finref_get_free_block();
-  collector->phanref_set= finref_get_free_block();
+  /*
+   * The first condition supports switching between nongen and gen minor collection.
+   * With such a switch, dead objects in MOS & LOS may have the mark or fw bit set in oi.
+   */
+  return obj_belongs_to_nos(p_obj) && !obj_is_marked_or_fw_in_oi(p_obj);
 }
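
A minimal standalone sketch of the "marked or forwarded" test the comment above relies on. The bit values below are illustrative only; the real layout is defined by DRLVM's object-info helpers (obj_is_marked_or_fw_in_oi and friends).

    #include <cstdint>

    static const uintptr_t MARK_BIT = 0x2;   // illustrative, not DRLVM's actual layout
    static const uintptr_t FWD_BIT  = 0x1;

    // An object whose oi word carries either bit counts as reachable, which is
    // why a gen/nongen switch can leave stale bits on dead MOS/LOS objects
    // that the predicate above must tolerate.
    static bool marked_or_fw(uintptr_t oi) {
      return (oi & (MARK_BIT | FWD_BIT)) != 0;
    }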
 
-void gc_set_weakref_sets(GC *gc)
+static inline Boolean obj_is_dead_in_nongen_minor_gc(Partial_Reveal_Object *p_obj)
 {
-  Finref_Metadata *metadata = gc->finref_metadata;
-  
-  /* put back last weak references block of each collector */
-  unsigned int num_active_collectors = gc->num_active_collectors;
-  for(unsigned int i = 0; i < num_active_collectors; i++)
-  {
-    Collector* collector = gc->collectors[i];
-    pool_put_entry(metadata->softref_pool, collector->softref_set);
-    pool_put_entry(metadata->weakref_pool, collector->weakref_set);
-    pool_put_entry(metadata->phanref_pool, collector->phanref_set);
-    collector->softref_set = NULL;
-    collector->weakref_set= NULL;
-    collector->phanref_set= NULL;
-  }
-  return;
+  return (obj_belongs_to_nos(p_obj) && !obj_is_fw_in_oi(p_obj))
+          || (!obj_belongs_to_nos(p_obj) && !obj_is_marked_in_oi(p_obj));
 }
 
-
-extern Boolean obj_is_dead_in_minor_forward_gc(Collector *collector, Partial_Reveal_Object *p_obj);
-static inline Boolean obj_is_dead_in_minor_copy_gc(Collector *collector, Partial_Reveal_Object *p_obj)
-{
-  return !obj_is_marked_in_vt(p_obj);
-}
-static inline Boolean obj_is_dead_in_major_gc(Collector *collector, Partial_Reveal_Object *p_obj)
+static inline Boolean obj_is_dead_in_major_gc(Partial_Reveal_Object *p_obj)
 {
   return !obj_is_marked_in_vt(p_obj);
 }
 // clear the two least significant bits of p_obj first
-static inline Boolean obj_is_dead(Collector *collector, Partial_Reveal_Object *p_obj)
+static inline Boolean gc_obj_is_dead(GC *gc, Partial_Reveal_Object *p_obj)
 {
-  GC *gc = collector->gc;
+  unsigned int collect_kind = gc->collect_kind;
   
   assert(p_obj);
-  if(gc->collect_kind == MINOR_COLLECTION){
+  if(collect_kind == MINOR_COLLECTION){
     if( gc_is_gen_mode())
-      return obj_is_dead_in_minor_forward_gc(collector, p_obj);
+      return obj_is_dead_in_gen_minor_gc(p_obj);
     else
-      return obj_is_dead_in_minor_copy_gc(collector, p_obj);
+      return obj_is_dead_in_nongen_minor_gc(p_obj);
   } else {
-    return obj_is_dead_in_major_gc(collector, p_obj);
+    return obj_is_dead_in_major_gc(p_obj);
   }
 }
 
-
-static inline Boolean fspace_obj_to_be_forwarded(Partial_Reveal_Object *p_obj, Space *space)
+static inline Boolean fspace_obj_to_be_forwarded(Partial_Reveal_Object *p_obj)
 {
-  if(!obj_belongs_to_space(p_obj, (Space*)space)) return FALSE;
+  if(!obj_belongs_to_nos(p_obj)) return FALSE;
   return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
 }
-static inline Boolean obj_need_move(Collector *collector, Partial_Reveal_Object *p_obj)
+static inline Boolean obj_need_move(GC *gc, Partial_Reveal_Object *p_obj)
 {
-  assert(!obj_is_dead(collector, p_obj));
-  GC *gc = collector->gc;
+  assert(!gc_obj_is_dead(gc, p_obj));
   
   if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION)
-    return fspace_obj_to_be_forwarded(p_obj, collector->collect_space);
+    return fspace_obj_to_be_forwarded(p_obj);
   
   Space *space = space_of_addr(gc, p_obj);
   return space->move_object;
 }
 
-
-extern void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref);
-extern void resurrect_obj_tree_after_mark(Collector *collector, Partial_Reveal_Object *p_obj);
-static inline void resurrect_obj_tree_in_minor_copy_gc(Collector *collector, Partial_Reveal_Object *p_obj)
-{
-  resurrect_obj_tree_after_mark(collector, p_obj);
-}
-static inline void resurrect_obj_tree_in_major_gc(Collector *collector, Partial_Reveal_Object *p_obj)
-{
-  resurrect_obj_tree_after_mark(collector, p_obj);
-}
-// clear the two least significant bits of p_obj first
-// add p_ref to repset
-static inline void resurrect_obj_tree(Collector *collector, Partial_Reveal_Object **p_ref)
-{
-  GC *gc = collector->gc;
-  
-  if(!gc_is_gen_mode() || !(gc->collect_kind == MINOR_COLLECTION))
-    collector_repset_add_entry(collector, p_ref);
-  if(!obj_is_dead(collector, *p_ref)){
-    if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION && obj_need_move(collector, *p_ref))
-      *p_ref = obj_get_fw_in_oi(*p_ref);
-    return;
-  }
-  Partial_Reveal_Object* p_obj = *p_ref;
-  assert(p_obj);
-  
-  if(gc->collect_kind == MINOR_COLLECTION){
-    if( gc_is_gen_mode())
-      resurrect_obj_tree_after_trace(collector, p_ref);
-    else
-      resurrect_obj_tree_in_minor_copy_gc(collector, p_obj);
-  } else {
-    resurrect_obj_tree_in_major_gc(collector, p_obj);
-  }
-}
-
-
-/* called before loop of resurrect_obj_tree() */
-static inline void collector_reset_repset(Collector *collector)
-{
-  GC *gc = collector->gc;
-  
-  assert(!collector->rep_set);
-  if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION)
-    return;
-  collector->rep_set = free_set_pool_get_entry(gc->metadata);
-}
-/* called after loop of resurrect_obj_tree() */
-static inline void collector_put_repset(Collector *collector)
-{
-  GC *gc = collector->gc;
-  
-  if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION)
-    return;
-  pool_put_entry(gc->metadata->collector_repset_pool, collector->rep_set);
-  collector->rep_set = NULL;
-}
-
-
-static void finref_add_repset_from_pool(Collector *collector, Pool *pool)
+static void finref_add_repset_from_pool(GC *gc, Pool *pool)
 {
-  GC *gc = collector->gc;
-  
   finref_reset_repset(gc);
-
   pool_iterator_init(pool);
   while(Vector_Block *block = pool_iterator_next(pool)){
-    unsigned int *iter = vector_block_iterator_init(block);
-    
-    while(!vector_block_iterator_end(block, iter)){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
+    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
       Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
-      iter = vector_block_iterator_advance(block, iter);
-	  
-      if(*p_ref && obj_need_move(collector, *p_ref))
+      if(*p_ref && obj_need_move(gc, *p_ref))
         finref_repset_add_entry(gc, p_ref);
     }
   }
   finref_put_repset(gc);
 }
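
The repset here follows a record-then-fix pattern: during reference processing, remember every slot whose target will move; after compaction, rewrite each remembered slot (see gc_update_finref_repointed_refs() below). A minimal sketch with hypothetical types:

    #include <cstddef>
    #include <vector>

    struct Slot { void **addr; };
    static std::vector<Slot> repset;              // stands in for the finref repset pool

    static void remember_if_moving(void **slot, bool target_moves) {
      if (*slot && target_moves)                  // same filter as finref_add_repset_from_pool()
        repset.push_back(Slot{slot});
    }

    static void fix_repointed(void *(*new_addr)(void *old_obj)) {
      for (size_t i = 0; i < repset.size(); i++)  // later pass, after objects have moved
        *repset[i].addr = new_addr(*repset[i].addr);
      repset.clear();
    }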
 
+static inline void fallback_update_fw_ref(Partial_Reveal_Object **p_ref)
+{
+  if(!IS_FALLBACK_COMPACTION)
+    return;
+  
+  Partial_Reveal_Object *p_obj = *p_ref;
+  if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
+    assert(!obj_is_marked_in_vt(p_obj));
+    assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
+    p_obj = obj_get_fw_in_oi(p_obj);
+    assert(p_obj);
+    *p_ref = p_obj;
+  }
+}
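
fallback_update_fw_ref() relies on the standard forwarding-pointer idiom: once an object has been copied, its object-info word holds the new address tagged with a forward bit. A sketch with hypothetical types (the tag choice is illustrative):

    #include <cstdint>

    struct Obj { uintptr_t oi; };                 // object-info word only

    static const uintptr_t FORWARD_BIT = 0x1;     // illustrative tag

    static bool is_forwarded(Obj *o) { return (o->oi & FORWARD_BIT) != 0; }
    static Obj *forwardee(Obj *o)    { return (Obj *)(o->oi & ~FORWARD_BIT); }

    // Rewrite a slot in place if its target has already been copied,
    // mirroring what fallback_update_fw_ref() does per reference slot.
    static void update_slot(Obj **slot) {
      Obj *o = *slot;
      if (o && is_forwarded(o))
        *slot = forwardee(o);
    }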
 
 static void identify_finalizable_objects(Collector *collector)
 {
   GC *gc = collector->gc;
   Finref_Metadata *metadata = gc->finref_metadata;
   Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
-  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
   
   gc_reset_finalizable_objects(gc);
   pool_iterator_init(obj_with_fin_pool);
   while(Vector_Block *block = pool_iterator_next(obj_with_fin_pool)){
     unsigned int block_has_ref = 0;
-    unsigned int *iter = vector_block_iterator_init(block);
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
       Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
+      fallback_update_fw_ref(p_ref);
       Partial_Reveal_Object *p_obj = *p_ref;
       if(!p_obj)
         continue;
-      if(obj_is_dead(collector, p_obj)){
+      if(gc_obj_is_dead(gc, p_obj)){
         gc_add_finalizable_obj(gc, p_obj);
         *p_ref = NULL;
       } else {
@@ -231,83 +139,143 @@
       vector_block_clear(block);
   }
   gc_put_finalizable_objects(gc);
+}
+
+extern void trace_obj_in_gen_fw(Collector *collector, void *p_ref);
+extern void trace_obj_in_nongen_fw(Collector *collector, void *p_ref);
+extern void trace_obj_in_marking(Collector *collector, void *p_obj);
+extern void trace_obj_in_fallback_marking(Collector *collector, void *p_ref);
+
+typedef void (* Trace_Object_Func)(Collector *collector, void *p_ref_or_obj);
+// clear the two least significant bits of p_obj first
+// add p_ref to repset
+static inline void resurrect_obj_tree(Collector *collector, Partial_Reveal_Object **p_ref)
+{
+  GC *gc = collector->gc;
+  GC_Metadata* metadata = gc->metadata;
+  unsigned int collect_kind = gc->collect_kind;
+  Partial_Reveal_Object *p_obj = *p_ref;
+  assert(p_obj && gc_obj_is_dead(gc, p_obj));
+  
+  void *p_ref_or_obj = p_ref;
+  Trace_Object_Func trace_object;
+  
+  /* set trace_object() function */
+  if(collect_kind == MINOR_COLLECTION){
+    if(gc_is_gen_mode())
+      trace_object = trace_obj_in_gen_fw;
+    else
+      trace_object = trace_obj_in_nongen_fw;
+  } else if(collect_kind == MAJOR_COLLECTION){
+    p_ref_or_obj = p_obj;
+    trace_object = trace_obj_in_marking;
+    obj_mark_in_vt(p_obj);
+  } else {
+    assert(collect_kind == FALLBACK_COLLECTION);
+    trace_object = trace_obj_in_fallback_marking;
+  }
+  
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+  collector_tracestack_push(collector, p_ref_or_obj);
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+  Vector_Block *task_block = pool_get_entry(metadata->mark_task_pool);
+  while(task_block){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
+    while(!vector_block_iterator_end(task_block, iter)){
+      void* p_ref_or_obj = (void *)*iter;
+      assert((collect_kind!=MAJOR_COLLECTION && *(Partial_Reveal_Object **)p_ref_or_obj)
+              || (collect_kind==MAJOR_COLLECTION && p_ref_or_obj));
+      trace_object(collector, p_ref_or_obj);
+      iter = vector_block_iterator_advance(task_block, iter);
+    }
+    vector_stack_clear(task_block);
+    pool_put_entry(metadata->free_task_pool, task_block);
+    task_block = pool_get_entry(metadata->mark_task_pool);      
+  }
+  
+  task_block = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(task_block);
+  pool_put_entry(metadata->free_task_pool, task_block);
+  collector->trace_stack = NULL;
+}
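
resurrect_obj_tree() seeds the shared mark-task pool with one entry and then drains it to transitive closure: each trace_obj_* call may push newly discovered references back onto the collector's trace stack, which is published to the pool when full. A single-threaded sketch of that drain loop, with a plain queue standing in for the Vector_Block pool:

    #include <deque>

    typedef std::deque<void *> TaskQueue;         // stands in for mark_task_pool blocks
    typedef void (*TraceFn)(TaskQueue &, void *); // may enqueue newly found refs

    static void drain(TaskQueue &tasks, TraceFn trace) {
      while (!tasks.empty()) {
        void *entry = tasks.front();
        tasks.pop_front();
        trace(tasks, entry);                      // analogous to trace_object(collector, ...)
      }
    }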
+
+static void resurrect_finalizable_objects(Collector *collector)
+{
+  GC *gc = collector->gc;
+  Finref_Metadata *metadata = gc->finref_metadata;
+  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
+  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
+  unsigned int collect_kind = gc->collect_kind;
   
   if(!finalizable_obj_pool_is_empty(gc)){
-    collector_reset_repset(collector);
+    finref_reset_repset(gc);
     pool_iterator_init(finalizable_obj_pool);
     while(Vector_Block *block = pool_iterator_next(finalizable_obj_pool)){
-      unsigned int *iter = vector_block_iterator_init(block);
-      while(!vector_block_iterator_end(block, iter)){
-        assert(*iter);
-        resurrect_obj_tree(collector, (Partial_Reveal_Object **)iter);
-        iter = vector_block_iterator_advance(block, iter);
+      POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
+      for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
+        Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
+        Partial_Reveal_Object *p_obj = *p_ref;
+        assert(p_obj);
+        
+        /*
+         * In major & fallback collections we need to record the p_ref of the root dead obj so it can be updated later.
+         * Because it lives outside the heap, it can't be updated during ref fixing.
+         * In minor collections the p_ref of the root dead obj is updated automatically while tracing.
+         */
+        if(collect_kind != MINOR_COLLECTION)
+          finref_repset_add_entry(gc, p_ref);
+        
+        /* The object may already have been resurrected by an earlier resurrection */
+        if(!gc_obj_is_dead(gc, p_obj)){
+          if(gc->collect_kind == MINOR_COLLECTION && obj_need_move(gc, p_obj))
+            *p_ref = obj_get_fw_in_oi(p_obj);
+          continue;
+        }
+        
+        resurrect_obj_tree(collector, p_ref);
       }
     }
     metadata->pending_finalizers = TRUE;
-    collector_put_repset(collector);
+    finref_put_repset(gc);
   }
   
-  finref_add_repset_from_pool(collector, obj_with_fin_pool);
+  finref_add_repset_from_pool(gc, obj_with_fin_pool);
  /* finalizable objects have been added to the collector repset pool */
   //finref_add_repset_from_pool(collector, finalizable_obj_pool);
 }
 
-static void put_finalizable_obj_to_vm(GC *gc)
+static void identify_dead_refs(GC *gc, Pool *pool)
 {
-  Pool *finalizable_obj_pool = gc->finref_metadata->finalizable_obj_pool;
-  Pool *free_pool = gc->finref_metadata->free_pool;
-  
-  while(Vector_Block *block = pool_get_entry(finalizable_obj_pool)){
-    unsigned int *iter = vector_block_iterator_init(block);
-    while(!vector_block_iterator_end(block, iter)){
-      assert(*iter);
-      Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter;
-      vm_finalize_object(p_obj);
-      iter = vector_block_iterator_advance(block, iter);
-    }
-    vector_block_clear(block);
-    pool_put_entry(free_pool, block);
-  }
-}
-
-static void update_referent_ignore_finref(Collector *collector, Pool *pool)
-{
-  GC *gc = collector->gc;
-  
-  while(Vector_Block *block = pool_get_entry(pool)){
-    unsigned int *iter = vector_block_iterator_init(block);
+  finref_reset_repset(gc);
+  pool_iterator_init(pool);
+  while(Vector_Block *block = pool_iterator_next(pool)){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
       Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
       Partial_Reveal_Object *p_obj = *p_ref;
       assert(p_obj);
       Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
+      fallback_update_fw_ref(p_referent_field);
       Partial_Reveal_Object *p_referent = *p_referent_field;
       
       if(!p_referent){  // referent field has been cleared
         *p_ref = NULL;
         continue;
       }
-      if(!obj_is_dead(collector, p_referent)){  // referent is alive
-        if(obj_need_move(collector, p_referent))
+      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
+        if(obj_need_move(gc, p_referent))
           finref_repset_add_entry(gc, p_referent_field);
         *p_ref = NULL;
         continue;
       }
-      *p_referent_field = NULL; /* referent is softly reachable: clear the referent field */
+      *p_referent_field = NULL; /* referent is weakly reachable: clear the referent field */
     }
   }
-}
-
-void update_ref_ignore_finref(Collector *collector)
-{
-  GC *gc = collector->gc;
-  Finref_Metadata *metadata = gc->finref_metadata;
-  
-  finref_reset_repset(gc);
-  update_referent_ignore_finref(collector, metadata->softref_pool);
-  update_referent_ignore_finref(collector, metadata->weakref_pool);
-  update_referent_ignore_finref(collector, metadata->phanref_pool);
   finref_put_repset(gc);
+  
+  finref_add_repset_from_pool(gc, pool);
 }
 
 static void identify_dead_softrefs(Collector *collector)
@@ -318,75 +286,22 @@
     return;
   }
   
-  Finref_Metadata *metadata = gc->finref_metadata;
-  Pool *softref_pool = metadata->softref_pool;
-  
-  finref_reset_repset(gc);
-  pool_iterator_init(softref_pool);
-  while(Vector_Block *block = pool_iterator_next(softref_pool)){
-    unsigned int *iter = vector_block_iterator_init(block);
-    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
-      Partial_Reveal_Object *p_obj = *p_ref;
-      assert(p_obj);
-      Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
-      Partial_Reveal_Object *p_referent = *p_referent_field;
-      
-      if(!p_referent){  // referent field has been cleared
-        *p_ref = NULL;
-        continue;
-      }
-      if(!obj_is_dead(collector, p_referent)){  // referent is alive
-        if(obj_need_move(collector, p_referent))
-          finref_repset_add_entry(gc, p_referent_field);
-        *p_ref = NULL;
-        continue;
-      }
-      *p_referent_field = NULL; /* referent is softly reachable: clear the referent field */
-    }
-  }
-  finref_put_repset(gc);
-  
-  finref_add_repset_from_pool(collector, softref_pool);
-  return;
+  Pool *softref_pool = gc->finref_metadata->softref_pool;
+  identify_dead_refs(gc, softref_pool);
 }
 
 static void identify_dead_weakrefs(Collector *collector)
 {
   GC *gc = collector->gc;
-  Finref_Metadata *metadata = gc->finref_metadata;
-  Pool *weakref_pool = metadata->weakref_pool;
-  
-  finref_reset_repset(gc);
-  pool_iterator_init(weakref_pool);
-  while(Vector_Block *block = pool_iterator_next(weakref_pool)){
-    unsigned int *iter = vector_block_iterator_init(block);
-    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
-      Partial_Reveal_Object *p_obj = *p_ref;
-      assert(p_obj);
-      Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
-      Partial_Reveal_Object *p_referent = *p_referent_field;
-      
-      if(!p_referent){  // referent field has been cleared
-        *p_ref = NULL;
-        continue;
-      }
-      if(!obj_is_dead(collector, p_referent)){  // referent is alive
-        if(obj_need_move(collector, p_referent))
-          finref_repset_add_entry(gc, p_referent_field);
-        *p_ref = NULL;
-        continue;
-      }
-      *p_referent_field = NULL; /* referent is weakly reachable: clear the referent field */
-    }
-  }
-  finref_put_repset(gc);
+  Pool *weakref_pool = gc->finref_metadata->weakref_pool;
   
-  finref_add_repset_from_pool(collector, weakref_pool);
-  return;
+  identify_dead_refs(gc, weakref_pool);
 }
 
+/*
+ * The reason why we don't use identify_dead_refs() to implement this function is
+ * that we will differentiate phanref from softref & weakref in the future.
+ */
 static void identify_dead_phanrefs(Collector *collector)
 {
   GC *gc = collector->gc;
@@ -397,20 +312,21 @@
 //  collector_reset_repset(collector);
   pool_iterator_init(phanref_pool);
   while(Vector_Block *block = pool_iterator_next(phanref_pool)){
-    unsigned int *iter = vector_block_iterator_init(block);
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
       Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
       Partial_Reveal_Object *p_obj = *p_ref;
       assert(p_obj);
       Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
+      fallback_update_fw_ref(p_referent_field);
       Partial_Reveal_Object *p_referent = *p_referent_field;
       
       if(!p_referent){  // referent field has been cleared
         *p_ref = NULL;
         continue;
       }
-      if(!obj_is_dead(collector, p_referent)){  // referent is alive
-        if(obj_need_move(collector, p_referent))
+      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
+        if(obj_need_move(gc, p_referent))
           finref_repset_add_entry(gc, p_referent_field);
         *p_ref = NULL;
         continue;
@@ -428,16 +344,33 @@
 //  collector_put_repset(collector);
   finref_put_repset(gc);
   
-  finref_add_repset_from_pool(collector, phanref_pool);
-  return;
+  finref_add_repset_from_pool(gc, phanref_pool);
 }
 
-static inline void put_dead_refs_to_vm(GC *gc, Pool *reference_pool)
+static void put_finalizable_obj_to_vm(GC *gc)
+{
+  Pool *finalizable_obj_pool = gc->finref_metadata->finalizable_obj_pool;
+  Pool *free_pool = gc->finref_metadata->free_pool;
+  
+  while(Vector_Block *block = pool_get_entry(finalizable_obj_pool)){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
+    while(!vector_block_iterator_end(block, iter)){
+      assert(*iter);
+      Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter;
+      vm_finalize_object(p_obj);
+      iter = vector_block_iterator_advance(block, iter);
+    }
+    vector_block_clear(block);
+    pool_put_entry(free_pool, block);
+  }
+}
+
+static inline void put_dead_weak_refs_to_vm(GC *gc, Pool *reference_pool)
 {
   Pool *free_pool = gc->finref_metadata->free_pool;
   
   while(Vector_Block *block = pool_get_entry(reference_pool)){
-    unsigned int *iter = vector_block_iterator_init(block);
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     while(!vector_block_iterator_end(block, iter)){
       Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter;
       if(p_obj)
@@ -449,7 +382,7 @@
   }
 }
 
-static void put_dead_weak_refs_to_vm(GC *gc)
+static void put_dead_refs_to_vm(GC *gc)
 {
   if(softref_pool_is_empty(gc)
       && weakref_pool_is_empty(gc)
@@ -465,9 +398,9 @@
   Pool *phanref_pool = gc->finref_metadata->phanref_pool;
   Pool *free_pool = gc->finref_metadata->free_pool;
   
-  put_dead_refs_to_vm(gc, softref_pool);
-  put_dead_refs_to_vm(gc, weakref_pool);
-  put_dead_refs_to_vm(gc, phanref_pool);
+  put_dead_weak_refs_to_vm(gc, softref_pool);
+  put_dead_weak_refs_to_vm(gc, weakref_pool);
+  put_dead_weak_refs_to_vm(gc, phanref_pool);
 }
 
 void collector_identify_finref(Collector *collector)
@@ -478,12 +411,13 @@
   identify_dead_softrefs(collector);
   identify_dead_weakrefs(collector);
   identify_finalizable_objects(collector);
+  resurrect_finalizable_objects(collector);
   identify_dead_phanrefs(collector);
 }
 
 void gc_put_finref_to_vm(GC *gc)
 {
-  put_dead_weak_refs_to_vm(gc);
+  put_dead_refs_to_vm(gc);
   put_finalizable_obj_to_vm(gc);
 }
 
@@ -502,7 +436,7 @@
   gc_set_obj_with_fin(gc);
   unlock(gc->mutator_list_lock);
   while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){
-    unsigned int *iter = vector_block_iterator_init(block);
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     while(!vector_block_iterator_end(block, iter)){
       Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter;
       if(p_obj)
@@ -515,33 +449,90 @@
   vm_gc_unlock_enum();
 }
 
-void gc_update_finref_repointed_refs(GC* gc)
+static void update_referent_field_ignore_finref(GC *gc, Pool *pool)
+{
+  while(Vector_Block *block = pool_get_entry(pool)){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
+    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
+      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
+      Partial_Reveal_Object *p_obj = *p_ref;
+      assert(p_obj);
+      Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
+      fallback_update_fw_ref(p_referent_field);
+      Partial_Reveal_Object *p_referent = *p_referent_field;
+      
+      if(!p_referent){  // referent field has been cleared
+        *p_ref = NULL;
+        continue;
+      }
+      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
+        if(obj_need_move(gc, p_referent))
+          finref_repset_add_entry(gc, p_referent_field);
+        *p_ref = NULL;
+        continue;
+      }
+      *p_referent_field = NULL; /* referent is weakly reachable: clear the referent field */
+    }
+  }
+}
+
+void gc_update_weakref_ignore_finref(GC *gc)
 {
+  Finref_Metadata *metadata = gc->finref_metadata;
+  
+  finref_reset_repset(gc);
+  update_referent_field_ignore_finref(gc, metadata->softref_pool);
+  update_referent_field_ignore_finref(gc, metadata->weakref_pool);
+  update_referent_field_ignore_finref(gc, metadata->phanref_pool);
+  finref_put_repset(gc);
+}
+
+static void move_compaction_update_referent_field(GC *gc, Partial_Reveal_Object **p_referent_field)
+{
+  if(!address_belongs_to_gc_heap((void *)p_referent_field, gc)){
+    *p_referent_field = obj_get_fw_in_table(*p_referent_field);
+    return;
+  }
+  
+  Space *ref_space = space_of_addr(gc, p_referent_field);
+  if(ref_space->move_object){
+    unsigned int offset = get_gc_referent_offset();
+    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object *)((POINTER_SIZE_INT)p_referent_field - offset);
+    Partial_Reveal_Object *p_new_ref = obj_get_fw_in_table(p_old_ref);
+    p_referent_field = (Partial_Reveal_Object **)((POINTER_SIZE_INT)p_new_ref + offset);
+  }
+  assert(space_of_addr(gc, *p_referent_field)->move_object);
+  *p_referent_field = obj_get_fw_in_table(*p_referent_field);
+}
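
The subtle step in move_compaction_update_referent_field() is that the referent slot itself sits inside a Reference object that may have moved, so the slot address must be re-derived: recover the containing object as slot minus offset, forward it, then add the offset back. A sketch with hypothetical helpers:

    #include <cstddef>
    #include <cstdint>

    typedef void *(*ForwardFn)(void *old_obj);    // e.g. a forwarding-table lookup

    static void **rederive_slot(void **old_slot, size_t offset, ForwardFn fw) {
      void *old_obj = (void *)((uintptr_t)old_slot - offset);  // containing object
      void *new_obj = fw(old_obj);                             // its moved copy
      return (void **)((uintptr_t)new_obj + offset);           // same field, new home
    }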
+
+extern Boolean IS_MOVE_COMPACT;
+
+void gc_update_finref_repointed_refs(GC *gc)
+{
+  unsigned int collect_kind = gc->collect_kind;
   Finref_Metadata* metadata = gc->finref_metadata;
   Pool *repset_pool = metadata->repset_pool;
   
   /* NOTE:: this is destructive to the root sets. */
-  Vector_Block* root_set = pool_get_entry(repset_pool);
+  Vector_Block* repset = pool_get_entry(repset_pool);
 
-  while(root_set){
-    unsigned int* iter = vector_block_iterator_init(root_set);
-    while(!vector_block_iterator_end(root_set,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
-      iter = vector_block_iterator_advance(root_set,iter);
-
-      Partial_Reveal_Object* p_obj = *p_ref;
-      /* For repset, this check is unnecessary, since all slots are repointed; otherwise
-         they will not be recorded. For root set, it is possible to point to LOS or other
-         non-moved space.  */
-      Partial_Reveal_Object* p_target_obj;
-      assert(obj_is_fw_in_oi(p_obj));
-      p_target_obj = obj_get_fw_in_oi(p_obj);
-
-      *p_ref = p_target_obj;
-    }
-    vector_block_clear(root_set);
-    pool_put_entry(metadata->free_pool, root_set);
-    root_set = pool_get_entry(repset_pool);
+  while(repset){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(repset);
+    for(; !vector_block_iterator_end(repset,iter); iter = vector_block_iterator_advance(repset,iter)){
+      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object** )*iter;
+      Partial_Reveal_Object *p_obj = *p_ref;
+      
+      if(!IS_MOVE_COMPACT){
+        assert(obj_is_fw_in_oi(p_obj));
+        assert(collect_kind == MINOR_COLLECTION || obj_is_marked_in_vt(p_obj));
+        *p_ref = obj_get_fw_in_oi(p_obj);
+      } else {
+        move_compaction_update_referent_field(gc, p_ref);
+      }
+    }
+    vector_block_clear(repset);
+    pool_put_entry(metadata->free_pool, repset);
+    repset = pool_get_entry(repset_pool);
   } 
   
   return;
@@ -552,7 +543,7 @@
   Finref_Metadata* metadata = gc->finref_metadata;
   
   if(metadata->pending_finalizers || metadata->pending_weakrefs){
-	  metadata->pending_finalizers = FALSE;
+    metadata->pending_finalizers = FALSE;
     metadata->pending_weakrefs = FALSE;
     vm_hint_finalize();
   }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h Sun Jan 28 06:41:08 2007
@@ -28,7 +28,7 @@
 
 extern Boolean IGNORE_FINREF;
 
-/* Phantom status: for future use
+/* Phanref status: for future use
  * #define PHANTOM_REF_ENQUEUE_STATUS_MASK 0x3
  * #define PHANTOM_REF_ENQUEUED_MASK 0x1
  * #define PHANTOM_REF_PENDING_MASK 0x2
@@ -82,13 +82,7 @@
   }
 }
 
-
-extern void mutator_reset_obj_with_fin(Mutator *mutator);
-extern void gc_set_obj_with_fin(GC *gc);
-extern void collector_reset_weakref_sets(Collector *collector);
-
-extern void gc_set_weakref_sets(GC *gc);
-extern void update_ref_ignore_finref(Collector *collector);
+extern void gc_update_weakref_ignore_finref(GC *gc);
 extern void collector_identify_finref(Collector *collector);
 extern void gc_put_finref_to_vm(GC *gc);
 extern void put_all_fin_on_exit(GC *gc);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp Sun Jan 28 06:41:08 2007
@@ -22,14 +22,16 @@
 #include "../thread/mutator.h"
 #include "../thread/collector.h"
 
-#define POOL_SEGMENT_SIZE_BIT_SHIFT 20
-#define POOL_SEGMENT_SIZE_BYTES (1 << POOL_SEGMENT_SIZE_BIT_SHIFT)
+#define FINREF_METADATA_SEG_SIZE_BIT_SHIFT          20
+#define FINREF_METADATA_SEG_SIZE_BYTES                (1 << FINREF_METADATA_SEG_SIZE_BIT_SHIFT)
 
-#define METADATA_BLOCK_SIZE_BIT_SHIFT 10
-#define METADATA_BLOCK_SIZE_BYTES (1<<METADATA_BLOCK_SIZE_BIT_SHIFT)
+// FINREF_METADATA_BLOCK_SIZE_BYTES must be equal to VECTOR_BLOCK_DATA_SIZE_BYTES
+#define FINREF_METADATA_BLOCK_SIZE_BIT_SHIFT  11
+#define FINREF_METADATA_BLOCK_SIZE_BYTES        (1 << FINREF_METADATA_BLOCK_SIZE_BIT_SHIFT)
 
 static Finref_Metadata finref_metadata;
 
+
 unsigned int get_gc_referent_offset(void)
 {
   return finref_metadata.gc_referent_offset;
@@ -39,13 +41,16 @@
   finref_metadata.gc_referent_offset = offset;
 }
 
+
 void gc_finref_metadata_initialize(GC *gc)
 {
-  void *pool_segment = STD_MALLOC(POOL_SEGMENT_SIZE_BYTES);
-  memset(pool_segment, 0, POOL_SEGMENT_SIZE_BYTES);
-  finref_metadata.num_alloc_segs = 0;
-  finref_metadata.pool_segments[finref_metadata.num_alloc_segs] = pool_segment;
-  ++finref_metadata.num_alloc_segs;
+  unsigned int seg_size =  FINREF_METADATA_SEG_SIZE_BYTES + FINREF_METADATA_BLOCK_SIZE_BYTES;
+  void *first_segment = STD_MALLOC(seg_size);
+  memset(first_segment, 0, seg_size);
+  finref_metadata.segments[0] = first_segment;
+  first_segment = (void*)round_up_to_size((POINTER_SIZE_INT)first_segment, FINREF_METADATA_BLOCK_SIZE_BYTES);
+  finref_metadata.num_alloc_segs = 1;
+  finref_metadata.alloc_lock = FREE_LOCK;
   
   finref_metadata.free_pool = sync_pool_create();
   finref_metadata.obj_with_fin_pool = sync_pool_create();
@@ -58,10 +63,10 @@
   finref_metadata.finalizable_obj_set= NULL;
   finref_metadata.repset = NULL;
   
-  unsigned int num_blocks =  POOL_SEGMENT_SIZE_BYTES >> METADATA_BLOCK_SIZE_BIT_SHIFT;
+  unsigned int num_blocks =  FINREF_METADATA_SEG_SIZE_BYTES >> FINREF_METADATA_BLOCK_SIZE_BIT_SHIFT;
   for(unsigned int i=0; i<num_blocks; i++){
-    Vector_Block *block = (Vector_Block *)((unsigned int)pool_segment + i*METADATA_BLOCK_SIZE_BYTES);
-    vector_block_init(block, METADATA_BLOCK_SIZE_BYTES);
+    Vector_Block *block = (Vector_Block *)((POINTER_SIZE_INT)first_segment + i*FINREF_METADATA_BLOCK_SIZE_BYTES);
+    vector_block_init(block, FINREF_METADATA_BLOCK_SIZE_BYTES);
     assert(vector_block_is_empty((Vector_Block *)block));
     pool_put_entry(finref_metadata.free_pool, (void *)block);
   }
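
The new initializer over-allocates each segment by one block and rounds the base up to block alignment, so every carved Vector_Block is block-aligned while the original malloc address stays in segments[] for freeing. A sketch of that trick (assuming round_up_to_size() is the usual mask-based round-up):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // Returns a block-aligned base inside a fresh segment; *raw_out receives
    // the address to pass to free() later, as the commit keeps in segments[].
    static void *alloc_aligned_segment(size_t seg_bytes, size_t align, void **raw_out) {
      void *raw = std::malloc(seg_bytes + align);               // one extra block's worth
      *raw_out = raw;
      uintptr_t base = ((uintptr_t)raw + align - 1) & ~(uintptr_t)(align - 1);
      return (void *)base;
    }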
@@ -90,8 +95,8 @@
   metadata->repset = NULL;
   
   for(unsigned int i=0; i<metadata->num_alloc_segs; i++){
-    assert(metadata->pool_segments[i]);
-    STD_FREE(metadata->pool_segments[i]);
+    assert(metadata->segments[i]);
+    STD_FREE(metadata->segments[i]);
   }
   
   gc->finref_metadata = NULL;
@@ -112,6 +117,107 @@
   return;
 }
 
+
+/* Called when there is no Vector_Block in finref_metadata->free_pool;
+ * extends the pool by one segment.
+ */
+Vector_Block *finref_metadata_extend(void)
+{
+  Finref_Metadata *metadata = &finref_metadata;
+  lock(metadata->alloc_lock);
+  Vector_Block* block = pool_get_entry(metadata->free_pool);
+  if( block ){
+    unlock(metadata->alloc_lock);
+    return block;
+  }
+  
+  unsigned int num_alloced = metadata->num_alloc_segs;
+  if(num_alloced == FINREF_METADATA_SEGMENT_NUM){
+    printf("Ran out of finref metadata; please configure more segments!\n");
+    exit(0);
+  }
+  
+  unsigned int seg_size =  FINREF_METADATA_SEG_SIZE_BYTES + FINREF_METADATA_BLOCK_SIZE_BYTES;
+  void *new_segment = STD_MALLOC(seg_size);
+  memset(new_segment, 0, seg_size);
+  metadata->segments[num_alloced] = new_segment;
+  new_segment = (void*)round_up_to_size((POINTER_SIZE_INT)new_segment, FINREF_METADATA_BLOCK_SIZE_BYTES);
+  metadata->num_alloc_segs++;
+  
+  unsigned int num_blocks =  FINREF_METADATA_SEG_SIZE_BYTES >> FINREF_METADATA_BLOCK_SIZE_BIT_SHIFT;
+  for(unsigned int i=0; i<num_blocks; i++){
+    Vector_Block *block = (Vector_Block *)((POINTER_SIZE_INT)new_segment + i*FINREF_METADATA_BLOCK_SIZE_BYTES);
+    vector_block_init(block, FINREF_METADATA_BLOCK_SIZE_BYTES);
+    assert(vector_block_is_empty((Vector_Block *)block));
+    pool_put_entry(metadata->free_pool, (void *)block);
+  }
+  
+  block = pool_get_entry(metadata->free_pool);
+  unlock(metadata->alloc_lock);
+  return block;
+}
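
Note the shape of finref_metadata_extend(): the free pool itself is concurrent, so alloc_lock only serializes segment allocation, and the pool is re-checked after the lock is taken so racing threads don't each malloc a segment. A compact sketch of the same double-checked pattern (std::mutex standing in for DRLVM's SpinLock; sizes are illustrative):

    #include <cstdlib>
    #include <mutex>
    #include <vector>

    struct FreePool {
      std::mutex pool_lock;    // guards 'blocks' here; the real pool is a concurrent sync pool
      std::mutex alloc_lock;   // stands in for Finref_Metadata::alloc_lock
      std::vector<void *> blocks;

      void *try_get() {
        std::lock_guard<std::mutex> g(pool_lock);
        if (blocks.empty()) return 0;
        void *b = blocks.back(); blocks.pop_back();
        return b;
      }

      void carve_segment(size_t block_bytes, int count) {
        char *seg = (char *)std::malloc(block_bytes * count);
        std::lock_guard<std::mutex> g(pool_lock);
        for (int i = 0; i < count; i++)
          blocks.push_back(seg + i * block_bytes);
      }

      void *get_block() {
        if (void *b = try_get()) return b;          // fast path, no alloc_lock
        std::lock_guard<std::mutex> g(alloc_lock);  // serialize extension
        if (void *b = try_get()) return b;          // recheck: someone else extended
        carve_segment(1 << 11, 512);
        return try_get();
      }
    };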
+
+/* Called when GC completes and there is no Vector_Block in the last five pools of gc->finref_metadata;
+ * shrinks the free pool by half. (Currently a stub.)
+ */
+static void finref_metadata_shrink(GC *gc)
+{
+}
+
+
+/* reset obj_with_fin vector block of each mutator */
+static void gc_reset_obj_with_fin(GC *gc)
+{
+  Mutator *mutator = gc->mutator_list;
+  while(mutator){
+    assert(!mutator->obj_with_fin);
+    mutator->obj_with_fin = finref_get_free_block(gc);
+    mutator = mutator->next;
+  }
+}
+
+/* put back last obj_with_fin block of each mutator */
+void gc_set_obj_with_fin(GC *gc)
+{
+  Pool *obj_with_fin_pool = gc->finref_metadata->obj_with_fin_pool;
+
+  Mutator *mutator = gc->mutator_list;
+  while(mutator){
+    pool_put_entry(obj_with_fin_pool, mutator->obj_with_fin);
+    mutator->obj_with_fin = NULL;
+    mutator = mutator->next;
+  }
+}
+
+/* reset weak references vector block of each collector */
+void collector_reset_weakref_sets(Collector *collector)
+{
+  GC *gc = collector->gc;
+  
+  collector->softref_set = finref_get_free_block(gc);
+  collector->weakref_set = finref_get_free_block(gc);
+  collector->phanref_set= finref_get_free_block(gc);
+}
+
+/* put back last weak references block of each collector */
+void gc_set_weakref_sets(GC *gc)
+{
+  Finref_Metadata *metadata = gc->finref_metadata;
+  
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  for(unsigned int i = 0; i < num_active_collectors; i++)
+  {
+    Collector* collector = gc->collectors[i];
+    pool_put_entry(metadata->softref_pool, collector->softref_set);
+    pool_put_entry(metadata->weakref_pool, collector->weakref_set);
+    pool_put_entry(metadata->phanref_pool, collector->phanref_set);
+    collector->softref_set = NULL;
+    collector->weakref_set= NULL;
+    collector->phanref_set= NULL;
+  }
+  return;
+}
+
 void gc_reset_finref_metadata(GC *gc)
 {
   Finref_Metadata *metadata = gc->finref_metadata;
@@ -127,7 +233,7 @@
   assert(metadata->repset == NULL);
   
   while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){
-    unsigned int *iter = vector_block_iterator_init(block);
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     if(vector_block_iterator_end(block, iter)){
       vector_block_clear(block);
       pool_put_entry(metadata->free_pool, block);
@@ -138,100 +244,54 @@
   assert(pool_is_empty(obj_with_fin_pool));
   metadata->obj_with_fin_pool = finalizable_obj_pool;
   metadata->finalizable_obj_pool = obj_with_fin_pool;
-}
-
-/* called when there is no Vector_Block in finref_metadata->free_pool
- * extend the pool by a pool segment
- */
-static void finref_metadata_extend(void)
-{
-  Finref_Metadata *metadata = &finref_metadata;
   
-  unsigned int pos = metadata->num_alloc_segs;
-  while(pos < POOL_SEGMENT_NUM){
-    unsigned int next_pos = pos + 1;
-    unsigned int temp = (unsigned int)atomic_cas32((volatile unsigned int *)&metadata->num_alloc_segs, next_pos, pos);
-    if(temp == pos)
-      break;
-    pos = metadata->num_alloc_segs;
-  }
-  if(pos > POOL_SEGMENT_NUM)
-    return;
-  
-  void *pool_segment = STD_MALLOC(POOL_SEGMENT_SIZE_BYTES);
-  memset(pool_segment, 0, POOL_SEGMENT_SIZE_BYTES);
-  metadata->pool_segments[pos] = pool_segment;
-  
-  unsigned int num_blocks =  POOL_SEGMENT_SIZE_BYTES >> METADATA_BLOCK_SIZE_BIT_SHIFT;
-  for(unsigned int i=0; i<num_blocks; i++){
-    Vector_Block *block = (Vector_Block *)((unsigned int)pool_segment + i*METADATA_BLOCK_SIZE_BYTES);
-    vector_block_init(block, METADATA_BLOCK_SIZE_BYTES);
-    assert(vector_block_is_empty((Vector_Block *)block));
-    pool_put_entry(metadata->free_pool, (void *)block);
-  }
-  
-  return;
+  gc_reset_obj_with_fin(gc);
 }
 
-Vector_Block *finref_get_free_block(void)
-{
-  Vector_Block *block;
-  
-  while(!(block = pool_get_entry(finref_metadata.free_pool)))
-    finref_metadata_extend();
-  return block;
-}
-
-/* called when GC completes and there is no Vector_Block in the last five pools of gc->finref_metadata
- * shrink the free pool by half
- */
-void finref_metadata_shrink(GC *gc)
-{
-}
 
-static inline void finref_metadata_add_entry(Vector_Block* &vector_block_in_use, Pool *pool, Partial_Reveal_Object *ref)
+static inline void finref_metadata_add_entry(GC *gc, Vector_Block* &vector_block_in_use, Pool *pool, Partial_Reveal_Object *ref)
 {
   assert(vector_block_in_use);
   assert(ref);
 
   Vector_Block* block = vector_block_in_use;
-  vector_block_add_entry(block, (unsigned int)ref);
+  vector_block_add_entry(block, (POINTER_SIZE_INT)ref);
   
   if(!vector_block_is_full(block)) return;
   
   pool_put_entry(pool, block);
-  vector_block_in_use = finref_get_free_block();
+  vector_block_in_use = finref_get_free_block(gc);
 }
 
 void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref)
 {
-  finref_metadata_add_entry(mutator->obj_with_fin, finref_metadata.obj_with_fin_pool, ref);
+  finref_metadata_add_entry(mutator->gc, mutator->obj_with_fin, finref_metadata.obj_with_fin_pool, ref);
 }
 
 void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref)
 {
-  finref_metadata_add_entry(finref_metadata.finalizable_obj_set, finref_metadata.finalizable_obj_pool, ref);
+  finref_metadata_add_entry(gc, finref_metadata.finalizable_obj_set, finref_metadata.finalizable_obj_pool, ref);
 }
 
 void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref)
 {
-  finref_metadata_add_entry(collector->softref_set, finref_metadata.softref_pool, ref);
+  finref_metadata_add_entry(collector->gc, collector->softref_set, finref_metadata.softref_pool, ref);
 }
 
 void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref)
 {
-  finref_metadata_add_entry(collector->weakref_set, finref_metadata.weakref_pool, ref);
+  finref_metadata_add_entry(collector->gc, collector->weakref_set, finref_metadata.weakref_pool, ref);
 }
 
 void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref)
 {
-  finref_metadata_add_entry(collector->phanref_set, finref_metadata.phanref_pool, ref);
+  finref_metadata_add_entry(collector->gc, collector->phanref_set, finref_metadata.phanref_pool, ref);
 }
 
 void finref_repset_add_entry(GC *gc, Partial_Reveal_Object **p_ref)
 {
   assert(*p_ref);
-  finref_metadata_add_entry(finref_metadata.repset, finref_metadata.repset_pool, (Partial_Reveal_Object *)p_ref);
+  finref_metadata_add_entry(gc, finref_metadata.repset, finref_metadata.repset_pool, (Partial_Reveal_Object *)p_ref);
 }
 
 static inline Boolean pool_has_no_ref(Pool *pool)
@@ -240,7 +300,7 @@
     return TRUE;
   pool_iterator_init(pool);
   while(Vector_Block *block = pool_iterator_next(pool)){
-    unsigned int *iter = vector_block_iterator_init(block);
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     while(!vector_block_iterator_end(block, iter)){
       if(*iter)
         return FALSE;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h Sun Jan 28 06:41:08 2007
@@ -25,31 +25,32 @@
 #include "../utils/vector_block.h"
 #include "../utils/sync_pool.h"
 
-#define POOL_SEGMENT_NUM 256
+#define FINREF_METADATA_SEGMENT_NUM 256
 
 typedef struct Finref_Metadata{
-  void *pool_segments[POOL_SEGMENT_NUM];  // malloced free pool segments' addresses array
-  unsigned int num_alloc_segs;              // next available position in pool_segments array
+  void *segments[FINREF_METADATA_SEGMENT_NUM];  // addresses of the malloc'ed pool segments
+  unsigned int num_alloc_segs;                  // number of allocated segments
+  SpinLock alloc_lock;                          // a thread must hold this lock when allocating a new segment
   
-  Pool *free_pool;                        // list of free buffers for the five pools below
+  Pool *free_pool;                              // list of free buffers for the five pools below
   
-  Pool *obj_with_fin_pool;                // list of objects that have finalizer;
-                                          // these objects are added in when they are allocated
-  Pool *finalizable_obj_pool;             // temporary buffer for finalizable objects identified during one single GC
+  Pool *obj_with_fin_pool;                      // list of objects that have finalizers;
+                                                // these objects are added when they are allocated
+  Pool *finalizable_obj_pool;                   // temporary buffer for finalizable objects identified during one single GC
   
-  Pool *softref_pool;                     // temporary buffer for soft references identified during one single GC
-  Pool *weakref_pool;                     // temporary buffer for weak references identified during one single GC
-  Pool *phanref_pool;                     // temporary buffer for phantom references identified during one single GC
+  Pool *softref_pool;                           // temporary buffer for soft references identified during one single GC
+  Pool *weakref_pool;                           // temporary buffer for weak references identified during one single GC
+  Pool *phanref_pool;                           // temporary buffer for phantom references identified during one single GC
   
-  Pool *repset_pool;                      // repointed reference slot sets
+  Pool *repset_pool;                            // repointed reference slot sets
   
-  Vector_Block *finalizable_obj_set;      // buffer for finalizable_objects_pool
-  Vector_Block *repset;                   // buffer for repset_pool
+  Vector_Block *finalizable_obj_set;            // buffer for finalizable_objects_pool
+  Vector_Block *repset;                         // buffer for repset_pool
   
-  Boolean pending_finalizers;             // there are objects waiting to be finalized
-  Boolean pending_weakrefs;               // there are weak references waiting to be enqueued
+  Boolean pending_finalizers;                   // there are objects waiting to be finalized
+  Boolean pending_weakrefs;                     // there are weak references waiting to be enqueued
   
-  unsigned int gc_referent_offset;        // the referent field's offset in Reference Class
+  unsigned int gc_referent_offset;              // the referent field's offset in Reference Class
 }Finref_Metadata;
 
 extern unsigned int get_gc_referent_offset(void);
@@ -58,8 +59,11 @@
 extern void gc_finref_metadata_initialize(GC *gc);
 extern void gc_finref_metadata_destruct(GC *gc);
 extern void gc_finref_metadata_verify(GC *gc, Boolean is_before_gc);
+
+extern void gc_set_obj_with_fin(GC *gc);
+extern void collector_reset_weakref_sets(Collector *collector);
+extern void gc_set_weakref_sets(GC *gc);
 extern void gc_reset_finref_metadata(GC *gc);
-extern Vector_Block *finref_get_free_block(void);
 
 extern void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref);
 extern void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref);
@@ -76,6 +80,18 @@
 extern Boolean finref_repset_pool_is_empty(GC *gc);
 
 extern void gc_clear_weakref_pools(GC *gc);
+
+extern Vector_Block *finref_metadata_extend(void);
+inline Vector_Block *finref_get_free_block(GC *gc)
+{
+  Vector_Block *block = pool_get_entry(gc->finref_metadata->free_pool);
+  
+  while(!block)
+    block = finref_metadata_extend();
+  
+  assert(vector_block_is_empty(block));
+  return block;
+}
 
 
 /* called before loop of recording finalizable objects */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp Sun Jan 28 06:41:08 2007
@@ -28,13 +28,19 @@
 /* fspace size limit is not interesting. only for manual tuning purpose */
 unsigned int min_nos_size_bytes = 16 * MB;
 unsigned int max_nos_size_bytes = 256 * MB;
+unsigned int min_los_size_bytes = 4*MB;
 unsigned int NOS_SIZE = 0;
+unsigned int MIN_LOS_SIZE = 0;
 unsigned int MIN_NOS_SIZE = 0;
 unsigned int MAX_NOS_SIZE = 0;
 
 static unsigned int MINOR_ALGO = 0;
 static unsigned int MAJOR_ALGO = 0;
 
+Boolean GEN_NONGEN_SWITCH = FALSE;
+
+Boolean JVMTI_HEAP_ITERATION = false;
+
 #ifndef STATIC_NOS_MAPPING
 void* nos_boundary;
 #endif
@@ -45,29 +51,35 @@
 {
   gc_gen->_machine_page_size_bytes = port_vmem_page_sizes()[0];
   gc_gen->_num_processors = port_CPUs_number();
+  gc_gen->_system_alloc_unit = vm_get_system_alloc_unit();
+  SPACE_ALLOC_UNIT = max(gc_gen->_system_alloc_unit, GC_BLOCK_SIZE_BYTES);
 }
 
+void* alloc_large_pages(size_t size, const char* hint);
+
 void gc_gen_initialize(GC_Gen *gc_gen, unsigned int min_heap_size, unsigned int max_heap_size) 
 {
   assert(gc_gen); 
+  gc_gen_get_system_info(gc_gen); 
 
-  /*Give GC a hint of gc survive ratio.*/
-  gc_gen->survive_ratio = 0.2f;
-
-  /*fixme: max_heap_size should not beyond 448 MB*/
   max_heap_size = round_down_to_size(max_heap_size, SPACE_ALLOC_UNIT);
   min_heap_size = round_up_to_size(min_heap_size, SPACE_ALLOC_UNIT);
   assert(max_heap_size <= max_heap_size_bytes);
-  assert(max_heap_size > min_heap_size_bytes);
+  assert(max_heap_size >= min_heap_size_bytes);
 
-  gc_gen_get_system_info(gc_gen); 
   min_nos_size_bytes *=  gc_gen->_num_processors;
+
+  unsigned int min_nos_size_threshold = max_heap_size>>5;
+  if(min_nos_size_bytes  > min_nos_size_threshold){
+    min_nos_size_bytes = round_down_to_size(min_nos_size_threshold,SPACE_ALLOC_UNIT);
+  }
   
   if( MIN_NOS_SIZE )  min_nos_size_bytes = MIN_NOS_SIZE;
 
   unsigned int los_size = max_heap_size >> 7;
-  if(los_size < GC_MIN_LOS_SIZE) 
-    los_size = GC_MIN_LOS_SIZE;
+  if(MIN_LOS_SIZE) min_los_size_bytes = MIN_LOS_SIZE;
+  if(los_size < min_los_size_bytes ) 
+    los_size = min_los_size_bytes ;
   
   los_size = round_down_to_size(los_size, SPACE_ALLOC_UNIT);
 
@@ -78,6 +90,8 @@
   unsigned int mos_reserve_size, mos_commit_size; 
   unsigned int los_mos_size;
   
+  /*Give GC a hint of gc survive ratio.*/
+  gc_gen->survive_ratio = 0.2f;
 
   if(NOS_SIZE){
     los_mos_size = max_heap_size - NOS_SIZE;
@@ -103,20 +117,23 @@
 
 #ifdef STATIC_NOS_MAPPING
 
-  assert((unsigned int)nos_boundary%SPACE_ALLOC_UNIT == 0);
+  //FIXME: no large page support in static nos mapping
+  assert(large_page_hint==NULL);
+  
+  assert((POINTER_SIZE_INT)nos_boundary%SPACE_ALLOC_UNIT == 0);
   nos_base = vm_reserve_mem(nos_boundary, nos_reserve_size);
   if( nos_base != nos_boundary ){
     printf("Static NOS mapping: Can't reserve memory at %x for size %x for NOS.\n", nos_boundary, nos_reserve_size);  
     printf("Please not use static NOS mapping by undefining STATIC_NOS_MAPPING, or adjusting NOS_BOUNDARY value.\n");
     exit(0);
   }
-  reserved_end = (void*)((unsigned int)nos_base + nos_reserve_size);
+  reserved_end = (void*)((POINTER_SIZE_INT)nos_base + nos_reserve_size);
 
-  void* los_mos_base = (void*)((unsigned int)nos_base - los_mos_size);
-  assert(!((unsigned int)los_mos_base%SPACE_ALLOC_UNIT));
+  void* los_mos_base = (void*)((POINTER_SIZE_INT)nos_base - los_mos_size);
+  assert(!((POINTER_SIZE_INT)los_mos_base%SPACE_ALLOC_UNIT));
   reserved_base = vm_reserve_mem(los_mos_base, los_mos_size);
   while( !reserved_base || reserved_base >= nos_base){
-    los_mos_base = (void*)((unsigned int)los_mos_base - SPACE_ALLOC_UNIT);
+    los_mos_base = (void*)((POINTER_SIZE_INT)los_mos_base - SPACE_ALLOC_UNIT);
     if(los_mos_base < RESERVE_BOTTOM){
       printf("Static NOS mapping: Can't allocate memory at address %x for specified size %x for MOS", reserved_base, los_mos_size);  
       exit(0);      
@@ -126,15 +143,31 @@
   
 #else /* STATIC_NOS_MAPPING */
 
-  reserved_base = vm_reserve_mem(0, max_heap_size);
-  while( !reserved_base ){
-    printf("Non-static NOS mapping: Can't allocate memory at address %x for specified size %x", reserved_base, max_heap_size);  
-    exit(0);      
+  reserved_base = NULL;
+  if(large_page_hint){
+    reserved_base = alloc_large_pages(max_heap_size, large_page_hint);
+    if(reserved_base == NULL) {
+      free(large_page_hint);
+      large_page_hint = NULL;
+      printf("GC uses small pages.\n");
+    }
   }
-  reserved_end = (void*)((unsigned int)reserved_base + max_heap_size);
+  
+  if(reserved_base==NULL){
+    reserved_base = vm_reserve_mem((void*)0, max_heap_size + SPACE_ALLOC_UNIT);
+    reserved_base = (void*)round_up_to_size((POINTER_SIZE_INT)reserved_base, SPACE_ALLOC_UNIT);
+    assert((POINTER_SIZE_INT)reserved_base%SPACE_ALLOC_UNIT == 0);
+
+    while( !reserved_base ){
+      printf("Non-static NOS mapping: Can't allocate memory at address %x for specified size %x", reserved_base, max_heap_size);  
+      exit(0);      
+    }
+  }
+
+  reserved_end = (void*)((POINTER_SIZE_INT)reserved_base + max_heap_size);
     
   /* compute first time nos_boundary */
-  nos_base = (void*)((unsigned int)reserved_base + mos_commit_size + los_size);
+  nos_base = (void*)((POINTER_SIZE_INT)reserved_base + mos_commit_size + los_size);
   /* init nos_boundary if NOS is not statically mapped */
   nos_boundary = nos_base; 
 
@@ -147,10 +180,11 @@
   gc_gen->num_collections = 0;
   gc_gen->time_collections = 0;
   gc_gen->force_major_collect = FALSE;
+  gc_gen->force_gen_mode = FALSE;
   
   gc_los_initialize(gc_gen, reserved_base, los_size);
 
-  reserved_base = (void*)((unsigned int)reserved_base + los_size);
+  reserved_base = (void*)((POINTER_SIZE_INT)reserved_base + los_size);
   gc_mos_initialize(gc_gen, reserved_base, mos_reserve_size, mos_commit_size);
 
   gc_nos_initialize(gc_gen, nos_base, nos_reserve_size, nos_commit_size); 
@@ -166,9 +200,11 @@
   mos->collect_algorithm = MAJOR_ALGO;
 
   /*Give GC a hint of space survive ratio.*/
-  nos->survive_ratio = gc_gen->survive_ratio;
-  mos->survive_ratio = gc_gen->survive_ratio;
+//  nos->survive_ratio = gc_gen->survive_ratio;
+//  mos->survive_ratio = gc_gen->survive_ratio;
   gc_space_tuner_initialize((GC*)gc_gen);
+
+  gc_gen_mode_adapt_init(gc_gen);
     
   gc_gen->committed_heap_size = space_committed_size((Space*)gc_gen->nos) +
                                 space_committed_size((Space*)gc_gen->mos) +
@@ -183,6 +219,14 @@
 
 void gc_gen_destruct(GC_Gen *gc_gen) 
 {
+  Space* nos = (Space*)gc_gen->nos;
+  Space* mos = (Space*)gc_gen->mos;
+  Space* los = (Space*)gc_gen->los;
+
+  vm_unmap_mem(nos->heap_start, space_committed_size(nos));
+  vm_unmap_mem(mos->heap_start, space_committed_size(mos));
+  vm_unmap_mem(los->heap_start, space_committed_size(los));
+
   gc_nos_destruct(gc_gen);
   gc_gen->nos = NULL;
   
@@ -192,14 +236,6 @@
   gc_los_destruct(gc_gen);  
   gc_gen->los = NULL;
 
-  Space* nos = (Space*)gc_gen->nos;
-  Space* mos = (Space*)gc_gen->mos;
-  Space* los = (Space*)gc_gen->los;
-
-  vm_unmap_mem(nos->heap_start, space_committed_size(nos));
-  vm_unmap_mem(mos->heap_start, space_committed_size(mos));
-  vm_unmap_mem(los->heap_start, space_committed_size(los));
-
   return;  
 }
 
@@ -259,17 +295,17 @@
   }
   
   if(!major_algo){
-    MAJOR_ALGO= MAJOR_COMPACT_SLIDE;
+    MAJOR_ALGO= MAJOR_COMPACT_MOVE;
     
   }else{
     string_to_upper(major_algo);
 
     if(!strcmp(major_algo, "MAJOR_COMPACT_SLIDE")){
      MAJOR_ALGO= MAJOR_COMPACT_SLIDE;
-          
+      
     }else if(!strcmp(major_algo, "MAJOR_COMPACT_MOVE")){
      MAJOR_ALGO= MAJOR_COMPACT_MOVE;
-
+    
     }else{
      printf("\nGC algorithm setting incorrect. Will use default algorithm.\n");  
       
@@ -286,8 +322,6 @@
 { 
   if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
 
-  int64 start_time = time_now();
-
   Blocked_Space* fspace = (Blocked_Space*)gc->nos;
   Blocked_Space* mspace = (Blocked_Space*)gc->mos;
   mspace->num_used_blocks = mspace->free_block_idx - mspace->first_block_idx;
@@ -339,13 +373,63 @@
     exit(0);
   }
   
-  int64 pause_time = time_now() - start_time;
-  
-  gc->time_collections += pause_time;
-  
   if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
 
-  gc_gen_adapt(gc, pause_time);
-
   return;
+}
+
+void gc_gen_iterate_heap(GC_Gen *gc)
+{
+  /* this function is called after the world is stopped */
+  Mutator *mutator = gc->mutator_list;
+  bool cont = true;   
+  while (mutator) {
+    Block_Header* block = (Block_Header*)mutator->alloc_block;
+    if(block != NULL) block->free = mutator->free;
+    mutator = mutator->next;
+  }
+
+  Mspace* mspace = gc->mos;
+  Block_Header *curr_block = (Block_Header*)mspace->blocks;
+  Block_Header *space_end = (Block_Header*)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
+  while(curr_block < space_end) {
+    POINTER_SIZE_INT p_obj = (POINTER_SIZE_INT)curr_block->base;
+    POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)curr_block->free;
+    while(p_obj < block_end){
+      cont = vm_iterate_object((Managed_Object_Handle)p_obj);
+      if (!cont) return;
+      p_obj = p_obj + vm_object_size((Partial_Reveal_Object *)p_obj);
+    }
+    curr_block = curr_block->next;
+    if(curr_block == NULL) break;
+  }
+  
+  Fspace* fspace = gc->nos;
+  curr_block = (Block_Header*)fspace->blocks;
+  space_end = (Block_Header*)&fspace->blocks[fspace->free_block_idx - fspace->first_block_idx];
+  while(curr_block < space_end) {
+    POINTER_SIZE_INT p_obj = (POINTER_SIZE_INT)curr_block->base;
+    POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)curr_block->free;
+    while(p_obj < block_end){
+      cont = vm_iterate_object((Managed_Object_Handle)p_obj);
+      if (!cont) return;
+      p_obj = p_obj + vm_object_size((Partial_Reveal_Object *)p_obj);
+    }
+    curr_block = curr_block->next;
+    if(curr_block == NULL) break;
+  }
+
+  Lspace* lspace = gc->los;
+  POINTER_SIZE_INT lspace_obj = (POINTER_SIZE_INT)lspace->heap_start;
+  POINTER_SIZE_INT lspace_end = (POINTER_SIZE_INT)lspace->heap_end;
+  while (lspace_obj < lspace_end) {
+    if(!*((unsigned int *)lspace_obj)){
+      lspace_obj = lspace_obj + ((Free_Area*)lspace_obj)->size;
+    }else{
+      cont = vm_iterate_object((Managed_Object_Handle)lspace_obj);
+      if (!cont) return;
+      unsigned int obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object *)lspace_obj));
+      lspace_obj = lspace_obj + obj_size;
+    }
+  }
 }

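For context on gc_gen_iterate_heap above: the walk first flushes each mutator's allocation frontier into its current block, then does a standard bump-pointer scan -- start at a block's base, advance by each object's size up to the block's free pointer, and follow the next link; LOS is walked linearly, treating a null first word as a Free_Area. A minimal self-contained sketch of the block scan, with simplified stand-in types (Block, obj_size, and visit are illustrative, not the DRLVM API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-ins for the real DRLVM structures. */
    typedef struct Block {
      struct Block *next;
      char *base;   /* first object in the block */
      char *free;   /* allocation frontier; live objects end here */
    } Block;

    extern size_t obj_size(void *obj);  /* stand-in for vm_object_size */
    extern bool visit(void *obj);       /* stand-in for vm_iterate_object */

    /* Walk every object in a chain of blocks, as the NOS/MOS loops above do. */
    static void iterate_blocks(Block *block, Block *space_end)
    {
      while (block != NULL && block < space_end) {
        uintptr_t p   = (uintptr_t)block->base;
        uintptr_t end = (uintptr_t)block->free;
        while (p < end) {
          if (!visit((void *)p)) return;   /* callback may stop the walk */
          p += obj_size((void *)p);        /* bump to the next object */
        }
        block = block->next;
      }
    }
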
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h Sun Jan 28 06:41:08 2007
@@ -30,8 +30,6 @@
 #include "../mark_sweep/lspace.h"
 #include "../finalizer_weakref/finalizer_weakref_metadata.h"
 
-#define SPACE_ALLOC_UNIT ( ( GC_BLOCK_SIZE_BYTES > SYSTEM_ALLOC_UNIT) ? GC_BLOCK_SIZE_BYTES : SYSTEM_ALLOC_UNIT)
-
 enum Write_Barrier_Kind{
   WRITE_BARRIER_NIL,  
   WRITE_BARRIER_SLOT,  
@@ -49,6 +47,8 @@
 extern unsigned int min_nos_size_bytes;
 extern unsigned int max_nos_size_bytes;
 
+struct Gen_Mode_Adaptor;
+
 typedef struct GC_Gen {
   /* <-- First couple of fields overloaded as GC */
   void* heap_start;
@@ -75,6 +75,7 @@
 
   unsigned int collect_kind; /* MAJOR or MINOR */
   unsigned int last_collect_kind;
+  unsigned int cause; /* GC_CAUSE_LOS_IS_FULL, GC_CAUSE_NOS_IS_FULL, or GC_CAUSE_RUNTIME_FORCE_GC */
   Boolean collect_result; /* succeed or fail */
   
   Boolean generate_barrier;
@@ -92,8 +93,11 @@
   Lspace *los;
       
   Boolean force_major_collect;
+  Gen_Mode_Adaptor* gen_mode_adaptor;
+  Boolean force_gen_mode;
   
   /* system info */ 
+  unsigned int _system_alloc_unit;
   unsigned int _machine_page_size_bytes;
   unsigned int _num_processors;
   
@@ -159,6 +163,12 @@
 void gc_gen_adapt(GC_Gen* gc, int64 pause_time);
 
 void gc_gen_reclaim_heap(GC_Gen* gc);
+
+void gc_gen_mode_adapt_init(GC_Gen *gc);
+
+void gc_gen_iterate_heap(GC_Gen *gc);
+
+extern Boolean GEN_NONGEN_SWITCH;
 
 #endif /* ifndef _GC_GEN_H_ */
 

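The `struct Gen_Mode_Adaptor;` line added above is the usual opaque-type idiom: the header only promises the type exists, so GC_Gen can carry a `Gen_Mode_Adaptor*` while the field layout stays private to gen_adapt.cpp. A minimal sketch of the pattern (the names here are illustrative, not the DRLVM headers):

    /* adaptor.h -- clients see only an incomplete type */
    struct Adaptor;                       /* forward declaration */
    typedef struct Owner {
      struct Adaptor *adaptor;            /* a pointer is fine; the size is unknown */
    } Owner;
    struct Adaptor *adaptor_create(void);

    /* adaptor.c -- the only file that knows the layout */
    #include <stdlib.h>
    struct Adaptor { float throughput; int trial_count; };
    struct Adaptor *adaptor_create(void)
    {
      struct Adaptor *a = (struct Adaptor *)malloc(sizeof *a);
      if (a) { a->throughput = 0.0f; a->trial_count = 0; }
      return a;
    }

This keeps gen.h's include graph small and lets the adaptor's fields change without recompiling every client of the header.
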
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp Sun Jan 28 06:41:08 2007
@@ -19,14 +19,51 @@
  */
 
 #include "gen.h"
+#include "../common/space_tuner.h"
+#include <math.h>
 
 #define NOS_COPY_RESERVE_DELTA (GC_BLOCK_SIZE_BYTES<<5)
+/* Tune this number in case MOS is too small, so as to avoid or postpone fallback. */
+#define GC_MOS_MIN_EXTRA_REMAIN_SIZE (36*MB)
+/* Switch on this macro when we want lspace->survive_ratio to be sensitive. */
+//#define NOS_SURVIVE_RATIO_SENSITIVE
 
-#include <math.h>
+struct Mspace;
+void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold);
 
 static float Tslow = 0.0f;
-static unsigned int SMax = 0;
-static unsigned int last_total_free_size = 0;
+static POINTER_SIZE_INT SMax = 0;
+static POINTER_SIZE_INT last_total_free_size = 0;
+
+typedef struct Gen_Mode_Adaptor{
+  float gen_minor_throughput;
+  float nongen_minor_throughput;
+
+  /*for obtaining the gen minor collection throughput.*/
+  int gen_mode_trial_count;
+
+  float major_survive_ratio_threshold;
+  unsigned int major_repeat_count;
+
+  POINTER_SIZE_INT adapt_nos_size;
+}Gen_Mode_Adaptor;
+
+void gc_gen_mode_adapt_init(GC_Gen *gc)
+{
+  gc->gen_mode_adaptor = (Gen_Mode_Adaptor*)STD_MALLOC( sizeof(Gen_Mode_Adaptor));
+  Gen_Mode_Adaptor* gen_mode_adaptor = gc->gen_mode_adaptor;
+  
+  gen_mode_adaptor->gen_minor_throughput = 0.0f;
+  /* Reset nongen_minor_throughput: the first default nongen minor collection
+     (possibly a test GC) may cause the computed result to be zero, so we
+     initialize the value to 1.0f here. */
+  gen_mode_adaptor->nongen_minor_throughput = 1.0f;
+  gen_mode_adaptor->gen_mode_trial_count = 0;
+
+  gen_mode_adaptor->major_survive_ratio_threshold = 1.0f;
+  gen_mode_adaptor->major_repeat_count  = 1;
+
+  gen_mode_adaptor->adapt_nos_size = min_nos_size_bytes;
+}
 
 static float mini_free_ratio(float k, float m)
 {
@@ -53,84 +90,199 @@
   return res;
 }
 
-#define GC_MOS_MIN_EXTRA_REMAIN_SIZE (4*1024*1024)
-static void gc_decide_next_collect(GC_Gen* gc, int64 pause_time)
+#define MAX_MAJOR_REPEAT_COUNT 3
+#define MAX_MINOR_TRIAL_COUNT 2
+#define MAX_INT32 0x7fffffff
+
+void gc_gen_mode_adapt(GC_Gen* gc, int64 pause_time)
 {
+  if(GEN_NONGEN_SWITCH == FALSE) return;
+  
   Blocked_Space* fspace = (Blocked_Space*)gc->nos;
   Blocked_Space* mspace = (Blocked_Space*)gc->mos;
+  Gen_Mode_Adaptor* gen_mode_adaptor = gc->gen_mode_adaptor;
 
-  float survive_ratio = 0;
-
-  unsigned int mos_free_size = space_free_memory_size(mspace);
-  unsigned int nos_free_size = space_free_memory_size(fspace);
-  unsigned int total_free_size = mos_free_size  + nos_free_size;
-  
-  if(gc->collect_kind != MINOR_COLLECTION) 
-  {
-    mspace->time_collections += pause_time;
-
-    Tslow = (float)pause_time;
-    SMax = total_free_size;
-    gc->force_major_collect = FALSE;
-    
-    unsigned int major_survive_size = space_committed_size((Space*)mspace) - mos_free_size;
-    survive_ratio = (float)major_survive_size/(float)gc_gen_total_memory_size(gc);
-    mspace->survive_ratio = survive_ratio;
+  POINTER_SIZE_INT mos_free_size = space_free_memory_size(mspace);
+  POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace);
+  POINTER_SIZE_INT total_free_size = mos_free_size  + nos_free_size;
   
+  if(gc->collect_kind != MINOR_COLLECTION) {
+    assert(!gc_is_gen_mode());
+    
+    if(gen_mode_adaptor->major_survive_ratio_threshold != 0 && mspace->survive_ratio > gen_mode_adaptor->major_survive_ratio_threshold){    
+      if(gen_mode_adaptor->major_repeat_count > MAX_MAJOR_REPEAT_COUNT ){
+        gc->force_gen_mode = TRUE;
+        gc_enable_gen_mode();
+        gc->force_major_collect = FALSE;
+        return;
+      }else{
+        gen_mode_adaptor->major_repeat_count++;
+      }
+    }else{
+      gen_mode_adaptor->major_repeat_count = 1;
+    }
+    
   }else{
-    /*Give a hint to mini_free_ratio. */
-    if(gc->num_collections == 1){
-      /*fixme: This is only set for tuning the first warehouse!*/
-      Tslow = pause_time / gc->survive_ratio;
-      SMax = (unsigned int)((float)gc->committed_heap_size * ( 1 - gc->survive_ratio ));
-      last_total_free_size = gc->committed_heap_size - gc->los->committed_heap_size;
+    /*compute throughput*/
+    if(gc->last_collect_kind != MINOR_COLLECTION){
+      gen_mode_adaptor->nongen_minor_throughput = 1.0f;
+    }
+    if(gc->force_gen_mode){
+      if(pause_time!=0){
+        if(gen_mode_adaptor->gen_minor_throughput != 0)
+          gen_mode_adaptor->gen_minor_throughput = (gen_mode_adaptor->gen_minor_throughput + (float) nos_free_size/(float)pause_time)/2.0f;
+        else
+          gen_mode_adaptor->gen_minor_throughput =(float) nos_free_size/(float)pause_time;
+      }
+    }else{
+      if(pause_time!=0){
+        if(gen_mode_adaptor->gen_minor_throughput != 1.0f)
+          gen_mode_adaptor->nongen_minor_throughput = (gen_mode_adaptor->nongen_minor_throughput + (float) nos_free_size/(float)pause_time)/2.0f;      
+        else
+          gen_mode_adaptor->nongen_minor_throughput = (float) nos_free_size/(float)pause_time;
+      }
+    }
+
+    if(gen_mode_adaptor->nongen_minor_throughput <=  gen_mode_adaptor->gen_minor_throughput ){
+      if( gc->last_collect_kind != MINOR_COLLECTION ){
+        gen_mode_adaptor->major_survive_ratio_threshold = mspace->survive_ratio;
+      }else if( !gc->force_gen_mode ){
+        gc->force_gen_mode = TRUE;
+        gen_mode_adaptor->gen_mode_trial_count = MAX_INT32;        
+      } 
     }
 
-    fspace->time_collections += pause_time;  
-    unsigned int free_size_threshold;
-      
-    unsigned int minor_survive_size = last_total_free_size - total_free_size;
-
-    float k = Tslow * fspace->num_collections/fspace->time_collections;
-    float m = ((float)minor_survive_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ));
-    float free_ratio_threshold = mini_free_ratio(k, m);
-    free_size_threshold = (unsigned int)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE );
+    if(gc->force_major_collect && !gc->force_gen_mode){
+        gc->force_major_collect = FALSE;
+        gc->force_gen_mode = TRUE;
+        gen_mode_adaptor->gen_mode_trial_count = 2;
+    }else if(gc->last_collect_kind != MINOR_COLLECTION && gc->force_gen_mode){
+       gen_mode_adaptor->gen_mode_trial_count = MAX_INT32;
+    }
 
-    if ((mos_free_size + nos_free_size)< free_size_threshold)  {
-      gc->force_major_collect = TRUE;
+    if(gc->force_gen_mode && (total_free_size <= ((float)min_nos_size_bytes) * 1.3 )){
+        gc->force_gen_mode = FALSE;
+        gc_disable_gen_mode();
+        gc->force_major_collect = TRUE;
+        gen_mode_adaptor->gen_mode_trial_count = 0;
+        return;
     }
+    
+    if( gc->force_gen_mode ){
+      assert( gen_mode_adaptor->gen_mode_trial_count >= 0);
 
-    survive_ratio = (float)minor_survive_size/(float)space_committed_size((Space*)fspace);
-    fspace->survive_ratio = survive_ratio;
+      gen_mode_adaptor->gen_mode_trial_count --;
+      if( gen_mode_adaptor->gen_mode_trial_count >= 0){
+        gc_enable_gen_mode();
+        return;
+      }
+          
+      gc->force_gen_mode = FALSE;
+      gc->force_major_collect = TRUE;    
+      gen_mode_adaptor->gen_mode_trial_count = 0;
+    }
   }
   
-  gc->survive_ratio =  (gc->survive_ratio + survive_ratio)/2.0f;
+  gc_disable_gen_mode();
+  return;
+}
+
+void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold);
+
+static void gc_decide_next_collect(GC_Gen* gc, int64 pause_time)
+{
+  Blocked_Space* fspace = (Blocked_Space*)gc->nos;
+  Blocked_Space* mspace = (Blocked_Space*)gc->mos;
 
-  last_total_free_size = total_free_size;
+  float survive_ratio = 0;
 
+  POINTER_SIZE_INT mos_free_size = space_free_memory_size(mspace);
+  POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace);
+  POINTER_SIZE_INT total_free_size = mos_free_size  + nos_free_size;
+  if(gc->collect_kind != MINOR_COLLECTION) gc->force_gen_mode = FALSE;
+  if(!gc->force_gen_mode){  
+    if(gc->collect_kind != MINOR_COLLECTION){
+      mspace->time_collections += pause_time;
+  
+      Tslow = (float)pause_time;
+      SMax = total_free_size;
+      gc->force_major_collect = FALSE;
+      
+      POINTER_SIZE_INT major_survive_size = space_committed_size((Space*)mspace) - mos_free_size;
+      /* If the major collection is caused by LOS, or the collection kind is EXTEND_COLLECTION, the survive ratio is not updated. */
+      if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (gc->collect_kind != EXTEND_COLLECTION)){
+        survive_ratio = (float)major_survive_size/(float)space_committed_size((Space*)mspace);
+        mspace->survive_ratio = survive_ratio;
+      }
+      if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS){
+        POINTER_SIZE_INT mspace_size_threshold = (space_committed_size((Space*)mspace) + space_committed_size((Space*)fspace)) >> 1;
+        mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold );
+      }
+  #ifdef NOS_SURVIVE_RATIO_SENSITIVE
+      /* If this major collection is caused by fallback compaction, we must give
+         fspace->survive_ratio a conservative, reasonable value to avoid the next fallback. */
+      fspace->survive_ratio = mspace->survive_ratio;
+  #endif
+    }else{
+      /*Give a hint to mini_free_ratio. */
+      if(fspace->num_collections == 1){
+        /*fixme: This is only set for tuning the first warehouse!*/
+        Tslow = pause_time / gc->survive_ratio;
+        SMax = (POINTER_SIZE_INT)((float)gc->committed_heap_size * ( 1 - gc->survive_ratio ));
+        last_total_free_size = gc->committed_heap_size - gc->los->committed_heap_size;
+      }
+  
+      fspace->time_collections += pause_time;  
+      POINTER_SIZE_INT free_size_threshold;
+        
+      POINTER_SIZE_INT minor_survive_size = last_total_free_size - total_free_size;
+  
+      float k = Tslow * fspace->num_collections/fspace->time_collections;
+      float m = ((float)minor_survive_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ));
+      float free_ratio_threshold = mini_free_ratio(k, m);
+      free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE );
+  
+      if ((mos_free_size + nos_free_size)< free_size_threshold)  {
+        gc->force_major_collect = TRUE;
+      }
+  
+      survive_ratio = (float)minor_survive_size/(float)space_committed_size((Space*)fspace);
+      fspace->survive_ratio = survive_ratio;
+      /*For_LOS adaptive*/
+      POINTER_SIZE_INT mspace_size_threshold = space_committed_size((Space*)mspace) + space_committed_size((Space*)fspace) - free_size_threshold;
+      mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold );
+    }
+    
+    gc->survive_ratio =  (gc->survive_ratio + survive_ratio)/2.0f;
+  
+    last_total_free_size = total_free_size;
+  }
+
+  gc_gen_mode_adapt(gc,pause_time);
+    
   return;
 }
 
 
-Boolean gc_compute_new_space_size(GC_Gen* gc, unsigned int* mos_size, unsigned int* nos_size)
+Boolean gc_compute_new_space_size(GC_Gen* gc, POINTER_SIZE_INT* mos_size, POINTER_SIZE_INT* nos_size)
 {
   Blocked_Space* fspace = (Blocked_Space*)gc->nos;
   Blocked_Space* mspace = (Blocked_Space*)gc->mos;
   Blocked_Space* lspace = (Blocked_Space*)gc->los;  
   
-  unsigned int new_nos_size;
-  unsigned int new_mos_size;
+  POINTER_SIZE_INT new_nos_size;
+  POINTER_SIZE_INT new_mos_size;
 
-  unsigned int curr_nos_size = space_committed_size((Space*)fspace);
-  unsigned int used_mos_size = space_used_memory_size(mspace);
-  unsigned int free_mos_size = space_committed_size((Space*)mspace) - used_mos_size;
+  POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace);
+  POINTER_SIZE_INT used_mos_size = space_used_memory_size(mspace);
+  POINTER_SIZE_INT free_mos_size = space_committed_size((Space*)mspace) - used_mos_size;
 
-  unsigned int total_size;
+  POINTER_SIZE_INT total_size;
 
 #ifdef STATIC_NOS_MAPPING
     total_size = max_heap_size_bytes - lspace->committed_heap_size;
 #else
-    total_size = (unsigned int)gc->heap_end - (unsigned int)mspace->heap_start;
+    total_size = (POINTER_SIZE_INT)gc->heap_end - (POINTER_SIZE_INT)mspace->heap_start;
 #endif
 
   /* check if curr nos size is too small to shrink */
@@ -142,16 +294,20 @@
   }
   */
   
-  unsigned int total_free = total_size - used_mos_size;
+  POINTER_SIZE_INT total_free = total_size - used_mos_size;
   /* predict NOS + NOS*ratio = total_free_size */
-  int nos_reserve_size;
-  nos_reserve_size = (int)(((float)total_free)/(1.0f + fspace->survive_ratio));
-  new_nos_size = round_down_to_size((unsigned int)nos_reserve_size, SPACE_ALLOC_UNIT);
+  POINTER_SIZE_INT nos_reserve_size;
+  nos_reserve_size = (POINTER_SIZE_INT)(((float)total_free)/(1.0f + fspace->survive_ratio));
+  new_nos_size = round_down_to_size((POINTER_SIZE_INT)nos_reserve_size, SPACE_ALLOC_UNIT);
 #ifdef STATIC_NOS_MAPPING
   if(new_nos_size > fspace->reserved_heap_size) new_nos_size = fspace->reserved_heap_size;
 #endif  
   if(new_nos_size > GC_MOS_MIN_EXTRA_REMAIN_SIZE) new_nos_size -= GC_MOS_MIN_EXTRA_REMAIN_SIZE ;
 
+  if(gc->force_gen_mode){
+    new_nos_size = min_nos_size_bytes;//round_down_to_size((unsigned int)(gc->gen_minor_adaptor->adapt_nos_size), SPACE_ALLOC_UNIT);
+  }
+  
   new_mos_size = total_size - new_nos_size;
 #ifdef STATIC_NOS_MAPPING
   if(new_mos_size > mspace->reserved_heap_size) new_mos_size = mspace->reserved_heap_size;
@@ -173,21 +329,21 @@
   Blocked_Space* fspace = (Blocked_Space*)gc->nos;
   Blocked_Space* mspace = (Blocked_Space*)gc->mos;
   
-  unsigned int new_nos_size;
-  unsigned int new_mos_size;
+  POINTER_SIZE_INT new_nos_size;
+  POINTER_SIZE_INT new_mos_size;
 
   Boolean result = gc_compute_new_space_size(gc, &new_mos_size, &new_nos_size);
 
   if(!result) return;
 
-  unsigned int curr_nos_size = space_committed_size((Space*)fspace);
+  POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace);
 
-  if( abs((int)new_nos_size - (int)curr_nos_size) < NOS_COPY_RESERVE_DELTA )
+  if( abs((POINTER_SIZE_SINT)new_nos_size - (POINTER_SIZE_SINT)curr_nos_size) < NOS_COPY_RESERVE_DELTA )
     return;
   
   /* below are adjustments */
 
-  nos_boundary = (void*)((unsigned int)gc->heap_end - new_nos_size);
+  nos_boundary = (void*)((POINTER_SIZE_INT)gc->heap_end - new_nos_size);
 
   fspace->heap_start = nos_boundary;
   fspace->blocks = (Block*)nos_boundary;
@@ -208,7 +364,8 @@
   Block_Header* nos_first_block = (Block_Header*)&fspace->blocks[0];
   /* this is redundant: mos_last_block->next = nos_first_block; */
 
-  HelperClass_set_NosBoundary(nos_boundary);
+  if( gc_is_gen_mode())
+    HelperClass_set_NosBoundary(nos_boundary);
   
   return;
 }
@@ -221,8 +378,8 @@
 
   if(NOS_SIZE) return;
 
-  unsigned int new_nos_size;
-  unsigned int new_mos_size;
+  POINTER_SIZE_INT new_nos_size;
+  POINTER_SIZE_INT new_mos_size;
 
   Boolean result = gc_compute_new_space_size(gc, &new_mos_size, &new_nos_size);
 
@@ -231,18 +388,18 @@
   Blocked_Space* fspace = (Blocked_Space*)gc->nos;
   Blocked_Space* mspace = (Blocked_Space*)gc->mos;
   
-  unsigned int curr_nos_size = space_committed_size((Space*)fspace);
+  POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace);
 
-  if( abs((int)new_nos_size - (int)curr_nos_size) < NOS_COPY_RESERVE_DELTA )
+  if( abs((POINTER_SIZE_SINT)new_nos_size - (POINTER_SIZE_SINT)curr_nos_size) < NOS_COPY_RESERVE_DELTA )
     return;
       
-  unsigned int used_mos_size = space_used_memory_size((Blocked_Space*)mspace);  
-  unsigned int free_mos_size = space_free_memory_size((Blocked_Space*)mspace);  
+  POINTER_SIZE_INT used_mos_size = space_used_memory_size((Blocked_Space*)mspace);  
+  POINTER_SIZE_INT free_mos_size = space_free_memory_size((Blocked_Space*)mspace);  
 
-  unsigned int new_free_mos_size = new_mos_size -  used_mos_size;
+  POINTER_SIZE_INT new_free_mos_size = new_mos_size -  used_mos_size;
   
-  unsigned int curr_mos_end = (unsigned int)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
-  unsigned int mos_border = (unsigned int)mspace->heap_end;
+  POINTER_SIZE_INT curr_mos_end = (POINTER_SIZE_INT)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
+  POINTER_SIZE_INT mos_border = (POINTER_SIZE_INT)mspace->heap_end;
   if(  curr_mos_end + new_free_mos_size > mos_border){
     /* we can't let mos cross border */
     new_free_mos_size = mos_border - curr_mos_end;    

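Two formulas in the gen_adapt.cpp changes above are worth spelling out. The minor-collection throughput is bytes reclaimed in NOS per unit of pause time, blended 50/50 with the previous estimate; and gc_compute_new_space_size solves NOS + NOS*survive_ratio = total_free for the new NOS size. A small self-contained sketch with purely illustrative numbers (not measurements from the VM):

    #include <stdio.h>

    /* Throughput estimate as in gc_gen_mode_adapt: bytes reclaimed in NOS
       per unit of pause time, blended 50/50 with the previous estimate. */
    static float blend_throughput(float prev, float nos_free, float pause)
    {
      float sample = nos_free / pause;
      return (prev != 0.0f) ? (prev + sample) / 2.0f : sample;
    }

    /* NOS sizing as in gc_compute_new_space_size: solve
       nos + nos * survive_ratio = total_free  =>  nos = total_free / (1 + r). */
    static double predict_nos(double total_free, double survive_ratio)
    {
      return total_free / (1.0 + survive_ratio);
    }

    int main(void)
    {
      /* With 200 MB free after MOS usage and a 25% survive ratio, NOS gets
         160 MB and the survivors' copy reserve gets the remaining 40 MB. */
      printf("nos = %.0f MB\n", predict_nos(200.0, 0.25));          /* 160 */
      printf("tp  = %.1f\n", blend_throughput(0.0f, 64.0f, 2.0f));  /* 32.0 */
      return 0;
    }
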
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp Sun Jan 28 06:41:08 2007
@@ -20,9 +20,9 @@
     return (jint)tls_gc_offset;
 }
 
-JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getNosBoundary(JNIEnv *e, jclass c)
+JNIEXPORT jobject JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getNosBoundary(JNIEnv *e, jclass c)
 {
-    return (jint)nos_boundary;
+    return (jobject)nos_boundary;
 }
 
 JNIEXPORT jboolean JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getGenMode(JNIEnv *e, jclass c)

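The getNosBoundary change above (jint to jobject) is a pointer-width fix: jint is 32 bits on every platform, so casting a native pointer through it truncates on 64-bit targets, while jobject is pointer-sized. A hedged sketch of the same trick with a hypothetical native method -- the boundary travels through jobject as an opaque pointer-sized token, not a real object reference:

    #include <jni.h>

    static void *boundary;   /* assumption: set elsewhere by the VM */

    /* Hypothetical native method; mirrors the commit's jobject round trip. */
    JNIEXPORT jobject JNICALL
    Java_Example_getBoundary(JNIEnv *env, jclass clazz)
    {
      (void)env; (void)clazz;
      return (jobject)boundary;   /* pointer-width safe on 32- and 64-bit */
    }
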
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp Sun Jan 28 06:41:08 2007
@@ -32,6 +32,7 @@
 
   unsigned int nfields = class_number_fields(GCHelper_clss);
   unsigned int i;
+  
   for(i=0; i<nfields; i++){
     Field_Handle field = class_get_field(GCHelper_clss, i);
     if(!strcmp(field_get_name(field), "GEN_MODE")){
@@ -65,11 +66,14 @@
 
   unsigned int nfields = class_number_fields(GCHelper_clss);
   unsigned int i;
+  
   for(i=0; i<nfields; i++){
     Field_Handle field = class_get_field(GCHelper_clss, i);
     if(!strcmp(field_get_name(field), "NOS_BOUNDARY")){
-      jint* p_nos_boundary = (jint*)field_get_address(field);
-      *p_nos_boundary = (jint)boundary;
+      //jint* p_nos_boundary = (jint*)field_get_address(field);
+      //*p_nos_boundary = (jint)boundary;
+      jobject* p_nos_boundary = (jobject*)field_get_address(field);
+      *p_nos_boundary = (jobject)boundary;
       break;
     }
   }
@@ -77,4 +81,4 @@
   assert(i<nfields);
 
   return;
-}
\ No newline at end of file
+}

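The loop changed above is a scan-by-name over a class's static fields through the VM interface, writing the new boundary through the field's address. Assuming the DRLVM interface functions used above (class_number_fields, class_get_field, field_get_name, field_get_address) with the shapes shown, the pattern factors out roughly like this (set_static_pointer and the typedefs are hypothetical stand-ins):

    #include <string.h>
    #include <assert.h>

    /* Stand-ins for the DRLVM VM interface; the real declarations
       live in the VM headers. */
    typedef struct Class_ *Class_Handle;
    typedef struct Field_ *Field_Handle;
    extern unsigned int class_number_fields(Class_Handle clss);
    extern Field_Handle class_get_field(Class_Handle clss, unsigned int idx);
    extern const char  *field_get_name(Field_Handle field);
    extern void        *field_get_address(Field_Handle field);

    /* Hypothetical helper: find a static field by name and store a
       pointer-sized value through its address. */
    static void set_static_pointer(Class_Handle clss, const char *name, void *value)
    {
      unsigned int nfields = class_number_fields(clss);
      unsigned int i;
      for (i = 0; i < nfields; i++) {
        Field_Handle field = class_get_field(clss, i);
        if (!strcmp(field_get_name(field), name)) {
          *(void **)field_get_address(field) = value;   /* write in place */
          break;
        }
      }
      assert(i < nfields);   /* the named field must exist */
    }
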
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp Sun Jan 28 06:41:08 2007
@@ -116,7 +116,7 @@
   /* first step: copy all root objects to mark tasks. 
       FIXME:: can be done sequentially before coming here to eliminate atomic ops */ 
   while(root_set){
-    unsigned int* iter = vector_block_iterator_init(root_set);
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
     while(!vector_block_iterator_end(root_set,iter)){
       Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
       iter = vector_block_iterator_advance(root_set,iter);
@@ -141,7 +141,7 @@
   Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
   
   while(mark_task){
-    unsigned int* iter = vector_block_iterator_init(mark_task);
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task);
     while(!vector_block_iterator_end(mark_task,iter)){
       Partial_Reveal_Object** p_ref = (Partial_Reveal_Object **)*iter;
       iter = vector_block_iterator_advance(mark_task,iter);
@@ -175,4 +175,9 @@
   collector->trace_stack = NULL;
   
   return;
+}
+
+void trace_obj_in_fallback_marking(Collector *collector, void *p_ref)
+{
+  trace_object(collector, (Partial_Reveal_Object **)p_ref);
 }

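The iterator type change above (unsigned int* to POINTER_SIZE_INT*) matters because each vector-block slot holds a reference, i.e. a pointer-sized value: an unsigned int iterator would step in 4-byte units and mis-read every other half-slot on 64-bit targets. A minimal sketch of the loop shape under a simplified stand-in layout (Vec_Block and its iterator helpers are illustrative, not the DRLVM metadata):

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for a DRLVM vector block: a fixed array of pointer-sized
       slots plus a fill count. */
    typedef struct {
      size_t count;
      uintptr_t slots[64];
    } Vec_Block;

    static uintptr_t *vec_iter_init(Vec_Block *b)     { return b->slots; }
    static int vec_iter_end(Vec_Block *b, uintptr_t *it)
      { return it >= b->slots + b->count; }
    static uintptr_t *vec_iter_advance(uintptr_t *it) { return it + 1; }

    /* The loop shape used in fallback_mark_scan.cpp: each slot is a
       pointer-sized value, so the iterator must be pointer-sized too. */
    static void scan(Vec_Block *b, void (*mark)(void **))
    {
      uintptr_t *it = vec_iter_init(b);
      while (!vec_iter_end(b, it)) {
        void **p_ref = (void **)*it;
        it = vec_iter_advance(it);
        mark(p_ref);
      }
    }
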
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp Sun Jan 28 06:41:08 2007
@@ -20,6 +20,8 @@
 
 #include "mspace.h"
 
+#include "../common/gc_space.h"
+
 static void mspace_destruct_blocks(Mspace* mspace)
 {   
   return;
@@ -40,12 +42,19 @@
 
   void* reserved_base = start;
   /* commit mspace mem */
-  vm_commit_mem(reserved_base, commit_size);
+  if(!large_page_hint)
+    vm_commit_mem(reserved_base, commit_size);
   memset(reserved_base, 0, commit_size);
   
   mspace->committed_heap_size = commit_size;
   mspace->heap_start = reserved_base;
-  mspace->heap_end = (void *)((unsigned int)reserved_base + mspace_size);
+  
+#ifdef STATIC_NOS_MAPPING
+  mspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + mspace_size);
+#else
+  mspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + commit_size);
+#endif
+
   mspace->num_managed_blocks = commit_size >> GC_BLOCK_SHIFT_COUNT;
   
   mspace->first_block_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, reserved_base);
@@ -62,6 +71,10 @@
 
   mspace->move_object = TRUE;
   mspace->gc = gc;
+
+  /* For_LOS adaptive: the threshold is initialized to half of the MOS + NOS commit size. */
+  mspace->expected_threshold = (POINTER_SIZE_INT)( ( (float)mspace->committed_heap_size * (1.f + 1.f / gc->survive_ratio) ) * 0.5f );
+
   gc_set_mos((GC_Gen*)gc, (Space*)mspace);
 
   return;
@@ -147,3 +160,16 @@
    
   return;  
 }
+
+/*For_LOS adaptive.*/
+void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold)
+{
+    mspace->expected_threshold = threshold;
+    return;
+}
+
+POINTER_SIZE_INT mspace_get_expected_threshold(Mspace* mspace)
+{
+    return mspace->expected_threshold;
+}
+

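The initial expected_threshold set in mspace_initialize above is half of committed * (1 + 1/r), where r is the heap survive ratio: committed/r estimates the NOS share that feeds MOS, so the sum approximates MOS + NOS, and the threshold is half of that. A tiny worked sketch with illustrative numbers (not values from a real run):

    #include <stdio.h>

    int main(void)
    {
      double mos_commit = 64.0;   /* MB, assumption */
      double r = 0.5;             /* survive ratio, assumption */
      double threshold = (mos_commit * (1.0 + 1.0 / r)) * 0.5;
      printf("expected_threshold = %.0f MB\n", threshold);  /* 96 MB */
      return 0;
    }
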
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h Sun Jan 28 06:41:08 2007
@@ -36,6 +36,10 @@
   unsigned int collect_algorithm;
   GC* gc;
   Boolean move_object;
+  /* Size allocated after the last collection. */
+  unsigned int alloced_size;
+  /* For_statistic: size surviving after a major collection */
+  unsigned int surviving_size;
   /* END of Space --> */
     
   Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */
@@ -50,8 +54,9 @@
   unsigned int num_total_blocks;
   /* END of Blocked_Space --> */
   
-  volatile Block_Header* block_iterator;  
-  
+  volatile Block_Header* block_iterator;    
+  /*Threshold computed by NOS adaptive*/
+  POINTER_SIZE_INT expected_threshold;
 }Mspace;
 
 void mspace_initialize(GC* gc, void* reserved_base, unsigned int mspace_size, unsigned int commit_size);
@@ -66,5 +71,7 @@
 Block_Header* mspace_block_iterator_get(Mspace* mspace);
 
 void mspace_fix_after_copy_nursery(Collector* collector, Mspace* mspace);
+
+void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold);
 
 #endif //#ifdef _MSC_SPACE_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp Sun Jan 28 06:41:08 2007
@@ -38,6 +38,8 @@
     Block_Header* alloc_block = (Block_Header*)&(mspace->blocks[allocated_idx - mspace->first_block_idx]);
     assert(alloc_block->status == BLOCK_FREE);
     alloc_block->status = BLOCK_IN_USE;
+    /* For_statistic: MOS allocation information */
+    mspace->alloced_size += GC_BLOCK_SIZE_BYTES;
     
     /* set allocation context */
     void* new_free = alloc_block->free;
@@ -52,7 +54,7 @@
 
     /* the first-time zeroing area includes block header, to make subsequent allocs page aligned */
     unsigned int zeroing_size = ZEROING_SIZE - GC_BLOCK_HEADER_SIZE_BYTES;
-    allocator->ceiling = (void*)((unsigned int)new_free + zeroing_size);
+    allocator->ceiling = (void*)((POINTER_SIZE_INT)new_free + zeroing_size);
     memset(new_free, 0, zeroing_size);
 
 #endif /* #ifndef ALLOC_ZEROING */

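The ceiling adjustment above belongs to a lazy-zeroing allocator: instead of zeroing a whole block up front, only a window of ZEROING_SIZE bytes ahead of the bump pointer is pre-zeroed, and the ceiling marks where that window ends. A self-contained sketch of the idea, with simplified stand-in types and window size (not the DRLVM layout):

    #include <stddef.h>
    #include <string.h>

    typedef struct {
      char *free;      /* next allocation address */
      char *ceiling;   /* end of the pre-zeroed region */
      char *end;       /* end of the block */
    } Allocator;

    #define ZEROING_SIZE 4096

    static void *alloc(Allocator *a, size_t size)
    {
      if (a->free + size > a->end) return NULL;       /* block exhausted */
      if (a->free + size > a->ceiling) {
        /* extend the zeroed window ahead of the request instead of
           zeroing the whole block up front */
        char *new_ceiling = a->free + size + ZEROING_SIZE;
        if (new_ceiling > a->end) new_ceiling = a->end;
        memset(a->ceiling, 0, (size_t)(new_ceiling - a->ceiling));
        a->ceiling = new_ceiling;
      }
      void *result = a->free;
      a->free += size;
      return result;
    }

Fast-path allocations under the ceiling touch no memory beyond the bump pointer, which keeps newly used pages warm and amortizes the memset cost.
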
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp Sun Jan 28 06:41:08 2007
@@ -74,6 +74,9 @@
     }
   }
   mspace->num_used_blocks = new_num_used;
+  /* For_statistic: MOS information */
+  mspace->surviving_size = new_num_used * GC_BLOCK_SIZE_BYTES;
+  mspace->alloced_size = 0;
   
   /* we should clear the remaining blocks which are set to be BLOCK_COMPACTED or BLOCK_TARGET */
   for(; i < mspace->num_managed_blocks; i++){
@@ -183,6 +186,9 @@
   return NULL;
 }
 
+#include "../trace_forward/fspace.h"
+#include "../gen/gen.h"
+
 Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace)
 {    
   Block_Header* cur_target_block = (Block_Header*)next_block_for_target;
@@ -205,8 +211,13 @@
     assert( cur_target_block->status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET)); 
   */
 
-  /* nos is higher than mos, we cant use nos block for compaction target */
-  while( cur_target_block ){
+  /* MOS may be out of space, so we can use NOS blocks as compaction targets,
+   * but we can't use the blocks that were given to LOS when a LOS extension happened.
+   * In that case, an out-of-memory error should be reported to the user.
+   */
+  Fspace *nos = ((GC_Gen*)collector->gc)->nos;
+  Block_Header *nos_end = ((Block_Header *)&nos->blocks[nos->num_managed_blocks-1])->next;
+  while( cur_target_block != nos_end){
     //For_LOS_extend
     //assert( cur_target_block <= collector->cur_compact_block);
     Block_Header* next_target_block = cur_target_block->next;
@@ -242,8 +253,6 @@
 
 void mspace_collection(Mspace* mspace) 
 {
-  // printf("Major Collection ");
-
   mspace->num_collections++;
 
   GC* gc = mspace->gc;  
@@ -259,16 +268,13 @@
 
   //For_LOS_extend
   if(gc->tuner->kind != TRANS_NOTHING){
-    // printf("for LOS extention");
     collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
     
   }else if (gc->collect_kind == FALLBACK_COLLECTION){
-    // printf("for Fallback");
     collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);  
     //IS_MOVE_COMPACT = TRUE;
     //collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
     //IS_MOVE_COMPACT = FALSE;
-
   }else{
 
     switch(mspace->collect_algorithm){
@@ -281,7 +287,7 @@
         collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
         IS_MOVE_COMPACT = FALSE;
         break;
-        
+  
       default:
         printf("\nThe speficied major collection algorithm doesn't exist!\n");
         exit(0);
@@ -289,8 +295,6 @@
     }
 
   }  
-
-  // printf("...end.\n");
   return;  
 } 
 
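The target-block change above replaces a NULL-terminated walk with a sentinel-bounded one: compaction targets may now run past the end of MOS into NOS blocks, but the walk stops at the block after NOS's last managed block, so blocks handed over to LOS are never chosen and a genuine exhaustion surfaces as out-of-memory. A minimal sketch of the shape under stand-in types (Hdr, pick_target, and usable are illustrative):

    /* Sentinel-bounded walk in the spirit of mspace_get_next_target_block. */
    typedef struct Hdr { struct Hdr *next; } Hdr;

    static Hdr *pick_target(Hdr *cur, Hdr *nos_end, int (*usable)(Hdr *))
    {
      while (cur != nos_end) {     /* the sentinel, not NULL, ends the walk */
        if (usable(cur)) return cur;
        cur = cur->next;
      }
      return NULL;                 /* genuinely out of space: report OOM */
    }

Using a one-past-the-end sentinel rather than NULL is what lets the same loop cover both the MOS-only and the MOS-plus-NOS cases without a mode flag.
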


