harmony-commits mailing list archives

From x..@apache.org
Subject svn commit: r606876 [2/6] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ jni/ los/ mark_compact/ mark_sweep/ semi_space/ thread/ trace_forward/ utils/ verify/
Date Wed, 26 Dec 2007 10:17:15 GMT
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,91 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "hashcode.h"
+#include "gc_metadata.h"
+
+/* <-- processing of hashcode in different GC algorithms */
+Obj_Info_Type slide_compact_process_hashcode(Partial_Reveal_Object* p_obj, void* dest_addr, 
+                                                unsigned int* p_obj_size, Collector* collector, 
+                                                Hashcode_Buf* old_buf, Hashcode_Buf* new_buf)
+{
+  Obj_Info_Type obj_info = get_obj_info(p_obj);
+  POINTER_SIZE_INT hashcode = 0;
+
+  switch(obj_info & HASHCODE_MASK){
+    case HASHCODE_SET_UNALLOCATED:
+      if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj){
+        *p_obj_size += GC_OBJECT_ALIGNMENT; 
+        obj_info = obj_info | HASHCODE_ATTACHED_BIT;
+        *(int*) &hashcode = hashcode_gen(p_obj);
+        POINTER_SIZE_INT obj_end_pos = (POINTER_SIZE_INT)dest_addr + vm_object_size(p_obj);
+        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)obj_end_pos);
+        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)hashcode);
+      } 
+      break;
+      
+    case HASHCODE_SET_ATTACHED:
+      obj_sethash_in_vt(p_obj);
+      break;
+      
+    case HASHCODE_SET_BUFFERED:
+      *(int*) &hashcode = hashcode_buf_lookup(p_obj, old_buf);
+      if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj){
+        *p_obj_size += GC_OBJECT_ALIGNMENT; 
+        obj_info = obj_info & ~HASHCODE_BUFFERED_BIT;
+        obj_info = obj_info | HASHCODE_ATTACHED_BIT;
+        POINTER_SIZE_INT obj_end_pos = (POINTER_SIZE_INT)dest_addr + vm_object_size(p_obj);
+        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)obj_end_pos);
+        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)hashcode);
+      }else{
+        hashcode_buf_add((Partial_Reveal_Object*)dest_addr, *(int*) &hashcode, new_buf);          
+      }
+      break;
+      
+    case HASHCODE_UNSET:
+      break;
+      
+    default:
+      assert(0);
+  
+  }
+  return obj_info;
+}
+
+void move_compact_process_hashcode(Partial_Reveal_Object* p_obj,Hashcode_Buf* old_buf,  
+                                           Hashcode_Buf* new_buf)
+{
+  if(hashcode_is_set(p_obj) && !hashcode_is_attached(p_obj)){
+    int hashcode;
+    if(hashcode_is_buffered(p_obj)){
+      /* already buffered objects */
+      hashcode = hashcode_buf_lookup(p_obj, old_buf);
+      hashcode_buf_add(p_obj, hashcode, new_buf);
+    }else{
+      /* objects need buffering */
+      hashcode = hashcode_gen(p_obj);
+      hashcode_buf_add(p_obj, hashcode, new_buf);
+      Obj_Info_Type oi = get_obj_info_raw(p_obj);
+      set_obj_info(p_obj, oi | HASHCODE_BUFFERED_BIT);
+    }
+  }
+}
+
+/* processing of hashcode in different GC algorithms --> */
+
+
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

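The new hashcode.cpp above implements the slide-compact half of DRLVM's lazy identity-hash scheme: an object whose hash was requested but never materialized (HASHCODE_SET_UNALLOCATED) grows by one alignment slot when it moves, and the hash value is written just past its new end. A minimal, self-contained sketch of that attach-on-move idea, where every type and helper is a simplified stand-in rather than the DRLVM API:

#include <cstddef>
#include <cstdint>
#include <cstring>

static const size_t ALIGN = 8;           /* stands in for GC_OBJECT_ALIGNMENT */

struct Obj { uint32_t flags; };          /* header word of a heap object */
static const uint32_t HASH_REQUESTED = 0x1;
static const uint32_t HASH_ATTACHED  = 0x2;

static int identity_hash(const Obj* o)   /* like hashcode_gen(): derived from the address */
{ return (int)(uintptr_t)o; }

/* Copy `obj` of byte size `size` to `dest`; if its hash was requested but never
   stored, reserve one extra ALIGN-sized slot at the destination and stash the
   pre-move hash there, as slide_compact_process_hashcode() does. */
Obj* move_with_hash(Obj* obj, void* dest, size_t size)
{
  /* capture state before the move: source and destination may overlap */
  uint32_t flags = obj->flags;
  int hash = identity_hash(obj);

  memmove(dest, obj, size);
  Obj* copy = (Obj*)dest;
  if((flags & HASH_REQUESTED) && !(flags & HASH_ATTACHED)){
    *(int*)((char*)dest + size) = hash;   /* the hash word lives past the object */
    copy->flags = flags | HASH_ATTACHED;  /* the copy now occupies size + ALIGN bytes */
  }
  return copy;
}
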
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h Wed Dec 26 02:17:10 2007
@@ -287,103 +287,43 @@
   return;
 }
 
-void collector_hashcodeset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref);
+int obj_lookup_hashcode_in_buf(Partial_Reveal_Object *p_obj);
 
-inline Obj_Info_Type slide_compact_process_hashcode(Partial_Reveal_Object* p_obj, void* dest_addr, 
-                                                unsigned int* p_obj_size, Collector* collector, 
-                                                Hashcode_Buf* old_buf, Hashcode_Buf* new_buf)
+inline int hashcode_lookup(Partial_Reveal_Object* p_obj,Obj_Info_Type obj_info)
 {
-  Obj_Info_Type obj_info = get_obj_info(p_obj);
-  POINTER_SIZE_INT hashcode = 0;
-
-  switch(obj_info & HASHCODE_MASK){
-    case HASHCODE_SET_UNALLOCATED:
-      if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj){
-        *p_obj_size += GC_OBJECT_ALIGNMENT; 
-        obj_info = obj_info | HASHCODE_ATTACHED_BIT;
-        *(int*) &hashcode = hashcode_gen(p_obj);
-        POINTER_SIZE_INT obj_end_pos = (POINTER_SIZE_INT)dest_addr + vm_object_size(p_obj);
-        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)obj_end_pos);
-        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)hashcode);
-      } 
-      break;
-      
-    case HASHCODE_SET_ATTACHED:
-      obj_sethash_in_vt(p_obj);
-      break;
-      
-    case HASHCODE_SET_BUFFERED:
-      *(int*) &hashcode = hashcode_buf_lookup(p_obj, old_buf);
-      if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj){
-        *p_obj_size += GC_OBJECT_ALIGNMENT; 
-        obj_info = obj_info & ~HASHCODE_BUFFERED_BIT;
-        obj_info = obj_info | HASHCODE_ATTACHED_BIT;
-        POINTER_SIZE_INT obj_end_pos = (POINTER_SIZE_INT)dest_addr + vm_object_size(p_obj);
-        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)obj_end_pos);
-        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)hashcode);
-      }else{
-        hashcode_buf_add((Partial_Reveal_Object*)dest_addr, *(int*) &hashcode, new_buf);          
-      }
-      break;
-      
-    case HASHCODE_UNSET:
-      break;
-      
-    default:
-      assert(0);
-  
+  int hash;
+  if(hashcode_is_attached(p_obj)){
+    int offset = vm_object_size(p_obj);
+    unsigned char* pos = (unsigned char *)p_obj;
+    hash = *(int*) (pos + offset);
+  }else if(hashcode_is_buffered(p_obj)){
+    hash = obj_lookup_hashcode_in_buf(p_obj);
   }
-  return obj_info;
+  return hash;
 }
 
-inline void move_compact_process_hashcode(Partial_Reveal_Object* p_obj,Hashcode_Buf* old_buf,  
-                                           Hashcode_Buf* new_buf)
+inline void precompute_hashcode_extend_size(Partial_Reveal_Object* p_obj, void* targ_addr,
+                                               unsigned int * obj_size_precompute)
 {
-  if(hashcode_is_set(p_obj) && !hashcode_is_attached(p_obj)){
-    int hashcode;
-    if(hashcode_is_buffered(p_obj)){
-      /*already buffered objects;*/
-      hashcode = hashcode_buf_lookup(p_obj, old_buf);
-      hashcode_buf_add(p_obj, hashcode, new_buf);
-    }else{
-      /*objects need buffering.*/
-      hashcode = hashcode_gen(p_obj);
-      hashcode_buf_add(p_obj, hashcode, new_buf);
-      Obj_Info_Type oi = get_obj_info_raw(p_obj);
-      set_obj_info(p_obj, oi | HASHCODE_BUFFERED_BIT);
-    }
+  if(hashcode_is_set(p_obj) && !hashcode_is_attached(p_obj)){ 
+    if((POINTER_SIZE_INT)targ_addr != (POINTER_SIZE_INT)p_obj)
+        *obj_size_precompute += GC_OBJECT_ALIGNMENT;
   }
 }
 
-inline Obj_Info_Type trace_forward_process_hashcode(Partial_Reveal_Object* p_obj, Partial_Reveal_Object* p_old_obj,
+inline Obj_Info_Type forward_obj_attach_hashcode(Partial_Reveal_Object* p_targ_obj, Partial_Reveal_Object* p_obj,
                                                     Obj_Info_Type oi, unsigned int p_obj_size)
 {
     oi  |= HASHCODE_ATTACHED_BIT;
-    *(int *)(((char*)p_obj) + p_obj_size - GC_OBJECT_ALIGNMENT) = hashcode_gen(p_old_obj);
-    assert(vm_object_size(p_obj) != 0);
+    *(int *)(((char*)p_targ_obj) + p_obj_size) = hashcode_gen(p_obj);
     return oi;
 }
 
-inline void precompute_hashcode_extend_size(Partial_Reveal_Object* p_obj, void* dest_addr,
-                                               unsigned int * obj_size_precompute)
-{
-  if(hashcode_is_set(p_obj) && !hashcode_is_attached(p_obj)){ 
-    if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj)
-        *obj_size_precompute += GC_OBJECT_ALIGNMENT;
-  }
-}
+Obj_Info_Type slide_compact_process_hashcode(Partial_Reveal_Object* p_obj, void* dest_addr, 
+                                                unsigned int* p_obj_size, Collector* collector, 
+                                                Hashcode_Buf* old_buf, Hashcode_Buf* new_buf);
+
+void move_compact_process_hashcode(Partial_Reveal_Object* p_obj,Hashcode_Buf* old_buf,  
+                                           Hashcode_Buf* new_buf);
 
-inline int obj_lookup_hashcode_in_buf(Partial_Reveal_Object *p_obj);
-inline int hashcode_lookup(Partial_Reveal_Object* p_obj,Obj_Info_Type obj_info)
-{
-  int hash;
-  if(hashcode_is_attached(p_obj)){
-    int offset = vm_object_size(p_obj);
-    unsigned char* pos = (unsigned char *)p_obj;
-    hash = *(int*) (pos + offset);
-  }else if(hashcode_is_buffered(p_obj)){
-    hash = obj_lookup_hashcode_in_buf(p_obj);
-  }
-  return hash;
-}
 #endif //_HASHCODE_H_

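Read together with the new hashcode.cpp, the header now implies a four-state lifecycle for an object's identity hash. The restatement below is illustrative; the names are not the DRLVM constants, and the first transition (triggered by a mutator's hashCode() request) is an assumption, since that code is outside this commit:

enum Hash_State {
  HASH_UNSET,            /* hash never requested                       */
  HASH_SET_UNALLOCATED,  /* requested, but no storage reserved yet     */
  HASH_SET_BUFFERED,     /* value parked in a side Hashcode_Buf        */
  HASH_SET_ATTACHED      /* value stored in the word after the object  */
};
/* Transitions visible in this commit:
   - SET_UNALLOCATED -> SET_ATTACHED: slide compact moves the object and
     appends the hash word (slide_compact_process_hashcode);
   - SET_UNALLOCATED -> SET_BUFFERED: move compact keeps the object in place
     and buffers the generated hash (move_compact_process_hashcode);
   - SET_BUFFERED -> SET_ATTACHED: a later move materializes the word;
   - SET_BUFFERED -> SET_BUFFERED: a non-moving cycle re-buffers the value
     into the new Hashcode_Buf.
   hashcode_lookup() reads from the attached word or from the buffer; callers
   are expected to invoke it only in those two states, otherwise `hash` is
   returned uninitialized. */
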
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp Wed Dec 26 02:17:10 2007
@@ -47,8 +47,8 @@
   vm_notify_obj_alive( (void *)p_obj);
   assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
   
-  Partial_Reveal_VTable *vtable = uncompress_vt(obj_get_vt(p_obj));
-  if(VTABLE_TRACING)
+  Partial_Reveal_VTable *vtable = decode_vt(obj_get_vt(p_obj));
+  if(TRACE_JLC_VIA_VTABLE)
     if(vtable->vtmark == VT_UNMARKED) {
       vtable->vtmark = VT_MARKED;
       if(obj_mark_in_vt(vtable->jlC))

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/object_status.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/object_status.h?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/object_status.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/object_status.h Wed Dec 26 02:17:10 2007
@@ -20,7 +20,7 @@
 
 #include "../gen/gen.h"
 #include "../mark_sweep/gc_ms.h"
-#include "../mark_sweep/sspace_mark_sweep.h"
+#include "../mark_sweep/wspace_mark_sweep.h"
 
 
 inline Boolean obj_is_dead_in_gen_minor_gc(Partial_Reveal_Object *p_obj)
@@ -59,6 +59,12 @@
 }
 #endif
 
+#ifdef USE_UNIQUE_MOVE_COMPACT_GC
+inline Boolean obj_is_dead_in_move_compact_no_los_gc(Partial_Reveal_Object *p_obj)
+{
+  return !obj_is_marked_in_vt(p_obj);
+}
+#endif
 inline Boolean gc_obj_is_dead(GC *gc, Partial_Reveal_Object *p_obj)
 {
   assert(p_obj);
@@ -67,6 +73,10 @@
   return obj_is_dead_in_mark_sweep_gc(p_obj);
 #endif
 
+#ifdef USE_UNIQUE_MOVE_COMPACT_GC
+	return obj_is_dead_in_move_compact_no_los_gc(p_obj);
+#endif
+
   if(gc_match_kind(gc, MINOR_COLLECTION)){
     if(gc_is_gen_mode())
       return obj_is_dead_in_gen_minor_gc(p_obj);
@@ -89,10 +99,14 @@
   /* assert(!gc_obj_is_dead(gc, p_obj)); commented out for weakroot */
 
 #ifdef USE_MARK_SWEEP_GC
-  Sspace *sspace = gc_ms_get_sspace((GC_MS*)gc);
-  return sspace->move_object;
+  Wspace *wspace = gc_ms_get_wspace((GC_MS*)gc);
+  return wspace->move_object;
 #endif
 
+#ifdef USE_UNIQUE_MOVE_COMPACT_GC
+  Cspace *cspace = gc_mc_get_cspace((GC_MC*)gc);
+  return cspace->move_object;
+#endif
   if(gc_is_gen_mode() && gc_match_kind(gc, MINOR_COLLECTION))
     return fspace_obj_to_be_forwarded(p_obj);
   

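The new move-compact liveness test reads the mark straight out of the object's vtable word (obj_is_marked_in_vt). A sketch of the usual low-bit encoding that makes such a test cheap; the layout here is an illustrative assumption, not lifted from the DRLVM headers:

#include <cstdint>

/* With objects and vtables aligned to at least 4 bytes, the low bits of the
   vtable word are always zero, so a collector can borrow bit 0 as a mark bit
   and mask it off whenever the real vtable pointer is needed. */
struct Obj_Header { uintptr_t vt_word; };

static bool obj_is_marked(const Obj_Header* o)
{ return (o->vt_word & 0x1) != 0; }

static bool obj_mark(Obj_Header* o)            /* true if this call set the bit */
{
  if(o->vt_word & 0x1) return false;
  o->vt_word |= 0x1;
  return true;
}

static void* obj_vtable(const Obj_Header* o)   /* strip the mark bit */
{ return (void*)(o->vt_word & ~(uintptr_t)0x1); }
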
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp Wed Dec 26 02:17:10 2007
@@ -198,7 +198,7 @@
     Collector *collector = gc->collectors[i];
     non_los_live_obj_size += collector->non_los_live_obj_size;
     los_live_obj_size += collector->los_live_obj_size;
-    for(unsigned int j = NORMAL_SIZE_SEGMENT_NUM; j--;){
+    for(unsigned int j = 0; j < NORMAL_SIZE_SEGMENT_NUM; j++) {
       segment_live_size[j] += collector->segment_live_size[j];
     }
     memset(collector->segment_live_size, 0, sizeof(POINTER_SIZE_INT) * NORMAL_SIZE_SEGMENT_NUM);
@@ -206,14 +206,12 @@
   
   //POINTER_SIZE_INT additional_non_los_size = ((collector_num * 2) << GC_BLOCK_SHIFT_COUNT) + (non_los_live_obj_size >> GC_BLOCK_SHIFT_COUNT) * (GC_OBJ_SIZE_THRESHOLD/4);
   double additional_non_los_size = 0;
-  for(unsigned int i = NORMAL_SIZE_SEGMENT_NUM; i--;){
+  for(unsigned int i = 0; i < NORMAL_SIZE_SEGMENT_NUM; i++) {
     additional_non_los_size += (double)segment_live_size[i] * SEGMENT_INDEX_TO_SIZE(i) / non_los_live_obj_size;
   }
   additional_non_los_size *= 1.2; // in case of some cases worse than average one
   POINTER_SIZE_INT non_los_live_block = non_los_live_obj_size / (GC_BLOCK_BODY_SIZE_BYTES-(POINTER_SIZE_INT)additional_non_los_size);
-  additional_non_los_size *= non_los_live_block + 1;
-  additional_non_los_size += collector_num << (GC_BLOCK_SHIFT_COUNT + 1);
-  non_los_live_obj_size = round_up_to_size(non_los_live_obj_size + (POINTER_SIZE_INT)additional_non_los_size, GC_BLOCK_SIZE_BYTES);
+  non_los_live_obj_size = (non_los_live_block << GC_BLOCK_SHIFT_COUNT);
   if(non_los_live_obj_size > non_los_size)
     non_los_live_obj_size = non_los_size;
 
@@ -538,4 +536,6 @@
   STD_FREE(tuner->interim_blocks);
   return;
 }
+
+
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/weak_roots.cpp Wed Dec 26 02:17:10 2007
@@ -70,7 +70,9 @@
 
       if(IS_MOVE_COMPACT){
         assert(space_of_addr(gc, p_obj)->move_object);
-        *p_ref = ref_to_obj_ptr(obj_get_fw_in_table(p_obj));
+        *p_ref = obj_get_fw_in_table(p_obj);
+      } else if(gc_match_kind(gc, MC_COLLECTION)){
+        *p_ref = obj_get_fw_in_table(p_obj);
       } else if(gc_match_kind(gc, MS_COMPACT_COLLECTION) || gc_get_mos((GC_Gen*)gc)->collect_algorithm==MAJOR_MARK_SWEEP){
         if(obj_is_fw_in_oi(p_obj)){
           p_obj = obj_get_fw_in_oi(p_obj);

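The widened branch above routes both IS_MOVE_COMPACT and MC_COLLECTION weak roots through a side forwarding table, while other collections read the forwarding pointer out of the object's own header. The two lookup styles in miniature; the types are stand-ins, and the real obj_get_fw_in_table uses GC-managed tables rather than a hash map:

#include <cstdint>
#include <unordered_map>

struct Obj { uintptr_t oi; };                      /* object info header word */
static const uintptr_t FW_BIT = 0x1;

/* style 1: forwarding pointer packed into the header word itself */
static bool obj_is_fw_in_oi(const Obj* o)  { return (o->oi & FW_BIT) != 0; }
static Obj* obj_get_fw_in_oi(const Obj* o) { return (Obj*)(o->oi & ~FW_BIT); }

/* style 2: forwarding pointer kept in a side table, leaving the header free
   for other uses (what the move-compact paths above rely on) */
static std::unordered_map<const Obj*, Obj*> fw_table;
static Obj* obj_get_fw_in_table(const Obj* o) { return fw_table.at(o); }

/* fixing a weak root then mirrors the branch in the patch */
Obj* fix_weak_root(Obj* p_obj, bool uses_side_table)
{
  return uses_side_table ? obj_get_fw_in_table(p_obj)
                         : obj_get_fw_in_oi(p_obj);
}
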
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp Wed Dec 26 02:17:10 2007
@@ -31,6 +31,7 @@
 #include "../common/space_tuner.h"
 #include "../common/compressed_ref.h"
 #include "../common/object_status.h"
+#include "../common/gc_concurrent.h"
 
 Boolean IGNORE_FINREF = FALSE;
 Boolean DURING_RESURRECTION = FALSE;
@@ -115,6 +116,7 @@
 extern void trace_obj_in_ms_fallback_marking(Collector *collector, void *p_ref);
 extern void trace_obj_in_space_tune_marking(Collector *collector, void *p_obj);
 extern void trace_obj_in_ms_marking(Collector *collector, void *p_obj);
+extern void trace_obj_in_ms_concurrent_mark(Collector *collector, void *p_obj);
 
 
 typedef void (* Trace_Object_Func)(Collector *collector, void *p_ref_or_obj);
@@ -163,7 +165,10 @@
   } else {
     assert(gc_match_kind(gc, MARK_SWEEP_GC));
     p_ref_or_obj = p_obj;
-    trace_object = trace_obj_in_ms_marking;
+    if(!gc_mark_is_concurrent())
+      trace_object = trace_obj_in_ms_marking;
+    else
+      trace_object = trace_obj_in_ms_concurrent_mark;
   }
   
   collector->trace_stack = free_task_pool_get_entry(metadata);
@@ -662,16 +667,18 @@
   if(address_belongs_to_gc_heap(p_ref, gc) && (p_ref >= los_boundary)){
     unsigned int offset = get_gc_referent_offset();
     Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
-    Partial_Reveal_Object *p_new_ref = ref_to_obj_ptr(obj_get_fw_in_table(p_old_ref));
+    Partial_Reveal_Object *p_new_ref = obj_get_fw_in_table(p_old_ref);
     p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
   }
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
   assert(space_of_addr(gc, p_obj)->move_object);
   
   if(p_obj < los_boundary)
-    write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+    p_obj = obj_get_fw_in_oi(p_obj);
   else
-    *p_ref = obj_get_fw_in_table(p_obj);
+    p_obj = obj_get_fw_in_table(p_obj);
+
+  write_slot(p_ref, p_obj);
 }
 
 /* In two cases mark-sweep needs fixing repointed refs:
@@ -803,6 +810,8 @@
   finref_copy_pool(finalizable_obj_pool, finalizable_obj_pool_copy, gc);
   finref_copy_pool_to_rootset(gc, finalizable_obj_pool_copy);
 }
+
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h Wed Dec 26 02:17:10 2007
@@ -64,6 +64,7 @@
   REF *p_referent_field = obj_get_referent_field(p_obj);
   REF p_referent = *p_referent_field;
   if (!p_referent) return;
+
   if(DURING_RESURRECTION){
     write_slot(p_referent_field, NULL);
     return;
@@ -86,6 +87,19 @@
       break;
   }
 }
+
+inline void scan_weak_reference_direct(Collector *collector, Partial_Reveal_Object *p_obj, Scan_Slot_Func scan_slot)
+{
+  WeakReferenceType type = special_reference_type(p_obj);
+  if(type == NOT_REFERENCE)
+    return;
+  REF *p_referent_field = obj_get_referent_field(p_obj);
+  REF p_referent = *p_referent_field;
+  if (!p_referent) return;
+
+  scan_slot(collector, p_referent_field);
+}
+
 
 extern void gc_update_weakref_ignore_finref(GC *gc);
 extern void collector_identify_finref(Collector *collector);

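The new scan_weak_reference_direct short-circuits the soft/weak/phantom triage: the referent slot is handed to the ordinary slot-scanning callback, so the referent is traced as if it were a strong field, presumably for phases where reference semantics are handled elsewhere (an inference; the callers are not in this part of the commit). The callback shape in a stand-alone illustration, with stand-in types:

#include <cstdio>

struct Collector;                            /* opaque here */
typedef void (*Scan_Slot_Func)(Collector*, void**);

static void trace_slot(Collector*, void** p_slot)
{ std::printf("trace slot %p -> %p\n", (void*)p_slot, *p_slot); }

/* trace the referent like any strong slot, skipping reference semantics,
   mirroring scan_weak_reference_direct() above */
static void scan_referent_as_strong(Collector* c, void** p_referent_field,
                                    Scan_Slot_Func scan_slot)
{
  if(*p_referent_field == nullptr) return;   /* cleared referent: nothing to do */
  scan_slot(c, p_referent_field);
}

int main()
{
  int dummy;
  void* referent = &dummy;
  scan_referent_as_strong(nullptr, &referent, trace_slot);
  return 0;
}
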
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp Wed Dec 26 02:17:10 2007
@@ -188,7 +188,7 @@
   Mutator *mutator = gc->mutator_list;
   while(mutator){
     pool_put_entry(obj_with_fin_pool, mutator->obj_with_fin);
-    mutator->obj_with_fin = NULL;
+    mutator->obj_with_fin = finref_get_free_block(gc);
     mutator = mutator->next;
   }
 }
@@ -216,15 +216,27 @@
   for(unsigned int i = 0; i < num_active_collectors; i++)
   {
     Collector *collector = gc->collectors[i];
-    pool_put_entry(metadata->softref_pool, collector->softref_set);
-    pool_put_entry(metadata->weakref_pool, collector->weakref_set);
-    pool_put_entry(metadata->phanref_pool, collector->phanref_set);
+    if(!vector_block_is_empty(collector->softref_set))
+      pool_put_entry(metadata->softref_pool, collector->softref_set);
+    else
+      pool_put_entry(metadata->free_pool, collector->softref_set);
+    
+    if(!vector_block_is_empty(collector->weakref_set))
+      pool_put_entry(metadata->weakref_pool, collector->weakref_set);
+    else
+      pool_put_entry(metadata->free_pool, collector->weakref_set);
+      
+    if(!vector_block_is_empty(collector->phanref_set))
+      pool_put_entry(metadata->phanref_pool, collector->phanref_set);
+    else
+      pool_put_entry(metadata->free_pool, collector->phanref_set);
+      
     collector->softref_set = NULL;
     collector->weakref_set= NULL;
     collector->phanref_set= NULL;
   }
   
-  if(gc_mark_is_concurrent()){
+  if(gc_mark_is_concurrent() && !gc_sweep_is_concurrent()){
     unsigned int num_active_markers = gc->num_active_markers;
     for(unsigned int i = 0; i < num_active_markers; i++)
     {
@@ -274,7 +286,7 @@
   metadata->obj_with_fin_pool = finalizable_obj_pool;
   metadata->finalizable_obj_pool = obj_with_fin_pool;
   
-  gc_reset_obj_with_fin(gc);
+  //gc_reset_obj_with_fin(gc);
 }
 
 
@@ -428,4 +440,6 @@
   }
  return TRUE;
 }
+
+
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp Wed Dec 26 02:17:10 2007
@@ -22,16 +22,16 @@
 #include "../gen/gen.h"
 #include "../thread/mutator.h"
 #include "gc_for_barrier.h"
-#include "../mark_sweep/sspace_mark_sweep.h"
+#include "../mark_sweep/wspace_mark_sweep.h"
 #include "../common/gc_concurrent.h"
 
 
 /* All the write barrier interfaces need cleanup */
 
-Boolean gen_mode;
+volatile unsigned int write_barrier_function;
 
 /* The implementations are only temporary */
-static void gc_slot_write_barrier(Managed_Object_Handle *p_slot, 
+static void write_barrier_rem_source_slot(Managed_Object_Handle *p_slot, 
                       Managed_Object_Handle p_target) 
 {
   if(p_target >= nos_boundary && p_slot < nos_boundary){
@@ -44,6 +44,19 @@
   return;
 }
 
+static void write_barrier_rem_source_obj(Managed_Object_Handle p_obj_holding_ref)
+{
+  if(obj_need_remember((Partial_Reveal_Object*)p_obj_holding_ref)){
+    Mutator *mutator = (Mutator *)gc_get_tls();
+
+    //FIXME: Release lock.
+    lock(mutator->dirty_set_lock);
+    obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
+    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
+    unlock(mutator->dirty_set_lock);
+  }
+}
+
 static void gc_object_write_barrier(Managed_Object_Handle p_object) 
 {
   
@@ -82,27 +95,13 @@
   return;
 }
 
-void gc_heap_wrote_object (Managed_Object_Handle p_obj_written)
-{
-  /*concurrent mark: since object clone and array copy do not modify object slot, 
-      we treat it as an new object. It has already been marked when dest object was created.*/  
-  if( !gc_is_gen_mode() ) return;
-  if( object_has_ref_field((Partial_Reveal_Object*)p_obj_written)){
-    /* for array copy and object clone */
-    gc_object_write_barrier(p_obj_written); 
-  }
-}
-
-/* The following routines were supposed to be the only way to alter any value in gc heap. */
-void gc_heap_write_ref (Managed_Object_Handle p_obj_holding_ref, unsigned offset, Managed_Object_Handle p_target) 
-{  assert(0); }
 
 /*This function is for concurrent mark.*/
-static void gc_dirty_object_write_barrier(Managed_Object_Handle p_obj_holding_ref)
+static void write_barrier_rem_obj_snapshot(Managed_Object_Handle p_obj_holding_ref)
 {
   Mutator *mutator = (Mutator *)gc_get_tls();
   REF* p_obj_slot; 
-  if(obj_need_take_snaptshot((Partial_Reveal_Object*)p_obj_holding_ref)){
+  if(obj_need_take_snapshot((Partial_Reveal_Object*)p_obj_holding_ref)){
     if (object_is_array((Partial_Reveal_Object*)p_obj_holding_ref)) {
       Partial_Reveal_Object* array = (Partial_Reveal_Object*)p_obj_holding_ref;
       assert(!obj_is_primitive_array(array));
@@ -114,7 +113,7 @@
         p_obj_slot = (REF*)vector_get_element_address_ref((Vector_Handle) array, i);
         obj_to_snapshot = (Partial_Reveal_Object*)read_slot(p_obj_slot);
         if (obj_to_snapshot != NULL)  
-          mutator_snapshotset_add_entry(mutator, obj_to_snapshot);
+          mutator_dirtyset_add_entry(mutator, obj_to_snapshot);
       }   
     }else{
       /* scan non-array object */
@@ -123,12 +122,12 @@
       int *ref_iterator = object_ref_iterator_init(p_obj);
       
       Partial_Reveal_Object* obj_to_snapshot; 
-      
+
       for(unsigned int i=0; i<num_refs; i++){
         p_obj_slot = object_ref_iterator_get(ref_iterator+i, p_obj);        
         obj_to_snapshot = (Partial_Reveal_Object*)read_slot(p_obj_slot);
         if (obj_to_snapshot != NULL)  
-          mutator_snapshotset_add_entry(mutator, obj_to_snapshot);
+          mutator_dirtyset_add_entry(mutator, obj_to_snapshot);
       }
     }
     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
@@ -136,28 +135,81 @@
   }
 }
 
+static void write_barrier_rem_slot_oldvar(Managed_Object_Handle* p_slot)
+{
+  Mutator *mutator = (Mutator *)gc_get_tls();  
+  REF* p_obj_slot = (REF*) p_slot ;
+  Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)read_slot(p_obj_slot);
+  if(p_obj && obj_need_remember_oldvar(p_obj)){
+    mutator_dirtyset_add_entry(mutator, p_obj);
+  }
+}
+
+//===========================================
 
+/* The following routines were supposed to be the only way to alter any value in gc heap. */
+void gc_heap_write_ref (Managed_Object_Handle p_obj_holding_ref, unsigned offset, Managed_Object_Handle p_target) 
+{  assert(0); }
+
+void gc_heap_wrote_object (Managed_Object_Handle p_obj_written)
+{
+  /* Concurrent Mark: since object clone and array copy do not modify object slots,
+      we treat it as a new object. It has already been marked when the dest object
+      was created. We use the WRITE_BARRIER_REM_SOURCE_OBJ function here for debugging.
+    */
+
+  if(WRITE_BARRIER_REM_SOURCE_OBJ == write_barrier_function){
+    Mutator *mutator = (Mutator *)gc_get_tls();  
+    lock(mutator->dirty_set_lock);
+    
+    obj_dirty_in_table((Partial_Reveal_Object *) p_obj_written);
+    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_written);
+    
+    unlock(mutator->dirty_set_lock);
+  }
+
+  if( !gc_is_gen_mode() ) return;
+  if( object_has_ref_field((Partial_Reveal_Object*)p_obj_written)){
+    /* for array copy and object clone */
+    gc_object_write_barrier(p_obj_written); 
+  }
+}
 
 /* FIXME:: this is not the right interface for write barrier */
 void gc_heap_slot_write_ref (Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
-{  
-  if(!gc_is_concurrent_mark_phase()){
-    *p_slot = p_target;
-    
-    if( !gc_is_gen_mode() ) return;
-    gc_slot_write_barrier(p_slot, p_target); 
-  }else{
-    gc_dirty_object_write_barrier(p_obj_holding_ref);
-    *p_slot = p_target;
+{ 
+  switch(write_barrier_function){
+    case WRITE_BARRIER_REM_NIL:
+      *p_slot = p_target;
+      break;
+    case WRITE_BARRIER_REM_SOURCE_REF:
+      *p_slot = p_target;
+      write_barrier_rem_source_slot(p_slot, p_target); 
+      break;      
+    case WRITE_BARRIER_REM_SOURCE_OBJ:
+      *p_slot = p_target;
+      write_barrier_rem_source_obj(p_obj_holding_ref);
+      break;
+    case WRITE_BARRIER_REM_OBJ_SNAPSHOT:
+      write_barrier_rem_obj_snapshot(p_obj_holding_ref);
+      *p_slot = p_target;
+      break;
+    case WRITE_BARRIER_REM_OLD_VAR:
+      write_barrier_rem_slot_oldvar(p_slot);      
+      *p_slot = p_target;
+      break;
+    default:
+      assert(0);
+      return;
   }
 }
 
 /* this is used for global object update, e.g., strings. */
 void gc_heap_write_global_slot(Managed_Object_Handle *p_slot,Managed_Object_Handle p_target)
 {
-  /*concurrent mark: global object is enumerated, so the old object has been already marked.*/
+  /* Concurrent Mark & Generational Mode:
+      Global objects are roots. After root-set enumeration, these objects will be
+      visited by the GC, so no barrier is needed here.
+    */
 
   *p_slot = p_target;
-  
-  /* Since globals are roots, no barrier here */
 }

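gc_heap_slot_write_ref now dispatches on a global barrier mode instead of testing the concurrent-mark phase inline. Note the ordering the cases preserve: the remembering barriers (SOURCE_REF, SOURCE_OBJ) run after the store, while the snapshot and old-value barriers run before it, because they must observe the slot's pre-store contents. A compressed, self-contained sketch of the two ends of that spectrum, with stand-in types and sets rather than the DRLVM structures:

#include <vector>

static void* nos_boundary_stub = nullptr;   /* stands in for nos_boundary */
static std::vector<void**> remset;          /* remembered old-to-young slots */
static std::vector<void*>  dirty_set;       /* logged pre-store values */

enum Mode { REM_NIL, REM_SOURCE_REF, REM_OLD_VAR };
static Mode barrier_mode = REM_NIL;

/* generational: remember slots outside the nursery that point into it */
static void rem_source_slot(void** slot, void* target)
{
  if(target >= nos_boundary_stub && (void*)slot < nos_boundary_stub)
    remset.push_back(slot);
}

/* old-value logging: record what the slot held before the overwrite, so a
   concurrent marker never loses an object that was reachable at mark start */
static void rem_old_value(void** slot)
{
  if(*slot != nullptr) dirty_set.push_back(*slot);
}

void heap_slot_write(void** slot, void* target)
{
  switch(barrier_mode){
    case REM_NIL:        *slot = target;                                break;
    case REM_SOURCE_REF: *slot = target; rem_source_slot(slot, target); break;
    case REM_OLD_VAR:    rem_old_value(slot); *slot = target;           break;
  }
}
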
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.h?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.h Wed Dec 26 02:17:10 2007
@@ -24,6 +24,22 @@
 
 #include "../jni/java_support.h"
 
+extern volatile unsigned int write_barrier_function;
+
+enum Write_Barrier_Function{
+  WRITE_BARRIER_REM_NIL           = 0x00,
+  WRITE_BARRIER_REM_SOURCE_OBJ    = 0x01,
+  WRITE_BARRIER_REM_SOURCE_REF    = 0x02,
+  WRITE_BARRIER_REM_OLD_VAR       = 0x03,
+  WRITE_BARRIER_REM_NEW_VAR       = 0x04,
+  WRITE_BARRIER_REM_OBJ_SNAPSHOT  = 0x05
+};
+
+inline void gc_set_barrier_function(unsigned int wb_function)
+{
+  write_barrier_function = wb_function;
+}
+
 extern Boolean gen_mode;
 
 inline Boolean gc_is_gen_mode()
@@ -32,20 +48,25 @@
 inline void gc_enable_gen_mode()
 {  
   gen_mode = TRUE;
+  gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_REF);
   HelperClass_set_GenMode(TRUE);
 }
 
 inline void gc_disable_gen_mode()
 {  
   gen_mode = FALSE; 
+  gc_set_barrier_function(WRITE_BARRIER_REM_NIL);
   HelperClass_set_GenMode(FALSE);
 }
 
 inline void gc_set_gen_mode(Boolean status)
 {
   gen_mode = status; 
+  if(gen_mode) 
+    gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_REF);
   HelperClass_set_GenMode(status);   
 }
-
 #endif /* _GC_FOR_BARRIER_H_ */
+
+
 

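A plausible driver for the new enum, assuming gc_for_barrier.h is included: switch to a logging barrier while concurrent mark runs, then restore whatever the base configuration needs. This is a hypothetical usage sketch; the actual mode switching lives in the concurrent-GC code, not in this header:

/* hypothetical: pick the barrier for the current GC phase */
void on_concurrent_mark_start()
{
  /* log object snapshots (or old values) while marking is in progress */
  gc_set_barrier_function(WRITE_BARRIER_REM_OBJ_SNAPSHOT);
}

void on_concurrent_mark_end()
{
  if(gc_is_gen_mode())
    gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_REF);
  else
    gc_set_barrier_function(WRITE_BARRIER_REM_NIL);
}
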
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp Wed Dec 26 02:17:10 2007
@@ -42,13 +42,16 @@
 POINTER_SIZE_INT MIN_NOS_SIZE = 0;
 POINTER_SIZE_INT MAX_NOS_SIZE = 0;
 
-static unsigned int MINOR_ALGO = 0;
+/* should clean up */
+unsigned int MINOR_ALGO = 0;
 static unsigned int MAJOR_ALGO = 0;
 
 Boolean GEN_NONGEN_SWITCH = FALSE;
 
 Boolean JVMTI_HEAP_ITERATION = true;
 
+Boolean gen_mode;
+
 #ifndef STATIC_NOS_MAPPING
 void* nos_boundary;
 #endif
@@ -288,16 +291,23 @@
 void gc_set_mos(GC_Gen *gc, Space *mos){ gc->mos = mos; }
 void gc_set_los(GC_Gen *gc, Space *los){ gc->los = los; }
 
+Space_Alloc_Func nos_alloc;
 Space_Alloc_Func mos_alloc;
-//void* mos_alloc(unsigned size, Allocator *allocator){return mspace_alloc(size, allocator);}
-void* nos_alloc(unsigned size, Allocator *allocator){return fspace_alloc(size, allocator);}
 Space_Alloc_Func los_alloc;
-//void* los_alloc(unsigned size, Allocator *allocator){return lspace_alloc(size, allocator);}
+
 void* los_try_alloc(POINTER_SIZE_INT size, GC* gc){  return lspace_try_alloc((Lspace*)((GC_Gen*)gc)->los, size); }
 
 void gc_nos_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT nos_size, POINTER_SIZE_INT commit_size)
 {
-  Space *nos = (Space*)fspace_initialize((GC*)gc, start, nos_size, commit_size);
+  Space *nos;
+  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL){
+    nos = (Space*)sspace_initialize((GC*)gc, start, nos_size, commit_size);
+    nos_alloc = sspace_alloc;
+  }else{
+    nos = (Space*)fspace_initialize((GC*)gc, start, nos_size, commit_size);
+    nos_alloc = fspace_alloc;
+  }
+  
   gc_set_nos(gc, nos);
   nos->collect_algorithm = MINOR_ALGO;
 }
@@ -309,8 +319,8 @@
 {
   Space *mos;
   if(MAJOR_ALGO == MAJOR_MARK_SWEEP){
-    mos = (Space*)sspace_initialize((GC*)gc, start, mos_size, commit_size);
-    mos_alloc = sspace_alloc;
+    mos = (Space*)wspace_initialize((GC*)gc, start, mos_size, commit_size);
+    mos_alloc = wspace_alloc;
   } else {
     mos = (Space*)mspace_initialize((GC*)gc, start, mos_size, commit_size);
     mos_alloc = mspace_alloc;
@@ -322,7 +332,7 @@
 void gc_mos_destruct(GC_Gen *gc)
 {
   if(MAJOR_ALGO == MAJOR_MARK_SWEEP)
-    sspace_destruct((Sspace*)gc->mos);
+    wspace_destruct((Wspace*)gc->mos);
   else
     mspace_destruct((Mspace*)gc->mos);
 }
@@ -333,7 +343,7 @@
   if(MAJOR_ALGO == MAJOR_MARK_SWEEP){
     assert(los_size == 0);
     los = NULL;
-    los_alloc = sspace_alloc;
+    los_alloc = wspace_alloc;
   } else {
     los = (Space*)lspace_initialize((GC*)gc, start, los_size);
     los_alloc = lspace_alloc;
@@ -350,7 +360,7 @@
 
 Boolean FORCE_FULL_COMPACT = FALSE;
 Boolean IGNORE_VTABLE_TRACING = FALSE;
-Boolean VTABLE_TRACING = FALSE;
+Boolean TRACE_JLC_VIA_VTABLE = FALSE;
 
 unsigned int gc_next_collection_kind(GC_Gen* gc)
 {
@@ -365,19 +375,20 @@
 {
   /* this is for debugging. */
   gc->last_collect_kind = gc->collect_kind;
-  
+#if defined(USE_MARK_SWEEP_GC)
+  gc->collect_kind = MS_COLLECTION;
+#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
+  gc->collect_kind = MC_COLLECTION;
+#else
   if(gc->force_major_collect || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT)
     gc->collect_kind = NORMAL_MAJOR_COLLECTION;
   else
     gc->collect_kind = MINOR_COLLECTION;
     
   if(IGNORE_VTABLE_TRACING || (gc->collect_kind == MINOR_COLLECTION))
-    VTABLE_TRACING = FALSE;
+    TRACE_JLC_VIA_VTABLE = FALSE;
   else
-    VTABLE_TRACING = TRUE;
-
-#ifdef USE_MARK_SWEEP_GC
-  gc->collect_kind = MS_COLLECTION;
+    TRACE_JLC_VIA_VTABLE = TRUE;
 #endif
   return;
 }
@@ -399,7 +410,11 @@
       MINOR_ALGO = MINOR_GEN_FORWARD_POOL;
       gc_enable_gen_mode();
     
-    }else{
+    }else if(!strcmp(minor_algo, "MINOR_NONGEN_SEMISPACE_POOL")){
+      MINOR_ALGO = MINOR_NONGEN_SEMISPACE_POOL;
+      gc_disable_gen_mode();
+    
+    }else {
       WARN2("gc.base","\nWarning: GC algorithm setting incorrect. Will use default value.\n");
     
     }
@@ -430,6 +445,18 @@
   
 }
 
+static Boolean nos_alloc_block(Space* space, Allocator* allocator)
+{
+  Boolean result;
+  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL)
+    result = sspace_alloc_block((Sspace*)space, allocator); 
+  else
+    result = fspace_alloc_block((Fspace*)space, allocator);   
+ 
+  return result;   
+}
+
+/* assign a free area to the mutator that triggers the collection */
 void gc_gen_assign_free_area_to_mutators(GC_Gen* gc)
 {
   if(gc->cause == GC_CAUSE_LOS_IS_FULL){
@@ -438,14 +465,15 @@
     los->failure_size = 0;
      
   }else{ 
-    Blocked_Space* nos = (Blocked_Space*)gc->nos;
-    if(nos->num_managed_blocks == 0) return;
-
-    Mutator *mutator = (Mutator *)gc_get_tls();   
-    allocator_init_free_block((Allocator*)mutator, (Block_Header*)nos->blocks);
-    nos->free_block_idx++;
+    /* it is possible that NOS has no free block,
+       because MOS takes all space after fallback or LOS extension.
+       Allocator should be cleared. */
+    Allocator *allocator = (Allocator *)gc_get_tls();   
+    Boolean ok = nos_alloc_block(gc->nos, allocator);
+    /* we don't care about the return value. If no block is available, the first
+       allocation after mutator resumption will probably trigger OOME. */
   }
-    
+
   return;     
 }
 
@@ -554,12 +582,17 @@
 }
 
 static inline void nos_collection(Space *nos)
-{ fspace_collection((Fspace*)nos); }
+{ 
+  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL)
+    sspace_collection((Sspace*)nos); 
+  else
+    fspace_collection((Fspace*)nos); 
+}
 
 static inline void mos_collection(Space *mos)
 {
   if(MAJOR_ALGO == MAJOR_MARK_SWEEP)
-    sspace_collection((Sspace*)mos);
+    wspace_collection((Wspace*)mos);
   else
     mspace_collection((Mspace*)mos);
 }
@@ -623,10 +656,19 @@
     }
   }
 }
-
+ 
 static void nos_reset_after_collection(Space *nos)
 {
-  fspace_reset_after_collection((Fspace*)nos);
+  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL)
+    sspace_reset_after_collection((Sspace*)nos);
+  else
+    fspace_reset_after_collection((Fspace*)nos);
+}
+
+static void nos_prepare_for_collection(Space *nos)
+{
+  if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL)
+    sspace_prepare_for_collection((Sspace*)nos);
 }
 
 static void mos_reset_after_collection(Space *mos)
@@ -634,7 +676,7 @@
   if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
     mspace_reset_after_collection((Mspace*)mos);
   else
-    sspace_reset_after_collection((Sspace*)mos);
+    wspace_reset_after_collection((Wspace*)mos);
 }
 
 Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it. */
@@ -662,7 +704,9 @@
   gc_gen_stats_reset_before_collection(gc);
   gc_gen_collector_stats_reset(gc);
 #endif
-  
+
+  nos_prepare_for_collection(nos);
+
   if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){
 
     INFO2("gc.process", "GC: start minor collection ...\n");
@@ -747,6 +791,7 @@
     
     if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
       los->move_object = TRUE;
+
     mos_collection(mos); /* collect both mos and nos */
     los_collection(los);
     if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
@@ -778,7 +823,7 @@
   
   assert(MAJOR_ALGO == MAJOR_MARK_SWEEP || !los->move_object);
   
-  if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
+  if(MAJOR_ALGO != MAJOR_MARK_SWEEP && MINOR_ALGO != MINOR_NONGEN_SEMISPACE_POOL){
     gc_gen_adjust_heap_size(gc);
     
     int64 pause_time = time_now() - gc_start_time;
@@ -863,14 +908,6 @@
   }
 }
 
-void gc_gen_hook_for_collector_init(Collector *collector)
-{
-  if(MAJOR_ALGO == MAJOR_MARK_SWEEP){
-    allocator_init_local_chunks((Allocator*)collector);
-    collector_init_free_chunk_list(collector);
-  }
-}
-
 void gc_gen_collection_verbose_info(GC_Gen *gc, int64 pause_time, int64 mutator_time)
 {
 
@@ -960,6 +997,43 @@
     <<"\nGC: total collection time: "<<stats->total_pause_time
     <<"\nGC: total appliction execution time: "<<stats->total_mutator_time<<"\n");
 #endif
+}
+
+/* init collector alloc_space */
+void gc_gen_init_collector_alloc(GC_Gen* gc, Collector* collector)
+{
+  if(MAJOR_ALGO == MAJOR_MARK_SWEEP){
+    allocator_init_local_chunks((Allocator*)collector);
+    gc_init_collector_free_chunk_list(collector);
+  }
+
+  Allocator* allocator = (Allocator*)collector;
+  
+  if( MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+    allocator->alloc_space = gc->nos; 
+    /* init backup allocator */
+    unsigned int size = sizeof(Allocator);
+    allocator = (Allocator*)STD_MALLOC(size);  //assign its alloc_space below.
+    memset(allocator, 0, size);  
+    collector->backup_allocator = allocator;
+  }
+    
+  allocator->alloc_space = gc->mos;
+}
+
+void gc_gen_reset_collector_alloc(GC_Gen* gc, Collector* collector)
+{
+  alloc_context_reset((Allocator*)collector);
+  if( MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+    alloc_context_reset(collector->backup_allocator);
+  }      
+}
+
+void gc_gen_destruct_collector_alloc(GC_Gen* gc, Collector* collector)
+{
+  if( MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+    STD_FREE(collector->backup_allocator);  
+  }
 }
 
 

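A recurring pattern in this hunk: nos_alloc changes from a fixed function into a Space_Alloc_Func pointer bound once in gc_nos_initialize, so the per-allocation hot path no longer branches on MINOR_ALGO. The pattern in isolation, with malloc-backed stand-ins for the real space allocators:

#include <cstdlib>

typedef void* (*Space_Alloc_Func)(unsigned size, void* allocator);

/* stand-ins for sspace_alloc / fspace_alloc */
static void* semispace_alloc(unsigned size, void*) { return std::malloc(size); }
static void* forward_alloc(unsigned size, void*)   { return std::malloc(size); }

static Space_Alloc_Func nos_alloc_fn = nullptr;

/* bind the allocator once, at space-initialization time */
void nos_init(bool use_semispace)
{
  nos_alloc_fn = use_semispace ? semispace_alloc : forward_alloc;
}

/* the hot path pays one indirect call and no algorithm branch */
void* alloc_in_nos(unsigned size, void* allocator)
{
  return nos_alloc_fn(size, allocator);
}
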
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h Wed Dec 26 02:17:10 2007
@@ -25,10 +25,15 @@
 
 #include "../common/gc_common.h"
 #include "../thread/gc_thread.h"
+
 #include "../trace_forward/fspace.h"
+#include "../semi_space/sspace.h"
+
 #include "../mark_compact/mspace.h"
 #include "../los/lspace.h"
-#include "../mark_sweep/sspace.h"
+
+#include "../mark_sweep/wspace.h"
+
 #include "../finalizer_weakref/finalizer_weakref_metadata.h"
 
 #ifdef GC_GEN_STATS
@@ -105,6 +110,7 @@
 
   SpinLock concurrent_mark_lock;
   SpinLock enumerate_rootset_lock;
+  SpinLock concurrent_sweep_lock;
 
   
   /* system info */
@@ -165,7 +171,7 @@
 }
 
 extern Space_Alloc_Func mos_alloc;
-void* nos_alloc(unsigned size, Allocator *allocator);
+extern Space_Alloc_Func nos_alloc;
 extern Space_Alloc_Func los_alloc;
 void* los_try_alloc(POINTER_SIZE_INT size, GC* gc);
 
@@ -186,6 +192,9 @@
 void gc_gen_reclaim_heap(GC_Gen* gc, int64 gc_start_time);
 
 void gc_gen_assign_free_area_to_mutators(GC_Gen* gc);
+void gc_gen_init_collector_alloc(GC_Gen* gc, Collector* collector);
+void gc_gen_reset_collector_alloc(GC_Gen* gc, Collector* collector);
+void gc_gen_destruct_collector_alloc(GC_Gen* gc, Collector* collector);
 
 void gc_gen_adjust_heap_size(GC_Gen* gc, int64 pause_time);
 
@@ -201,6 +210,8 @@
 extern Boolean GEN_NONGEN_SWITCH ;
 
 #endif /* ifndef _GC_GEN_H_ */
+
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp Wed Dec 26 02:17:10 2007
@@ -376,15 +376,15 @@
   }else if ( new_nos_size >= curr_nos_size ){
     INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...");
     POINTER_SIZE_INT adapt_size = new_nos_size - curr_nos_size;
-    INFO2("gc.space", "GC: Space Adapt:  nos  --->  mos  ("
+    INFO2("gc.space", "GC: Space Adapt:  mos  --->  nos  ("
       <<verbose_print_size(adapt_size)
-      <<" size was transfered from nos to mos)\n"); 
+      <<" size was transferred from mos to nos)\n"); 
   } else {
     INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...");
     POINTER_SIZE_INT  adapt_size = curr_nos_size - new_nos_size;
-    INFO2("gc.space", "GC: Space Adapt:  mos  --->  nos  ("
+    INFO2("gc.space", "GC: Space Adapt:  nos  --->  mos  ("
       <<verbose_print_size(adapt_size)
-      <<" size was transfered from mos to nos)\n"); 
+      <<" size was transferred from nos to mos)\n"); 
   }
 
   /* below are ajustment */  
@@ -449,13 +449,13 @@
     POINTER_SIZE_INT adapt_size = new_nos_size - curr_nos_size;
     INFO2("gc.space", "GC: Space Adapt:  mos  --->  nos  ("
       <<verbose_print_size(adapt_size)
-      <<" size was transfered from mos to nos)\n"); 
+      <<" size was transferred from mos to nos)\n"); 
   } else {
     INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...\n");
     POINTER_SIZE_INT  adapt_size = curr_nos_size - new_nos_size;
     INFO2("gc.space", "GC: Space Adapt:  nos  --->  mos  ("
       <<verbose_print_size(adapt_size)
-      <<" size was transfered from nos to mos)\n"); 
+      <<" size was transferred from nos to mos)\n"); 
   }
   
   POINTER_SIZE_INT used_mos_size = blocked_space_used_mem_size((Blocked_Space*)mspace);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp Wed Dec 26 02:17:10 2007
@@ -58,76 +58,64 @@
     GCHelper_clss = *vm_class_ptr;
 }
 
-#if !defined(_IPF_)
-
 JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getZeroingSize(JNIEnv *e, jclass c)
 {
+#if defined(ALLOC_ZEROING) && defined(ALLOC_PREFETCH)
     return (jint)ZEROING_SIZE;
+#else
+    return (jint)0;
+#endif
 }
 
 JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getPrefetchDist(JNIEnv *e, jclass c)
 {
+#if defined(ALLOC_ZEROING) && defined(ALLOC_PREFETCH)
     return (jint)PREFETCH_DISTANCE;
+#else
+    return (jint)0;
+#endif
 }
 
 JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getPrefetchStride(JNIEnv *e, jclass c)
 {
+#if defined(ALLOC_ZEROING) && defined(ALLOC_PREFETCH)
     return (jint)PREFETCH_STRIDE;
+#else
+    return (jint)0;
+#endif
 }
 
-JNIEXPORT jboolean JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_isPrefetchEnabled(JNIEnv *, jclass) {
+JNIEXPORT jboolean JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_isPrefetchEnabled(JNIEnv *, jclass) 
+{
+#if defined(ALLOC_ZEROING) && defined(ALLOC_PREFETCH)
    return (jboolean) PREFETCH_ENABLED;
+#else
+    return (jboolean)JNI_FALSE;
+#endif
 }
 
-#else /* _IPF_ is defined*/
-/*
- Alloc prefetch is disabled in GC code by default. Moreover, allocation helpers are not enabled as well.
- So return zeroes for prefetch distance, prefetch stride and zeroing size here. 
- Also isPrefetchEnabled returns JNI_FALSE. These defaults should be taken into account 
- when enabling helpers on IPF.
-*/
-
-JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getZeroingSize(JNIEnv *e, jclass c)
+JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getTlaFreeOffset(JNIEnv *, jclass) 
 {
-    return 0;
+    return (jint)((POINTER_SIZE_INT) &(((Allocator*)0)->free));
 }
 
-JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getPrefetchDist(JNIEnv *e, jclass c)
+JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getTlaCeilingOffset(JNIEnv *, jclass) 
 {
-    return 0;
+    return (jint)((POINTER_SIZE_INT) &(((Allocator*)0)->ceiling));
 }
 
-JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getPrefetchStride(JNIEnv *e, jclass c)
+JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getTlaEndOffset(JNIEnv *, jclass) 
 {
-    return 0;
-}
-
-JNIEXPORT jboolean JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_isPrefetchEnabled(JNIEnv *, jclass) {
-   return (jboolean) JNI_FALSE;
+    return (jint)((POINTER_SIZE_INT) &(((Allocator*)0)->end));
 }
-#endif /* _IPF_ */
-
 
-JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getTlaFreeOffset(JNIEnv *, jclass) {
-    Allocator allocator;
-    return (jint) ((POINTER_SIZE_INT)&allocator.free - (POINTER_SIZE_INT)&allocator);
-}
-
-JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getTlaCeilingOffset(JNIEnv *, jclass) {
-    Allocator allocator;
-    return (jint) ((POINTER_SIZE_INT)&allocator.ceiling - (POINTER_SIZE_INT)&allocator);
-}
-
-JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getTlaEndOffset(JNIEnv *, jclass) {
-    Allocator allocator;
-    return (jint) ((POINTER_SIZE_INT)&allocator.end - (POINTER_SIZE_INT)&allocator);
-}
-
-JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getGCObjectAlignment(JNIEnv *, jclass) {
+JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getGCObjectAlignment(JNIEnv *, jclass) 
+{
    return (jint) GC_OBJECT_ALIGNMENT;
 }
 
-JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getLargeObjectSize(JNIEnv *, jclass) {
+JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getLargeObjectSize(JNIEnv *, jclass) 
+{
    return (jint) GC_OBJ_SIZE_THRESHOLD;
 }
 

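The rewritten getTla*Offset natives compute field offsets by taking a member's address at a null base pointer instead of materializing a stack Allocator. Standard C/C++ spells that computation offsetof; both forms are shown below against a stand-in layout. The null-pointer form is formally undefined behavior but is the idiom the patch, like many VMs, relies on:

#include <cstddef>
#include <cstdio>

struct Allocator_Stub { void* free; void* ceiling; void* end; };

int main()
{
  /* idiom used in the patch: offset = address of the member at object 0 */
  size_t a = (size_t) &(((Allocator_Stub*)0)->ceiling);
  /* portable spelling of the same computation */
  size_t b = offsetof(Allocator_Stub, ceiling);
  std::printf("%zu %zu\n", a, b);   /* prints the same offset twice */
  return 0;
}
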
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp Wed Dec 26 02:17:10 2007
@@ -206,9 +206,7 @@
     Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc);
     Free_Area_Pool* pool = lspace->free_pool;
 
-    if(gc_need_start_concurrent_mark(allocator->gc))
-      gc_start_concurrent_mark(allocator->gc);   
-
+    gc_try_schedule_collection(allocator->gc, GC_CAUSE_NIL);
     
     while( try_count < 2 ){
         if(p_result = lspace_try_alloc(lspace, alloc_size))
@@ -273,7 +271,7 @@
 
     if( obj_info != 0 ) {
       collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
-      collector_remset_add_entry(collector, (Partial_Reveal_Object **)obj_info);
+      collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
     }
       
     obj_set_fw_in_oi(p_obj, dest_addr);
@@ -480,5 +478,7 @@
   TRACE2("gc.process", "GC: end of lspace sweep algo ...\n");
   return;
 }
+
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp Wed Dec 26 02:17:10 2007
@@ -41,8 +41,8 @@
   assert(p_obj);
   assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
 
-  Partial_Reveal_VTable *vtable = uncompress_vt(obj_get_vt(p_obj));
-  if(VTABLE_TRACING)
+  Partial_Reveal_VTable *vtable = decode_vt(obj_get_vt(p_obj));
+  if(TRACE_JLC_VIA_VTABLE)
     if(!(vtable->vtmark & VT_FALLBACK_MARKED)) {
       vtable->vtmark |= VT_FALLBACK_MARKED;  //we need different marking for fallback compaction
       collector_tracestack_push(collector, &(vtable->jlC));
@@ -203,24 +203,42 @@
 }
 
 #ifdef USE_32BITS_HASHCODE
+
+/* for semispace NOS, only the fromspace actually needs its oi cleaned. */
 void fallback_clear_fwd_obj_oi(Collector* collector)
 {
   GC* gc = collector->gc;
+  Blocked_Space* space = (Blocked_Space*)((GC_Gen*)gc)->nos;
+
   assert(gc_match_kind(gc, FALLBACK_COLLECTION));
 
   unsigned int num_active_collectors = gc->num_active_collectors;
   atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
   
-  Block_Header* curr_block = fspace_get_next_block();
+  Block_Header* curr_block = blocked_space_block_iterator_next(space);
   while(curr_block){
     Partial_Reveal_Object* curr_obj = (Partial_Reveal_Object*) curr_block->base;
     while(curr_obj < curr_block->free){
+      unsigned int obj_size = vm_object_size(curr_obj);
+      /* a forwarded object is dead (after fallback marking), but we need to know its size to iterate over live objects */
       if(obj_is_fw_in_oi(curr_obj)){
-        set_obj_info(curr_obj, (Obj_Info_Type)0);
+        if(obj_is_sethash_in_vt(curr_obj)){ 
+          /* this only happens in semispace GC, where an object with an attached hashcode
+             is forwarded. This object should be in survivor_area, forwarded from fromspace
+             in the last minor collection. We restore its hash bits correctly in oi. */
+          set_obj_info(curr_obj, (Obj_Info_Type)HASHCODE_SET_ATTACHED);
+        }else{
+          set_obj_info(curr_obj, (Obj_Info_Type)0);
+        }
       }
-      curr_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)curr_obj + vm_object_size(curr_obj));
+      /* if it's not forwarded, it may still have a hashcode attached if it's in
+         survivor_area; it escaped forwarding because fallback happened before it
+         could be forwarded. */
+      if(hashcode_is_attached(curr_obj))
+        obj_size += GC_OBJECT_ALIGNMENT;
+      
+      curr_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)curr_obj + obj_size);
     }
-    curr_block = fspace_get_next_block();
+    curr_block = blocked_space_block_iterator_next(space);
   }
   atomic_inc32(&num_finished_collectors);
   while(num_finished_collectors < num_active_collectors) ;
@@ -228,9 +246,13 @@
 
 void fallback_clear_fwd_obj_oi_init(Collector* collector)
 {
-  fspace_block_iterate_init((Fspace*)((GC_Gen*)collector->gc)->nos);
+  Blocked_Space* space = (Blocked_Space*)((GC_Gen*)collector->gc)->nos;
+  blocked_space_block_iterator_init(space); 
+    
 }
 #endif
+
+
 
 
 

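The core subtlety in the rewritten loop: once hashcodes can be attached, vm_object_size alone no longer advances the cursor correctly, because an attached object occupies one extra alignment slot. The walk in isolation, with trivial stand-in helpers:

#include <cstddef>
#include <cstdint>

static const size_t ALIGN = 8;          /* stands in for GC_OBJECT_ALIGNMENT */

struct Obj { uint32_t flags; };
static const uint32_t HASH_ATTACHED = 0x2;

static size_t base_size(const Obj*)     { return sizeof(Obj); }  /* like vm_object_size() */
static bool hash_attached(const Obj* o) { return (o->flags & HASH_ATTACHED) != 0; }

/* visit every object in a bump-allocated block [base, free) */
void walk_block(char* base, char* free)
{
  Obj* cur = (Obj*)base;
  while((char*)cur < free){
    size_t size = base_size(cur);
    if(hash_attached(cur))
      size += ALIGN;                    /* step over the hash word past the object */
    cur = (Obj*)((char*)cur + size);
  }
}
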
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp Wed Dec 26 02:17:10 2007
@@ -21,16 +21,6 @@
 
 #include "mspace.h"
 
-#include "../common/gc_space.h"
-
-static void mspace_destruct_blocks(Mspace* mspace)
-{   
-#ifdef USE_32BITS_HASHCODE
-  space_desturct_blocks((Blocked_Space*)mspace);
-#endif
-  return;
-}
-
 struct GC_Gen;
 extern void gc_set_mos(GC_Gen* gc, Space* space);
 extern Space* gc_get_nos(GC_Gen* gc);
@@ -95,8 +85,10 @@
 
 void mspace_destruct(Mspace* mspace)
 {
-  //FIXME:: when map the to-half, the decommission start address should change
-  mspace_destruct_blocks(mspace);
+    //FIXME:: when mapping the to-half, the decommission start address should change
+#ifdef USE_32BITS_HASHCODE
+  space_desturct_blocks((Blocked_Space*)mspace);
+#endif
   STD_FREE(mspace);  
 }
 
@@ -180,4 +172,6 @@
 {
     return mspace->expected_threshold_ratio;
 }
+
+
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp Wed Dec 26 02:17:10 2007
@@ -43,20 +43,16 @@
     return TRUE;
   }
 
-  /* Mspace is out, a collection should be triggered. It can be caused by mutator allocation
-     And it can be caused by collector allocation during nos forwarding. */
+  /* Mspace is out. If this is caused by a mutator, a collection should be
+     triggered; if by a collector, a fallback should be triggered. */
   return FALSE;
   
 }
 
-struct GC_Gen;
-Space* gc_get_mos(GC_Gen* gc);
 void* mspace_alloc(unsigned int size, Allocator* allocator)
 {
   void *p_return = NULL;
- 
-  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)allocator->gc);
-  
+   
   /* All chunks of data requested need to be multiples of GC_OBJECT_ALIGNMENT */
   assert((size % GC_OBJECT_ALIGNMENT) == 0);
   assert( size <= GC_OBJ_SIZE_THRESHOLD );
@@ -66,7 +62,8 @@
   if(p_return) return p_return;
   
   /* grab a new block */
-  Boolean ok = mspace_alloc_block(mspace, allocator);
+  Mspace* mspace = (Mspace*)allocator->alloc_space;
+  Boolean ok = mspace_alloc_block(mspace, allocator);
   if(!ok) return NULL; 
   
   p_return = thread_local_alloc(size, allocator);
@@ -74,5 +71,7 @@
     
   return p_return;
 }
+
+
 
 
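For context on mspace_alloc above: thread_local_alloc is the lock-free fast path against the allocator's current block, and mspace_alloc_block is the slow path that grabs a fresh block from the space. A rough sketch of the bump-pointer fast path, with illustrative field names rather than the actual Allocator layout:

    static void* bump_pointer_alloc(unsigned int size, Allocator* allocator)
    {
      char *free_slot = (char *)allocator->free;
      char *new_free  = free_slot + size;
      if(new_free > (char *)allocator->ceiling)
        return NULL;               /* current block exhausted: take the slow path */
      allocator->free = new_free;  /* no lock needed: the block is thread-local */
      return free_slot;
    }
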

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp Wed Dec 26 02:17:10 2007
@@ -15,10 +15,6 @@
  *  limitations under the License.
  */
 
-/**
- * @author Chunrong Lai, 2006/12/25
- */
-
 #include "mspace_collect_compact.h"
 #include "../trace_forward/fspace.h"
 #include "../los/lspace.h"
@@ -45,11 +41,11 @@
     first_block_to_move = nos_start_block;
 }
 
-static POINTER_SIZE_INT fspace_shrink(Fspace *fspace)
+static POINTER_SIZE_INT nspace_shrink(Fspace *nspace)
 {
-  void *committed_nos_end = (void *)((POINTER_SIZE_INT)space_heap_start((Space *)fspace) + fspace->committed_heap_size);
+  void *committed_nos_end = (void *)((POINTER_SIZE_INT)space_heap_start((Space *)nspace) + nspace->committed_heap_size);
   
-  POINTER_SIZE_INT nos_used_size = (POINTER_SIZE_INT)nos_first_free_block - (POINTER_SIZE_INT)fspace->heap_start;
+  POINTER_SIZE_INT nos_used_size = (POINTER_SIZE_INT)nos_first_free_block - (POINTER_SIZE_INT)nspace->heap_start;
   POINTER_SIZE_INT nos_free_size = (POINTER_SIZE_INT)committed_nos_end - (POINTER_SIZE_INT)nos_first_free_block;
   POINTER_SIZE_INT decommit_size = (nos_used_size <= nos_free_size) ? nos_used_size : nos_free_size;
   assert(decommit_size);
@@ -64,26 +60,26 @@
   Boolean result = vm_decommit_mem(decommit_base, decommit_size);
   assert(result == TRUE);
   
-  fspace->committed_heap_size = (POINTER_SIZE_INT)decommit_base - (POINTER_SIZE_INT)fspace->heap_start;
-  fspace->num_managed_blocks = (unsigned int)(fspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT);
+  nspace->committed_heap_size = (POINTER_SIZE_INT)decommit_base - (POINTER_SIZE_INT)nspace->heap_start;
+  nspace->num_managed_blocks = (unsigned int)(nspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT);
   
-  Block_Header *new_last_block = (Block_Header *)&fspace->blocks[fspace->num_managed_blocks - 1];
-  fspace->ceiling_block_idx = new_last_block->block_idx;
+  Block_Header *new_last_block = (Block_Header *)&nspace->blocks[nspace->num_managed_blocks - 1];
+  nspace->ceiling_block_idx = new_last_block->block_idx;
   new_last_block->next = NULL;
   
   return decommit_size;
 }
 
-static void link_mspace_extended_blocks(Mspace *mspace, Fspace *fspace)
+static void link_mspace_extended_blocks(Mspace *mspace, Fspace *nspace)
 {
   Block_Header *old_last_mos_block = (Block_Header *)(mos_first_new_block -1);
   old_last_mos_block->next = (Block_Header *)mos_first_new_block;
   void *new_committed_mos_end = (void *)((POINTER_SIZE_INT)space_heap_start((Space *)mspace) + mspace->committed_heap_size); 
   Block_Header *new_last_mos_block = (Block_Header *)((Block *)new_committed_mos_end -1);
-  new_last_mos_block->next = (Block_Header *)space_heap_start((Space *)fspace);
+  new_last_mos_block->next = (Block_Header *)space_heap_start((Space *)nspace);
 }
 
-static Block *mspace_extend_without_link(Mspace *mspace, Fspace *fspace, unsigned int commit_size)
+static Block *mspace_extend_without_link(Mspace *mspace, Fspace *nspace, unsigned int commit_size)
 {
   assert(commit_size && !(commit_size % GC_BLOCK_SIZE_BYTES));
   
@@ -267,11 +263,14 @@
 static volatile unsigned int num_space_changing_collectors = 0;
 
 #ifndef STATIC_NOS_MAPPING
+
+/* FIXME:: this is a sequential process; the atomic parallel constructs should be removed.
+   Better to call this function in the sequential region of the last phase. */
 void mspace_extend_compact(Collector *collector)
 {
   GC_Gen *gc_gen = (GC_Gen *)collector->gc;
-  Mspace *mspace = (Mspace *)gc_gen->mos;
-  Fspace *fspace = (Fspace *)gc_gen->nos;
+  Blocked_Space *mspace = (Blocked_Space *)gc_gen->mos;
+  Blocked_Space *nspace = (Blocked_Space *)gc_gen->nos;
 
   /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/
   gc_gen->collect_kind |= EXTEND_COLLECTION;
@@ -281,18 +280,20 @@
   atomic_cas32( &num_space_changing_collectors, 0, num_active_collectors + 1);
   old_num = atomic_inc32(&num_space_changing_collectors);
   if( ++old_num == num_active_collectors ){
-     Block *old_nos_boundary = fspace->blocks;
+     if(NOS_SIZE) /* when NOS_SIZE is specified, it can't be shrunk. */
+       WARN2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: MOS is overflowed, have to reduce NOS size.");
+     Block *old_nos_boundary = nspace->blocks;
      nos_boundary = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
-     if(fspace->num_managed_blocks != 0)
+     if(nspace->num_managed_blocks != 0) /* FIXME:: why can it be 0 here?? */
        assert(nos_boundary > old_nos_boundary);
      POINTER_SIZE_INT mem_change_size = ((Block *)nos_boundary - old_nos_boundary) << GC_BLOCK_SHIFT_COUNT;
-     fspace->heap_start = nos_boundary;
-     fspace->blocks = (Block *)nos_boundary;
-     fspace->committed_heap_size -= mem_change_size;
-     fspace->num_managed_blocks = (unsigned int)(fspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT);
-     fspace->num_total_blocks = fspace->num_managed_blocks;
-     fspace->first_block_idx = mspace->free_block_idx;
-     fspace->free_block_idx = fspace->first_block_idx;
+     nspace->heap_start = nos_boundary;
+     nspace->blocks = (Block *)nos_boundary;
+     nspace->committed_heap_size -= mem_change_size;
+     nspace->num_managed_blocks = (unsigned int)(nspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT);
+     nspace->num_total_blocks = nspace->num_managed_blocks;
+     nspace->first_block_idx = mspace->free_block_idx;
+     nspace->free_block_idx = nspace->first_block_idx;
      
      mspace->heap_end = nos_boundary;
      mspace->committed_heap_size += mem_change_size;
@@ -315,7 +316,7 @@
 {
   GC_Gen *gc_gen = (GC_Gen *)collector->gc;
   Mspace *mspace = gc_gen->mos;
-  Fspace *fspace = gc_gen->nos;
+  Fspace *nspace = gc_gen->nos;
   Lspace *lspace = gc_gen->los;
 
   /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/
@@ -324,7 +325,7 @@
   unsigned int num_active_collectors = gc_gen->num_active_collectors;
   unsigned int old_num;
   
-  Block *nos_first_block = fspace->blocks;
+  Block *nos_first_block = nspace->blocks;
   nos_first_free_block = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
   assert(nos_first_free_block > nos_first_block);
   
@@ -333,8 +334,8 @@
     atomic_cas32( &num_space_changing_collectors, 0, num_active_collectors + 1);
     old_num = atomic_inc32(&num_space_changing_collectors);
     if( old_num == 0 ){
-      unsigned int mem_changed_size = fspace_shrink(fspace);
-      mos_first_new_block = mspace_extend_without_link(mspace, fspace, mem_changed_size);
+      unsigned int mem_changed_size = nspace_shrink(nspace);
+      mos_first_new_block = mspace_extend_without_link(mspace, nspace, mem_changed_size);
       
       set_first_and_end_block_to_move(collector, mem_changed_size);
       //mspace_block_iter_init_for_extension(mspace, (Block_Header *)first_block_to_move);
@@ -353,7 +354,7 @@
       /* init the iterator: prepare for refixing */
       lspace_refix_repointed_refs(collector, lspace, (void *)first_block_to_move, (void *)nos_first_free_block, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT);
       gc_refix_rootset(collector, (void *)first_block_to_move, (void *)nos_first_free_block, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT);
-      link_mspace_extended_blocks(mspace, fspace);
+      link_mspace_extended_blocks(mspace, nspace);
       mspace_block_iter_init_for_extension(mspace, (Block_Header *)first_block_to_move);
       num_refixing_collectors++;
     }

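The non-STATIC_NOS_MAPPING branch above amounts to moving the MOS/NOS boundary forward by whole blocks: MOS absorbs every block up to its free_block_idx and NOS shrinks by the same amount. A condensed sketch of that bookkeeping (field names follow the Blocked_Space usage in the diff; this is a sketch of the boundary move, not the full function):

    static void move_mos_nos_boundary(Blocked_Space *mos, Blocked_Space *nos)
    {
      Block *new_boundary = &mos->blocks[mos->free_block_idx - mos->first_block_idx];
      POINTER_SIZE_INT moved_bytes =
          (POINTER_SIZE_INT)(new_boundary - nos->blocks) << GC_BLOCK_SHIFT_COUNT;
      nos->heap_start = new_boundary;  /* NOS now begins where MOS ends */
      nos->blocks = new_boundary;
      nos->committed_heap_size -= moved_bytes;
      mos->heap_end = new_boundary;
      mos->committed_heap_size += moved_bytes;
    }
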
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp Wed Dec 26 02:17:10 2007
@@ -20,7 +20,6 @@
  */
 
 #include "mspace_collect_compact.h"
-#include "../trace_forward/fspace.h"
 #include "../los/lspace.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
@@ -28,7 +27,6 @@
 #include "../gen/gen_stats.h"
 #endif
 
-
 struct GC_Gen;
 Space* gc_get_nos(GC_Gen* gc);
 Space* gc_get_mos(GC_Gen* gc);
@@ -116,7 +114,7 @@
 
       if( obj_info != 0 ) {
         collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
-        collector_remset_add_entry(collector, (Partial_Reveal_Object **)obj_info);
+        collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
       }
       
       obj_set_fw_in_oi(p_obj, dest_addr);
@@ -376,7 +374,7 @@
       assert(obj_is_marked_in_vt(p_obj));
 #ifdef USE_32BITS_HASHCODE
       obj_clear_dual_bits_in_vt(p_obj);
- #else
+#else
       obj_unmark_in_vt(p_obj);
 #endif
 
@@ -411,7 +409,6 @@
 {
   GC* gc = collector->gc;
   Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
-  Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc);
   Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
   
   unsigned int num_active_collectors = gc->num_active_collectors;
@@ -421,7 +418,7 @@
     *have references  that are going to be repointed.
     */
 
-  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: mark live objects in heap ...");
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: marking...");
 
   unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
 
@@ -435,6 +432,7 @@
 
    /* last collector's world here */
   if( ++old_num == num_active_collectors ){
+
     if(!IGNORE_FINREF )
       collector_identify_finref(collector);
 #ifndef BUILD_IN_REFERENT
@@ -461,24 +459,24 @@
   }
   while(num_marking_collectors != num_active_collectors + 1);
 
-  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass1");
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass1 and start pass2: relocating mos&nos...");
 
   /* Pass 2: **************************************************
      assign target addresses for all to-be-moved objects */
+
   atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1);
 
 #ifdef USE_32BITS_HASHCODE
   if(gc_match_kind(gc, FALLBACK_COLLECTION))
     fallback_clear_fwd_obj_oi(collector);
 #endif
-  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: computer target addresses for to-be-moved objects in mos and nos ...");
   mspace_compute_object_target(collector, mspace);
   
   old_num = atomic_inc32(&num_repointing_collectors);
   /*last collector's world here*/
   if( ++old_num == num_active_collectors ){
     if(lspace->move_object) {
-      TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: computer target addresses for to-be-moved objects in los ...");
+      TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: relocating los ...");
       lspace_compute_object_target(collector, lspace);
     }
     gc->collect_result = gc_collection_result(gc);
@@ -492,12 +490,11 @@
   }
   while(num_repointing_collectors != num_active_collectors + 1);
   if(!gc->collect_result) return;
-  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2");
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2 and start pass3: repointing...");
 
   /* Pass 3: **************************************************
     *update all references whose objects are to be moved
     */
-  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass3: update all references ...");
   old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);
   mspace_fix_repointed_refs(collector, mspace);
   old_num = atomic_inc32(&num_fixing_collectors);
@@ -520,13 +517,11 @@
   }
   while(num_fixing_collectors != num_active_collectors + 1);
 
-  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3");
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3 and start pass4: moving...");
 
   /* Pass 4: **************************************************
      move objects                                             */
 
-  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass4: move objects to target address ...");
-
   atomic_cas32( &num_moving_collectors, 0, num_active_collectors);
   
   mspace_sliding_compact(collector, mspace); 
@@ -534,12 +529,11 @@
   atomic_inc32(&num_moving_collectors);
   while(num_moving_collectors != num_active_collectors);
 
-  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4");
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4 and start pass 5: restoring obj_info...");
 
   /* Pass 5: **************************************************
      restore obj_info                                         */
 
-  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass5: restore obj_info ...");
   atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1);
   
   collector_restore_obj_info(collector);
@@ -557,7 +551,8 @@
   while(num_restoring_collectors != num_active_collectors + 1);
 
   /* Dealing with out of memory in mspace */
-  if(mspace->free_block_idx > fspace->first_block_idx){
+  void* mspace_border = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
+  if( mspace_border > nos_boundary){
     atomic_cas32( &num_extending_collectors, 0, num_active_collectors);
     
     mspace_extend_compact(collector);
@@ -566,16 +561,7 @@
     while(num_extending_collectors != num_active_collectors);
   }
 
-  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass5 ...");
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass5 and done.");
 
-  if( (POINTER_SIZE_INT)collector->thread_handle != 0 ){
-    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]  finished");
-    return;
-  }
-  
-  /* Leftover: **************************************************
-   */
-  
-  TRACE2("gc.process", "GC: collector[0]  finished");
   return;
 }

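The five passes above synchronize with one recurring idiom: a shared counter is reset by compare-and-swap, every collector increments it, and the last arrival runs a single-threaded epilogue ("last collector's world") before bumping the counter past num_active_collectors to release the spinning threads. Distilled into one helper (a sketch of the idiom as used in this file, not code from the commit):

    static volatile unsigned int num_phase_collectors = 0;

    static void phase_barrier(unsigned int num_active_collectors, void (*epilogue)(void))
    {
      /* reset the counter the previous pass left at num_active_collectors+1 */
      atomic_cas32(&num_phase_collectors, 0, num_active_collectors + 1);
      unsigned int old_num = atomic_inc32(&num_phase_collectors);
      if(old_num + 1 == num_active_collectors){  /* last collector's world */
        if(epilogue) epilogue();
        num_phase_collectors++;                  /* open the gate: counter == N+1 */
      }
      while(num_phase_collectors != num_active_collectors + 1);  /* spin until released */
    }
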
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp Wed Dec 26 02:17:10 2007
@@ -55,8 +55,8 @@
   vm_notify_obj_alive( (void *)p_obj);
   assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
 
-  Partial_Reveal_VTable *vtable = uncompress_vt(obj_get_vt(p_obj));
-  if(VTABLE_TRACING)
+  Partial_Reveal_VTable *vtable = decode_vt(obj_get_vt(p_obj));
+  if(TRACE_JLC_VIA_VTABLE)
     if(vtable->vtmark == VT_UNMARKED) {
       vtable->vtmark = VT_MARKED;
       if(obj_mark_in_vt(vtable->jlC))

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp Wed Dec 26 02:17:10 2007
@@ -20,6 +20,7 @@
 #ifdef USE_MARK_SWEEP_GC
 
 #include "gc_ms.h"
+#include "wspace_mark_sweep.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 #include "../common/compressed_ref.h"
 #include "../thread/marker.h"
@@ -38,14 +39,14 @@
   assert(max_heap_size <= max_heap_size_bytes);
   assert(max_heap_size >= min_heap_size_bytes);
   
-  void *sspace_base;
-  sspace_base = vm_reserve_mem(0, max_heap_size);
-  sspace_initialize((GC*)gc_ms, sspace_base, max_heap_size, max_heap_size);
+  void *wspace_base;
+  wspace_base = vm_reserve_mem(0, max_heap_size);
+  wspace_initialize((GC*)gc_ms, wspace_base, max_heap_size, max_heap_size);
   
-  HEAP_NULL = (POINTER_SIZE_INT)sspace_base;
+  HEAP_NULL = (POINTER_SIZE_INT)wspace_base;
   
-  gc_ms->heap_start = sspace_base;
-  gc_ms->heap_end = (void*)((POINTER_SIZE_INT)sspace_base + max_heap_size);
+  gc_ms->heap_start = wspace_base;
+  gc_ms->heap_end = (void*)((POINTER_SIZE_INT)wspace_base + max_heap_size);
   gc_ms->reserved_heap_size = max_heap_size;
   gc_ms->committed_heap_size = max_heap_size;
   gc_ms->num_collections = 0;
@@ -54,40 +55,72 @@
 
 void gc_ms_destruct(GC_MS *gc_ms)
 {
-  Sspace *sspace = gc_ms->sspace;
-  void *sspace_start = sspace->heap_start;
-  sspace_destruct(sspace);
-  gc_ms->sspace = NULL;
-  vm_unmap_mem(sspace_start, space_committed_size((Space*)sspace));
+  Wspace *wspace = gc_ms->wspace;
+  void *wspace_start = wspace->heap_start;
+  wspace_destruct(wspace);
+  gc_ms->wspace = NULL;
+  vm_unmap_mem(wspace_start, space_committed_size((Space*)wspace));
 }
 
 void gc_ms_reclaim_heap(GC_MS *gc)
 {
   if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
   
-  Sspace *sspace = gc_ms_get_sspace(gc);
+  Wspace *wspace = gc_ms_get_wspace(gc);
   
-  sspace_collection(sspace);
+  wspace_collection(wspace);
   
-  sspace_reset_after_collection(sspace);
+  wspace_reset_after_collection(wspace);
   
   if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
 }
 
-void sspace_mark_scan_concurrent(Marker* marker);
+void wspace_mark_scan_concurrent(Marker* marker);
 void gc_ms_start_concurrent_mark(GC_MS* gc, unsigned int num_markers)
 {
   if(gc->num_active_markers == 0)
     pool_iterator_init(gc->metadata->gc_rootset_pool);
   
-  marker_execute_task_concurrent((GC*)gc,(TaskType)sspace_mark_scan_concurrent,(Space*)gc->sspace, num_markers);
+  marker_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_concurrent,(Space*)gc->wspace, num_markers);
+}
+
+void wspace_mark_scan_mostly_concurrent(Marker* marker);
+void gc_ms_start_most_concurrent_mark(GC_MS* gc, unsigned int num_markers)
+{
+  if(gc->num_active_markers == 0)
+    pool_iterator_init(gc->metadata->gc_rootset_pool);
+  
+  marker_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_mostly_concurrent,(Space*)gc->wspace, num_markers);
+}
+
+void gc_ms_start_final_mark_after_concurrent(GC_MS* gc, unsigned int num_markers)
+{
+  pool_iterator_init(gc->metadata->gc_rootset_pool);
+  
+  marker_execute_task((GC*)gc,(TaskType)wspace_mark_scan_mostly_concurrent,(Space*)gc->wspace);
+}
+
+
+
+void wspace_sweep_concurrent(Collector* collector);
+void gc_ms_start_concurrent_sweep(GC_MS* gc, unsigned int num_collectors)
+{
+  ops_color_flip();
+  //FIXME: Need barrier here.
+  //apr_memory_rw_barrier();
+  gc_disenable_alloc_obj_live();
+  wspace_init_pfc_pool_iterator(gc->wspace);
+  
+  collector_execute_task_concurrent((GC*)gc, (TaskType)wspace_sweep_concurrent, (Space*)gc->wspace, num_collectors);
+
+  collector_release_weakref_sets((GC*)gc, num_collectors);
 }
 
 void gc_ms_start_concurrent_mark(GC_MS* gc)
 {
   pool_iterator_init(gc->metadata->gc_rootset_pool);
   
-  marker_execute_task_concurrent((GC*)gc,(TaskType)sspace_mark_scan_concurrent,(Space*)gc->sspace);
+  marker_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_concurrent,(Space*)gc->wspace);
 }
 
 void gc_ms_update_space_statistics(GC_MS* gc)
@@ -95,7 +128,7 @@
   POINTER_SIZE_INT num_live_obj = 0;
   POINTER_SIZE_INT size_live_obj = 0;
   
-  Space_Statistics* sspace_stat = gc->sspace->space_statistic;
+  Space_Statistics* wspace_stat = gc->wspace->space_statistic;
 
   unsigned int num_collectors = gc->num_active_collectors;
   Collector** collectors = gc->collectors;
@@ -106,10 +139,10 @@
     size_live_obj += collector->live_obj_size;
   }
 
-  sspace_stat->num_live_obj = num_live_obj;
-  sspace_stat->size_live_obj = size_live_obj;  
-  sspace_stat->last_size_free_space = sspace_stat->size_free_space;
-  sspace_stat->size_free_space = gc->committed_heap_size - size_live_obj;/*TODO:inaccurate value.*/
+  wspace_stat->num_live_obj = num_live_obj;
+  wspace_stat->size_live_obj = size_live_obj;  
+  wspace_stat->last_size_free_space = wspace_stat->size_free_space;
+  wspace_stat->size_free_space = gc->committed_heap_size - size_live_obj; /* TODO: inaccurate value. */
 }
 
 void gc_ms_iterate_heap(GC_MS *gc)

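gc_ms_start_concurrent_sweep above flips the allocation color before the sweepers start, so objects allocated while sweeping is in flight cannot be mistaken for garbage. The real ops_color_flip() lives in wspace_mark_sweep.h; the idea, with stand-in names, is an exchange of the two color masks:

    /* stand-in globals; the actual masks are defined in wspace_mark_sweep.h */
    static POINTER_SIZE_INT cur_alloc_color = 0x1;  /* color stamped on new objects */
    static POINTER_SIZE_INT cur_mark_color  = 0x2;  /* color stamped by the markers */

    static void color_flip(void)
    {
      /* last cycle's mark color becomes this cycle's alloc color: everything
         that survived marking now looks freshly allocated to the sweepers */
      POINTER_SIZE_INT temp = cur_alloc_color;
      cur_alloc_color = cur_mark_color;
      cur_mark_color  = temp;
    }
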
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h Wed Dec 26 02:17:10 2007
@@ -20,7 +20,7 @@
 
 #ifdef USE_MARK_SWEEP_GC
 
-#include "sspace.h"
+#include "wspace.h"
 
 
 /* heap size limit is not interesting. only for manual tuning purpose */
@@ -77,35 +77,37 @@
 
   SpinLock concurrent_mark_lock;
   SpinLock enumerate_rootset_lock;
+  SpinLock concurrent_sweep_lock;
+  
   /* system info */
   unsigned int _system_alloc_unit;
   unsigned int _machine_page_size_bytes;
   unsigned int _num_processors;
   /* END of GC --> */
   
-  Sspace *sspace;
+  Wspace *wspace;
   
 } GC_MS;
 
 //////////////////////////////////////////////////////////////////////////////////////////
 
 inline void *gc_ms_fast_alloc(unsigned size, Allocator *allocator)
-{ return sspace_thread_local_alloc(size, allocator); }
+{ return wspace_thread_local_alloc(size, allocator); }
 
 inline void *gc_ms_alloc(unsigned size, Allocator *allocator)
-{ return sspace_alloc(size, allocator); }
+{ return wspace_alloc(size, allocator); }
 
-inline Sspace *gc_ms_get_sspace(GC_MS *gc)
-{ return gc->sspace; }
+inline Wspace *gc_ms_get_wspace(GC_MS *gc)
+{ return gc->wspace; }
 
-inline void gc_ms_set_sspace(GC_MS *gc, Sspace *sspace)
-{ gc->sspace = sspace; }
+inline void gc_ms_set_wspace(GC_MS *gc, Wspace *wspace)
+{ gc->wspace = wspace; }
 
 inline POINTER_SIZE_INT gc_ms_free_memory_size(GC_MS *gc)
-{ return sspace_free_memory_size(gc_ms_get_sspace(gc)); }
+{ return wspace_free_memory_size(gc_ms_get_wspace(gc)); }
 
 inline POINTER_SIZE_INT gc_ms_total_memory_size(GC_MS *gc)
-{ return space_committed_size((Space*)gc_ms_get_sspace(gc)); }
+{ return space_committed_size((Space*)gc_ms_get_wspace(gc)); }
 
 /////////////////////////////////////////////////////////////////////////////////////////
 
@@ -117,6 +119,11 @@
 void gc_ms_start_concurrent_mark(GC_MS* gc);
 void gc_ms_start_concurrent_mark(GC_MS* gc, unsigned int num_markers);
 void gc_ms_update_space_statistics(GC_MS* gc);
+void gc_ms_start_concurrent_sweep(GC_MS* gc, unsigned int num_collectors);
+void gc_ms_start_most_concurrent_mark(GC_MS* gc, unsigned int num_markers);
+void gc_ms_start_final_mark_after_concurrent(GC_MS* gc, unsigned int num_markers);
+
+
 
 #endif // USE_MARK_SWEEP_GC
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp Wed Dec 26 02:17:10 2007
@@ -1,231 +1,2 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#include "sspace_chunk.h"
-#include "sspace_alloc.h"
-#include "sspace_mark_sweep.h"
-#include "sspace_verify.h"
-
-
-#define PFC_SORT_NUM  8
-
-void sspace_decide_compaction_need(Sspace *sspace)
-{
-  POINTER_SIZE_INT free_mem_size = free_mem_in_sspace(sspace, FALSE);
-  float free_mem_ratio = (float)free_mem_size / sspace->committed_heap_size;
-
-#ifdef USE_MARK_SWEEP_GC
-  if(!gc_mark_is_concurrent() && (free_mem_ratio > SSPACE_COMPACT_RATIO) && (sspace->gc->cause != GC_CAUSE_RUNTIME_FORCE_GC)){
-#else
-  if(gc_match_kind(sspace->gc, MAJOR_COLLECTION)){
-#endif
-    sspace->need_compact = sspace->move_object = TRUE;
-  } else {
-    sspace->need_compact = sspace->move_object = FALSE;
-  }
-}
-
-static inline void sorted_chunk_bucket_add_entry(Chunk_Header **head, Chunk_Header **tail, Chunk_Header *chunk)
-{
-  chunk->prev = NULL; /* Field adj_prev is used as prev */
-  
-  if(!*head){
-    assert(!*tail);
-    chunk->next = NULL;
-    *head = *tail = chunk;
-    return;
-  }
-  
-  assert(*tail);
-  chunk->next = *head;
-  (*head)->prev = chunk;
-  *head = chunk;
-}
-
-/* One assumption: pfc_pool is not empty */
-static Boolean pfc_pool_roughly_sort(Pool *pfc_pool, Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
-{
-  Chunk_Header *bucket_head[PFC_SORT_NUM];  /* Sorted chunk buckets' heads */
-  Chunk_Header *bucket_tail[PFC_SORT_NUM];  /* Sorted chunk buckets' tails */
-  unsigned int slot_num;
-  unsigned int chunk_num = 0;
-  unsigned int slot_alloc_num = 0;
-  
-  /* Init buckets' heads and tails */
-  memset(bucket_head, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);
-  memset(bucket_tail, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);
-  
-  /* Roughly sort chunks in pfc_pool */
-  pool_iterator_init(pfc_pool);
-  Chunk_Header *chunk = (Chunk_Header*)pool_iterator_next(pfc_pool);
-  if(chunk) slot_num = chunk->slot_num;
-  while(chunk){
-    ++chunk_num;
-    assert(chunk->alloc_num);
-    slot_alloc_num += chunk->alloc_num;
-    Chunk_Header *next_chunk = chunk->next;
-    unsigned int bucket_index = (chunk->alloc_num*PFC_SORT_NUM-1) / slot_num;
-    assert(bucket_index < PFC_SORT_NUM);
-    sorted_chunk_bucket_add_entry(&bucket_head[bucket_index], &bucket_tail[bucket_index], chunk);
-    chunk = next_chunk;
-  }
-  
-  /* Empty the pfc pool because some chunks in this pool will be free after compaction */
-  pool_empty(pfc_pool);
-  
-  /* If we can't get a free chunk after compaction, there is no need to compact.
-   * This condition includes that the chunk num in pfc pool is equal to 1, in which case there is also no need to compact
-   */
-  if(slot_num*(chunk_num-1) <= slot_alloc_num){
-    for(unsigned int i = 0; i < PFC_SORT_NUM; i++){
-      Chunk_Header *chunk = bucket_head[i];
-      while(chunk){
-        Chunk_Header *next_chunk = chunk->next;
-        pool_put_entry(pfc_pool, chunk);
-        chunk = next_chunk;
-      }
-    }
-    return FALSE;
-  }
-  
-  /* Link the sorted chunk buckets into one single ordered bidirectional list */
-  Chunk_Header *head = NULL;
-  Chunk_Header *tail = NULL;
-  for(unsigned int i = PFC_SORT_NUM; i--;){
-    assert((head && tail) || (!head && !tail));
-    assert((bucket_head[i] && bucket_tail[i]) || (!bucket_head[i] && !bucket_tail[i]));
-    if(!bucket_head[i]) continue;
-    if(!tail){
-      head = bucket_head[i];
-      tail = bucket_tail[i];
-    } else {
-      tail->next = bucket_head[i];
-      bucket_head[i]->prev = tail;
-      tail = bucket_tail[i];
-    }
-  }
-  
-  assert(head && tail);
-  *least_free_chunk = head;
-  *most_free_chunk = tail;
-  
-  return TRUE;
-}
-
-static inline Chunk_Header *get_least_free_chunk(Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
-{
-  if(!*least_free_chunk){
-    assert(!*most_free_chunk);
-    return NULL;
-  }
-  Chunk_Header *result = *least_free_chunk;
-  *least_free_chunk = (*least_free_chunk)->next;
-  if(*least_free_chunk)
-    (*least_free_chunk)->prev = NULL;
-  else
-    *most_free_chunk = NULL;
-  return result;
-}
-static inline Chunk_Header *get_most_free_chunk(Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
-{
-  if(!*most_free_chunk){
-    assert(!*least_free_chunk);
-    return NULL;
-  }
-  Chunk_Header *result = *most_free_chunk;
-  *most_free_chunk = (*most_free_chunk)->prev;
-  if(*most_free_chunk)
-    (*most_free_chunk)->next = NULL;
-  else
-    *least_free_chunk = NULL;
-  assert(!result->next);
-  return result;
-}
-
-static inline void move_obj_between_chunks(Chunk_Header **dest_ptr, Chunk_Header *src)
-{
-  Chunk_Header *dest = *dest_ptr;
-  assert(dest->slot_size == src->slot_size);
-  
-  unsigned int slot_size = dest->slot_size;
-  unsigned int alloc_num = src->alloc_num;
-  assert(alloc_num);
-  
-  while(alloc_num && dest){
-    Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(src);
-    void *target = alloc_in_chunk(dest);
-    assert(p_obj && target);
-    memcpy(target, p_obj, slot_size);
-#ifdef SSPACE_VERIFY
-    sspace_modify_mark_in_compact(target, p_obj, slot_size);
-#endif
-    obj_set_fw_in_oi(p_obj, target);
-    --alloc_num;
-  }
-  
-  /* dest might be set to NULL, so we use *dest_ptr here */
-  assert((*dest_ptr)->alloc_num <= (*dest_ptr)->slot_num);
-  src->alloc_num = alloc_num;
-  if(!dest){
-    assert((*dest_ptr)->alloc_num == (*dest_ptr)->slot_num);
-    *dest_ptr = NULL;
-    clear_free_slot_in_table(src->table, src->slot_index);
-  }
-}
-
-void sspace_compact(Collector *collector, Sspace *sspace)
-{
-  Chunk_Header *least_free_chunk, *most_free_chunk;
-  Pool *pfc_pool = sspace_grab_next_pfc_pool(sspace);
-  
-  for(; pfc_pool; pfc_pool = sspace_grab_next_pfc_pool(sspace)){
-    if(pool_is_empty(pfc_pool)) continue;
-    Boolean pfc_pool_need_compact = pfc_pool_roughly_sort(pfc_pool, &least_free_chunk, &most_free_chunk);
-    if(!pfc_pool_need_compact) continue;
-    
-    Chunk_Header *dest = get_least_free_chunk(&least_free_chunk, &most_free_chunk);
-    Chunk_Header *src = get_most_free_chunk(&least_free_chunk, &most_free_chunk);
-    Boolean src_is_new = TRUE;
-    while(dest && src){
-      if(src_is_new)
-        src->slot_index = 0;
-      chunk_depad_last_index_word(src);
-      move_obj_between_chunks(&dest, src);
-      if(!dest)
-        dest = get_least_free_chunk(&least_free_chunk, &most_free_chunk);
-      if(!src->alloc_num){
-        collector_add_free_chunk(collector, (Free_Chunk*)src);
-        src = get_most_free_chunk(&least_free_chunk, &most_free_chunk);
-        src_is_new = TRUE;
-      } else {
-        src_is_new = FALSE;
-      }
-    }
-    
-    /* Rebuild the pfc_pool */
-    if(dest)
-      sspace_put_pfc(sspace, dest);
-    if(src){
-      chunk_pad_last_index_word(src, cur_alloc_mask);
-      pfc_reset_slot_index(src);
-      sspace_put_pfc(sspace, src);
-    }
-  }
-}
-
 
 

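For the record, the compactor removed above sorted partially-free chunks into PFC_SORT_NUM occupancy buckets, linked the buckets from fullest to emptiest, and then refilled the fullest ("least free") chunks with objects taken from the emptiest ("most free") ones. A worked example of its bucket arithmetic, with illustrative values:

    unsigned int slot_num  = 32;  /* slots per chunk */
    unsigned int alloc_num = 20;  /* 20 of the 32 slots are live */
    unsigned int bucket_index = (alloc_num * 8 - 1) / slot_num;  /* PFC_SORT_NUM == 8 */
    /* bucket_index == (160-1)/32 == 4, the middle of the 0..7 range: fuller
       chunks land in higher buckets, so they end up at the head of the list
       and become destinations, while sparser chunks become sources. */
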

