harmony-commits mailing list archives

From: x..@apache.org
Subject: svn commit: r566913 [1/2] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ los/ mark_compact/ mark_sweep/ thread/ trace_forward/ utils/ verify/
Date: Fri, 17 Aug 2007 04:33:10 GMT
Author: xli
Date: Thu Aug 16 21:33:02 2007
New Revision: 566913

URL: http://svn.apache.org/viewvc?view=rev&rev=566913
Log:
HARMONY-3818 and HARMONY-4325: GC verbose support and mark-sweep algorithm improvement

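Most of the changes in this commit replace raw printf output with the category-tagged, stream-style logging macros from cxxlog.h (TRACE2, INFO2, WARN2, DIE2), so GC verbose output can be filtered by categories such as "gc.process", "gc.base", "gc.space" and "gc.collect". The following is only a rough sketch of that call style, with a hypothetical macro name; it is not the cxxlog.h implementation, which honors the VM's logging configuration:

    #include <iostream>

    /* Hypothetical stand-in for a category-tagged, stream-style logging macro
     * in the spirit of the INFO2/WARN2 calls added below. */
    #define SKETCH_LOG2(category, message) \
      do { std::cout << "[" << (category) << "] " << message << std::endl; } while(0)

    int main() {
      unsigned int max_heap_size = 256 << 20;  /* illustrative value, in bytes */
      SKETCH_LOG2("gc.base", "Max heap size reset to " << (max_heap_size >> 20) << " MB!");
      return 0;
    }
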
Modified:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/large_pages.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_pool.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Thu Aug 16 21:33:02 2007
@@ -65,7 +65,7 @@
         return_value = atoi(value);
         destroy_property_value(value);
     }else{
-        printf("property value %s is not set\n", property_name);
+        DIE2("gc.base","Warning: property value "<<property_name<<" is not set!");
         exit(0);
     }
       
@@ -77,7 +77,7 @@
   assert(property_name);
   char *value = get_property(property_name, VM_PROPERTIES);
   if (NULL == value){
-    printf("property value %s is not set\n", property_name);
+    DIE2("gc.base","Warning: property value "<<property_name<<" is not set!");
     exit(0);
   }
   
@@ -96,7 +96,7 @@
   {
     return_value = TRUE;
   }else{
-    printf("property value %s is not properly set\n", property_name);
+    DIE2("gc.base","Warning: property value "<<property_name<<" is not properly set!");
     exit(0);
   }
     
@@ -128,6 +128,7 @@
 
 void gc_parse_options(GC* gc) 
 {
+  TRACE2("gc.process", "GC: parse options ...\n");
   if (!get_boolean_property("vm.assert_dialog", TRUE, VM_PROPERTIES))
     disable_assert_dialogs();
   
@@ -139,11 +140,11 @@
 
     if (max_heap_size < min_heap_size){
       max_heap_size = min_heap_size;
-      printf("Max heap size: too small, reset to %d MB!\n", max_heap_size/MB);
+      WARN2("gc.base","Warning: Max heap size you set is too small, reset to "<<max_heap_size/MB<<" MB!");
     }
     if (0 == max_heap_size){
       max_heap_size = HEAP_SIZE_DEFAULT;
-      printf("Max heap size: zero, reset to %d MB! \n", max_heap_size/MB);
+      WARN2("gc.base","Warning: Max heap size you set equals zero, reset to "<<max_heap_size/MB<<" MB!");
     }
  
     min_heap_size = max_heap_size / 10;
@@ -157,13 +158,13 @@
     min_heap_size = get_size_property("gc.ms");
     if (min_heap_size < min_heap_size_bytes){
       min_heap_size = min_heap_size_bytes;
-      printf("Min heap size: too small, reset to %d MB! \n", min_heap_size/MB);    
+      WARN2("gc.base","Warning: Min heap size you set is too small, reset to "<<min_heap_size/MB<<" MB!");
     } 
   }
 
   if (min_heap_size > max_heap_size){
     max_heap_size = min_heap_size;
-    printf("Max heap size: too small, reset to %d MB\n", max_heap_size / MB);
+    WARN2("gc.base","Warning: Max heap size is too small, reset to "<<max_heap_size/MB<<" MB!");
   }
 
   min_heap_size_bytes = min_heap_size;
@@ -274,9 +275,15 @@
 
 void gc_copy_interior_pointer_table_to_rootset();
 
+/*used for computing collection time and mutator time*/
+static int64 collection_start_time = time_now();  
+static int64 collection_end_time = time_now();
 void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
 { 
-  int64 start_time =  time_now();
+  INFO2("gc.process", "\nGC: GC start ...\n");
+
+  collection_start_time =  time_now();
+  int64 mutator_time = collection_start_time -collection_end_time;
 
   /* FIXME:: before mutators suspended, the ops below should be very careful
      to avoid racing with mutators. */
@@ -284,22 +291,22 @@
   gc->cause = gc_cause;
   gc_decide_collection_kind((GC_Gen*)gc, gc_cause);
 
+#ifndef USE_MARK_SWEEP_GC
   gc_gen_update_space_before_gc((GC_Gen*)gc);
-
-#ifndef ONLY_SSPACE_IN_HEAP
   gc_compute_space_tune_size_before_marking(gc, gc_cause);
 #endif
 
 #ifdef MARK_BIT_FLIPPING
   if(gc_match_kind(gc, MINOR_COLLECTION)) mark_bit_flip();
 #endif
-  
+
   gc_metadata_verify(gc, TRUE);
 #ifndef BUILD_IN_REFERENT
   gc_finref_metadata_verify((GC*)gc, TRUE);
 #endif
   
   /* Stop the threads and collect the roots. */
+  INFO2("gc.process", "GC: stop the threads and enumerate rootset ...\n");
   gc_reset_rootset(gc);  
   vm_enumerate_root_set_all_threads();
   gc_copy_interior_pointer_table_to_rootset();
@@ -310,7 +317,7 @@
 
   if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   gc_gen_reclaim_heap((GC_Gen*)gc);
 #else
   gc_ms_reclaim_heap((GC_MS*)gc);
@@ -320,10 +327,14 @@
 
   gc_metadata_verify(gc, FALSE);
 
-  int64 pause_time = time_now() - start_time;  
+  collection_end_time = time_now(); 
+
+  int64 pause_time = collection_end_time - collection_start_time;  
   gc->time_collections += pause_time;
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
+  gc_gen_collection_verbose_info((GC_Gen*)gc, pause_time, mutator_time);
+  gc_gen_space_verbose_info((GC_Gen*)gc);
   gc_adjust_heap_size(gc, pause_time);
 
   gc_gen_adapt((GC_Gen*)gc, pause_time);
@@ -332,6 +343,7 @@
   if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
   
   if(!IGNORE_FINREF ){
+    INFO2("gc.process", "GC: finref process after collection ...\n");
     gc_put_finref_to_vm(gc);
     gc_reset_finref_metadata(gc);
     gc_activate_finref_threads((GC*)gc);
@@ -341,7 +353,7 @@
 #endif
   }
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   gc_space_tuner_reset(gc);
   gc_gen_update_space_after_gc((GC_Gen*)gc);
   gc_assign_free_area_to_mutators(gc);
@@ -349,6 +361,7 @@
 
   vm_reclaim_native_objs();  
   vm_resume_threads_after();
+  INFO2("gc.process", "GC: GC end\n");
   return;
 }
 

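For reference, the pause/mutator accounting added to gc_reclaim_heap above follows a simple pattern: the interval between the end of the previous collection and the start of the current one is mutator time, and the interval spent inside the collection is pause time. A minimal standalone sketch of the same bookkeeping, with std::chrono standing in for the VM's time_now() (that substitution is an assumption):

    #include <chrono>
    #include <cstdio>

    typedef long long int64;

    /* Stand-in for time_now(): microseconds on a monotonic clock. */
    static int64 time_now_us() {
      using namespace std::chrono;
      return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
    }

    static int64 collection_start_time = time_now_us();
    static int64 collection_end_time   = time_now_us();

    void reclaim_heap_sketch() {
      collection_start_time = time_now_us();
      int64 mutator_time = collection_start_time - collection_end_time;  /* application ran since last GC */

      /* ... stop the world, mark, then sweep or compact ... */

      collection_end_time = time_now_us();
      int64 pause_time = collection_end_time - collection_start_time;    /* time spent inside this GC */
      std::printf("pause %lld us, mutator %lld us\n", pause_time, mutator_time);
    }

    int main() { reclaim_heap_sketch(); return 0; }
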
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Thu Aug 16 21:33:02 2007
@@ -22,6 +22,7 @@
 #ifndef _GC_COMMON_H_
 #define _GC_COMMON_H_
 
+#include "cxxlog.h" 
 #include "port_vmem.h"
 
 #include "platform_lowlevel.h"
@@ -37,10 +38,13 @@
 
 #include "../gen/gc_for_barrier.h"
 
+#define GC_GEN_STATS
 #define null 0
 
 #define KB  (1<<10)
 #define MB  (1<<20)
+/*used to print size info in the verbose system*/
+#define verbose_print_size(size) (((size)/MB!=0)?(size)/MB:(((size)/KB!=0)?(size)/KB:(size)))<<(((size)/MB!=0)?"MB":(((size)/KB!=0)?"KB":"B"))
 
 #define BITS_PER_BYTE 8 
 #define BYTES_PER_WORD (sizeof(POINTER_SIZE_INT))
@@ -73,7 +77,7 @@
 
 #define USE_32BITS_HASHCODE
 
-//#define ONLY_SSPACE_IN_HEAP
+//#define USE_MARK_SWEEP_GC
 
 typedef void (*TaskType)(void*);
 
@@ -104,7 +108,8 @@
   MAJOR_COLLECTION = 0x2,
   FALLBACK_COLLECTION = 0x4,
   EXTEND_COLLECTION = 0x8,
-  UNIQUE_SWEEP_COLLECTION = 0x10
+  MARK_SWEEP_GC = 0x10,
+  SWEEP_COMPACT_GC = 0x20
 };
 
 extern Boolean IS_FALLBACK_COMPACTION;  /* only for mark/fw bits debugging purpose */
@@ -394,6 +399,11 @@
   Vector_Block* uncompressed_root_set;
 
   Space_Tuner* tuner;
+  
+  /* system info */
+  unsigned int _system_alloc_unit;
+  unsigned int _machine_page_size_bytes;
+  unsigned int _num_processors;
 
 }GC;
 
@@ -416,6 +426,8 @@
   assert(gc->collect_kind && kind);
   return gc->collect_kind & kind;
 }
+
+inline unsigned int gc_get_processor_num(GC* gc) { return gc->_num_processors; }
 
 void gc_parse_options(GC* gc);
 void gc_reclaim_heap(GC* gc, unsigned int gc_cause);

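The verbose_print_size macro defined above picks the largest unit (MB, then KB, then bytes) whose integer quotient is non-zero, so sizes are truncated to whole units. A small standalone check of that behaviour (the macro body is copied from the diff; printing to std::cout is just for the example):

    #include <iostream>

    #define KB (1<<10)
    #define MB (1<<20)
    #define verbose_print_size(size) (((size)/MB!=0)?(size)/MB:(((size)/KB!=0)?(size)/KB:(size)))<<(((size)/MB!=0)?"MB":(((size)/KB!=0)?"KB":"B"))

    int main() {
      std::cout << verbose_print_size(5*MB + 512*KB) << "\n";  /* prints "5MB" (truncated) */
      std::cout << verbose_print_size(12*KB) << "\n";          /* prints "12KB" */
      std::cout << verbose_print_size(300) << "\n";            /* prints "300B" */
      return 0;
    }
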
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Thu Aug 16 21:33:02 2007
@@ -20,6 +20,7 @@
  */
 
 #include <cxxlog.h>
+#include "port_sysinfo.h"
 #include "vm_threads.h"
 #include "compressed_ref.h"
 
@@ -41,11 +42,20 @@
 Boolean gc_requires_barriers() 
 {   return p_global_gc->generate_barrier; }
 
+static void gc_get_system_info(GC *gc)
+{
+  gc->_machine_page_size_bytes = (unsigned int)port_vmem_page_sizes()[0];
+  gc->_num_processors = port_CPUs_number();
+  gc->_system_alloc_unit = vm_get_system_alloc_unit();
+  SPACE_ALLOC_UNIT = max(gc->_system_alloc_unit, GC_BLOCK_SIZE_BYTES);
+}
+
 int gc_init() 
 {      
+  INFO2("gc.process", "GC: call GC init...\n");
   assert(p_global_gc == NULL);
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   unsigned int gc_struct_size = sizeof(GC_Gen);
 #else
   unsigned int gc_struct_size = sizeof(GC_MS);
@@ -58,10 +68,12 @@
   gc_parse_options(gc);
   
   gc_tls_init();
-
+  
+  gc_get_system_info(gc);
+  
   gc_metadata_initialize(gc); /* root set and mark stack */
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes);
 #else
   gc_ms_initialize((GC_MS*)gc, min_heap_size_bytes, max_heap_size_bytes);
@@ -78,14 +90,17 @@
   
   mutator_need_block = FALSE;
 
+  INFO2("gc.process", "GC: end of GC init\n");
   return JNI_OK;
 }
 
 void gc_wrapup() 
 { 
+  INFO2("gc.process", "GC: call GC wrapup ....");
   GC* gc =  p_global_gc;
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
+  gc_gen_wrapup_verbose((GC_Gen*)gc);
   gc_gen_destruct((GC_Gen*)gc);
 #else
   gc_ms_destruct((GC_MS*)gc);
@@ -104,6 +119,7 @@
   STD_FREE(p_global_gc);
 
   p_global_gc = NULL;
+  INFO2("gc.process", "GC: end of GC wrapup\n");
 }
 
 #ifdef COMPRESS_REFERENCE
@@ -177,7 +193,7 @@
 
 int64 gc_free_memory()
 {
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   return (int64)gc_gen_free_memory_size((GC_Gen*)p_global_gc);
 #else
   return (int64)gc_ms_free_memory_size((GC_MS*)p_global_gc);
@@ -187,7 +203,7 @@
 /* java heap size.*/
 int64 gc_total_memory() 
 {
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc));
 #else
   return (int64)((POINTER_SIZE_INT)gc_ms_total_memory_size((GC_MS*)p_global_gc));
@@ -196,7 +212,7 @@
 
 int64 gc_max_memory() 
 {
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc));
 #else
   return (int64)((POINTER_SIZE_INT)gc_ms_total_memory_size((GC_MS*)p_global_gc));
@@ -266,8 +282,8 @@
 #else //USE_32BITS_HASHCODE
 int32 gc_get_hashcode(Managed_Object_Handle p_object)
 {
-#ifdef ONLY_SSPACE_IN_HEAP
-  return (int32)p_object;
+#ifdef USE_MARK_SWEEP_GC
+  return (int32)0;//p_object;
 #endif
 
   Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)p_object;
@@ -325,7 +341,7 @@
     // data structures in not consistent for heap iteration
     if (!JVMTI_HEAP_ITERATION) return;
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
     gc_gen_iterate_heap((GC_Gen *)p_global_gc);
 #else
     gc_ms_iterate_heap((GC_MS*)p_global_gc);

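gc_get_system_info, moved here from gen.cpp, caches the machine page size, processor count and OS allocation unit on the common GC structure, and SPACE_ALLOC_UNIT becomes the larger of the OS allocation unit and the GC block size. Below is a hypothetical, POSIX-only stand-in for the port layer calls; port_vmem_page_sizes, port_CPUs_number and vm_get_system_alloc_unit are not used here, and the block size constant is an assumed value for illustration:

    #include <unistd.h>
    #include <algorithm>
    #include <cstdio>

    int main() {
      /* POSIX stand-ins for the port layer; illustration only. */
      unsigned int page_size = (unsigned int)sysconf(_SC_PAGESIZE);
      unsigned int num_cpus  = (unsigned int)sysconf(_SC_NPROCESSORS_ONLN);
      const unsigned int GC_BLOCK_SIZE_BYTES = 32 * 1024;   /* assumed block size */
      unsigned int space_alloc_unit = std::max(page_size, GC_BLOCK_SIZE_BYTES);
      std::printf("page %u, cpus %u, space alloc unit %u\n", page_size, num_cpus, space_alloc_unit);
      return 0;
    }
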
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp Thu Aug 16 21:33:02 2007
@@ -38,7 +38,7 @@
   /* FIXME:: since we use a list to arrange the root sets and tasks, we can
      dynamically alloc space for metadata. 
      We just don't have this dynamic support at the moment. */
-
+  TRACE2("gc.process", "GC: GC metadata init ...\n");
   unsigned int seg_size = GC_METADATA_SIZE_BYTES + METADATA_BLOCK_SIZE_BYTES;
   void* metadata = STD_MALLOC(seg_size);
   memset(metadata, 0, seg_size);
@@ -86,6 +86,7 @@
 
 void gc_metadata_destruct(GC* gc)
 {
+  TRACE2("gc.process", "GC: GC metadata destruct ...");
   GC_Metadata* metadata = gc->metadata;
   sync_pool_destruct(metadata->free_task_pool);
   sync_pool_destruct(metadata->mark_task_pool);
@@ -120,7 +121,7 @@
  
   unsigned int num_alloced = metadata->num_alloc_segs;
   if(num_alloced == GC_METADATA_SEGMENT_NUM){
-    printf("Run out GC metadata, please give it more segments!\n");
+    DIE2("gc.verbose","Warning: Run out of GC metadata, please give it more segments!");
     exit(0);
   }
 
@@ -178,26 +179,27 @@
       iter = vector_block_iterator_advance(root_set,iter);
 
       Partial_Reveal_Object* p_obj = read_slot(p_ref);
-        if(IS_MOVE_COMPACT){
-        /*This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.*/
-        //if(obj_is_moved(p_obj)) 
-          /*Fixme: los_boundery ruined the modularity of gc_common.h*/
-          if(p_obj < los_boundary){
-            write_slot(p_ref, obj_get_fw_in_oi(p_obj));
-          }else{
-            *p_ref = obj_get_fw_in_table(p_obj);
-          }
+      if(IS_MOVE_COMPACT){
+      /*This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.*/
+      //if(obj_is_moved(p_obj)) 
+        /*Fixme: los_boundary ruined the modularity of gc_common.h*/
+        if(p_obj < los_boundary){
+          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
         }else{
-          if(obj_is_fw_in_oi(p_obj)){
-            /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens.
-             * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time.
-             * This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.
-             * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted,
-             * since those which can be scanned in MOS & NOS must have been set fw bit in oi.  */
-            assert((POINTER_SIZE_INT)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS);
-            write_slot(p_ref, obj_get_fw_in_oi(p_obj));
-          }
+          *p_ref = obj_get_fw_in_table(p_obj);
+        }
+      }else{
+        if(obj_is_fw_in_oi(p_obj)){
+          /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens.
+           * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time.
+           * This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.
+           * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted,
+           * since those which can be scanned in MOS & NOS must have been set fw bit in oi.
+           */
+          assert(address_belongs_to_gc_heap(obj_get_fw_in_oi(p_obj), gc));
+          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
         }
+      }
     }
     root_set = pool_iterator_next(pool);
   } 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/large_pages.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/large_pages.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/large_pages.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/large_pages.cpp Thu Aug 16 21:33:02 2007
@@ -68,15 +68,15 @@
     alloc_addr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);    
     release_lock_memory_priv();    
     if(alloc_addr == NULL){
-      printf("GC large_page: No required number of large pages found. Please reboot.....\n");
+      WARN2("gc.base","GC large_page: Required number of large pages not found. Please reboot.");
       return NULL;
     }else
       return alloc_addr;
   }else{
-    printf("GC large_page: Check that you have permissions:\n");
-    printf("GC large_page: Control Panel->Administrative Tools->Local Security Settings->->User Rights Assignment->Lock pages in memory;\n");
-    printf("GC large_page: Start VM as soon after reboot as possible, because large pages become fragmented and unusable after a while;\n");
-    printf("GC large_page: Heap size should be multiple of large page size.\n");
+    WARN2("gc.base","GC large_page: Check that you have permissions:");
+    WARN2("gc.base","GC large_page: Control Panel->Administrative Tools->Local Security Settings->User Rights Assignment->Lock pages in memory;");
+    WARN2("gc.base","GC large_page: Start VM as soon after reboot as possible, because large pages become fragmented and unusable after a while;");
+    WARN2("gc.base","GC large_page: Heap size should be multiple of large page size.");
     return NULL;
   }
 }
@@ -107,7 +107,7 @@
 static void parse_proc_meminfo(size_t required_size){
   FILE* f = fopen("/proc/meminfo", "r");
   if (f == NULL){
-    printf("GC large_page: Can't open /proc/meminfo \n");
+    WARN2("gc.base","GC large_page: Can't open /proc/meminfo");
     return;
   }
 
@@ -124,18 +124,18 @@
   if (buf) free(buf);
   
   if (proc_huge_pages_total == (size_t)-1){
-    printf("GC large_page: Large pages are not supported by kernel.\n");
-    printf("GC large_page: CONFIG_HUGETLB_PAGE and CONFIG_HUGETLBFS needs to be enabled.\n");
+    WARN2("gc.base","GC large_page: Large pages are not supported by kernel.");
+    WARN2("gc.base","GC large_page: CONFIG_HUGETLB_PAGE and CONFIG_HUGETLBFS need to be enabled.");
   } else if (proc_huge_pages_total == 0){
-    printf("GC large_page: No large pages reserved,  Use following command: echo num> /proc/sys/vm/nr_hugepages.\n");
-    printf("GC large_page: Do it just after kernel boot before huge pages become fragmented.\n");
+    WARN2("gc.base","GC large_page: No large pages reserved. Use the following command: echo num > /proc/sys/vm/nr_hugepages.");
+    WARN2("gc.base","GC large_page: Do it just after kernel boot before huge pages become fragmented.");
   } else if (proc_huge_pages_free * proc_huge_page_size < required_size) {
     if (proc_huge_pages_total * proc_huge_page_size >= required_size) {
-      printf("GC large_page: Not enough free large pages, some of reserved space is already busy.\n");
+      WARN2("gc.base","GC large_page: Not enough free large pages, some of reserved space is already busy.");
     } else {
-      printf("GC large_page: Not enough reserved large pages.\n");
+      WARN2("gc.base","GC large_page: Not enough reserved large pages.");
     }
-    printf("GC large_page: Large pages can be only allocated.\n");
+    WARN2("gc.base","GC large_page: Large pages can be only allocated.");
   }
 }
 
@@ -150,9 +150,9 @@
 
   int fd = open(buf, O_CREAT | O_RDWR, 0700);
   if (fd == -1){
-    printf("GC large_page: Can't open Mount hugetlbfs with: mount none /mnt/huge -t hugetlbfsi.\n");
-    printf("GC large_page: Check you have appropriate permissions to /mnt/huge.\n");
-    printf("GC large_page: Use command line switch -Dgc.lp=/mnt/huge.\n");
+    WARN2("gc.base","GC large_page: Can't open file. Mount hugetlbfs with: mount none /mnt/huge -t hugetlbfs.");
+    WARN2("gc.base","GC large_page: Check you have appropriate permissions to /mnt/huge.");
+    WARN2("gc.base","GC large_page: Use command line switch -Dgc.lp=/mnt/huge.");
     free(buf);
     return NULL;
   }
@@ -160,7 +160,7 @@
 
   void* addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
   if (addr == MAP_FAILED){
-    printf("GC large_page: Map failed.\n");
+    WARN2("gc.base","GC large_page: Map failed.");
     close(fd);
     free(buf);
     return NULL;
@@ -174,7 +174,7 @@
   parse_proc_meminfo(size);
   void* alloc_addr = mmap_large_pages(size, hint);
   if(alloc_addr == NULL){
-    printf("GC large_page: Large pages allocation failed.\n");
+    WARN2("gc.base","GC large_page: Large pages allocation failed.");
     return NULL;
   }
   return alloc_addr;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp Thu Aug 16 21:33:02 2007
@@ -24,6 +24,9 @@
 #include "../gen/gen.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
 static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref)
 {
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
@@ -31,6 +34,10 @@
 
   if(obj_mark_in_vt(p_obj))
     collector_tracestack_push(collector, p_obj);
+#ifdef GC_GEN_STATS
+    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+    gc_gen_collector_update_marked_obj_stats_major(stats);
+#endif
   
   return;
 }
@@ -110,6 +117,9 @@
 {
   GC* gc = collector->gc;
   GC_Metadata* metadata = gc->metadata;
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+#endif
 
   /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
   unsigned int num_active_collectors = gc->num_active_collectors;
@@ -137,8 +147,13 @@
          and the second time the value is the ref slot is the old position as expected.
          This can be worked around if we want. 
       */
-      if(obj_mark_in_vt(p_obj))
+      if(obj_mark_in_vt(p_obj)){
         collector_tracestack_push(collector, p_obj);
+#ifdef GC_GEN_STATS
+        gc_gen_collector_update_rootset_ref_num(stats);
+        gc_gen_collector_update_marked_obj_stats_major(stats);
+#endif
+      }
 
     } 
     root_set = pool_iterator_next(metadata->gc_rootset_pool);
@@ -192,5 +207,6 @@
 
 void trace_obj_in_normal_marking(Collector *collector, void *p_obj)
 {
+  obj_mark_in_vt((Partial_Reveal_Object*)p_obj);
   trace_object(collector, (Partial_Reveal_Object *)p_obj);
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp Thu Aug 16 21:33:02 2007
@@ -56,9 +56,9 @@
   return !obj_is_marked_in_vt(p_obj);
 }
 
-#ifdef ONLY_SSPACE_IN_HEAP
+#ifdef USE_MARK_SWEEP_GC
 extern Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj);
-static inline Boolean obj_is_dead_in_unique_sweep_gc(Partial_Reveal_Object * p_obj)
+static inline Boolean obj_is_dead_in_mark_sweep_gc(Partial_Reveal_Object * p_obj)
 {
   return !obj_is_marked_in_table(p_obj);
 }
@@ -68,8 +68,8 @@
 {
   assert(p_obj);
 
-#ifdef ONLY_SSPACE_IN_HEAP
-  return obj_is_dead_in_unique_sweep_gc(p_obj);
+#ifdef USE_MARK_SWEEP_GC
+  return obj_is_dead_in_mark_sweep_gc(p_obj);
 #endif
 
   if(gc_match_kind(gc, MINOR_COLLECTION)){
@@ -91,7 +91,7 @@
 {
   assert(!gc_obj_is_dead(gc, p_obj));
 
-#ifdef ONLY_SSPACE_IN_HEAP
+#ifdef USE_MARK_SWEEP_GC
   Sspace *sspace = gc_ms_get_sspace((GC_MS*)gc);
   return sspace->move_object;
 #endif
@@ -112,7 +112,7 @@
     POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
       REF *p_ref = (REF*)iter;
-      Partial_Reveal_Object* p_obj = read_slot(p_ref);
+      Partial_Reveal_Object *p_obj = read_slot(p_ref);
       if(*p_ref && obj_need_move(gc, p_obj))
         finref_repset_add_entry(gc, p_ref);
     }
@@ -172,7 +172,7 @@
   }
   gc_put_finalizable_objects(gc);
   
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
     finref_add_repset_from_pool(gc, obj_with_fin_pool);
 }
 
@@ -220,11 +220,10 @@
     }else{  
       trace_object = trace_obj_in_normal_marking;
     }
-    obj_mark_in_vt(p_obj);
   } else if(gc_match_kind(gc, FALLBACK_COLLECTION)){
     trace_object = trace_obj_in_fallback_marking;
   } else {
-    assert(gc_match_kind(gc, UNIQUE_SWEEP_COLLECTION));
+    assert(gc_match_kind(gc, MARK_SWEEP_GC|SWEEP_COMPACT_GC));
     p_ref_or_obj = p_obj;
     trace_object = trace_obj_in_ms_marking;
   }
@@ -241,7 +240,7 @@
       void *p_ref_or_obj = (void*)*iter;
       assert((gc_match_kind(gc, MINOR_COLLECTION | FALLBACK_COLLECTION) && *(Partial_Reveal_Object **)p_ref_or_obj)
               || (gc_match_kind(gc, MAJOR_COLLECTION) && p_ref_or_obj)
-              || (gc_match_kind(gc, UNIQUE_SWEEP_COLLECTION) && p_ref_or_obj));
+              || (gc_match_kind(gc, MARK_SWEEP_GC|SWEEP_COMPACT_GC) && p_ref_or_obj));
       trace_object(collector, p_ref_or_obj);
       if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
       
@@ -255,7 +254,7 @@
       break; /* force return */
     }
     
-    task_block = pool_get_entry(metadata->mark_task_pool);      
+    task_block = pool_get_entry(metadata->mark_task_pool);
   }
   
   task_block = (Vector_Block*)collector->trace_stack;
@@ -275,9 +274,6 @@
   
   DURING_RESURRECTION = TRUE;
   
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
-    finref_reset_repset(gc);
-  
   pool_iterator_init(finalizable_obj_pool);
   Vector_Block *block = pool_iterator_next(finalizable_obj_pool);
   while(block){
@@ -287,13 +283,6 @@
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
       assert(p_obj);
       
-      /* In major & fallback collection we need record p_ref of the root dead obj to update it later.
-       * Because it is outside heap, we can't update in ref fixing.
-       * In minor collection p_ref of the root dead obj is automatically updated while tracing.
-       */
-      if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
-        finref_repset_add_entry(gc, p_ref);
-      
       /* Perhaps obj has been resurrected by previous resurrections */
       if(!gc_obj_is_dead(gc, p_obj)){
         if(gc_match_kind(gc, MINOR_COLLECTION) && obj_need_move(gc, p_obj))
@@ -312,9 +301,12 @@
     block = pool_iterator_next(finalizable_obj_pool);
   }
   
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
-    finref_put_repset(gc);
-  
+  /* In major & fallback & sweep-compact collection we need to record p_ref of the root dead obj to update it later.
+   * Because it is outside the heap, we can't update it in ref fixing.
+   * In minor collection p_ref of the root dead obj is automatically updated while tracing.
+   */
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
+    finref_add_repset_from_pool(gc, finalizable_obj_pool);
   metadata->pending_finalizers = TRUE;
   
   DURING_RESURRECTION = FALSE;
@@ -324,7 +316,7 @@
 
 static void identify_dead_refs(GC *gc, Pool *pool)
 {
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
     finref_reset_repset(gc);
   pool_iterator_init(pool);
   Vector_Block *block = pool_iterator_next(pool);
@@ -348,7 +340,7 @@
           if(gc_match_kind(gc, MINOR_COLLECTION)){
             assert(obj_is_fw_in_oi(p_referent));
             write_slot(p_referent_field, (obj_get_fw_in_oi(p_referent)));
-          } else {
+          } else if(!gc_match_kind(gc, MARK_SWEEP_GC)){
             finref_repset_add_entry(gc, p_referent_field);
           }
         }
@@ -361,7 +353,7 @@
     block = pool_iterator_next(pool);
   }
   
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)){
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC)){
     finref_put_repset(gc);
     finref_add_repset_from_pool(gc, pool);
   }
@@ -397,7 +389,7 @@
   Finref_Metadata *metadata = gc->finref_metadata;
   Pool *phanref_pool = metadata->phanref_pool;
   
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
     finref_reset_repset(gc);
 //  collector_reset_repset(collector);
   pool_iterator_init(phanref_pool);
@@ -422,7 +414,7 @@
           if(gc_match_kind(gc, MINOR_COLLECTION)){
             assert(obj_is_fw_in_oi(p_referent));
             write_slot(p_referent_field, (obj_get_fw_in_oi(p_referent)));
-          } else {
+          } else if(!gc_match_kind(gc, MARK_SWEEP_GC)){
             finref_repset_add_entry(gc, p_referent_field);
           }
         *p_ref = (REF)NULL;
@@ -440,7 +432,7 @@
     block = pool_iterator_next(phanref_pool);
   }
 //  collector_put_repset(collector);
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)){
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC)){
     finref_put_repset(gc);
     finref_add_repset_from_pool(gc, phanref_pool);
   }
@@ -709,16 +701,18 @@
 {
   Finref_Metadata *metadata = gc->finref_metadata;
   
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
     finref_reset_repset(gc);
-  update_referent_field_ignore_finref(gc, metadata->softref_pool);
-  update_referent_field_ignore_finref(gc, metadata->weakref_pool);
-  update_referent_field_ignore_finref(gc, metadata->phanref_pool);
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
+  if(!gc_match_kind(gc, MARK_SWEEP_GC)){
+    update_referent_field_ignore_finref(gc, metadata->softref_pool);
+    update_referent_field_ignore_finref(gc, metadata->weakref_pool);
+    update_referent_field_ignore_finref(gc, metadata->phanref_pool);
+  }
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
     finref_put_repset(gc);
 }
 
-extern void* los_boundary;
+extern void *los_boundary;
 /* Move compaction needs special treament when updating referent field */
 static inline void move_compaction_update_ref(GC *gc, REF *p_ref)
 {
@@ -728,8 +722,6 @@
    * So if p_ref belongs to heap, it must be a referent field pointer.
    * Objects except a tree root which are resurrected need not be recorded in finref_repset_pool.
    */
-//  if(address_belongs_to_gc_heap(p_ref, gc) && !address_belongs_to_space(p_ref, gc_get_los((GC_Gen*)gc))){ 
-// && space_of_addr(gc, p_ref)->move_object //comment this out because all spaces are movable in major collection.
   if(address_belongs_to_gc_heap(p_ref, gc) && (p_ref >= los_boundary)){
     unsigned int offset = get_gc_referent_offset();
     Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
@@ -737,14 +729,40 @@
     p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
   }
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
-  assert(space_of_addr(gc, (void*)p_obj)->move_object);
-//  if(obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc)))
+  assert(space_of_addr(gc, p_obj)->move_object);
+  
   if(p_obj < los_boundary)
-    write_slot(p_ref , obj_get_fw_in_oi(p_obj));
+    write_slot(p_ref, obj_get_fw_in_oi(p_obj));
   else
     *p_ref = obj_get_fw_in_table(p_obj);
 }
 
+static inline void sweep_compaction_update_ref(GC *gc, REF *p_ref)
+{
+  /* There are only two kinds of p_ref being added into finref_repset_pool:
+   * 1. p_ref is in a vector block from one finref pool;
+   * 2. p_ref is a referent field.
+   * So if p_ref belongs to heap, it must be a referent field pointer.
+   * Objects except a tree root which are resurrected need not be recorded in finref_repset_pool.
+   */
+  if(address_belongs_to_gc_heap((void*)p_ref, gc)){
+    unsigned int offset = get_gc_referent_offset();
+    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
+    if(obj_is_fw_in_oi(p_old_ref)){
+      Partial_Reveal_Object *p_new_ref = obj_get_fw_in_oi(p_old_ref);
+      p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
+    }
+  }
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  /* assert(obj_need_move(gc, p_obj));
+   * This assertion is commented out because it assert(!obj_is_dead(gc, p_obj)).
+   * When gc_fix_rootset is invoked, mark bit and alloc bit have been flipped in Mark-Sweep,
+   * so this assertion will fail.
+   * But p_obj here must certainly be one that needs moving.
+   */
+  write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+}
+
 extern Boolean IS_MOVE_COMPACT;
 
 /* parameter pointer_addr_in_pool means it is p_ref or p_obj in pool */
@@ -765,12 +783,14 @@
         p_ref = (REF*)iter;
       p_obj = read_slot(p_ref);
       
-      if(!IS_MOVE_COMPACT){
-        assert(obj_is_marked_in_vt(p_obj));
-        assert(obj_is_fw_in_oi(p_obj));
-        write_slot(p_ref , obj_get_fw_in_oi(p_obj));
-      } else {
+      if(IS_MOVE_COMPACT){
         move_compaction_update_ref(gc, p_ref);
+      } else if(gc_match_kind(gc, SWEEP_COMPACT_GC)){
+        if(obj_is_fw_in_oi(p_obj))
+          sweep_compaction_update_ref(gc, p_ref);
+      } else {
+        assert((obj_is_marked_in_vt(p_obj) && obj_is_fw_in_oi(p_obj)));
+        write_slot(p_ref , obj_get_fw_in_oi(p_obj));
       }
     }
     vector_block_clear(repset);
@@ -799,11 +819,13 @@
       p_obj = read_slot(p_ref);
       
       if(!IS_MOVE_COMPACT){
-        assert(obj_is_marked_in_vt(p_obj));
-        assert(obj_is_fw_in_oi(p_obj));
-        write_slot(p_ref , obj_get_fw_in_oi(p_obj));
-      } else {
         move_compaction_update_ref(gc, p_ref);
+      } else if(gc_match_kind(gc, SWEEP_COMPACT_GC)){
+        if(obj_is_fw_in_oi(p_obj))
+          sweep_compaction_update_ref(gc, p_ref);
+      } else {
+        assert((obj_is_marked_in_vt(p_obj) && obj_is_fw_in_oi(p_obj)));
+        write_slot(p_ref , obj_get_fw_in_oi(p_obj));
       }
     }
     repset = pool_iterator_next(pool);

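The new sweep_compaction_update_ref above re-derives a referent field's address when its enclosing object has moved: it steps back from the recorded field pointer by the referent offset, follows the forwarding pointer if one was installed, and re-applies the offset. A self-contained toy version of that pointer arithmetic (all types and names below are illustrative stubs, not the GC's real object layout):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    /* Toy object: a forwarding pointer plus one "referent" field. */
    struct Obj { Obj* forwarded; intptr_t referent; };

    static intptr_t* adjust_referent_slot(intptr_t* slot, size_t referent_offset) {
      Obj* old_obj = (Obj*)((uintptr_t)slot - referent_offset);
      if (old_obj->forwarded)  /* object moved: recompute the field address in the new copy */
        slot = (intptr_t*)((uintptr_t)old_obj->forwarded + referent_offset);
      return slot;
    }

    int main() {
      Obj new_copy = { NULL, 42 };
      Obj old_copy = { &new_copy, 0 };
      size_t off = offsetof(Obj, referent);
      intptr_t* slot = adjust_referent_slot((intptr_t*)((uintptr_t)&old_copy + off), off);
      std::printf("%ld\n", (long)*slot);  /* prints 42: the slot now points into the new copy */
      return 0;
    }
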
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp Thu Aug 16 21:33:02 2007
@@ -44,6 +44,7 @@
 
 void gc_finref_metadata_initialize(GC *gc)
 {
+  TRACE2("gc.process", "GC: gc finref metadata init ... \n");
   unsigned int seg_size =  FINREF_METADATA_SEG_SIZE_BYTES + FINREF_METADATA_BLOCK_SIZE_BYTES;
   void *first_segment = STD_MALLOC(seg_size);
   memset(first_segment, 0, seg_size);
@@ -82,6 +83,7 @@
 
 void gc_finref_metadata_destruct(GC *gc)
 {
+  TRACE2("gc.process", "GC: GC finref metadata destruct ...");
   Finref_Metadata *metadata = gc->finref_metadata;
   
   sync_pool_destruct(metadata->free_pool);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp Thu Aug 16 21:33:02 2007
@@ -19,8 +19,6 @@
  * @author Xiao-Feng Li, 2006/10/05
  */
 
-#include "port_sysinfo.h"
-
 #include "gen.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 #include "../verify/verify_live_heap.h"
@@ -31,6 +29,9 @@
 #include "../common/hashcode.h"
 #endif
 
+#ifdef GC_GEN_STATS
+#include "gen_stats.h"
+#endif
 /* fspace size limit is not interesting. only for manual tuning purpose */
 POINTER_SIZE_INT min_nos_size_bytes = 16 * MB;
 POINTER_SIZE_INT max_nos_size_bytes = 256 * MB;
@@ -54,20 +55,13 @@
 
 #define RESERVE_BOTTOM ((void*)0x1000000)
 
-static void gc_gen_get_system_info(GC_Gen *gc_gen) 
-{
-  gc_gen->_machine_page_size_bytes = (unsigned int)port_vmem_page_sizes()[0];
-  gc_gen->_num_processors = port_CPUs_number();
-  gc_gen->_system_alloc_unit = vm_get_system_alloc_unit();
-  SPACE_ALLOC_UNIT = max(gc_gen->_system_alloc_unit, GC_BLOCK_SIZE_BYTES);
-}
-
 void* alloc_large_pages(size_t size, const char* hint);
 
+void gc_gen_initial_verbose_info(GC_Gen *gc);
 void gc_gen_initialize(GC_Gen *gc_gen, POINTER_SIZE_INT min_heap_size, POINTER_SIZE_INT max_heap_size) 
 {
-  assert(gc_gen); 
-  gc_gen_get_system_info(gc_gen); 
+  TRACE2("gc.process", "GC: GC_Gen heap init ... \n");
+  assert(gc_gen);
 
   max_heap_size = round_down_to_size(max_heap_size, SPACE_ALLOC_UNIT);
   min_heap_size = round_up_to_size(min_heap_size, SPACE_ALLOC_UNIT);
@@ -130,8 +124,8 @@
   assert((POINTER_SIZE_INT)nos_boundary%SPACE_ALLOC_UNIT == 0);
   nos_base = vm_reserve_mem(nos_boundary, nos_reserve_size);
   if( nos_base != nos_boundary ){
-    printf("Static NOS mapping: Can't reserve memory at %x for size %x for NOS.\n", nos_boundary, nos_reserve_size);  
-    printf("Please not use static NOS mapping by undefining STATIC_NOS_MAPPING, or adjusting NOS_BOUNDARY value.\n");
+    DIE2("gc.base","Warning: Static NOS mapping: Can't reserve memory at address "<<nos_boundary<<" for size "<<nos_reserve_size<<" for NOS.");
+    DIE2("gc.base","Please do not use static NOS mapping (undefine STATIC_NOS_MAPPING), or adjust the NOS_BOUNDARY value.");
     exit(0);
   }
   reserved_end = (void*)((POINTER_SIZE_INT)nos_base + nos_reserve_size);
@@ -142,7 +136,7 @@
   while( !reserved_base || reserved_base >= nos_base){
     los_mos_base = (void*)((POINTER_SIZE_INT)los_mos_base - SPACE_ALLOC_UNIT);
     if(los_mos_base < RESERVE_BOTTOM){
-      printf("Static NOS mapping: Can't allocate memory at address %x for specified size %x for MOS", reserved_base, los_mos_size);  
+      DIE2("gc.base","Static NOS mapping: Can't reserve memory at address "<<reserved_base<<" for specified size "<<los_mos_size);
       exit(0);      
     }
     reserved_base = vm_reserve_mem(los_mos_base, los_mos_size);
@@ -156,9 +150,9 @@
     if(reserved_base == NULL) {
       free(large_page_hint);
       large_page_hint = NULL;
-      printf("GC use small pages.\n");
+      WARN2("gc.base","GC uses small pages.");
     }else{
-      printf("GC use large pages.\n");
+      WARN2("gc.base","GC uses large pages.");
     }
   }
   
@@ -172,11 +166,11 @@
     }
 
     if(max_size_reduced){
-      printf("Max heap size: can't be reserved, reduced to %d MB according to virtual memory limitation.\n", max_heap_size/MB);
+      WARN2("gc.base","Max heap size: can't be reserved, reduced to "<< max_heap_size/MB<<" MB according to virtual memory limitation.");
     }
 
     if(max_heap_size < min_heap_size){
-      printf("Heap size: invalid, please reimput a smaller \"ms\" paramenter!\n");  
+      DIE2("gc.base","Heap size: invalid, please input a smaller \"ms\" parameter!");
       exit(0);
     }
     reserved_base = (void*)round_up_to_size((POINTER_SIZE_INT)reserved_base, SPACE_ALLOC_UNIT);
@@ -233,11 +227,17 @@
                                 space_committed_size((Space*)gc_gen->mos) +
                                 space_committed_size((Space*)gc_gen->los);
   
+#ifdef GC_GEN_STATS
+  gc_gen_stats_initialize(gc_gen);
+#endif
+
+  gc_gen_initial_verbose_info(gc_gen);
   return;
 }
 
 void gc_gen_destruct(GC_Gen *gc_gen) 
 {
+  TRACE2("gc.process", "GC: GC_Gen heap destruct ......");
   Space* nos = (Space*)gc_gen->nos;
   Space* mos = (Space*)gc_gen->mos;
   Space* los = (Space*)gc_gen->los;
@@ -262,6 +262,9 @@
   vm_unmap_mem(nos_start, nos_size);
   vm_unmap_mem(mos_start, mos_size);
   vm_unmap_mem(los_start, los_size);
+#ifdef GC_GEN_STATS
+  gc_gen_stats_destruct(gc_gen);
+#endif
 
   return;  
 }
@@ -280,8 +283,6 @@
 void* los_try_alloc(POINTER_SIZE_INT size, GC* gc){  return lspace_try_alloc((Lspace*)((GC_Gen*)gc)->los, size); }
 
 
-unsigned int gc_get_processor_num(GC_Gen* gc){ return gc->_num_processors;}
-
 Boolean FORCE_FULL_COMPACT = FALSE;
 
 void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause)
@@ -294,8 +295,8 @@
   else
     gc->collect_kind = MINOR_COLLECTION;
 
-#ifdef ONLY_SSPACE_IN_HEAP
-  gc->collect_kind = UNIQUE_SWEEP_COLLECTION;
+#ifdef USE_MARK_SWEEP_GC
+  gc->collect_kind = MARK_SWEEP_GC;
 #endif
   return;
 }
@@ -318,7 +319,7 @@
       gc_enable_gen_mode();
     
     }else{
-      printf("\nGC algorithm setting incorrect. Will use default value.\n");  
+      WARN2("gc.base","Warning: GC algorithm setting is incorrect. Will use the default value.");
     
     }
   }
@@ -336,7 +337,7 @@
      MAJOR_ALGO= MAJOR_COMPACT_MOVE;
     
     }else{
-     printf("\nGC algorithm setting incorrect. Will use default algorithm.\n");  
+     WARN2("gc.base","Warning: GC algorithm setting is incorrect. Will use the default algorithm.");
       
     }
   }
@@ -372,8 +373,9 @@
   Mspace* mos = gc->mos;
   Fspace* nos = gc->nos;
   Lspace* los = gc->los;
-  /*We can not tolerate gc->survive_ratio be greater than threshold twice continuously.
-   *Or, we must adjust heap size */
+  /* We cannot tolerate gc->survive_ratio being greater than the threshold twice in a row.
+   * Otherwise, we must adjust the heap size.
+   */
   static unsigned int tolerate = 0;
 
   POINTER_SIZE_INT heap_total_size = los->committed_heap_size + mos->committed_heap_size + nos->committed_heap_size;
@@ -415,8 +417,41 @@
 #else
   assert(!large_page_hint);
   POINTER_SIZE_INT old_nos_size = nos->committed_heap_size;
+  INFO2("gc.process", "GC: gc_gen heap extension after GC["<<gc->num_collections<<"] ...");
   blocked_space_extend(nos, (unsigned int)adjust_size);
-  nos->survive_ratio = (float)old_nos_size * nos->survive_ratio / (float)nos->committed_heap_size;
+  INFO2("gc.space","GC: heap extension: from "<<heap_total_size/MB<<"MB  to  "<<new_heap_total_size/MB<<"MB\n");
+  if (!NOS_SIZE) {
+    nos->survive_ratio = (float)old_nos_size * nos->survive_ratio / (float)nos->committed_heap_size;
+    if( NOS_PARTIAL_FORWARD )
+      object_forwarding_boundary = (void*)&nos->blocks[nos->num_managed_blocks >>1 ];
+    else
+      object_forwarding_boundary = (void*)&nos->blocks[nos->num_managed_blocks];
+  }
+  else {
+    /*if user specified NOS_SIZE, adjust mos and nos sizes to keep the nos size constant*/
+    old_nos_size = nos->committed_heap_size;
+    nos_boundary = (void*)((POINTER_SIZE_INT)nos->heap_end - NOS_SIZE);
+    nos->committed_heap_size = NOS_SIZE;
+    nos->heap_start = nos_boundary;
+    nos->blocks = (Block*)nos_boundary;
+    nos->first_block_idx = ((Block_Header*)nos_boundary)->block_idx;
+    nos->num_managed_blocks = (unsigned int)(NOS_SIZE >> GC_BLOCK_SHIFT_COUNT);
+    nos->num_total_blocks = nos->num_managed_blocks;
+    nos->free_block_idx = nos->first_block_idx;
+    if( NOS_PARTIAL_FORWARD )
+      object_forwarding_boundary = (void*)&nos->blocks[nos->num_managed_blocks >>1 ];
+    else
+      object_forwarding_boundary = (void*)&nos->blocks[nos->num_managed_blocks];
+
+    mos->heap_end = nos_boundary;
+    mos->committed_heap_size += old_nos_size-NOS_SIZE;
+    mos->num_managed_blocks = (unsigned int)(mos->committed_heap_size >> GC_BLOCK_SHIFT_COUNT);
+    mos->num_total_blocks = mos->num_managed_blocks;
+    mos->ceiling_block_idx = ((Block_Header*)nos_boundary)->block_idx - 1;
+
+    mos->survive_ratio = (float) mos->last_surviving_size / (float)mos->committed_heap_size;
+  }
+
   /*Fixme: gc fields should be modified according to nos extend*/
   gc->committed_heap_size += adjust_size;
   //debug_adjust
@@ -430,8 +465,11 @@
 Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it. */
 static unsigned int mspace_num_used_blocks_before_minor;
 static unsigned int mspace_num_used_blocks_after_minor;
+void gc_gen_stats_verbose(GC_Gen* gc);
 void gc_gen_reclaim_heap(GC_Gen* gc)
 { 
+  INFO2("gc.process", "GC: start GC_Gen ...\n");
+
   if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
 
   Blocked_Space* fspace = (Blocked_Space*)gc->nos;
@@ -440,24 +478,44 @@
   fspace->num_used_blocks = fspace->free_block_idx - fspace->first_block_idx;
 
   gc->collect_result = TRUE;
+#ifdef GC_GEN_STATS
+  gc_gen_stats_reset_before_collection((GC_Gen*)gc);
+  gc_gen_collector_stats_reset((GC_Gen*)gc);
+#endif
   
   if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+
+    INFO2("gc.process", "GC: start minor collection ...\n");
+
     /* FIXME:: move_object is only useful for nongen_slide_copy */
     gc->mos->move_object = 0;
     /* This is for compute mspace->last_alloced_size */
 
     mspace_num_used_blocks_before_minor = mspace->free_block_idx - mspace->first_block_idx;
     fspace_collection(gc->nos);
+
+#ifdef GC_GEN_STATS
+    gc_gen_collector_stats_verbose_minor_collection(gc);
+#endif
     mspace_num_used_blocks_after_minor = mspace->free_block_idx - mspace->first_block_idx;
     assert( mspace_num_used_blocks_before_minor <= mspace_num_used_blocks_after_minor );
     mspace->last_alloced_size = GC_BLOCK_SIZE_BYTES * ( mspace_num_used_blocks_after_minor - mspace_num_used_blocks_before_minor );
 
     /*If the current minor collection failed, i.e. there happens a fallback, we should not do the minor sweep of LOS*/
-    if(gc->collect_result != FALSE && !gc_is_gen_mode())
+    if(gc->collect_result != FALSE && !gc_is_gen_mode()) {
+#ifdef GC_GEN_STATS
+      gc->stats->num_minor_collections++;
+#endif
       lspace_collection(gc->los);
-
+    }
     gc->mos->move_object = 1;      
+
+    INFO2("gc.process", "GC: end of minor collection ...\n");
+
   }else{
+
+    INFO2("gc.process", "GC: start major collection ...\n");
+
     /* process mos and nos together in one compaction */
     gc->los->move_object = 1;
 
@@ -465,9 +523,19 @@
     lspace_collection(gc->los);
 
     gc->los->move_object = 0;
+
+#ifdef GC_GEN_STATS
+    gc->stats->num_major_collections++;
+    gc_gen_collector_stats_verbose_major_collection(gc);
+#endif
+
+    INFO2("gc.process", "GC: end of major collection ...\n");
   }
 
   if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+
+    INFO2("gc.process", "GC: Minor collection failed, switching to fallback collection ...");
+
     if(gc_is_gen_mode()) gc_clear_remset((GC*)gc);  
     
     /* runout mspace in minor collection */
@@ -477,6 +545,11 @@
     IS_FALLBACK_COMPACTION = TRUE;
     gc_reset_collect_result((GC*)gc);
     gc->collect_kind = FALLBACK_COLLECTION;    
+#ifdef GC_GEN_STATS
+    /*since stats were changed in the minor collection, we need to reset stats before fallback collection*/
+    gc_gen_stats_reset_before_collection((GC_Gen*)gc);
+    gc_gen_collector_stats_reset((GC_Gen*)gc);
+#endif
 
     if(verify_live_heap) event_gc_collect_kind_changed((GC*)gc);
     
@@ -486,10 +559,18 @@
     gc->los->move_object = 0;    
 
     IS_FALLBACK_COMPACTION = FALSE;
+
+#ifdef GC_GEN_STATS
+    gc->stats->num_fallback_collections++;
+    gc_gen_collector_stats_verbose_major_collection(gc);
+#endif
+
+    INFO2("gc.process", "GC: end of fallback collection ...");
+
   }
   
   if( gc->collect_result == FALSE){
-    printf("Out of Memory!\n");
+    DIE2("gc.collect", "Out of Memory!\n");
     assert(0);
     exit(0);
   }
@@ -500,7 +581,15 @@
 #ifdef COMPRESS_REFERENCE
   gc_set_pool_clear(gc->metadata->gc_uncompressed_rootset_pool);
 #endif
+
   assert(!gc->los->move_object);
+#ifdef GC_GEN_STATS
+  gc_gen_stats_update_after_collection((GC_Gen*)gc);
+  gc_gen_stats_verbose(gc);
+#endif
+
+  INFO2("gc.process", "GC: end of GC_Gen\n");
+
   return;
 }
 
@@ -602,3 +691,95 @@
     }
   }
 }
+
+void gc_gen_collection_verbose_info(GC_Gen *gc, int64 pause_time, int64 mutator_time)
+{
+
+#ifdef GC_GEN_STATS
+  GC_Gen_Stats* stats = ((GC_Gen*)gc)->stats;
+  stats->total_mutator_time += mutator_time;
+  stats->total_pause_time += pause_time;
+#endif
+
+  INFO2("gc.collect","GC: GC_Gen Collection Info:"
+    <<"\nGC: GC id: GC["<<gc->num_collections<<"]"
+    <<"\nGC: current collection num: "<<gc->num_collections);
+
+  switch(gc->collect_kind) {
+  case MINOR_COLLECTION:
+    INFO2("gc.collect","GC: collection type: minor");
+#ifdef GC_GEN_STATS
+    INFO2("gc.collect","GC: current minor collection num: "<<gc->stats->num_minor_collections);
+#endif
+    break;
+  case MAJOR_COLLECTION:
+    INFO2("gc.collect","GC: collection type: major");
+#ifdef GC_GEN_STATS
+    INFO2("gc.collect","GC: current major collection num: "<<gc->stats->num_major_collections);
+#endif
+    break;
+  case FALLBACK_COLLECTION:
+    INFO2("gc.collect","GC: collection type: fallback");
+#ifdef GC_GEN_STATS
+    INFO2("gc.collect","GC: current fallback collection num: "<<gc->stats->num_fallback_collections);
+#endif
+  }
+
+  switch(gc->cause) {
+  case GC_CAUSE_NOS_IS_FULL:
+    INFO2("gc.collect","GC: collection cause: nursery object space is full");
+    break;
+  case GC_CAUSE_LOS_IS_FULL:
+    INFO2("gc.collect","GC: collection cause: large object space is full");
+    break;
+  case GC_CAUSE_RUNTIME_FORCE_GC:
+    INFO2("gc.collect","GC: collection cause: runtime force gc");
+  }
+
+  INFO2("gc.collect","GC: pause time: "<<(pause_time>>10)<<"ms"
+    <<"\nGC: mutator time from last collection: "<<(mutator_time>>10)<<"ms\n");
+
+}
+
+void gc_gen_space_verbose_info(GC_Gen *gc)
+{
+  INFO2("gc.space","GC: Heap info after GC["<<gc->num_collections<<"]:"
+    <<"\nGC: Heap size: "<<verbose_print_size(gc->committed_heap_size)<<", free size:"<<verbose_print_size(gc_gen_free_memory_size(gc))
+    <<"\nGC: LOS size: "<<verbose_print_size(gc->los->committed_heap_size)<<", free size:"<<verbose_print_size(lspace_free_memory_size(gc->los))
+    <<"\nGC: MOS size: "<<verbose_print_size(gc->mos->committed_heap_size)<<", free size:"<<verbose_print_size(space_free_memory_size((Blocked_Space*)gc->mos))
+    <<"\nGC: NOS size: "<<verbose_print_size(gc->nos->committed_heap_size)<<", free size:"<<verbose_print_size(space_free_memory_size((Blocked_Space*)gc->nos))<<"\n");
+}
+
+inline void gc_gen_initial_verbose_info(GC_Gen *gc)
+{
+  INFO2("gc.base","GC_Gen initial:"
+    <<"\nmax heap size: "<<verbose_print_size(max_heap_size_bytes)
+    <<"\nmin heap size: "<<verbose_print_size(min_heap_size_bytes)
+    <<"\ninitial heap size: "<<verbose_print_size(gc->committed_heap_size)
+    <<"\ninitial num collectors: "<<gc->num_collectors
+    <<"\ninitial nos size: "<<verbose_print_size(gc->nos->committed_heap_size)
+    <<"\nnos collection algo: "
+    <<((gc->nos->collect_algorithm==MINOR_NONGEN_FORWARD_POOL)?"nongen forward":"gen forward")
+    <<"\ninitial mos size: "<<verbose_print_size(gc->mos->committed_heap_size)
+    <<"\nmos collection algo: "
+    <<((gc->mos->collect_algorithm==MAJOR_COMPACT_MOVE)?"move compact":"slide compact")
+    <<"\ninitial los size: "<<verbose_print_size(gc->los->committed_heap_size)<<"\n");
+}
+
+void gc_gen_wrapup_verbose(GC_Gen* gc)
+{
+#ifdef GC_GEN_STATS
+  GC_Gen_Stats* stats = gc->stats;
+
+  INFO2("gc.base", "GC: All Collection info: "
+    <<"\nGC: total nos alloc obj size: "<<verbose_print_size(stats->total_size_nos_alloc)
+    <<"\nGC: total los alloc obj num: "<<stats->obj_num_los_alloc
+    <<"\nGC: total los alloc obj size: "<<verbose_print_size(stats->total_size_los_alloc)
+    <<"\nGC: total collection num: "<<gc->num_collections
+    <<"\nGC: minor collection num: "<<stats->num_minor_collections
+    <<"\nGC: major collection num: "<<stats->num_major_collections
+    <<"\nGC: total collection time: "<<stats->total_pause_time
+    <<"\nGC: total application execution time: "<<stats->total_mutator_time<<"\n");
+#endif
+}
+

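One detail of the reporting above: pause_time and mutator_time are shifted right by 10 bits before being labelled "ms". Assuming time_now() returns microseconds (an assumption, not stated in this diff), >>10 divides by 1024, a cheap approximation of the /1000 microsecond-to-millisecond conversion that under-reports by roughly 2.4%:

    #include <cstdio>

    int main() {
      long long pause_time = 150000;                        /* 150 ms, measured in microseconds */
      std::printf("approx: %lld ms\n", pause_time >> 10);   /* prints 146 (divide by 1024) */
      std::printf("exact:  %lld ms\n", pause_time / 1000);  /* prints 150 */
      return 0;
    }
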
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h Thu Aug 16 21:33:02 2007
@@ -30,6 +30,10 @@
 #include "../los/lspace.h"
 #include "../finalizer_weakref/finalizer_weakref_metadata.h"
 
+#ifdef GC_GEN_STATS
+struct GC_Gen_Stats;
+#endif
+
 enum Write_Barrier_Kind{
   WRITE_BARRIER_NIL,  
   WRITE_BARRIER_SLOT,  
@@ -86,7 +90,12 @@
   Vector_Block* uncompressed_root_set;
   
   //For_LOS_extend
-  Space_Tuner* tuner;  
+  Space_Tuner* tuner;
+  
+  /* system info */
+  unsigned int _system_alloc_unit;
+  unsigned int _machine_page_size_bytes;
+  unsigned int _num_processors;
   /* END of GC --> */
   
   Block* blocks;
@@ -97,18 +106,21 @@
   Boolean force_major_collect;
   Gen_Mode_Adaptor* gen_mode_adaptor;
   Boolean force_gen_mode;
-  
-  /* system info */ 
-  unsigned int _system_alloc_unit;
-  unsigned int _machine_page_size_bytes;
-  unsigned int _num_processors;
-  
+
+#ifdef GC_GEN_STATS
+  GC_Gen_Stats* stats; /* used to record statistics during collection */
+#endif
+
 } GC_Gen;
 
 //////////////////////////////////////////////////////////////////////////////////////////
 
 void gc_gen_initialize(GC_Gen *gc, POINTER_SIZE_INT initial_heap_size, POINTER_SIZE_INT final_heap_size);
 void gc_gen_destruct(GC_Gen *gc);
+void gc_gen_collection_verbose_info(GC_Gen *gc, int64 pause_time, int64 mutator_time);
+void gc_gen_space_verbose_info(GC_Gen *gc);
+void gc_gen_initial_verbose_info(GC_Gen *gc);
+void gc_gen_wrapup_verbose(GC_Gen* gc);
                         
 inline POINTER_SIZE_INT gc_gen_free_memory_size(GC_Gen* gc)
 {  return space_free_memory_size((Blocked_Space*)gc->nos) +
@@ -161,8 +173,6 @@
 void gc_set_mos(GC_Gen* gc, Space* mos);
 void gc_set_los(GC_Gen* gc, Space* los);
 
-unsigned int gc_get_processor_num(GC_Gen* gc);
-
 void gc_decide_collection_algorithm(GC_Gen* gc, char* minor_algo, char* major_algo);
 void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause);
 
@@ -184,6 +194,4 @@
 extern Boolean GEN_NONGEN_SWITCH ;
 
 #endif /* ifndef _GC_GEN_H_ */
-
-
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp Thu Aug 16 21:33:02 2007
@@ -369,9 +369,22 @@
   POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace);
 
   //if( ABS_DIFF(new_nos_size, curr_nos_size) < NOS_COPY_RESERVE_DELTA )
-  if( new_nos_size == curr_nos_size )
+  if( new_nos_size == curr_nos_size ){
     return;
-  
+  }else if ( new_nos_size >= curr_nos_size ){
+    INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...");
+    POINTER_SIZE_INT adapt_size = new_nos_size - curr_nos_size;
+    INFO2("gc.space", "GC: Space Adapt:  nos  --->  mos  ("
+      <<verbose_print_size(adapt_size)
+      <<" size was transfered from nos to mos)\n"); 
+  } else {
+    INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...");
+    POINTER_SIZE_INT  adapt_size = curr_nos_size - new_nos_size;
+    INFO2("gc.space", "GC: Space Adapt:  mos  --->  nos  ("
+      <<verbose_print_size(adapt_size)
+      <<" size was transfered from mos to nos)\n"); 
+  }
+
   /* below are ajustment */  
   POINTER_SIZE_INT curr_heap_commit_end = 
                              (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size;
@@ -384,6 +397,10 @@
   fspace->num_total_blocks = fspace->num_managed_blocks;
   fspace->first_block_idx = ((Block_Header*)nos_boundary)->block_idx;
   fspace->free_block_idx = fspace->first_block_idx;
+  if( NOS_PARTIAL_FORWARD )
+    object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks >>1];
+  else
+    object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks];
 
   mspace->heap_end = nos_boundary;
   mspace->committed_heap_size = new_mos_size;
@@ -423,9 +440,22 @@
   POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace);
 
   //if( ABS_DIFF(new_nos_size, curr_nos_size) < NOS_COPY_RESERVE_DELTA )
-  if( new_nos_size == curr_nos_size) 
+  if( new_nos_size == curr_nos_size ){
     return;
-      
+  }else if ( new_nos_size >= curr_nos_size ){
+    INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...\n");
+    POINTER_SIZE_INT adapt_size = new_nos_size - curr_nos_size;
+    INFO2("gc.space", "GC: Space Adapt:  mos  --->  nos  ("
+      <<verbose_print_size(adapt_size)
+      <<" size was transfered from mos to nos)\n"); 
+  } else {
+    INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...\n");
+    POINTER_SIZE_INT  adapt_size = curr_nos_size - new_nos_size;
+    INFO2("gc.space", "GC: Space Adapt:  nos  --->  mos  ("
+      <<verbose_print_size(adapt_size)
+      <<" size was transfered from nos to mos)\n"); 
+  }
+
   POINTER_SIZE_INT used_mos_size = space_used_memory_size((Blocked_Space*)mspace);  
   POINTER_SIZE_INT free_mos_size = space_free_memory_size((Blocked_Space*)mspace);  
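
Both branches added above report which space donated memory to the other after the adaptation step. A minimal stand-alone sketch of that report, assuming NOS and MOS carve up one shared committed region so that growing one space necessarily shrinks the other (report_space_adapt and the kilobyte formatting are illustrative, not the commit's code):

#include <cstdio>

// Sketch: derive the transfer direction and amount from the old and new NOS
// sizes; under the shared-region assumption, NOS growth is fed by MOS.
static void report_space_adapt(unsigned long long curr_nos_size,
                               unsigned long long new_nos_size)
{
  if (new_nos_size == curr_nos_size) return;          // nothing changed
  unsigned long long adapt_size;
  const char* direction;
  if (new_nos_size > curr_nos_size) {
    adapt_size = new_nos_size - curr_nos_size;
    direction = "mos  --->  nos";
  } else {
    adapt_size = curr_nos_size - new_nos_size;
    direction = "nos  --->  mos";
  }
  std::printf("GC: Space Adapt:  %s  (%lluK transferred)\n",
              direction, adapt_size >> 10);
}

// e.g. report_space_adapt(64ULL<<20, 80ULL<<20)
// prints: GC: Space Adapt:  mos  --->  nos  (16384K transferred)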
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.cpp Thu Aug 16 21:33:02 2007
@@ -62,6 +62,7 @@
   lspace->num_collections = 0;
   lspace->time_collections = 0;
   lspace->survive_ratio = 0.5f;
+
   lspace->last_alloced_size = 0;
   lspace->accumu_alloced_size = 0;  
   lspace->total_alloced_size = 0;
@@ -129,3 +130,4 @@
 {
   return lspace->failure_size;
 }
+

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h Thu Aug 16 21:33:02 2007
@@ -84,7 +84,10 @@
 void lspace_reset_for_slide(Lspace* lspace);
 void lspace_collection(Lspace* lspace);
 
-inline POINTER_SIZE_INT lspace_free_memory_size(Lspace* lspace){ /* FIXME:: */ return 0; }
+inline POINTER_SIZE_INT lspace_free_memory_size(Lspace* lspace)
+{ /* FIXME:: */
+  return (lspace->committed_heap_size - (POINTER_SIZE_INT)lspace->last_surviving_size);
+}
 inline POINTER_SIZE_INT lspace_committed_size(Lspace* lspace){ return lspace->committed_heap_size; }
 
 inline Partial_Reveal_Object* lspace_get_next_marked_object( Lspace* lspace, unsigned int* iterate_index)
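
The new lspace_free_memory_size() above replaces the old hard-coded 0 with an estimate derived from the survivors of the last LOS collection. In isolation the arithmetic is just the following (the struct below is a trimmed model, not the real Lspace):

// Minimal model: whatever did not survive the last LOS collection is counted
// as free. Between collections this is only an estimate, since new large
// objects may have been allocated after last_surviving_size was recorded.
struct Los_Model {
  unsigned long long committed_heap_size;
  unsigned long long last_surviving_size;
};

static unsigned long long los_free_estimate(const Los_Model* los)
{
  return los->committed_heap_size - los->last_surviving_size;
}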

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp Thu Aug 16 21:33:02 2007
@@ -23,6 +23,9 @@
 #include "../gen/gen.h"
 #include "../common/space_tuner.h"
 
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
 static void free_pool_lock_nr_list(Free_Area_Pool* pool, unsigned int list_index)
 {
     Lockable_Bidir_List* list_head = &pool->sized_area_list[list_index];
@@ -246,9 +249,15 @@
   collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
 #endif
   
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+#endif
   while( p_obj ){
     assert( obj_is_marked_in_vt(p_obj));
     unsigned int obj_size = vm_object_size(p_obj);
+#ifdef GC_GEN_STATS
+  gc_gen_collector_update_moved_los_obj_stats_major(stats, vm_object_size(p_obj));
+#endif
     assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)lspace->heap_end);
 #ifdef USE_32BITS_HASHCODE 
     obj_size += hashcode_is_attached(p_obj)? GC_OBJECT_ALIGNMENT : 0 ;
@@ -276,6 +285,7 @@
   
   lspace->scompact_fa_start = dest_addr;
   lspace->scompact_fa_end= lspace->heap_end;
+  lspace->last_surviving_size = (POINTER_SIZE_INT)dest_addr - (POINTER_SIZE_INT)lspace->heap_start;
   return;
 }
 
@@ -353,7 +363,6 @@
         assert(tuner->kind == TRANS_NOTHING);
         assert(!tuner->tuning_size);
         new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
-        if(new_fa_size == 0) break;
         Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
         if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
         break;
@@ -377,6 +386,12 @@
 
 void lspace_sweep(Lspace* lspace)
 {
+  TRACE2("gc.process", "GC: lspace sweep algo start ...\n");
+
+#ifdef GC_GEN_STATS
+  GC_Gen_Stats* stats = ((GC_Gen*)lspace->gc)->stats;
+  gc_gen_stats_set_los_collected_flag((GC_Gen*)lspace->gc, true);
+#endif
   unsigned int mark_bit_idx = 0;
   POINTER_SIZE_INT cur_size = 0;
   void *cur_area_start, *cur_area_end;
@@ -395,6 +410,10 @@
     obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
 #endif
     lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size);    
+#ifdef GC_GEN_STATS
+    stats->los_suviving_obj_num++;
+    stats->los_suviving_obj_size += obj_size;
+#endif
   }
 
   cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj);
@@ -425,6 +444,10 @@
       obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
 #endif
       lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size);
+#ifdef GC_GEN_STATS
+      stats->los_suviving_obj_num++;
+      stats->los_suviving_obj_size += obj_size;
+#endif
     }
 
 #ifdef USE_32BITS_HASHCODE
@@ -448,6 +471,8 @@
    mark_bit_idx = 0;
    assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx));
 
+  TRACE2("gc.process", "GC: end of lspace sweep algo ...\n");
   return;
 }
+
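
lspace_sweep() above now maintains last_surviving_size (which feeds the free-size estimate in lspace.h) by summing the kilobyte-aligned size of every surviving large object; the slide-compact path sets the same field from the final compaction cursor instead. A small self-contained sketch, assuming ALIGN_UP_TO_KILO rounds a byte count up to the next 1KB boundary (the object sizes below are made up):

#include <cassert>

// Assumed behaviour of ALIGN_UP_TO_KILO: round up to 1KB, matching the
// kilobyte granularity at which LOS free areas are managed.
static unsigned long long align_up_to_kilo(unsigned long long bytes)
{
  return (bytes + 1023ULL) & ~1023ULL;
}

int main()
{
  unsigned long long last_surviving_size = 0;
  const unsigned long long surviving_obj_sizes[] = { 5000, 70000, 1024 };
  for (int i = 0; i < 3; ++i)
    last_surviving_size += align_up_to_kilo(surviving_obj_sizes[i]);
  assert(last_surviving_size == 5120 + 70656 + 1024);   // 76800 bytes total
  return 0;
}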
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp Thu Aug 16 21:33:02 2007
@@ -24,6 +24,9 @@
 #include "../gen/gen.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
 static void scan_slot(Collector* collector, REF *p_ref)
 {
   if( read_slot(p_ref) == NULL) return;
@@ -48,6 +51,10 @@
   if(!obj_mark_in_vt(p_obj))
     return;
   
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+  gc_gen_collector_update_marked_obj_stats_major(stats);
+#endif
   if( !object_has_ref_field(p_obj) ) return;
   
     /* scan array object */
@@ -102,6 +109,9 @@
 { 
   GC* gc = collector->gc;
   GC_Metadata* metadata = gc->metadata;
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+#endif
   
   assert(gc_match_kind(gc, FALLBACK_COLLECTION));
 
@@ -125,6 +135,10 @@
       assert(*p_ref);
       
       collector_tracestack_push(collector, p_ref);
+
+#ifdef GC_GEN_STATS
+      gc_gen_collector_update_rootset_ref_num(stats);   
+#endif
 
     } 
     root_set = pool_iterator_next(metadata->gc_rootset_pool);
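
The pattern above -- a per-collector stats pointer fetched once and updated through gc_gen_collector_update_* calls, all fenced by #ifdef GC_GEN_STATS -- recurs in every marking and compaction file touched by this commit. Its cost model is easy to see in a reduced form; the struct and macro below are illustrative stand-ins, not the commit's gen_stats.h:

// When GC_GEN_STATS is off, both the counter storage and the updates vanish
// at preprocessing time, so the hot marking loop carries no extra work.
#ifdef GC_GEN_STATS
struct Collector_Stats_Model { unsigned long long rootset_refs; };
#define UPDATE_ROOTSET_REF(stats) ((stats)->rootset_refs++)
#else
#define UPDATE_ROOTSET_REF(stats) ((void)0)
#endif

int main()
{
#ifdef GC_GEN_STATS
  Collector_Stats_Model stats = { 0 };
  UPDATE_ROOTSET_REF(&stats);
#else
  UPDATE_ROOTSET_REF((void*)0);   // expands to a no-op
#endif
  return 0;
}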

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp Thu Aug 16 21:33:02 2007
@@ -173,5 +173,3 @@
     return mspace->expected_threshold_ratio;
 }
 
-
-

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp Thu Aug 16 21:33:02 2007
@@ -30,6 +30,9 @@
 static volatile Block_Header* next_block_for_compact;
 static volatile Block_Header* next_block_for_target;
 
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
 void mspace_update_info_after_space_tuning(Mspace* mspace)
 {
   Space_Tuner *tuner = mspace->gc->tuner;
@@ -308,9 +311,25 @@
 
   //For_LOS_extend
   if(gc->tuner->kind != TRANS_NOTHING){
+
+    TRACE2("gc.process", "GC: slide compact algo start ... \n");
     collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
+    TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
+
+#ifdef GC_GEN_STATS
+    gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE);
+#endif
+
   }else if (gc_match_kind(gc, FALLBACK_COLLECTION)){
+
+    TRACE2("gc.process", "GC: slide compact algo start ... \n");
     collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);  
+    TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
+
+#ifdef GC_GEN_STATS
+    gc_gen_stats_set_los_collected_flag((GC_Gen*)gc, true);
+    gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE);
+#endif
     //IS_MOVE_COMPACT = TRUE;
     //collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
     //IS_MOVE_COMPACT = FALSE;
@@ -318,17 +337,29 @@
 
     switch(mspace->collect_algorithm){
       case MAJOR_COMPACT_SLIDE:
-        collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);    
+  TRACE2("gc.process", "GC: slide compact algo start ... \n");
+  collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); 
+  TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
+#ifdef GC_GEN_STATS
+  gc_gen_stats_set_los_collected_flag((GC_Gen*)gc, true);
+  gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE);
+#endif
         break;
         
       case MAJOR_COMPACT_MOVE:
         IS_MOVE_COMPACT = TRUE;
-        collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
-        IS_MOVE_COMPACT = FALSE;
-        break;
-  
+
+  TRACE2("gc.process", "GC: move compact algo start ... \n");
+  collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
+  TRACE2("gc.process", "\nGC: end of move compact algo ... \n");
+  IS_MOVE_COMPACT = FALSE;
+#ifdef GC_GEN_STATS
+  gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_MOVE);
+#endif
+  break;
+
       default:
-        printf("\nThe speficied major collection algorithm doesn't exist!\n");
+        DIE2("gc.collect", "The speficied major collection algorithm doesn't exist!");
         exit(0);
         break;
     }
@@ -337,8 +368,4 @@
 
   return;  
 } 
-
-
-
-
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp Thu Aug 16 21:33:02 2007
@@ -27,6 +27,10 @@
 #include "../common/hashcode.h"
 #endif
 
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
+
 struct GC_Gen;
 Space* gc_get_nos(GC_Gen* gc);
 Space* gc_get_mos(GC_Gen* gc);
@@ -46,7 +50,11 @@
   Hashcode_Buf* new_hashcode_buf = hashcode_buf_create();
   hashcode_buf_init(new_hashcode_buf);
 #endif  
- 
+
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+#endif
+
   while( curr_block ){
     void* start_pos;
     Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &start_pos);
@@ -66,7 +74,11 @@
       assert( obj_is_marked_in_vt(p_obj));
       /* we don't check if it's set, since only remaining objs from last NOS partial collection need it. */
       obj_unmark_in_oi(p_obj); 
-      
+
+#ifdef GC_GEN_STATS
+      gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, vm_object_size(p_obj));
+#endif
+
 #ifdef USE_32BITS_HASHCODE
       move_compact_process_hashcode(p_obj, curr_block->hashcode_buf, new_hashcode_buf);
 #endif 
@@ -168,6 +180,9 @@
   /* Pass 1: **************************************************
      mark all live objects in heap, and save all the slots that 
             have references  that are going to be repointed */
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: mark live objects in heap ...");
+
   unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
 
   if(!gc_match_kind(gc, FALLBACK_COLLECTION))
@@ -194,9 +209,14 @@
     num_marking_collectors++; 
   }
   while(num_marking_collectors != num_active_collectors + 1);
-  
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass1");
+
   /* Pass 2: **************************************************
      move object and set the forwarding offset table */
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: move object and set the forwarding offset table ...");
+
   atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1);
 
   mspace_move_objects(collector, mspace);   
@@ -217,9 +237,14 @@
   }
   while(num_moving_collectors != num_active_collectors + 1);
   if(!gc->collect_result) return;
-    
+  
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass2");
+
   /* Pass 3: **************************************************
      update all references whose pointed objects were moved */  
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass3: update all references ...");
+
   old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);
 
   mspace_fix_repointed_refs(collector, mspace);
@@ -234,9 +259,13 @@
   }
   while(num_fixing_collectors != num_active_collectors + 1);
 
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass3");
 
   /* Pass 4: **************************************************
      restore obj_info                                         */
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass4: restore obj_info ...");
+
   atomic_cas32( &num_restoring_collectors, 0, num_active_collectors);
   
   collector_restore_obj_info(collector);
@@ -252,16 +281,21 @@
      atomic_inc32(&num_extending_collectors);    
      while(num_extending_collectors != num_active_collectors);  
   }
-  
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass4");
+
   /* Leftover: **************************************************
    */
-  if( collector->thread_handle != 0 ) return;
-
+  if( (POINTER_SIZE_INT)collector->thread_handle != 0 ){
+    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]  finished");
+    return;
+  }
   mspace_reset_after_compaction(mspace);
   fspace_reset_for_allocation(fspace);
 
 
   gc_set_pool_clear(gc->metadata->gc_rootset_pool);
   
+  TRACE2("gc.process", "GC: collector[0]  finished");
   return;
 }
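
The pass boundaries that the new TRACE2 lines mark are implemented with a small counter-based rendezvous: each collector bumps a shared counter after its share of the pass, the last one through performs the single-threaded step and pushes the counter past the thread count, and everyone spins until that happens. A stand-alone sketch of one such pass using std::atomic -- the names and the "serial step" are illustrative, not the commit's atomic_cas32/atomic_inc32 code:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static const unsigned num_active_collectors = 4;
static std::atomic<unsigned> pass_counter(0);

static void run_pass(unsigned id)
{
  // One collector resets the counter left over from the previous pass; the
  // compare-exchange only succeeds for the first thread to see that value.
  unsigned expected = num_active_collectors + 1;
  pass_counter.compare_exchange_strong(expected, 0);

  // ... this collector's share of the pass would run here ...

  unsigned old_num = pass_counter.fetch_add(1);
  if (old_num + 1 == num_active_collectors) {
    std::printf("collector %u does the serial step for this pass\n", id);
    pass_counter.fetch_add(1);          // release the other collectors
  }
  while (pass_counter.load() != num_active_collectors + 1) { /* spin */ }
}

int main()
{
  std::vector<std::thread> collectors;
  for (unsigned i = 0; i < num_active_collectors; ++i)
    collectors.emplace_back(run_pass, i);
  for (std::thread &t : collectors) t.join();
  return 0;
}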

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp Thu Aug 16 21:33:02 2007
@@ -24,6 +24,10 @@
 #include "../los/lspace.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
+
 
 struct GC_Gen;
 Space* gc_get_nos(GC_Gen* gc);
@@ -51,7 +55,11 @@
 #ifdef USE_32BITS_HASHCODE  
   collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
 #endif
-  
+
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+#endif
+
   while( curr_block ){
     void* start_pos;
     Partial_Reveal_Object *first_obj = block_get_first_marked_obj_prefetch_next(curr_block, &start_pos);
@@ -70,6 +78,11 @@
 
       unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj);
       
+
+#ifdef GC_GEN_STATS
+      gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, obj_size);
+#endif
+
       Obj_Info_Type obj_info = get_obj_info(p_obj);
       
       unsigned int obj_size_precompute = obj_size;
@@ -417,6 +430,9 @@
     *mark all live objects in heap, and save all the slots that 
     *have references  that are going to be repointed.
     */
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: mark live objects in heap ...");
+
   unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
 
   if(gc_match_kind(gc, FALLBACK_COLLECTION))
@@ -454,6 +470,8 @@
   }
   while(num_marking_collectors != num_active_collectors + 1);
 
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass1");
+
   /* Pass 2: **************************************************
      assign target addresses for all to-be-moved objects */
   atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1);
@@ -462,13 +480,16 @@
   if(gc_match_kind(gc, FALLBACK_COLLECTION))
     fallback_clear_fwd_obj_oi(collector);
 #endif
-
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: computer target addresses for to-be-moved objects in mos and nos ...");
   mspace_compute_object_target(collector, mspace);
   
   old_num = atomic_inc32(&num_repointing_collectors);
   /*last collector's world here*/
   if( ++old_num == num_active_collectors ){
-    if(lspace->move_object) lspace_compute_object_target(collector, lspace);
+    if(lspace->move_object) {
+      TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: computer target addresses for to-be-moved objects in los ...");
+      lspace_compute_object_target(collector, lspace);
+    }
     gc->collect_result = gc_collection_result(gc);
     if(!gc->collect_result){
       num_repointing_collectors++;
@@ -480,10 +501,12 @@
   }
   while(num_repointing_collectors != num_active_collectors + 1);
   if(!gc->collect_result) return;
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2");
 
   /* Pass 3: **************************************************
     *update all references whose objects are to be moved
     */
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass3: update all references ...");
   old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);
   mspace_fix_repointed_refs(collector, mspace);
   old_num = atomic_inc32(&num_fixing_collectors);
@@ -506,8 +529,13 @@
   }
   while(num_fixing_collectors != num_active_collectors + 1);
 
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3");
+
   /* Pass 4: **************************************************
      move objects                                             */
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass4: move objects to target address ...");
+
   atomic_cas32( &num_moving_collectors, 0, num_active_collectors);
   
   mspace_sliding_compact(collector, mspace); 
@@ -515,8 +543,12 @@
   atomic_inc32(&num_moving_collectors);
   while(num_moving_collectors != num_active_collectors);
 
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4");
+
   /* Pass 5: **************************************************
      restore obj_info                                         */
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass5: restore obj_info ...");
   atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1);
   
   collector_restore_obj_info(collector);
@@ -541,8 +573,13 @@
     atomic_inc32(&num_extending_collectors);
     while(num_extending_collectors != num_active_collectors);
   }
-  if( collector->thread_handle != 0 )
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass5 ...");
+
+  if( (POINTER_SIZE_INT)collector->thread_handle != 0 ){
+    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]  finished");
     return;
+  }
   
   /* Leftover: **************************************************
    */
@@ -555,5 +592,6 @@
 
   gc_set_pool_clear(gc->metadata->gc_rootset_pool);
   
+  TRACE2("gc.process", "GC: collector[0]  finished");
   return;
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp Thu Aug 16 21:33:02 2007
@@ -20,12 +20,20 @@
 #include "../gen/gen.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
+
 static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref)
 {
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
   if(p_obj == NULL) return;
 
   if(obj_mark_in_vt(p_obj)){
+#ifdef GC_GEN_STATS
+    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+    gc_gen_collector_update_marked_obj_stats_major(stats);
+#endif
     collector_tracestack_push(collector, p_obj);
     unsigned int obj_size = vm_object_size(p_obj);
 #ifdef USE_32BITS_HASHCODE
@@ -117,6 +125,9 @@
 {
   GC* gc = collector->gc;
   GC_Metadata* metadata = gc->metadata;
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+#endif
 
   /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
   unsigned int num_active_collectors = gc->num_active_collectors;
@@ -146,6 +157,11 @@
       */
       if(obj_mark_in_vt(p_obj)){
         collector_tracestack_push(collector, p_obj);
+
+#ifdef GC_GEN_STATS 
+        gc_gen_collector_update_rootset_ref_num(stats);
+#endif
+
         unsigned int obj_size = vm_object_size(p_obj);
 #ifdef USE_32BITS_HASHCODE
         obj_size += (hashcode_is_set(p_obj))?GC_OBJECT_ALIGNMENT:0;
@@ -210,5 +226,6 @@
 
 void trace_obj_in_space_tune_marking(Collector *collector, void *p_obj)
 {
+  obj_mark_in_vt((Partial_Reveal_Object*)p_obj);
   trace_object(collector, (Partial_Reveal_Object *)p_obj);
 }
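
trace_obj_in_space_tune_marking() above now marks the object in its vtable word before tracing it, presumably so that objects reached through this entry point (used when re-tracing from finalizer and weak-reference processing) carry the same mark bit as objects reached from roots. The underlying primitive is a test-and-set mark; a reduced model of it, with mark_in_vt_model standing in for obj_mark_in_vt:

#include <atomic>

// Model of a mark kept in a bit of the object's header/vtable word: the
// return value tells the caller whether it was the first to mark the object
// and therefore should go on to scan its fields.
static bool mark_in_vt_model(std::atomic<unsigned long>& vt_word)
{
  const unsigned long MARK_BIT = 0x1;
  unsigned long old_word = vt_word.fetch_or(MARK_BIT);
  return (old_word & MARK_BIT) == 0;
}

int main()
{
  std::atomic<unsigned long> vt_word(0x1000);    // made-up vtable pointer bits
  bool first = mark_in_vt_model(vt_word);        // true: newly marked
  bool again = mark_in_vt_model(vt_word);        // false: already marked
  return (first && !again) ? 0 : 1;
}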

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp Thu Aug 16 21:33:02 2007
@@ -17,29 +17,19 @@
 
 #include "../common/gc_common.h"
 
-#ifdef ONLY_SSPACE_IN_HEAP
+#ifdef USE_MARK_SWEEP_GC
 
 #include "gc_ms.h"
-#include "port_sysinfo.h"
-
 #include "../finalizer_weakref/finalizer_weakref.h"
 #include "../common/compressed_ref.h"
 #ifdef USE_32BITS_HASHCODE
 #include "../common/hashcode.h"
 #endif
 
-static void gc_ms_get_system_info(GC_MS *gc_ms)
-{
-  gc_ms->_machine_page_size_bytes = (unsigned int)port_vmem_page_sizes()[0];
-  gc_ms->_num_processors = port_CPUs_number();
-  gc_ms->_system_alloc_unit = vm_get_system_alloc_unit();
-  SPACE_ALLOC_UNIT = max(gc_ms->_system_alloc_unit, GC_BLOCK_SIZE_BYTES);
-}
 
 void gc_ms_initialize(GC_MS *gc_ms, POINTER_SIZE_INT min_heap_size, POINTER_SIZE_INT max_heap_size)
 {
   assert(gc_ms);
-  gc_ms_get_system_info(gc_ms);
   
   max_heap_size = round_down_to_size(max_heap_size, SPACE_ALLOC_UNIT);
   min_heap_size = round_up_to_size(min_heap_size, SPACE_ALLOC_UNIT);
@@ -83,7 +73,4 @@
 {
 }
 
-unsigned int gc_ms_get_processor_num(GC_MS *gc)
-{ return gc->_num_processors; }
-
-#endif // ONLY_SSPACE_IN_HEAP
+#endif // USE_MARK_SWEEP_GC

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h Thu Aug 16 21:33:02 2007
@@ -18,7 +18,7 @@
 #ifndef _GC_MS_H_
 #define _GC_MS_H_
 
-#ifdef ONLY_SSPACE_IN_HEAP
+#ifdef USE_MARK_SWEEP_GC
 
 #include "sspace.h"
 
@@ -64,14 +64,14 @@
   
   //For_LOS_extend
   Space_Tuner *tuner;
-  /* END of GC --> */
-  
-  Sspace *sspace;
   
   /* system info */
   unsigned int _system_alloc_unit;
   unsigned int _machine_page_size_bytes;
   unsigned int _num_processors;
+  /* END of GC --> */
+  
+  Sspace *sspace;
   
 } GC_MS;
 
@@ -101,9 +101,8 @@
 void gc_ms_destruct(GC_MS *gc);
 void gc_ms_reclaim_heap(GC_MS *gc);
 void gc_ms_iterate_heap(GC_MS *gc);
-unsigned int gc_ms_get_processor_num(GC_MS *gc);
 
 
-#endif // ONLY_SSPACE_IN_HEAP
+#endif // USE_MARK_SWEEP_GC
 
 #endif // _GC_MS_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp Thu Aug 16 21:33:02 2007
@@ -58,7 +58,7 @@
   
   sspace_init_chunks(sspace);
 
-#ifdef ONLY_SSPACE_IN_HEAP
+#ifdef USE_MARK_SWEEP_GC
   gc_ms_set_sspace((GC_MS*)gc, sspace);
 #else
   gc_set_mos((GC_Gen*)gc, (Space*)sspace);
@@ -141,6 +141,7 @@
   STD_FREE(local_chunks);
 }
 
+extern void sspace_decide_compaction_need(Sspace *sspace);
 extern void mark_sweep_sspace(Collector *collector);
 
 void sspace_collection(Sspace *sspace) 
@@ -152,10 +153,15 @@
   sspace_alloc_info_summary();
 #endif
 #ifdef SSPACE_CHUNK_INFO
-  sspace_chunks_info(sspace, TRUE);
+  sspace_chunks_info(sspace, FALSE);
 #endif
 
+  sspace_decide_compaction_need(sspace);
+  if(sspace->need_compact)
+    gc->collect_kind = SWEEP_COMPACT_GC;
+  //printf("\n\n>>>>>>>>%s>>>>>>>>>>>>\n\n", sspace->need_compact ? "SWEEP COMPACT" : "MARK SWEEP");
 #ifdef SSPACE_VERIFY
+  sspace_verify_before_collection(gc);
   sspace_verify_vtable_mark(gc);
 #endif
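
sspace_collection() above now asks sspace_decide_compaction_need() whether the sweep space is fragmented enough to warrant compaction, and switches collect_kind to SWEEP_COMPACT_GC if so. The heuristic itself is not part of this hunk; the sketch below is purely illustrative of such a decision, and the threshold and names are assumptions, not the commit's logic:

// Illustrative only: compact when the fraction of free space trapped in
// fragmented chunks crosses a threshold, otherwise run plain mark-sweep.
enum Sspace_Collect_Kind { MODEL_MARK_SWEEP, MODEL_SWEEP_COMPACT };

static Sspace_Collect_Kind decide_compaction_need(double fragmented_free_ratio)
{
  const double COMPACT_RATIO_THRESHOLD = 0.30;   // assumed tuning knob
  return fragmented_free_ratio > COMPACT_RATIO_THRESHOLD
           ? MODEL_SWEEP_COMPACT
           : MODEL_MARK_SWEEP;
}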
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h Thu Aug 16 21:33:02 2007
@@ -57,6 +57,7 @@
 
   /* END of Space --> */
   
+  Boolean need_compact;
   Size_Segment **size_segments;
   Pool ***pfc_pools;
   Free_Chunk_List *aligned_free_chunk_lists;
@@ -81,7 +82,7 @@
 POINTER_SIZE_INT sspace_free_memory_size(Sspace *sspace);
 
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
 #define gc_get_sspace(gc) ((Sspace*)gc_get_mos((GC_Gen*)(gc)))
 #else
 #define gc_get_sspace(gc) (gc_ms_get_sspace((GC_MS*)(gc)));


