harmony-commits mailing list archives

From: x..@apache.org
Subject: svn commit: r650422 [1/2] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ los/ mark_compact/ mark_sweep/ thread/ trace_forward/ verify/
Date: Tue, 22 Apr 2008 08:52:41 GMT
Author: xli
Date: Tue Apr 22 01:52:29 2008
New Revision: 650422

URL: http://svn.apache.org/viewvc?rev=650422&view=rev
Log:
HARMONY-4325: 06_tick_code_refactor_and_improvement.patch improves the collection scheduler of Tick and refactors the Tick code to make it more modular.
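
The new concurrent_collection_scheduler (added below) can trigger concurrent marking either after a time delay since the last collection end or once newly allocated space crosses a threshold (see time_to_start_mark() and space_to_start_mark() in the diff). The following standalone sketch shows that decision logic in simplified form; the type, field, and function names here are illustrative stand-ins, not the actual DRLVM structures:

    // Illustrative sketch only -- simplified stand-in types, not the DRLVM API.
    #include <cstdint>
    #include <cstdio>

    enum SchedulerKind { SCHEDULER_NIL = 0x0, TIME_BASED = 0x1, SPACE_BASED = 0x2 };

    struct ConSchedulerSketch {
      unsigned int kind;               /* bitmask of enabled schedulers           */
      int64_t  time_delay_to_start;    /* delay after last collection end (usec)  */
      uint64_t space_threshold;        /* new-object bytes that trigger marking   */

      bool time_to_start(int64_t now, int64_t last_collection_end) const {
        if (!(kind & TIME_BASED)) return false;
        return (now - last_collection_end) > time_delay_to_start;
      }
      bool space_to_start(uint64_t size_new_obj) const {
        if (!(kind & SPACE_BASED)) return false;
        return size_new_obj > space_threshold;
      }
      /* Concurrent marking starts as soon as either trigger fires. */
      bool need_start_con_mark(int64_t now, int64_t last_collection_end,
                               uint64_t size_new_obj) const {
        return time_to_start(now, last_collection_end) || space_to_start(size_new_obj);
      }
    };

    int main() {
      ConSchedulerSketch s = { TIME_BASED | SPACE_BASED, 50000, 32u * 1024 * 1024 };
      /* 60 ms since the last collection, 8 MB newly allocated: the time trigger fires. */
      std::printf("start concurrent mark: %d\n",
                  s.need_start_con_mark(60000, 0, 8u * 1024 * 1024));
      return 0;
    }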

Added:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp   (with props)
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h   (with props)
Modified:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp Tue Apr 22 01:52:29 2008
@@ -16,263 +16,39 @@
  */
 
 #include "gc_common.h"
-#include "../gen/gen.h"
-#include "../mark_sweep/gc_ms.h"
-#include "../mark_sweep/wspace.h"
 #include "collection_scheduler.h"
+#include "concurrent_collection_scheduler.h"
 #include "gc_concurrent.h"
-#include "../thread/marker.h"
-#include "../verify/verify_live_heap.h"
-
-#define NUM_TRIAL_COLLECTION 10
-#define MAX_DELAY_TIME 0x7fFfFfFf
-#define MAX_TRACING_RATE 2
-
-static int64 time_delay_to_start_mark = MAX_DELAY_TIME;
 
 void collection_scheduler_initialize(GC* gc)
 {
-  
-  Collection_Scheduler* collection_scheduler = (Collection_Scheduler*) STD_MALLOC(sizeof(Collection_Scheduler));
-  assert(collection_scheduler);
-  memset(collection_scheduler, 0, sizeof(Collection_Scheduler));
-  
-  collection_scheduler->gc = gc;
-  gc->collection_scheduler = collection_scheduler;
-  time_delay_to_start_mark = MAX_DELAY_TIME;
-  
+  if(gc_is_specify_con_gc()) con_collection_scheduler_initialize(gc);
   return;
 }
 void collection_scheduler_destruct(GC* gc)
 {
-  STD_FREE(gc->collection_scheduler);
-}
-
-Boolean gc_need_start_concurrent_mark(GC* gc)
-{
-  if(!USE_CONCURRENT_MARK) return FALSE;
-  //FIXME: GEN mode also needs the support of starting mark after thread resume.
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
-  if(gc_is_concurrent_mark_phase() || gc_mark_is_concurrent()) return FALSE;
-
-  int64 time_current = time_now();
-  if( time_current - get_collection_end_time() > time_delay_to_start_mark) 
-    return TRUE;
-  else 
-    return FALSE;
-#else
-  /*FIXME: concurrent mark is not supported in GC_GEN*/
-  assert(0);
-  return FALSE;
-#endif
-}
-
-Boolean gc_need_start_concurrent_sweep(GC* gc)
-{
-  if(!USE_CONCURRENT_SWEEP) return FALSE;
-
-  if(gc_sweep_is_concurrent()) return FALSE;
-
-  /*if mark is concurrent and STW GC has not started, we should start concurrent sweep*/
-  if(gc_mark_is_concurrent() && !gc_is_concurrent_mark_phase(gc))
-    return TRUE;
-  else
-    return FALSE;
-}
-
-Boolean gc_need_reset_status(GC* gc)
-{
-  if(gc_sweep_is_concurrent() && !gc_is_concurrent_sweep_phase(gc))
-    return TRUE;
-  else
-    return FALSE;
-}
-
-Boolean gc_need_prepare_rootset(GC* gc)
-{
-  /*TODO: support on-the-fly root set enumeration.*/
-  return FALSE;
+  if(gc_is_specify_con_gc()) con_collection_scheduler_destruct(gc);
+  return;
 }
 
-void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_mark)
+void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
 {
-  //FIXME: support GEN GC.
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
-
-  Collection_Scheduler* collection_scheduler = gc->collection_scheduler;   
-  Space* space = NULL;
-
-  space = (Space*) gc_get_wspace(gc);
-
-  Space_Statistics* space_stat = space->space_statistic;
-  
-  unsigned int slot_index = collection_scheduler->last_slot_index_in_window;
-  unsigned int num_slot   = collection_scheduler->num_slot_in_window;
-  
-  collection_scheduler->num_obj_traced_window[slot_index] = space_stat->num_live_obj;
-  collection_scheduler->size_alloced_window[slot_index] = space_stat->size_new_obj;
-  collection_scheduler->space_utilization_rate[slot_index] = space_stat->space_utilization_ratio;
-
-  collection_scheduler->last_mutator_time = time_mutator;
-  collection_scheduler->last_collector_time = time_mark;
-  INFO2("gc.con","last_size_free_space"<<(space_stat->last_size_free_space)<<"  new obj num "<<collection_scheduler->size_alloced_window[slot_index]<<" ");
-  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
-    return;
-  INFO2("gc.con","num_live_obj "<<(space_stat->num_live_obj)<<"  last_size_free_space"<<(space_stat->last_size_free_space)<<" ");
-  
-  collection_scheduler->alloc_rate_window[slot_index] 
-    = time_mutator == 0 ? 0 : (float)collection_scheduler->size_alloced_window[slot_index] / time_mutator; 
-      
-  collection_scheduler->trace_rate_window[slot_index]
-    = time_mark == 0 ? MAX_TRACING_RATE : (float)collection_scheduler->num_obj_traced_window[slot_index] / time_mark;
-  
-  INFO2("gc.con","mutator time "<<(time_mutator>>10)<<"  collection time "<<(time_mark>>10)<<" ");
-  
-  collection_scheduler->num_slot_in_window = num_slot >= STATISTICS_SAMPLING_WINDOW_SIZE ? num_slot : (++num_slot);
-  collection_scheduler->last_slot_index_in_window = (++slot_index)% STATISTICS_SAMPLING_WINDOW_SIZE;
-
-  float sum_alloc_rate = 0;
-  float sum_trace_rate = 0;
-  float sum_space_util_ratio = 0;
-
-  unsigned int i;
-  for(i = 0; i < collection_scheduler->num_slot_in_window; i++){
-    sum_alloc_rate += collection_scheduler->alloc_rate_window[i];
-    sum_trace_rate += collection_scheduler->trace_rate_window[i];
-    sum_space_util_ratio += collection_scheduler->space_utilization_rate[i];
+  if(gc_is_specify_con_gc()){
+    gc_update_con_collection_scheduler(gc, time_mutator, time_collection);
   }
-
-  TRACE2("gc.con","Allocation Rate: ");
-  for(i = 0; i < collection_scheduler->num_slot_in_window; i++){
-    TRACE2("gc.con",i+1<<"  "<<collection_scheduler->alloc_rate_window[i]);
-  }
-  
-  TRACE2("gc.con","Tracing Rate: ");
-
-  for(i = 0; i < collection_scheduler->num_slot_in_window; i++){
-    TRACE2("gc.con",i+1<<"  "<<collection_scheduler->trace_rate_window[i]);
-  }
-
-  float average_alloc_rate = sum_alloc_rate / collection_scheduler->num_slot_in_window;
-  float average_trace_rate = sum_trace_rate / collection_scheduler->num_slot_in_window;
-  float average_space_util_ratio = sum_space_util_ratio / collection_scheduler->num_slot_in_window;
-
-  INFO2("gc.con","averAllocRate: "<<average_alloc_rate<<"averTraceRate: "<<average_trace_rate<<" ");
-
-  if(average_alloc_rate == 0 ){
-    time_delay_to_start_mark = 0;
-  }else if(average_trace_rate == 0){
-    time_delay_to_start_mark = MAX_DELAY_TIME;
-  }else{
-    float time_alloc_expected = (space_stat->size_free_space * average_space_util_ratio) / average_alloc_rate;
-    float time_trace_expected = space_stat->num_live_obj / average_trace_rate;
-
-    INFO2("gc.con","[Concurrent GC] expected alloc time "<<time_alloc_expected<<"  expected collect time  "<<time_trace_expected<<" ");
-    if(time_alloc_expected > time_trace_expected){
-      if(gc_concurrent_match_algorithm(OTF_REM_OBJ_SNAPSHOT_ALGO)||gc_concurrent_match_algorithm(OTF_REM_NEW_TARGET_ALGO)){
-        collection_scheduler->time_delay_to_start_mark = (int64)((time_alloc_expected - time_trace_expected)*0.65);
-      }else if(gc_concurrent_match_algorithm(MOSTLY_CONCURRENT_ALGO)){
-        collection_scheduler->time_delay_to_start_mark = (int64)(time_mutator* 0.5);
-      }
-      
-    }else{
-      collection_scheduler->time_delay_to_start_mark = 0;
-    }
-
-    time_delay_to_start_mark = collection_scheduler->time_delay_to_start_mark;
-  }
-  INFO2("gc.con","[Concurrent GC] concurrent marking will delay "<<(unsigned int)(time_delay_to_start_mark>>10)<<" ms ");
-  //[DEBUG] set to 0 for debugging.
-  //time_delay_to_start_mark = 0; 
-#endif  
   return;
-  
 }
 
-unsigned int gc_decide_marker_number(GC* gc)
+Boolean gc_sched_collection(GC* gc, unsigned int gc_cause)
 {
-  unsigned int num_active_marker;
-  Collection_Scheduler* collection_scheduler = gc->collection_scheduler;   
-
-  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION){
-    /*Start trial cycle, collection set to 1 in trial cycle and */
-    num_active_marker = 1;
+  /*collection scheduler only schedules concurrent collection now.*/
+  if(GC_CAUSE_CONCURRENT_GC == gc_cause){
+    assert(gc_is_specify_con_gc());
+    return gc_sched_con_collection(gc, gc_cause);
   }else{
-    num_active_marker = collection_scheduler->last_marker_num;
-    int64 c_time = collection_scheduler->last_collector_time;
-    int64 m_time = collection_scheduler->last_mutator_time;
-    int64 d_time = collection_scheduler->time_delay_to_start_mark;
-
-    if(num_active_marker == 0) num_active_marker = 1;
-
-    if((c_time + d_time) > m_time || (float)d_time < (m_time * 0.25)){      
-      INFO2("gc.con","[Concurrent GC] increase marker number.");
-      num_active_marker ++;
-      if(num_active_marker > gc->num_markers) num_active_marker = gc->num_markers;
-    }else if((float)d_time > (m_time * 0.6)){
-      INFO2("gc.con","[Concurrent GC] decrease marker number.");
-      num_active_marker --;
-      if(num_active_marker == 0)  num_active_marker = 1;
-    }
-    
-    INFO2("gc.con","[Concurrent GC] ctime  "<<(unsigned)(c_time>>10)<<"  mtime  "<<(unsigned)(m_time>>10)<<"  dtime  "<<(unsigned)(d_time>>10));
-    INFO2("gc.con","[Concurrent GC] marker num : "<<num_active_marker<<" ");
+    return FALSE;
   }
-
-  
-  collection_scheduler->last_marker_num = num_active_marker;
-  return num_active_marker;
 }
 
-Boolean gc_try_schedule_collection(GC* gc, unsigned int gc_cause)
-{
-
-  if(!try_lock(gc->collection_scheduler_lock)) return FALSE;
-
-  gc_check_concurrent_phase(gc);
-
-  if(gc_need_prepare_rootset(gc)){
-    /*TODO:Enable concurrent rootset enumeration.*/
-    assert(0);
-  }
-  
-  if(gc_need_start_concurrent_mark(gc)){
-    vm_gc_lock_enum();    
-    int64 pause_start = time_now();
-    INFO2("gc.con", "[Concurrent GC] concurrent mark start ...");
-    gc_start_concurrent_mark(gc);
-    vm_gc_unlock_enum();
-    INFO2("gc.con","[Concurrent GC] pause time of concurrent enumeration:  "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms \n");
-    unlock(gc->collection_scheduler_lock);
-    return TRUE;
-  }
-
-  if(gc_need_start_concurrent_sweep(gc)){
-    gc->num_collections++;
-    INFO2("gc.con", "[Concurrent GC] collection number:"<< gc->num_collections<<" ");
-    gc_start_concurrent_sweep(gc);
-    unlock(gc->collection_scheduler_lock);
-    return TRUE;
-  }
-
-  if(gc_need_reset_status(gc)){
-    int64 pause_start = time_now();
-    vm_gc_lock_enum();
-    int disable_count = hythread_reset_suspend_disable();    
-    gc_prepare_rootset(gc);
-    gc_reset_after_concurrent_collection(gc);
-    gc_start_mutator_time_measurement(gc);
-    set_collection_end_time();
-    vm_resume_threads_after();
-    hythread_set_suspend_disable(disable_count);
-    vm_gc_unlock_enum();
-    INFO2("gc.con","[Concurrent GC] pause time after concurrent GC:  "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms \n");
-    unlock(gc->collection_scheduler_lock);
-    return TRUE;
-  }
-  unlock(gc->collection_scheduler_lock);
-  return FALSE;
-
-}
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h Tue Apr 22 01:52:29 2008
@@ -18,40 +18,20 @@
 #ifndef _COLLECTION_SCHEDULER_H_
 #define _COLLECTION_SCHEDULER_H_
 
-#define STATISTICS_SAMPLING_WINDOW_SIZE 5
-
 typedef struct Collection_Scheduler {
   /*common field*/
-  GC* gc;
-  
-  /*mark schedule */
-  int64 time_delay_to_start_mark;
-  
-  int64 last_mutator_time;
-  int64 last_collector_time;
-
-  unsigned int last_marker_num;
-
-  unsigned int num_slot_in_window;
-  unsigned int last_slot_index_in_window;
-  
-  float alloc_rate_window[STATISTICS_SAMPLING_WINDOW_SIZE];
-  float trace_rate_window[STATISTICS_SAMPLING_WINDOW_SIZE];
-  float space_utilization_rate[STATISTICS_SAMPLING_WINDOW_SIZE];
-  POINTER_SIZE_INT num_obj_traced_window[STATISTICS_SAMPLING_WINDOW_SIZE];
-  POINTER_SIZE_INT size_alloced_window[STATISTICS_SAMPLING_WINDOW_SIZE];
+  GC* gc;  
 } Collection_Scheduler;
 
 void collection_scheduler_initialize(GC* gc);
 void collection_scheduler_destruct(GC* gc);
 
-void gc_update_collection_scheduler(GC* gc, int64 mutator_time, int64 mark_time);
-Boolean gc_try_schedule_collection(GC* gc, unsigned int gc_cause);
-Boolean gc_need_start_concurrent_mark(GC* gc);
-unsigned int gc_decide_marker_number(GC* gc);
-
+void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
+Boolean gc_sched_collection(GC* gc, unsigned int gc_cause);
 
 #endif
+
+
 
 
 

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp?rev=650422&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp Tue Apr 22 01:52:29 2008
@@ -0,0 +1,373 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "gc_common.h"
+#include "../gen/gen.h"
+#include "../mark_sweep/gc_ms.h"
+#include "../mark_sweep/wspace.h"
+#include "collection_scheduler.h"
+#include "concurrent_collection_scheduler.h"
+#include "gc_concurrent.h"
+#include "../thread/marker.h"
+#include "../verify/verify_live_heap.h"
+
+#define NUM_TRIAL_COLLECTION 2
+#define MIN_DELAY_TIME 0x0
+#define MAX_DELAY_TIME 0x7fFfFfFf
+#define MAX_TRACING_RATE 100
+#define MIN_TRACING_RATE 1
+#define MAX_SPACE_THRESHOLD (POINTER_SIZE_INT)((POINTER_SIZE_INT)1<<(BITS_OF_POINTER_SIZE_INT-1))
+#define MIN_SPACE_THRESHOLD 0
+
+enum CC_Scheduler_Kind{
+  SCHEDULER_NIL = 0x00,
+  TIME_BASED_SCHEDULER = 0x01,
+  SPACE_BASED_SCHEDULER = 0x02
+};
+
+static unsigned int cc_scheduler_kind = SCHEDULER_NIL;
+
+void gc_enable_time_scheduler()
+{ cc_scheduler_kind |= TIME_BASED_SCHEDULER; }
+
+void gc_enable_space_scheduler()
+{ cc_scheduler_kind |= SPACE_BASED_SCHEDULER; }
+
+Boolean gc_use_time_scheduler()
+{ return cc_scheduler_kind & TIME_BASED_SCHEDULER; }
+
+Boolean gc_use_space_scheduler()
+{ return cc_scheduler_kind & SPACE_BASED_SCHEDULER; }
+
+static int64 time_delay_to_start_mark = MAX_DELAY_TIME;
+static POINTER_SIZE_INT space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
+
+void con_collection_scheduler_initialize(GC* gc)
+{
+  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*) STD_MALLOC(sizeof(Con_Collection_Scheduler));
+  assert(cc_scheduler);
+  memset(cc_scheduler, 0, sizeof(Con_Collection_Scheduler));
+  
+  cc_scheduler->gc = gc;
+  gc->collection_scheduler = (Collection_Scheduler*)cc_scheduler;
+  time_delay_to_start_mark = MAX_DELAY_TIME;
+  space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
+  
+  return;
+}
+
+void con_collection_scheduler_destruct(GC* gc)
+{
+  STD_FREE(gc->collection_scheduler);
+}
+
+void gc_decide_cc_scheduler_kind(char* cc_scheduler)
+{
+  string_to_upper(cc_scheduler);
+  if(!strcmp(cc_scheduler, "time")){
+    gc_enable_time_scheduler();
+  }else if(!strcmp(cc_scheduler, "space")){
+    gc_enable_space_scheduler();
+  }else if(!strcmp(cc_scheduler, "all")){
+    gc_enable_time_scheduler();
+    gc_enable_space_scheduler();
+  }
+}
+
+void gc_set_default_cc_scheduler_kind()
+{
+  gc_enable_time_scheduler();
+}
+
+static Boolean time_to_start_mark(GC* gc)
+{
+  if(!gc_use_time_scheduler()) return FALSE;
+  
+  int64 time_current = time_now();
+  return (time_current - get_collection_end_time()) > time_delay_to_start_mark;
+}
+
+static Boolean space_to_start_mark(GC* gc)
+{
+  if(!gc_use_space_scheduler()) return FALSE;
+
+  POINTER_SIZE_INT size_new_obj = gc_get_new_object_size(gc,FALSE);
+  return (size_new_obj > space_threshold_to_start_mark); 
+}
+
+static Boolean gc_need_start_con_mark(GC* gc)
+{
+  if(!gc_is_specify_con_mark() || gc_mark_is_concurrent()) return FALSE;
+  
+  if(time_to_start_mark(gc) || space_to_start_mark(gc)) 
+    return TRUE;
+  else 
+    return FALSE;
+}
+
+static Boolean gc_need_start_con_sweep(GC* gc)
+{
+  if(!gc_is_specify_con_sweep() || gc_sweep_is_concurrent()) return FALSE;
+
+  /*if mark is concurrent and STW GC has not started, we should start concurrent sweep*/
+  if(gc_mark_is_concurrent() && !gc_con_is_in_marking(gc))
+    return TRUE;
+  else
+    return FALSE;
+}
+
+static Boolean gc_need_reset_after_con_collect(GC* gc)
+{
+  if(gc_sweep_is_concurrent() && !gc_con_is_in_sweeping(gc))
+    return TRUE;
+  else
+    return FALSE;
+}
+
+static Boolean gc_need_start_con_enum(GC* gc)
+{
+  /*TODO: support on-the-fly root set enumeration.*/
+  return FALSE;
+}
+
+#define SPACE_UTIL_RATIO_CORRETION 0.2f
+#define TIME_CORRECTION_OTF_MARK 0.65f
+#define TIME_CORRECTION_OTF_MARK_SWEEP 1.0f
+#define TIME_CORRECTION_MOSTLY_MARK 0.5f
+
+static void con_collection_scheduler_update_stat(GC* gc, int64 time_mutator, int64 time_collection)
+{  
+  Space* space = NULL;
+  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
+
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
+  space = (Space*) gc_get_wspace(gc);
+#endif  
+  if(!space) return;
+
+  Space_Statistics* space_stat = space->space_statistic;
+  
+  unsigned int slot_index = cc_scheduler->last_window_index;
+  unsigned int num_slot   = cc_scheduler->num_window_slots;
+  
+  cc_scheduler->trace_load_window[slot_index] = space_stat->num_live_obj;
+  cc_scheduler->alloc_load_window[slot_index] = space_stat->size_new_obj;
+  cc_scheduler->space_utilization_ratio[slot_index] = space_stat->space_utilization_ratio;
+
+  cc_scheduler->last_mutator_time = time_mutator;
+  cc_scheduler->last_collector_time = time_collection;
+  
+  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
+    return;
+  
+  cc_scheduler->alloc_rate_window[slot_index] 
+    = time_mutator == 0 ? 0 : (float)cc_scheduler->alloc_load_window[slot_index] / time_mutator; 
+
+  if(gc_mark_is_concurrent()){
+    cc_scheduler->trace_rate_window[slot_index]
+      = time_collection == 0 ? MAX_TRACING_RATE : (float)cc_scheduler->trace_load_window[slot_index] / time_collection;
+  }else{
+    cc_scheduler->trace_rate_window[slot_index] = MIN_TRACING_RATE;
+  }
+
+  cc_scheduler->num_window_slots = num_slot >= STAT_SAMPLE_WINDOW_SIZE ? num_slot : (++num_slot);
+  cc_scheduler->last_window_index = (++slot_index)% STAT_SAMPLE_WINDOW_SIZE;  
+}
+
+static void con_collection_scheduler_update_start_point(GC* gc, int64 time_mutator, int64 time_collection)
+{
+  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
+    return;
+
+  Space* space = NULL;
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
+  space = (Space*) gc_get_wspace(gc);
+#endif  
+  if(!space) return;
+
+  Space_Statistics* space_stat = space->space_statistic;
+
+  float sum_alloc_rate = 0;
+  float sum_trace_rate = 0;
+  float sum_space_util_ratio = 0;
+
+  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;   
+  
+  int64 time_this_collection_correction = 0;
+#if 0
+  float space_util_ratio = space_stat->space_utilization_ratio;
+  if(space_util_ratio > (1-SPACE_UTIL_RATIO_CORRETION)){
+    time_this_collection_correction = 0;
+  }else{
+    time_this_collection_correction 
+      = (int64)(((1 - space_util_ratio - SPACE_UTIL_RATIO_CORRETION)/(space_util_ratio))* time_mutator);
+  }
+#endif
+
+  unsigned int i;
+  for(i = 0; i < cc_scheduler->num_window_slots; i++){
+    sum_alloc_rate += cc_scheduler->alloc_rate_window[i];
+    sum_trace_rate += cc_scheduler->trace_rate_window[i];
+    sum_space_util_ratio += cc_scheduler->space_utilization_ratio[i];
+  }
+
+  TRACE2("gc.con.cs","Allocation Rate: ");
+  for(i = 0; i < cc_scheduler->num_window_slots; i++){
+    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->alloc_rate_window[i]);
+  }
+
+  TRACE2("gc.con.cs","Tracing Rate: ");
+  for(i = 0; i < cc_scheduler->num_window_slots; i++){
+    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->trace_rate_window[i]);
+  }
+
+  float average_alloc_rate = sum_alloc_rate / cc_scheduler->num_window_slots;
+  float average_trace_rate = sum_trace_rate / cc_scheduler->num_window_slots;
+  float average_space_util_ratio = sum_space_util_ratio / cc_scheduler->num_window_slots;
+
+  TRACE2("gc.con.cs","averAllocRate: "<<average_alloc_rate<<"averTraceRate: "<<average_trace_rate<<"  average_space_util_ratio: "<<average_space_util_ratio<<" ");
+
+  if(average_alloc_rate == 0 ){
+    time_delay_to_start_mark = MIN_DELAY_TIME;
+    space_threshold_to_start_mark = MIN_SPACE_THRESHOLD;
+  }else if(average_trace_rate == 0){
+    time_delay_to_start_mark = MAX_DELAY_TIME;
+    space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
+  }else{
+    float time_alloc_expected = (space_stat->size_free_space * average_space_util_ratio) / average_alloc_rate;
+    float time_trace_expected = space_stat->num_live_obj / average_trace_rate;
+    TRACE2("gc.con.cs","[GC][Con] expected alloc time "<<time_alloc_expected<<"  expected collect time  "<<time_trace_expected<<" ");
+
+    if(time_alloc_expected > time_trace_expected){
+      if(gc_is_kind(ALGO_CON_OTF_OBJ)||gc_is_kind(ALGO_CON_OTF_REF)){
+        float time_correction = gc_sweep_is_concurrent()? TIME_CORRECTION_OTF_MARK_SWEEP : TIME_CORRECTION_OTF_MARK;
+        cc_scheduler->time_delay_to_start_mark = (int64)((time_alloc_expected - time_trace_expected)*time_correction);
+      }else if(gc_is_kind(ALGO_CON_MOSTLY)){
+        cc_scheduler->time_delay_to_start_mark = (int64)(time_mutator* TIME_CORRECTION_MOSTLY_MARK);
+      }
+    }else{
+      cc_scheduler->time_delay_to_start_mark = MIN_DELAY_TIME;
+    }
+
+    cc_scheduler->space_threshold_to_start_mark = 
+      (POINTER_SIZE_INT)(space_stat->size_free_space * ((time_alloc_expected - time_trace_expected) / time_alloc_expected));
+
+    time_delay_to_start_mark = cc_scheduler->time_delay_to_start_mark + time_this_collection_correction;
+    space_threshold_to_start_mark = cc_scheduler->space_threshold_to_start_mark;
+  }
+  TRACE2("gc.con.cs","[GC][Con] concurrent marking will delay "<<(unsigned int)(time_delay_to_start_mark>>10)<<" ms ");
+  TRACE2("gc.con.cs","[GC][Con] time correction "<<(unsigned int)(time_this_collection_correction>>10)<<" ms ");
+
+}
+
+void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
+{
+  assert(gc_is_specify_con_gc());
+  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) return;
+  
+  con_collection_scheduler_update_stat(gc, time_mutator, time_collection);
+  con_collection_scheduler_update_start_point(gc, time_mutator, time_collection);
+
+  return;
+}
+
+Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause)
+{
+  if(!try_lock(gc->lock_collect_sched)) return FALSE;
+  vm_gc_lock_enum();    
+
+  gc_try_finish_con_phase(gc);
+
+  if(gc_need_start_con_enum(gc)){
+    /*TODO:Concurrent rootset enumeration.*/
+    assert(0);
+  }
+  
+  if(gc_need_start_con_mark(gc)){
+    INFO2("gc.con.info", "[GC][Con] concurrent mark start ...");
+    gc_start_con_mark(gc);
+    vm_gc_unlock_enum();
+    unlock(gc->lock_collect_sched);
+    return TRUE;
+  }
+
+  if(gc_need_start_con_sweep(gc)){
+    gc->num_collections++;
+    INFO2("gc.con.info", "[GC][Con] collection number:"<< gc->num_collections<<" ");
+    gc_start_con_sweep(gc);
+    vm_gc_unlock_enum();
+    unlock(gc->lock_collect_sched);
+    return TRUE;
+  }
+
+  if(gc_need_reset_after_con_collect(gc)){
+    int64 pause_start = time_now();
+    int disable_count = vm_suspend_all_threads();
+    gc_reset_after_con_collect(gc);
+    gc_start_mutator_time_measure(gc);
+    set_collection_end_time();
+    vm_resume_all_threads(disable_count);
+    vm_gc_unlock_enum();
+    INFO2("gc.con.time","[GC][Con]pause(reset collection):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
+    unlock(gc->lock_collect_sched);
+    return TRUE;
+  }
+  vm_gc_unlock_enum();
+  unlock(gc->lock_collect_sched);
+  return FALSE;
+}
+
+extern unsigned int NUM_MARKERS;
+
+unsigned int gc_decide_marker_number(GC* gc)
+{
+  unsigned int num_active_marker;
+  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;   
+
+  /*If the number of markers is specified, just return the specified value.*/
+  if(NUM_MARKERS != 0) return NUM_MARKERS;
+
+  /*If the number of markers isn't specified, we decide the value dynamically.*/
+  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION){
+    /*During the trial cycle the number of active markers is fixed at 1.*/
+    num_active_marker = 1;
+  }else{
+    num_active_marker = cc_scheduler->last_marker_num;
+    int64 c_time = cc_scheduler->last_collector_time;
+    int64 m_time = cc_scheduler->last_mutator_time;
+    int64 d_time = cc_scheduler->time_delay_to_start_mark;
+
+    if(num_active_marker == 0) num_active_marker = 1;
+
+    if((c_time + d_time) > m_time || (float)d_time < (m_time * 0.25)){      
+      TRACE2("gc.con.cs","[GC][Con] increase marker number.");
+      num_active_marker ++;
+      if(num_active_marker > gc->num_markers) num_active_marker = gc->num_markers;
+    }else if((float)d_time > (m_time * 0.6)){
+      TRACE2("gc.con.cs","[GC][Con] decrease marker number.");
+      num_active_marker --;
+      if(num_active_marker == 0)  num_active_marker = 1;
+    }
+    
+    TRACE2("gc.con.cs","[GC][Con] ctime  "<<(unsigned)(c_time>>10)<<"  mtime  "<<(unsigned)(m_time>>10)<<"  dtime  "<<(unsigned)(d_time>>10));
+    TRACE2("gc.con.cs","[GC][Con] marker num : "<<num_active_marker<<" ");
+  }
+
+  cc_scheduler->last_marker_num = num_active_marker;
+  return num_active_marker;
+}
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h?rev=650422&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h Tue Apr 22 01:52:29 2008
@@ -0,0 +1,55 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#ifndef __CONCURRENT_COLLECTION_SCHEDULER_H_
+#define __CONCURRENT_COLLECTION_SCHEDULER_H_
+
+#define STAT_SAMPLE_WINDOW_SIZE 5
+
+typedef struct Con_Collection_Scheduler {
+  /*common field*/
+  GC* gc;
+  
+  /*concurrent scheduler */
+  int64 time_delay_to_start_mark;
+  POINTER_SIZE_INT space_threshold_to_start_mark;
+  
+  int64 last_mutator_time;
+  int64 last_collector_time;
+
+  unsigned int last_marker_num;
+
+  unsigned int num_window_slots;
+  unsigned int last_window_index;
+  
+  float alloc_rate_window[STAT_SAMPLE_WINDOW_SIZE];
+  float trace_rate_window[STAT_SAMPLE_WINDOW_SIZE];
+  float space_utilization_ratio[STAT_SAMPLE_WINDOW_SIZE];
+  POINTER_SIZE_INT trace_load_window[STAT_SAMPLE_WINDOW_SIZE];
+  POINTER_SIZE_INT alloc_load_window[STAT_SAMPLE_WINDOW_SIZE];
+} Con_Collection_Scheduler;
+
+void con_collection_scheduler_initialize(GC* gc);
+void con_collection_scheduler_destruct(GC* gc);
+
+Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause);
+void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
+
+void gc_decide_cc_scheduler_kind(char* cc_scheduler);
+void gc_set_default_cc_scheduler_kind();
+#endif
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Tue Apr 22 01:52:29 2008
@@ -36,6 +36,7 @@
 unsigned int Cur_Forward_Bit = 0x2;
 
 unsigned int SPACE_ALLOC_UNIT;
+Boolean IGNORE_FORCE_GC = FALSE;
 
 void gc_assign_free_area_to_mutators(GC* gc)
 {
@@ -92,143 +93,141 @@
 
 }
 
+void gc_update_space_stat(GC_MS* gc)
+{
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
+    gc_ms_update_space_stat((GC_MS*)gc);
+#endif
+}
+
+void gc_reset_space_stat(GC_MS* gc)
+{
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
+    gc_ms_reset_space_stat((GC_MS*)gc);
+#endif
+}
+
 void gc_prepare_rootset(GC* gc)
 {
   /* Stop the threads and collect the roots. */
-  lock(gc->enumerate_rootset_lock);
   INFO2("gc.process", "GC: stop the threads and enumerate rootset ...\n");
   gc_clear_rootset(gc);
   gc_reset_rootset(gc);
   vm_enumerate_root_set_all_threads();
   gc_copy_interior_pointer_table_to_rootset();
   gc_set_rootset(gc);
-  unlock(gc->enumerate_rootset_lock);
+}
+
+void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection)
+{
+  if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
+
+  /* Clear rootset pools here rather than in each collection algorithm */
+  gc_clear_rootset(gc);
+  
+  if(!gc_is_specify_con_gc()) gc_metadata_verify(gc, FALSE);
+  
+  if(!IGNORE_FINREF ){
+    INFO2("gc.process", "GC: finref process after collection ...\n");
+    gc_put_finref_to_vm(gc);
+    gc_reset_finref_metadata(gc);
+    gc_activate_finref_threads((GC*)gc);
+#ifndef BUILD_IN_REFERENT
+  } else {
+    gc_clear_weakref_pools(gc);
+    gc_clear_finref_repset_pool(gc);
+#endif
+  }
+
+  gc_update_space_stat((GC_MS*)gc);
+  
+  gc_update_collection_scheduler(gc, time_mutator, time_collection);
+
+  gc_reset_space_stat((GC_MS*)gc);
+
+  gc_reset_collector_state(gc);
+
+  gc_clear_dirty_set(gc);
+    
+  vm_reclaim_native_objs();
+  gc->in_collection = FALSE;
 
 }
 
 void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
-{
+{  
   INFO2("gc.process", "\nGC: GC start ...\n");
-  
+
   collection_start_time = time_now();
-  int64 mutator_time = collection_start_time - collection_end_time;
+  int64 time_mutator = collection_start_time - collection_end_time;
 
-  /* FIXME:: before mutators suspended, the ops below should be very careful
-     to avoid racing with mutators. */
   gc->num_collections++;
   gc->cause = gc_cause;
 
+  if(gc_is_specify_con_gc()){
+    gc_finish_con_GC(gc, time_mutator);    
+    collection_end_time = time_now();
+    INFO2("gc.process", "GC: GC end\n");
+    return;
+  }
+
+  /* FIXME:: before mutators suspended, the ops below should be very careful
+     to avoid racing with mutators. */
+
   gc_decide_collection_kind(gc, gc_cause);
 
 #ifdef MARK_BIT_FLIPPING
   if(collect_is_minor()) mark_bit_flip();
 #endif
 
-  if(!USE_CONCURRENT_GC){
-    gc_metadata_verify(gc, TRUE);
+  gc_metadata_verify(gc, TRUE);
 #ifndef BUILD_IN_REFERENT
-    gc_finref_metadata_verify((GC*)gc, TRUE);
+  gc_finref_metadata_verify((GC*)gc, TRUE);
 #endif
-  }
-  int disable_count = hythread_reset_suspend_disable();
+
   /* Stop the threads and collect the roots. */
+  lock(gc->lock_enum);
+  int disable_count = hythread_reset_suspend_disable();
+  gc_set_rootset_type(ROOTSET_IS_REF);
   gc_prepare_rootset(gc);
+  unlock(gc->lock_enum);
+    
+  gc->in_collection = TRUE;
   
-  if(USE_CONCURRENT_GC && gc_sweep_is_concurrent()){
-    if(gc_is_concurrent_sweep_phase())
-      gc_finish_concurrent_sweep(gc);
-  }else{
-    if(USE_CONCURRENT_GC && gc_is_concurrent_mark_phase()){
-      gc_finish_concurrent_mark(gc, TRUE);
-    }  
+  /* this has to be done after all mutators are suspended */
+  gc_reset_mutator_context(gc);
   
-    gc->in_collection = TRUE;
-    
-    /* this has to be done after all mutators are suspended */
-    gc_reset_mutator_context(gc);
-    
-    if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
+  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
 
 #if defined(USE_UNIQUE_MARK_SWEEP_GC)
-    gc_ms_reclaim_heap((GC_MS*)gc);
+  gc_ms_reclaim_heap((GC_MS*)gc);
 #elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
-    gc_mc_reclaim_heap((GC_MC*)gc);
+  gc_mc_reclaim_heap((GC_MC*)gc);
 #else
-    gc_gen_reclaim_heap((GC_Gen*)gc, collection_start_time);
+  gc_gen_reclaim_heap((GC_Gen*)gc, collection_start_time);
 #endif
 
-  }
-
   collection_end_time = time_now(); 
 
-#if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
-  gc_gen_collection_verbose_info((GC_Gen*)gc, collection_end_time - collection_start_time, mutator_time);
-  gc_gen_space_verbose_info((GC_Gen*)gc);
-#endif
-
-  if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
-  
-  int64 collection_time = 0;
-  if(USE_CONCURRENT_GC && gc_mark_is_concurrent()){
-    collection_time = gc_get_concurrent_mark_time(gc);
-    gc_reset_concurrent_mark(gc);
-  }else{
-    collection_time = time_now()-collection_start_time;
-   }
-
-  if(USE_CONCURRENT_GC && gc_sweep_is_concurrent()){
-    gc_reset_concurrent_sweep(gc);
-  }
+  int64 time_collection = collection_end_time - collection_start_time;
 
 #if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
-  if(USE_CONCURRENT_GC && gc_need_start_concurrent_mark(gc))
-    gc_start_concurrent_mark(gc);
-#endif
-
-  /* Clear rootset pools here rather than in each collection algorithm */
-  gc_clear_rootset(gc);
-  
-  gc_metadata_verify(gc, FALSE);
-  
-  if(!IGNORE_FINREF ){
-    INFO2("gc.process", "GC: finref process after collection ...\n");
-    gc_put_finref_to_vm(gc);
-    gc_reset_finref_metadata(gc);
-    gc_activate_finref_threads((GC*)gc);
-#ifndef BUILD_IN_REFERENT
-  } else {
-    gc_clear_weakref_pools(gc);
-    gc_clear_finref_repset_pool(gc);
+  gc_gen_collection_verbose_info((GC_Gen*)gc, time_collection, time_mutator);
+  gc_gen_space_verbose_info((GC_Gen*)gc);
 #endif
-  }
 
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
-  gc_ms_update_space_statistics((GC_MS*)gc);
-#endif
+  gc_reset_after_collection(gc, time_mutator, time_collection);
 
   gc_assign_free_area_to_mutators(gc);
   
-  if(USE_CONCURRENT_GC) gc_update_collection_scheduler(gc, mutator_time, collection_time);
-
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
-  gc_ms_reset_space_statistics((GC_MS*)gc);
-#endif
-
-  vm_reclaim_native_objs();
-  gc->in_collection = FALSE;
-
-  gc_reset_collector_state(gc);
-
-  gc_clear_dirty_set(gc);
-  
   vm_resume_threads_after();
   assert(hythread_is_suspend_enabled());
   hythread_set_suspend_disable(disable_count);
   INFO2("gc.process", "GC: GC end\n");
-  int64 pause_time = time_now()-collection_start_time;
-  INFO2("gc.con","pause time:  "<<((unsigned int)(pause_time>>10))<<"  ms \n");
   return;
 }
+
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Tue Apr 22 01:52:29 2008
@@ -427,10 +427,10 @@
   unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
   Collection_Scheduler* collection_scheduler;
 
-  SpinLock concurrent_mark_lock;
-  SpinLock enumerate_rootset_lock;
-  SpinLock concurrent_sweep_lock;
-  SpinLock collection_scheduler_lock;
+  SpinLock lock_con_mark;
+  SpinLock lock_enum;
+  SpinLock lock_con_sweep;
+  SpinLock lock_collect_sched;
   
   /* system info */
   unsigned int _system_alloc_unit;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp Tue Apr 22 01:52:29 2008
@@ -27,160 +27,105 @@
 #include "gc_concurrent.h"
 #include "../common/gc_for_barrier.h"
 
-Boolean USE_CONCURRENT_GC           = FALSE;
-Boolean USE_CONCURRENT_ENUMERATION  = FALSE;
-Boolean USE_CONCURRENT_MARK         = FALSE;
-Boolean USE_CONCURRENT_SWEEP        = FALSE;
-
-volatile Boolean concurrent_mark_phase  = FALSE;
+volatile Boolean concurrent_in_marking  = FALSE;
+volatile Boolean concurrent_in_sweeping = FALSE;
 volatile Boolean mark_is_concurrent     = FALSE;
-volatile Boolean concurrent_sweep_phase = FALSE;
 volatile Boolean sweep_is_concurrent    = FALSE;
 
-volatile Boolean gc_sweeping_global_normal_chunk = FALSE;
-
-unsigned int CONCURRENT_ALGO = 0; 
+volatile Boolean gc_sweep_global_normal_chunk = FALSE;
 
-static void gc_check_concurrent_mark(GC* gc)
+static void gc_check_con_mark(GC* gc)
 {
   if(!is_mark_finished(gc)){
-    lock(gc->concurrent_mark_lock);
-#ifndef USE_UNIQUE_MARK_SWEEP_GC
-    gc_gen_start_concurrent_mark((GC_Gen*)gc);
-#else
-    if(gc_concurrent_match_algorithm(OTF_REM_OBJ_SNAPSHOT_ALGO)){
-      gc_ms_start_concurrent_mark((GC_MS*)gc, MIN_NUM_MARKERS);
-    }else if(gc_concurrent_match_algorithm(OTF_REM_NEW_TARGET_ALGO)){  
-      gc_ms_start_concurrent_mark((GC_MS*)gc, MIN_NUM_MARKERS);
-    }else if(gc_concurrent_match_algorithm(MOSTLY_CONCURRENT_ALGO)){
-      //ignore.       
+    lock(gc->lock_con_mark);
+    if(gc_is_kind(ALGO_CON_OTF_OBJ)){
+      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
+    }else if(gc_is_kind(ALGO_CON_OTF_REF)){
+      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
+    }else if(gc_is_kind(ALGO_CON_MOSTLY)){
+      //ignore.   
     }
-#endif  
-    unlock(gc->concurrent_mark_lock);
+    unlock(gc->lock_con_mark);
   }
 }
 
-void gc_check_mutator_barrier(GC* gc){
-  lock(gc->mutator_list_lock);    
-
-  Mutator *mutator = gc->mutator_list;
-  while(mutator){
-    wait_mutator_signal(mutator, MUTATOR_ENTER_BARRIER);
-    mutator = mutator->next;
-  }
-
-  unlock(gc->mutator_list_lock);
-}
-
-static void gc_wait_concurrent_mark_finish(GC* gc)
+static void gc_wait_con_mark_finish(GC* gc)
 {
   wait_mark_finish(gc);
-  gc_set_barrier_function(WRITE_BARRIER_REM_NIL);
-  //mem_fence(); we do not need memory fence here.
-  gc_check_mutator_barrier(gc);
-  gc_set_concurrent_status(gc,GC_CONCURRENT_STATUS_NIL);
+  gc_set_barrier_function(WB_REM_NIL);
+  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
 }
 
-void gc_start_concurrent_mark(GC* gc)
+unsigned int gc_decide_marker_number(GC* gc);
+
+void gc_start_con_mark(GC* gc)
 {
   int disable_count;
   unsigned int num_marker;
   
-  if(!try_lock(gc->concurrent_mark_lock) || gc_mark_is_concurrent()) return;
+  if(!try_lock(gc->lock_con_mark) || gc_mark_is_concurrent()) return;
     
-  /*prepare rootset*/
-  if(TRUE){
-    lock(gc->enumerate_rootset_lock);
-    gc_metadata_verify(gc, TRUE);
-    gc_reset_rootset(gc);
-    disable_count = hythread_reset_suspend_disable();
-    vm_enumerate_root_set_all_threads();
-    gc_copy_interior_pointer_table_to_rootset();
-    gc_set_rootset(gc); 
-  }else{
-    gc_clear_remset((GC*)gc); 
-    if(!IGNORE_FINREF){
-      gc_copy_finaliable_obj_to_rootset(gc);
-    }
-    gc->root_set = NULL;
-  }
-  gc_set_concurrent_status(gc, GC_CONCURRENT_MARK_PHASE);
-
-#ifndef USE_UNIQUE_MARK_SWEEP_GC
-  gc_decide_collection_kind((GC*)gc, GC_CAUSE_NIL);
-#endif
+  lock(gc->lock_enum);
+  disable_count = hythread_reset_suspend_disable();
+  int64 pause_start = time_now();
+  gc_set_rootset_type(ROOTSET_IS_OBJ);  
+  gc_prepare_rootset(gc);
+  
+  gc_set_concurrent_status(gc, GC_CON_MARK_PHASE);
 
   num_marker = gc_decide_marker_number(gc);
   
   /*start concurrent mark*/
-#ifndef USE_UNIQUE_MARK_SWEEP_GC
-  gc_gen_start_concurrent_mark((GC_Gen*)gc);
-#else
-  if(gc_concurrent_match_algorithm(OTF_REM_OBJ_SNAPSHOT_ALGO)){
-    gc_set_barrier_function(WRITE_BARRIER_REM_OBJ_SNAPSHOT);
-    gc_check_mutator_barrier(gc);
-    gc_ms_start_concurrent_mark((GC_MS*)gc, num_marker);
-  }else if(gc_concurrent_match_algorithm(MOSTLY_CONCURRENT_ALGO)){
-    gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_OBJ);
-    gc_check_mutator_barrier(gc);
-    gc_ms_start_most_concurrent_mark((GC_MS*)gc, num_marker);
-  }else if(gc_concurrent_match_algorithm(OTF_REM_NEW_TARGET_ALGO)){  
-    gc_set_barrier_function(WRITE_BARRIER_REM_OLD_VAR);
-    gc_check_mutator_barrier(gc);
-    gc_ms_start_concurrent_mark((GC_MS*)gc, num_marker);
+  if(gc_is_kind(ALGO_CON_OTF_OBJ)){
+    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
+    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
+  }else if(gc_is_kind(ALGO_CON_MOSTLY)){
+    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
+    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
+  }else if(gc_is_kind(ALGO_CON_OTF_REF)){  
+    gc_set_barrier_function(WB_REM_OLD_VAR);
+    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
   }
-#endif
 
-  if(TRUE){ 
-    unlock(gc->enumerate_rootset_lock);
-    vm_resume_threads_after();    
-    assert(hythread_is_suspend_enabled());
-    hythread_set_suspend_disable(disable_count);
-  }
+  unlock(gc->lock_enum);      
+  INFO2("gc.con.time","[GC][Con]pause(enumeration root):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
+  vm_resume_threads_after();    
+  assert(hythread_is_suspend_enabled());
+  hythread_set_suspend_disable(disable_count);
 
-  unlock(gc->concurrent_mark_lock);
+  unlock(gc->lock_con_mark);
 }
 
-void wspace_mark_scan_mostly_concurrent_reset();
-void wspace_mark_scan_mostly_concurrent_terminate();
+void mostly_con_mark_terminate_reset();
+void terminate_mostly_con_mark();
 
-void gc_finish_concurrent_mark(GC* gc, Boolean is_STW)
+void gc_finish_con_mark(GC* gc, Boolean need_STW)
 {
-  gc_check_concurrent_mark(gc);
+  gc_check_con_mark(gc);
   
-  if(gc_concurrent_match_algorithm(MOSTLY_CONCURRENT_ALGO))
-    wspace_mark_scan_mostly_concurrent_terminate();
+  if(gc_is_kind(ALGO_CON_MOSTLY))
+    terminate_mostly_con_mark();
   
-  gc_wait_concurrent_mark_finish(gc);
+  gc_wait_con_mark_finish(gc);
 
   int disable_count;   
-  if(!is_STW){
+  if(need_STW){
     /*suspend the mutators.*/   
-    lock(gc->enumerate_rootset_lock);    
-    gc_clear_rootset(gc);
-    gc_metadata_verify(gc, TRUE);
-    gc_reset_rootset(gc);    
-    disable_count = hythread_reset_suspend_disable();
-    vm_enumerate_root_set_all_threads();
-    gc_copy_interior_pointer_table_to_rootset();
-    gc_set_rootset(gc); 
+    lock(gc->lock_enum);
+    if(gc_is_kind(ALGO_CON_MOSTLY)){
+      /*In the mostly-concurrent algorithm there is a final marking pause.
+            Prepare the root set for that final marking.*/
+      disable_count = hythread_reset_suspend_disable();      
+      gc_set_rootset_type(ROOTSET_IS_OBJ);
+      gc_prepare_rootset(gc);
+    }else{
+      disable_count = vm_suspend_all_threads();
+    }
   }
 
-  
-  if(gc_concurrent_match_algorithm(MOSTLY_CONCURRENT_ALGO)){
-    /*If gc use mostly concurrent algorithm, there's a final marking pause. 
+  if(gc_is_kind(ALGO_CON_MOSTLY)){
+    /*In the mostly-concurrent algorithm there is a final marking pause. 
           Suspend the mutators once again and finish the marking phase.*/
-//    int disable_count;   
-//    if(!is_STW){
-//      /*suspend the mutators.*/   
-//      lock(gc->enumerate_rootset_lock);
-//      gc_metadata_verify(gc, TRUE);
-//      gc_reset_rootset(gc);    
-//      disable_count = hythread_reset_suspend_disable();
-//      vm_enumerate_root_set_all_threads();
-//      gc_copy_interior_pointer_table_to_rootset();
-//      gc_set_rootset(gc); 
-//    }
 
     /*prepare dirty object*/
     gc_prepare_dirty_set(gc);
@@ -188,40 +133,34 @@
     gc_set_weakref_sets(gc);
         
     /*start STW mark*/
-#ifndef USE_UNIQUE_MARK_SWEEP_GC
-    assert(0);
-#else
-    gc_ms_start_final_mark_after_concurrent((GC_MS*)gc, MIN_NUM_MARKERS);
-#endif
+    gc_ms_start_mostly_con_final_mark((GC_MS*)gc, MIN_NUM_MARKERS);
     
-    wspace_mark_scan_mostly_concurrent_reset();
+    mostly_con_mark_terminate_reset();
     gc_clear_dirty_set(gc);
-//    if(!is_STW){
-//      unlock(gc->enumerate_rootset_lock);
-//      vm_resume_threads_after();    
-//      assert(hythread_is_suspend_enabled());
-//      hythread_set_suspend_disable(disable_count);
-//    }
   }
 
+  gc_reset_dirty_set(gc);
   
-  if(!is_STW){    
-    unlock(gc->enumerate_rootset_lock);
-    vm_resume_threads_after();    
-    assert(hythread_is_suspend_enabled());
-    hythread_set_suspend_disable(disable_count);
+  if(need_STW){
+    unlock(gc->lock_enum);
+    if(gc_is_kind(ALGO_CON_MOSTLY)){
+      vm_resume_threads_after();    
+      assert(hythread_is_suspend_enabled());
+      hythread_set_suspend_disable(disable_count);
+    }else{
+      vm_resume_all_threads(disable_count);
+    }
   }
   
-  gc_reset_dirty_set(gc);
 }
 
-void gc_reset_concurrent_mark(GC* gc)
+void gc_reset_con_mark(GC* gc)
 {
   gc->num_active_markers = 0;
   gc_mark_unset_concurrent();
 }
 
-int64 gc_get_concurrent_mark_time(GC* gc)
+int64 gc_get_con_mark_time(GC* gc)
 {
   int64 time_mark = 0;
   Marker** markers = gc->markers;
@@ -236,10 +175,9 @@
   return time_mark;
 }
 
-void gc_start_concurrent_sweep(GC* gc)
+void gc_start_con_sweep(GC* gc)
 {
-
-  if(!try_lock(gc->concurrent_sweep_lock) || gc_sweep_is_concurrent()) return;
+  if(!try_lock(gc->lock_con_sweep) || gc_sweep_is_concurrent()) return;
 
   /*FIXME: enable finref*/
   if(!IGNORE_FINREF ){ 
@@ -253,7 +191,7 @@
 #endif
   }
 
-  gc_set_concurrent_status(gc, GC_CONCURRENT_SWEEP_PHASE);
+  gc_set_concurrent_status(gc, GC_CON_SWEEP_PHASE);
 
   gc_set_weakref_sets(gc);
 
@@ -262,120 +200,142 @@
   gc_identify_dead_weak_roots(gc);
   
   /*start concurrent mark*/
-#ifndef USE_UNIQUE_MARK_SWEEP_GC
-  assert(0);
-#else
-  gc_ms_start_concurrent_sweep((GC_MS*)gc, MIN_NUM_MARKERS);
-#endif
+  gc_ms_start_con_sweep((GC_MS*)gc, MIN_NUM_MARKERS);
 
-  unlock(gc->concurrent_sweep_lock);
+  unlock(gc->lock_con_sweep);
 }
 
-void gc_reset_concurrent_sweep(GC* gc)
+void gc_reset_con_sweep(GC* gc)
 {
   gc->num_active_collectors = 0;
   gc_sweep_unset_concurrent();
 }
 
-void gc_wait_concurrent_sweep_finish(GC* gc)
+void gc_wait_con_sweep_finish(GC* gc)
 {
   wait_collection_finish(gc);  
-  gc_set_concurrent_status(gc,GC_CONCURRENT_STATUS_NIL);
+  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
 }
 
-void gc_finish_concurrent_sweep(GC * gc)
+void gc_finish_con_sweep(GC * gc)
 {
-  gc_wait_concurrent_sweep_finish(gc);
+  gc_wait_con_sweep_finish(gc);
 }
 
-void gc_check_concurrent_phase(GC * gc)
+void gc_try_finish_con_phase(GC * gc)
 {
   /*Note: we do not finish concurrent mark here if we do not want to start concurrent sweep.*/
-  if(gc_is_concurrent_mark_phase(gc) && is_mark_finished(gc) && USE_CONCURRENT_SWEEP){
+  if(gc_con_is_in_marking(gc) && is_mark_finished(gc)){
     /*Although all conditions above are satisfied, we can not guarantee concurrent marking is finished.
           Because, sometimes, the concurrent marking has not started yet. We check the concurrent mark lock
           here to guarantee this occasional case.*/
-    if(try_lock(gc->concurrent_mark_lock)){
-      unlock(gc->concurrent_mark_lock);
-      gc_finish_concurrent_mark(gc, FALSE);
+    if(try_lock(gc->lock_con_mark)){
+      unlock(gc->lock_con_mark);
+      gc_finish_con_mark(gc, TRUE);
     }
   }
 
-  if(gc_is_concurrent_sweep_phase(gc) && is_collector_finished(gc)){
+  if(gc_con_is_in_sweeping(gc) && is_collector_finished(gc)){
     //The reason is same as concurrent mark above.
-    if(try_lock(gc->concurrent_sweep_lock)){
-      unlock(gc->concurrent_sweep_lock);
-      gc_finish_concurrent_sweep(gc);
+    if(try_lock(gc->lock_con_sweep)){
+      unlock(gc->lock_con_sweep);
+      gc_finish_con_sweep(gc);
     }
   }
 }
 
+void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection);
 
-void gc_reset_after_concurrent_collection(GC* gc)
+void gc_reset_after_con_collect(GC* gc)
 {
+  assert(gc_is_specify_con_gc());
+  
+  int64 time_mutator = gc_get_mutator_time(gc);
+  int64 time_collection = gc_get_collector_time(gc) + gc_get_marker_time(gc);
 
-  int64 mutator_time = gc_get_mutator_time(gc);
-  int64 collection_time = gc_get_collector_time(gc) + gc_get_marker_time(gc);
-
-  /*FIXME: enable concurrent GEN mode.*/
   gc_reset_interior_pointer_table();
-  if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
+
+  gc_reset_after_collection(gc, time_mutator, time_collection);
   
-  /* Clear rootset pools here rather than in each collection algorithm */
-  gc_clear_rootset(gc);
-    
-  if(!IGNORE_FINREF ){
-    INFO2("gc.process", "GC: finref process after collection ...\n");
-    gc_put_finref_to_vm(gc);
-    gc_reset_finref_metadata(gc);
-    gc_activate_finref_threads((GC*)gc);
-#ifndef BUILD_IN_REFERENT
-  } else {
-    gc_clear_weakref_pools(gc);    
-    gc_clear_finref_repset_pool(gc);
-#endif
+  if(gc_mark_is_concurrent()){
+    gc_reset_con_mark(gc);    
   }
 
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
-  gc_ms_update_space_statistics((GC_MS*)gc);
-#endif
+  if(gc_sweep_is_concurrent()){
+    gc_reset_con_sweep(gc);
+  }
+}
 
-  gc_reset_collector_state(gc);
+void gc_finish_con_GC(GC* gc, int64 time_mutator)
+{
+  int64 time_collection_start = time_now();
+  
+  gc->num_collections++;
 
-  gc_clear_dirty_set(gc);
+  lock(gc->lock_enum);
 
-  vm_reclaim_native_objs();
-  gc->in_collection = FALSE;
+  int disable_count = hythread_reset_suspend_disable();  
+  gc_set_rootset_type(ROOTSET_IS_REF);
+  gc_prepare_rootset(gc);
+  unlock(gc->lock_enum);
   
-  if(USE_CONCURRENT_GC && gc_mark_is_concurrent()){
-    gc_reset_concurrent_mark(gc);    
+  if(gc_sweep_is_concurrent()){
+    if(gc_con_is_in_sweeping())
+      gc_finish_con_sweep(gc);
+  }else{
+    if(gc_con_is_in_marking()){
+      gc_finish_con_mark(gc, FALSE);
+    }
+    gc->in_collection = TRUE;
+    gc_reset_mutator_context(gc);
+    if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
+    gc_ms_reclaim_heap((GC_MS*)gc);
+  }
+  
+  int64 time_collection = 0;
+  if(gc_mark_is_concurrent()){
+    time_collection = gc_get_con_mark_time(gc);
+    gc_reset_con_mark(gc);
+  }else{
+    time_collection = time_now()-time_collection_start;
   }
 
-  if(USE_CONCURRENT_GC && gc_sweep_is_concurrent()){
-    gc_reset_concurrent_sweep(gc);
+  if(gc_sweep_is_concurrent()){
+    gc_reset_con_sweep(gc);
   }
+
+  gc_reset_after_collection(gc, time_mutator, time_collection);
   
-  gc_update_collection_scheduler(gc, mutator_time, collection_time);
+  gc_start_mutator_time_measure(gc);
+  
+  vm_resume_threads_after();
+  assert(hythread_is_suspend_enabled());
+  hythread_set_suspend_disable(disable_count);  
+  int64 pause_time = time_now()-time_collection_start;
 
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
-  gc_ms_reset_space_statistics((GC_MS*)gc);
-#endif
+  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause){
+    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
+  }else{
+    INFO2("gc.con.time","[GC][Con]pause( Heap exhuasted ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
+  }
+  return;
 }
 
-void gc_decide_concurrent_algorithm(char* concurrent_algo)
+void gc_set_default_con_algo()
 {
-  if(!concurrent_algo){
-    CONCURRENT_ALGO = OTF_REM_OBJ_SNAPSHOT_ALGO;
-  }else{
-    string_to_upper(concurrent_algo);
-     
-    if(!strcmp(concurrent_algo, "OTF_OBJ")){  
-      CONCURRENT_ALGO = OTF_REM_OBJ_SNAPSHOT_ALGO;
-      
-    }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
-      CONCURRENT_ALGO = MOSTLY_CONCURRENT_ALGO;  
-    }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
-      CONCURRENT_ALGO = OTF_REM_NEW_TARGET_ALGO;  
-    }
+  assert((GC_PROP & ALGO_CON_MASK) == 0);
+  GC_PROP |= ALGO_CON_OTF_OBJ;
+}
+
+void gc_decide_con_algo(char* concurrent_algo)
+{
+  string_to_upper(concurrent_algo);
+  GC_PROP &= ~ALGO_CON_MASK;
+  if(!strcmp(concurrent_algo, "OTF_OBJ")){ 
+    GC_PROP |= ALGO_CON_OTF_OBJ;
+  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
+    GC_PROP |= ALGO_CON_MOSTLY;
+  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
+    GC_PROP |= ALGO_CON_OTF_REF;
   }
 }
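
A note on the guard at the top of gc_try_finish_con_phase() above: as the comment says, the phase flag can
be seen while concurrent marking has not actually started yet, so is_collector_finished() alone is not
sufficient. The try_lock/unlock on lock_con_mark confirms that whoever starts the marking is no longer
inside its start-up critical section before the phase is finished. Below is a stripped-down, standalone
sketch of that pattern only; the mutex and the helper functions are stand-ins, not the real GC types.

    #include <mutex>

    static std::mutex lock_con_mark;          /* stands in for gc->lock_con_mark     */
    static volatile bool in_marking = false;  /* stands in for concurrent_in_marking */

    static bool markers_finished() { return true; }  /* stand-in for is_collector_finished() */
    static void finish_con_mark()  { in_marking = false; }

    static void try_finish_con_phase()
    {
      if(in_marking && markers_finished()){
        /* the starter holds lock_con_mark while bringing the markers up; if the lock
           can be taken here, start-up is complete and finishing the phase is safe */
        if(lock_con_mark.try_lock()){
          lock_con_mark.unlock();
          finish_con_mark();
        }
      }
    }

    int main()
    {
      in_marking = true;
      try_finish_con_phase();   /* no starter holds the lock, so the phase is finished */
      return in_marking ? 1 : 0;
    }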

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h Tue Apr 22 01:52:29 2008
@@ -20,51 +20,48 @@
 #include "gc_common.h"
 
 enum GC_CONCURRENT_STATUS{
-  GC_CONCURRENT_STATUS_NIL = 0x00,
-  GC_CONCURRENT_MARK_PHASE = 0x01,  
-  GC_CONCURRENT_MARK_FINAL_PAUSE_PHASE = 0x11, // for mostly concurrent only.
-  GC_CONCURRENT_SWEEP_PHASE = 0x02
+  GC_CON_STATUS_NIL = 0x00,
+  GC_CON_MARK_PHASE = 0x01,  
+  GC_MOSTLY_CON_FINAL_MARK_PHASE = 0x11, // for mostly concurrent only.
+  GC_CON_SWEEP_PHASE = 0x02
 };
 
 enum HANDSHAKE_SINGAL{
-  HANDSHAKE_NIL = 0x00,
-    
-  /*mutator to collector*/
-  ENABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS = 0x01,
-  DISABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS = 0x02,
+  HSIG_MUTATOR_SAFE = 0x0,
 
-  
-  ENABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS = 0x03,
-  DISABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS = 0x04,
+  HSIG_DISABLE_SWEEP_LOCAL_CHUNKS  = 0x01,
+  HSIG_DISABLE_SWEEP_GLOBAL_CHUNKS = 0x02,
+  HSIG_MUTATOR_ENTER_ALLOC_MARK    = 0x03,
+};
 
-  MUTATOR_ENTER_BARRIER = 0x05,
-  MUTATOR_EXIT_BARRIER = 0x06,
+inline void gc_set_con_gc(unsigned int con_phase)
+{ GC_PROP |= con_phase;  }
 
-  MUTATOR_ENTER_ALLOCATION_MARK = 0x07,
-  MUTATOR_EXIT_ALLOCATION_MARK = 0x08
-};
+inline void gc_specify_con_enum()
+{ gc_set_con_gc(ALGO_CON_ENUM); }
 
-extern Boolean USE_CONCURRENT_GC;
-extern Boolean USE_CONCURRENT_ENUMERATION;
-extern Boolean USE_CONCURRENT_MARK;
-extern Boolean USE_CONCURRENT_SWEEP;
+inline void gc_specify_con_mark()
+{ gc_set_con_gc(ALGO_CON_MARK);  }
 
-extern volatile Boolean concurrent_mark_phase;
-extern volatile Boolean mark_is_concurrent;
-extern volatile Boolean concurrent_sweep_phase;
-extern volatile Boolean sweep_is_concurrent;
-extern unsigned int CONCURRENT_ALGO; 
+inline void gc_specify_con_sweep()
+{ gc_set_con_gc(ALGO_CON_SWEEP); }
 
-enum CONCURRENT_MARK_ALGORITHM{
-  OTF_REM_OBJ_SNAPSHOT_ALGO = 0x01,
-  OTF_REM_NEW_TARGET_ALGO = 0x02,
-  MOSTLY_CONCURRENT_ALGO = 0x03
-};
+inline Boolean gc_is_specify_con_gc()
+{ return (GC_PROP & ALGO_CON) != 0; }
 
-inline Boolean gc_concurrent_match_algorithm(unsigned int concurrent_algo)
-{
-  return CONCURRENT_ALGO == concurrent_algo;
-}
+inline Boolean gc_is_specify_con_enum()
+{ return (GC_PROP & ALGO_CON_ENUM) == ALGO_CON_ENUM;  }
+
+inline Boolean gc_is_specify_con_mark()
+{ return (GC_PROP & ALGO_CON_MARK) == ALGO_CON_MARK;  }
+
+inline Boolean gc_is_specify_con_sweep()
+{ return (GC_PROP & ALGO_CON_SWEEP) == ALGO_CON_SWEEP; }
+
+extern volatile Boolean concurrent_in_marking;
+extern volatile Boolean concurrent_in_sweeping;
+extern volatile Boolean mark_is_concurrent;
+extern volatile Boolean sweep_is_concurrent;
 
 inline Boolean gc_mark_is_concurrent()
 {
@@ -73,8 +70,7 @@
 
 inline void gc_mark_set_concurrent()
 {
-  if(gc_concurrent_match_algorithm(OTF_REM_OBJ_SNAPSHOT_ALGO)
-      ||gc_concurrent_match_algorithm(OTF_REM_NEW_TARGET_ALGO)) 
+  if(gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF)) 
     gc_enable_alloc_obj_live();
   mark_is_concurrent = TRUE;
 }
@@ -85,14 +81,14 @@
   mark_is_concurrent = FALSE;
 }
 
-inline Boolean gc_is_concurrent_mark_phase()
+inline Boolean gc_con_is_in_marking()
 {
-  return concurrent_mark_phase;
+  return concurrent_in_marking;
 }
 
-inline Boolean gc_is_concurrent_mark_phase(GC* gc)
+inline Boolean gc_con_is_in_marking(GC* gc)
 {
-  return gc->gc_concurrent_status == GC_CONCURRENT_MARK_PHASE;
+  return gc->gc_concurrent_status == GC_CON_MARK_PHASE;
 }
 
 inline Boolean gc_sweep_is_concurrent()
@@ -110,62 +106,65 @@
   sweep_is_concurrent = FALSE;
 }
 
-inline Boolean gc_is_concurrent_sweep_phase()
+inline Boolean gc_con_is_in_sweeping()
 {
-  return concurrent_sweep_phase;
+  return concurrent_in_sweeping;
 }
 
-inline Boolean gc_is_concurrent_sweep_phase(GC* gc)
+inline Boolean gc_con_is_in_sweeping(GC* gc)
 {
-  return gc->gc_concurrent_status == GC_CONCURRENT_SWEEP_PHASE;
+  return gc->gc_concurrent_status == GC_CON_SWEEP_PHASE;
 }
 
 inline void gc_set_concurrent_status(GC*gc, unsigned int status)
 {
   /*Reset status*/
-  concurrent_mark_phase = FALSE;
-  concurrent_sweep_phase = FALSE;
+  concurrent_in_marking = FALSE;
+  concurrent_in_sweeping = FALSE;
 
   gc->gc_concurrent_status = status;
   switch(status){
-    case GC_CONCURRENT_MARK_PHASE: 
+    case GC_CON_MARK_PHASE: 
       gc_mark_set_concurrent();
-      concurrent_mark_phase = TRUE;
+      concurrent_in_marking = TRUE;
       break;
-    case GC_CONCURRENT_SWEEP_PHASE:
+    case GC_CON_SWEEP_PHASE:
       gc_sweep_set_concurrent();
-      concurrent_sweep_phase = TRUE;
+      concurrent_in_sweeping = TRUE;
       break;
     default:
-      assert(!concurrent_mark_phase && !concurrent_sweep_phase);
+      assert(!concurrent_in_marking && !concurrent_in_sweeping);
   }
   
   return;
 }
 
-void gc_reset_concurrent_mark(GC* gc);
-void gc_start_concurrent_mark(GC* gc);
-void gc_finish_concurrent_mark(GC* gc, Boolean is_STW);
-int64 gc_get_concurrent_mark_time(GC* gc);
+void gc_reset_con_mark(GC* gc);
+void gc_start_con_mark(GC* gc);
+void gc_finish_con_mark(GC* gc, Boolean need_STW);
+int64 gc_get_con_mark_time(GC* gc);
+
+void gc_start_con_sweep(GC* gc);
+void gc_finish_con_sweep(GC * gc);
 
-void gc_start_concurrent_sweep(GC* gc);
-void gc_finish_concurrent_sweep(GC * gc);
+void gc_reset_after_con_collect(GC* gc);
+void gc_try_finish_con_phase(GC * gc);
 
-void gc_reset_after_concurrent_collection(GC* gc);
-void gc_check_concurrent_phase(GC * gc);
+void gc_decide_con_algo(char* concurrent_algo);
+void gc_set_default_con_algo();
 
-void gc_decide_concurrent_algorithm(char* concurrent_algo);
+void gc_reset_con_sweep(GC* gc);
 
-void gc_reset_concurrent_sweep(GC* gc);
+void gc_finish_con_GC(GC* gc, int64 time_mutator);
 
-extern volatile Boolean gc_sweeping_global_normal_chunk;
+extern volatile Boolean gc_sweep_global_normal_chunk;
 
-inline Boolean gc_is_sweeping_global_normal_chunk()
-{ return gc_sweeping_global_normal_chunk; }
+inline Boolean gc_is_sweep_global_normal_chunk()
+{ return gc_sweep_global_normal_chunk; }
 
-inline void gc_set_sweeping_global_normal_chunk()
-{ gc_sweeping_global_normal_chunk = TRUE; }
+inline void gc_set_sweep_global_normal_chunk()
+{ gc_sweep_global_normal_chunk = TRUE; }
 
-inline void gc_unset_sweeping_global_normal_chunk()
-{ gc_sweeping_global_normal_chunk = FALSE; }
+inline void gc_unset_sweep_global_normal_chunk()
+{ gc_sweep_global_normal_chunk = FALSE; }
 #endif
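
Two kinds of state live side by side in this header and are easy to conflate: the gc_is_specify_con_*
predicates test configuration bits in GC_PROP that are set once from the startup options, while
gc_con_is_in_marking()/gc_con_is_in_sweeping() read volatile flags that track which phase the current
cycle is actually in. A compact standalone sketch of that split follows; the bit values are illustrative
placeholders, not the real ALGO_CON_* constants.

    /* configuration: chosen once at startup */
    static unsigned int GC_PROP = 0;
    enum { CON_MARK_BIT = 0x1, CON_SWEEP_BIT = 0x2 };       /* placeholder bit values */

    static bool is_specify_con_mark()  { return (GC_PROP & CON_MARK_BIT)  != 0; }
    static bool is_specify_con_sweep() { return (GC_PROP & CON_SWEEP_BIT) != 0; }

    /* runtime: which phase the current collection cycle is in right now */
    static volatile bool in_marking  = false;
    static volatile bool in_sweeping = false;

    static void enter_marking_phase()
    {
      if(!is_specify_con_mark()) return;      /* phase was not enabled at startup */
      in_marking = true;  in_sweeping = false;
    }

    static void enter_sweeping_phase()
    {
      if(!is_specify_con_sweep()) return;
      in_marking = false; in_sweeping = true;
    }

    int main()
    {
      GC_PROP |= CON_MARK_BIT | CON_SWEEP_BIT;   /* "specified" by the options */
      enter_marking_phase();
      enter_sweeping_phase();
      return (in_sweeping && !in_marking) ? 0 : 1;
    }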

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp Tue Apr 22 01:52:29 2008
@@ -229,12 +229,11 @@
 {
   /*Concurrent Mark: Since object clone and array copy do not modify object slots, 
      we treat it as a new object. It has already been marked when the dest object was created.
-      We use WRITE_BARRIER_REM_SOURCE_OBJ function here to debug.
+      We use WB_REM_SOURCE_OBJ function here to debug.
     */  
-  Mutator *mutator = (Mutator *)gc_get_tls();  
-  mutator_post_signal(mutator,MUTATOR_ENTER_BARRIER);
 
-  if(WRITE_BARRIER_REM_SOURCE_OBJ == write_barrier_function){
+  if(WB_REM_SOURCE_OBJ == write_barrier_function){    
+    Mutator *mutator = (Mutator *)gc_get_tls();  
     lock(mutator->dirty_set_lock);
     
     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_written);
@@ -242,7 +241,6 @@
     
     unlock(mutator->dirty_set_lock);
   }
-  mutator_post_signal(mutator,MUTATOR_EXIT_BARRIER);
 
   if( !gc_is_gen_mode() || !object_has_ref_field((Partial_Reveal_Object*)p_obj_written)) 
     return;
@@ -261,14 +259,11 @@
 /* FIXME:: this is not the right interface for write barrier */
 void gc_heap_slot_write_ref (Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
 { 
-  //Mutator *mutator = (Mutator *)gc_get_tls();  
-  //mutator_post_signal(mutator,MUTATOR_ENTER_BARRIER);
-
   switch(write_barrier_function){
-    case WRITE_BARRIER_REM_NIL:
+    case WB_REM_NIL:
       *p_slot = p_target;
       break;
-    case WRITE_BARRIER_REM_SOURCE_REF:
+    case WB_REM_SOURCE_REF:
       *p_slot = p_target;
 #ifdef USE_REM_SLOTS
       gen_write_barrier_rem_slot(p_slot, p_target); 
@@ -276,15 +271,15 @@
       gen_write_barrier_rem_obj(p_obj_holding_ref, p_target);
 #endif
       break;      
-    case WRITE_BARRIER_REM_SOURCE_OBJ:
+    case WB_REM_SOURCE_OBJ:
       *p_slot = p_target;
       write_barrier_rem_source_obj(p_obj_holding_ref);
       break;
-    case WRITE_BARRIER_REM_OBJ_SNAPSHOT:
+    case WB_REM_OBJ_SNAPSHOT:
       write_barrier_rem_obj_snapshot(p_obj_holding_ref);
       *p_slot = p_target;
       break;
-    case WRITE_BARRIER_REM_OLD_VAR:
+    case WB_REM_OLD_VAR:
       write_barrier_rem_slot_oldvar(p_slot);      
       *p_slot = p_target;
       break;
@@ -292,8 +287,6 @@
       assert(0);
       return;
   }
-
-  //mutator_post_signal(mutator,MUTATOR_EXIT_BARRIER);
   return;
 }
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h Tue Apr 22 01:52:29 2008
@@ -27,12 +27,12 @@
 extern volatile unsigned int write_barrier_function;
 
 enum Write_Barrier_Function{
-  WRITE_BARRIER_REM_NIL           = 0x00,
-  WRITE_BARRIER_REM_SOURCE_OBJ    = 0x01,
-  WRITE_BARRIER_REM_SOURCE_REF    = 0x02,
-  WRITE_BARRIER_REM_OLD_VAR       = 0x03,
-  WRITE_BARRIER_REM_NEW_VAR       = 0x04,
-  WRITE_BARRIER_REM_OBJ_SNAPSHOT  = 0x05
+  WB_REM_NIL           = 0x00,
+  WB_REM_SOURCE_OBJ    = 0x01,
+  WB_REM_SOURCE_REF    = 0x02,
+  WB_REM_OLD_VAR       = 0x03,
+  WB_REM_NEW_VAR       = 0x04,
+  WB_REM_OBJ_SNAPSHOT  = 0x05
 };
 
 inline void gc_set_barrier_function(unsigned int wb_function)
@@ -41,4 +41,6 @@
 }
 
 #endif /* _GC_FOR_BARRIER_H_ */
+
+
 

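A detail worth keeping in mind while reading the renamed dispatch in gc_heap_slot_write_ref() above: the
barrier flavors differ in where the plain store sits. WB_REM_SOURCE_OBJ stores first and then remembers the
written object, whereas WB_REM_OBJ_SNAPSHOT and WB_REM_OLD_VAR record the pre-store information (the source
object, or the slot's old value) before the store is performed. The sketch below shows only that ordering;
the rem_* helpers are stubs standing in for the real remembered-set functions.

    #include <cassert>

    enum Write_Barrier_Function {
      WB_REM_NIL          = 0x00,
      WB_REM_SOURCE_OBJ   = 0x01,
      WB_REM_OLD_VAR      = 0x03,
      WB_REM_OBJ_SNAPSHOT = 0x05
    };

    static volatile unsigned int write_barrier_function = WB_REM_NIL;

    /* stubs for the real remembered-set / snapshot helpers */
    static void rem_source_obj(void* src)    { (void)src;  }
    static void rem_obj_snapshot(void* src)  { (void)src;  }
    static void rem_slot_oldvar(void** slot) { (void)slot; }

    static void heap_slot_write(void* src, void** slot, void* target)
    {
      switch(write_barrier_function){
        case WB_REM_NIL:                     /* no barrier: plain store            */
          *slot = target; break;
        case WB_REM_SOURCE_OBJ:              /* store, then remember the source    */
          *slot = target; rem_source_obj(src); break;
        case WB_REM_OBJ_SNAPSHOT:            /* snapshot the source before storing */
          rem_obj_snapshot(src); *slot = target; break;
        case WB_REM_OLD_VAR:                 /* remember the overwritten value     */
          rem_slot_oldvar(slot); *slot = target; break;
        default:
          assert(0);
      }
    }

    int main()
    {
      void* slot = 0; int dummy;
      write_barrier_function = WB_REM_OBJ_SNAPSHOT;
      heap_slot_write(0, &slot, &dummy);
      return slot == &dummy ? 0 : 1;
    }
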
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp Tue Apr 22 01:52:29 2008
@@ -159,7 +159,7 @@
     Class_Handle array_element_class = class_get_array_element_class(ch);
     gc_set_prop_array(gcvt);
     
-    gcvt->array_elem_size = class_get_array_element_size(ch);
+        gcvt->array_elem_size = class_get_array_element_size(ch);
     unsigned int the_offset = vector_first_element_offset_unboxed(array_element_class);
     gcvt->array_first_elem_offset = the_offset;
   
@@ -177,7 +177,7 @@
   WeakReferenceType type = class_is_reference(ch);
   gc_set_prop_reference(gcvt, type);
   
-  unsigned int size = class_get_object_size(ch);
+    unsigned int size = class_get_object_size(ch);
   gcvt->gc_allocated_size = size;
   
   gcvt->gc_class_name = class_get_name(ch);
@@ -197,6 +197,8 @@
 
   return;
 }  /* gc_class_prepared */
+
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Tue Apr 22 01:52:29 2008
@@ -21,11 +21,11 @@
 
 #include <cxxlog.h>
 #include "open/vm_properties.h"
+#include "open/vm_properties.h"
 #include "port_sysinfo.h"
 #include "vm_threads.h"
 #include "jit_runtime_support.h"
 #include "compressed_ref.h"
-
 #include "../gen/gen.h"
 #include "../mark_sweep/gc_ms.h"
 #include "../move_compact/gc_mc.h"
@@ -111,8 +111,10 @@
 #ifndef BUILD_IN_REFERENT
   gc_finref_metadata_initialize(gc);
 #endif
-  if(USE_CONCURRENT_GC){
-    collection_scheduler_initialize(gc);
+
+  collection_scheduler_initialize(gc);
+
+  if(gc_is_specify_con_gc()){
     marker_initialize(gc);
   }
   
@@ -231,12 +233,15 @@
   gc_weak_rootset_add_entry(p_global_gc, p_ref, is_short_weak);
 }
 
+extern Boolean IGNORE_FORCE_GC;
+
 /* VM to force GC */
 void gc_force_gc() 
 {
   vm_gc_lock_enum();
-
-  gc_reclaim_heap(p_global_gc, GC_CAUSE_RUNTIME_FORCE_GC);  
+  
+  if(!IGNORE_FORCE_GC)
+    gc_reclaim_heap(p_global_gc, GC_CAUSE_RUNTIME_FORCE_GC);  
 
   vm_gc_unlock_enum();
 }
@@ -437,6 +442,7 @@
 {
   return address_belongs_to_gc_heap(p_obj, p_global_gc);  
 }
+
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp Tue Apr 22 01:52:29 2008
@@ -33,6 +33,7 @@
 #define METADATA_BLOCK_SIZE_BYTES VECTOR_BLOCK_DATA_SIZE_BYTES
 
 GC_Metadata gc_metadata;
+unsigned int rootset_type;
 
 void gc_metadata_initialize(GC* gc)
 {
@@ -283,17 +284,6 @@
     mutator->rem_set = NULL;
     mutator = mutator->next;
   }
-
-  /* put back last remset block of each collector (saved in last collection) */  
-  unsigned int num_active_collectors = gc->num_active_collectors;
-  for(unsigned int i=0; i<num_active_collectors; i++)
-  {
-    Collector* collector = gc->collectors[i];
-    /* 1. in the first time GC, rem_set is NULL. 2. it should be NULL when NOS is forwarding_all */
-    if(collector->rem_set == NULL) continue;
-    pool_put_entry(metadata->collector_remset_pool, collector->rem_set);
-    collector->rem_set = NULL;
-  }
   
   assert( collect_is_major_normal() || collect_is_minor());
   if( collect_is_major_normal() ){
@@ -608,10 +598,7 @@
       pool_put_entry(metadata->free_set_pool,dirty_set);
       dirty_set = pool_get_entry(global_dirty_set_pool);
     }
-  }
-
-
-  
+  }  
 }
 
 void gc_prepare_dirty_set(GC* gc)
@@ -651,6 +638,21 @@
 void free_set_pool_put_entry(Vector_Block* block, GC_Metadata *metadata)
 { pool_put_entry(metadata->free_set_pool, block); }
 
+
+void gc_reset_collectors_rem_set(GC *gc) 
+{  
+  /* put back last remset block of each collector (saved in last collection) */  
+  GC_Metadata* metadata = gc->metadata;
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  for(unsigned int i=0; i<num_active_collectors; i++)
+  {
+    Collector* collector = gc->collectors[i];
+    /* 1. in the first time GC, rem_set is NULL. 2. it should be NULL when NOS is forwarding_all */
+    if(collector->rem_set == NULL) continue;
+    pool_put_entry(metadata->collector_remset_pool, collector->rem_set);
+    collector->rem_set = NULL;
+  }
+}
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h Tue Apr 22 01:52:29 2008
@@ -72,7 +72,7 @@
 
 void gc_identify_dead_weak_roots(GC *gc);
 void gc_update_weak_roots(GC *gc, Boolean double_fix);
-
+void gc_reset_collectors_rem_set(GC *gc); 
 
 inline void  gc_task_pool_clear(Pool* task_pool)
 {
@@ -223,6 +223,19 @@
   assert(gc->weakroot_set);
 }
 
+extern unsigned int rootset_type;
+
+enum ROOTSET_TYPE{
+  ROOTSET_IS_OBJ = 0x01,
+  ROOTSET_IS_REF = 0x02
+};
+
+
+inline void gc_set_rootset_type(unsigned int rs_type)
+{
+  rootset_type = rs_type;
+}
+
 #ifdef COMPRESS_REFERENCE
 
 inline void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref)
@@ -239,7 +252,10 @@
   /* construct an Uncompressed_Root */
   vector_block_add_entry(uncompressed_root_set, (POINTER_SIZE_INT)p_ref);
   assert(!vector_block_is_full(uncompressed_root_set));
-  vector_block_add_entry(uncompressed_root_set, (POINTER_SIZE_INT)ref);
+  if(rootset_type == ROOTSET_IS_REF)
+    vector_block_add_entry(uncompressed_root_set, (POINTER_SIZE_INT)ref);
+  else if(rootset_type == ROOTSET_IS_OBJ)
+    vector_block_add_entry(uncompressed_root_set, (POINTER_SIZE_INT)p_obj);
   
   if(!vector_block_is_full(uncompressed_root_set)) return;
   
@@ -252,9 +268,15 @@
 inline void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref)
 {
   assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address()); 
+
+  Partial_Reveal_Object *p_obj = *p_ref;
   
-  Vector_Block* root_set = gc->root_set;  
-  vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_ref);
+  Vector_Block* root_set = gc->root_set;
+
+  if(rootset_type == ROOTSET_IS_REF)
+    vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_ref);
+  else if(rootset_type == ROOTSET_IS_OBJ)
+    vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_obj);
   
   if( !vector_block_is_full(root_set)) return;
     

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp Tue Apr 22 01:52:29 2008
@@ -17,6 +17,8 @@
 #define LOG_DOMAIN "gc.base"
 #include "gc_common.h"
 #include "open/vm_properties.h"
+#include "gc_concurrent.h"
+#include "concurrent_collection_scheduler.h"
 
 /* FIXME:: need refactoring this function to distribute the options 
    interpretation to their respective modules. */
@@ -44,12 +46,7 @@
 extern Boolean IGNORE_FINREF;
 
 extern Boolean JVMTI_HEAP_ITERATION ;
-
-extern Boolean USE_CONCURRENT_GC;
-extern Boolean USE_CONCURRENT_ENUMERATION;
-extern Boolean USE_CONCURRENT_MARK;
-extern Boolean USE_CONCURRENT_SWEEP;
-
+extern Boolean IGNORE_FORCE_GC;
 
 POINTER_SIZE_INT HEAP_SIZE_DEFAULT = 256 * MB;
 POINTER_SIZE_INT min_heap_size_bytes = 16 * MB;
@@ -125,7 +122,7 @@
     return vm_property_get_size(property_name, 0, VM_PROPERTIES);
 }
 
-void gc_decide_concurrent_algorithm(char* concurrent_algo);
+void gc_decide_con_algo(char* concurrent_algo);
 GC* gc_gen_decide_collection_algo(char* minor_algo, char* major_algo, Boolean has_los);
 void gc_set_gen_mode(Boolean status);
 
@@ -149,7 +146,7 @@
     major_algo = vm_properties_get_value("gc.major_algorithm", VM_PROPERTIES);
   }
 
-  if (vm_property_is_set("gc.uniqe_algorithm", VM_PROPERTIES) == 1) {
+  if (vm_property_is_set("gc.unique_algorithm", VM_PROPERTIES) == 1) {
     unique_algo = vm_properties_get_value("gc.unique_algorithm", VM_PROPERTIES);
   }
 
@@ -308,46 +305,76 @@
   if (vm_property_is_set("gc.share_los_boundary", VM_PROPERTIES) == 1){
     share_los_boundary = vm_property_get_boolean("gc.share_los_boundary");
   }
+
+  if (vm_property_is_set("gc.ignore_force_gc", VM_PROPERTIES) == 1){
+    IGNORE_FORCE_GC = vm_property_get_boolean("gc.ignore_force_gc");
+  }
+  
   if (vm_property_is_set("gc.concurrent_gc", VM_PROPERTIES) == 1){
     Boolean use_all_concurrent_phase= vm_property_get_boolean("gc.concurrent_gc");
     if(use_all_concurrent_phase){
-      USE_CONCURRENT_ENUMERATION = TRUE;
-      USE_CONCURRENT_MARK = TRUE;
-      USE_CONCURRENT_SWEEP = TRUE;
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
+      DIE(( "Please define USE_UNIQUE_MARK_SWEEP_GC macro."));
+#endif
+      gc_specify_con_enum();
+      gc_specify_con_mark();
+      gc_specify_con_sweep();
       gc->generate_barrier = TRUE;
     }
   }
 
   if (vm_property_is_set("gc.concurrent_enumeration", VM_PROPERTIES) == 1){
-    USE_CONCURRENT_ENUMERATION= vm_property_get_boolean("gc.concurrent_enumeration");
+    Boolean USE_CONCURRENT_ENUMERATION = vm_property_get_boolean("gc.concurrent_enumeration");
     if(USE_CONCURRENT_ENUMERATION){
-      USE_CONCURRENT_GC = TRUE;      
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
+      DIE(("Please define USE_UNIQUE_MARK_SWEEP_GC macro."));
+#endif
+      gc_specify_con_enum();
       gc->generate_barrier = TRUE;
     }
   }
 
   if (vm_property_is_set("gc.concurrent_mark", VM_PROPERTIES) == 1){
-    USE_CONCURRENT_MARK= vm_property_get_boolean("gc.concurrent_mark");
+    Boolean USE_CONCURRENT_MARK = vm_property_get_boolean("gc.concurrent_mark");
     if(USE_CONCURRENT_MARK){
-      USE_CONCURRENT_GC = TRUE;      
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
+      DIE(("Please define USE_UNIQUE_MARK_SWEEP_GC macro."));
+#endif
+      gc_specify_con_mark();
       gc->generate_barrier = TRUE;
+      IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/
     }
   }
 
   if (vm_property_is_set("gc.concurrent_sweep", VM_PROPERTIES) == 1){
-    USE_CONCURRENT_SWEEP= vm_property_get_boolean("gc.concurrent_sweep");
+    Boolean USE_CONCURRENT_SWEEP= vm_property_get_boolean("gc.concurrent_sweep");
     if(USE_CONCURRENT_SWEEP){
-      USE_CONCURRENT_GC = TRUE;
+      /*currently, concurrent sweeping only starts after concurrent marking.*/
+      assert(gc_is_specify_con_mark());
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
+      DIE(("Please define USE_UNIQUE_MARK_SWEEP_GC macro."));
+#endif
+      gc_specify_con_sweep();
+      IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/
     }
   }
  
   char* concurrent_algo = NULL;
   
   if (vm_property_is_set("gc.concurrent_algorithm", VM_PROPERTIES) == 1) {
-    concurrent_algo = vm_properties_get_value("gc.concurrent_algorithm", VM_PROPERTIES);
+    concurrent_algo = vm_properties_get_value("gc.concurrent_algorithm", VM_PROPERTIES);    
+    gc_decide_con_algo(concurrent_algo);
+  }else if(gc_is_specify_con_gc()){
+    gc_set_default_con_algo();
+  }
+
+  char* cc_scheduler = NULL;
+  if (vm_property_is_set("gc.cc_scheduler", VM_PROPERTIES) == 1) {
+    cc_scheduler = vm_properties_get_value("gc.cc_scheduler", VM_PROPERTIES);    
+    gc_decide_cc_scheduler_kind(cc_scheduler);
+  }else if(gc_is_specify_con_gc()){
+    gc_set_default_cc_scheduler_kind();
   }
-  
-  gc_decide_concurrent_algorithm(concurrent_algo);
 
 #if defined(ALLOC_ZEROING) && defined(ALLOC_PREFETCH)
   if(vm_property_is_set("gc.prefetch",VM_PROPERTIES) ==1) {
@@ -381,6 +408,8 @@
 
   return gc;
 }
+
+
 
 
 

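With the properties above, concurrent collection is now opted into per phase (and only on a build with the
USE_UNIQUE_MARK_SWEEP_GC macro defined, otherwise the new DIE() fires). Assuming Harmony's usual
-XX:<name>=<value> way of passing VM properties, a run with every concurrent phase enabled and the default
on-the-fly algorithm could look roughly like the line below; the invocation is illustrative, not taken from
the patch.

    java -XX:gc.concurrent_gc=true \
         -XX:gc.ignore_force_gc=true \
         HelloWorld
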
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h Tue Apr 22 01:52:29 2008
@@ -146,6 +146,21 @@
   return (int)hythread_is_suspend_enabled();
 }
 
+inline int vm_suspend_all_threads()
+{
+  int disable_count = hythread_reset_suspend_disable();
+  hythread_suspend_all(NULL, NULL);
+  hythread_suspend_disable();
+  return disable_count;
+}
+
+inline void vm_resume_all_threads(int disable_count)
+{
+  hythread_suspend_enable();
+  hythread_resume_all(NULL);
+  hythread_set_suspend_disable(disable_count);
+}
+
 inline void *atomic_casptr(volatile void **mem, void *with, const void *cmp) 
 {  return apr_atomic_casptr(mem, with, cmp); }
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h Tue Apr 22 01:52:29 2008
@@ -23,7 +23,8 @@
   GC_CAUSE_NOS_IS_FULL,
   GC_CAUSE_LOS_IS_FULL,
   GC_CAUSE_MOS_IS_FULL,
-  GC_CAUSE_RUNTIME_FORCE_GC
+  GC_CAUSE_RUNTIME_FORCE_GC,
+  GC_CAUSE_CONCURRENT_GC
 };
 
 extern unsigned int GC_PROP;
@@ -85,6 +86,10 @@
   ALGO_CON_SWEEP        = 0x5000000,  /* ALGO_CON|0x4000000 */
   ALGO_CON_ENUM         = 0x9000000,  /* ALGO_CON|0x8000000 */
 
+  ALGO_CON_OTF_OBJ      = 0x10000000,
+  ALGO_CON_OTF_REF      = 0x20000000,
+  ALGO_CON_MOSTLY       = 0x40000000,
+  ALGO_CON_MASK         = 0x70000000,
 };
 
 FORCE_INLINE Boolean gc_is_kind(unsigned int kind)
@@ -223,7 +228,7 @@
    root slots after collection in an extra phase. i.e., collect_mark_and_move */
 FORCE_INLINE Boolean collect_need_update_repset()
 {
-  return (gc_is_kind(ALGO_MAJOR) || gc_is_kind(ALGO_MS_COMPACT));
+  return (gc_is_kind(ALGO_MAJOR) || gc_is_kind(ALGO_MS_COMPACT) || !gc_has_nos());
 }
 
 #endif /* #ifndef _GC_PROPERTIES */
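
The ALGO_CON_* bits added here replace the old standalone CONCURRENT_ALGO variable: gc_decide_con_algo()
(in gc_concurrent.cpp above) clears ALGO_CON_MASK and ORs in exactly one algorithm bit, and gc_is_kind()
then answers which algorithm is active. A self-contained sketch of that select-and-test round trip, reusing
the constants from this header; the helper bodies mirror the patch but are illustrative:

    #include <cctype>
    #include <cstdio>
    #include <cstring>

    /* constants copied from gc_properties.h above */
    enum {
      ALGO_CON_OTF_OBJ = 0x10000000,
      ALGO_CON_OTF_REF = 0x20000000,
      ALGO_CON_MOSTLY  = 0x40000000,
      ALGO_CON_MASK    = 0x70000000
    };

    static unsigned int GC_PROP = 0;

    static bool is_kind(unsigned int kind)             /* mirrors gc_is_kind() */
    { return (GC_PROP & kind) == kind; }

    static void decide_con_algo(char* name)            /* mirrors gc_decide_con_algo() */
    {
      for(char* p = name; *p; ++p) *p = (char)toupper((unsigned char)*p);  /* string_to_upper */
      GC_PROP &= ~ALGO_CON_MASK;                                           /* drop any previous choice */
      if(!strcmp(name, "OTF_OBJ"))         GC_PROP |= ALGO_CON_OTF_OBJ;
      else if(!strcmp(name, "MOSTLY_CON")) GC_PROP |= ALGO_CON_MOSTLY;
      else if(!strcmp(name, "OTF_SLOT"))   GC_PROP |= ALGO_CON_OTF_REF;
    }

    int main()
    {
      char algo[] = "otf_slot";
      decide_con_algo(algo);
      printf("OTF_REF selected: %d\n", (int)is_kind(ALGO_CON_OTF_REF));
      return 0;
    }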

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h Tue Apr 22 01:52:29 2008
@@ -177,6 +177,34 @@
   return;
 }
 
+inline void hashcode_buf_update(Partial_Reveal_Object* p_obj, int32 hashcode, Hashcode_Buf* hashcode_buf)
+{
+  POINTER_SIZE_INT obj_addr = (POINTER_SIZE_INT)p_obj;
+  lock(hashcode_buf->lock);
+  Seq_List* list = hashcode_buf->list; 
+  seq_list_iterate_init(list);
+  while(seq_list_has_next(list)){
+    Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list); 
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(curr_block);
+    
+    while(!vector_block_iterator_end(curr_block, iter)){  
+      POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter;
+      if(obj_addr != addr){
+        iter = vector_block_iterator_advance(curr_block, iter);
+        iter = vector_block_iterator_advance(curr_block, iter);
+      }else{
+        iter = vector_block_iterator_advance(curr_block, iter);
+        *iter = (POINTER_SIZE_INT)hashcode;
+        iter = vector_block_iterator_advance(curr_block, iter);
+        unlock(hashcode_buf->lock);
+        return;
+      }
+    }
+  }
+  unlock(hashcode_buf->lock);
+  hashcode_buf_add(p_obj, hashcode, hashcode_buf);
+}
+
 inline void hashcode_buf_refresh_all(Hashcode_Buf* hashcode_buf, POINTER_SIZE_INT dist)
 {
   Seq_List* list = hashcode_buf->list; 
@@ -288,6 +316,7 @@
 }
 
 int obj_lookup_hashcode_in_buf(Partial_Reveal_Object *p_obj);
+int obj_lookup_hashcode_in_chunk_buf(Partial_Reveal_Object *p_obj);
 
 inline int hashcode_lookup(Partial_Reveal_Object* p_obj,Obj_Info_Type obj_info)
 {
@@ -297,7 +326,11 @@
     unsigned char* pos = (unsigned char *)p_obj;
     hash = *(int*) (pos + offset);
   }else if(hashcode_is_buffered(p_obj)){
+#ifdef  USE_UNIQUE_MARK_SWEEP_GC
+    hash = obj_lookup_hashcode_in_chunk_buf(p_obj);
+#else
     hash = obj_lookup_hashcode_in_buf(p_obj);
+#endif
   }
   return hash;
 }
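
hashcode_buf_update() above walks the buffer as alternating (object address, hashcode) entries (hence the
two iterator advances per element) and falls back to hashcode_buf_add() when the object is not present. The
same update-or-append logic over a flat pair list, as a standalone sketch with std::vector standing in for
the Seq_List/Vector_Block machinery:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    /* buffer laid out as alternating entries: address, hashcode, address, hashcode, ... */
    static std::vector<intptr_t> hashcode_buf;

    static void hashcode_buf_update(void* p_obj, int32_t hashcode)
    {
      intptr_t obj_addr = (intptr_t)p_obj;
      for(size_t i = 0; i + 1 < hashcode_buf.size(); i += 2){   /* step over pairs */
        if(hashcode_buf[i] == obj_addr){
          hashcode_buf[i + 1] = (intptr_t)hashcode;             /* update in place */
          return;
        }
      }
      hashcode_buf.push_back(obj_addr);                         /* not found: append */
      hashcode_buf.push_back((intptr_t)hashcode);
    }

    int main()
    {
      int dummy;
      hashcode_buf_update(&dummy, 17);
      hashcode_buf_update(&dummy, 23);   /* second call overwrites, does not append */
      printf("entries=%zu hash=%ld\n", hashcode_buf.size() / 2, (long)hashcode_buf[1]);
      return 0;
    }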

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp?rev=650422&r1=650421&r2=650422&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp Tue Apr 22 01:52:29 2008
@@ -151,7 +151,7 @@
       else 
         assert(0);
     }
-  } else if(collect_is_major_normal()){
+  } else if(collect_is_major_normal() || !gc_has_nos()){
     p_ref_or_obj = p_obj;
     if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){
       trace_object = trace_obj_in_space_tune_marking;
@@ -165,7 +165,7 @@
       } else {
         collector->los_live_obj_size += round_up_to_size(obj_size, KB); 
       }
-    } else if(major_is_marksweep()){
+    } else if(!gc_has_nos()){
       trace_object = trace_obj_in_ms_marking;
     } else {
       trace_object = trace_obj_in_normal_marking;
@@ -195,7 +195,7 @@
     while(!vector_block_iterator_end(task_block, iter)){
       void *p_ref_or_obj = (void*)*iter;
       assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj)
-              || ((collect_is_major_normal()||major_is_marksweep()) && p_ref_or_obj));
+              || ((collect_is_major_normal()||major_is_marksweep()||!gc_has_nos()) && p_ref_or_obj));
       trace_object(collector, p_ref_or_obj);
       if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
       
@@ -850,6 +850,8 @@
   finref_copy_pool(finalizable_obj_pool, finalizable_obj_pool_copy, gc);
   finref_copy_pool_to_rootset(gc, finalizable_obj_pool_copy);
 }
+
+
 
 
 


