harmony-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From wjwashb...@apache.org
Subject svn commit: r476946 [1/2] - in /harmony/enhanced/drlvm/trunk: build/make/components/vm/ vm/gc_gen/src/common/ vm/gc_gen/src/gen/ vm/gc_gen/src/mark_compact/ vm/gc_gen/src/mark_sweep/ vm/gc_gen/src/thread/ vm/gc_gen/src/trace_forward/
Date Sun, 19 Nov 2006 22:16:27 GMT
Author: wjwashburn
Date: Sun Nov 19 14:16:25 2006
New Revision: 476946

URL: http://svn.apache.org/viewvc?view=rev&rev=476946
Log:
Harmony-2101, patch that enables GCV5 to do parallel compaction
build and build test run successfully on linux w/ gcc 4.0.2, build runs
successfully on windows

Added:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_par.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_seq.cpp
Modified:
    harmony/enhanced/drlvm/trunk/build/make/components/vm/gc_gen.xml
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/thread_alloc.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp

Modified: harmony/enhanced/drlvm/trunk/build/make/components/vm/gc_gen.xml
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/build/make/components/vm/gc_gen.xml?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/build/make/components/vm/gc_gen.xml (original)
+++ harmony/enhanced/drlvm/trunk/build/make/components/vm/gc_gen.xml Sun Nov 19 14:16:25 2006
@@ -59,18 +59,20 @@
                 <include name="mark_sweep/*.cpp" />            
                 <include name="thread/*.cpp" />            
                 <include name="trace_forward/*.cpp" />            
+                <include name="utils/*.cpp" /> 
                 <include name="verify/*.cpp" />            
               </select>
 
-                <select os="lnx">
-                  <exclude name="common/*.cpp" />
-                  <exclude name="gen/*.cpp" />
-                  <exclude name="mark_compact/*.cpp" />
-                  <exclude name="mark_sweep/*.cpp" />
-                  <exclude name="thread/*.cpp" />
-                  <exclude name="trace_forward/*.cpp" />
-                  <exclude name="verify/*.cpp" />
-                </select>
+              <select os="lnx">
+                <exclude name="common/*.cpp" />
+                <exclude name="gen/*.cpp" />
+                <exclude name="mark_compact/*.cpp" />
+                <exclude name="mark_sweep/*.cpp" />
+                <exclude name="thread/*.cpp" />
+                <exclude name="trace_forward/*.cpp" />
+                <exclude name="utils/*.cpp" /> 
+                <exclude name="verify/*.cpp" />
+              </select>
             </fileset>
 
             <defineset define="BUILDING_GC" />

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h Sun Nov 19 14:16:25 2006
@@ -27,10 +27,13 @@
 #define GC_BLOCK_SIZE_BYTES (1 << GC_BLOCK_SHIFT_COUNT)
 
 enum Block_Status {
-  BLOCK_NIL,
-  BLOCK_FREE,
-  BLOCK_IN_USE,
-  BLOCK_USED
+  BLOCK_NIL = 0,
+  BLOCK_FREE = 0x1,
+  BLOCK_IN_USE = 0x2,
+  BLOCK_USED = 0x4,
+  BLOCK_IN_COMPACT = 0x8,
+  BLOCK_COMPACTED = 0x10,
+  BLOCK_TARGET = 0x20
 };
 
 typedef struct Block_Header {
@@ -39,7 +42,6 @@
   void* ceiling;                    
   unsigned int block_idx;           
   unsigned int status;
-  SlotVector* reloc_table;
   Block_Header* next;
   unsigned int mark_table[1];  /* entry num == MARKBIT_TABLE_SIZE_WORDS */
 }Block_Header;
@@ -91,7 +93,7 @@
         if( !(markbits& (1<<k)) ){ k++; continue;}
         unsigned int word_index = (j<<BIT_SHIFT_TO_BITS_PER_WORD) + k;
         Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)((unsigned int*)GC_BLOCK_BODY(block) + word_index);
-        /* only valid before compaction: assert(obj_is_marked_in_vt(p_obj)); */
+        assert(obj_is_marked_in_vt(p_obj)); 
         
         *mark_bit_idx = word_index;
       return p_obj;
@@ -120,7 +122,7 @@
       
       unsigned int word_index = (j<<BIT_SHIFT_TO_BITS_PER_WORD) + k;
       Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)((unsigned int*)GC_BLOCK_BODY(block) + word_index);      
-      /* only valid before compaction: assert(obj_is_marked_in_vt(p_obj)); */
+      assert(obj_is_marked_in_vt(p_obj));
       
       *mark_bit_idx = word_index;
       return p_obj;
@@ -165,5 +167,30 @@
   block_clear_mark_table(block);
   return;     
 }
+
+typedef struct Blocked_Space {
+  /* <-- first couple of fields are overloaded as Space */
+  void* heap_start;
+  void* heap_end;
+  unsigned int reserved_heap_size;
+  unsigned int committed_heap_size;
+  unsigned int num_collections;
+  GC* gc;
+  Boolean move_object;
+  Boolean (*mark_object_func)(Space* space, Partial_Reveal_Object* p_obj);
+  /* END of Space --> */
+
+  Block* blocks; /* short-cut for mspace block header access, not mandatory */
+  
+  /* FIXME:: the block indices should be replaced with block header addresses */
+  unsigned int first_block_idx;
+  unsigned int ceiling_block_idx;
+  volatile unsigned int free_block_idx;
+  
+  unsigned int num_used_blocks;
+  unsigned int num_managed_blocks;
+  unsigned int num_total_blocks;
+  /* END of Blocked_Space --> */
+}Blocked_Space;
 
 #endif //#ifndef _BLOCK_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Sun Nov 19 14:16:25 2006
@@ -1,105 +0,0 @@
-/*
- *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#include "../thread/collector.h"
-#include "../gen/gen.h"
-
-static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref)
-{
-  Partial_Reveal_Object* p_obj = *p_ref;
-  if(p_obj==NULL) return;
-
-  MarkStack* mark_stack = collector->mark_stack;
-  Space* obj_space = space_of_addr(collector->gc, p_obj);
-  Space* ref_space = space_of_addr(collector->gc, p_ref);
-
-  /* if obj to be moved, its ref slot needs remembering for later update */
-  if(obj_space->move_object) 
-    ref_space->save_reloc_func(ref_space, p_ref);
-
-  if(obj_space->mark_object_func(obj_space, p_obj))   
-    mark_stack->push(p_obj);
-    
-  return;
-}
-
-static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
-{
-  if( !object_has_slots(p_obj) ) return;
-  
-    /* scan array object */
-  if (object_is_array(p_obj)) {
-    Partial_Reveal_Object* array = p_obj;
-    assert(!obj_is_primitive_array(array));
-    
-    int32 array_length = vector_get_length((Vector_Handle) array);
-    for (int i = 0; i < array_length; i++) {
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
-      scan_slot(collector, p_ref);
-    }   
-    return;
-  }
-
-  /* scan non-array object */
-  int *offset_scanner = init_object_scanner(p_obj);
-  while (true) {
-    Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
-    if (p_ref == NULL) break; /* terminating ref slot */
-  
-    scan_slot(collector, p_ref);
-    offset_scanner = offset_next_ref(offset_scanner);
-  }
-
-  return;
-}
-
-static void scan_root(Collector* collector, Partial_Reveal_Object *p_obj)
-{
-  assert(p_obj);
-  Space* space = space_of_addr(collector->gc, p_obj);
-  if( !space->mark_object_func(space, p_obj) ) return;  
-      
-  MarkStack* mark_stack = collector->mark_stack;
-  mark_stack->push(p_obj);
-  
-  while(!mark_stack->empty()){
-  	p_obj = mark_stack->top();
-  	mark_stack->pop();
-	  scan_object(collector, p_obj);
-  }
-  
-  return;
-}
-
-/* NOTE:: Only marking in object header is idempotent */
-void mark_scan_heap(Collector* collector)
-{
-  GC* gc = collector->gc;
-
-  int size = gc->root_set->size();
-  
-  for(int i=0; i<size; i++){
-    Partial_Reveal_Object **p_ref = (*gc->root_set)[i];
-	  assert(*p_ref); /* root ref should never by NULL */
-	  scan_root(collector, *p_ref);	
-  }	
-  
-  return;
-}

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Sun Nov 19 14:16:25 2006
@@ -64,14 +64,11 @@
 
 typedef void (*TaskType)(void*);
 
-#define GC_NUM_ROOTS_HINT  10000
+extern Boolean NEED_BARRIER;
+extern unsigned int NUM_COLLECTORS;
 
 typedef std::stack<Partial_Reveal_Object *> MarkStack;
 typedef std::stack<Partial_Reveal_Object**> TraceStack;
-typedef std::vector<Partial_Reveal_Object*> RemobjSet;
-typedef std::vector<Partial_Reveal_Object**> RootSet;
-typedef std::vector<Partial_Reveal_Object**> RemslotSet;
-typedef std::vector<Partial_Reveal_Object**> SlotVector;
 typedef std::map<Partial_Reveal_Object*, Obj_Info_Type> ObjectMap;
 #include <hash_set>
 typedef stdext::hash_set<void *> HashSet;
@@ -190,8 +187,6 @@
   GC* gc;
   Boolean move_object;
   Boolean (*mark_object_func)(Space* space, Partial_Reveal_Object* p_obj);
-  void (*save_reloc_func)(Space* space, Partial_Reveal_Object** p_ref);
-  void (*update_reloc_func)(Space* space);
 }Space;
 
 inline unsigned int space_committed_size(Space* space){ return space->committed_heap_size;}
@@ -211,7 +206,8 @@
 /* all GCs inherit this GC structure */
 struct Mutator;
 struct Collector;
-
+struct GC_Metadata;
+struct Vector_Block;
 typedef struct GC{
   void* heap_start;
   void* heap_end;
@@ -229,9 +225,11 @@
   unsigned int num_collectors;
   unsigned int num_active_collectors; /* not all collectors are working */
   
-  /* rootsets for collection (FIXME:: should be distributed to collectors) */
-  RootSet* root_set;
+  /* metadata is the pool for rootset, markstack, etc. */  
+  GC_Metadata* metadata;
   unsigned int collect_kind; /* MAJOR or MINOR */
+  /* FIXME:: this is wrong! root_set belongs to mutator */
+  Vector_Block* root_set;
 
   /* mem info */
   apr_pool_t *aux_pool;
@@ -239,22 +237,10 @@
 
 }GC;
 
-inline void gc_init_rootset(GC* gc) 
-{
-	gc->root_set = new RootSet();
-	gc->root_set->reserve(GC_NUM_ROOTS_HINT);
-	gc->root_set->clear();
-}
-
-inline void gc_reset_rootset(GC* gc) 
-{
-	gc->root_set->clear();
-}
-
-void mark_scan_heap(Collector* collector);
+void mark_scan_heap_par(Collector* collector);
+void mark_scan_heap_seq(Collector* collector);
 
 inline void* gc_heap_base(GC* gc){ return gc->heap_start; }
 inline void* gc_heap_ceiling(GC* gc){ return gc->heap_end; }
-
 
 #endif //_GC_COMMON_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Sun Nov 19 14:16:25 2006
@@ -18,36 +18,125 @@
  * @author Xiao-Feng Li, 2006/10/05
  */
 
+#include <cxxlog.h>
 #include "vm_threads.h"
 
 #include "../gen/gen.h"
 #include "interior_pointer.h"
 
+unsigned int HEAP_SIZE_DEFAULT = 256 * MB;
+
+/* heap size limit is not interesting. only for manual tuning purpose */
+unsigned int min_heap_size_bytes = 32 * MB;
+unsigned int max_heap_size_bytes = 256 * MB;
+
+static size_t parse_size_string(const char* size_string) 
+{
+  size_t len = strlen(size_string);
+  size_t unit = 1;
+  if (tolower(size_string[len - 1]) == 'k') {
+    unit = 1024;
+  } else if (tolower(size_string[len - 1]) == 'm') {
+    unit = 1024 * 1024;
+  } else if (tolower(size_string[len - 1]) == 'g') {
+    unit = 1024 * 1024 * 1024;
+  }
+  size_t size = atol(size_string);
+  size_t res = size * unit;
+  if (res / unit != size) {
+    /* overflow happened */
+    return 0;
+  }
+  return res;
+}
+
+static bool get_property_value_boolean(char* name) 
+{
+  const char* value = vm_get_property_value(name);
+  
+  return (strcmp("0", value) != 0
+    && strcmp("off", value) != 0 
+    && strcmp("false", value) != 0);
+}
+
+static int get_property_value_int(char* name) 
+{
+  const char* value = vm_get_property_value(name);
+  return (NULL == value) ? 0 : atoi(value);
+}
+
+static bool is_property_set(char* name) 
+{
+  const char* value = vm_get_property_value(name);
+  return (NULL != value && 0 != value[0]);
+}
+
+static void parse_configuration_properties() 
+{
+  unsigned int max_heap_size = HEAP_SIZE_DEFAULT;
+  unsigned int min_heap_size = min_heap_size_bytes;
+  
+  if (is_property_set("gc.mx")) {
+    max_heap_size = parse_size_string(vm_get_property_value("gc.mx"));
+
+    if (max_heap_size < min_heap_size)
+      max_heap_size = min_heap_size;
+    if (0 == max_heap_size) 
+      max_heap_size = HEAP_SIZE_DEFAULT;
+ 
+    min_heap_size = max_heap_size / 10;
+    if (min_heap_size < min_heap_size_bytes) min_heap_size = min_heap_size_bytes;
+  }
+
+  if (is_property_set("gc.ms")) {
+    min_heap_size = parse_size_string(vm_get_property_value("gc.ms"));
+    if (min_heap_size < min_heap_size_bytes) 
+      min_heap_size = min_heap_size_bytes;
+  }
+
+  if (min_heap_size > max_heap_size)
+    max_heap_size = min_heap_size;
+
+  min_heap_size_bytes = min_heap_size;
+  max_heap_size_bytes = max_heap_size;
+
+  if (is_property_set("gc.num_collectors")) {
+    unsigned int num = get_property_value_int("gc.num_collectors");
+    NUM_COLLECTORS = (num==0)? NUM_COLLECTORS:num;
+  }
+
+  if (is_property_set("gc.gen_mode")) {
+    NEED_BARRIER = get_property_value_boolean("gc.gen_mode");
+  }
+  
+  return;  
+}
+
 static GC* p_global_gc = NULL;
 
 void gc_init() 
 {  
+  parse_configuration_properties();
+  
   assert(p_global_gc == NULL);
   GC* gc = (GC*)STD_MALLOC(sizeof(GC_Gen));
   assert(gc);
   memset(gc, 0, sizeof(GC));  
   p_global_gc = gc;
   gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes);
-  /* initialize the main thread*/
-  // gc_thread_init(vm_get_gc_thread_local());
   
   return;
 }
 
 /* this interface need reconsidering. is_pinned is unused. */
 void gc_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) 
-{  	
+{   
   Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)ref;
-  if (*p_ref == NULL) return;	
-	assert( !obj_is_marked_in_vt(*p_ref));
-	assert( !obj_is_forwarded_in_vt(*p_ref) && !obj_is_forwarded_in_obj_info(*p_ref)); 
-	assert( obj_is_in_gc_heap(*p_ref));
-	p_global_gc->root_set->push_back(p_ref);
+  if (*p_ref == NULL) return;
+  assert( !obj_is_marked_in_vt(*p_ref));
+  assert( !obj_is_forwarded_in_vt(*p_ref) && !obj_is_forwarded_in_obj_info(*p_ref)); 
+  assert( obj_is_in_gc_heap(*p_ref));
+  gc_rootset_add_entry(p_global_gc, p_ref);
 } 
 
 void gc_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned) 
@@ -86,13 +175,13 @@
 
 int64 gc_free_memory() 
 {
-	return (int64)gc_gen_free_memory_size((GC_Gen*)p_global_gc);
+  return (int64)gc_gen_free_memory_size((GC_Gen*)p_global_gc);
 }
 
 /* java heap size.*/
 int64 gc_total_memory() 
 {
-	return (int64)((POINTER_SIZE_INT)gc_heap_ceiling(p_global_gc) - (POINTER_SIZE_INT)gc_heap_base(p_global_gc)); 
+  return (int64)((POINTER_SIZE_INT)gc_heap_ceiling(p_global_gc) - (POINTER_SIZE_INT)gc_heap_base(p_global_gc)); 
 }
 
 void gc_vm_initialized()

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp?view=auto&rev=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp Sun Nov 19 14:16:25 2006
@@ -0,0 +1,274 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/25
+ */
+
+#include "gc_metadata.h"
+#include "../thread/mutator.h"
+#include "../thread/collector.h"
+
+#define GC_METADATA_SIZE_BYTES 32*MB
+
+#define METADATA_BLOCK_SIZE_BIT_SHIFT 12
+#define METADATA_BLOCK_SIZE_BYTES (1<<METADATA_BLOCK_SIZE_BIT_SHIFT)
+
+static GC_Metadata gc_metadata;
+
+void gc_metadata_initialize(GC* gc)
+{
+  /* FIXME:: since we use a list to arrange the root sets and tasks, we can
+     dynamically alloc space for metadata. 
+     We just don't have this dynamic support at the moment. */
+
+  void* metadata = STD_MALLOC(GC_METADATA_SIZE_BYTES);
+  memset(metadata, 0, GC_METADATA_SIZE_BYTES);
+  gc_metadata.heap_start = metadata;
+  gc_metadata.heap_end = (void*)((unsigned int)metadata + GC_METADATA_SIZE_BYTES);
+
+  unsigned int i=0;       
+  unsigned int num_blocks =  GC_METADATA_SIZE_BYTES >> METADATA_BLOCK_SIZE_BIT_SHIFT;
+  for(i=0; i<num_blocks; i++){
+    Vector_Block* block = (Vector_Block*)((unsigned int)metadata + i*METADATA_BLOCK_SIZE_BYTES);
+    vector_block_init(block, METADATA_BLOCK_SIZE_BYTES);
+  }
+  
+  /* half of the metadata space is used for mark_stack */
+  unsigned num_tasks = num_blocks >> 1;
+  gc_metadata.free_task_pool = sync_pool_create(num_tasks);
+  for(i=0; i<num_tasks; i++){
+    unsigned int block = (unsigned int)metadata + i*METADATA_BLOCK_SIZE_BYTES;    
+    pool_put_entry(gc_metadata.free_task_pool, (void*)block); 
+  }
+  gc_metadata.mark_task_pool = sync_pool_create(num_tasks);
+
+  /* the other half is used for root sets (including rem sets) */
+  unsigned num_sets = num_blocks >> 1;
+  gc_metadata.free_set_pool = sync_pool_create(num_sets);
+  /* initialize free rootset pool so that mutators can use them */  
+  for(; i<num_sets+num_tasks; i++){
+    unsigned int block = (unsigned int)metadata + i*METADATA_BLOCK_SIZE_BYTES;    
+    pool_put_entry(gc_metadata.free_set_pool, (void*)block); 
+  }
+
+  gc_metadata.gc_rootset_pool = sync_pool_create(num_sets);
+  gc_metadata.mutator_remset_pool = sync_pool_create(num_sets);
+  gc_metadata.collector_remset_pool = sync_pool_create(num_sets);
+  gc_metadata.collector_repset_pool = sync_pool_create(num_sets);
+ 
+  gc->metadata = &gc_metadata; 
+  return;  
+}
+
+void gc_metadata_destruct(GC* gc)
+{
+  GC_Metadata* metadata = gc->metadata;
+  sync_pool_destruct(metadata->free_task_pool);
+  sync_pool_destruct(metadata->mark_task_pool);
+  
+  sync_pool_destruct(metadata->free_set_pool);
+  sync_pool_destruct(metadata->gc_rootset_pool); 
+  sync_pool_destruct(metadata->mutator_remset_pool);  
+  sync_pool_destruct(metadata->collector_remset_pool);
+  sync_pool_destruct(metadata->collector_repset_pool);
+
+  STD_FREE(metadata->heap_start);
+  gc->metadata = NULL;  
+}
+
+void gc_metadata_reset(GC* gc)
+{
+  GC_Metadata* metadata = gc->metadata;
+  Pool* gc_rootset_pool = metadata->gc_rootset_pool;
+  Pool* mutator_remset_pool = metadata->mutator_remset_pool;
+  Pool* collector_remset_pool = metadata->collector_remset_pool;
+  Pool* free_set_pool = metadata->free_set_pool;
+
+  Vector_Block* root_set = NULL;
+  
+  /* put back last rootset block */
+  pool_put_entry(gc_rootset_pool, gc->root_set);
+  gc->root_set = NULL;
+  
+  if(!gc_requires_barriers()) return;
+
+  Mutator *mutator = gc->mutator_list;
+  while (mutator) {
+    pool_put_entry(mutator_remset_pool, mutator->rem_set);
+    mutator->rem_set = NULL;
+  }  
+  
+  for(unsigned int i=0; i<gc->num_collectors; i++){
+    Collector* collector = gc->collectors[i];
+    pool_put_entry(collector_remset_pool, collector->rem_set);
+    collector->rem_set = NULL;
+  }
+
+  if( gc->collect_kind == MAJOR_COLLECTION ){
+    /* all the remsets are useless now */
+    /* clean and put back mutator remsets */  
+    root_set = pool_get_entry( mutator_remset_pool );
+    while(root_set){
+        vector_block_clear(root_set);
+        pool_put_entry(free_set_pool, root_set);
+        root_set = pool_get_entry( mutator_remset_pool );
+    }
+  
+    /* clean and put back collector remsets */  
+    root_set = pool_get_entry( collector_remset_pool );
+    while(root_set){
+        vector_block_clear(root_set);
+        pool_put_entry(free_set_pool, root_set);
+        root_set = pool_get_entry( collector_remset_pool );
+    }
+
+  }else{ /* MINOR_COLLECTION */
+    /* all the remsets are put into the shared pool */
+    root_set = pool_get_entry( mutator_remset_pool );
+    while(root_set){
+        pool_put_entry(gc_rootset_pool, root_set);
+        root_set = pool_get_entry( mutator_remset_pool );
+    }
+  
+    /* put back collector remsets */  
+    root_set = pool_get_entry( collector_remset_pool );
+    while(root_set){
+        pool_put_entry(gc_rootset_pool, root_set);
+        root_set = pool_get_entry( collector_remset_pool );
+    }
+  }
+  
+  return;
+
+}
+
+void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_ref)
+{
+  Vector_Block* root_set = mutator->rem_set;  
+  vector_block_add_entry(root_set, (unsigned int)p_ref);
+  
+  if( !vector_block_is_full(root_set)) return;
+    
+  pool_put_entry(gc_metadata.mutator_remset_pool, root_set);
+  mutator->rem_set = pool_get_entry(gc_metadata.free_set_pool);  
+}
+
+void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+  assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); 
+
+  Vector_Block* root_set = collector->rep_set;  
+  vector_block_add_entry(root_set, (unsigned int)p_ref);
+  
+  if( !vector_block_is_full(root_set)) return;
+    
+  pool_put_entry(gc_metadata.collector_repset_pool, root_set);
+  collector->rep_set = pool_get_entry(gc_metadata.free_set_pool);  
+}
+
+void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+  Vector_Block* root_set = collector->rem_set;  
+  vector_block_add_entry(root_set, (unsigned int)p_ref);
+  
+  if( !vector_block_is_full(root_set)) return;
+    
+  pool_put_entry(gc_metadata.collector_remset_pool, root_set);
+  collector->rem_set = pool_get_entry(gc_metadata.free_set_pool);  
+}
+
+void collector_marktask_add_entry(Collector* collector, Partial_Reveal_Object* p_obj)
+{
+  assert( p_obj>= gc_heap_base_address() && p_obj < gc_heap_ceiling_address()); 
+
+  Vector_Block* mark_task = (Vector_Block*)collector->mark_stack;
+  vector_block_add_entry(mark_task, (unsigned int)p_obj);
+
+  if( !vector_block_is_full(mark_task)) return;
+
+  pool_put_entry(gc_metadata.mark_task_pool, mark_task);
+  collector->mark_stack = (MarkStack*)pool_get_entry(gc_metadata.free_task_pool);
+}
+
+void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref)
+{
+  assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address()); 
+  
+  Vector_Block* root_set = gc->root_set;  
+  vector_block_add_entry(root_set, (unsigned int)p_ref);
+  
+  if( !vector_block_is_full(root_set)) return;
+    
+  pool_put_entry(gc_metadata.gc_rootset_pool, root_set);
+  gc->root_set = pool_get_entry(gc_metadata.free_set_pool);  
+}
+
+
+static void gc_update_repointed_sets(GC* gc, Pool* pool)
+{
+  GC_Metadata* metadata = gc->metadata;
+  
+  /* NOTE:: this is destructive to the root sets. */
+  Vector_Block* root_set = pool_get_entry(pool);
+
+  while(root_set){
+    unsigned int* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      Partial_Reveal_Object* p_obj = *p_ref;
+      /* For repset, this check is unnecessary, since all slots are repointed; otherwise
+         they will not be recorded. For root set, it is possible to point to LOS or other
+         non-moved space.  */
+#ifdef _DEBUG
+      if( pool != metadata->gc_rootset_pool)
+        assert(obj_is_forwarded_in_obj_info(p_obj));
+      else
+#endif
+      if(!obj_is_forwarded_in_obj_info(p_obj)) continue;
+      Partial_Reveal_Object* p_target_obj = get_forwarding_pointer_in_obj_info(p_obj);
+      *p_ref = p_target_obj; 
+    }
+    vector_block_clear(root_set);
+    pool_put_entry(metadata->free_set_pool, root_set);
+    root_set = pool_get_entry(pool);
+  } 
+  
+  return;
+}
+
+void update_rootset_interior_pointer();
+
+void gc_update_repointed_refs(Collector* collector)
+{  
+  GC* gc = collector->gc;
+  GC_Metadata* metadata = gc->metadata;
+  gc_update_repointed_sets(gc, metadata->gc_rootset_pool);
+  gc_update_repointed_sets(gc, metadata->collector_repset_pool);   
+  update_rootset_interior_pointer();
+  
+  return;
+}
+
+void gc_reset_rootset(GC* gc)
+{
+  gc->root_set = pool_get_entry(gc_metadata.free_set_pool);  
+  return;
+}  
+
+

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h?view=auto&rev=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h Sun Nov 19 14:16:25 2006
@@ -0,0 +1,58 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/25
+ */
+#ifndef _GC_METADATA_H_
+#define _GC_METADATA_H_
+
+#include "gc_common.h"
+#include "../utils/vector_block.h"
+#include "../utils/sync_pool.h"
+
+typedef struct GC_Metadata{  
+  void* heap_start;
+  void* heap_end;
+  
+  Pool* free_task_pool; /* list of free buffers for mark tasks */
+  Pool* mark_task_pool; /* list of mark tasks */
+  
+  /* FIXME:: the mutator remset pool can be merged with the rootset pool*/
+  Pool* free_set_pool; /* list of free buffers for rootsets remsets */
+  Pool* gc_rootset_pool; /* list of root sets for enumeration */
+  Pool* mutator_remset_pool; /* list of remsets generated by app during execution */
+  Pool* collector_remset_pool; /* list of remsets generated by gc during collection */
+  Pool* collector_repset_pool; /* list of repointed ref slot sets */
+      
+}GC_Metadata;
+
+void gc_metadata_initialize(GC* gc);
+void gc_metadata_destruct(GC* gc);
+void gc_metadata_reset(GC* gc);
+
+void gc_reset_rootset(GC* gc);
+void gc_update_repointed_refs(Collector* collector);
+
+void collector_marktask_add_entry(Collector* collector, Partial_Reveal_Object* p_obj);
+
+void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_slot);
+void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot);
+void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_slot);
+
+void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot);
+
+#endif /* #ifndef _GC_METADATA_H_ */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h Sun Nov 19 14:16:25 2006
@@ -29,7 +29,7 @@
 
 #define USEC_PER_SEC INT64_C(1000000)
 
-#define VmThreadHandle void*
+#define VmThreadHandle  void*
 #define VmEventHandle   hysem_t
 #define THREAD_OK       TM_ERROR_NONE
 
@@ -71,27 +71,32 @@
 inline uint32 atomic_cas32(volatile apr_uint32_t *mem,
                                            apr_uint32_t swap,
                                            apr_uint32_t cmp) {
-	return (uint32)apr_atomic_cas32(mem, swap, cmp);
+  return (uint32)apr_atomic_cas32(mem, swap, cmp);
 }
 
-inline Boolean pool_create(apr_pool_t **newpool, apr_pool_t *parent) {
-	return (Boolean)apr_pool_create(newpool, parent);
+inline uint32 atomic_inc32(volatile apr_uint32_t *mem){
+  return (uint32)apr_atomic_inc32(mem);
 }
 
-inline void pool_destroy(apr_pool_t *p) {
-	apr_pool_destroy(p);
+inline uint32 atomic_dec32(volatile apr_uint32_t *mem){
+  return (uint32)apr_atomic_dec32(mem);
 }
 
-inline uint32 atomic_inc32(volatile apr_uint32_t *mem) {
-	return (uint32)apr_atomic_inc32(mem);
+inline uint32 atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val) {
+  return (uint32)apr_atomic_add32(mem, val);
 }
 
-inline uint32 atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val) {
-	return (uint32)apr_atomic_add32(mem, val);
+inline Boolean pool_create(apr_pool_t **newpool, apr_pool_t *parent) {
+  return (Boolean)apr_pool_create(newpool, parent);
 }
 
+inline void pool_destroy(apr_pool_t *p) {
+  apr_pool_destroy(p);
+}
+
+
 inline int64 time_now() {
-	return apr_time_now();
+  return apr_time_now();
 }
 
 typedef volatile unsigned int SpinLock;
@@ -102,7 +107,7 @@
 };
 
 #define try_lock(x) (!atomic_cas32(&(x), LOCKED, FREE_LOCK))
-#define lock(x)	while( !try_lock(x)){ while( x==LOCKED );}
+#define lock(x) while( !try_lock(x)){ while( x==LOCKED );}
 #define unlock(x) do{ x = FREE_LOCK;}while(0)
 
 #endif //_GC_PLATFORM_H_

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_par.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_par.cpp?view=auto&rev=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_par.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_par.cpp Sun Nov 19 14:16:25 2006
@@ -0,0 +1,193 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "gc_metadata.h"
+#include "../thread/collector.h"
+#include "../gen/gen.h"
+
+static void scan_slot_par(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+  Partial_Reveal_Object* p_obj = *p_ref;
+  if(p_obj==NULL) return;
+
+  Space* obj_space = space_of_addr(collector->gc, p_obj);
+
+  /* if obj to be moved, its ref slot needs remembering for later update */
+  if(obj_space->move_object) 
+    collector_repset_add_entry(collector, p_ref);
+
+  if(obj_space->mark_object_func(obj_space, p_obj))   
+    collector_marktask_add_entry(collector, p_obj);
+  
+  return;
+}
+
+static void scan_object_par(Collector* collector, Partial_Reveal_Object *p_obj)
+{
+  if( !object_has_slots(p_obj) ) return;
+  
+    /* scan array object */
+  if (object_is_array(p_obj)) {
+    Partial_Reveal_Object* array = p_obj;
+    assert(!obj_is_primitive_array(array));
+    
+    int32 array_length = vector_get_length((Vector_Handle) array);
+    for (int i = 0; i < array_length; i++) {
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
+      scan_slot_par(collector, p_ref);
+    }   
+    return;
+  }
+
+  /* scan non-array object */
+  int *offset_scanner = init_object_scanner(p_obj);
+  while (true) {
+    Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
+    if (p_ref == NULL) break; /* terminating ref slot */
+  
+    scan_slot_par(collector, p_ref);
+    offset_scanner = offset_next_ref(offset_scanner);
+  }
+
+  return;
+}
+
+extern void scan_object_seq(Collector*, Partial_Reveal_Object *); 
+
+/* for marking phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+/* NOTE:: Only marking in object header is idempotent */
+void mark_scan_heap_par(Collector* collector)
+{
+  GC* gc = collector->gc;
+  /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
+  
+  GC_Metadata* metadata = gc->metadata;
+ 
+  collector->mark_stack = (MarkStack*)pool_get_entry(metadata->free_task_pool);
+
+  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+  /* first step: copy all root objects to mark tasks. 
+      FIXME:: can be done sequentially before coming here to eliminate atomic ops */ 
+  while(root_set){
+    unsigned int* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      Partial_Reveal_Object* p_obj = *p_ref;
+      assert(!p_obj == NULL); /* root ref can't be NULL */
+      /* we have to mark the object before putting it into the mark task,
+         because two slots may contain the same object. The object would then
+         be scanned twice and its ref slots recorded twice. The problem
+         occurs after a ref slot has been updated the first time with the new
+         position: the second update would read the old position instead of
+         the expected new one. This can be worked around if we want. 
+      */
+      Space* space = space_of_addr(gc, p_obj);
+      if( !space->mark_object_func(space, p_obj) ) continue;   
+    
+      collector_marktask_add_entry(collector, p_obj);
+    } 
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+      
+  pool_put_entry(metadata->mark_task_pool, collector->mark_stack);
+  
+  /* second step: iterate over the mark tasks and scan objects */
+  /* get a task buf to push new tasks */
+  collector->mark_stack = (MarkStack*)pool_get_entry(metadata->free_task_pool);
+  
+retry:
+  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
+  while(mark_task){
+    unsigned int* iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task,iter)){
+      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
+      iter = vector_block_iterator_advance(mark_task,iter);
+
+      scan_object_par(collector, p_obj);
+    } 
+    /* run out one task, put back to the pool and grab another task */
+   vector_block_clear(mark_task);
+   pool_put_entry(metadata->free_task_pool, mark_task);
+   mark_task = pool_get_entry(metadata->mark_task_pool);      
+  }
+  
+  /* termination detection. This is also a barrier.
+     NOTE:: actually we don't need this complexity. We can simply
+     spin waiting on num_finished_collectors, because each newly generated
+     task would surely be processed by its generating collector eventually. 
+     So the code below exists purely for load balancing. */
+  atomic_inc32(&num_finished_collectors);
+  while(num_finished_collectors != num_active_collectors){
+    if( !pool_is_empty(metadata->mark_task_pool)){
+      atomic_dec32(&num_finished_collectors);
+      goto retry;  
+    }
+  }
+  
+  /* up to now, we do not have any tasks in task_pool, but
+     each collector has remaining tasks in its local mark_stack. */
+     
+  /* Lets process remaining tasks.  
+     NOTE:: this is the key difference from work-stealing, which uses
+     same stack for both working and sharing. So it has no problem
+     with remaining tasks in the shared stack. */
+
+  /* to simplify the processing, we fall back to using a single stack for
+     scanning the remaining objects. The assumption is that only a limited
+     number of tasks remain, so there is no need to share them. 
+     FIXME:: a better way is to repeatedly halve the task block size until 
+     it becomes one, so that the collectors effectively share the same stack */      
+     
+  mark_task = (Vector_Block*)collector->mark_stack;
+  MarkStack* mark_stack = new MarkStack();
+ 
+  unsigned int* iter = vector_block_iterator_init(mark_task);
+  while(!vector_block_iterator_end(mark_task,iter)){
+    Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
+    iter = vector_block_iterator_advance(mark_task,iter);
+    mark_stack->push(p_obj);
+  }
+  /* put back the last task to the free pool */
+  vector_block_clear(mark_task);
+  pool_put_entry(metadata->free_task_pool, mark_task);
+   
+  collector->mark_stack = mark_stack;
+  while(!mark_stack->empty()){
+    Partial_Reveal_Object* p_obj = mark_stack->top();
+    mark_stack->pop();
+    scan_object_seq(collector, p_obj);
+  } 
+  
+  delete mark_stack;
+  collector->mark_stack = NULL;
+  
+  /* put back last repointed refs set recorded during marking */
+  pool_put_entry(metadata->collector_repset_pool, collector->rep_set);
+  collector->rep_set = NULL;
+
+  return;
+}

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_seq.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_seq.cpp?view=auto&rev=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_seq.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_seq.cpp Sun Nov 19 14:16:25 2006
@@ -0,0 +1,108 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "gc_metadata.h"
+#include "../thread/collector.h"
+#include "../gen/gen.h"
+
+static void scan_slot_seq(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+  Partial_Reveal_Object* p_obj = *p_ref;
+  if(p_obj==NULL) return;
+
+  MarkStack* mark_stack = (MarkStack*)collector->mark_stack;
+  Space* obj_space = space_of_addr(collector->gc, p_obj);
+
+  /* if obj to be moved, its ref slot needs remembering for later update */
+  if(obj_space->move_object) 
+    collector_repset_add_entry(collector, p_ref);
+
+  if(obj_space->mark_object_func(obj_space, p_obj))   
+    mark_stack->push(p_obj);
+    
+  return;
+}
+
+void scan_object_seq(Collector* collector, Partial_Reveal_Object *p_obj)
+{
+  if( !object_has_slots(p_obj) ) return;
+  
+    /* scan array object */
+  if (object_is_array(p_obj)) {
+    Partial_Reveal_Object* array = p_obj;
+    assert(!obj_is_primitive_array(array));
+    
+    int32 array_length = vector_get_length((Vector_Handle) array);
+    for (int i = 0; i < array_length; i++) {
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
+      scan_slot_seq(collector, p_ref);
+    }   
+    return;
+  }
+
+  /* scan non-array object */
+  int *offset_scanner = init_object_scanner(p_obj);
+  while (true) {
+    Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
+    if (p_ref == NULL) break; /* terminating ref slot */
+  
+    scan_slot_seq(collector, p_ref);
+    offset_scanner = offset_next_ref(offset_scanner);
+  }
+
+  return;
+}
+
+/* NOTE:: Only marking in object header is idempotent */
+void mark_scan_heap_seq(Collector* collector)
+{
+  GC* gc = collector->gc;
+  MarkStack* mark_stack = new MarkStack();
+  collector->mark_stack = mark_stack;
+
+  GC_Metadata* metadata = gc->metadata;
+ 
+  pool_iterator_init(metadata->gc_rootset_pool);
+  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  
+  while(root_set){
+    unsigned int* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      Partial_Reveal_Object* p_obj = *p_ref;
+      assert(!p_obj == NULL); /* root ref can't be NULL */
+  
+      Space* space = space_of_addr(collector->gc, p_obj);
+      if( !space->mark_object_func(space, p_obj) ) continue;   
+      mark_stack->push(p_obj);
+    }
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  } 
+
+  while(!mark_stack->empty()){
+    Partial_Reveal_Object* p_obj = mark_stack->top();
+    mark_stack->pop();
+    scan_object_seq(collector, p_obj);
+  }
+  
+  return;
+}

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp Sun Nov 19 14:16:25 2006
@@ -24,7 +24,7 @@
 
 /* All the write barrier interfaces need cleanup */
 
-static Boolean NEED_BARRIER = TRUE;
+Boolean NEED_BARRIER = FALSE;
 
 Boolean gc_requires_barriers() 
 {   return NEED_BARRIER; }
@@ -38,7 +38,7 @@
   if( address_belongs_to_nursery((void *)p_target, gc) && 
        !address_belongs_to_nursery((void *)p_slot, gc)) 
   {
-    mutator->remslot->push_back((Partial_Reveal_Object **)p_slot);
+    mutator_remset_add_entry(mutator, (Partial_Reveal_Object**)p_slot);
   }
 }
 
@@ -58,7 +58,7 @@
     for (int i = 0; i < array_length; i++) {
       p_slot = (Partial_Reveal_Object **)vector_get_element_address_ref((Vector_Handle) array, i);
       if( *p_slot != NULL && address_belongs_to_nursery((void *)*p_slot, gc)){
-        mutator->remslot->push_back(p_slot);
+        mutator_remset_add_entry(mutator, p_slot);
       }
     }   
     return;
@@ -71,7 +71,7 @@
     p_slot = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
     if (p_slot == NULL) break;  
     if( address_belongs_to_nursery((void *)*p_slot, gc)){
-      mutator->remslot->push_back(p_slot);
+      mutator_remset_add_entry(mutator, p_slot);
     }
     offset_scanner = offset_next_ref(offset_scanner);
   }
@@ -96,12 +96,15 @@
 void gc_heap_slot_write_ref (Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
 {  
   *p_slot = p_target;
+  
   if( !NEED_BARRIER ) return;
   gc_slot_write_barrier(p_slot, p_target); 
 }
 
-/* this is used for global object update, e.g., strings. Since globals are roots, no barrier here */
+/* this is used for global object update, e.g., strings. */
 void gc_heap_write_global_slot(Managed_Object_Handle *p_slot,Managed_Object_Handle p_target)
 {
   *p_slot = p_target;
+  
+  /* Since globals are roots, no barrier here */
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp Sun Nov 19 14:16:25 2006
@@ -25,9 +25,6 @@
 #include "../thread/collector.h"
 #include "../verify/verify_live_heap.h"
 
-/* heap size limit is not interesting. only for manual tuning purpose */
-unsigned int min_heap_size_bytes = 32 * MB;
-unsigned int max_heap_size_bytes = 128 * MB;
 
 /* fspace size limit is not interesting. only for manual tuning purpose */
 unsigned int min_nos_size_bytes = 2 * MB;
@@ -41,18 +38,13 @@
 
 void gc_gen_initialize(GC_Gen *gc_gen, unsigned int min_heap_size, unsigned int max_heap_size) 
 {
-	assert(gc_gen);	
+  assert(gc_gen); 
   assert(max_heap_size <= max_heap_size_bytes);
-	/* FIXME:: we need let virtual space to include unmapped region.
-	   Heuristically for Nursery+MatureFrom+MatureTo(unmapped)+LOS(mapped+unmapped), 
-	   we need almost half more than the user specified virtual space size. 
-	   That's why we have the below. */
-	max_heap_size += max_heap_size>>1;
 
-	min_heap_size = round_up_to_size(min_heap_size, GC_BLOCK_SIZE_BYTES);
-	max_heap_size = round_up_to_size(max_heap_size, GC_BLOCK_SIZE_BYTES);
+  min_heap_size = round_up_to_size(min_heap_size, GC_BLOCK_SIZE_BYTES);
+  max_heap_size = round_up_to_size(max_heap_size, GC_BLOCK_SIZE_BYTES);
 
-	gc_gen_get_system_info(gc_gen); 
+  gc_gen_get_system_info(gc_gen); 
 
   void *reserved_base = NULL;
 
@@ -80,29 +72,31 @@
   gc_gen->num_collections = 0;
 
   /* heuristic nos + mos + LOS */
+  unsigned int los_size = max_heap_size >> 2;
+  gc_los_initialize(gc_gen, reserved_base, los_size);
+
+  unsigned int mos_size = max_heap_size >> 1;
+  reserved_base = (void*)((unsigned int)reserved_base + los_size);
+  gc_mos_initialize(gc_gen, reserved_base, mos_size);
+  
   unsigned int nos_size =  max_heap_size >> 2; 
   assert(nos_size > min_nos_size_bytes);
-	gc_nos_initialize(gc_gen, reserved_base, nos_size);	
+  reserved_base = (void*)((unsigned int)reserved_base + mos_size);
+  gc_nos_initialize(gc_gen, reserved_base, nos_size); 
 
-	unsigned int mos_size = max_heap_size >> 1;
-	reserved_base = (void*)((unsigned int)reserved_base + nos_size);
-	gc_mos_initialize(gc_gen, reserved_base, mos_size);
+  /* connect mos and nos, so that they can be compacted as one space */
+  Blocked_Space* mos = (Blocked_Space*)gc_get_mos(gc_gen);
+  Blocked_Space* nos = (Blocked_Space*)gc_get_nos(gc_gen);
+  Block_Header* mos_last_block = (Block_Header*)&mos->blocks[mos->num_managed_blocks-1];
+  Block_Header* nos_first_block = (Block_Header*)&nos->blocks[0];
+  mos_last_block->next = nos_first_block;
+  assert(space_heap_end((Space*)mos) == space_heap_start((Space*)nos));
     
-	unsigned int los_size = max_heap_size >> 2;
-	reserved_base = (void*)((unsigned int)gc_gen->heap_end - los_size);
-	gc_los_initialize(gc_gen, reserved_base, los_size);
-
   gc_gen->committed_heap_size = space_committed_size((Space*)gc_gen->nos) +
                                 space_committed_size((Space*)gc_gen->mos) +
                                 space_committed_size((Space*)gc_gen->los);
   
-  gc_init_rootset((GC*)gc_gen);	
-
-	gc_gen->mutator_list = NULL;
-	gc_gen->mutator_list_lock = FREE_LOCK;
-
-  gc_gen->num_mutators = 0;
-  
+  gc_metadata_initialize((GC*)gc_gen); /* root set and mark stack */
   collector_initialize((GC*)gc_gen);
   
   if( verify_live_heap ){  /* for live heap verify*/
@@ -114,25 +108,25 @@
 
 void gc_gen_destruct(GC_Gen *gc_gen) 
 {
-	gc_nos_destruct(gc_gen);
-	gc_gen->nos = NULL;
-	
-	gc_mos_destruct(gc_gen);	
-	gc_gen->mos = NULL;
+  gc_nos_destruct(gc_gen);
+  gc_gen->nos = NULL;
+  
+  gc_mos_destruct(gc_gen);  
+  gc_gen->mos = NULL;
 
-	gc_los_destruct(gc_gen);	
+  gc_los_destruct(gc_gen);  
   gc_gen->los = NULL;
   
+  gc_metadata_destruct((GC*)gc_gen); /* root set and mark stack */
   collector_destruct((GC*)gc_gen);
 
   if( verify_live_heap ){
     gc_terminate_heap_verification((GC*)gc_gen);
   }
 
-	STD_FREE(gc_gen);
+  STD_FREE(gc_gen);
 }
 
-
 Boolean major_collection_needed(GC_Gen* gc)
 {
   return mspace_free_memory_size(gc->mos) < fspace_used_memory_size(gc->nos);  
@@ -149,111 +143,13 @@
 void gc_set_los(GC_Gen* gc, Space* los){ gc->los = (Lspace*)los;}
 unsigned int gc_get_processor_num(GC_Gen* gc){ return gc->_num_processors;}
 
-static void gc_gen_update_rootset(GC* gc)
-{
-  RootSet* root_set = gc->root_set;
-  /* update refs in root set after moving collection */
-  for(unsigned int i=0; i < root_set->size(); i++){
-      Partial_Reveal_Object** p_ref = (*root_set)[i];
-      Partial_Reveal_Object* p_obj = *p_ref;
-      assert(p_obj); /* root ref should never by NULL*/
-      /* FIXME:: this should be reconsidered: forwarded in vt or obj_info */
-      if(!obj_is_forwarded_in_obj_info(p_obj)){
-        /* if an obj is not moved, it must be in LOS or otherwise in MOS for MINOR_COLLECTION */
-#ifdef _DEBUG
-        if( gc->collect_kind == MINOR_COLLECTION )
-          assert( !obj_belongs_to_space(p_obj, gc_get_nos((GC_Gen*)gc)) );
-        else
-          assert( obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc)) ); 
-#endif
-        continue;
-      }
-      Partial_Reveal_Object* p_target_obj = get_forwarding_pointer_in_obj_info(p_obj);
-      *p_ref = p_target_obj; 
-  }
-  
-  return;
-}
-
-void update_rootset_interior_pointer();
-
-void gc_gen_update_repointed_refs(Collector* collector)
-{
-  GC_Gen* gc = (GC_Gen*)collector->gc;
-  Space* space;
-  space = gc_get_nos(gc);  space->update_reloc_func(space);
-  space = gc_get_mos(gc);  space->update_reloc_func(space);
-  space = gc_get_los(gc);  space->update_reloc_func(space);
-
-  gc_gen_update_rootset((GC*)gc);   
-  update_rootset_interior_pointer();
-  
-  return;
-}
-
-void gc_preprocess_collector(Collector *collector)
-{
-  /* for MAJOR_COLLECTION, all the remsets are useless */
-  GC_Gen* gc = (GC_Gen*)collector->gc;
-  if( gc->collect_kind == MAJOR_COLLECTION ){
-    collector->last_cycle_remset->clear();
-    return;
-  }
-
-  Fspace* fspace = (Fspace*)gc_get_nos(gc);
-  fspace->remslot_sets->push_back(collector->last_cycle_remset);
-    
-  /* this_cycle_remset is ready to be used */
-  assert(collector->this_cycle_remset->empty());
-
-  return;
-}
-
-void gc_postprocess_collector(Collector *collector)
-{ 
-  /* for MAJOR_COLLECTION we do nothing */
-  GC_Gen* gc = (GC_Gen*)collector->gc;
-  if( gc->collect_kind == MAJOR_COLLECTION )
-    return;
-      
-  /* for MINOR_COLLECTION */
-  /* switch its remsets, this_cycle_remset data kept in space->remslot_sets */
-  /* last_cycle_remset was in space->remslot_sets and cleared during collection */
-  assert(collector->last_cycle_remset->empty());
-
-  RemslotSet* temp_set = collector->this_cycle_remset;
-  collector->this_cycle_remset = collector->last_cycle_remset;
-  collector->last_cycle_remset = temp_set;
-  
-  return;
-}
-
-void gc_preprocess_mutator(GC_Gen* gc)
-{       
-  Mutator *mutator = gc->mutator_list;
-  Fspace* fspace = (Fspace*)mutator->alloc_space;
-  /* for MAJOR_COLLECTION, all the remsets are useless */
-  while (mutator) {
-    if(gc->collect_kind == MAJOR_COLLECTION){
-      mutator->remslot->clear();
-    }else{        
-      fspace->remslot_sets->push_back(mutator->remslot);
-    }
-    mutator = mutator->next;
-  }
- 
-  return;
-} /////////FIXME::: need clear space remsets
-
-void gc_postprocess_mutator(GC_Gen* gc)
+void reset_mutator_allocation_context(GC_Gen* gc)
 {
   Mutator *mutator = gc->mutator_list;
   while (mutator) {
-    assert(mutator->remslot->empty());
     alloc_context_reset((Allocator*)mutator);    
     mutator = mutator->next;
-  }
-  
+  }  
   return;
 }
 
@@ -274,9 +170,10 @@
   /* Stop the threads and collect the roots. */
   gc_reset_rootset((GC*)gc);  
   vm_enumerate_root_set_all_threads();
-  
-  gc_preprocess_mutator(gc);
-  
+
+  /* reset metadata (all the rootsets and markstack) */  
+  gc_metadata_reset((GC*)gc); 
+    
   if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
 
   if(gc->collect_kind == MINOR_COLLECTION){
@@ -306,8 +203,7 @@
   
   if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
       
-  gc_postprocess_mutator(gc);
-
+  reset_mutator_allocation_context(gc);
   vm_resume_threads_after();
 
   return;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h Sun Nov 19 14:16:25 2006
@@ -61,10 +61,12 @@
   Collector** collectors;
   unsigned int num_collectors;
   unsigned int num_active_collectors; /* not all collectors are working */
-  
-  /* rootsets for collection (FIXME:: should be distributed to collectors) */
-  RootSet* root_set;
+
+  /* metadata is the pool for rootset, markstack, etc. */  
+  GC_Metadata* metadata;
   unsigned int collect_kind; /* MAJOR or MINOR */
+  /* FIXME:: this is wrong! root_set belongs to mutator */
+  Vector_Block* root_set;
   
   /* mem info */
   apr_pool_t *aux_pool;
@@ -93,7 +95,6 @@
          lspace_free_memory_size(gc->los);  }
                         
 void gc_gen_reclaim_heap(GC_Gen* gc, unsigned int cause);
-void gc_gen_update_repointed_refs(Collector* collector);
 
 /////////////////////////////////////////////////////////////////////////////////////////
 
@@ -101,7 +102,7 @@
 { fspace_initialize((GC*)gc, start, nos_size); }
 
 inline void gc_nos_destruct(GC_Gen* gc)
-{	fspace_destruct(gc->nos); }
+{ fspace_destruct(gc->nos); }
 
 inline void gc_mos_initialize(GC_Gen* gc, void* start, unsigned int mos_size)
 { mspace_initialize((GC*)gc, start, mos_size); }
@@ -113,7 +114,7 @@
 { lspace_initialize((GC*)gc, start, los_size); }
 
 inline void gc_los_destruct(GC_Gen* gc)
-{	lspace_destruct(gc->los); }
+{ lspace_destruct(gc->los); }
 
 inline Boolean address_belongs_to_nursery(void* addr, GC_Gen* gc)
 { return address_belongs_to_space(addr, (Space*)gc->nos); }
@@ -123,8 +124,8 @@
 
 inline Space* space_of_addr(GC* gc, void* addr)
 {
-  if( addr < nos_boundary) return (Space*)((GC_Gen*)gc)->nos;
-  if( addr < los_boundary) return (Space*)((GC_Gen*)gc)->mos;
+  if( addr > nos_boundary) return (Space*)((GC_Gen*)gc)->nos;
+  if( addr > los_boundary) return (Space*)((GC_Gen*)gc)->mos;
   return (Space*)((GC_Gen*)gc)->los;
 }
 
@@ -138,11 +139,6 @@
 void gc_set_mos(GC_Gen* gc, Space* mos);
 void gc_set_los(GC_Gen* gc, Space* los);
 unsigned int gc_get_processor_num(GC_Gen* gc);
-
-void gc_preprocess_mutator(GC_Gen* gc);
-void gc_postprocess_mutator(GC_Gen* gc);
-void gc_preprocess_collector(Collector* collector);
-void gc_postprocess_collector(Collector* collector);
 
 #endif /* ifndef _GC_GEN_H_ */
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp Sun Nov 19 14:16:25 2006
@@ -21,14 +21,7 @@
 #include "mspace.h"
 
 static void mspace_destruct_blocks(Mspace* mspace)
-{ 
-  Block* blocks = (Block*)mspace->blocks; 
-  for(unsigned int i=0; i < mspace->num_managed_blocks; i++){
-    Block_Header* block = (Block_Header*)&(blocks[i]);
-    delete block->reloc_table;
-    block->reloc_table = NULL;
-  }
-  
+{   
   return;
 }
 
@@ -44,7 +37,6 @@
     block->base = block->free;
     block->block_idx = i + start_idx;
     block->status = BLOCK_FREE;  
-    block->reloc_table = new SlotVector();
     last_block->next = block;
     last_block = block;
   }
@@ -56,6 +48,7 @@
 
 struct GC_Gen;
 extern void gc_set_mos(GC_Gen* gc, Space* space);
+extern Space* gc_set_nos(GC_Gen* gc);
 void mspace_initialize(GC* gc, void* start, unsigned int mspace_size)
 {
   Mspace* mspace = (Mspace*)STD_MALLOC( sizeof(Mspace));
@@ -83,10 +76,7 @@
   
   mspace_init_blocks(mspace);
   
-  mspace->obj_info_map = new ObjectMap();
   mspace->mark_object_func = mspace_mark_object;
-  mspace->save_reloc_func = mspace_save_reloc;
-  mspace->update_reloc_func = mspace_update_reloc;
 
   mspace->move_object = TRUE;
   mspace->gc = gc;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h Sun Nov 19 14:16:25 2006
@@ -35,8 +35,6 @@
   GC* gc;
   Boolean move_object;
   Boolean (*mark_object_func)(Mspace* space, Partial_Reveal_Object* p_obj);
-  void (*save_reloc_func)(Mspace* space, Partial_Reveal_Object** p_ref);
-  void (*update_reloc_func)(Mspace* space);
   /* END of Space --> */
     
   Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */
@@ -49,9 +47,7 @@
   unsigned int num_used_blocks;
   unsigned int num_managed_blocks;
   unsigned int num_total_blocks;
-
-  /* during compaction, save non-zero obj_info who's overwritten by forwarding pointer */
-  ObjectMap*  obj_info_map; 
+  /* END of Blocked_Space --> */
     
 }Mspace;
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp Sun Nov 19 14:16:25 2006
@@ -21,96 +21,201 @@
 #include "mspace.h"
 #include "../thread/collector.h"
 #include "../trace_forward/fspace.h"
+
 struct GC_Gen;
 Space* gc_get_nos(GC_Gen* gc);
 Space* gc_get_mos(GC_Gen* gc);
 Space* gc_get_los(GC_Gen* gc);
 
-static Block_Header* mspace_get_first_compact_block(Mspace* mspace)
-{ return (Block_Header*)mspace->blocks; }
+static void reset_mspace_after_compaction(Mspace* mspace)
+{ 
+  unsigned int old_num_used = mspace->num_used_blocks;
+  unsigned int new_num_used = mspace->free_block_idx - mspace->first_block_idx;
+  unsigned int num_used = old_num_used>new_num_used? old_num_used:new_num_used;
+  
+  Block* blocks = mspace->blocks;
+  unsigned int i;
+  for(i=0; i < num_used; i++){
+    Block_Header* block = (Block_Header*)&(blocks[i]);
+    block_clear_mark_table(block); 
+    block->status = BLOCK_USED;
 
-static Block_Header* mspace_get_next_compact_block(Mspace* mspace, Block_Header* block)
-{ return block->next; }
+    if(i >= new_num_used){
+      block->status = BLOCK_FREE; 
+      block->free = GC_BLOCK_BODY(block);
+    }
+  }
+  mspace->num_used_blocks = new_num_used;
+  
+  /* we should clear the remaining blocks which are set to be BLOCK_COMPACTED or BLOCK_TARGET */
+  for(; i < mspace->num_managed_blocks; i++){
+    Block_Header* block = (Block_Header*)&(blocks[i]);
+    assert(block->status& (BLOCK_COMPACTED|BLOCK_TARGET));
+    block->status = BLOCK_FREE;
+  }
+}
 
-static Block_Header* mspace_get_first_target_block(Mspace* mspace)
-{ return (Block_Header*)mspace->blocks; }
+static volatile Block_Header* next_block_for_compact;
+static volatile Block_Header* next_block_for_target;
 
-static Block_Header* mspace_get_next_target_block(Mspace* mspace, Block_Header* block)
-{ return block->next; }
+static void gc_reset_block_for_collectors(GC* gc, Mspace* mspace)
+{
+  unsigned int free_blk_idx = mspace->free_block_idx;
+  for(unsigned int i=0; i<gc->num_active_collectors; i++){
+    Collector* collector = gc->collectors[i];
+    unsigned int collector_target_idx = collector->cur_target_block->block_idx;
+    if(collector_target_idx > free_blk_idx)
+      free_blk_idx = collector_target_idx;
+    collector->cur_target_block = NULL;
+    collector->cur_compact_block = NULL;
+  }
+  mspace->free_block_idx = free_blk_idx+1;
+  return;
+}
 
-void mspace_save_reloc(Mspace* mspace, Partial_Reveal_Object** p_ref)
+static void gc_init_block_for_collectors(GC* gc, Mspace* mspace)
 {
-  Block_Header* block = GC_BLOCK_HEADER(p_ref);
-  block->reloc_table->push_back(p_ref);
+  unsigned int i;
+  Block_Header* block;
+  for(i=0; i<gc->num_active_collectors; i++){
+    Collector* collector = gc->collectors[i];
+    block = (Block_Header*)&mspace->blocks[i];
+    collector->cur_target_block = block;
+    collector->cur_compact_block = block;
+    block->status = BLOCK_TARGET;
+  }
+  
+  block = (Block_Header*)&mspace->blocks[i];
+  next_block_for_target = block;
+  next_block_for_compact = block;
   return;
 }
 
-void  mspace_update_reloc(Mspace* mspace)
+static unsigned int gc_collection_result(GC* gc)
 {
-  SlotVector* reloc_table;
-  /* update refs in mspace */
-  Block* blocks = mspace->blocks;
-  for(unsigned int i=0; i < mspace->num_used_blocks; i++){
-    Block_Header* block = (Block_Header*)&(blocks[i]);
-    reloc_table = block->reloc_table;
-    for(unsigned int j=0; j < reloc_table->size(); j++){
-      Partial_Reveal_Object** p_ref = (*reloc_table)[j];
-      Partial_Reveal_Object* p_target_obj = get_forwarding_pointer_in_obj_info(*p_ref);
-      *p_ref = p_target_obj;
+  unsigned int result = TRUE;
+  for(unsigned i=0; i<gc->num_active_collectors; i++){
+    Collector* collector = gc->collectors[i];
+    result &= collector->result;
+  }  
+  return result;
+}
+
+static Block_Header* mspace_get_first_compact_block(Mspace* mspace)
+{ return (Block_Header*)mspace->blocks; }
+
+static Block_Header* mspace_get_first_target_block(Mspace* mspace)
+{ return (Block_Header*)mspace->blocks; }
+
+
+static Block_Header* mspace_get_next_compact_block1(Mspace* mspace, Block_Header* block)
+{  return block->next; }
+
+static Block_Header* mspace_get_next_compact_block(Collector* collector, Mspace* mspace)
+{ 
+  /* firstly put back the compacted block. If it's not BLOCK_TARGET, it will be set to BLOCK_COMPACTED */
+  unsigned int block_status = collector->cur_compact_block->status;
+  assert( block_status & (BLOCK_IN_COMPACT|BLOCK_TARGET));
+  if( block_status == BLOCK_IN_COMPACT)
+    collector->cur_compact_block->status = BLOCK_COMPACTED;
+
+  Block_Header* cur_compact_block = (Block_Header*)next_block_for_compact;
+  
+  while(cur_compact_block != NULL){
+    Block_Header* next_compact_block = cur_compact_block->next;
+
+    Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)&next_block_for_compact, next_compact_block, cur_compact_block);
+    if(temp != cur_compact_block){
+      cur_compact_block = (Block_Header*)next_block_for_compact;
+      continue;
+    }
+    /* got it, set its state to be BLOCK_IN_COMPACT. It must be the first time touched by compactor */
+    block_status = cur_compact_block->status;
+    assert( !(block_status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET)));
+    cur_compact_block->status = BLOCK_IN_COMPACT;
+    collector->cur_compact_block = cur_compact_block;
+    return cur_compact_block;
+      
+  }
+  /* ran out of space blocks for compacting */
+  return NULL;
+}
+
+static Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace)
+{    
+  Block_Header* cur_target_block = (Block_Header*)next_block_for_target;
+  /* First, bump the next_block_for_target global variable to the first non-BLOCK_TARGET block.
+     No atomic op is needed, because only one thread can own next_block_for_target at a time */
+
+  while(cur_target_block->status == BLOCK_TARGET){
+      cur_target_block = cur_target_block->next;
+  }
+  next_block_for_target = cur_target_block;
+
+  /* nos is higher than mos, so we can't use a nos block as a compaction target */
+  Block_Header* mspace_heap_end = (Block_Header*)space_heap_end((Space*)mspace);
+  while(cur_target_block < mspace_heap_end){
+    Block_Header* next_target_block = cur_target_block->next;
+    volatile unsigned int* p_block_status = &cur_target_block->status;
+    unsigned int block_status = cur_target_block->status;
+    /* the block status has to be one of BLOCK_IN_COMPACT, BLOCK_COMPACTED or BLOCK_TARGET,
+       but we only care about the BLOCK_COMPACTED ones, or our own BLOCK_IN_COMPACT block */
+    assert( block_status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET));
+    /* if it is not BLOCK_COMPACTED, let's move on to next */
+    if(block_status != BLOCK_COMPACTED){
+      if(cur_target_block == collector->cur_compact_block){
+        assert( block_status == BLOCK_IN_COMPACT);
+        *p_block_status = BLOCK_TARGET;
+        collector->cur_target_block = cur_target_block;
+        return cur_target_block;
+      }
+      cur_target_block = next_target_block;
+      continue;
+    }    
+    /* ok, found the first BLOCK_COMPACTED block before our own compact block */    
+    unsigned int temp = atomic_cas32(p_block_status, BLOCK_TARGET, BLOCK_COMPACTED);
+    if(temp == BLOCK_COMPACTED){
+      collector->cur_target_block = cur_target_block;
+      return cur_target_block;
     }
-    reloc_table->clear();
+    /* missed it; it must have been set to BLOCK_TARGET by another collector */
+    assert(temp == BLOCK_TARGET); 
+    cur_target_block = next_target_block;     
   }
-  
-  return;  
-}  
+  /* mos has run out of blocks for this major collection */
+  return NULL;  
+}
 
 Boolean mspace_mark_object(Mspace* mspace, Partial_Reveal_Object *p_obj)
 {  
+#ifdef _DEBUG 
+  if( obj_is_marked_in_vt(p_obj)) return FALSE;
+#endif
+
   obj_mark_in_vt(p_obj);
 
   unsigned int obj_word_index = OBJECT_WORD_INDEX_TO_MARKBIT_TABLE(p_obj);
-  unsigned int obj_offset_in_word = OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj); 	
-	
+  unsigned int obj_offset_in_word = OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj);   
+  
   unsigned int *p_word = &(GC_BLOCK_HEADER(p_obj)->mark_table[obj_word_index]);
   unsigned int word_mask = (1<<obj_offset_in_word);
-	
-  unsigned int result = (*p_word)|word_mask;
-	
-  if( result==(*p_word) ) return FALSE;
-  
-  *p_word = result; 
   
-   return TRUE;
-}
-
-Boolean mspace_object_is_marked(Partial_Reveal_Object *p_obj, Mspace* mspace)
-{
-  assert(p_obj);
+  unsigned int old_value = *p_word;
+  unsigned int new_value = old_value|word_mask;
   
-#ifdef _DEBUG //TODO:: Cleanup
-  unsigned int obj_word_index = OBJECT_WORD_INDEX_TO_MARKBIT_TABLE(p_obj);
-  unsigned int obj_offset_in_word = OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj); 	
-	
-  unsigned int *p_word = &(GC_BLOCK_HEADER(p_obj)->mark_table[obj_word_index]);
-  unsigned int word_mask = (1<<obj_offset_in_word);
-	
-  unsigned int result = (*p_word)|word_mask;
-	
-  if( result==(*p_word) )
-    assert( obj_is_marked_in_vt(p_obj));
-  else 
-    assert(!obj_is_marked_in_vt(p_obj));
-    
-#endif
-
-  return (obj_is_marked_in_vt(p_obj));
-    
+  while(old_value != new_value){
+    unsigned int temp = atomic_cas32(p_word, new_value, old_value);
+    if(temp == old_value) return TRUE;
+    old_value = *p_word;
+    new_value = old_value|word_mask;
+  }
+  return FALSE;
 }
 
-static Boolean mspace_compute_object_target(Mspace* mspace)
+static Boolean mspace_compute_object_target(Collector* collector, Mspace* mspace)
 {  
-  Block_Header* dest_block = mspace_get_first_target_block(mspace);    
-  Block_Header* curr_block = mspace_get_first_compact_block(mspace);
+  Block_Header* curr_block = collector->cur_compact_block;
+  Block_Header* dest_block = collector->cur_target_block;
 
   void* dest_addr = GC_BLOCK_BODY(dest_block);
  
@@ -125,17 +230,21 @@
       
       if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){
         dest_block->free = dest_addr;
-        dest_block = mspace_get_next_target_block(mspace, dest_block);
+        dest_block = mspace_get_next_target_block(collector, mspace);
+        if(dest_block == NULL){ 
+          collector->result = 0; 
+          return FALSE; 
+        }
+        
         dest_addr = GC_BLOCK_BODY(dest_block);
       }
       assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block));
       
       Obj_Info_Type obj_info = get_obj_info(p_obj);
       if( obj_info != 0 ) {
-        mspace->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info));
+        collector->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info));
       }
       
-      assert( (unsigned int) p_obj >= (unsigned int)dest_addr );
       set_forwarding_pointer_in_obj_info(p_obj, dest_addr);
 
       /* FIXME: should use alloc to handle alignment requirement */
@@ -143,52 +252,12 @@
       p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);
   
     }
-    curr_block = mspace_get_next_compact_block(mspace, curr_block);
+    curr_block = mspace_get_next_compact_block(collector, mspace);
   }
-
-
-  mspace->free_block_idx = dest_block->block_idx+1;
-
-  /* fail to evacuate any room, FIXME:: do nothing at the moment */
-  if( mspace->free_block_idx == mspace->first_block_idx + mspace->num_used_blocks) 
-    return FALSE;
   
   return TRUE;
 }   
 
-static void mspace_restore_obj_info(Mspace* mspace)
-{
-  ObjectMap* objmap = mspace->obj_info_map;
-  ObjectMap::iterator obj_iter;
-  for( obj_iter=objmap->begin(); obj_iter!=objmap->end(); obj_iter++){
-    Partial_Reveal_Object* p_target_obj = obj_iter->first;
-    Obj_Info_Type obj_info = obj_iter->second;
-    set_obj_info(p_target_obj, obj_info);     
-  }
-  objmap->clear();
-  return;  
-}
-  
-static void reset_mspace_after_compaction(Mspace* mspace)
-{ 
-  unsigned int old_num_used = mspace->num_used_blocks;
-  unsigned int new_num_used = mspace->free_block_idx - mspace->first_block_idx;
-  unsigned int num_used = old_num_used>new_num_used? old_num_used:new_num_used;
-  
-  Block* blocks = mspace->blocks;
-  for(unsigned int i=0; i < num_used; i++){
-    Block_Header* block = (Block_Header*)&(blocks[i]);
-    block_clear_mark_table(block); 
-    block->status = BLOCK_USED;
-
-    if(i >= new_num_used){
-      block->status = BLOCK_FREE; 
-      block->free = GC_BLOCK_BODY(block);
-    }
-  }
-  mspace->num_used_blocks = new_num_used;
-}
-
 #include "../verify/verify_live_heap.h"
 
 static void mspace_sliding_compact(Collector* collector, Mspace* mspace)
@@ -218,43 +287,77 @@
       p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);  
     }
         
-    curr_block = mspace_get_next_compact_block(mspace, curr_block);
+    curr_block = mspace_get_next_compact_block1(mspace, curr_block);
   }
 
-  mspace_restore_obj_info(mspace);
-  reset_mspace_after_compaction(mspace);
-  
   return;
 } 
 
-void gc_gen_update_repointed_refs(Collector* collector);
+void gc_update_repointed_refs(Collector* collector);
+
+static volatile unsigned int num_marking_collectors = 0;
+static volatile unsigned int num_installing_collectors = 0;
 
 static void mark_compact_mspace(Collector* collector) 
 {
-  GC_Gen* gc = (GC_Gen*)collector->gc;
-  Mspace* mspace = (Mspace*)gc_get_mos(gc);
-  Fspace* fspace = (Fspace*)gc_get_nos(gc);
-
-  /* FIXME:: Single-threaded mark-compaction for mspace currently */
+  GC* gc = collector->gc;
+  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
+  Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc);
 
   /* Pass 1: mark all live objects in heap, and save all the slots that 
              have references  that are going to be repointed */
-  mark_scan_heap(collector);
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  
+  /* Pass 1: mark all live objects in heap, and save all the slots that 
+             have references  that are going to be repointed */
+  unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
+
+  mark_scan_heap_par(collector);
+
+  old_num = atomic_inc32(&num_marking_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* last collector's world here */
+    /* prepare for next phase */
+    gc_init_block_for_collectors(gc, mspace); 
+    /* let other collectors go */
+    num_marking_collectors++; 
+  }
+  
+  while(num_marking_collectors != num_active_collectors + 1);
   
   /* Pass 2: assign target addresses for all to-be-moved objects */
-  Boolean ok;
-  ok = mspace_compute_object_target(mspace); 
-  assert(ok); /* free at least one block */
-  ok = fspace_compute_object_target(collector, fspace); 
-  assert(ok); /* FIXME:: throw out-of-memory exception if not ok */
+  atomic_cas32( &num_installing_collectors, 0, num_active_collectors+1);
+
+  mspace_compute_object_target(collector, mspace);   
+  
+  old_num = atomic_inc32(&num_installing_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* single thread world */
+    if(!gc_collection_result(gc)){
+      printf("Out of Memory!\n");
+      assert(0); /* mos is out. FIXME:: throw exception */
+    }
+    gc_reset_block_for_collectors(gc, mspace);
+    num_installing_collectors++; 
+  }
   
+  while(num_installing_collectors != num_active_collectors + 1);
+
+  /* FIXME:: temporary. let only one thread go forward */
+  if( collector->thread_handle != 0 ) return;
+    
   /* Pass 3: update all references whose objects are to be moved */  
-  gc_gen_update_repointed_refs(collector);
+  gc_update_repointed_refs(collector);
     
   /* Pass 4: do the compaction and reset blocks */  
+  next_block_for_compact = mspace_get_first_compact_block(mspace);
   mspace_sliding_compact(collector, mspace);
-  fspace_copy_collect(collector, fspace);
-     
+  /* FIXME:: should be collector_restore_obj_info(collector) */
+  gc_restore_obj_info(gc);
+
+  reset_mspace_after_compaction(mspace);
+  reset_fspace_for_allocation(fspace);
+  
   return;
 }
 
@@ -263,6 +366,8 @@
   mspace->num_collections++;
 
   GC* gc = mspace->gc;  
+
+  pool_iterator_init(gc->metadata->gc_rootset_pool);
 
   collector_execute_task(gc, (TaskType)mark_compact_mspace, (Space*)mspace);
   

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp Sun Nov 19 14:16:25 2006
@@ -61,7 +61,7 @@
   memset(lspace, 0, sizeof(Lspace));
   
   void* reserved_base = start;
-  unsigned int committed_size = lspace_size >> 1;
+  unsigned int committed_size = lspace_size;
   int status = port_vmem_commit(&reserved_base, committed_size, gc->allocated_memory); 
   assert(status == APR_SUCCESS && reserved_base == start);
   
@@ -77,16 +77,13 @@
   lspace->mark_table = (unsigned int*)STD_MALLOC( num_words*BYTES_PER_WORD );
   memset(lspace->mark_table, 0, num_words*BYTES_PER_WORD);
   
-  lspace->reloc_table = new SlotVector();
   lspace->mark_object_func = lspace_mark_object;
-  lspace->save_reloc_func = lspace_save_reloc;
-  lspace->update_reloc_func = lspace_update_reloc;
   
   lspace->move_object = FALSE;
   lspace->gc = gc;
   gc_set_los((GC_Gen*)gc, (Space*)lspace);
   
-  los_boundary = start;
+  los_boundary = lspace->heap_end;
   
   return;
 }
@@ -123,33 +120,17 @@
   unsigned int word_index = OBJECT_WORD_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj);
   unsigned int bit_offset_in_word = OBJECT_WORD_OFFSET_IN_LSPACE_MARKBIT_TABLE(lspace, p_obj);
  
-  unsigned int* p_markbits = &(lspace->mark_table[word_index]);
+  unsigned int* p_word = &(lspace->mark_table[word_index]);
   unsigned int word_mask = (1<<bit_offset_in_word);
-	
-  unsigned int result = (*p_markbits)|word_mask;
-	
-  if( result==(*p_markbits) ) return FALSE;
-  
-  *p_markbits = result; 
-      
-  return TRUE;
-}
-
-void lspace_save_reloc(Lspace* lspace, Partial_Reveal_Object** p_ref)
-{
-  lspace->reloc_table->push_back(p_ref);
-}
-
-void lspace_update_reloc(Lspace* lspace)
-{
-  SlotVector* reloc_table;
   
-  reloc_table = lspace->reloc_table;
-  for(unsigned int j=0; j < reloc_table->size(); j++){
-    Partial_Reveal_Object** p_ref = (*reloc_table)[j];
-    Partial_Reveal_Object* p_target_obj = get_forwarding_pointer_in_obj_info(*p_ref);
-    *p_ref = p_target_obj;
+  unsigned int old_value = *p_word;
+  unsigned int new_value = old_value|word_mask;
+  
+  while(old_value != new_value){
+    unsigned int temp = atomic_cas32(p_word, new_value, old_value);
+    if(temp == old_value) return TRUE;
+    old_value = *p_word;
+    new_value = old_value|word_mask;
   }
-  reloc_table->clear();
-  return;
+  return FALSE;
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h Sun Nov 19 14:16:25 2006
@@ -34,16 +34,11 @@
   GC* gc;
   Boolean move_object;
   Boolean (*mark_object_func)(Lspace* space, Partial_Reveal_Object* p_obj);
-  void (*save_reloc_func)(Lspace* space, Partial_Reveal_Object** p_ref);
-  void (*update_reloc_func)(Lspace* space);
   /* END of Space --> */
 
   void* alloc_free;
     
   unsigned int* mark_table;
-
-  /* support other space moving collection */
-  SlotVector* reloc_table;
 
 }Lspace;
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp Sun Nov 19 14:16:25 2006
@@ -23,55 +23,87 @@
 #include "collector.h"
 #include "../mark_compact/mspace.h"
 
+
+static void collector_restore_obj_info(Collector* collector)
+{
+  ObjectMap* objmap = collector->obj_info_map;
+  ObjectMap::iterator obj_iter;
+  for( obj_iter=objmap->begin(); obj_iter!=objmap->end(); obj_iter++){
+    Partial_Reveal_Object* p_target_obj = obj_iter->first;
+    Obj_Info_Type obj_info = obj_iter->second;
+    set_obj_info(p_target_obj, obj_info);     
+  }
+  objmap->clear();
+  return;  
+}
+
+void gc_restore_obj_info(GC* gc)
+{
+  for(unsigned int i=0; i<gc->num_active_collectors; i++)
+  {
+    Collector* collector = gc->collectors[i];    
+    collector_restore_obj_info(collector);
+  }
+  return;
+  
+}
+
 static void collector_reset_thread(Collector *collector) 
 {
   collector->task_func = NULL;
 
-	vm_reset_event(collector->task_assigned_event);
-	vm_reset_event(collector->task_finished_event);
-	
-	alloc_context_reset((Allocator*)collector);
-	
-	return;
+  vm_reset_event(collector->task_assigned_event);
+  vm_reset_event(collector->task_finished_event);
+  
+  alloc_context_reset((Allocator*)collector);
+  
+  GC_Metadata* metadata = collector->gc->metadata;
+  assert(collector->rep_set==NULL);
+  collector->rep_set = pool_get_entry(metadata->free_set_pool);
+  collector->result = 1;
+
+  if(gc_requires_barriers()){
+    assert(collector->rem_set==NULL);
+    collector->rem_set = pool_get_entry(metadata->free_set_pool);
+  }
+
+  return;
 }
 
 static void wait_collector_to_finish(Collector *collector) 
 {
-	vm_wait_event(collector->task_finished_event);
+  vm_wait_event(collector->task_finished_event);
 }
 
 static void notify_collector_to_work(Collector* collector)
 {
-	vm_set_event(collector->task_assigned_event);  
+  vm_set_event(collector->task_assigned_event);  
 }
 
 static void collector_wait_for_task(Collector *collector) 
 {
-	vm_wait_event(collector->task_assigned_event);
+  vm_wait_event(collector->task_assigned_event);
 }
 
 static void collector_notify_work_done(Collector *collector) 
 {
-	vm_set_event(collector->task_finished_event);
+  vm_set_event(collector->task_finished_event);
 }
 
-void gc_preprocess_collector(Collector*);
-void gc_postprocess_collector(Collector*);
 static void assign_collector_with_task(GC* gc, TaskType task_func, Space* space)
 {
-  unsigned int num_collectors_to_activate = gc->num_collectors;
-  for(unsigned int i=0; i<num_collectors_to_activate; i++)
+  /* FIXME:: to adaptively identify the num_collectors_to_activate */
+  gc->num_active_collectors = gc->num_collectors;
+  for(unsigned int i=0; i<gc->num_active_collectors; i++)
   {
     Collector* collector = gc->collectors[i];
     
-    gc_preprocess_collector(collector);
     collector_reset_thread(collector);
     collector->task_func = task_func;
     collector->collect_space = space;
     notify_collector_to_work(collector);
   }
-  gc->num_active_collectors = num_collectors_to_activate;
-
+  return;
 }
 
 static void wait_collection_finish(GC* gc)
@@ -81,73 +113,63 @@
   {
     Collector* collector = gc->collectors[i];
     wait_collector_to_finish(collector);
-    gc_postprocess_collector(collector);
   }
   gc->num_active_collectors = 0;
-
+  return;
 }
 
 static int collector_thread_func(void *arg) 
 {
-	Collector *collector = (Collector *)arg;
-	assert(collector);
-	
-	while(true){
-		/* Waiting for newly assigned task */
-		collector_wait_for_task(collector);	
-		
-		/* waken up and check for new task */
+  Collector *collector = (Collector *)arg;
+  assert(collector);
+  
+  while(true){
+    /* Waiting for newly assigned task */
+    collector_wait_for_task(collector); 
+    
+    /* waken up and check for new task */
     TaskType task_func = collector->task_func;
     if(task_func == NULL) return 1;
       
     task_func(collector);
     
-		collector_notify_work_done(collector);
-	}
+    collector_notify_work_done(collector);
+  }
 
-	return 0;
+  return 0;
 }
 
 static void collector_init_thread(Collector *collector) 
 {
-	collector->trace_stack = new TraceStack(); /* only for MINOR_COLLECTION */
-	collector->mark_stack = new MarkStack(); /* only for MAJOR_COLLECTION */
+  collector->trace_stack = new TraceStack(); /* only for MINOR_COLLECTION */
+  collector->obj_info_map = new ObjectMap();
+  collector->rem_set = NULL;
+  collector->rep_set = NULL;
+
+  int status = vm_create_event(&collector->task_assigned_event,0,1);
+  assert(status == THREAD_OK);
+
+  status = vm_create_event(&collector->task_finished_event,0,1);
+  assert(status == THREAD_OK);
+
+  status = (unsigned int)vm_create_thread(NULL,
+                                  0, 0, 0,
+                                  collector_thread_func,
+                                  (void*)collector);
 
-	collector->last_cycle_remset = new RemslotSet();
-  collector->last_cycle_remset->reserve(GC_NUM_ROOTS_HINT);
-	collector->last_cycle_remset->clear();
-
-	collector->this_cycle_remset = new RemslotSet();
-  collector->this_cycle_remset->reserve(GC_NUM_ROOTS_HINT);
-	collector->this_cycle_remset->clear();
-
-	int status = vm_create_event(&collector->task_assigned_event,0,1);
-	assert(status == THREAD_OK);
-
-	status = vm_create_event(&collector->task_finished_event,0,1);
-	assert(status == THREAD_OK);
-
-	status = (unsigned int)vm_create_thread(NULL,
-                            			0, 0, 0,
-                            			collector_thread_func,
-                            			(void*)collector);
-
-	assert(status == THREAD_OK);
-	
-	return;
+  assert(status == THREAD_OK);
+  
+  return;
 }
 
 static void collector_terminate_thread(Collector* collector)
 {
   collector->task_func = NULL; /* NULL to notify thread exit */
-	notify_collector_to_work(collector);
+  notify_collector_to_work(collector);
   vm_thread_yield(); /* give collector time to die */
   
-  delete collector->trace_stack;
-	delete collector->last_cycle_remset;
-	delete collector->this_cycle_remset;
-	
-	return;
+  delete collector->trace_stack;  
+  return;
 }
 
 void collector_destruct(GC* gc) 
@@ -155,8 +177,8 @@
   for(unsigned int i=0; i<gc->num_collectors; i++)
   {
     Collector* collector = gc->collectors[i];
-		collector_terminate_thread(collector);
-  	STD_FREE(collector);
+    collector_terminate_thread(collector);
+    STD_FREE(collector);
    
   }
   
@@ -164,25 +186,34 @@
   return;
 }
 
+unsigned int NUM_COLLECTORS = 0;
+
 struct GC_Gen;
 unsigned int gc_get_processor_num(GC_Gen*);
 void collector_initialize(GC* gc)
 {
- 	unsigned int nthreads = gc_get_processor_num((GC_Gen*)gc);
-	
-	gc->num_collectors = 1; //FIXME:: nthreads;
-	gc->collectors = (Collector **) STD_MALLOC(sizeof(Collector *) * nthreads);	
-	assert(gc->collectors);
-
-	for (unsigned int i = 0; i < nthreads; i++) {
-		Collector* collector = (Collector *)STD_MALLOC(sizeof(Collector));
-		assert(collector);
-		
-		collector->gc = gc;
-		collector_init_thread(collector);
-		
-		gc->collectors[i] = collector;
-	}
+  //FIXME::
+  unsigned int nthreads = gc_get_processor_num((GC_Gen*)gc);
+  
+  nthreads = (NUM_COLLECTORS==0)?nthreads:NUM_COLLECTORS;
+
+  gc->num_collectors = nthreads; 
+  unsigned int size = sizeof(Collector *) * nthreads;
+  gc->collectors = (Collector **) STD_MALLOC(size); 
+  memset(gc->collectors, 0, size);
+
+  size = sizeof(Collector);
+  for (unsigned int i = 0; i < nthreads; i++) {
+    Collector* collector = (Collector *)STD_MALLOC(size);
+    memset(collector, 0, size);
+    
+    /* FIXME:: thread_handle is for temporary control */
+    collector->thread_handle = (VmThreadHandle)i;
+    collector->gc = gc;
+    collector_init_thread(collector);
+    
+    gc->collectors[i] = collector;
+  }
 
   return;
 }



Mime
View raw message