harmony-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From wjwashb...@apache.org
Subject svn commit: r495225 [2/5] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen: javasrc/org/apache/harmony/drlvm/gc_gen/ src/common/ src/finalizer_weakref/ src/gen/ src/jni/ src/mark_compact/ src/mark_sweep/ src/thread/ src/trace_forward/ src/utils/ src/verify/
Date Thu, 11 Jan 2007 13:57:19 GMT
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h Thu Jan 11 05:57:16 2007
@@ -0,0 +1,163 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#ifndef _GC_SPACE_H_
+#define _GC_SPACE_H_
+
+#include "gc_block.h"
+
+struct GC;
+/* all Spaces inherit this Space structure */
+typedef struct Space{
+  void* heap_start;
+  void* heap_end;
+  unsigned int reserved_heap_size;
+  unsigned int committed_heap_size;
+  unsigned int num_collections;
+  int64 time_collections;
+  float survive_ratio;
+  unsigned int collect_algorithm;
+  GC* gc;
+  Boolean move_object;
+}Space;
+
+inline unsigned int space_committed_size(Space* space){ return space->committed_heap_size;}
+inline void* space_heap_start(Space* space){ return space->heap_start; }
+inline void* space_heap_end(Space* space){ return space->heap_end; }
+
+inline Boolean address_belongs_to_space(void* addr, Space* space) 
+{
+  return (addr >= space_heap_start(space) && addr < space_heap_end(space));
+}
+
+inline Boolean obj_belongs_to_space(Partial_Reveal_Object *p_obj, Space* space)
+{
+  return address_belongs_to_space((Partial_Reveal_Object*)p_obj, space);
+}
+
+
+typedef struct Blocked_Space {
+  /* <-- first couple of fields are overloaded as Space */
+  void* heap_start;
+  void* heap_end;
+  unsigned int reserved_heap_size;
+  unsigned int committed_heap_size;
+  unsigned int num_collections;
+  int64 time_collections;
+  float survive_ratio;
+  unsigned int collect_algorithm;
+  GC* gc;
+  Boolean move_object;
+  /* END of Space --> */
+
+  Block* blocks; /* short-cut for mspace block header access, not mandatory */
+  
+  /* FIXME:: the block indices should be replaced with block header addresses */
+  unsigned int first_block_idx;
+  unsigned int ceiling_block_idx;
+  volatile unsigned int free_block_idx;
+  
+  unsigned int num_used_blocks;
+  unsigned int num_managed_blocks;
+  unsigned int num_total_blocks;
+  /* END of Blocked_Space --> */
+}Blocked_Space;
+
+inline Boolean space_has_free_block(Blocked_Space* space){ return space->free_block_idx <= space->ceiling_block_idx; }
+inline unsigned int space_free_memory_size(Blocked_Space* space){ return GC_BLOCK_SIZE_BYTES * (space->ceiling_block_idx - space->free_block_idx + 1);  }
+inline Boolean space_used_memory_size(Blocked_Space* space){ return GC_BLOCK_SIZE_BYTES * (space->free_block_idx - space->first_block_idx); }
+
+inline void space_init_blocks(Blocked_Space* space)
+{ 
+  Block* blocks = (Block*)space->heap_start; 
+  Block_Header* last_block = (Block_Header*)blocks;
+  unsigned int start_idx = space->first_block_idx;
+  for(unsigned int i=0; i < space->num_managed_blocks; i++){
+    Block_Header* block = (Block_Header*)&(blocks[i]);
+    block_init(block);
+    block->block_idx = i + start_idx;
+    last_block->next = block;
+    last_block = block;
+  }
+  last_block->next = NULL;
+  space->blocks = blocks;
+   
+  return;
+}
+
+
+inline void blocked_space_shrink(Blocked_Space* space, unsigned int changed_size)
+{
+  unsigned int block_dec_count = changed_size >> GC_BLOCK_SHIFT_COUNT;
+  void* new_base = (void*)&(space->blocks[space->num_managed_blocks - block_dec_count]);
+ 
+  void* decommit_base = (void*)round_down_to_size((unsigned int)new_base, SYSTEM_ALLOC_UNIT);
+  
+  assert( ((Block_Header*)decommit_base)->block_idx >= space->free_block_idx);
+  
+  void* old_end = (void*)&space->blocks[space->num_managed_blocks];
+  unsigned int decommit_size = (unsigned int)old_end - (unsigned int)decommit_base;
+  assert(decommit_size && !(decommit_size%GC_BLOCK_SIZE_BYTES));
+  
+  Boolean result = vm_decommit_mem(decommit_base, decommit_size);
+  assert(result == TRUE);
+  
+  space->committed_heap_size = (unsigned int)decommit_base - (unsigned int)space->heap_start;
+  space->num_managed_blocks = space->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
+  
+  Block_Header* new_last_block = (Block_Header*)&space->blocks[space->num_managed_blocks - 1];
+  space->ceiling_block_idx = new_last_block->block_idx;
+  new_last_block->next = NULL;
+}
+
+inline void blocked_space_extend(Blocked_Space* space, unsigned int changed_size)
+{
+  unsigned int block_inc_count = changed_size >> GC_BLOCK_SHIFT_COUNT;
+  
+  void* old_base = (void*)&space->blocks[space->num_managed_blocks];
+  void* commit_base = (void*)round_down_to_size((unsigned int)old_base, SYSTEM_ALLOC_UNIT);
+  unsigned int block_diff_count = ((unsigned int)old_base - (unsigned int)commit_base) >> GC_BLOCK_SHIFT_COUNT;
+  block_inc_count += block_diff_count;
+  
+  unsigned int commit_size = block_inc_count << GC_BLOCK_SHIFT_COUNT;
+  void* result = vm_commit_mem(commit_base, commit_size);
+  assert(result == commit_base);
+
+  void* new_end = (void*)((unsigned int)commit_base + commit_size);
+  space->committed_heap_size = (unsigned int)new_end - (unsigned int)space->heap_start;
+  
+  /* init the grown blocks */
+  Block_Header* block = (Block_Header*)commit_base;
+  Block_Header* last_block = (Block_Header*)((Block*)block -1);
+  unsigned int start_idx = last_block->block_idx + 1;
+  unsigned int i;
+  for(i=0; block < new_end; i++){
+    block_init(block);
+    block->block_idx = start_idx + i;
+    last_block->next = block;
+    last_block = block;
+    block = (Block_Header*)((Block*)block + 1);  
+  }
+  last_block->next = NULL;
+  space->ceiling_block_idx = last_block->block_idx;
+  space->num_managed_blocks = space->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
+}
+
+#endif //#ifndef _GC_SPACE_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/interior_pointer.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/interior_pointer.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/interior_pointer.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/interior_pointer.cpp Thu Jan 11 05:57:16 2007
@@ -27,6 +27,7 @@
 typedef struct slot_offset_entry_struct{
 	void** slot;
 	unsigned int offset;
+	Partial_Reveal_Object *base; 
 } slot_offset_entry;
 
 static std::vector<slot_offset_entry> interior_pointer_set;
@@ -49,8 +50,17 @@
 	slot_offset_entry* push_back_entry = (slot_offset_entry*)&interior_pointer_set[interior_pointer_num_count++];
 	push_back_entry->offset = offset;
 	push_back_entry->slot   = slot;
-	*slot = p_obj;
-	gc_add_root_set_entry((Managed_Object_Handle*)slot, is_pinned);	
+	push_back_entry->base = p_obj;
+}
+
+void gc_copy_interior_pointer_table_to_rootset()
+{
+	unsigned int i;
+	for( i = 0; i<interior_pointer_num_count; i++)
+	{
+		slot_offset_entry* entry_traverser = (slot_offset_entry*)&interior_pointer_set[i];
+		gc_add_root_set_entry((Managed_Object_Handle*)(&(entry_traverser->base)), FALSE);
+	}
 }
 
 void update_rootset_interior_pointer()
@@ -60,12 +70,17 @@
 	{
 		slot_offset_entry* entry_traverser = (slot_offset_entry*)&interior_pointer_set[i];
 		void** root_slot = entry_traverser->slot;
-		Partial_Reveal_Object* root_base = (Partial_Reveal_Object*)*root_slot;//entry_traverser->base;
+		Partial_Reveal_Object* root_base = (Partial_Reveal_Object*)entry_traverser->base;
 		unsigned int root_offset = entry_traverser->offset;
 		void *new_slot_contents = (void *)((Byte*)root_base + root_offset);	
 		*root_slot = new_slot_contents;
 	}
-	interior_pointer_set.clear();
-	assert(interior_pointer_set.size()==0);
+       //cannot reset the table here, because the rootset may be updated multiple times
+}
+
+void gc_reset_interior_pointer_table()
+{
 	interior_pointer_num_count = 0;
+       //this function is for the out-of-space case, which needs to call update_rootset_interior_pointer multiple times
 }
+

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/interior_pointer.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/interior_pointer.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/interior_pointer.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/interior_pointer.h Thu Jan 11 05:57:16 2007
@@ -24,6 +24,8 @@
 #include "gc_common.h"
 
 void add_root_set_entry_interior_pointer(void **slot, int offset, Boolean is_pinned);
+void gc_copy_interior_pointer_table_to_rootset();
 void update_rootset_interior_pointer();
+void gc_reset_interior_pointer_table();
 
 #endif //INTERIOR_POINTER_H

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan.cpp Thu Jan 11 05:57:16 2007
@@ -1,219 +0,0 @@
-/*
- *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#include "gc_metadata.h"
-#include "../thread/collector.h"
-#include "../gen/gen.h"
-
-#include "../finalizer_weakref/finalizer_weakref.h"
-
-static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref)
-{
-  Partial_Reveal_Object* p_obj = *p_ref;
-  if(p_obj==NULL) return;
-
-  Space* obj_space = space_of_addr(collector->gc, p_obj);
-
-  /* if obj to be moved, its ref slot needs remembering for later update */
-  if(obj_space->move_object) 
-    collector_repset_add_entry(collector, p_ref);
-
-  if(obj_space->mark_object_func(obj_space, p_obj))   
-    collector_tracestack_push(collector, p_obj);
-  
-  return;
-}
-
-static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
-{
-  if( !object_has_ref_field(p_obj) ) return;
-  
-    /* scan array object */
-  if (object_is_array(p_obj)) {
-    Partial_Reveal_Object* array = p_obj;
-    assert(!obj_is_primitive_array(array));
-    
-    int32 array_length = vector_get_length((Vector_Handle) array);
-    for (int i = 0; i < array_length; i++) {
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
-      scan_slot(collector, p_ref);
-    }   
-    return;
-  }
-
-  /* scan non-array object */
-  int *offset_scanner = init_object_scanner(p_obj);
-  while (true) {
-    Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
-    if (p_ref == NULL) break; /* terminating ref slot */
-  
-    scan_slot(collector, p_ref);
-    offset_scanner = offset_next_ref(offset_scanner);
-  }
-
-  scan_weak_reference(collector, p_obj, scan_slot);
-  
-  return;
-}
-
-
-static void trace_object(Collector* collector, Partial_Reveal_Object *p_obj)
-{ 
-  scan_object(collector, p_obj);
-  
-  Vector_Block* trace_stack = collector->trace_stack;
-  while( !vector_stack_is_empty(trace_stack)){
-    p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack); 
-    scan_object(collector, p_obj);
-    trace_stack = collector->trace_stack;
-  }
-    
-  return; 
-}
-
-/* for marking phase termination detection */
-static volatile unsigned int num_finished_collectors = 0;
-
-/* NOTE:: Only marking in object header is idempotent */
-void mark_scan_heap(Collector* collector)
-{
-  GC* gc = collector->gc;
-  GC_Metadata* metadata = gc->metadata;
-
-  /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
-  unsigned int num_active_collectors = gc->num_active_collectors;
-  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
-   
-  collector->trace_stack = pool_get_entry(metadata->free_task_pool);
-
-  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
-
-  /* first step: copy all root objects to mark tasks. 
-      FIXME:: can be done sequentially before coming here to eliminate atomic ops */ 
-  while(root_set){
-    unsigned int* iter = vector_block_iterator_init(root_set);
-    while(!vector_block_iterator_end(root_set,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
-      iter = vector_block_iterator_advance(root_set,iter);
-
-      Partial_Reveal_Object* p_obj = *p_ref;
-      /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */
-      assert( (gc->collect_kind==MINOR_COLLECTION && !gc_requires_barriers()) || (gc->collect_kind==MAJOR_COLLECTION) && (p_obj!= NULL));
-      if(p_obj==NULL) continue;
-      /* we have to mark the object before put it into marktask, because
-         it is possible to have two slots containing a same object. They will
-         be scanned twice and their ref slots will be recorded twice. Problem
-         occurs after the ref slot is updated first time with new position
-         and the second time the value is the ref slot is the old position as expected.
-         This can be worked around if we want. 
-      */
-      Space* space = space_of_addr(gc, p_obj);
-      if( !space->mark_object_func(space, p_obj) ) continue;   
-    
-      collector_tracestack_push(collector, p_obj);
-    } 
-    root_set = pool_iterator_next(metadata->gc_rootset_pool);
-  }
-  /* put back the last trace_stack task */    
-  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
-  
-  /* second step: iterate over the mark tasks and scan objects */
-  /* get a task buf for the mark stack */
-  collector->trace_stack = pool_get_entry(metadata->free_task_pool);
-
-retry:
-  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
-  
-  while(mark_task){
-    unsigned int* iter = vector_block_iterator_init(mark_task);
-    while(!vector_block_iterator_end(mark_task,iter)){
-      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
-      iter = vector_block_iterator_advance(mark_task,iter);
-
-      /* FIXME:: we should not let mark_task empty during working, , other may want to steal it. 
-         degenerate my stack into mark_task, and grab another mark_task */
-      trace_object(collector, p_obj);
-    } 
-    /* run out one task, put back to the pool and grab another task */
-   vector_stack_clear(mark_task);
-   pool_put_entry(metadata->free_task_pool, mark_task);
-   mark_task = pool_get_entry(metadata->mark_task_pool);      
-  }
-  
-  /* termination detection. This is also a barrier.
-     NOTE:: We can simply spin waiting for num_finished_collectors, because each 
-     generated new task would surely be processed by its generating collector eventually. 
-     So code below is only for load balance optimization. */
-  atomic_inc32(&num_finished_collectors);
-  while(num_finished_collectors != num_active_collectors){
-    if( !pool_is_empty(metadata->mark_task_pool)){
-      atomic_dec32(&num_finished_collectors);
-      goto retry;  
-    }
-  }
-     
-  /* put back the last mark stack to the free pool */
-  mark_task = (Vector_Block*)collector->trace_stack;
-  vector_stack_clear(mark_task);
-  pool_put_entry(metadata->free_task_pool, mark_task);   
-  collector->trace_stack = NULL;
-  
-  /* put back last repointed refs set recorded during marking */
-  pool_put_entry(metadata->collector_repset_pool, collector->rep_set);
-  collector->rep_set = NULL;
-
-  return;
-}
-
-void resurrect_obj_tree_after_mark(Collector *collector, Partial_Reveal_Object *p_obj)
-{
-  GC *gc = collector->gc;
-  GC_Metadata* metadata = gc->metadata;
-  
-  Space* space = space_of_addr(gc, p_obj);
-//  if(!space->mark_object_func(space, p_obj)) { assert(0); }
-  space->mark_object_func(space, p_obj);
-  collector->trace_stack = pool_get_entry(metadata->free_task_pool);
-  collector_tracestack_push(collector, p_obj);
-  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
-  
-//collector->rep_set = pool_get_entry(metadata->free_set_pool); /* has got collector->rep_set in caller */
-  collector->trace_stack = pool_get_entry(metadata->free_task_pool);
-  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
-  while(mark_task){
-    unsigned int* iter = vector_block_iterator_init(mark_task);
-    while(!vector_block_iterator_end(mark_task,iter)){
-      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
-      trace_object(collector, p_obj);
-      iter = vector_block_iterator_advance(mark_task, iter);
-    } 
-    /* run out one task, put back to the pool and grab another task */
-    vector_stack_clear(mark_task);
-    pool_put_entry(metadata->free_task_pool, mark_task);
-    mark_task = pool_get_entry(metadata->mark_task_pool);      
-  }
-  
-  mark_task = (Vector_Block*)collector->trace_stack;
-  vector_stack_clear(mark_task);
-  pool_put_entry(metadata->free_task_pool, mark_task);   
-  collector->trace_stack = NULL;
-//pool_put_entry(metadata->collector_repset_pool, collector->rep_set); /* has got collector->rep_set in caller */
-//collector->rep_set = NULL; /* has got collector->rep_set in caller */
-}

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp Thu Jan 11 05:57:16 2007
@@ -0,0 +1,224 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "gc_metadata.h"
+#include "../thread/collector.h"
+#include "../gen/gen.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+  Partial_Reveal_Object* p_obj = *p_ref;
+  if(p_obj==NULL) return;
+
+  if(obj_mark_in_vt(p_obj))
+    collector_tracestack_push(collector, p_obj);
+  
+  return;
+}
+
+
+static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
+{
+  if( !object_has_ref_field(p_obj) ) return;
+  
+  Partial_Reveal_Object **p_ref;
+
+  if (object_is_array(p_obj)) {   /* scan array object */
+  
+    Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len;
+  
+    p_ref = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array));
+
+    for (unsigned int i = 0; i < array_length; i++) {
+      scan_slot(collector, p_ref+i);
+    }   
+
+  }else{ /* scan non-array object */
+    
+    unsigned int num_refs = object_ref_field_num(p_obj);
+    
+    int* ref_iterator = object_ref_iterator_init(p_obj);
+    
+    for(unsigned int i=0; i<num_refs; i++){  
+      p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);  
+      scan_slot(collector, p_ref);
+    }    
+
+#ifndef BUILD_IN_REFERENT
+    scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+  }
+  
+  return;
+}
+
+
+static void trace_object(Collector* collector, Partial_Reveal_Object *p_obj)
+{ 
+  scan_object(collector, p_obj);
+  
+  Vector_Block* trace_stack = collector->trace_stack;
+  while( !vector_stack_is_empty(trace_stack)){
+    p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack); 
+    scan_object(collector, p_obj);
+    trace_stack = collector->trace_stack;
+  }
+    
+  return; 
+}
+
+/* for marking phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+/* NOTE:: Only marking in object header is idempotent.
+   Originally, we have to mark the object before put it into markstack, to 
+   guarantee there is only one occurrance of an object in markstack. This is to
+   guarantee there is only one occurrance of a repointed ref slot in repset (they
+   are put to the set when the object is scanned). If the same object is put to 
+   markstack twice, they will be scanned twice and their ref slots will be recorded twice. 
+   Problem occurs when the ref slot is updated first time with new position,
+   the second time the value in the ref slot is not the old position as expected.
+   It needs to read the original obj header for forwarding pointer. With the new value,
+   it will read something nonsense since the obj is not moved yet.
+   This can be worked around if we want. 
+   To do this we have to use atomic instruction for marking, which is undesirable. 
+   So we abondoned this design. We no longer use the repset to remember repointed slots 
+*/
+  
+void mark_scan_pool(Collector* collector)
+{
+  GC* gc = collector->gc;
+  GC_Metadata* metadata = gc->metadata;
+
+  /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
+   
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+  /* first step: copy all root objects to mark tasks. 
+      FIXME:: can be done sequentially before coming here to eliminate atomic ops */ 
+  while(root_set){
+    unsigned int* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      Partial_Reveal_Object* p_obj = *p_ref;
+      /* root ref can't be NULL (remset may have NULL ref entries, but this function is only for MAJOR_COLLECTION) */
+      assert(p_obj!=NULL);
+      /* we have to mark the object before put it into marktask, because
+         it is possible to have two slots containing a same object. They will
+         be scanned twice and their ref slots will be recorded twice. Problem
+         occurs after the ref slot is updated first time with new position
+         and the second time the value in the ref slot is not the old position as expected.
+         This can be worked around if we want. 
+      */
+      if(obj_mark_in_vt(p_obj))
+        collector_tracestack_push(collector, p_obj);
+
+    } 
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */    
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+  /* second step: iterate over the mark tasks and scan objects */
+  /* get a task buf for the mark stack */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
+  
+  while(mark_task){
+    unsigned int* iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task,iter)){
+      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
+      iter = vector_block_iterator_advance(mark_task,iter);
+
+      /* FIXME:: we should not let mark_task empty during working, , other may want to steal it. 
+         degenerate my stack into mark_task, and grab another mark_task */
+      trace_object(collector, p_obj);
+    } 
+    /* run out one task, put back to the pool and grab another task */
+   vector_stack_clear(mark_task);
+   pool_put_entry(metadata->free_task_pool, mark_task);
+   mark_task = pool_get_entry(metadata->mark_task_pool);      
+  }
+  
+  /* termination detection. This is also a barrier.
+     NOTE:: We can simply spin waiting for num_finished_collectors, because each 
+     generated new task would surely be processed by its generating collector eventually. 
+     So code below is only for load balance optimization. */
+  atomic_inc32(&num_finished_collectors);
+  while(num_finished_collectors != num_active_collectors){
+    if( !pool_is_empty(metadata->mark_task_pool)){
+      atomic_dec32(&num_finished_collectors);
+      goto retry;  
+    }
+  }
+     
+  /* put back the last mark stack to the free pool */
+  mark_task = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(mark_task);
+  pool_put_entry(metadata->free_task_pool, mark_task);   
+  collector->trace_stack = NULL;
+  
+  return;
+}
+
+/* this is to resurrect p_obj and its descendants for some purpose, here for finalizables */
+void resurrect_obj_tree_after_mark(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+  GC *gc = collector->gc;
+  GC_Metadata* metadata = gc->metadata;
+  
+  obj_mark_in_vt(p_obj);
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+  collector_tracestack_push(collector, p_obj);
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+//collector->rep_set = free_set_pool_get_entry(metadata); /* has got collector->rep_set in caller */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
+  while(mark_task){
+    unsigned int* iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task,iter)){
+      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
+      trace_object(collector, p_obj);
+      iter = vector_block_iterator_advance(mark_task, iter);
+    } 
+    /* run out one task, put back to the pool and grab another task */
+    vector_stack_clear(mark_task);
+    pool_put_entry(metadata->free_task_pool, mark_task);
+    mark_task = pool_get_entry(metadata->mark_task_pool);      
+  }
+  
+  mark_task = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(mark_task);
+  pool_put_entry(metadata->free_task_pool, mark_task);   
+  collector->trace_stack = NULL;
+//pool_put_entry(metadata->collector_repset_pool, collector->rep_set); /* has got collector->rep_set in caller */
+//collector->rep_set = NULL; /* has got collector->rep_set in caller */
+}

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp Thu Jan 11 05:57:16 2007
@@ -0,0 +1,72 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "space_tuner.h"
+
+#define GC_LOS_MIN_VARY_SIZE ( 2 * 1024 * 1024 ) 
+
+
+struct GC_Gen;
+Space* gc_get_mos(GC_Gen* gc);
+Space* gc_get_nos(GC_Gen* gc);
+
+void gc_space_tune(GC* gc, unsigned int cause)
+{
+  if((gc->collect_kind == MINOR_COLLECTION) || (cause != GC_CAUSE_LOS_IS_FULL) )
+  	return;
+  	
+  Space_Tuner* tuner = gc->tuner;
+  tuner->kind = TRANS_FROM_MOS_TO_LOS;
+
+  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
+  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
+
+  unsigned int mos_free_block_nr = (mspace->ceiling_block_idx - mspace->free_block_idx + 1);
+  unsigned int nos_used_block_nr = fspace->free_block_idx - fspace->first_block_idx;
+  unsigned int mos_wast_block_nr = mos_free_block_nr - nos_used_block_nr; 
+  unsigned int min_vary_block_nr =  (GC_LOS_MIN_VARY_SIZE >> GC_BLOCK_SHIFT_COUNT);
+  if( mos_wast_block_nr > min_vary_block_nr ){
+    tuner->tuning_size = min_vary_block_nr << GC_BLOCK_SHIFT_COUNT;
+  }else{
+    tuner->tuning_size = mos_wast_block_nr << GC_BLOCK_SHIFT_COUNT;
+  }
+
+  if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING;
+
+	return;  
+}
+
+void  gc_space_tuner_reset(GC* gc)
+{
+  if(gc->collect_kind != MINOR_COLLECTION){
+    Space_Tuner* tuner = gc->tuner;
+    memset(tuner, 0, sizeof(Space_Tuner));
+  }
+}
+
+void gc_space_tuner_initialize(GC* gc)
+{
+    Space_Tuner* tuner = (Space_Tuner*)STD_MALLOC(sizeof(Space_Tuner));
+    assert(tuner);
+    memset(tuner, 0, sizeof(Space_Tuner));
+    tuner->kind = TRANS_NOTHING;
+    tuner->tuning_size = 0;
+    gc->tuner = tuner;
+}

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h Thu Jan 11 05:57:16 2007
@@ -0,0 +1,45 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#ifndef _SPACE_TUNER_H_
+#define _SPACE_TUNER_H_
+
+#include "gc_common.h"
+#include "gc_space.h"
+
+//For_LOS_extend
+enum Transform_Kind {
+  TRANS_NOTHING = 0,
+  TRANS_FROM_LOS_TO_MOS = 0x1,
+  TRANS_FROM_MOS_TO_LOS = 0x2,
+};
+
+typedef struct Space_Tuner{
+    /*fixme: Now we use static value of GC_LOS_MIN_VARY_SIZE. */
+    unsigned int tuning_threshold;
+    Transform_Kind kind;
+    unsigned int tuning_size;
+}Space_Tuner;
+
+void gc_space_tune(GC* gc, unsigned int cause);
+void gc_space_tuner_reset(GC* gc);
+void gc_space_tuner_initialize(GC* gc);
+
+#endif /* _SPACE_TUNER_H_ */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp Thu Jan 11 05:57:16 2007
@@ -27,22 +27,24 @@
 #include "../mark_sweep/lspace.h"
 #include "../gen/gen.h"
 
-/* reset objects_with_finalizer vector block of each mutator */
-void mutator_reset_objects_with_finalizer(Mutator *mutator)
+Boolean IGNORE_FINREF = TRUE;
+
+/* reset obj_with_fin vector block of each mutator */
+void mutator_reset_obj_with_fin(Mutator *mutator)
 {
-  mutator->objects_with_finalizer = finalizer_weakref_get_free_block();
+  mutator->obj_with_fin = finref_get_free_block();
 }
 
-void gc_set_objects_with_finalizer(GC *gc)
+void gc_set_obj_with_fin(GC *gc)
 {
-  Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
-  Pool *objects_with_finalizer_pool = metadata->objects_with_finalizer_pool;
+  Finref_Metadata *metadata = gc->finref_metadata;
+  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
 
-  /* put back last objects_with_finalizer block of each mutator */
+  /* put back last obj_with_fin block of each mutator */
   Mutator *mutator = gc->mutator_list;
   while(mutator){
-    pool_put_entry(objects_with_finalizer_pool, mutator->objects_with_finalizer);
-    mutator->objects_with_finalizer = NULL;
+    pool_put_entry(obj_with_fin_pool, mutator->obj_with_fin);
+    mutator->obj_with_fin = NULL;
     mutator = mutator->next;
   }
   return;
@@ -51,23 +53,23 @@
 /* reset weak references vector block of each collector */
 void collector_reset_weakref_sets(Collector *collector)
 {
-  collector->softref_set = finalizer_weakref_get_free_block();
-  collector->weakref_set = finalizer_weakref_get_free_block();
-  collector->phanref_set= finalizer_weakref_get_free_block();
+  collector->softref_set = finref_get_free_block();
+  collector->weakref_set = finref_get_free_block();
+  collector->phanref_set= finref_get_free_block();
 }
 
-static void gc_set_weakref_sets(GC *gc)
+void gc_set_weakref_sets(GC *gc)
 {
-  Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+  Finref_Metadata *metadata = gc->finref_metadata;
   
   /* put back last weak references block of each collector */
   unsigned int num_active_collectors = gc->num_active_collectors;
   for(unsigned int i = 0; i < num_active_collectors; i++)
   {
     Collector* collector = gc->collectors[i];
-    pool_put_entry(metadata->softref_set_pool, collector->softref_set);
-    pool_put_entry(metadata->weakref_set_pool, collector->weakref_set);
-    pool_put_entry(metadata->phanref_set_pool, collector->phanref_set);
+    pool_put_entry(metadata->softref_pool, collector->softref_set);
+    pool_put_entry(metadata->weakref_pool, collector->weakref_set);
+    pool_put_entry(metadata->phanref_pool, collector->phanref_set);
     collector->softref_set = NULL;
     collector->weakref_set= NULL;
     collector->phanref_set= NULL;
@@ -76,26 +78,14 @@
 }
 
 
-extern Boolean obj_is_dead_in_minor_forward_collection(Collector *collector, Partial_Reveal_Object *p_obj);
-static inline Boolean obj_is_dead_in_minor_copy_collection(Collector *collector, Partial_Reveal_Object *p_obj)
+extern Boolean obj_is_dead_in_minor_forward_gc(Collector *collector, Partial_Reveal_Object *p_obj);
+static inline Boolean obj_is_dead_in_minor_copy_gc(Collector *collector, Partial_Reveal_Object *p_obj)
 {
-  GC *gc = collector->gc;
-  Lspace *los = ((GC_Gen *)gc)->los;
-  
-  if(space_of_addr(gc, p_obj) != (Space *)los)
-    return !obj_is_marked_in_vt(p_obj);
-  else
-    return !lspace_object_is_marked(los, p_obj);
+  return !obj_is_marked_in_vt(p_obj);
 }
-static inline Boolean obj_is_dead_in_major_collection(Collector *collector, Partial_Reveal_Object *p_obj)
+static inline Boolean obj_is_dead_in_major_gc(Collector *collector, Partial_Reveal_Object *p_obj)
 {
-  GC *gc = collector->gc;
-  Lspace *los = ((GC_Gen *)gc)->los;
-  
-  if(space_of_addr(gc, p_obj) != (Space *)los)
-    return !obj_is_marked_in_vt(p_obj);
-  else
-    return !lspace_object_is_marked(los, p_obj);
+  return !obj_is_marked_in_vt(p_obj);
 }
 // clear the two least significant bits of p_obj first
 static inline Boolean obj_is_dead(Collector *collector, Partial_Reveal_Object *p_obj)
@@ -104,17 +94,17 @@
   
   assert(p_obj);
   if(gc->collect_kind == MINOR_COLLECTION){
-    if( gc_requires_barriers())
-      return obj_is_dead_in_minor_forward_collection(collector, p_obj);
+    if( gc_is_gen_mode())
+      return obj_is_dead_in_minor_forward_gc(collector, p_obj);
     else
-      return obj_is_dead_in_minor_copy_collection(collector, p_obj);
+      return obj_is_dead_in_minor_copy_gc(collector, p_obj);
   } else {
-    return obj_is_dead_in_major_collection(collector, p_obj);
+    return obj_is_dead_in_major_gc(collector, p_obj);
   }
 }
 
 
-static inline Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Space *space)
+static inline Boolean fspace_obj_to_be_forwarded(Partial_Reveal_Object *p_obj, Space *space)
 {
   if(!obj_belongs_to_space(p_obj, (Space*)space)) return FALSE;
   return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
@@ -124,8 +114,8 @@
   assert(!obj_is_dead(collector, p_obj));
   GC *gc = collector->gc;
   
-  if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION)
-    return fspace_object_to_be_forwarded(p_obj, collector->collect_space);
+  if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION)
+    return fspace_obj_to_be_forwarded(p_obj, collector->collect_space);
   
   Space *space = space_of_addr(gc, p_obj);
   return space->move_object;
@@ -134,11 +124,11 @@
 
 extern void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref);
 extern void resurrect_obj_tree_after_mark(Collector *collector, Partial_Reveal_Object *p_obj);
-static inline void resurrect_obj_tree_in_minor_copy_collection(Collector *collector, Partial_Reveal_Object *p_obj)
+static inline void resurrect_obj_tree_in_minor_copy_gc(Collector *collector, Partial_Reveal_Object *p_obj)
 {
   resurrect_obj_tree_after_mark(collector, p_obj);
 }
-static inline void resurrect_obj_tree_in_major_collection(Collector *collector, Partial_Reveal_Object *p_obj)
+static inline void resurrect_obj_tree_in_major_gc(Collector *collector, Partial_Reveal_Object *p_obj)
 {
   resurrect_obj_tree_after_mark(collector, p_obj);
 }
@@ -148,23 +138,23 @@
 {
   GC *gc = collector->gc;
   
-  if(!gc_requires_barriers() || !(gc->collect_kind == MINOR_COLLECTION))
+  if(!gc_is_gen_mode() || !(gc->collect_kind == MINOR_COLLECTION))
     collector_repset_add_entry(collector, p_ref);
   if(!obj_is_dead(collector, *p_ref)){
-    if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION && obj_need_move(collector, *p_ref))
-      *p_ref = obj_get_forwarding_pointer_in_vt(*p_ref);
+    if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION && obj_need_move(collector, *p_ref))
+      *p_ref = obj_get_fw_in_oi(*p_ref);
     return;
   }
   Partial_Reveal_Object* p_obj = *p_ref;
   assert(p_obj);
   
   if(gc->collect_kind == MINOR_COLLECTION){
-    if( gc_requires_barriers())
+    if( gc_is_gen_mode())
       resurrect_obj_tree_after_trace(collector, p_ref);
     else
-      resurrect_obj_tree_in_minor_copy_collection(collector, p_obj);
+      resurrect_obj_tree_in_minor_copy_gc(collector, p_obj);
   } else {
-    resurrect_obj_tree_in_major_collection(collector, p_obj);
+    resurrect_obj_tree_in_major_gc(collector, p_obj);
   }
 }
 
@@ -175,27 +165,27 @@
   GC *gc = collector->gc;
   
   assert(!collector->rep_set);
-  if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION)
+  if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION)
     return;
-  collector->rep_set = pool_get_entry(gc->metadata->free_set_pool);
+  collector->rep_set = free_set_pool_get_entry(gc->metadata);
 }
 /* called after loop of resurrect_obj_tree() */
 static inline void collector_put_repset(Collector *collector)
 {
   GC *gc = collector->gc;
   
-  if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION)
+  if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION)
     return;
   pool_put_entry(gc->metadata->collector_repset_pool, collector->rep_set);
   collector->rep_set = NULL;
 }
 
 
-void finalizer_weakref_repset_add_entry_from_pool(Collector *collector, Pool *pool)
+static void finref_add_repset_from_pool(Collector *collector, Pool *pool)
 {
   GC *gc = collector->gc;
   
-  finalizer_weakref_reset_repset(gc);
+  finref_reset_repset(gc);
 
   pool_iterator_init(pool);
   while(Vector_Block *block = pool_iterator_next(pool)){
@@ -206,32 +196,33 @@
       iter = vector_block_iterator_advance(block, iter);
 	  
       if(*p_ref && obj_need_move(collector, *p_ref))
-        finalizer_weakref_repset_add_entry(gc, p_ref);
+        finref_repset_add_entry(gc, p_ref);
     }
   }
-  finalizer_weakref_put_repset(gc);
+  finref_put_repset(gc);
 }
 
 
-static void process_objects_with_finalizer(Collector *collector)
+static void identify_finalizable_objects(Collector *collector)
 {
   GC *gc = collector->gc;
-  Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
-  Pool *objects_with_finalizer_pool = metadata->objects_with_finalizer_pool;
-  Pool *finalizable_objects_pool = metadata->finalizable_objects_pool;
+  Finref_Metadata *metadata = gc->finref_metadata;
+  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
+  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
   
   gc_reset_finalizable_objects(gc);
-  pool_iterator_init(objects_with_finalizer_pool);
-  while(Vector_Block *block = pool_iterator_next(objects_with_finalizer_pool)){
+  pool_iterator_init(obj_with_fin_pool);
+  while(Vector_Block *block = pool_iterator_next(obj_with_fin_pool)){
     unsigned int block_has_ref = 0;
     unsigned int *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
+      Partial_Reveal_Object *p_obj = *p_ref;
       if(!p_obj)
         continue;
       if(obj_is_dead(collector, p_obj)){
-        gc_finalizable_objects_add_entry(gc, p_obj);
-        *iter = NULL;
+        gc_add_finalizable_obj(gc, p_obj);
+        *p_ref = NULL;
       } else {
         ++block_has_ref;
       }
@@ -241,10 +232,10 @@
   }
   gc_put_finalizable_objects(gc);
   
-  collector_reset_repset(collector);
-  if(!finalizable_objects_pool_is_empty(gc)){
-    pool_iterator_init(finalizable_objects_pool);
-    while(Vector_Block *block = pool_iterator_next(finalizable_objects_pool)){
+  if(!finalizable_obj_pool_is_empty(gc)){
+    collector_reset_repset(collector);
+    pool_iterator_init(finalizable_obj_pool);
+    while(Vector_Block *block = pool_iterator_next(finalizable_obj_pool)){
       unsigned int *iter = vector_block_iterator_init(block);
       while(!vector_block_iterator_end(block, iter)){
         assert(*iter);
@@ -253,20 +244,20 @@
       }
     }
     metadata->pending_finalizers = TRUE;
+    collector_put_repset(collector);
   }
-  collector_put_repset(collector);
   
-  finalizer_weakref_repset_add_entry_from_pool(collector, objects_with_finalizer_pool);
+  finref_add_repset_from_pool(collector, obj_with_fin_pool);
   /* finalizable objects have been added to collector repset pool */
-  //finalizer_weakref_repset_add_entry_from_pool(collector, finalizable_objects_pool);
+  //finref_add_repset_from_pool(collector, finalizable_obj_pool);
 }
 
-static void post_process_finalizable_objects(GC *gc)
+static void put_finalizable_obj_to_vm(GC *gc)
 {
-  Pool *finalizable_objects_pool = gc->finalizer_weakref_metadata->finalizable_objects_pool;
-  Pool *free_pool = gc->finalizer_weakref_metadata->free_pool;
+  Pool *finalizable_obj_pool = gc->finref_metadata->finalizable_obj_pool;
+  Pool *free_pool = gc->finref_metadata->free_pool;
   
-  while(Vector_Block *block = pool_get_entry(finalizable_objects_pool)){
+  while(Vector_Block *block = pool_get_entry(finalizable_obj_pool)){
     unsigned int *iter = vector_block_iterator_init(block);
     while(!vector_block_iterator_end(block, iter)){
       assert(*iter);
@@ -279,106 +270,149 @@
   }
 }
 
-static void process_soft_references(Collector *collector)
+static void update_referent_ignore_finref(Collector *collector, Pool *pool)
+{
+  GC *gc = collector->gc;
+  
+  while(Vector_Block *block = pool_get_entry(pool)){
+    unsigned int *iter = vector_block_iterator_init(block);
+    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
+      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
+      Partial_Reveal_Object *p_obj = *p_ref;
+      assert(p_obj);
+      Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
+      Partial_Reveal_Object *p_referent = *p_referent_field;
+      
+      if(!p_referent){  // referent field has been cleared
+        *p_ref = NULL;
+        continue;
+      }
+      if(!obj_is_dead(collector, p_referent)){  // referent is alive
+        if(obj_need_move(collector, p_referent))
+          finref_repset_add_entry(gc, p_referent_field);
+        *p_ref = NULL;
+        continue;
+      }
+      *p_referent_field = NULL; /* referent is softly reachable: clear the referent field */
+    }
+  }
+}
+
+void update_ref_ignore_finref(Collector *collector)
+{
+  GC *gc = collector->gc;
+  Finref_Metadata *metadata = gc->finref_metadata;
+  
+  finref_reset_repset(gc);
+  update_referent_ignore_finref(collector, metadata->softref_pool);
+  update_referent_ignore_finref(collector, metadata->weakref_pool);
+  update_referent_ignore_finref(collector, metadata->phanref_pool);
+  finref_put_repset(gc);
+}
+
+static void identify_dead_softrefs(Collector *collector)
 {
   GC *gc = collector->gc;
   if(gc->collect_kind == MINOR_COLLECTION){
-    assert(softref_set_pool_is_empty(gc));
+    assert(softref_pool_is_empty(gc));
     return;
   }
   
-  Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
-  Pool *softref_set_pool = metadata->softref_set_pool;
+  Finref_Metadata *metadata = gc->finref_metadata;
+  Pool *softref_pool = metadata->softref_pool;
   
-  finalizer_weakref_reset_repset(gc);
-  pool_iterator_init(softref_set_pool);
-  while(Vector_Block *block = pool_iterator_next(softref_set_pool)){
+  finref_reset_repset(gc);
+  pool_iterator_init(softref_pool);
+  while(Vector_Block *block = pool_iterator_next(softref_pool)){
     unsigned int *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
+      Partial_Reveal_Object *p_obj = *p_ref;
       assert(p_obj);
       Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
       Partial_Reveal_Object *p_referent = *p_referent_field;
       
       if(!p_referent){  // referent field has been cleared
-        *iter = NULL;
+        *p_ref = NULL;
         continue;
       }
       if(!obj_is_dead(collector, p_referent)){  // referent is alive
         if(obj_need_move(collector, p_referent))
-          finalizer_weakref_repset_add_entry(gc, p_referent_field);
-        *iter = NULL;
+          finref_repset_add_entry(gc, p_referent_field);
+        *p_ref = NULL;
         continue;
       }
       *p_referent_field = NULL; /* referent is softly reachable: clear the referent field */
     }
   }
-  finalizer_weakref_put_repset(gc);
+  finref_put_repset(gc);
   
-  finalizer_weakref_repset_add_entry_from_pool(collector, softref_set_pool);
+  finref_add_repset_from_pool(collector, softref_pool);
   return;
 }
 
-static void process_weak_references(Collector *collector)
+static void identify_dead_weakrefs(Collector *collector)
 {
   GC *gc = collector->gc;
-  Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
-  Pool *weakref_set_pool = metadata->weakref_set_pool;
+  Finref_Metadata *metadata = gc->finref_metadata;
+  Pool *weakref_pool = metadata->weakref_pool;
   
-  finalizer_weakref_reset_repset(gc);
-  pool_iterator_init(weakref_set_pool);
-  while(Vector_Block *block = pool_iterator_next(weakref_set_pool)){
+  finref_reset_repset(gc);
+  pool_iterator_init(weakref_pool);
+  while(Vector_Block *block = pool_iterator_next(weakref_pool)){
     unsigned int *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
+      Partial_Reveal_Object *p_obj = *p_ref;
       assert(p_obj);
       Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
       Partial_Reveal_Object *p_referent = *p_referent_field;
       
       if(!p_referent){  // referent field has been cleared
-        *iter = NULL;
+        *p_ref = NULL;
         continue;
       }
       if(!obj_is_dead(collector, p_referent)){  // referent is alive
         if(obj_need_move(collector, p_referent))
-          finalizer_weakref_repset_add_entry(gc, p_referent_field);
-        *iter = NULL;
+          finref_repset_add_entry(gc, p_referent_field);
+        *p_ref = NULL;
         continue;
       }
       *p_referent_field = NULL; /* referent is weakly reachable: clear the referent field */
     }
   }
-  finalizer_weakref_put_repset(gc);
+  finref_put_repset(gc);
   
-  finalizer_weakref_repset_add_entry_from_pool(collector, weakref_set_pool);
+  finref_add_repset_from_pool(collector, weakref_pool);
   return;
 }
 
-static void process_phantom_references(Collector *collector)
+static void identify_dead_phanrefs(Collector *collector)
 {
   GC *gc = collector->gc;
-  Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
-  Pool *phanref_set_pool = metadata->phanref_set_pool;
+  Finref_Metadata *metadata = gc->finref_metadata;
+  Pool *phanref_pool = metadata->phanref_pool;
   
-  finalizer_weakref_reset_repset(gc);
+  finref_reset_repset(gc);
 //  collector_reset_repset(collector);
-  pool_iterator_init(phanref_set_pool);
-  while(Vector_Block *block = pool_iterator_next(phanref_set_pool)){
+  pool_iterator_init(phanref_pool);
+  while(Vector_Block *block = pool_iterator_next(phanref_pool)){
     unsigned int *iter = vector_block_iterator_init(block);
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
-      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
+      Partial_Reveal_Object *p_obj = *p_ref;
       assert(p_obj);
       Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
       Partial_Reveal_Object *p_referent = *p_referent_field;
       
       if(!p_referent){  // referent field has been cleared
-        *iter = NULL;
+        *p_ref = NULL;
         continue;
       }
       if(!obj_is_dead(collector, p_referent)){  // referent is alive
         if(obj_need_move(collector, p_referent))
-          finalizer_weakref_repset_add_entry(gc, p_referent_field);
-        *iter = NULL;
+          finref_repset_add_entry(gc, p_referent_field);
+        *p_ref = NULL;
         continue;
       }
       *p_referent_field = NULL;
@@ -392,15 +426,15 @@
     }
   }
 //  collector_put_repset(collector);
-  finalizer_weakref_put_repset(gc);
+  finref_put_repset(gc);
   
-  finalizer_weakref_repset_add_entry_from_pool(collector, phanref_set_pool);
+  finref_add_repset_from_pool(collector, phanref_pool);
   return;
 }
 
-static inline void post_process_special_reference_pool(GC *gc, Pool *reference_pool)
+static inline void put_dead_refs_to_vm(GC *gc, Pool *reference_pool)
 {
-  Pool *free_pool = gc->finalizer_weakref_metadata->free_pool;
+  Pool *free_pool = gc->finref_metadata->free_pool;
   
   while(Vector_Block *block = pool_get_entry(reference_pool)){
     unsigned int *iter = vector_block_iterator_init(block);
@@ -415,48 +449,48 @@
   }
 }
 
-static void post_process_special_references(GC *gc)
+static void put_dead_weak_refs_to_vm(GC *gc)
 {
-  if(softref_set_pool_is_empty(gc)
-      && weakref_set_pool_is_empty(gc)
-      && phanref_set_pool_is_empty(gc)){
-    gc_clear_special_reference_pools(gc);
+  if(softref_pool_is_empty(gc)
+      && weakref_pool_is_empty(gc)
+      && phanref_pool_is_empty(gc)){
+    gc_clear_weakref_pools(gc);
     return;
   }
   
-  gc->finalizer_weakref_metadata->pending_weak_references = TRUE;
+  gc->finref_metadata->pending_weakrefs = TRUE;
   
-  Pool *softref_set_pool = gc->finalizer_weakref_metadata->softref_set_pool;
-  Pool *weakref_set_pool = gc->finalizer_weakref_metadata->weakref_set_pool;
-  Pool *phanref_set_pool = gc->finalizer_weakref_metadata->phanref_set_pool;
-  Pool *free_pool = gc->finalizer_weakref_metadata->free_pool;
+  Pool *softref_pool = gc->finref_metadata->softref_pool;
+  Pool *weakref_pool = gc->finref_metadata->weakref_pool;
+  Pool *phanref_pool = gc->finref_metadata->phanref_pool;
+  Pool *free_pool = gc->finref_metadata->free_pool;
   
-  post_process_special_reference_pool(gc, softref_set_pool);
-  post_process_special_reference_pool(gc, weakref_set_pool);
-  post_process_special_reference_pool(gc, phanref_set_pool);
+  put_dead_refs_to_vm(gc, softref_pool);
+  put_dead_refs_to_vm(gc, weakref_pool);
+  put_dead_refs_to_vm(gc, phanref_pool);
 }
 
-void collector_process_finalizer_weakref(Collector *collector)
+void collector_identify_finref(Collector *collector)
 {
   GC *gc = collector->gc;
   
   gc_set_weakref_sets(gc);
-  process_soft_references(collector);
-  process_weak_references(collector);
-  process_objects_with_finalizer(collector);
-  process_phantom_references(collector);
+  identify_dead_softrefs(collector);
+  identify_dead_weakrefs(collector);
+  identify_finalizable_objects(collector);
+  identify_dead_phanrefs(collector);
 }
 
-void gc_post_process_finalizer_weakref(GC *gc)
+void gc_put_finref_to_vm(GC *gc)
 {
-  post_process_special_references(gc);
-  post_process_finalizable_objects(gc);
+  put_dead_weak_refs_to_vm(gc);
+  put_finalizable_obj_to_vm(gc);
 }
 
-void process_objects_with_finalizer_on_exit(GC *gc)
+void put_all_fin_on_exit(GC *gc)
 {
-  Pool *objects_with_finalizer_pool = gc->finalizer_weakref_metadata->objects_with_finalizer_pool;
-  Pool *free_pool = gc->finalizer_weakref_metadata->free_pool;
+  Pool *obj_with_fin_pool = gc->finref_metadata->obj_with_fin_pool;
+  Pool *free_pool = gc->finref_metadata->free_pool;
   
   vm_gc_lock_enum();
   /* FIXME: holding gc lock is not enough, perhaps there are mutators that are allocating objects with finalizer
@@ -465,9 +499,9 @@
    * allocating mem and adding the objects with finalizer to the pool
    */
   lock(gc->mutator_list_lock);
-  gc_set_objects_with_finalizer(gc);
+  gc_set_obj_with_fin(gc);
   unlock(gc->mutator_list_lock);
-  while(Vector_Block *block = pool_get_entry(objects_with_finalizer_pool)){
+  while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){
     unsigned int *iter = vector_block_iterator_init(block);
     while(!vector_block_iterator_end(block, iter)){
       Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter;
@@ -481,9 +515,9 @@
   vm_gc_unlock_enum();
 }
 
-void gc_update_finalizer_weakref_repointed_refs(GC* gc)
+void gc_update_finref_repointed_refs(GC* gc)
 {
-  Finalizer_Weakref_Metadata* metadata = gc->finalizer_weakref_metadata;
+  Finref_Metadata* metadata = gc->finref_metadata;
   Pool *repset_pool = metadata->repset_pool;
   
   /* NOTE:: this is destructive to the root sets. */
@@ -499,17 +533,10 @@
       /* For repset, this check is unnecessary, since all slots are repointed; otherwise
          they will not be recorded. For root set, it is possible to point to LOS or other
          non-moved space.  */
-#ifdef _DEBUG
-      if( !gc_requires_barriers() || gc->collect_kind == MAJOR_COLLECTION ){
-        assert(obj_is_forwarded_in_obj_info(p_obj));
-      } else
-        assert(obj_is_forwarded_in_vt(p_obj));
-#endif
       Partial_Reveal_Object* p_target_obj;
-      if( !gc_requires_barriers() || gc->collect_kind == MAJOR_COLLECTION )
-        p_target_obj = get_forwarding_pointer_in_obj_info(p_obj);
-      else
-        p_target_obj = obj_get_forwarding_pointer_in_vt(p_obj);
+      assert(obj_is_fw_in_oi(p_obj));
+      p_target_obj = obj_get_fw_in_oi(p_obj);
+
       *p_ref = p_target_obj;
     }
     vector_block_clear(root_set);
@@ -520,13 +547,13 @@
   return;
 }
 
-void gc_activate_finalizer_weakref_threads(GC *gc)
+void gc_activate_finref_threads(GC *gc)
 {
-  Finalizer_Weakref_Metadata* metadata = gc->finalizer_weakref_metadata;
+  Finref_Metadata* metadata = gc->finref_metadata;
   
-  if(metadata->pending_finalizers || metadata->pending_weak_references){
+  if(metadata->pending_finalizers || metadata->pending_weakrefs){
 	  metadata->pending_finalizers = FALSE;
-    metadata->pending_weak_references = FALSE;
+    metadata->pending_weakrefs = FALSE;
     vm_hint_finalize();
   }
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h Thu Jan 11 05:57:16 2007
@@ -18,12 +18,16 @@
  * @author Li-Gang Wang, 2006/11/30
  */
 
-#ifndef _FINALIZER_WEAKREF_H_
-#define _FINALIZER_WEAKREF_H_
+#ifndef _FINREF_H_
+#define _FINREF_H_
+
+#define BUILD_IN_REFERENT
 
 #include "finalizer_weakref_metadata.h"
 #include "../thread/collector.h"
 
+extern Boolean IGNORE_FINREF;
+
 /* Phantom status: for future use
  * #define PHANTOM_REF_ENQUEUE_STATUS_MASK 0x3
  * #define PHANTOM_REF_ENQUEUED_MASK 0x1
@@ -64,13 +68,13 @@
       if(collect_kind==MINOR_COLLECTION)
         scan_slot(collector, p_referent_field);
       else
-        collector_softref_set_add_entry(collector, p_obj);
+        collector_add_softref(collector, p_obj);
       break;
     case WEAK_REFERENCE :
-      collector_weakref_set_add_entry(collector, p_obj);
+      collector_add_weakref(collector, p_obj);
       break;
     case PHANTOM_REFERENCE :
-      collector_phanref_set_add_entry(collector, p_obj);
+      collector_add_phanref(collector, p_obj);
       break;
     default :
       assert(0);
@@ -79,15 +83,17 @@
 }
 
 
-extern void mutator_reset_objects_with_finalizer(Mutator *mutator);
-extern void gc_set_objects_with_finalizer(GC *gc);
+extern void mutator_reset_obj_with_fin(Mutator *mutator);
+extern void gc_set_obj_with_fin(GC *gc);
 extern void collector_reset_weakref_sets(Collector *collector);
 
-extern void collector_process_finalizer_weakref(Collector *collector);
-extern void gc_post_process_finalizer_weakref(GC *gc);
-extern void process_objects_with_finalizer_on_exit(GC *gc);
+extern void gc_set_weakref_sets(GC *gc);
+extern void update_ref_ignore_finref(Collector *collector);
+extern void collector_identify_finref(Collector *collector);
+extern void gc_put_finref_to_vm(GC *gc);
+extern void put_all_fin_on_exit(GC *gc);
 
-extern void gc_update_finalizer_weakref_repointed_refs(GC* gc);
-extern void gc_activate_finalizer_weakref_threads(GC *gc);
+extern void gc_update_finref_repointed_refs(GC* gc);
+extern void gc_activate_finref_threads(GC *gc);
 
-#endif // _FINALIZER_WEAKREF_H_
+#endif // _FINREF_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp Thu Jan 11 05:57:16 2007
@@ -28,168 +28,168 @@
 #define METADATA_BLOCK_SIZE_BIT_SHIFT 10
 #define METADATA_BLOCK_SIZE_BYTES (1<<METADATA_BLOCK_SIZE_BIT_SHIFT)
 
-static Finalizer_Weakref_Metadata finalizer_weakref_metadata;
+static Finref_Metadata finref_metadata;
 
/* Accessor for the cached offset of the `referent` field inside the
 * Reference class layout (see gc_referent_offset in Finref_Metadata). */
unsigned int get_gc_referent_offset(void)
{
  return finref_metadata.gc_referent_offset;
}
/* Cache the `referent` field offset so collectors can locate referents
 * without re-querying the class layout. */
void set_gc_referent_offset(unsigned int offset)
{
  finref_metadata.gc_referent_offset = offset;
}
 
/* Set up the global finalizer/weak-reference metadata and attach it to `gc`.
 * Allocates the first pool segment, carves it into fixed-size vector blocks
 * handed to free_pool, and creates the (initially empty) per-purpose pools. */
void gc_finref_metadata_initialize(GC *gc)
{
  /* NOTE(review): STD_MALLOC result is not checked; the memset below will
   * crash on OOM — confirm this is the intended out-of-memory policy. */
  void *pool_segment = STD_MALLOC(POOL_SEGMENT_SIZE_BYTES);
  memset(pool_segment, 0, POOL_SEGMENT_SIZE_BYTES);
  finref_metadata.num_alloc_segs = 0;
  finref_metadata.pool_segments[finref_metadata.num_alloc_segs] = pool_segment;
  ++finref_metadata.num_alloc_segs;
  
  finref_metadata.free_pool = sync_pool_create();
  finref_metadata.obj_with_fin_pool = sync_pool_create();
  finref_metadata.finalizable_obj_pool = sync_pool_create();
  finref_metadata.softref_pool = sync_pool_create();
  finref_metadata.weakref_pool = sync_pool_create();
  finref_metadata.phanref_pool = sync_pool_create();
  finref_metadata.repset_pool = sync_pool_create();
  
  finref_metadata.finalizable_obj_set= NULL;
  finref_metadata.repset = NULL;
  
  /* Carve the segment into METADATA_BLOCK_SIZE_BYTES vector blocks and
   * donate them all to the free pool. */
  unsigned int num_blocks =  POOL_SEGMENT_SIZE_BYTES >> METADATA_BLOCK_SIZE_BIT_SHIFT;
  for(unsigned int i=0; i<num_blocks; i++){
    Vector_Block *block = (Vector_Block *)((unsigned int)pool_segment + i*METADATA_BLOCK_SIZE_BYTES);
    vector_block_init(block, METADATA_BLOCK_SIZE_BYTES);
    assert(vector_block_is_empty((Vector_Block *)block));
    pool_put_entry(finref_metadata.free_pool, (void *)block);
  }
  
  finref_metadata.pending_finalizers = FALSE;
  finref_metadata.pending_weakrefs = FALSE;
  finref_metadata.gc_referent_offset = 0;
  
  gc->finref_metadata = &finref_metadata;
  return;
}
 
/* Tear down finref metadata: destroy every pool, free all malloced pool
 * segments, and detach the metadata from `gc`. */
void gc_finref_metadata_destruct(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  
  sync_pool_destruct(metadata->free_pool);
  sync_pool_destruct(metadata->obj_with_fin_pool);
  sync_pool_destruct(metadata->finalizable_obj_pool);
  sync_pool_destruct(metadata->softref_pool);
  sync_pool_destruct(metadata->weakref_pool);
  sync_pool_destruct(metadata->phanref_pool);
  sync_pool_destruct(metadata->repset_pool);
  
  metadata->finalizable_obj_set = NULL;
  metadata->repset = NULL;
  
  /* Release the backing segments allocated by initialize/extend; only the
   * first num_alloc_segs slots were ever filled. */
  for(unsigned int i=0; i<metadata->num_alloc_segs; i++){
    assert(metadata->pool_segments[i]);
    STD_FREE(metadata->pool_segments[i]);
  }
  
  gc->finref_metadata = NULL;
}
 
/* Debug invariant check: between collections all per-GC temporary pools and
 * in-use buffers must be empty/NULL.
 * `is_before_gc` is currently unused — the same invariants are asserted both
 * before and after a collection. */
void gc_finref_metadata_verify(GC *gc, Boolean is_before_gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  
  assert(pool_is_empty(metadata->finalizable_obj_pool));
  assert(pool_is_empty(metadata->softref_pool));
  assert(pool_is_empty(metadata->weakref_pool));
  assert(pool_is_empty(metadata->phanref_pool));
  assert(pool_is_empty(metadata->repset_pool));
  assert(metadata->finalizable_obj_set == NULL);
  assert(metadata->repset == NULL);
  
  return;
}
 
/* Recycle the objects-with-finalizer pool after a collection:
 * drain obj_with_fin_pool, returning blocks with no entries to free_pool and
 * staging non-empty blocks in finalizable_obj_pool; then swap the two pool
 * pointers so obj_with_fin_pool again holds the surviving entries and
 * finalizable_obj_pool is empty for the next GC. */
void gc_reset_finref_metadata(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
  
  /* all per-GC temporary state must already have been consumed */
  assert(pool_is_empty(finalizable_obj_pool));
  assert(pool_is_empty(metadata->softref_pool));
  assert(pool_is_empty(metadata->weakref_pool));
  assert(pool_is_empty(metadata->phanref_pool));
  assert(pool_is_empty(metadata->repset_pool));
  assert(metadata->finalizable_obj_set == NULL);
  assert(metadata->repset == NULL);
  
  while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){
    unsigned int *iter = vector_block_iterator_init(block);
    if(vector_block_iterator_end(block, iter)){
      /* block holds no entries: recycle it */
      vector_block_clear(block);
      pool_put_entry(metadata->free_pool, block);
    } else {
      /* block still holds entries: keep it via the staging pool */
      pool_put_entry(finalizable_obj_pool, block);
    }
  }
  assert(pool_is_empty(obj_with_fin_pool));
  /* swap the roles of the two pools */
  metadata->obj_with_fin_pool = finalizable_obj_pool;
  metadata->finalizable_obj_pool = obj_with_fin_pool;
}
 
-/* called when there is no Vector_Block in finalizer_weakref_metadata->free_pool
+/* called when there is no Vector_Block in finref_metadata->free_pool
  * extend the pool by a pool segment
  */
-static void gc_finalizer_weakref_metadata_extend(void)
+static void finref_metadata_extend(void)
 {
-  Finalizer_Weakref_Metadata metadata = finalizer_weakref_metadata;
+  Finref_Metadata *metadata = &finref_metadata;
   
-  unsigned int segment_pos = metadata.next_segment_pos;
-  while(segment_pos < POOL_SEGMENT_NUM){
-    unsigned int next_segment_pos = segment_pos + 1;
-    unsigned int temp = (unsigned int)atomic_cas32((volatile unsigned int *)&metadata.next_segment_pos, next_segment_pos, segment_pos);
-    if(temp == segment_pos)
+  unsigned int pos = metadata->num_alloc_segs;
+  while(pos < POOL_SEGMENT_NUM){
+    unsigned int next_pos = pos + 1;
+    unsigned int temp = (unsigned int)atomic_cas32((volatile unsigned int *)&metadata->num_alloc_segs, next_pos, pos);
+    if(temp == pos)
       break;
-    segment_pos = metadata.next_segment_pos;
+    pos = metadata->num_alloc_segs;
   }
-  if(segment_pos > POOL_SEGMENT_NUM)
+  if(pos > POOL_SEGMENT_NUM)
     return;
   
   void *pool_segment = STD_MALLOC(POOL_SEGMENT_SIZE_BYTES);
   memset(pool_segment, 0, POOL_SEGMENT_SIZE_BYTES);
-  metadata.pool_segments[segment_pos] = pool_segment;
+  metadata->pool_segments[pos] = pool_segment;
   
   unsigned int num_blocks =  POOL_SEGMENT_SIZE_BYTES >> METADATA_BLOCK_SIZE_BIT_SHIFT;
   for(unsigned int i=0; i<num_blocks; i++){
     Vector_Block *block = (Vector_Block *)((unsigned int)pool_segment + i*METADATA_BLOCK_SIZE_BYTES);
     vector_block_init(block, METADATA_BLOCK_SIZE_BYTES);
     assert(vector_block_is_empty((Vector_Block *)block));
-    pool_put_entry(metadata.free_pool, (void *)block);
+    pool_put_entry(metadata->free_pool, (void *)block);
   }
   
   return;
 }
 
/* Pop a free vector block, extending the metadata segments whenever the
 * free pool is empty.
 * NOTE(review): if all POOL_SEGMENT_NUM segments are already allocated and
 * free_pool stays empty, this loops forever — confirm the segment budget is
 * sized so that cannot happen. */
Vector_Block *finref_get_free_block(void)
{
  Vector_Block *block;
  
  while(!(block = pool_get_entry(finref_metadata.free_pool)))
    finref_metadata_extend();
  return block;
}
 
/* called when GC completes and there is no Vector_Block in the last five
 * pools of gc->finref_metadata: shrink the free pool by half.
 * NOTE(review): intentionally an unimplemented stub — no shrinking occurs. */
void finref_metadata_shrink(GC *gc)
{
}
 
-static inline void finalizer_weakref_metadata_general_add_entry(Vector_Block* &vector_block_in_use, Pool *pool, Partial_Reveal_Object *ref)
+static inline void finref_metadata_add_entry(Vector_Block* &vector_block_in_use, Pool *pool, Partial_Reveal_Object *ref)
 {
   assert(vector_block_in_use);
   assert(ref);
@@ -200,41 +200,41 @@
   if(!vector_block_is_full(block)) return;
   
   pool_put_entry(pool, block);
-  vector_block_in_use = finalizer_weakref_get_free_block();
+  vector_block_in_use = finref_get_free_block();
 }
 
/* Record, at allocation time, an object that has a finalizer; entries
 * buffer in the mutator-local block and spill to obj_with_fin_pool. */
void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref)
{
  finref_metadata_add_entry(mutator->obj_with_fin, finref_metadata.obj_with_fin_pool, ref);
}

/* Record an object identified as finalizable during the current GC. */
void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref)
{
  finref_metadata_add_entry(finref_metadata.finalizable_obj_set, finref_metadata.finalizable_obj_pool, ref);
}

/* Record a soft reference discovered by this collector during tracing. */
void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref)
{
  finref_metadata_add_entry(collector->softref_set, finref_metadata.softref_pool, ref);
}

/* Record a weak reference discovered by this collector during tracing. */
void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref)
{
  finref_metadata_add_entry(collector->weakref_set, finref_metadata.weakref_pool, ref);
}

/* Record a phantom reference discovered by this collector during tracing. */
void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref)
{
  finref_metadata_add_entry(collector->phanref_set, finref_metadata.phanref_pool, ref);
}

/* Record the address of a repointed reference slot for later fix-up.
 * Note the entry stored is the slot address itself (hence the cast). */
void finref_repset_add_entry(GC *gc, Partial_Reveal_Object **p_ref)
{
  assert(*p_ref);
  finref_metadata_add_entry(finref_metadata.repset, finref_metadata.repset_pool, (Partial_Reveal_Object *)p_ref);
}
 
-static inline Boolean pool_has_no_reference(Pool *pool)
+static inline Boolean pool_has_no_ref(Pool *pool)
 {
   if(pool_is_empty(pool))
     return TRUE;
@@ -250,48 +250,48 @@
   return TRUE;
 }
 
/* The queries below treat a pool as empty when it has no blocks or only
 * blocks that hold no entries (delegated to pool_has_no_ref). */

/* TRUE when no objects with finalizers are registered. */
Boolean obj_with_fin_pool_is_empty(GC *gc)
{
  return pool_has_no_ref(gc->finref_metadata->obj_with_fin_pool);
}

/* TRUE when no finalizable objects were identified in this GC. */
Boolean finalizable_obj_pool_is_empty(GC *gc)
{
  return pool_has_no_ref(gc->finref_metadata->finalizable_obj_pool);
}

/* TRUE when no soft references were identified in this GC. */
Boolean softref_pool_is_empty(GC *gc)
{
  return pool_has_no_ref(gc->finref_metadata->softref_pool);
}

/* TRUE when no weak references were identified in this GC. */
Boolean weakref_pool_is_empty(GC *gc)
{
  return pool_has_no_ref(gc->finref_metadata->weakref_pool);
}

/* TRUE when no phantom references were identified in this GC. */
Boolean phanref_pool_is_empty(GC *gc)
{
  return pool_has_no_ref(gc->finref_metadata->phanref_pool);
}

/* TRUE when no repointed slots are pending fix-up. */
Boolean finref_repset_pool_is_empty(GC *gc)
{
  return pool_has_no_ref(gc->finref_metadata->repset_pool);
}
 
-static inline void finalizer_weakref_metadata_clear_pool(Pool *pool)
+static inline void finref_metadata_clear_pool(Pool *pool)
 {
   while(Vector_Block* block = pool_get_entry(pool))
   {
     vector_block_clear(block);
-    pool_put_entry(finalizer_weakref_metadata.free_pool, block);
+    pool_put_entry(finref_metadata.free_pool, block);
   }
 }
 
-void gc_clear_special_reference_pools(GC *gc)
+void gc_clear_weakref_pools(GC *gc)
 {
-  finalizer_weakref_metadata_clear_pool(gc->finalizer_weakref_metadata->softref_set_pool);
-  finalizer_weakref_metadata_clear_pool(gc->finalizer_weakref_metadata->weakref_set_pool);
-  finalizer_weakref_metadata_clear_pool(gc->finalizer_weakref_metadata->phanref_set_pool);
+  finref_metadata_clear_pool(gc->finref_metadata->softref_pool);
+  finref_metadata_clear_pool(gc->finref_metadata->weakref_pool);
+  finref_metadata_clear_pool(gc->finref_metadata->phanref_pool);
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h Thu Jan 11 05:57:16 2007
@@ -18,8 +18,8 @@
  * @author Li-Gang Wang, 2006/11/29
  */
 
-#ifndef _FINALIZER_WEAKREF_METADATA_H_
-#define _FINALIZER_WEAKREF_METADATA_H_
+#ifndef _FINREF_METADATA_H_
+#define _FINREF_METADATA_H_
 
 #include "../common/gc_common.h"
 #include "../utils/vector_block.h"
@@ -27,90 +27,89 @@
 
 #define POOL_SEGMENT_NUM 256
 
/* Global bookkeeping for finalizer and soft/weak/phantom reference
 * processing; a single instance is shared via gc->finref_metadata. */
typedef struct Finref_Metadata{
  void *pool_segments[POOL_SEGMENT_NUM];  // malloced free pool segments' addresses array
  unsigned int num_alloc_segs;            // number of segments allocated so far;
                                          // equivalently, next free index in pool_segments
  
  Pool *free_pool;                        // list of free buffers for the five pools below
  
  Pool *obj_with_fin_pool;                // list of objects that have finalizer;
                                          // these objects are added in when they are allocated
  Pool *finalizable_obj_pool;             // temporary buffer for finalizable objects identified during one single GC
  
  Pool *softref_pool;                     // temporary buffer for soft references identified during one single GC
  Pool *weakref_pool;                     // temporary buffer for weak references identified during one single GC
  Pool *phanref_pool;                     // temporary buffer for phantom references identified during one single GC
  
  Pool *repset_pool;                      // repointed reference slot sets
  
  Vector_Block *finalizable_obj_set;      // in-use buffer being filled for finalizable_obj_pool
  Vector_Block *repset;                   // in-use buffer being filled for repset_pool
  
  Boolean pending_finalizers;             // there are objects waiting to be finalized
  Boolean pending_weakrefs;               // there are weak references waiting to be enqueued
  
  unsigned int gc_referent_offset;        // the referent field's offset in Reference Class
}Finref_Metadata;
 
 extern unsigned int get_gc_referent_offset(void);
 extern void set_gc_referent_offset(unsigned int offset);
 
-extern void gc_finalizer_weakref_metadata_initialize(GC *gc);
-extern void gc_finalizer_weakref_metadata_destruct(GC *gc);
-extern void gc_finalizer_weakref_metadata_verify(GC *gc, Boolean is_before_gc);
-extern void gc_reset_finalizer_weakref_metadata(GC *gc);
-extern Vector_Block *finalizer_weakref_get_free_block(void);
-extern void gc_finalizer_weakref_metadata_shrink(GC *gc);
-
-extern void mutator_finalizer_add_entry(Mutator *mutator, Partial_Reveal_Object *ref);
-extern void gc_finalizable_objects_add_entry(GC *gc, Partial_Reveal_Object *ref);
-extern void collector_softref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref);
-extern void collector_weakref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref);
-extern void collector_phanref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref);
-extern void finalizer_weakref_repset_add_entry(GC *gc, Partial_Reveal_Object **ref);
-
-extern Boolean objects_with_finalizer_pool_is_empty(GC *gc);
-extern Boolean finalizable_objects_pool_is_empty(GC *gc);
-extern Boolean softref_set_pool_is_empty(GC *gc);
-extern Boolean weakref_set_pool_is_empty(GC *gc);
-extern Boolean phanref_set_pool_is_empty(GC *gc);
-extern Boolean finalizer_weakref_repset_pool_is_empty(GC *gc);
+extern void gc_finref_metadata_initialize(GC *gc);
+extern void gc_finref_metadata_destruct(GC *gc);
+extern void gc_finref_metadata_verify(GC *gc, Boolean is_before_gc);
+extern void gc_reset_finref_metadata(GC *gc);
+extern Vector_Block *finref_get_free_block(void);
+
+extern void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref);
+extern void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref);
+extern void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref);
+extern void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref);
+extern void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref);
+extern void finref_repset_add_entry(GC *gc, Partial_Reveal_Object **ref);
+
+extern Boolean obj_with_fin_pool_is_empty(GC *gc);
+extern Boolean finalizable_obj_pool_is_empty(GC *gc);
+extern Boolean softref_pool_is_empty(GC *gc);
+extern Boolean weakref_pool_is_empty(GC *gc);
+extern Boolean phanref_pool_is_empty(GC *gc);
+extern Boolean finref_repset_pool_is_empty(GC *gc);
 
-extern void gc_clear_special_reference_pools(GC *gc);
+extern void gc_clear_weakref_pools(GC *gc);
 
 
/* called before loop of recording finalizable objects:
 * grab a fresh in-use buffer for finalizable_obj_set.
 * NOTE(review): pool_get_entry may return NULL if free_pool is exhausted;
 * finref_metadata_add_entry asserts a non-NULL buffer — confirm free_pool
 * can never be empty here. */
inline void gc_reset_finalizable_objects(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  
  assert(!metadata->finalizable_obj_set);
  metadata->finalizable_obj_set = pool_get_entry(metadata->free_pool);
}
/* called after loop of recording finalizable objects:
 * flush the in-use buffer into the pool and drop ownership. */
inline void gc_put_finalizable_objects(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  
  pool_put_entry(metadata->finalizable_obj_pool, metadata->finalizable_obj_set);
  metadata->finalizable_obj_set = NULL;
}

/* called before loop of recording repointed reference:
 * grab a fresh in-use buffer for repset (same NULL caveat as above). */
inline void finref_reset_repset(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  
  assert(!metadata->repset);
  metadata->repset = pool_get_entry(metadata->free_pool);
}
/* called after loop of recording repointed reference:
 * flush the in-use buffer into repset_pool and drop ownership. */
inline void finref_put_repset(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  
  pool_put_entry(metadata->repset_pool, metadata->repset);
  metadata->repset = NULL;
}
 
-#endif // _FINALIZER_WEAKREF_METADATA_H_
+#endif // _FINREF_METADATA_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp Thu Jan 11 05:57:16 2007
@@ -19,34 +19,33 @@
  */
 
 #include "../gen/gen.h"
-
 #include "../thread/mutator.h"
+#include "gc_for_barrier.h"
 
 /* All the write barrier interfaces need cleanup */
 
-Boolean NEED_BARRIER = TRUE;
-
-Boolean gc_requires_barriers() 
-{   return NEED_BARRIER; }
+Boolean gen_mode;
 
 /* The implementations are only temporary */
/* Generational write barrier for a single slot store: when a slot outside
 * the nursery (NOS) is written to point at an object inside it, remember
 * the slot in the mutator's remembered set.
 * The fast path compares raw addresses against nos_boundary; the assert
 * cross-checks against the authoritative addr_belongs_to_nos().
 * NOTE(review): assumes NOS occupies the address range at/above
 * nos_boundary — confirm against the heap layout. */
static void gc_slot_write_barrier(Managed_Object_Handle *p_slot, 
                      Managed_Object_Handle p_target) 
{
  if(p_target >= nos_boundary && p_slot < nos_boundary){

    Mutator *mutator = (Mutator *)gc_get_tls();
    assert( addr_belongs_to_nos(p_target) && !addr_belongs_to_nos(p_slot)); 
            
    mutator_remset_add_entry(mutator, (Partial_Reveal_Object**)p_slot);
  }
  return;
}
 
 static void gc_object_write_barrier(Managed_Object_Handle p_object) 
 {
+  
+  if( addr_belongs_to_nos(p_object)) return;
+
   Mutator *mutator = (Mutator *)gc_get_tls();
-  GC_Gen* gc = (GC_Gen*)mutator->gc;
-  if( address_belongs_to_nursery((void *)p_object, gc)) return;
   
   Partial_Reveal_Object **p_slot; 
   /* scan array object */
@@ -57,7 +56,7 @@
     int32 array_length = vector_get_length((Vector_Handle) array);
     for (int i = 0; i < array_length; i++) {
       p_slot = (Partial_Reveal_Object **)vector_get_element_address_ref((Vector_Handle) array, i);
-      if( *p_slot != NULL && address_belongs_to_nursery((void *)*p_slot, gc)){
+      if( *p_slot != NULL && addr_belongs_to_nos(*p_slot)){
         mutator_remset_add_entry(mutator, p_slot);
       }
     }   
@@ -70,7 +69,7 @@
   while (true) {
     p_slot = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
     if (p_slot == NULL) break;  
-    if( address_belongs_to_nursery((void *)*p_slot, gc)){
+    if( addr_belongs_to_nos(*p_slot)){
       mutator_remset_add_entry(mutator, p_slot);
     }
     offset_scanner = offset_next_ref(offset_scanner);
@@ -81,7 +80,7 @@
 
 void gc_heap_wrote_object (Managed_Object_Handle p_obj_written)
 {
-  if( !NEED_BARRIER ) return;
+  if( !gc_is_gen_mode() ) return;
   if( object_has_ref_field((Partial_Reveal_Object*)p_obj_written)){
     /* for array copy and object clone */
     gc_object_write_barrier(p_obj_written); 
@@ -97,7 +96,7 @@
 {  
   *p_slot = p_target;
   
-  if( !NEED_BARRIER ) return;
+  if( !gc_is_gen_mode() ) return;
   gc_slot_write_barrier(p_slot, p_target); 
 }
 

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.h?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.h Thu Jan 11 05:57:16 2007
@@ -0,0 +1,50 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#ifndef _GC_FOR_BARRIER_H_
+#define _GC_FOR_BARRIER_H_
+
+#include "../jni/java_support.h"
+
/* TRUE when the GC runs in generational mode (write barriers active).
 * Defined in gc_for_barrier.cpp without an initializer, so it starts
 * zero-initialized (FALSE) until explicitly enabled. */
extern Boolean gen_mode;

/* Query whether generational mode (and hence the write barrier) is on. */
inline Boolean gc_is_gen_mode()
{  return gen_mode; }

/* Enable generational mode and mirror the flag into the Java helper class. */
inline void gc_enable_gen_mode()
{  
  gen_mode = TRUE;
  HelperClass_set_GenMode(TRUE);
}

/* Disable generational mode and mirror the flag into the Java helper class. */
inline void gc_disable_gen_mode()
{  
  gen_mode = FALSE; 
  HelperClass_set_GenMode(FALSE);
}

/* Set generational mode to `status` and mirror it into the Java helper class. */
inline void gc_set_gen_mode(Boolean status)
{
  gen_mode = status; 
  HelperClass_set_GenMode(status);   
}
+
+#endif /* _GC_FOR_BARRIER_H_ */
+



Mime
View raw message