harmony-commits mailing list archives

From wjwashb...@apache.org
Subject svn commit: r495225 [5/5] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen: javasrc/org/apache/harmony/drlvm/gc_gen/ src/common/ src/finalizer_weakref/ src/gen/ src/jni/ src/mark_compact/ src/mark_sweep/ src/thread/ src/trace_forward/ src/utils/ src/verify/
Date Thu, 11 Jan 2007 13:57:19 GMT
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp Thu Jan 11 05:57:16 2007
@@ -22,13 +22,7 @@
 
 static Boolean fspace_alloc_block(Fspace* fspace, Allocator* allocator)
 {
-  Block_Header* alloc_block = (Block_Header* )allocator->alloc_block;
-  /* put back the used block */
-  if(alloc_block != NULL){ /* it is NULL at first time */
-    assert(alloc_block->status == BLOCK_IN_USE);
-    alloc_block->status = BLOCK_USED;
-    alloc_block->free = allocator->free;
-  }
+  alloc_context_reset(allocator);
 
   /* now try to get a new block */
   unsigned int old_free_idx = fspace->free_block_idx;
@@ -41,17 +35,30 @@
       continue;
     }
     /* ok, got one */
-    alloc_block = (Block_Header*)&(fspace->blocks[allocated_idx - fspace->first_block_idx]);
+    Block_Header* alloc_block = (Block_Header*)&(fspace->blocks[allocated_idx - fspace->first_block_idx]);
     assert(alloc_block->status == BLOCK_FREE);
     alloc_block->status = BLOCK_IN_USE;
-    fspace->num_used_blocks++;
-    memset(alloc_block->free, 0, GC_BLOCK_BODY_SIZE_BYTES);
     
     /* set allocation context */
-    allocator->free = alloc_block->free;
+    void* new_free = alloc_block->free;
+    allocator->free = new_free;
+
+#ifndef ALLOC_ZEROING
+
     allocator->ceiling = alloc_block->ceiling;
+    memset(new_free, 0, GC_BLOCK_BODY_SIZE_BYTES);
+
+#else
+    /* the first-time zeroing area includes the block header, so that subsequent allocations stay page-aligned */
+    unsigned int zeroing_size = ZEROING_SIZE - GC_BLOCK_HEADER_SIZE_BYTES;
+    allocator->ceiling = (void*)((unsigned int)new_free + zeroing_size);
+    memset(new_free, 0, zeroing_size);
+
+#endif /* #ifndef ALLOC_ZEROING */
+
+    allocator->end = alloc_block->ceiling;
     allocator->alloc_block = (Block*)alloc_block; 
-    
+        
     return TRUE;
   }
 
@@ -73,7 +80,7 @@
   while( !fspace_alloc_block(fspace, allocator)){
     vm_gc_lock_enum();
     /* after holding lock, try if other thread collected already */
-    if ( !fspace_has_free_block(fspace) ) {  
+    if ( !space_has_free_block((Blocked_Space*)fspace) ) {  
       gc_reclaim_heap(allocator->gc, GC_CAUSE_NOS_IS_FULL); 
     }    
     vm_gc_unlock_enum();  

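For reference, the hunk above replaces eager whole-block zeroing with lazy zeroing under ALLOC_ZEROING: only the first ZEROING_SIZE chunk (minus the block header) is cleared up front, allocator->ceiling is lowered to the end of that zeroed window, and the real block boundary moves to the new allocator->end field. Below is a minimal sketch of the allocation fast path this enables, using the Allocator fields from the hunk; the window-extension helper and its exact look-ahead policy are assumptions, not part of this commit:

  static void* alloc_in_zeroed_window(Allocator* allocator, unsigned int size)
  {
    void* p_return = allocator->free;
    void* new_free = (void*)((unsigned int)p_return + size);

    if(new_free <= allocator->ceiling){      /* fits inside the already-zeroed window */
      allocator->free = new_free;
      return p_return;
    }

    if(new_free <= allocator->end){          /* block has room; extend the zeroed window */
      unsigned int ceiling = (unsigned int)allocator->ceiling;
      unsigned int target  = (unsigned int)new_free + ZEROING_SIZE;  /* zero a chunk ahead */
      if(target > (unsigned int)allocator->end) target = (unsigned int)allocator->end;
      memset((void*)ceiling, 0, target - ceiling);   /* one more chunk, not the whole block */
      allocator->ceiling = (void*)target;
      allocator->free = new_free;
      return p_return;
    }

    return NULL;  /* block exhausted: caller falls back to fspace_alloc_block() */
  }
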
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp Thu Jan 11 05:57:16 2007
@@ -1,224 +0,0 @@
-/*
- *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#include "fspace.h"
-#include "../mark_compact/mspace.h"
-#include "../mark_sweep/lspace.h"
-#include "../thread/collector.h"
-#include "../finalizer_weakref/finalizer_weakref.h"
-
-static volatile Block_Header* current_copy_block;
-static volatile Block_Header* current_target_block;
-
-static Block_Header* fspace_get_first_copy_block(Fspace* fspace)
-{  return (Block_Header*)fspace->blocks; }
-
-static Block_Header* fspace_get_next_copy_block(Fspace* fspace)
-{  
-  /* FIXME::FIXME:: this only works for full space copying */
-  Block_Header* cur_copy_block = (Block_Header*)current_copy_block;
-  
-  while(cur_copy_block != NULL){
-    Block_Header* next_copy_block = current_copy_block->next;
-
-    Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)&current_copy_block, next_copy_block, cur_copy_block);
-    if(temp == cur_copy_block)
-      return cur_copy_block;
-      
-    cur_copy_block = (Block_Header*)current_copy_block;
-  }
-  /* run out fspace blocks for copying */
-  return NULL;
-}
-
-
-/* copying of fspace is only for MAJOR_COLLECTION or non-generational partial copy collection */
-static Block_Header* mspace_get_first_target_block_for_nos(Mspace* mspace)
-{  
-  return (Block_Header*)&mspace->blocks[mspace->free_block_idx-mspace->first_block_idx];
-}
-
-static Block_Header* mspace_get_next_target_block_for_nos(Mspace* mspace)
-{ 
-  Block_Header* mspace_heap_end = (Block_Header*)space_heap_end((Space*)mspace);
-  Block_Header* cur_target_block = (Block_Header*)current_target_block;
-  Block_Header* next_target_block = current_target_block->next;
-  
-  while(cur_target_block < mspace_heap_end){
-    Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)&current_target_block, next_target_block, cur_target_block);
-    if(temp == cur_target_block)
-      return cur_target_block;
-      
-    cur_target_block = (Block_Header*)current_target_block;
-    next_target_block = current_target_block->next;     
-  }
-  /* mos is always able to hold nos in minor collection */
-  assert(0);
-  return NULL;
-}
-
-struct GC_Gen;
-Space* gc_get_mos(GC_Gen* gc);
-
-Boolean fspace_compute_object_target(Collector* collector, Fspace* fspace)
-{  
-  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)collector->gc);
-  Block_Header* dest_block = mspace_get_next_target_block_for_nos(mspace);    
-  Block_Header* curr_block = fspace_get_next_copy_block(fspace);
-
-  assert(dest_block->status == BLOCK_FREE);
-  dest_block->status = BLOCK_USED;
-  void* dest_addr = GC_BLOCK_BODY(dest_block);
-  
-  while( curr_block ){
-    unsigned int mark_bit_idx;
-    Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &mark_bit_idx);
-    
-    while( p_obj ){
-      assert( obj_is_marked_in_vt(p_obj));
-            
-      unsigned int obj_size = vm_object_size(p_obj);
-      
-      if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){
-        dest_block->free = dest_addr;
-        dest_block = mspace_get_next_target_block_for_nos(mspace);
-        if(dest_block == NULL) return FALSE;
-        assert(dest_block->status == BLOCK_FREE);
-        dest_block->status = BLOCK_USED;
-        dest_addr = GC_BLOCK_BODY(dest_block);
-      }
-      assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block));
-      
-      Obj_Info_Type obj_info = get_obj_info(p_obj);
-      if( obj_info != 0 ) {
-        collector->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info));
-      }
-      set_forwarding_pointer_in_obj_info(p_obj, dest_addr);
-
-      /* FIXME: should use alloc to handle alignment requirement */
-      dest_addr = (void *) WORD_SIZE_ROUND_UP((unsigned int) dest_addr + obj_size);
-      p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);
-  
-    }
-    curr_block = fspace_get_next_copy_block(fspace);
-  }
-    
-  return TRUE;
-}   
-
-#include "../verify/verify_live_heap.h"
-
-void fspace_copy_collect(Collector* collector, Fspace* fspace) 
-{  
-  Block_Header* curr_block = fspace_get_next_copy_block(fspace);
-  
-  while( curr_block ){
-    unsigned int mark_bit_idx;
-    Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &mark_bit_idx);
-    
-    while( p_obj ){
-      assert( obj_is_marked_in_vt(p_obj));
-      obj_unmark_in_vt(p_obj);
-      
-      unsigned int obj_size = vm_object_size(p_obj);
-      Partial_Reveal_Object *p_target_obj = get_forwarding_pointer_in_obj_info(p_obj);
-      memmove(p_target_obj, p_obj, obj_size);
-
-      if (verify_live_heap)
-        /* we forwarded it, we need remember it for verification */
-        event_collector_move_obj(p_obj, p_target_obj, collector);
-
-      set_obj_info(p_target_obj, 0);
- 
-      p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);  
-    }
-        
-    curr_block = fspace_get_next_copy_block(fspace);
-  }
-    
-  return;
-}
-
-void gc_update_repointed_refs(Collector* collector);
-
-static volatile unsigned int num_marking_collectors = 0;
-static volatile unsigned int num_installing_collectors = 0;
-
-void mark_copy_fspace(Collector* collector) 
-{  
-  GC* gc = collector->gc;
-  Fspace* fspace = (Fspace*)collector->collect_space;
-  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
-
-  unsigned int num_active_collectors = gc->num_active_collectors;
-  
-  /* Pass 1: mark all live objects in heap, and save all the slots that 
-             have references  that are going to be repointed */
-  atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
-             
-  mark_scan_heap(collector);
-
-  unsigned int old_num = atomic_inc32(&num_marking_collectors);
-  if( ++old_num == num_active_collectors ){
-    /* world for single thread, e.g., verification of last phase, and preparation of next phase */
-    current_copy_block = fspace_get_first_copy_block(fspace);
-    current_target_block = mspace_get_first_target_block_for_nos(mspace);    
-    
-    collector_process_finalizer_weakref(collector);
-    
-    /* let other collectors go */
-    num_marking_collectors++; 
-  }
-  
-  while(num_marking_collectors != num_active_collectors + 1);
-
-  /* Pass 2: assign each live fspace object a new location */
-  atomic_cas32( &num_installing_collectors, 0, num_active_collectors+1);
-
-  fspace_compute_object_target(collector, fspace);  
-
-  old_num = atomic_inc32(&num_installing_collectors);
-  if( ++old_num == num_active_collectors){
-    /* nothing to do in this single thread region */
-    mspace->free_block_idx = current_target_block->block_idx;
-    num_installing_collectors++; 
-  }
-  
-  while(num_installing_collectors != num_active_collectors + 1);
-
-  /* FIXME:: temporary. let only one thread go forward */
-  if( collector->thread_handle != 0 ) return;
-  
-  gc_update_repointed_refs(collector);
-  
-  gc_post_process_finalizer_weakref(gc);
-
-  /* FIXME:: Pass 2 and 3 can be merged into one pass */
-  /* Pass 3: copy live fspace object to new location */
-  current_copy_block = fspace_get_first_copy_block(fspace);
-  fspace_copy_collect(collector, fspace);
-          
-  /* FIXME:: should be collector_restore_obj_info(collector) */
-  gc_restore_obj_info(gc);
-  
-  reset_fspace_for_allocation(fspace);  
-    
-  return;
-}

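The deleted fspace_collect_copy.cpp distributed blocks among parallel collectors with a shared, CAS-advanced cursor (see fspace_get_next_copy_block above). The pattern in isolation, as a sketch with generic names rather than the committed code:

  static volatile Block_Header* cursor;   /* shared; set to the first block before the phase */

  static Block_Header* claim_next_block()
  {
    Block_Header* cur = (Block_Header*)cursor;
    while(cur != NULL){
      Block_Header* next = cur->next;
      /* whoever advances the cursor past cur owns cur */
      Block_Header* temp = (Block_Header*)atomic_casptr((volatile void**)&cursor, next, cur);
      if(temp == cur)
        return cur;
      cur = (Block_Header*)cursor;        /* lost the race; reload and retry */
    }
    return NULL;                          /* all blocks claimed */
  }
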
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp Thu Jan 11 05:57:16 2007
@@ -1,302 +0,0 @@
-
-/*
- *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#include "fspace.h"
-#include "../thread/collector.h"
-#include "../common/gc_metadata.h"
-#include "../finalizer_weakref/finalizer_weakref.h"
-
-static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
-{
-  assert(obj_belongs_to_space(p_obj, (Space*)fspace));  
-  return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
-}
-
-static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) 
-{
-  Partial_Reveal_Object *p_obj = *p_ref;
-  if (p_obj == NULL) return;  
-    
-  /* the slot can be in tspace or fspace, we don't care.
-     we care only if the reference in the slot is pointing to fspace */
-  if (obj_belongs_to_space(p_obj, collector->collect_space))
-    collector_tracestack_push(collector, p_ref); 
-
-  return;
-}
-
-static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) 
-{
-  if (!object_has_ref_field(p_obj)) return;
-    
-  void *slot;
-
-  /* scan array object */
-  if (object_is_array(p_obj)) {
-    Partial_Reveal_Object* array = p_obj;
-    assert(!obj_is_primitive_array(array));
-
-    int32 array_length = vector_get_length((Vector_Handle) array);        
-    for (int i = 0; i < array_length; i++) {
-      slot = vector_get_element_address_ref((Vector_Handle) array, i);
-      scan_slot(collector, (Partial_Reveal_Object **)slot);
-    }   
-    return;
-  }
-
-  /* scan non-array object */
-  int *offset_scanner = init_object_scanner(p_obj);
-  while (true) {
-    slot = offset_get_ref(offset_scanner, p_obj);
-    if (slot == NULL) break;
-  
-    scan_slot(collector, (Partial_Reveal_Object **)slot);
-    offset_scanner = offset_next_ref(offset_scanner);
-  }
-
-  scan_weak_reference(collector, p_obj, scan_slot);
-  
-  return;
-}
-
-/* NOTE:: At this point, p_ref can be in anywhere like root, and other spaces, but *p_ref must be in fspace, 
-   since only slot which points to object in fspace could be added into TraceStack.
-   The problem is the *p_ref may be forwarded already so that, when we come here we find it's pointing to tospace.
-   We will simply return for that case. It might be forwarded due to:
-    1. two difference slots containing same reference; 
-    2. duplicate slots in remset ( we use SSB for remset, no duplication filtering.)
-   The same object can be traced by the thread itself, or by other thread.
-*/
-
-#include "../verify/verify_live_heap.h"
-
-static void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) 
-{
-  Space* space = collector->collect_space; 
-  GC* gc = collector->gc;
-  Partial_Reveal_Object *p_obj = *p_ref;
-
-  if(!obj_belongs_to_space(p_obj, space)) return; 
-
-  /* Fastpath: object has already been forwarded, update the ref slot */
-  if(obj_is_forwarded_in_vt(p_obj)) {
-    *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
-    return;
-  }
-
-  /* only mark the objects that will remain in fspace */
-  if(!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) {
-    assert(!obj_is_forwarded_in_vt(p_obj));
-    /* this obj remains in fspace, remember its ref slot for next GC if p_ref is not root */
-    if( !address_belongs_to_space(p_ref, space) && address_belongs_to_gc_heap(p_ref, gc))
-      collector_remset_add_entry(collector, p_ref); 
-    
-    if(fspace_mark_object((Fspace*)space, p_obj)) 
-      scan_object(collector, p_obj);
-    
-    return;
-  }
-    
-  /* following is the logic for forwarding */  
-  Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj);
-  
-  /* if p_target_obj is NULL, it is forwarded by other thread. 
-      Note: a race condition here, it might be forwarded by other, but not set the 
-      forwarding pointer yet. We need spin here to get the forwarding pointer. 
-      We can implement the collector_forward_object() so that the forwarding pointer 
-      is set in the atomic instruction, which requires to roll back the mos_alloced
-      space. That is easy for thread local block allocation cancellation. */
-  if( p_target_obj == NULL ){
-    *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
-    return;
-  }  
-  /* otherwise, we successfully forwarded */
-  *p_ref = p_target_obj;
-
-  /* we forwarded it, we need remember it for verification. */
-  if(verify_live_heap) {
-    event_collector_move_obj(p_obj, p_target_obj, collector);
-  }
-
-  scan_object(collector, p_target_obj); 
-  return;
-}
-
-static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref)
-{ 
-  forward_object(collector, p_ref);
-  
-  Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack;
-  while( !vector_stack_is_empty(trace_stack)){
-    p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack); 
-    forward_object(collector, p_ref);
-    trace_stack = (Vector_Block*)collector->trace_stack;
-  }
-    
-  return; 
-}
- 
-/* for tracing phase termination detection */
-static volatile unsigned int num_finished_collectors = 0;
-
-static void collector_trace_rootsets(Collector* collector)
-{
-  GC* gc = collector->gc;
-  GC_Metadata* metadata = gc->metadata;
-  
-  unsigned int num_active_collectors = gc->num_active_collectors;
-  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
-
-  Space* space = collector->collect_space;
-  collector->trace_stack = pool_get_entry(metadata->free_task_pool);
-
-  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
-  Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool);
-
-  /* first step: copy all root objects to trace tasks. */ 
-  while(root_set){
-    unsigned int* iter = vector_block_iterator_init(root_set);
-    while(!vector_block_iterator_end(root_set,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
-      iter = vector_block_iterator_advance(root_set,iter);
-      if(*p_ref == NULL) continue;  /* root ref cann't be NULL, but remset can be */
-      if(obj_belongs_to_space(*p_ref, space)){
-        collector_tracestack_push(collector, p_ref);
-      }
-    } 
-    vector_block_clear(root_set);
-    pool_put_entry(metadata->free_set_pool, root_set);
-    root_set = pool_get_entry(metadata->gc_rootset_pool);
-  }
-  /* put back the last trace_stack task */    
-  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
-  
-  /* second step: iterate over the trace tasks and forward objects */
-  collector->trace_stack = pool_get_entry(metadata->free_task_pool);
-
-retry:
-  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
-
-  while(trace_task){    
-    unsigned int* iter = vector_block_iterator_init(trace_task);
-    while(!vector_block_iterator_end(trace_task,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
-      iter = vector_block_iterator_advance(trace_task,iter);
-      assert(*p_ref); /* a task can't be NULL, it was checked before put into the task stack */
-      /* in sequential version, we only trace same object once, but we were using a local hashset for that,
-         which couldn't catch the repetition between multiple collectors. This is subject to more study. */
-   
-      /* FIXME:: we should not let root_set empty during working, other may want to steal it. 
-         degenerate my stack into root_set, and grab another stack */
-   
-      /* a task has to belong to collected space, it was checked before put into the stack */
-      trace_object(collector, p_ref);
-    }
-    vector_stack_clear(trace_task);
-    pool_put_entry(metadata->free_task_pool, trace_task);
-    trace_task = pool_get_entry(metadata->mark_task_pool);
-  }
-  
-  atomic_inc32(&num_finished_collectors);
-  while(num_finished_collectors != num_active_collectors){
-    if( pool_is_empty(metadata->mark_task_pool)) continue;
-    /* we can't grab the task here, because of a race condition. If we grab the task, 
-       and the pool is empty, other threads may fall to this barrier and then pass. */
-    atomic_dec32(&num_finished_collectors);
-    goto retry;      
-  }
-
-  /* now we are done, but each collector has a private stack that is empty */  
-  trace_task = (Vector_Block*)collector->trace_stack;
-  vector_stack_clear(trace_task);
-  pool_put_entry(metadata->free_task_pool, trace_task);   
-  collector->trace_stack = NULL;
-  
-  return;
-}
-
-void trace_forward_fspace(Collector* collector) 
-{  
-  GC* gc = collector->gc;
-  Fspace* space = (Fspace*)collector->collect_space;
- 
-  collector_trace_rootsets(collector);
-  
-  /* the rest work is not enough for parallelization, so let only one thread go */
-  if( collector->thread_handle != 0 ) return;
-
-  collector_process_finalizer_weakref(collector);
-  
-  gc_update_repointed_refs(collector);
-  
-  gc_post_process_finalizer_weakref(gc);
-  
-  reset_fspace_for_allocation(space);  
-
-  return;
-  
-}
-
-Boolean obj_is_dead_in_minor_forward_collection(Collector *collector, Partial_Reveal_Object *p_obj)
-{
-  Space *space = collector->collect_space;
-  Boolean belong_to_nos = obj_belongs_to_space(p_obj, space);
-  
-  if(!belong_to_nos)
-    return FALSE;
-  
-  Boolean space_to_be_forwarded = fspace_object_to_be_forwarded(p_obj, (Fspace*)space);
-  Boolean forwarded = obj_is_forwarded_in_vt(p_obj);
-  Boolean marked = obj_is_marked_in_vt(p_obj);
-  
-  return (space_to_be_forwarded && !forwarded) || (!space_to_be_forwarded && !marked);
-}
-
-void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref)
-{
-  GC *gc = collector->gc;
-  GC_Metadata* metadata = gc->metadata;
-  
-  collector->trace_stack = pool_get_entry(metadata->free_task_pool);
-  collector_tracestack_push(collector, p_ref);
-  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
-  
-//collector->rep_set = pool_get_entry(metadata->free_set_pool); /* has got collector->rep_set in caller */
-  collector->trace_stack = pool_get_entry(metadata->free_task_pool);
-  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
-  while(trace_task){    
-    unsigned int* iter = vector_block_iterator_init(trace_task);
-    while(!vector_block_iterator_end(trace_task,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
-      iter = vector_block_iterator_advance(trace_task,iter);
-      assert(*p_ref);
-      trace_object(collector, p_ref);
-    }
-    vector_stack_clear(trace_task);
-    pool_put_entry(metadata->free_task_pool, trace_task);
-    trace_task = pool_get_entry(metadata->mark_task_pool);
-  }
-  
-  trace_task = (Vector_Block*)collector->trace_stack;
-  vector_stack_clear(trace_task);
-  pool_put_entry(metadata->free_task_pool, trace_task);   
-  collector->trace_stack = NULL;
-}

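The NOTE in forward_object above (kept verbatim in the new pool-based tracers) suggests an alternative that removes the spin: set the forwarding pointer inside the same atomic instruction that claims the object, and cancel the thread-local bump allocation when the CAS loses. A hedged sketch of that idea follows; local_bump_alloc/local_bump_unalloc, obj_info_addr, and FORWARD_BIT are assumed names for illustration, not this commit's API:

  static Partial_Reveal_Object* forward_object_atomically(Collector* collector,
                                                          Partial_Reveal_Object* p_obj)
  {
    unsigned int size = vm_object_size(p_obj);
    /* bump-allocate in the collector's thread-local block (assumed helper) */
    Partial_Reveal_Object* p_copy = (Partial_Reveal_Object*)local_bump_alloc(collector, size);
    if(p_copy == NULL) return NULL;                 /* out of space */

    Obj_Info_Type old_info = get_obj_info(p_obj);
    Obj_Info_Type new_info = (Obj_Info_Type)p_copy | FORWARD_BIT;   /* assumed encoding */

    /* claim + publish in one CAS on the object's info word */
    if( (old_info & FORWARD_BIT) ||
        atomic_cas32((volatile unsigned int*)obj_info_addr(p_obj), new_info, old_info) != old_info ){
      local_bump_unalloc(collector, size);          /* lost: roll back the bump allocation */
      return NULL;                                  /* caller reads the winner's pointer */
    }

    memcpy(p_copy, p_obj, size);                    /* safe: losers only read the pointer */
    set_obj_info(p_copy, old_info);                 /* restore the original info in the copy */
    return p_copy;
  }
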
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp Thu Jan 11 05:57:16 2007
@@ -0,0 +1,313 @@
+
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "fspace.h"
+#include "../thread/collector.h"
+#include "../common/gc_metadata.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
+{
+  assert(obj_belongs_to_nos(p_obj));  
+  return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
+}
+
+static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) 
+{
+  Partial_Reveal_Object *p_obj = *p_ref;
+  if (p_obj == NULL) return;  
+    
+  /* the slot can be in tspace or fspace, we don't care.
+     we care only if the reference in the slot is pointing to fspace */
+  if (obj_belongs_to_nos(p_obj))
+    collector_tracestack_push(collector, p_ref); 
+
+  return;
+}
+
+static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) 
+{
+  if (!object_has_ref_field(p_obj)) return;
+    
+  void *slot;
+
+  /* scan array object */
+  if (object_is_array(p_obj)) {
+    Partial_Reveal_Object* array = p_obj;
+    assert(!obj_is_primitive_array(array));
+
+    int32 array_length = vector_get_length((Vector_Handle) array);        
+    for (int i = 0; i < array_length; i++) {
+      slot = vector_get_element_address_ref((Vector_Handle) array, i);
+      scan_slot(collector, (Partial_Reveal_Object **)slot);
+    }   
+    return;
+  }
+
+  /* scan non-array object */
+  int *offset_scanner = init_object_scanner(p_obj);
+  while (true) {
+    slot = offset_get_ref(offset_scanner, p_obj);
+    if (slot == NULL) break;
+  
+    scan_slot(collector, (Partial_Reveal_Object **)slot);
+    offset_scanner = offset_next_ref(offset_scanner);
+  }
+
+#ifndef BUILD_IN_REFERENT
+  scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+  
+  return;
+}
+
+/* NOTE:: At this point, p_ref can be anywhere: a root, or a slot in another space; but *p_ref must be in fspace, 
+   since only slots pointing to fspace objects are ever pushed onto the trace stack.
+   The problem is that *p_ref may have been forwarded already, so that when we get here it points to tospace.
+   We simply return in that case. It may have been forwarded because:
+    1. two different slots contained the same reference; 
+    2. the remset held duplicate slots (we use an SSB for the remset, with no duplicate filtering).
+   The same object can be traced by this thread or by another thread.
+*/
+
+#include "../verify/verify_live_heap.h"
+
+static void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) 
+{
+  Space* space = collector->collect_space; 
+  GC* gc = collector->gc;
+  Partial_Reveal_Object *p_obj = *p_ref;
+
+  if(!obj_belongs_to_nos(p_obj)) return; 
+
+  /* Fastpath: object has already been forwarded, update the ref slot */
+  if(obj_is_fw_in_oi(p_obj)) {
+    *p_ref = obj_get_fw_in_oi(p_obj);
+    return;
+  }
+
+  /* only mark the objects that will remain in fspace */
+  if(NOS_PARTIAL_FORWARD && !fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) {
+    assert(!obj_is_fw_in_oi(p_obj));
+    /* this obj remains in fspace; remember its ref slot for the next GC if p_ref is not a root. 
+       We don't need to remember root refs. In fact it would be wrong to: roots are re-enumerated and change in the next GC */
+    if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
+      collector_remset_add_entry(collector, p_ref); 
+    
+    if(obj_mark_in_oi(p_obj)) 
+      scan_object(collector, p_obj);
+    
+    return;
+  }
+    
+  /* following is the logic for forwarding */  
+  Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj);
+  
+  /* if p_target_obj is NULL, the object was forwarded by another thread. 
+      Note the race condition here: another thread may have forwarded it but not yet 
+      set the forwarding pointer, so we must spin to read the forwarding pointer. 
+      We could implement collector_forward_object() so that the forwarding pointer 
+      is set by the atomic instruction itself; that requires rolling back the mos-allocated
+      space, which is easy with thread-local block allocation cancellation. */
+  if( p_target_obj == NULL ){
+    if(collector->result == FALSE ){
+      /* failed to forward, let's get back to controller. */
+      vector_stack_clear(collector->trace_stack);
+      return;
+    }
+
+    Partial_Reveal_Object *p_new_obj = obj_get_fw_in_oi(p_obj);
+    assert(p_new_obj);
+    *p_ref = p_new_obj;
+    return;
+  }  
+  /* otherwise, we successfully forwarded */
+  *p_ref = p_target_obj;
+
+  /* we forwarded it, we need remember it for verification. */
+  if(verify_live_heap) {
+    event_collector_move_obj(p_obj, p_target_obj, collector);
+  }
+
+  scan_object(collector, p_target_obj); 
+  return;
+}
+
+static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref)
+{ 
+  forward_object(collector, p_ref);
+  
+  Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack;
+  while( !vector_stack_is_empty(trace_stack)){
+    p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack); 
+    forward_object(collector, p_ref);
+    trace_stack = (Vector_Block*)collector->trace_stack;
+  }
+    
+  return; 
+}
+ 
+/* for tracing phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+static void collector_trace_rootsets(Collector* collector)
+{
+  GC* gc = collector->gc;
+  GC_Metadata* metadata = gc->metadata;
+  
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
+
+  Space* space = collector->collect_space;
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
+  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+  /* first step: copy all root objects to trace tasks. */ 
+  while(root_set){
+    unsigned int* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+      if(*p_ref == NULL) continue;  /* a root ref can't be NULL, but a remset entry can be */
+      if(obj_belongs_to_nos(*p_ref)){
+        collector_tracestack_push(collector, p_ref);
+      }
+    } 
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */    
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+  /* second step: iterate over the trace tasks and forward objects */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
+
+  while(trace_task){    
+    unsigned int* iter = vector_block_iterator_init(trace_task);
+    while(!vector_block_iterator_end(trace_task,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(trace_task,iter);
+      assert(*p_ref); /* a task can't be NULL; it was checked before being put into the task pool */
+      /* in the sequential version we trace the same object only once, using a local hashset;
+         that couldn't catch repetition across multiple collectors. This is subject to more study. */
+   
+      /* FIXME:: we should not let root_set become empty while we work; others may want to steal from it. 
+         degenerate my stack into root_set, and grab another stack */
+   
+      /* a task has to belong to the collected space; that was checked before it was put into the pool */
+      trace_object(collector, p_ref);
+      if(collector->result == FALSE)  break; /* force return */
+    }
+    vector_stack_clear(trace_task);
+    pool_put_entry(metadata->free_task_pool, trace_task);
+    if(collector->result == FALSE){
+      gc_task_pool_clear(metadata->mark_task_pool);
+      break; /* force return */
+    }
+
+    trace_task = pool_get_entry(metadata->mark_task_pool);
+  }
+  
+  atomic_inc32(&num_finished_collectors);
+  while(num_finished_collectors != num_active_collectors){
+    if( pool_is_empty(metadata->mark_task_pool)) continue;
+    /* we can't grab the task here, because of a race condition. If we grab the task, 
+       and the pool is empty, other threads may fall to this barrier and then pass. */
+    atomic_dec32(&num_finished_collectors);
+    goto retry;      
+  }
+
+  /* now we are done, but each collector has a private stack that is empty */  
+  trace_task = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(trace_task);
+  pool_put_entry(metadata->free_task_pool, trace_task);   
+  collector->trace_stack = NULL;
+  
+  return;
+}
+
+void gen_forward_pool(Collector* collector) 
+{  
+  GC* gc = collector->gc;
+  Fspace* space = (Fspace*)collector->collect_space;
+ 
+  collector_trace_rootsets(collector);
+  
+  /* the remaining work is not worth parallelizing, so let only one thread go */
+  if( collector->thread_handle != 0 ) return;
+
+  gc->collect_result = gc_collection_result(gc);
+  if(!gc->collect_result) return;
+
+  if(!IGNORE_FINREF )
+    collector_identify_finref(collector);
+#ifndef BUILD_IN_REFERENT
+  else {
+    gc_set_weakref_sets(gc);
+    update_ref_ignore_finref(collector);
+  }
+#endif
+  
+  gc_fix_rootset(collector);
+  
+  if(!IGNORE_FINREF )
+    gc_put_finref_to_vm(gc);
+  
+  fspace_reset_for_allocation(space);  
+
+  return;
+  
+}
+
+void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref)
+{
+  GC *gc = collector->gc;
+  GC_Metadata* metadata = gc->metadata;
+  
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+  collector_tracestack_push(collector, p_ref);
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+//collector->rep_set = free_set_pool_get_entry(metadata); /* has got collector->rep_set in caller */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
+  while(trace_task){    
+    unsigned int* iter = vector_block_iterator_init(trace_task);
+    while(!vector_block_iterator_end(trace_task,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(trace_task,iter);
+      assert(*p_ref);
+      trace_object(collector, p_ref);
+    }
+    vector_stack_clear(trace_task);
+    pool_put_entry(metadata->free_task_pool, trace_task);
+    trace_task = pool_get_entry(metadata->mark_task_pool);
+  }
+  
+  trace_task = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(trace_task);
+  pool_put_entry(metadata->free_task_pool, trace_task);   
+  collector->trace_stack = NULL;
+}

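One detail worth calling out in fspace_gen_forward_pool.cpp above: under NOS_PARTIAL_FORWARD, a nursery object that survives in place has its referencing slot remembered for the next minor collection only when the slot itself is a heap slot outside the nursery. A minimal restatement of that rule as a helper, using only names that appear in the diff:

  /* remember a slot that still points into the unforwarded part of the nursery;
     root slots are re-enumerated every GC, so remembering them would go stale */
  static void remember_slot_if_needed(Collector* collector, Partial_Reveal_Object** p_ref)
  {
    GC* gc = collector->gc;
    if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
      collector_remset_add_entry(collector, p_ref);   /* SSB remset: duplicates allowed */
  }
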
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp Thu Jan 11 05:57:16 2007
@@ -0,0 +1,255 @@
+
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+
+#include "fspace.h"
+#include "../thread/collector.h"
+#include "../common/gc_metadata.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+#ifdef MARK_BIT_FLIPPING
+
+static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) 
+{
+  Partial_Reveal_Object *p_obj = *p_ref;
+  if(p_obj == NULL) return;  
+    
+  collector_tracestack_push(collector, p_ref); 
+  return;
+}
+
+static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) 
+{
+  if (!object_has_ref_field_before_scan(p_obj)) return;
+    
+  Partial_Reveal_Object **p_ref;
+
+  if (object_is_array(p_obj)) {   /* scan array object */
+  
+    Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len; 
+    p_ref = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array));
+
+    for (unsigned int i = 0; i < array_length; i++) {
+      scan_slot(collector, p_ref+i);
+    }   
+
+  }else{ /* scan non-array object */
+    
+    unsigned int num_refs = object_ref_field_num(p_obj);
+    int* ref_iterator = object_ref_iterator_init(p_obj);
+ 
+    for(unsigned int i=0; i<num_refs; i++){  
+      p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);  
+      scan_slot(collector, p_ref);
+    }    
+
+#ifndef BUILD_IN_REFERENT
+    scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+  
+  }
+
+  return;
+}
+
+/* NOTE:: At this point, p_ref can be anywhere: a root, or a slot in another space; but *p_ref must be in fspace, 
+   since only slots pointing to fspace objects are ever pushed onto the trace stack.
+   The problem is that *p_ref may have been forwarded already, so that when we get here it points to tospace.
+   We simply return in that case. It may have been forwarded because:
+    1. two different slots contained the same reference; 
+    2. the remset held duplicate slots (we use an SSB for the remset, with no duplicate filtering).
+   The same object can be traced by this thread or by another thread.
+*/
+
+#include "../verify/verify_live_heap.h"
+static void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) 
+{
+  GC* gc = collector->gc;
+  Partial_Reveal_Object *p_obj = *p_ref;
+
+  if(!obj_belongs_to_nos(p_obj)){
+    if(obj_mark_in_oi(p_obj))
+      scan_object(collector, p_obj);
+    return;
+  }
+
+  /* following is the logic for forwarding */  
+  Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj);
+  
+  /* if p_target_obj is NULL, the object was forwarded by another thread. 
+      Note the race condition here: another thread may have forwarded it but not yet 
+      set the forwarding pointer, so we must spin to read the forwarding pointer. 
+      We could implement collector_forward_object() so that the forwarding pointer 
+      is set by the atomic instruction itself; that requires rolling back the mos-allocated
+      space, which is easy with thread-local block allocation cancellation. */
+  if( p_target_obj == NULL ){
+    if(collector->result == FALSE ){
+      /* failed to forward, let's get back to controller. */
+      vector_stack_clear(collector->trace_stack);
+      return;
+    }
+
+    Partial_Reveal_Object *p_new_obj = obj_get_fw_in_oi(p_obj);
+    assert(p_new_obj);
+    *p_ref = p_new_obj;
+    return;
+  }
+  /* otherwise, we successfully forwarded */
+  *p_ref = p_target_obj;
+
+  /* we forwarded it, we need remember it for verification. */
+  if(verify_live_heap) {
+    event_collector_move_obj(p_obj, p_target_obj, collector);
+  }
+  
+  scan_object(collector, p_target_obj); 
+  return;
+}
+
+static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref)
+{ 
+  forward_object(collector, p_ref);
+
+  Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack;
+  while( !vector_stack_is_empty(trace_stack)){
+    p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack); 
+    forward_object(collector, p_ref);
+    trace_stack = (Vector_Block*)collector->trace_stack;
+  }
+  return; 
+}
+ 
+/* for tracing phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+static void collector_trace_rootsets(Collector* collector)
+{
+  GC* gc = collector->gc;
+  GC_Metadata* metadata = gc->metadata;
+  
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
+
+  Space* space = collector->collect_space;
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
+  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+  /* first step: copy all root objects to trace tasks. */ 
+  while(root_set){
+    unsigned int* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      Partial_Reveal_Object* p_obj = *p_ref;
+      assert(p_obj != NULL);  /* a root ref can't be NULL, but a remset entry can be */
+
+      collector_tracestack_push(collector, p_ref);
+    } 
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */    
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+  /* second step: iterate over the trace tasks and forward objects */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
+
+  while(trace_task){    
+    unsigned int* iter = vector_block_iterator_init(trace_task);
+    while(!vector_block_iterator_end(trace_task,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(trace_task,iter);
+      trace_object(collector, p_ref);
+      
+      if(collector->result == FALSE)  break; /* force return */
+ 
+    }
+    vector_stack_clear(trace_task);
+    pool_put_entry(metadata->free_task_pool, trace_task);
+
+    if(collector->result == FALSE){
+      gc_task_pool_clear(metadata->mark_task_pool);
+      break; /* force return */
+    }
+    
+    trace_task = pool_get_entry(metadata->mark_task_pool);
+  }
+  
+  /* A collector comes here when it sees an empty mark_task_pool. The last collector to arrive 
+     ensures all the tasks are finished. */
+     
+  atomic_inc32(&num_finished_collectors);
+  while(num_finished_collectors != num_active_collectors){
+    if( pool_is_empty(metadata->mark_task_pool)) continue;
+    /* we can't grab the task here, because of a race condition. If we grab the task, 
+       and the pool is empty, other threads may fall to this barrier and then pass. */
+    atomic_dec32(&num_finished_collectors);
+    goto retry; 
+  }
+
+  /* now we are done, but each collector has a private stack that is empty */  
+  trace_task = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(trace_task);
+  pool_put_entry(metadata->free_task_pool, trace_task);   
+  collector->trace_stack = NULL;
+  
+  return;
+}
+
+void nongen_forward_pool(Collector* collector) 
+{  
+  GC* gc = collector->gc;
+  Fspace* space = (Fspace*)collector->collect_space;
+  
+  collector_trace_rootsets(collector);  
+  /* the remaining work is not worth parallelizing, so let only one thread go */
+  if( collector->thread_handle != 0 ) return;
+
+  gc->collect_result = gc_collection_result(gc);
+  if(!gc->collect_result) return;
+
+  if(!IGNORE_FINREF )
+    collector_identify_finref(collector);
+#ifndef BUILD_IN_REFERENT
+  else {
+    gc_set_weakref_sets(gc);
+    update_ref_ignore_finref(collector);
+  }
+#endif
+  
+  gc_fix_rootset(collector);
+  
+  if(!IGNORE_FINREF )
+    gc_put_finref_to_vm(gc);
+  
+  fspace_reset_for_allocation(space);  
+
+  return;
+  
+}
+
+#endif /* MARK_BIT_FLIPPING */

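The two new files are siblings: fspace_gen_forward_pool.cpp handles the generational minor collection (remsets plus optional partial forwarding of the nursery), while fspace_nongen_forward_pool.cpp, compiled only under MARK_BIT_FLIPPING, traces the full heap and evacuates every reachable nursery object. A hedged sketch of how a collector task might select between them; the dispatch predicate and call site are assumptions for illustration, not part of this diff:

  void collector_minor_collection_task(Collector* collector)
  {
    if(gc_is_gen_mode())                  /* assumed predicate for generational mode */
      gen_forward_pool(collector);        /* scan remset + roots, forward part/all of NOS */
    else
      nongen_forward_pool(collector);     /* trace everything reachable, flip mark bits */
  }
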
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bidir_list.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bidir_list.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bidir_list.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bidir_list.h Thu Jan 11 05:57:16 2007
@@ -22,16 +22,17 @@
 #define _BIDIR_LIST_H_
 
 typedef struct Bidir_List{
+  unsigned int zero;
   Bidir_List* next;
   Bidir_List* prev;
 }Bidir_List;
 
 inline Bidir_List* bidir_list_add_item(Bidir_List* head, Bidir_List* item)
 {
-  item->next = head;
-  item->prev = head->prev;
-  head->prev->next = item;
-  head->prev = item;
+  item->next = head->next;
+  item->prev = head;
+  head->next->prev = item;
+  head->next = item;
   return head;
 }
 

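The bidir_list change flips the insertion point: items now go immediately after the head rather than before it, so walking head->next visits newest items first. The new leading zero word is presumably for layout compatibility with other header structs. A small usage sketch of the shown function; the self-linked empty-list initialization is an assumption:

  Bidir_List head = { 0, &head, &head };  /* empty circular list: head links to itself */
  Bidir_List a = { 0, NULL, NULL };
  Bidir_List b = { 0, NULL, NULL };

  bidir_list_add_item(&head, &a);         /* head -> a -> head */
  bidir_list_add_item(&head, &b);         /* head -> b -> a -> head : newest first */
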
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bit_ops.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bit_ops.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bit_ops.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bit_ops.h Thu Jan 11 05:57:16 2007
@@ -52,7 +52,7 @@
 {
   unsigned int bit_offset;
   
-  assert(start_idx < 128);
+  assert((start_idx >= 0) && (start_idx < 128));
   
   unsigned int start_word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD;
   unsigned int start_bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD;
@@ -79,7 +79,7 @@
 
 inline void words_set_bit(unsigned int* words, unsigned int count, unsigned int start_idx)
 {
-  assert(start_idx < 128);
+  assert((start_idx >= 0) && (start_idx < 128));
   
   unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD;	
   unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD;
@@ -98,7 +98,7 @@
 
 inline void words_clear_bit(unsigned int* words, unsigned int count, unsigned int start_idx)
 {
-  assert(start_idx < 128);
+  assert((start_idx >= 0) && (start_idx < 128));
   
   unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD;
   unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_pool.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_pool.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_pool.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_pool.h Thu Jan 11 05:57:16 2007
@@ -28,7 +28,10 @@
 inline Pool* sync_pool_create(){ return sync_stack_init(); }
 inline void sync_pool_destruct(Pool* pool){ sync_stack_destruct(pool); }
 
-inline Boolean pool_is_empty(Pool* pool){ return stack_is_empty(pool);}
+inline Boolean pool_is_empty(Pool* pool){ return sync_stack_is_empty(pool);}
+
+inline unsigned int pool_size(Pool* pool){ return sync_stack_size(pool); }
+
 inline Vector_Block* pool_get_entry(Pool* pool)
 { 
   Vector_Block* block = (Vector_Block*)sync_stack_pop(pool); 

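sync_pool.h grows two entry points here: pool_is_empty is rerouted to the renamed sync_stack_is_empty, and pool_size is new. Note that pool_size walks the stack via the iterator, so it is O(n) and only a rough count under contention. A quick usage sketch; the Vector_Block 'block' is assumed to come from elsewhere (e.g. the metadata free pool):

  Pool* pool = sync_pool_create();

  pool_put_entry(pool, block);                 /* push a filled Vector_Block */
  unsigned int rough = pool_size(pool);        /* O(n) iterator walk, not a cached field */
  Vector_Block* entry = pool_get_entry(pool);  /* pop; NULL when the pool is empty */

  if(pool_is_empty(pool))
    sync_pool_destruct(pool);
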
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h Thu Jan 11 05:57:16 2007
@@ -21,22 +21,44 @@
 #ifndef _SYNC_STACK_H_
 #define _SYNC_STACK_H_
 
+#include "vector_block.h"
+
+#define SYNC_STACK_VERSION_MASK_SHIFT 10
+#define SYNC_STACK_VERSION_MASK ((1 << SYNC_STACK_VERSION_MASK_SHIFT) - 1)
+
 typedef struct Node{
   Node* next;  
 }Node;
 
+/*
+ * ATTENTION: for reference only.
+ * On some platforms the compiler may lay out this bit-field struct differently from what we expect.
+ */
+typedef struct Stack_Top{
+  unsigned int version: SYNC_STACK_VERSION_MASK_SHIFT;
+  unsigned int entry: (32-SYNC_STACK_VERSION_MASK_SHIFT);
+}Stack_Top;
+
 typedef struct Sync_Stack{
-  Node* top; /* pointing to the first filled entry */
+  Stack_Top top; /* pointing to the first filled entry */
   Node* cur; /* pointing to the current accessed entry, only for iterator */
 }Sync_Stack;
 
+#define stack_top_get_entry(top) ((Node*)((*(unsigned int*)&(top)) & ~SYNC_STACK_VERSION_MASK))
+/* The alternative way: (Node*)(top.entry<<SYNC_STACK_VERSION_MASK_SHIFT) */
+#define stack_top_get_version(top) ((*(unsigned int*)&(top)) & SYNC_STACK_VERSION_MASK)
+/* The alternative way: (top.version) */
+#define stack_top_contruct(entry, version) ((unsigned int)(entry) | (version))
+#define stack_top_get_next_version(top) ((stack_top_get_version(top) + 1) & SYNC_STACK_VERSION_MASK)
+
 inline Sync_Stack* sync_stack_init()
 {
   unsigned int size = sizeof(Sync_Stack);
   Sync_Stack* stack = (Sync_Stack*)STD_MALLOC(size);
   memset(stack, 0, size);
   stack->cur = NULL;
-  stack->top = NULL; 
+  unsigned int temp_top = 0;
+  stack->top = *(Stack_Top*)&temp_top;
   return stack;
 }
 
@@ -48,7 +70,7 @@
 
 inline void sync_stack_iterate_init(Sync_Stack* stack)
 {
-  stack->cur = stack->top;
+  stack->cur = stack_top_get_entry(stack->top);
   return;
 }
 
@@ -62,37 +84,49 @@
       return entry;
     }
     entry = stack->cur;
-  }  
+  }
   return NULL;
 }
 
 inline Node* sync_stack_pop(Sync_Stack* stack)
 {
-  Node* entry = stack->top;
-  while( entry != NULL ){
-    Node* new_entry = entry->next;
-    Node* temp = (Node*)atomic_casptr((volatile void**)&stack->top, new_entry, entry);
-    if(temp == entry){ /* got it */ 
-      entry->next = NULL;
-      return entry;
+  Stack_Top cur_top = stack->top;
+  Node* top_entry = stack_top_get_entry(cur_top);
+  unsigned int version = stack_top_get_version(cur_top);
+  
+  while( top_entry != NULL ){
+    unsigned int temp = stack_top_contruct(top_entry->next, version);
+    Stack_Top new_top = *(Stack_Top*)&temp;
+    temp = (unsigned int)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top);
+    if(temp == *(unsigned int*)&cur_top){ /* got it */ 
+      top_entry->next = NULL;
+      return top_entry;
     }
-    entry = stack->top;
+    cur_top = stack->top;
+    top_entry = stack_top_get_entry(cur_top);
+    version = stack_top_get_version(cur_top);
   }  
   return 0;
 }
 
 inline Boolean sync_stack_push(Sync_Stack* stack, Node* node)
 {
-  Node* entry = stack->top;
-  node->next = entry;
+  Stack_Top cur_top = stack->top;
+  node->next = stack_top_get_entry(cur_top);
+  unsigned int new_version = stack_top_get_next_version(cur_top);
+  unsigned int temp = stack_top_contruct(node, new_version);
+  Stack_Top new_top = *(Stack_Top*)&temp;
   
   while( TRUE ){
-    Node* temp = (Node*)atomic_casptr((volatile void**)&stack->top, node, entry);
-    if(temp == entry){ /* got it */  
+    temp = (unsigned int)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top);
+    if(temp == *(unsigned int*)&cur_top){ /* got it */  
       return TRUE;
     }
-    entry = stack->top;
-    node->next = entry;
+    cur_top = stack->top;
+    node->next = stack_top_get_entry(cur_top);
+    new_version = stack_top_get_next_version(cur_top);
+    temp = stack_top_contruct(node, new_version);
+    new_top = *(Stack_Top*)&temp;
   }
   /* never comes here */
   return FALSE;
@@ -100,9 +134,21 @@
 
 /* it does not matter whether this is atomic or not, because
    it is only invoked when there is no contention or only for rough idea */
-inline Boolean stack_is_empty(Sync_Stack* stack)
+inline Boolean sync_stack_is_empty(Sync_Stack* stack)
+{
+  return (stack_top_get_entry(stack->top) == NULL);
+}
+
+inline unsigned int sync_stack_size(Sync_Stack* stack)
 {
-  return (stack->top == NULL);
+  unsigned int entry_count = 0;
+  
+  sync_stack_iterate_init(stack);
+  while(sync_stack_iterate_next(stack)){
+    ++entry_count;
+  }
+
+  return entry_count;
 }
 
 #endif /* _SYNC_STACK_H_ */

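The sync_stack rewrite above is an ABA fix, not a feature: with the old pointer-only top, a CAS could succeed against a stale snapshot. The failing interleaving, as a worked example:

  T1: reads top = A, computes new top = A->next (call it B), is preempted
  T2: pops A, pops B, then pushes A back            -- top is A again
  T1: resumes; CAS(&top, B, A) succeeds because the pointer still compares
      equal, but B was already popped by T2, so the stack now points at a
      node another thread owns.

Packing a version counter into the low SYNC_STACK_VERSION_MASK_SHIFT bits closes the hole: every push bumps the version, so T1's stale (A, v) no longer matches the current (A, v') and the CAS fails as it should. Only push needs to bump the version, because an ABA requires the same node to be re-pushed, so a push always sits between the stale read and the retried CAS. The scheme borrows the low 10 bits of the entry pointer, so pushed nodes must be aligned to at least 1 << 10 bytes; presumably the 2KB Vector_Blocks stored in these stacks satisfy that.
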
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/vector_block.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/vector_block.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/vector_block.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/vector_block.h Thu Jan 11 05:57:16 2007
@@ -26,9 +26,16 @@
   unsigned int* head;  /* point to the first filled entry */
   unsigned int* tail;  /* point to the entry after the last filled one */
   unsigned int* heap_end;   /* point to heap_end of the block (right after the last entry) */
-  unsigned int* entries[1];
+  unsigned int entries[1];
 }Vector_Block;
 
+
+/* this size had better be a power of two */
+#define VECTOR_BLOCK_DATA_SIZE_BYTES (2*KB)
+
+#define VECTOR_BLOCK_HEADER_SIZE_BYTES ((unsigned int)((Vector_Block*)0)->entries)
+#define VECTOR_BLOCK_ENTRY_NUM ((VECTOR_BLOCK_DATA_SIZE_BYTES - VECTOR_BLOCK_HEADER_SIZE_BYTES) >> BIT_SHIFT_TO_BYTES_PER_WORD)
+
 inline void vector_block_init(Vector_Block* block, unsigned int size)
 {
   block->heap_end = (unsigned int*)((unsigned int)block + size);
@@ -41,15 +48,26 @@
 inline unsigned int vector_block_entry_count(Vector_Block* block)
 { return (unsigned int)(block->tail - block->head); }
 
+/*
 inline Boolean vector_block_is_full(Vector_Block* block)
 { return block->tail == block->heap_end; }
 
 inline Boolean vector_block_is_empty(Vector_Block* block)
 { return block->tail == block->head; }
+*/
+
+inline Boolean vector_block_is_full(Vector_Block* block)
+{ return (block->tail - block->entries) == VECTOR_BLOCK_ENTRY_NUM; }
+
+inline Boolean vector_block_is_empty(Vector_Block* block)
+{ return block->tail == block->entries; }
 
 inline void vector_block_add_entry(Vector_Block* block, unsigned int value)
-{ 
+{
+#ifdef _DEBUG 
   assert(value && !*(block->tail));
+#endif
+
   *(block->tail++) = value; 
 }
 
@@ -88,16 +106,23 @@
 #endif
 }
 
+/*
 inline Boolean vector_stack_is_empty(Vector_Block* block)
 {  return (block->head == block->tail); }
+*/
+
+inline Boolean vector_stack_is_empty(Vector_Block* block)
+{ return (block->head - block->entries) == VECTOR_BLOCK_ENTRY_NUM; }
 
 inline Boolean vector_stack_is_full(Vector_Block* block)
-{  return (block->head == (unsigned int*)block->entries); }
+{  return (block->head == block->entries); }
 
 inline void vector_stack_push(Vector_Block* block, unsigned int value)
 { 
   block->head--;
+#ifdef _DEBUG
   assert(value && !*(block->head));
+#endif
   *(block->head) = value;
 }
 

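vector_block.h now stores entries inline (unsigned int entries[1] instead of an array of pointers) and derives the capacity from the fixed 2KB chunk, so fullness and emptiness are tested against entries directly. The same block serves two roles, as a grow-upward vector and a grow-downward stack. A hedged usage sketch; the allocation call, the vector_stack_init name, and the non-NULL 'value' payload are assumptions:

  Vector_Block* block = (Vector_Block*)STD_MALLOC(VECTOR_BLOCK_DATA_SIZE_BYTES);
  unsigned int value = (unsigned int)some_nonnull_ref;   /* assumed non-NULL payload */

  /* vector mode: head/tail start at entries and grow upward */
  vector_block_init(block, VECTOR_BLOCK_DATA_SIZE_BYTES);
  vector_block_add_entry(block, value);
  unsigned int* iter = vector_block_iterator_init(block);

  /* stack mode: head starts at heap_end and pushes move downward toward entries */
  vector_stack_init(block);                    /* initializer name is an assumption */
  vector_stack_push(block, value);
  unsigned int popped = vector_stack_pop(block);
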
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp Thu Jan 11 05:57:16 2007
@@ -35,3 +35,6 @@
 void event_collector_move_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector)
 { return; }
 
+void event_collector_doublemove_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector)
+{ return; }
+

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.h Thu Jan 11 05:57:16 2007
@@ -29,4 +29,7 @@
 void event_collector_move_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector);
 void gc_verify_heap(GC* gc, Boolean is_before_gc);
 
+/* functions used in fallback compaction and the out-of-space cases */
+void event_collector_doublemove_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector);
+
 #endif


