harmony-commits mailing list archives

From wjwashb...@apache.org
Subject svn commit: r495225 [4/5] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen: javasrc/org/apache/harmony/drlvm/gc_gen/ src/common/ src/finalizer_weakref/ src/gen/ src/jni/ src/mark_compact/ src/mark_sweep/ src/thread/ src/trace_forward/ src/utils/ src/verify/
Date Thu, 11 Jan 2007 13:57:19 GMT
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp Thu Jan 11 05:57:16 2007
@@ -0,0 +1,233 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Chunrong Lai, 2006/12/01
+ */
+
+#include "mspace_collect_compact.h"
+#include "../trace_forward/fspace.h"
+#include "../mark_sweep/lspace.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+struct GC_Gen;
+Space* gc_get_nos(GC_Gen* gc);
+Space* gc_get_mos(GC_Gen* gc);
+Space* gc_get_los(GC_Gen* gc);
+
+#include "../verify/verify_live_heap.h"
+
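+/* Move-compact: live objects are moved in "sectors", i.e. runs of marked objects that share one
+   offset-table entry. For each sector the distance (src - dest) is recorded in the source block's
+   offset table, so the later ref-fixing pass can presumably recompute a reference's new address as
+   the old address minus the recorded distance, without per-object forwarding pointers. */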
+static void mspace_move_objects(Collector* collector, Mspace* mspace) 
+{
+  Block_Header* curr_block = collector->cur_compact_block;
+  Block_Header* dest_block = collector->cur_target_block;
+  
+  void* dest_sector_addr = dest_block->base;
+  Boolean is_fallback = (collector->gc->collect_kind == FALLBACK_COLLECTION);
+  
+ 
+  while( curr_block ){
+    void* start_pos;
+    Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &start_pos);
+
+    if( !p_obj ){
+      curr_block = mspace_get_next_compact_block(collector, mspace);
+      continue;    
+    }
+    
+    int curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
+    void* src_sector_addr = p_obj;
+          
+    while( p_obj ){
+      assert( obj_is_marked_in_vt(p_obj));
+      /* we don't check whether it is set, since only objects left over from the last NOS partial collection need it. */
+      obj_unmark_in_oi(p_obj); 
+      
+      unsigned int curr_sector_size = (unsigned int)start_pos - (unsigned int)src_sector_addr;
+
+      /* check whether the dest block has enough room left for this sector; if not, grab the next one */
+      unsigned int block_end = (unsigned int)GC_BLOCK_END(dest_block);
+      if( ((unsigned int)dest_sector_addr + curr_sector_size) > block_end ){
+        dest_block->new_free = dest_sector_addr; 
+        dest_block = mspace_get_next_target_block(collector, mspace);
+        if(dest_block == NULL){ 
+          collector->result = FALSE; 
+          return; 
+        }
+        block_end = (unsigned int)GC_BLOCK_END(dest_block);
+        dest_sector_addr = dest_block->base;
+      }
+        
+      assert(((unsigned int)dest_sector_addr + curr_sector_size) <= block_end );
+
+      /* check whether the next marked object still belongs to the current sector; if so, loop back. FIXME:: we should add a condition for block check */
+      p_obj =  block_get_next_marked_object(curr_block, &start_pos);
+      if ((p_obj != NULL) && (OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) == curr_sector))
+        continue;
+
+      /* current sector is done, let's move it. */
+      unsigned int sector_distance = (unsigned int)src_sector_addr - (unsigned int)dest_sector_addr;
+      curr_block->table[curr_sector] = sector_distance;
+
+      if (verify_live_heap) {
+      	   Partial_Reveal_Object *rescan_obj = (Partial_Reveal_Object *)src_sector_addr;
+      	   void *rescan_pos = (Partial_Reveal_Object *)((unsigned int)rescan_obj + vm_object_size(rescan_obj));
+      	   while ((unsigned int)rescan_obj < (unsigned int)src_sector_addr + curr_sector_size) {
+    	      Partial_Reveal_Object* targ_obj = (Partial_Reveal_Object *)((unsigned int)rescan_obj- sector_distance);
+             if(is_fallback)
+               event_collector_doublemove_obj(rescan_obj, targ_obj, collector);
+             else
+               event_collector_move_obj(rescan_obj, targ_obj, collector);
+      	      rescan_obj = block_get_next_marked_object(curr_block, &rescan_pos);	
+      	      if(rescan_obj == NULL) break;
+      	   }
+      }
+         
+      memmove(dest_sector_addr, src_sector_addr, curr_sector_size);
+
+      dest_sector_addr = (void*)((unsigned int) dest_sector_addr + curr_sector_size);
+      src_sector_addr = p_obj;
+      curr_sector  = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
+    }
+    curr_block = mspace_get_next_compact_block(collector, mspace);
+  }
+  dest_block->new_free = dest_sector_addr;
+ 
+  return;
+}
+
+#include "../common/fix_repointed_refs.h"
+
+static void mspace_fix_repointed_refs(Collector *collector, Mspace *mspace)
+{
+  Block_Header* curr_block = mspace_block_iterator_next(mspace);
+  
+  while( curr_block){
+    if(curr_block->block_idx >= mspace->free_block_idx) break;    
+    curr_block->free = curr_block->new_free;
+    block_fix_ref_after_marking(curr_block);
+    curr_block = mspace_block_iterator_next(mspace);
+  }
+  
+  return;
+}
+      
+static volatile unsigned int num_marking_collectors = 0;
+static volatile unsigned int num_fixing_collectors = 0;
+static volatile unsigned int num_moving_collectors = 0;
+static volatile unsigned int num_extending_collectors = 0;
+
+void move_compact_mspace(Collector* collector) 
+{
+  GC* gc = collector->gc;
+  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
+  Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc);
+  Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
+  
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  
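+  /* Phase barrier idiom used for every pass below: the atomic_cas32 resets the shared phase
+     counter to 0, each collector increments it once its parallel work is done, the collector
+     whose increment brings it to num_active_collectors runs the single-threaded epilogue, and
+     its extra increment (to num_active_collectors+1) releases the spin-waiting peers. */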
+  /* Pass 1: **************************************************
+     mark all live objects in the heap, and save all the slots
+     holding references that are going to be repointed */
+  unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
+
+  if(gc->collect_kind != FALLBACK_COLLECTION)    
+       mark_scan_heap(collector);  
+  else
+       fallback_mark_scan_heap(collector);  	
+
+  old_num = atomic_inc32(&num_marking_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* last collector's world here */
+    /* prepare for next phase */
+    gc_init_block_for_collectors(gc, mspace); 
+    
+    if(!IGNORE_FINREF )
+      collector_identify_finref(collector);
+#ifndef BUILD_IN_REFERENT
+    else {
+      gc_set_weakref_sets(gc);
+      update_ref_ignore_finref(collector);
+    }
+#endif
+
+    
+    /* let other collectors go */
+    num_marking_collectors++; 
+  }
+  while(num_marking_collectors != num_active_collectors + 1);
+  
+  /* Pass 2: **************************************************
+     move objects and set the forwarding offset table */
+  atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1);
+
+  mspace_move_objects(collector, mspace);   
+  
+  old_num = atomic_inc32(&num_moving_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* single thread world */
+    gc->collect_result = gc_collection_result(gc);
+    if(!gc->collect_result){
+      num_moving_collectors++; 
+      return;
+    }
+ 
+    gc_reset_block_for_collectors(gc, mspace);
+    mspace_block_iterator_init(mspace);
+    num_moving_collectors++; 
+  }
+  while(num_moving_collectors != num_active_collectors + 1);
+  if(!gc->collect_result) return;
+    
+  /* Pass 3: **************************************************
+     update all references whose target objects were moved */
+  old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);
+
+  mspace_fix_repointed_refs(collector, mspace);
+
+  old_num = atomic_inc32(&num_fixing_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* last collector's world here */
+    lspace_fix_repointed_refs(collector, lspace);   
+    gc_fix_rootset(collector);
+    update_mspace_info_for_los_extension(mspace);
+    num_fixing_collectors++; 
+  }
+  while(num_fixing_collectors != num_active_collectors + 1);
+
+   /* Dealing with out of space in mspace */  
+  if(mspace->free_block_idx > fspace->first_block_idx){    
+     atomic_cas32( &num_extending_collectors, 0, num_active_collectors);        
+     mspace_extend_compact(collector);        
+     atomic_inc32(&num_extending_collectors);    
+     while(num_extending_collectors != num_active_collectors);  
+  }
+  
+  /* Leftover: **************************************************
+   */
+  if( collector->thread_handle != 0 ) return;
+
+
+  if(!IGNORE_FINREF )
+    gc_put_finref_to_vm(gc);
+
+  mspace_reset_after_compaction(mspace);
+  fspace_reset_for_allocation(fspace);
+
+  gc_set_pool_clear(gc->metadata->gc_rootset_pool);  
+  
+  return;
+}

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp Thu Jan 11 05:57:16 2007
@@ -0,0 +1,581 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "mspace_collect_compact.h"
+#include "../trace_forward/fspace.h"
+#include "../mark_sweep/lspace.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+//#define VERIFY_SLIDING_COMPACT
+
+struct GC_Gen;
+Space* gc_get_nos(GC_Gen* gc);
+Space* gc_get_mos(GC_Gen* gc);
+Space* gc_get_los(GC_Gen* gc);
+
+#ifdef VERIFY_SLIDING_COMPACT
+typedef struct {
+  unsigned int addr;
+  unsigned int dest_counter;
+  unsigned int collector;
+  Block_Header *src_list[1021];
+} Block_Verify_Info;
+static Block_Verify_Info block_info[32*1024][2];
+#endif
+
+static volatile Block_Header *last_block_for_dest;
+
+static void mspace_compute_object_target(Collector* collector, Mspace* mspace)
+{  
+  Block_Header *curr_block = collector->cur_compact_block;
+  Block_Header *dest_block = collector->cur_target_block;
+  void *dest_addr = dest_block->base;
+  Block_Header *last_src;
+  
+#ifdef VERIFY_SLIDING_COMPACT
+  block_info[(Block*)dest_block-mspace->blocks][0].collector = (unsigned int)collector->thread_handle + 1;
+#endif
+  
+  assert(!collector->rem_set);
+  collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
+  
+  while( curr_block ){
+    void* start_pos;
+    Partial_Reveal_Object *first_obj = block_get_first_marked_obj_prefetch_next(curr_block, &start_pos);
+    if(first_obj){
+      ++curr_block->dest_counter;
+      if(!dest_block->src)
+        dest_block->src = first_obj;
+      else
+        last_src->next_src = first_obj;
+      last_src = curr_block;
+    }
+    Partial_Reveal_Object* p_obj = first_obj;
+ 
+    while( p_obj ){
+      assert( obj_is_marked_in_vt(p_obj));
+
+      unsigned int obj_size = (unsigned int)start_pos - (unsigned int)p_obj;
+      
+      if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){
+        dest_block->new_free = dest_addr;
+        dest_block = mspace_get_next_target_block(collector, mspace);
+        if(dest_block == NULL){ 
+          collector->result = FALSE; 
+          return; 
+        }
+        dest_addr = dest_block->base;
+        dest_block->src = p_obj;
+        last_src = curr_block;
+        if(p_obj != first_obj)
+          ++curr_block->dest_counter;
+
+#ifdef VERIFY_SLIDING_COMPACT
+        block_info[(Block*)dest_block-mspace->blocks][0].collector = (unsigned int)collector->thread_handle + 1;
+#endif
+      }
+      assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block));
+      
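+      /* The forwarding pointer will overwrite the object's info word, so a non-zero obj_info is
+         saved into the collector remset as a (new address, old info) pair and written back by
+         collector_restore_obj_info() after the objects have been moved. */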
+      Obj_Info_Type obj_info = get_obj_info(p_obj);
+
+      if( obj_info != 0 ) {
+        collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
+        collector_remset_add_entry(collector, (Partial_Reveal_Object **)obj_info);
+      }
+      
+      obj_set_fw_in_oi(p_obj, dest_addr);
+      
+      /* FIXME: should use alloc to handle alignment requirement */
+      dest_addr = (void *)((unsigned int) dest_addr + obj_size);
+      p_obj = block_get_next_marked_obj_prefetch_next(curr_block, &start_pos);
+    }
+    
+    curr_block = mspace_get_next_compact_block(collector, mspace);
+  
+  }
+  
+  pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
+  collector->rem_set = NULL;
+  dest_block->new_free = dest_addr;
+  
+  Block_Header *cur_last_dest = (Block_Header *)last_block_for_dest;
+  while(dest_block > last_block_for_dest){
+    atomic_casptr((volatile void **)&last_block_for_dest, dest_block, cur_last_dest);
+    cur_last_dest = (Block_Header *)last_block_for_dest;
+  }
+  
+  return;
+}   
+
+#include "../common/fix_repointed_refs.h"
+
+static void mspace_fix_repointed_refs(Collector* collector, Mspace* mspace)
+{
+  Block_Header* curr_block = mspace_block_iterator_next(mspace);
+
+  /* for MAJOR_COLLECTION, we must iterate over all compact blocks */
+  while( curr_block){
+    block_fix_ref_after_repointing(curr_block); 
+    curr_block = mspace_block_iterator_next(mspace);
+  }
+
+  return;
+}
+
+typedef struct{
+  volatile Block_Header *block;
+  SpinLock lock;
+} Cur_Dest_Block;
+
+static Cur_Dest_Block current_dest_block;
+static volatile Block_Header *next_block_for_dest;
+
+static inline Block_Header *set_next_block_for_dest(Mspace *mspace)
+{
+  assert(!next_block_for_dest);
+  
+  Block_Header *block = mspace_block_iterator_get(mspace);
+  
+  if(block->status != BLOCK_DEST)
+    return block;
+  
+  while(block->status == BLOCK_DEST)
+    block = block->next;
+  next_block_for_dest = block;
+  return block;
+}
+
+#define DEST_NOT_EMPTY ((Block_Header *)0xFF)
+
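+/* dest_counter on a block counts how many destination blocks still expect data to be copied out of
+   it. A block is handed out as the next destination only when its own live data has already been
+   evacuated (dest_counter == 0 and it has a pending src chain), or when the only remaining move out
+   of it is into itself (dest_counter == 1 and its first src object lives in this same block), so
+   live data is never overwritten before it has been moved. */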
+static Block_Header *get_next_dest_block(Mspace *mspace)
+{
+  Block_Header *cur_dest_block;
+  
+  if(next_block_for_dest){
+    cur_dest_block = (Block_Header*)next_block_for_dest;
+    while(cur_dest_block->status == BLOCK_DEST){
+      cur_dest_block = cur_dest_block->next;
+    }
+    next_block_for_dest = cur_dest_block;
+  } else {
+    cur_dest_block = set_next_block_for_dest(mspace);
+  }
+
+//  printf("Getting next dest block:\n");
+//  printf("next_block_for_dest: %d\n\n", next_block_for_dest ? next_block_for_dest->block_idx : 0);
+  
+  unsigned int total_dest_counter = 0;
+  Block_Header *last_dest_block = (Block_Header *)last_block_for_dest;
+  for(; cur_dest_block <= last_dest_block; cur_dest_block = cur_dest_block->next){
+    if(cur_dest_block->status == BLOCK_DEST){
+//      printf("idx: %d  DEST  ", cur_dest_block->block_idx);
+      continue;
+    }
+    if(cur_dest_block->dest_counter == 0 && cur_dest_block->src){
+//      printf("idx: %d  DEST  FOUND!\n\n", cur_dest_block->block_idx);
+      cur_dest_block->status = BLOCK_DEST;
+      return cur_dest_block;
+    } else if(cur_dest_block->dest_counter == 1 && GC_BLOCK_HEADER(cur_dest_block->src) == cur_dest_block){
+//      printf("idx: %d  NON_DEST  FOUND!\n\n", cur_dest_block->block_idx);
+      return cur_dest_block;
+    } else if(cur_dest_block->dest_counter == 0 && !cur_dest_block->src){
+//      printf("idx: %d  NO_SRC  ", cur_dest_block->block_idx);
+      cur_dest_block->status = BLOCK_DEST;
+    } else {
+//      printf("OTHER  ");
+      total_dest_counter += cur_dest_block->dest_counter;
+    }
+  }
+  
+  if(total_dest_counter){
+//    printf("\nNeed refind!\n\n");
+    return DEST_NOT_EMPTY;
+  }
+  return NULL;
+}
+
+static Block_Header *check_dest_block(Mspace *mspace)
+{
+  Block_Header *cur_dest_block;
+  
+  if(next_block_for_dest){
+    cur_dest_block = (Block_Header*)next_block_for_dest;
+    while(cur_dest_block->status == BLOCK_DEST){
+      cur_dest_block = cur_dest_block->next;
+    }
+  } else {
+    cur_dest_block = set_next_block_for_dest(mspace);
+  }
+
+  unsigned int total_dest_counter = 0;
+  Block_Header *last_dest_block = (Block_Header *)last_block_for_dest;
+  for(; cur_dest_block < last_dest_block; cur_dest_block = cur_dest_block->next){
+    if(cur_dest_block->status == BLOCK_DEST)
+      continue;
+    if(cur_dest_block->dest_counter == 0 && cur_dest_block->src){
+      return cur_dest_block;
+    } else if(cur_dest_block->dest_counter == 1 && GC_BLOCK_HEADER(cur_dest_block->src) == cur_dest_block){
+      return cur_dest_block;
+    } else if(cur_dest_block->dest_counter == 0 && !cur_dest_block->src){
+      cur_dest_block->status = BLOCK_DEST;
+    } else {
+      total_dest_counter += cur_dest_block->dest_counter;
+    }
+  }
+  
+  if(total_dest_counter) return DEST_NOT_EMPTY;
+  return NULL;
+}
+
+static inline Partial_Reveal_Object *get_next_first_src_obj(Mspace *mspace)
+{
+  Partial_Reveal_Object *first_src_obj;
+  
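+  /* Collectors race here for work: current_dest_block.lock serializes both the choice of the next
+     destination block and the hand-off of that block's chained source objects, so each source run
+     is copied by exactly one collector. */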
+  while(TRUE){
+    lock(current_dest_block.lock);
+    Block_Header *next_dest_block = (Block_Header *)current_dest_block.block;
+    
+    if (!next_dest_block || !(first_src_obj = next_dest_block->src)){
+      next_dest_block = get_next_dest_block(mspace);
+      if(!next_dest_block){
+        unlock(current_dest_block.lock);
+        return NULL;
+      } else if(next_dest_block == DEST_NOT_EMPTY){
+        unlock(current_dest_block.lock);
+        while(check_dest_block(mspace)==DEST_NOT_EMPTY);
+        continue;
+      }
+      first_src_obj = next_dest_block->src;
+      if(next_dest_block->status == BLOCK_DEST){
+        assert(!next_dest_block->dest_counter);
+        current_dest_block.block = next_dest_block;
+      }
+    }
+    
+    Partial_Reveal_Object *next_src_obj = GC_BLOCK_HEADER(first_src_obj)->next_src;
+    if(next_src_obj && GC_BLOCK_HEADER(get_obj_info_raw(next_src_obj)) != next_dest_block){
+      next_src_obj = NULL;
+    }
+    next_dest_block->src = next_src_obj;
+    unlock(current_dest_block.lock);
+    return first_src_obj;
+  }
+}
+
+static inline void gc_init_block_for_sliding_compact(GC *gc, Mspace *mspace)
+{
+  /* initialize related static variables */
+  next_block_for_dest = NULL;
+  current_dest_block.block = NULL;
+  current_dest_block.lock = FREE_LOCK;
+  mspace_block_iterator_init(mspace);
+
+  return;
+}
+
+
+#include "../verify/verify_live_heap.h"
+extern unsigned int mspace_free_block_idx;
+
+static void mspace_sliding_compact(Collector* collector, Mspace* mspace)
+{
+  void *start_pos;
+  Block_Header *nos_fw_start_block = (Block_Header *)&mspace->blocks[mspace_free_block_idx - mspace->first_block_idx];
+  Boolean is_fallback = (collector->gc->collect_kind == FALLBACK_COLLECTION);
+  
+  while(Partial_Reveal_Object *p_obj = get_next_first_src_obj(mspace)){
+    Block_Header *src_block = GC_BLOCK_HEADER(p_obj);
+    assert(src_block->dest_counter);
+    
+    Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
+    Block_Header *dest_block = GC_BLOCK_HEADER(p_target_obj);
+    
+    /* We don't set start_pos to p_obj, because the memmove of this object may overlap itself;
+     * in that case we could no longer read the correct vt and obj_info from it.
+     */
+    start_pos = obj_end(p_obj);
+    
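+    /* Copy the consecutive run of src_block's objects that all land in dest_block, then credit
+     * src_block with one completed outgoing move (dest_counter--), which may make it eligible as
+     * a destination in get_next_dest_block(). */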
+    do {
+      assert(obj_is_marked_in_vt(p_obj));
+      obj_unmark_in_vt(p_obj);
+      
+      unsigned int obj_size = (unsigned int)start_pos - (unsigned int)p_obj;
+      if(p_obj != p_target_obj){
+        memmove(p_target_obj, p_obj, obj_size);
+
+        if(verify_live_heap){
+          /* we moved it; we need to remember it for verification */
+          if(is_fallback)
+            event_collector_doublemove_obj(p_obj, p_target_obj, collector);
+          else
+            event_collector_move_obj(p_obj, p_target_obj, collector);
+        }
+      }
+      set_obj_info(p_target_obj, 0);
+      
+      p_obj = block_get_next_marked_obj_after_prefetch(src_block, &start_pos);
+      if(!p_obj)
+        break;
+      p_target_obj = obj_get_fw_in_oi(p_obj);
+    
+    } while(GC_BLOCK_HEADER(p_target_obj) == dest_block);
+
+#ifdef VERIFY_SLIDING_COMPACT
+    printf("dest_block: %x   src_block: %x   collector: %x\n", (unsigned int)dest_block, (unsigned int)src_block, (unsigned int)collector->thread_handle);
+#endif
+
+    atomic_dec32(&src_block->dest_counter);
+  }
+
+#ifdef VERIFY_SLIDING_COMPACT
+  static unsigned int fax = 0;
+  fax++;
+  printf("\n\n\nCollector %d   Sliding compact ends!   %d  \n\n\n", (unsigned int)collector->thread_handle, fax);
+#endif
+
+}
+
+#ifdef VERIFY_SLIDING_COMPACT
+
+static void verify_sliding_compact(Mspace *mspace, Boolean before)
+{
+  unsigned int i, j, k;
+  Block_Header *header;
+  
+  if(before)
+    j = 0;
+  else
+    j = 1;
+  
+  for(i = 0, header = (Block_Header *)mspace->blocks;
+      header;
+      header=header->next, ++i)
+  {
+    block_info[i][j].addr = (unsigned int)header;
+    block_info[i][j].dest_counter = header->dest_counter;
+    if(header->src){
+      Partial_Reveal_Object *src_obj = header->src;
+      k = 0;
+      printf("\nHeader: %x %x Collector: %x  ", (unsigned int)header, block_info[i][j].dest_counter, block_info[i][j].collector);
+      Block_Header *dest_header = GC_BLOCK_HEADER(obj_get_fw_in_oi(src_obj));
+      while(dest_header == header){
+        block_info[i][j].src_list[k] = dest_header;
+        Block_Header *src_header = GC_BLOCK_HEADER(src_obj);
+        printf("%x %x ", (unsigned int)src_header, src_header->dest_counter);
+        src_obj = src_header->next_src;
+        if(!src_obj)
+          break;
+        dest_header = GC_BLOCK_HEADER(obj_get_fw_in_oi(src_obj));
+        if(++k >= 1021)
+          assert(0);
+      }
+    }
+  }
+  
+  if(!before){
+    for(i = 0, header = (Block_Header *)mspace->blocks;
+        header;
+        header=header->next, ++i)
+    {
+      Boolean correct = TRUE;
+      if(block_info[i][0].addr != block_info[i][1].addr)
+        correct = FALSE;
+      if(block_info[i][0].dest_counter != block_info[i][1].dest_counter)
+        correct = FALSE;
+      for(k = 0; k < 1021; k++){
+        if(block_info[i][0].src_list[k] != block_info[i][1].src_list[k]){
+          correct = FALSE;
+          break;
+        }
+      }
+      if(!correct)
+        printf("header: %x %x   dest_counter: %x %x   src: %x %x",
+                block_info[i][0].addr, block_info[i][1].addr,
+                block_info[i][0].dest_counter, block_info[i][1].dest_counter,
+                block_info[i][0].src_list[k], block_info[i][1].src_list[k]);
+    }
+    
+    unsigned int *array = (unsigned int *)block_info;
+    memset(array, 0, 1024*32*1024*2);
+  }
+}
+#endif
+
+/*
+#define OI_RESTORING_THRESHOLD 8
+static volatile Boolean parallel_oi_restoring;
+unsigned int mspace_saved_obj_info_size(GC*gc){ return pool_size(gc->metadata->collector_remset_pool);} 
+*/
+
+static volatile unsigned int num_marking_collectors = 0;
+static volatile unsigned int num_repointing_collectors = 0;
+static volatile unsigned int num_fixing_collectors = 0;
+static volatile unsigned int num_moving_collectors = 0;
+static volatile unsigned int num_restoring_collectors = 0;
+static volatile unsigned int num_extending_collectors = 0;
+
+//For_LOS_extend
+void mspace_restore_block_chain(Mspace* mspace)
+{
+  GC* gc = mspace->gc;
+  Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc);
+  if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS) {
+      Block_Header* fspace_last_block = (Block_Header*)&fspace->blocks[fspace->num_managed_blocks - 1];
+      fspace_last_block->next = NULL;
+  }
+}
+
+void slide_compact_mspace(Collector* collector) 
+{
+  GC* gc = collector->gc;
+  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
+  Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc);
+  Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
+  
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  
+  /* Pass 1: **************************************************
+     mark all live objects in the heap, and save all the slots
+     holding references that are going to be repointed */
+  unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
+
+  if(gc->collect_kind != FALLBACK_COLLECTION)
+    mark_scan_heap(collector);
+  else
+    fallback_mark_scan_heap(collector);
+  
+  old_num = atomic_inc32(&num_marking_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* last collector's world here */
+    /* prepare for next phase */
+    gc_init_block_for_collectors(gc, mspace);
+    
+    if(!IGNORE_FINREF )
+      collector_identify_finref(collector);
+#ifndef BUILD_IN_REFERENT
+    else {
+      gc_set_weakref_sets(gc);
+      update_ref_ignore_finref(collector);
+    }
+#endif
+    
+    last_block_for_dest = NULL;
+    
+    /* let other collectors go */
+    num_marking_collectors++; 
+  }
+  while(num_marking_collectors != num_active_collectors + 1);
+  
+  /* Pass 2: **************************************************
+     assign target addresses for all to-be-moved objects */
+  atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1);
+
+  mspace_compute_object_target(collector, mspace);
+  
+  old_num = atomic_inc32(&num_repointing_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* single thread world */
+    gc->collect_result = gc_collection_result(gc);
+    if(!gc->collect_result){
+      num_repointing_collectors++; 
+      assert(0);    // We should not run out of memory here; mspace_extend_compact() backs up this case.
+      return;
+    }
+    
+    gc_reset_block_for_collectors(gc, mspace);
+    mspace_block_iterator_init(mspace);
+    num_repointing_collectors++; 
+  }
+  while(num_repointing_collectors != num_active_collectors + 1);
+  if(!gc->collect_result) return;
+    
+  /* Pass 3: **************************************************
+     update all references whose objects are to be moved */  
+  old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);
+
+  mspace_fix_repointed_refs(collector, mspace);
+
+  old_num = atomic_inc32(&num_fixing_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* last collector's world here */
+    lspace_fix_repointed_refs(collector, lspace);
+    gc_fix_rootset(collector);
+
+    if(!IGNORE_FINREF )
+      gc_put_finref_to_vm(gc);
+      
+#ifdef VERIFY_SLIDING_COMPACT
+    verify_sliding_compact(mspace, TRUE);
+#endif
+    
+    gc_init_block_for_sliding_compact(gc, mspace);
+    num_fixing_collectors++; 
+  }
+  while(num_fixing_collectors != num_active_collectors + 1);
+
+  /* Pass 4: **************************************************
+     move objects                                             */
+  atomic_cas32( &num_moving_collectors, 0, num_active_collectors);
+  
+  mspace_sliding_compact(collector, mspace); 
+  
+  atomic_inc32(&num_moving_collectors);
+  while(num_moving_collectors != num_active_collectors);
+  
+  /* Pass 5: **************************************************
+     restore obj_info                                         */
+  atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1);
+  
+  collector_restore_obj_info(collector);
+  
+  old_num = atomic_inc32(&num_restoring_collectors);
+  if( ++old_num == num_active_collectors ){
+    update_mspace_info_for_los_extension(mspace);
+    
+    num_restoring_collectors++;
+  }
+  while(num_restoring_collectors != num_active_collectors + 1);
+  
+  /* Dealing with out of memory in mspace */
+  if(mspace->free_block_idx > fspace->first_block_idx){
+    atomic_cas32( &num_extending_collectors, 0, num_active_collectors);
+    
+    mspace_extend_compact(collector);
+    
+    atomic_inc32(&num_extending_collectors);
+    while(num_extending_collectors != num_active_collectors);
+  }
+  
+  if( collector->thread_handle != 0 )
+    return;
+  
+  /* Leftover: **************************************************
+   */
+  
+  mspace_reset_after_compaction(mspace);
+  fspace_reset_for_allocation(fspace);
+
+  //For_LOS_extend
+  mspace_restore_block_chain(mspace);
+  
+  gc_set_pool_clear(gc->metadata->gc_rootset_pool);
+  
+  return;
+}

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.cpp Thu Jan 11 05:57:16 2007
@@ -23,12 +23,13 @@
 void free_area_pool_init(Free_Area_Pool* pool)
 {
   for(unsigned int i = 0; i < NUM_FREE_LIST; i ++){
-    Bidir_List* list = &pool->sized_area_list[i];
+    Bidir_List* list = (Bidir_List*)(&pool->sized_area_list[i]);
     list->next = list->prev = list;
+    ((Lockable_Bidir_List*)list)->lock = 0;
+    ((Lockable_Bidir_List*)list)->zero = 0;
   }
   
   memset((void*)pool->list_bit_flag, 0, NUM_FLAG_WORDS << BIT_SHIFT_TO_BYTES_PER_WORD);
-  pool->free_pool_lock = 0;
   return;
 }
 
@@ -51,7 +52,7 @@
   if(index == NUM_FREE_LIST) 
   return NULL; 
   
-  Bidir_List* list = &pool->sized_area_list[index];
+  Bidir_List* list = (Bidir_List*)&pool->sized_area_list[index];
   Free_Area* area = (Free_Area*)list->next;
   
   if(index != MAX_LIST_INDEX)

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.h Thu Jan 11 05:57:16 2007
@@ -31,8 +31,18 @@
 
 #define NUM_FREE_LIST 128
 
+typedef struct Lockable_Bidir_List{
+  /* <-- First couple of fields overloaded as Bidir_List */
+  unsigned int zero;
+  Bidir_List* next;
+  Bidir_List* prev;
+  /* END of Bidir_List --> */
+  unsigned int lock;	
+}Lockable_Bidir_List;
+
 typedef struct Free_Area{
   /* <-- First couple of fields overloadded as Bidir_List */
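+  /* kept zero so the LOS scan in lspace.h can tell a free area (first word 0) from a live object
+     (first word is the non-null vt pointer) */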
+  unsigned int zero;
   Bidir_List* next;
   Bidir_List* prev;
   /* END of Bidir_List --> */
@@ -44,10 +54,13 @@
 {
   assert(ADDRESS_IS_KB_ALIGNED(start));
   assert(ADDRESS_IS_KB_ALIGNED(size));
-  
+
+  //memset(start, 0, size);
+ 
   if( size < GC_OBJ_SIZE_THRESHOLD) return NULL;
   Free_Area* area = (Free_Area*)start;
-  memset(area, 0, size);
+  area->zero = 0;
+  area->next = area->prev = (Bidir_List*)area;
   area->size = size;
   return area;
 }
@@ -55,10 +68,9 @@
 #define NUM_FLAG_WORDS (NUM_FREE_LIST >> BIT_SHIFT_TO_BITS_PER_WORD)
 
 typedef struct Free_Area_Pool{
-  Bidir_List sized_area_list[NUM_FREE_LIST];
+  Lockable_Bidir_List sized_area_list[NUM_FREE_LIST];
   /* each list corresponds to one bit in below vector */
   unsigned int list_bit_flag[NUM_FLAG_WORDS];
-  volatile unsigned int free_pool_lock;
 }Free_Area_Pool;
 
 #define MAX_LIST_INDEX (NUM_FREE_LIST - 1)
@@ -93,7 +105,7 @@
   assert( free_area->size >= GC_OBJ_SIZE_THRESHOLD);
   
   unsigned int index = pool_list_index_with_size(free_area->size);
-  bidir_list_add_item(&(pool->sized_area_list[index]), (Bidir_List*)free_area);
+  bidir_list_add_item((Bidir_List*)&(pool->sized_area_list[index]), (Bidir_List*)free_area);
   
   /* set bit flag of the list */
   pool_list_set_flag(pool, index);
@@ -106,7 +118,7 @@
   bidir_list_remove_item((Bidir_List*)free_area);
   
   /* set bit flag of the list */
-  Bidir_List* list = &(pool->sized_area_list[index]);
+  Bidir_List* list = (Bidir_List*)&(pool->sized_area_list[index]);
   if(list->next == list){
   	pool_list_clear_flag(pool, index);		
   }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp Thu Jan 11 05:57:16 2007
@@ -30,22 +30,17 @@
   assert(lspace);
   memset(lspace, 0, sizeof(Lspace));
 
+  /* commit lspace mem */
   void* reserved_base = start;
   unsigned int committed_size = lspace_size;
-  int status = port_vmem_commit(&reserved_base, committed_size, gc->allocated_memory); 
-  assert(status == APR_SUCCESS && reserved_base == start);
+  vm_commit_mem(reserved_base, lspace_size);
+  memset(reserved_base, 0, lspace_size);
 
-  memset(reserved_base, 0, committed_size);
   lspace->committed_heap_size = committed_size;
-  lspace->reserved_heap_size = lspace_size - committed_size;
+  lspace->reserved_heap_size = committed_size;
   lspace->heap_start = reserved_base;
   lspace->heap_end = (void *)((unsigned int)reserved_base + committed_size);
 
-  /*Treat with mark bit table*/
-  unsigned int num_words = LSPACE_SIZE_TO_MARKTABLE_SIZE_WORDS(lspace_size);
-  lspace->mark_table = (unsigned int*)STD_MALLOC( num_words*BYTES_PER_WORD );
-  memset(lspace->mark_table, 0, num_words*BYTES_PER_WORD);
-  lspace->mark_object_func = lspace_mark_object;
   lspace->move_object = FALSE;
   lspace->gc = gc;
 
@@ -56,6 +51,10 @@
   initial_fa->size = lspace->committed_heap_size;
   free_pool_add_area(lspace->free_pool, initial_fa);
 
+  lspace->num_collections = 0;
+  lspace->time_collections = 0;
+  lspace->survive_ratio = 0.5f;
+
   gc_set_los((GC_Gen*)gc, (Space*)lspace);
   los_boundary = lspace->heap_end;
 
@@ -64,47 +63,42 @@
 
 void lspace_destruct(Lspace* lspace)
 {
-  //FIXME:: decommit lspace space
-  STD_FREE(lspace->mark_table);
   STD_FREE(lspace);
   lspace = NULL;
   return;
 }
 
-Boolean lspace_mark_object(Lspace* lspace, Partial_Reveal_Object* p_obj)
+#include "../common/fix_repointed_refs.h"
+
+/* this is a minor collection; lspace is not swept, so we need to clear the mark bits */
+void lspace_fix_after_copy_nursery(Collector* collector, Lspace* lspace)
 {
-  assert( obj_belongs_to_space(p_obj, (Space*)lspace));
-  unsigned int word_index = OBJECT_WORD_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj);
-  unsigned int bit_offset_in_word = OBJECT_WORD_OFFSET_IN_LSPACE_MARKBIT_TABLE(lspace, p_obj);
-
-  unsigned int* p_word = &(lspace->mark_table[word_index]);
-  unsigned int word_mask = (1<<bit_offset_in_word);
-
-  unsigned int old_value = *p_word;
-  unsigned int new_value = old_value|word_mask;
-
-  while(old_value != new_value){
-    unsigned int temp = atomic_cas32(p_word, new_value, old_value);
-    if(temp == old_value) return TRUE;
-    old_value = *p_word;
-    new_value = old_value|word_mask;
+  unsigned int mark_bit_idx = 0;
+  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx);
+  while( p_obj){
+    assert(obj_is_marked_in_vt(p_obj));
+    obj_unmark_in_vt(p_obj);
+    object_fix_ref_slots(p_obj);
+    p_obj = lspace_get_next_marked_object(lspace, &mark_bit_idx);
   }
-
-  return FALSE;
 }
 
-void reset_lspace_after_copy_nursery(Lspace* lspace)
+void lspace_fix_repointed_refs(Collector* collector, Lspace* lspace)
 {
-  unsigned int marktable_size = LSPACE_SIZE_TO_MARKTABLE_SIZE_BYTES(lspace->committed_heap_size);
-  memset(lspace->mark_table, 0, marktable_size); 
-  return;  
+  unsigned int start_pos = 0;
+  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &start_pos);
+  while( p_obj){
+    assert(obj_is_marked_in_vt(p_obj));
+    object_fix_ref_slots(p_obj);
+    p_obj = lspace_get_next_marked_object(lspace, &start_pos);
+  }
 }
 
 void lspace_collection(Lspace* lspace)
 {
   /* heap is marked already, we need only sweep here. */
+  lspace->num_collections ++;
+  lspace_reset_after_collection(lspace);  
   lspace_sweep(lspace);
-  unsigned int marktable_size = LSPACE_SIZE_TO_MARKTABLE_SIZE_BYTES(lspace->committed_heap_size);
-  memset(lspace->mark_table, 0, marktable_size); 
   return;
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h Thu Jan 11 05:57:16 2007
@@ -25,6 +25,9 @@
 #include "../thread/gc_thread.h"
 #include "free_area_pool.h"
 
+#define GC_MIN_LOS_SIZE ( 4 * 1024 * 1024)
+
+
 typedef struct Lspace{
   /* <-- first couple of fields are overloadded as Space */
   void* heap_start;
@@ -32,104 +35,63 @@
   unsigned int reserved_heap_size;
   unsigned int committed_heap_size;
   unsigned int num_collections;
+  int64 time_collections;
+  float survive_ratio;
   GC* gc;
   Boolean move_object;
-  Boolean (*mark_object_func)(Lspace* space, Partial_Reveal_Object* p_obj);
   /* END of Space --> */
 
 //  void* alloc_free;
   Free_Area_Pool* free_pool;
   
-  unsigned int* mark_table;
-
 }Lspace;
 
 void lspace_initialize(GC* gc, void* reserved_base, unsigned int lspace_size);
 void lspace_destruct(Lspace* lspace);
 Managed_Object_Handle lspace_alloc(unsigned int size, Allocator* allocator);
 void lspace_sweep(Lspace* lspace);
+void lspace_reset_after_collection(Lspace* lspace);
 void lspace_collection(Lspace* lspace);
 
 inline unsigned int lspace_free_memory_size(Lspace* lspace){ /* FIXME:: */ return 0; }
+inline unsigned int lspace_committed_size(Lspace* lspace){ return lspace->committed_heap_size; }
 
-
-#define LSPACE_SIZE_TO_MARKTABLE_SIZE_BITS(space_size) (((space_size) >> BIT_SHIFT_TO_KILO)+1)
-#define LSPACE_SIZE_TO_MARKTABLE_SIZE_BYTES(space_size) ((LSPACE_SIZE_TO_MARKTABLE_SIZE_BITS(space_size)>> BIT_SHIFT_TO_BITS_PER_BYTE)+1) 
-#define LSPACE_SIZE_TO_MARKTABLE_SIZE_WORDS(space_size) ((LSPACE_SIZE_TO_MARKTABLE_SIZE_BYTES(space_size)>> BIT_SHIFT_TO_BYTES_PER_WORD)+1) 
-
-/* The assumption is the offset below is always aligned at word size, because both numbers are aligned */
-#define ADDRESS_OFFSET_IN_LSPACE_BODY(lspace, p_obj) ((unsigned int)p_obj - (unsigned int)space_heap_start((Space*)lspace))
-#define OBJECT_BIT_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj)    (ADDRESS_OFFSET_IN_LSPACE_BODY(lspace, p_obj) >> BIT_SHIFT_TO_KILO)
-#define OBJECT_WORD_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj)   (OBJECT_BIT_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj) >> BIT_SHIFT_TO_BITS_PER_WORD)
-#define OBJECT_WORD_OFFSET_IN_LSPACE_MARKBIT_TABLE(lspace, p_obj)  (OBJECT_BIT_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj) & BIT_MASK_TO_BITS_PER_WORD)
-
-inline Boolean lspace_object_is_marked(Lspace* lspace, Partial_Reveal_Object* p_obj)
+inline Partial_Reveal_Object* lspace_get_next_marked_object( Lspace* lspace, unsigned int* iterate_index)
 {
-  assert( obj_belongs_to_space(p_obj, (Space*)lspace));
-  unsigned int word_index = OBJECT_WORD_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj);
-  unsigned int bit_offset_in_word = OBJECT_WORD_OFFSET_IN_LSPACE_MARKBIT_TABLE(lspace, p_obj);
- 
-  unsigned int markbits = lspace->mark_table[word_index];
-  return markbits & (1<<bit_offset_in_word);
-}
-
+    unsigned int next_area_start = (unsigned int)lspace->heap_start + (*iterate_index) * KB;
+    BOOLEAN reach_heap_end = 0;
 
-inline Partial_Reveal_Object* lspace_get_first_marked_object(Lspace* lspace, unsigned int* mark_bit_idx)
-{
-  unsigned int* mark_table = lspace->mark_table;
-  unsigned int* table_end = mark_table + LSPACE_SIZE_TO_MARKTABLE_SIZE_WORDS(lspace->committed_heap_size);
-  
-  unsigned j=0;
-  unsigned int k=0;
-  while( (mark_table + j) < table_end){
-    unsigned int markbits = *(mark_table+j);
-    if(!markbits){ j++; continue; }
-    while(k<32){
-        if( !(markbits& (1<<k)) ){ k++; continue;}
-        unsigned int kilo_bytes_index = (j<<BIT_SHIFT_TO_BITS_PER_WORD) + k;
-        Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)((char*)lspace->heap_start + kilo_bytes_index * KB);
-        *mark_bit_idx = kilo_bytes_index;
-        return p_obj;
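+    /* The LOS body is walked linearly: a free area starts with a zero word and is skipped by its
+       recorded size; an object starts with its vt pointer, a dead one is skipped by its KB-aligned
+       size and a marked one is returned, updating *iterate_index past it. */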
+    while(!reach_heap_end){
+        //FIXME: This while should be an if, try it!
+        while(!*((unsigned int *)next_area_start)){
+                next_area_start += ((Free_Area*)next_area_start)->size;
+        }
+        if(next_area_start < (unsigned int)lspace->heap_end){
+            //If there is a living object at this addr, return it, and update iterate_index
+            if(obj_is_marked_in_vt((Partial_Reveal_Object*)next_area_start)){
+                unsigned int obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start));
+                *iterate_index = (next_area_start + obj_size - (unsigned int)lspace->heap_start) >> BIT_SHIFT_TO_KILO;
+                return (Partial_Reveal_Object*)next_area_start;
+            //If this is a dead object, go on to find a live one.
+            }else{
+                unsigned int obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start));
+                next_area_start += obj_size;
+            }
+        }else{
+            reach_heap_end = 1;
+        } 
     }
-    j++;
-    k=0;
-  }          
-  *mark_bit_idx = 0;
-  return NULL;   
-}
+    return NULL;
 
+}
 
-inline Partial_Reveal_Object* lspace_get_next_marked_object(Lspace* lspace, unsigned int* mark_bit_idx)
+inline Partial_Reveal_Object* lspace_get_first_marked_object(Lspace* lspace, unsigned int* mark_bit_idx)
 {
-  unsigned int* mark_table = lspace->mark_table;
-  unsigned int* table_end = mark_table + LSPACE_SIZE_TO_MARKTABLE_SIZE_WORDS(lspace->committed_heap_size);
-  unsigned int bit_index = *mark_bit_idx;
-  
-  unsigned int j = bit_index >> BIT_SHIFT_TO_BITS_PER_WORD;
-  unsigned int k = (bit_index & BIT_MASK_TO_BITS_PER_WORD) + 1;  
-     
-  while( (mark_table + j) < table_end){
-    unsigned int markbits = *(mark_table+j);
-    if(!markbits){ j++; continue; }
-    while(k<32){
-      if( !(markbits& (1<<k)) ){ k++; continue;}
-      
-      unsigned int kilo_byte_index = (j<<BIT_SHIFT_TO_BITS_PER_WORD) + k;
-      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)((char*)lspace->heap_start + kilo_byte_index *  KB);      
-      *mark_bit_idx = kilo_byte_index;
-      return p_obj;
-    }
-    j++;
-    k=0;
-  }        
-  
-  *mark_bit_idx = 0;
-  return NULL;   
-
+    return lspace_get_next_marked_object(lspace, mark_bit_idx);
 }
 
-Boolean lspace_mark_object(Lspace* lspace, Partial_Reveal_Object* p_obj);
+void lspace_fix_after_copy_nursery(Collector* collector, Lspace* lspace);
 
-void reset_lspace_after_copy_nursery(Lspace* lspace);
+void lspace_fix_repointed_refs(Collector* collector, Lspace* lspace);
 
 #endif /*_LSPACE_H_ */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp Thu Jan 11 05:57:16 2007
@@ -19,50 +19,202 @@
  */
 
 #include "lspace.h"
-struct GC_Gen;
-Space* gc_get_los(GC_Gen* gc);
+#include "../gen/gen.h"
+#include "../common/space_tuner.h"
 
-void* lspace_alloc(unsigned int size, Allocator *allocator)
-{
-  vm_gc_lock_enum();
-  unsigned int try_count = 0;
-  Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc);
-  Free_Area_Pool* pool = lspace->free_pool;
-  Free_Area* free_area;
-  Free_Area* remain_area;
-  void* p_return = NULL;
+inline void free_pool_lock_nr_list(Free_Area_Pool* pool, unsigned int list_index){
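+    /* Per-list spin lock: CAS the list head's lock word from 0 to 1; on failure spin on a plain
+       read until the lock looks free again, then retry the CAS. */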
+    Lockable_Bidir_List* list_head = &pool->sized_area_list[list_index];
+    while (apr_atomic_casptr( 
+                (volatile void **) &(list_head->lock),
+                (void *) 1, (void *) 0) 
+            != (void *) 0) {
+        while (list_head->lock ==  1) {
+            ;   
+        }
+    }
 
-  while( try_count < 2 ){
-    free_area = free_pool_find_size_area(pool, size);
-    
-    /*Got one free area!*/
-    if(free_area != NULL){
-      assert(free_area->size >= size);
-      free_pool_remove_area(pool, free_area);
-      p_return = (void*)free_area;
-      unsigned int old_size = free_area->size;
-      memset(p_return, 0, sizeof(Free_Area));
-
-      /* we need put the remaining area back if it size is ok.*/
-      void* new_start = (Free_Area*)ALIGN_UP_TO_KILO(((unsigned int)free_area + size));
-      unsigned int alloc_size = (unsigned int)new_start - (unsigned int)free_area;
-      unsigned int new_size = old_size - alloc_size;
-      
-      remain_area = free_area_new(new_start, new_size);
-      if( remain_area )
-        free_pool_add_area(pool, remain_area);
+}
+inline void free_pool_unlock_nr_list(Free_Area_Pool* pool, unsigned int list_index){
+    ((Lockable_Bidir_List*)(&pool->sized_area_list[list_index]))->lock = 0;
+}
+inline unsigned int free_pool_nr_list_is_empty(Free_Area_Pool* pool, unsigned int list_index){
+    Bidir_List* head = (Bidir_List*)(&pool->sized_area_list[list_index]);
+    return (head->next == head);
+}
+inline void* free_pool_former_lists_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int list_hint, unsigned int size)
+{
+    Free_Area* free_area;
+    void* p_result;
+    unsigned int remain_size;
+    unsigned int alloc_size = ALIGN_UP_TO_KILO(size);
+    unsigned int new_list_nr = 0;
+    Lockable_Bidir_List* head = &pool->sized_area_list[list_hint];
+
+    assert(list_hint < MAX_LIST_INDEX);
+
+    free_pool_lock_nr_list(pool, list_hint);
+    /*Other LOS allocation may race with this one, so check list status here.*/
+    if(free_pool_nr_list_is_empty(pool, list_hint)){
+        free_pool_unlock_nr_list(pool, list_hint);
+        return NULL;
+    }
 
-      vm_gc_unlock_enum();
-      return p_return;
+    free_area = (Free_Area*)(head->next);
+    remain_size = free_area->size - alloc_size;
+    if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
+        new_list_nr = pool_list_index_with_size(remain_size);
+        p_result = (void*)((unsigned int)free_area + remain_size);
+        if(new_list_nr == list_hint){
+            free_area->size = remain_size;
+            free_pool_unlock_nr_list(pool, list_hint);
+            return p_result;
+        }else{
+            free_pool_remove_area(pool, free_area);
+            free_pool_unlock_nr_list(pool, list_hint);
+            free_area->size = remain_size;
+            free_pool_lock_nr_list(pool, new_list_nr);
+            free_pool_add_area(pool, free_area);
+            free_pool_unlock_nr_list(pool, new_list_nr);
+            return p_result;            
+        }
+    }
+    else if(remain_size >= 0)
+    {
+        free_pool_remove_area(pool, free_area);
+        free_pool_unlock_nr_list(pool, list_hint);
+        p_result = (void*)((unsigned int)free_area + remain_size);
+        if(remain_size > 0){
+            assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
+            free_area->size = remain_size;
+        }
+        return p_result;
     }
+    /*We never get here, because if the list head is not NULL, it definitely satisfies the request. */
+    assert(0);
+    return NULL;
+}
 
-    if(try_count++ == 0) 
-      gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL);
+inline void* free_pool_last_list_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int size)
+{
+    void* p_result;
+    int remain_size = 0;
+    unsigned int alloc_size = ALIGN_UP_TO_KILO(size);
+    Free_Area* free_area = NULL;
+    Free_Area* new_area = NULL;
+    unsigned int new_list_nr = 0;        
+    Lockable_Bidir_List* head = &(pool->sized_area_list[MAX_LIST_INDEX]);
+    
+    free_pool_lock_nr_list(pool, MAX_LIST_INDEX );
+    /*The last list is empty.*/
+    if(free_pool_nr_list_is_empty(pool, MAX_LIST_INDEX)){
+        free_pool_unlock_nr_list(pool, MAX_LIST_INDEX );                
+        return NULL;
+    }
+    
+    free_area = (Free_Area*)(head->next);
+    while(  free_area != (Free_Area*)head ){
+        remain_size = free_area->size - alloc_size;
+        if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
+            new_list_nr = pool_list_index_with_size(remain_size);
+            p_result = (void*)((unsigned int)free_area + remain_size);
+            if(new_list_nr == MAX_LIST_INDEX){
+                free_area->size = remain_size;
+                free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
+                return p_result;
+            }else{
+                free_pool_remove_area(pool, free_area);
+                free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
+                free_area->size = remain_size;
+                free_pool_lock_nr_list(pool, new_list_nr);
+                free_pool_add_area(pool, free_area);
+                free_pool_unlock_nr_list(pool, new_list_nr);
+                return p_result;            
+            }
+        }
+        else if(remain_size >= 0)
+        {
+            free_pool_remove_area(pool, free_area);
+            free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
+            p_result = (void*)((unsigned int)free_area + remain_size);
+            if(remain_size > 0){
+                assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
+                free_area->size = remain_size;
+            }
+            return p_result;
+        }
+        else free_area = (Free_Area*)free_area->next;
+    }
+    /*No adequate area in the last list*/
+    free_pool_unlock_nr_list(pool, MAX_LIST_INDEX );
+    return NULL;
+}
 
-  }
+void* lspace_alloc(unsigned int size, Allocator *allocator)
+{
+    unsigned int try_count = 0;
+    void* p_result = NULL;
+    unsigned int  list_hint = 0;
+    unsigned int alloc_size = ALIGN_UP_TO_KILO(size);
+    Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc);
+    Free_Area_Pool* pool = lspace->free_pool;
+
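+    /* Two attempts: scan the size-segregated free lists starting from the list that fits
+       alloc_size; if nothing suitable is found, trigger a LOS-is-full collection and retry the
+       scan once before giving up. */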
+    while( try_count < 2 ){
+        list_hint = pool_list_index_with_size(alloc_size);
+        list_hint = pool_list_get_next_flag(pool, list_hint);
+        while((!p_result) && (list_hint <= MAX_LIST_INDEX)){
+            /*List hint is not the last list, so look for it in former lists.*/
+            if(list_hint < MAX_LIST_INDEX){
+                p_result = free_pool_former_lists_atomic_take_area_piece(pool, list_hint, alloc_size);
+                if(p_result){
+                    memset(p_result, 0, size);
+                    return p_result;
+                }else{
+                    list_hint ++;
+                    list_hint = pool_list_get_next_flag(pool, list_hint);
+                    continue;
+                }
+            }
+            /*List hint is the last list, so look for it in the last list.*/
+            else
+            {
+                p_result = free_pool_last_list_atomic_take_area_piece(pool, alloc_size);
+                if(p_result){
+                    memset(p_result, 0, size);
+                    return p_result;
+                }
+                else break;
+            }
+        }
+        /*Failed: no adequate area was found in any list, so GC first, then try once more.*/
+        if(try_count == 0){
+            vm_gc_lock_enum();
+            gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL);
+            vm_gc_unlock_enum();
+            try_count ++;
+        }else{
+            try_count ++;
+        }
+    }
+    return NULL;
+}
 
-  vm_gc_unlock_enum();
-  return NULL;
+void lspace_reset_after_collection(Lspace* lspace)
+{
+    GC* gc = lspace->gc;
+    Space_Tuner* tuner = gc->tuner;
+    unsigned int trans_size = tuner->tuning_size;
+    assert(!(trans_size%GC_BLOCK_SIZE_BYTES));
+    //For_LOS_extend
+    if(tuner->kind == TRANS_FROM_MOS_TO_LOS){
+        void* origin_end = lspace->heap_end;
+        lspace->heap_end = (void*)(((GC_Gen*)gc)->mos->blocks);
+        
+        Free_Area* trans_fa = free_area_new(origin_end, trans_size);
+        free_pool_add_area(lspace->free_pool, trans_fa);
+        lspace->committed_heap_size += trans_size;
+        lspace->reserved_heap_size += trans_size;
+    }
+    los_boundary = lspace->heap_end;
 }
 
 void lspace_sweep(Lspace* lspace)
@@ -70,13 +222,20 @@
   /* reset the pool first because its info is useless now. */
   free_area_pool_reset(lspace->free_pool);
 
-  unsigned int mark_bit_idx, cur_size;
+  unsigned int mark_bit_idx = 0, cur_size = 0;
   void *cur_area_start, *cur_area_end;
 
 
 
   Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start;
   Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx);
+  if(p_next_obj){
+    obj_unmark_in_vt(p_next_obj);
+    /* we need this because, when gen_mode and non_gen_mode alternate, LOS is only marked in
+       non_gen_mode and its mark bits are not reset in gen_mode. When we switch back from gen_mode
+       to non_gen_mode, an object marked in the previous cycle would be taken as already marked and
+       would not be scanned in this cycle. */
+    obj_clear_dual_bits_in_oi(p_next_obj);
+  }
 
   cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj);
   cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
@@ -92,6 +251,10 @@
 
     p_prev_obj = p_next_obj;
     p_next_obj = lspace_get_next_marked_object(lspace, &mark_bit_idx);
+    if(p_next_obj){
+      obj_unmark_in_vt(p_next_obj);
+      obj_clear_dual_bits_in_oi(p_next_obj);
+    }
 
     cur_area_start = (void*)ALIGN_UP_TO_KILO((unsigned int)p_prev_obj + vm_object_size(p_prev_obj));
     cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
@@ -106,6 +269,8 @@
   if( cur_area )
     free_pool_add_area(lspace->free_pool, cur_area);
 
+   mark_bit_idx = 0;
+   assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx));
    return;
 
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp Thu Jan 11 05:57:16 2007
@@ -24,29 +24,27 @@
 #include "../mark_compact/mspace.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
+unsigned int MINOR_COLLECTORS = 0;
+unsigned int MAJOR_COLLECTORS = 0;
 
-static void collector_restore_obj_info(Collector* collector)
+void collector_restore_obj_info(Collector* collector)
 {
-  ObjectMap* objmap = collector->obj_info_map;
-  ObjectMap::iterator obj_iter;
-  for( obj_iter=objmap->begin(); obj_iter!=objmap->end(); obj_iter++){
-    Partial_Reveal_Object* p_target_obj = obj_iter->first;
-    Obj_Info_Type obj_info = obj_iter->second;
-    set_obj_info(p_target_obj, obj_info);     
+  Pool *remset_pool = collector->gc->metadata->collector_remset_pool;
+  Pool *free_pool = collector->gc->metadata->free_set_pool;
+  assert(!collector->rem_set);
+  
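+  /* During target computation each collector pushed (target object, saved obj_info) pairs into
+     collector_remset_pool whenever a non-zero info word had to make room for the forwarding
+     pointer; drain those pairs here and write the info words back. */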
+  while(Vector_Block *oi_block = pool_get_entry(remset_pool)){
+    unsigned int *iter = vector_block_iterator_init(oi_block);
+    while(!vector_block_iterator_end(oi_block, iter)){
+      Partial_Reveal_Object *p_target_obj = (Partial_Reveal_Object *)*iter;
+      iter = vector_block_iterator_advance(oi_block, iter);
+      Obj_Info_Type obj_info = (Obj_Info_Type)*iter;
+      iter = vector_block_iterator_advance(oi_block, iter);
+      set_obj_info(p_target_obj, obj_info);
+    }
+    vector_block_clear(oi_block);
+    pool_put_entry(free_pool, oi_block);
   }
-  objmap->clear();
-  return;  
-}
-
-void gc_restore_obj_info(GC* gc)
-{
-  for(unsigned int i=0; i<gc->num_active_collectors; i++)
-  {
-    Collector* collector = gc->collectors[i];    
-    collector_restore_obj_info(collector);
-  }
-  return;
-  
 }
 
 static void collector_reset_thread(Collector *collector) 
@@ -57,22 +55,25 @@
   vm_reset_event(collector->task_assigned_event);
   vm_reset_event(collector->task_finished_event);
   */
-  
-  alloc_context_reset((Allocator*)collector);
-  
+    
   GC_Metadata* metadata = collector->gc->metadata;
 
+/* TO_REMOVE
+
   assert(collector->rep_set==NULL);
-  if( !gc_requires_barriers() || collector->gc->collect_kind != MINOR_COLLECTION){
-    collector->rep_set = pool_get_entry(metadata->free_set_pool);
+  if( !gc_is_gen_mode() || collector->gc->collect_kind != MINOR_COLLECTION){
+    collector->rep_set = free_set_pool_get_entry(metadata);
   }
+*/
   
-  if(gc_requires_barriers()){
+  if(gc_is_gen_mode() && collector->gc->collect_kind==MINOR_COLLECTION && NOS_PARTIAL_FORWARD){
     assert(collector->rem_set==NULL);
-    collector->rem_set = pool_get_entry(metadata->free_set_pool);
+    collector->rem_set = free_set_pool_get_entry(metadata);
   }
   
+#ifndef BUILD_IN_REFERENT
   collector_reset_weakref_sets(collector);
+#endif
 
   collector->result = TRUE;
   return;
@@ -101,7 +102,14 @@
 static void assign_collector_with_task(GC* gc, TaskType task_func, Space* space)
 {
   /* FIXME:: to adaptively identify the num_collectors_to_activate */
-  gc->num_active_collectors = gc->num_collectors;
+  if( MINOR_COLLECTORS && gc->collect_kind == MINOR_COLLECTION){
+    gc->num_active_collectors = MINOR_COLLECTORS;      
+  }else if ( MAJOR_COLLECTORS && gc->collect_kind != MINOR_COLLECTION){
+    gc->num_active_collectors = MAJOR_COLLECTORS;  
+  }else{
+    gc->num_active_collectors = gc->num_collectors;
+  }
+  
   for(unsigned int i=0; i<gc->num_active_collectors; i++)
   {
     Collector* collector = gc->collectors[i];
@@ -121,7 +129,7 @@
   {
     Collector* collector = gc->collectors[i];
     wait_collector_to_finish(collector);
-  }
+  }  
   return;
 }
 
@@ -139,7 +147,9 @@
     if(task_func == NULL) return 1;
       
     task_func(collector);
-    
+
+    alloc_context_reset((Allocator*)collector);
+
     collector_notify_work_done(collector);
   }
 
@@ -148,7 +158,6 @@
 
 static void collector_init_thread(Collector *collector) 
 {
-  collector->obj_info_map = new ObjectMap();
   collector->rem_set = NULL;
   collector->rep_set = NULL;
 
@@ -193,14 +202,14 @@
 
 struct GC_Gen;
 unsigned int gc_get_processor_num(GC_Gen*);
+
 void collector_initialize(GC* gc)
 {
   //FIXME::
-  unsigned int nthreads = gc_get_processor_num((GC_Gen*)gc);
+  unsigned int num_processors = gc_get_processor_num((GC_Gen*)gc);
   
-  nthreads = (NUM_COLLECTORS==0)?nthreads:NUM_COLLECTORS;
+  unsigned int nthreads = max( max( MAJOR_COLLECTORS, MINOR_COLLECTORS), max(NUM_COLLECTORS, num_processors)); 
 
-  gc->num_collectors = nthreads; 
   unsigned int size = sizeof(Collector *) * nthreads;
   gc->collectors = (Collector **) STD_MALLOC(size); 
   memset(gc->collectors, 0, size);
@@ -217,6 +226,8 @@
     
     gc->collectors[i] = collector;
   }
+
+  gc->num_collectors = NUM_COLLECTORS? NUM_COLLECTORS:num_processors; 
 
   return;
 }
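
The rewritten collector_restore_obj_info above drains a pool of vector blocks in which each entry pair is an object pointer followed by its saved obj_info word. A simplified, self-contained sketch of that pattern (the Obj, VectorBlock and Pool types below are stand-ins, not the drlvm metadata structures):

    #include <cstdint>
    #include <cstdio>
    #include <deque>
    #include <vector>

    struct Obj { std::uint32_t obj_info; };            /* stand-in for an object header   */

    typedef std::vector<std::uintptr_t> VectorBlock;   /* flat array of interleaved slots */
    typedef std::deque<VectorBlock*> Pool;

    static void restore_obj_info(Pool& remset_pool, Pool& free_pool)
    {
      while (!remset_pool.empty()) {
        VectorBlock* block = remset_pool.front();
        remset_pool.pop_front();
        /* entries are interleaved: object pointer, then its saved obj_info */
        for (std::size_t i = 0; i + 1 < block->size(); i += 2) {
          Obj* target = reinterpret_cast<Obj*>((*block)[i]);
          target->obj_info = static_cast<std::uint32_t>((*block)[i + 1]);
        }
        block->clear();                                /* recycle the emptied block */
        free_pool.push_back(block);
      }
    }

    int main()
    {
      Obj a = { 7 }, b = { 9 };
      VectorBlock block = { reinterpret_cast<std::uintptr_t>(&a), 1234u,
                            reinterpret_cast<std::uintptr_t>(&b), 5678u };
      Pool remset = { &block }, free_pool;
      restore_obj_info(remset, free_pool);
      std::printf("a=%u b=%u recycled blocks=%zu\n",
                  (unsigned)a.obj_info, (unsigned)b.obj_info, free_pool.size());
      return 0;
    }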

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h Thu Jan 11 05:57:16 2007
@@ -21,13 +21,15 @@
 #ifndef _COLLECTOR_H_
 #define _COLLECTOR_H_
 
-#include "../common/gc_common.h"
+#include "../common/gc_space.h"
 struct Block_Header;
+struct Stealable_Stack;
 
 typedef struct Collector{
   /* <-- first couple of fields are overloaded as Allocator */
   void *free;
   void *ceiling;
+  void *end;
   void *alloc_block;
   Space* alloc_space;
   GC* gc;
@@ -52,9 +54,6 @@
   Block_Header* cur_compact_block;
   Block_Header* cur_target_block;
   
-  /* during compaction, save non-zero obj_info who's overwritten by forwarding pointer */
-  ObjectMap*  obj_info_map; 
-
   void(*task_func)(void*) ;   /* current task */
   
   unsigned int result;
@@ -67,9 +66,29 @@
 
 void collector_execute_task(GC* gc, TaskType task_func, Space* space);
 
-Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj);
+void collector_restore_obj_info(Collector* collector);
+
+inline Boolean gc_collection_result(GC* gc)
+{
+  Boolean result = TRUE;
+  for(unsigned i=0; i<gc->num_active_collectors; i++){
+    Collector* collector = gc->collectors[i];
+    result &= collector->result;
+  }  
+  return result;
+}
+
+inline void gc_reset_collect_result(GC* gc)
+{
+  for(unsigned i=0; i<gc->num_active_collectors; i++){
+    Collector* collector = gc->collectors[i];
+    collector->result = TRUE;
+  }  
+  
+  gc->collect_result = TRUE;
+  return;
+}
 
-void gc_restore_obj_info(GC* gc);
 
 
 #endif //#ifndef _COLLECTOR_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.cpp Thu Jan 11 05:57:16 2007
@@ -1,59 +0,0 @@
-/*
- *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#include "gc_thread.h"
-
-void* mos_alloc(unsigned size, Allocator *allocator);
-
-/* NOS forward obj to MOS in MINOR_COLLECTION */
-Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj)
-{
-  Partial_Reveal_VTable* vt = obj_get_vtraw(p_obj);
-
-  /* forwarded by somebody else */
-  if ((unsigned int)vt & FORWARDING_BIT_MASK){
-     assert(!obj_is_marked_in_vt(p_obj));
-     return NULL;
-  }
-  
-  /* otherwise, try to alloc it. mos should always has enough space to hold nos during collection */
-  unsigned int size = vm_object_size(p_obj);
-  Partial_Reveal_Object* p_targ_obj = (Partial_Reveal_Object*)mos_alloc(size, (Allocator*)collector);  
-  assert(p_targ_obj); 
-    
-  /* else, take the obj by setting the forwarding flag atomically 
-     we don't put a simple bit in vt because we need compute obj size later. */
-  if ((unsigned int)vt != atomic_cas32((unsigned int*)obj_get_vtraw_addr(p_obj), ((unsigned int)p_targ_obj|FORWARDING_BIT_MASK), (unsigned int)vt)) {
-    /* forwarded by other, we need unalloc the allocated obj. We may waste some space if the allocation switched
-       block. The remaining part of the switched block cannot be revivied for next allocation of 
-       object that has smaller size than this one. */
-    assert( obj_is_forwarded_in_vt(p_obj) && !obj_is_marked_in_vt(p_obj));
-    thread_local_unalloc(size, (Allocator*)collector);
-    return NULL;
-  }
-
-  /* we forwarded the object */
-  memcpy(p_targ_obj, p_obj, size);
-  /* because p_obj has forwarding pointer in its vt, we set it seperately here */
-  obj_set_vt(p_targ_obj, (Allocation_Handle)vt);
-  
-  return p_targ_obj;  
- 
-}

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h Thu Jan 11 05:57:16 2007
@@ -0,0 +1,80 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#ifndef _COLLECTOR_ALLOC_H_
+#define _COLLECTOR_ALLOC_H_
+
+#include "gc_thread.h"
+
+void* mos_alloc(unsigned size, Allocator *allocator);
+
+/* NOS forward obj to MOS in MINOR_COLLECTION */
+inline Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj)
+{
+  Obj_Info_Type oi = get_obj_info_raw(p_obj);
+
+  /* forwarded by somebody else */
+  if ((unsigned int)oi & FORWARD_BIT){
+     return NULL;
+  }
+  
+  /* otherwise, try to alloc it. mos should always have enough space to hold nos during collection */
+  unsigned int size = vm_object_size(p_obj);
+
+  Partial_Reveal_Object* p_targ_obj = thread_local_alloc(size, (Allocator*)collector);
+  if(!p_targ_obj)
+    p_targ_obj = (Partial_Reveal_Object*)mos_alloc(size, (Allocator*)collector);
+    
+  if(p_targ_obj == NULL){
+    /* failed to forward an obj */
+    collector->result = FALSE;
+    return NULL;
+  }
+    
+  /* else, take the obj by setting the forwarding flag atomically;
+     we don't put a simple bit in vt because we need to compute the obj size later. */
+  if ((unsigned int)oi != atomic_cas32((unsigned int*)get_obj_info_addr(p_obj), ((unsigned int)p_targ_obj|FORWARD_BIT), (unsigned int)oi)) {
+    /* forwarded by somebody else; we need to unalloc the allocated obj. We may waste some space if the
+       allocation switched blocks: the remaining part of the switched block cannot be revived for a later
+       allocation of an object smaller than this one. */
+    assert( obj_is_fw_in_oi(p_obj));
+    thread_local_unalloc(size, (Allocator*)collector);
+    return NULL;
+  }
+
+  /* we forwarded the object */
+  memcpy(p_targ_obj, p_obj, size);
+
+  /* we need to clear the bits to give major collection a clean status. */
+  if(gc_is_gen_mode())
+    set_obj_info(p_targ_obj, oi&DUAL_MARKBITS_MASK);
+
+#ifdef MARK_BIT_FLIPPING 
+  /* we need to set MARK_BIT to indicate this object has been processed for nongen forwarding */
+  else
+    set_obj_info(p_targ_obj, oi|FLIP_MARK_BIT);
+
+#endif
+
+  return p_targ_obj;  
+ 
+}
+
+#endif /* _COLLECTOR_ALLOC_H_ */
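
The forwarding fast path added above claims an object by compare-and-swapping a forwarding pointer (tagged with FORWARD_BIT) into its obj_info word; a thread that loses the race unallocs its tentative copy. A stripped-down sketch of that claim-by-CAS idea, using simplified stand-in types rather than the real Partial_Reveal_Object layout:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    const std::uintptr_t FORWARD_BIT = 0x1;

    struct Obj {
      std::atomic<std::uintptr_t> header;   /* obj_info word, overloaded with the forwarding pointer */
      unsigned payload;
    };

    /* returns the new copy if this thread forwarded the object,
       NULL if it was (or is being) forwarded by somebody else */
    static Obj* forward_object(Obj* from, Obj* to_space_slot)
    {
      std::uintptr_t old_header = from->header.load(std::memory_order_relaxed);
      if (old_header & FORWARD_BIT)
        return nullptr;                      /* already forwarded by somebody else */

      std::uintptr_t fw = reinterpret_cast<std::uintptr_t>(to_space_slot) | FORWARD_BIT;
      if (!from->header.compare_exchange_strong(old_header, fw))
        return nullptr;                      /* lost the race; real code unallocs its copy here */

      /* we won: copy the object and give the copy a clean header again */
      to_space_slot->payload = from->payload;
      to_space_slot->header.store(old_header, std::memory_order_relaxed);
      return to_space_slot;
    }

    int main()
    {
      Obj from = { {0}, 42 };
      Obj slot = { {0}, 0 };
      Obj* copy = forward_object(&from, &slot);
      std::printf("forwarded=%d payload=%u\n", copy != nullptr, slot.payload);
      return 0;
    }

Whichever thread wins the CAS is the only one that copies the object, so every reference ends up retargeted to a single to-space copy.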

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h Thu Jan 11 05:57:16 2007
@@ -21,9 +21,12 @@
 #ifndef _GC_THREAD_H_
 #define _GC_THREAD_H_
 
-#include "../common/gc_block.h"
+#include "../common/gc_space.h"
 #include "../common/gc_metadata.h"
 
+#define ALLOC_ZEROING
+#define ZEROING_SIZE  2*KB
+
 extern unsigned int tls_gc_offset;
 
 inline void* gc_get_tls()
@@ -42,6 +45,7 @@
 typedef struct Allocator{
   void *free;
   void *ceiling;
+  void* end;
   Block *alloc_block;
   Space* alloc_space;
   GC   *gc;
@@ -50,31 +54,76 @@
 
 inline void thread_local_unalloc(unsigned int size, Allocator* allocator)
 {
-    void* free = allocator->free;    
-    allocator->free = (void*)((unsigned int)free - size);
-    return;
+  void* free = allocator->free;    
+  allocator->free = (void*)((unsigned int)free - size);
+  return;
+}
+
+#ifdef ALLOC_ZEROING
+
+inline Partial_Reveal_Object* thread_local_alloc_zeroing(unsigned int size, Allocator* allocator)
+{
+  unsigned int  free = (unsigned int)allocator->free;
+  unsigned int ceiling = (unsigned int)allocator->ceiling;
+  
+  unsigned int new_free = free + size;
+  
+  unsigned int block_ceiling = (unsigned int)allocator->end;
+  if( new_free > block_ceiling) 
+    return NULL;
+
+  unsigned int new_ceiling;
+  new_ceiling =  new_free + ZEROING_SIZE;
+  if( new_ceiling > block_ceiling )
+    new_ceiling = block_ceiling;
+
+  allocator->ceiling = (void*)new_ceiling;
+  allocator->free = (void*)new_free;
+  memset((void*)ceiling, 0, new_ceiling - ceiling);
+  return (Partial_Reveal_Object*)free;
+
 }
 
+#endif /* ALLOC_ZEROING */
+
 inline Partial_Reveal_Object* thread_local_alloc(unsigned int size, Allocator* allocator)
 {
-    void* free = allocator->free;
-    void* ceiling = allocator->ceiling;
-    
-    void* new_free = (void*)((unsigned int)free + size);
+  unsigned int  free = (unsigned int)allocator->free;
+  unsigned int ceiling = (unsigned int)allocator->ceiling;
+  
+  unsigned int new_free = free + size;
     
-    if (new_free <= ceiling){
-    	allocator->free= new_free;
-    	return (Partial_Reveal_Object*)free;
-    }
+  if (new_free <= ceiling){
+  	allocator->free= (void*)new_free;
+    return (Partial_Reveal_Object*)free;
+  }
+
+#ifndef ALLOC_ZEROING
+  
+  return NULL;
+
+#else
+
+  return thread_local_alloc_zeroing(size, allocator);
+
+#endif /* #ifndef ALLOC_ZEROING */
 
-    return NULL;
 }
 
 inline void alloc_context_reset(Allocator* allocator)
 {
-  allocator->free = NULL;
-  allocator->ceiling = NULL;
-  allocator->alloc_block = NULL;
+  Block_Header* block = (Block_Header*)allocator->alloc_block;
+  /* it can be NULL if GC happens before the mutator resumes, or when this is called by a collector */
+  if( block != NULL ){ 
+    assert(block->status == BLOCK_IN_USE);
+    block->free = allocator->free;
+    block->status = BLOCK_USED;
+    allocator->alloc_block = NULL;
+  }
+    
+   allocator->free = NULL;
+   allocator->ceiling = NULL;
+   allocator->end = NULL;
   
   return;
 }
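
The new ALLOC_ZEROING path above avoids clearing a whole block when it is handed to an allocator: the fast path bumps 'free' below 'ceiling' (the already-zeroed watermark), and only when an allocation crosses that watermark is another ZEROING_SIZE chunk memset and the ceiling advanced. A self-contained sketch of the scheme, with simplified types and an arbitrary block size chosen for illustration:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    const unsigned ZEROING_SIZE = 2 * 1024;

    struct Allocator {
      char* free;      /* next allocation address          */
      char* ceiling;   /* end of the already-zeroed region */
      char* end;       /* hard end of the current block    */
    };

    static void* local_alloc(Allocator* a, unsigned size)
    {
      char* result   = a->free;
      char* new_free = result + size;

      if (new_free <= a->ceiling) {          /* fast path: still inside the zeroed region */
        a->free = new_free;
        return result;
      }
      if (new_free > a->end)                 /* block exhausted: caller must grab a new block */
        return nullptr;

      /* slow path: zero another chunk and advance the watermark */
      char* new_ceiling = new_free + ZEROING_SIZE;
      if (new_ceiling > a->end) new_ceiling = a->end;
      std::memset(a->ceiling, 0, new_ceiling - a->ceiling);
      a->ceiling = new_ceiling;
      a->free    = new_free;
      return result;
    }

    int main()
    {
      const unsigned block_size = 16 * 1024;
      char* block = static_cast<char*>(std::malloc(block_size));
      Allocator a = { block, block, block + block_size };   /* nothing zeroed yet */
      unsigned allocated = 0;
      while (local_alloc(&a, 200)) allocated += 200;
      std::printf("allocated %u bytes, zeroed up to offset %ld\n",
                  allocated, (long)(a.ceiling - block));
      std::free(block);
      return 0;
    }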

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp Thu Jan 11 05:57:16 2007
@@ -28,18 +28,19 @@
 {
   /* FIXME:: make sure gc_info is cleared */
   Mutator *mutator = (Mutator *)STD_MALLOC(sizeof(Mutator));
-  mutator->free = NULL;
-  mutator->ceiling = NULL;
-  mutator->alloc_block = NULL;
+  memset(mutator, 0, sizeof(Mutator));
   mutator->alloc_space = gc_get_nos((GC_Gen*)gc);
   mutator->gc = gc;
     
-  if(gc_requires_barriers()){
-    mutator->rem_set = pool_get_entry(gc->metadata->free_set_pool);
+  if(gc_is_gen_mode()){
+    mutator->rem_set = free_set_pool_get_entry(gc->metadata);
     assert(vector_block_is_empty(mutator->rem_set));
   }
   
-  mutator->objects_with_finalizer = finalizer_weakref_get_free_block();
+  if(!IGNORE_FINREF )
+    mutator->obj_with_fin = finref_get_free_block();
+  else
+    mutator->obj_with_fin = NULL;
        
   lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 
@@ -60,14 +61,16 @@
 
   Mutator *mutator = (Mutator *)gc_get_tls();
 
-  if(gc_requires_barriers()){ /* put back the remset when a mutator exits */
+  alloc_context_reset((Allocator*)mutator);
+
+  if(gc_is_gen_mode()){ /* put back the remset when a mutator exits */
     pool_put_entry(gc->metadata->mutator_remset_pool, mutator->rem_set);
     mutator->rem_set = NULL;
   }
   
-  if(mutator->objects_with_finalizer){
-    pool_put_entry(gc->finalizer_weakref_metadata->objects_with_finalizer_pool, mutator->objects_with_finalizer);
-    mutator->objects_with_finalizer = NULL;
+  if(mutator->obj_with_fin){
+    pool_put_entry(gc->finref_metadata->obj_with_fin_pool, mutator->obj_with_fin);
+    mutator->obj_with_fin = NULL;
   }
 
   lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
@@ -96,9 +99,19 @@
 {
   Mutator *mutator = gc->mutator_list;
   while (mutator) {
-    mutator->rem_set = pool_get_entry(gc->metadata->free_set_pool);
     alloc_context_reset((Allocator*)mutator);    
-    mutator_reset_objects_with_finalizer(mutator);
+    mutator = mutator->next;
+  }  
+  return;
+}
+
+void gc_prepare_mutator_remset(GC* gc)
+{
+  Mutator *mutator = gc->mutator_list;
+  while (mutator) {
+    mutator->rem_set = free_set_pool_get_entry(gc->metadata);
+    if(!IGNORE_FINREF )
+      mutator_reset_obj_with_fin(mutator);
     mutator = mutator->next;
   }  
   return;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h Thu Jan 11 05:57:16 2007
@@ -21,13 +21,14 @@
 #ifndef _MUTATOR_H_
 #define _MUTATOR_H_
 
-#include "../common/gc_common.h"
+#include "../common/gc_space.h"
 
 /* Mutator thread local information for GC */
 typedef struct Mutator {
   /* <-- first couple of fields are overloaded as Allocator */
-	void*	free;
-	void*	ceiling;
+  void* free;
+  void* ceiling;
+  void* end;
   void* alloc_block;
   Space* alloc_space;
   GC* gc;
@@ -35,7 +36,7 @@
   /* END of Allocator --> */
   
   Vector_Block* rem_set;
-  Vector_Block* objects_with_finalizer;
+  Vector_Block* obj_with_fin;
   Mutator* next;  /* The gc info area associated with the next active thread. */
 } Mutator;
 
@@ -44,5 +45,6 @@
 void mutator_reset(GC *gc);
 
 void gc_reset_mutator_context(GC* gc);
+void gc_prepare_mutator_remset(GC* gc);
 
 #endif /*ifndef _MUTATOR_H_ */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp Thu Jan 11 05:57:16 2007
@@ -22,11 +22,7 @@
 
 #include "../gen/gen.h"
 
-#include "../finalizer_weakref/finalizer_weakref_metadata.h"
-
-/* classloader sometimes sets the bit for finalizible objects (?) */
-inline unsigned int get_instance_data_size (unsigned int encoded_size) 
-{    return (encoded_size & NEXT_TO_HIGH_BIT_CLEAR_MASK); }
+#include "../finalizer_weakref/finalizer_weakref.h"
 
 Managed_Object_Handle gc_alloc(unsigned size, Allocation_Handle ah, void *unused_gc_tls) 
 {
@@ -36,22 +32,20 @@
   assert((size % GC_OBJECT_ALIGNMENT) == 0);
   assert(ah);
 
-  /* FIXME:: this is outdated actually */
-  size = get_instance_data_size(size);
-  
-  Mutator* mutator = (Mutator*)gc_get_tls();
-  
+  Allocator* allocator = (Allocator*)gc_get_tls();
+   
   if ( size > GC_OBJ_SIZE_THRESHOLD )
-    p_obj = (Managed_Object_Handle)los_alloc(size, (Allocator*)mutator);
-  else
-    p_obj = (Managed_Object_Handle)nos_alloc(size, (Allocator*)mutator);
+    p_obj = (Managed_Object_Handle)los_alloc(size, allocator);
+  else{
+    p_obj = (Managed_Object_Handle)nos_alloc(size, allocator);
+  }
   
   if( p_obj == NULL ) return NULL;
     
   obj_set_vt((Partial_Reveal_Object*)p_obj, ah);
   
-  if(type_has_finalizer((Partial_Reveal_VTable *)ah))
-    mutator_finalizer_add_entry(mutator, (Partial_Reveal_Object*)p_obj);
+  if(!IGNORE_FINREF && type_has_finalizer((Partial_Reveal_VTable *)ah))
+    mutator_add_finalizer((Mutator*)allocator, (Partial_Reveal_Object*)p_obj);
     
   return (Managed_Object_Handle)p_obj;
 }
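
For orientation, gc_alloc above routes a request by size (above GC_OBJ_SIZE_THRESHOLD it goes to the large object space, otherwise to the nursery) and records finalizable objects for the collector. A toy sketch of that routing; the threshold value and the allocator stubs below are made up for illustration:

    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    const unsigned GC_OBJ_SIZE_THRESHOLD = 4 * 1024;     /* hypothetical threshold */

    static std::vector<void*> obj_with_fin;              /* stand-in for the mutator's finalizer block */

    static void* los_alloc(unsigned size) { return std::calloc(1, size); }   /* stub allocators */
    static void* nos_alloc(unsigned size) { return std::calloc(1, size); }

    static void* gc_alloc_sketch(unsigned size, bool has_finalizer)
    {
      void* p = (size > GC_OBJ_SIZE_THRESHOLD) ? los_alloc(size) : nos_alloc(size);
      if (p != nullptr && has_finalizer)
        obj_with_fin.push_back(p);                       /* mirrors mutator_add_finalizer */
      return p;
    }

    int main()
    {
      void* small = gc_alloc_sketch(64, false);          /* goes to the nursery            */
      void* large = gc_alloc_sketch(64 * 1024, true);    /* goes to the large object space */
      std::printf("small=%p large=%p finalizable=%zu\n", small, large, obj_with_fin.size());
      std::free(small); std::free(large);
      return 0;
    }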

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp Thu Jan 11 05:57:16 2007
@@ -22,64 +22,20 @@
 
 #include "fspace.h"
 
-Boolean NOS_PARTIAL_FORWARD = TRUE;
-
-void* nos_boundary = null; /* this is only for speeding up write barrier */
+Boolean NOS_PARTIAL_FORWARD = FALSE;
 
 Boolean forward_first_half;
 void* object_forwarding_boundary=NULL;
 
-Boolean fspace_mark_object(Fspace* fspace, Partial_Reveal_Object *p_obj)
-{  
-  obj_mark_in_vt(p_obj);
-
-  unsigned int obj_word_index = OBJECT_WORD_INDEX_TO_MARKBIT_TABLE(p_obj);
-  unsigned int obj_offset_in_word = OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj); 	
-	
-  unsigned int *p_word = &(GC_BLOCK_HEADER(p_obj)->mark_table[obj_word_index]);
-  unsigned int word_mask = (1<<obj_offset_in_word);
-	
-  unsigned int old_value = *p_word;
-  unsigned int new_value = old_value|word_mask;
-  
-  while(old_value != new_value){
-    unsigned int temp = atomic_cas32(p_word, new_value, old_value);
-    if(temp == old_value) return TRUE;
-    old_value = *p_word;
-    new_value = old_value|word_mask;
-  }
-  return FALSE;
-}
-
 static void fspace_destruct_blocks(Fspace* fspace)
 {   
   return;
 }
 
-static void fspace_init_blocks(Fspace* fspace)
-{ 
-  Block* blocks = (Block*)fspace->heap_start; 
-  Block_Header* last_block = (Block_Header*)blocks;
-  unsigned int start_idx = fspace->first_block_idx;
-  for(unsigned int i=0; i < fspace->num_managed_blocks; i++){
-    Block_Header* block = (Block_Header*)&(blocks[i]);
-    block->free = (void*)((unsigned int)block + GC_BLOCK_HEADER_SIZE_BYTES);
-    block->ceiling = (void*)((unsigned int)block + GC_BLOCK_SIZE_BYTES); 
-    block->base = block->free;
-    block->block_idx = i + start_idx;
-    block->status = BLOCK_FREE;  
-    last_block->next = block;
-    last_block = block;
-  }
-  last_block->next = NULL;
-  fspace->blocks = blocks;
-   
-  return;
-}
-
 struct GC_Gen;
 void gc_set_nos(GC_Gen* gc, Space* space);
-void fspace_initialize(GC* gc, void* start, unsigned int fspace_size) 
+
+void fspace_initialize(GC* gc, void* start, unsigned int fspace_size, unsigned int commit_size) 
 {    
   assert( (fspace_size%GC_BLOCK_SIZE_BYTES) == 0 );
   Fspace* fspace = (Fspace *)STD_MALLOC(sizeof(Fspace));
@@ -90,14 +46,20 @@
   fspace->num_total_blocks = fspace_size >> GC_BLOCK_SHIFT_COUNT;
 
   void* reserved_base = start;
-  int status = port_vmem_commit(&reserved_base, fspace_size, gc->allocated_memory); 
-  assert(status == APR_SUCCESS && reserved_base == start);
-    
-  memset(reserved_base, 0, fspace_size);
-  fspace->committed_heap_size = fspace_size;
+  /* commit fspace mem */    
+  vm_commit_mem(reserved_base, commit_size);
+  memset(reserved_base, 0, commit_size);
+  
+  fspace->committed_heap_size = commit_size;
   fspace->heap_start = reserved_base;
+
+#ifdef STATIC_NOS_MAPPING
   fspace->heap_end = (void *)((unsigned int)reserved_base + fspace->reserved_heap_size);
-  fspace->num_managed_blocks = fspace_size >> GC_BLOCK_SHIFT_COUNT;
+#else /* for dynamic mapping, nos->heap_end is gc->heap_end */
+  fspace->heap_end = (void *)((unsigned int)reserved_base + fspace->committed_heap_size);
+#endif
+
+  fspace->num_managed_blocks = commit_size >> GC_BLOCK_SHIFT_COUNT;
   
   fspace->first_block_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, reserved_base);
   fspace->ceiling_block_idx = fspace->first_block_idx + fspace->num_managed_blocks - 1;
@@ -105,47 +67,51 @@
   fspace->num_used_blocks = 0;
   fspace->free_block_idx = fspace->first_block_idx;
   
-  fspace_init_blocks(fspace);
+  space_init_blocks((Blocked_Space*)fspace);
   
-  fspace->mark_object_func = fspace_mark_object;
-
   fspace->move_object = TRUE;
   fspace->num_collections = 0;
+  fspace->time_collections = 0;
+  fspace->survive_ratio = 0.2f;
+  
   fspace->gc = gc;
   gc_set_nos((GC_Gen*)gc, (Space*)fspace);
   /* above is same as Mspace init --> */
   
-  nos_boundary = fspace->heap_start;
-
   forward_first_half = TRUE;
+  /* we always disable partial forwarding in non-gen mode. */
+  if( !gc_is_gen_mode() )
+    NOS_PARTIAL_FORWARD = FALSE;
+
   if( NOS_PARTIAL_FORWARD )
     object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks >>1 ];
   else
     object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks];
-
+     
   return;
 }
 
 void fspace_destruct(Fspace *fspace) 
 {
   fspace_destruct_blocks(fspace);
-  port_vmem_decommit(fspace->heap_start, fspace->committed_heap_size, fspace->gc->allocated_memory);
-  STD_FREE(fspace);  
- 
+  STD_FREE(fspace);   
 }
  
-void reset_fspace_for_allocation(Fspace* fspace)
+void fspace_reset_for_allocation(Fspace* fspace)
 { 
   unsigned int first_idx = fspace->first_block_idx;
-  unsigned int marked_start_idx = 0;
+  unsigned int marked_start_idx = 0; //was for oi markbit reset, now useless
   unsigned int marked_last_idx = 0;
+  Boolean is_major_collection = (fspace->gc->collect_kind != MINOR_COLLECTION);
+  Boolean gen_mode = gc_is_gen_mode();
 
-  if( fspace->gc->collect_kind == MAJOR_COLLECTION || 
-         NOS_PARTIAL_FORWARD == FALSE || !gc_requires_barriers())            
+  if(  is_major_collection || 
+         NOS_PARTIAL_FORWARD == FALSE || !gen_mode)            
   {
     fspace->free_block_idx = first_idx;
     fspace->ceiling_block_idx = first_idx + fspace->num_managed_blocks - 1;  
     forward_first_half = TRUE; /* only useful for not-FORWARD_ALL*/
+  
   }else{    
     if(forward_first_half){
       fspace->free_block_idx = first_idx;
@@ -160,7 +126,6 @@
     }
     forward_first_half = forward_first_half^1;
   }
-
   
   Block* blocks = fspace->blocks;
   unsigned int num_freed = 0;
@@ -168,41 +133,55 @@
   unsigned int new_last_idx = fspace->ceiling_block_idx - first_idx;
   for(unsigned int i = new_start_idx; i <= new_last_idx; i++){
     Block_Header* block = (Block_Header*)&(blocks[i]);
+    block->src = NULL;
+    block->next_src = NULL;
+    assert(!block->dest_counter);
     if(block->status == BLOCK_FREE) continue;
     block->status = BLOCK_FREE; 
-    block->free = GC_BLOCK_BODY(block);
-    if( !gc_requires_barriers() || fspace->gc->collect_kind == MAJOR_COLLECTION )
-      block_clear_mark_table(block); 
+    block->free = block->base;
 
     num_freed ++;
   }
 
-  for(unsigned int i = marked_start_idx; i <= marked_last_idx; i++){
-    Block_Header* block = (Block_Header*)&(blocks[i]);
-    if(block->status == BLOCK_FREE) continue;
-    block_clear_markbits(block);
-  }
   fspace->num_used_blocks = fspace->num_used_blocks - num_freed;
-
+  return;
 }
 
 void collector_execute_task(GC* gc, TaskType task_func, Space* space);
 
+#include "../gen/gen.h"
+unsigned int mspace_free_block_idx;
+
 /* world is stopped when starting fspace_collection */      
 void fspace_collection(Fspace *fspace)
 {
   fspace->num_collections++;  
   
   GC* gc = fspace->gc;
+  mspace_free_block_idx = ((GC_Gen*)gc)->mos->free_block_idx;
+    
+  /* we should not destruct rootset structure in case we need fall back */
+  pool_iterator_init(gc->metadata->gc_rootset_pool);
 
-  if(gc_requires_barriers()){ 
-    /* generational GC. Only trace nos */
-    collector_execute_task(gc, (TaskType)trace_forward_fspace, (Space*)fspace);
-  }else{
-    /* non-generational GC. Mark the whole heap (nos, mos, and los) */
-    pool_iterator_init(gc->metadata->gc_rootset_pool);
-    collector_execute_task(gc, (TaskType)mark_copy_fspace, (Space*)fspace);
+  switch(fspace->collect_algorithm){
+
+#ifdef MARK_BIT_FLIPPING
+    
+    case MINOR_NONGEN_FORWARD_POOL:
+      collector_execute_task(gc, (TaskType)nongen_forward_pool, (Space*)fspace);    
+      break;
+        
+#endif /*#ifdef MARK_BIT_FLIPPING */
+
+    case MINOR_GEN_FORWARD_POOL:
+      collector_execute_task(gc, (TaskType)gen_forward_pool, (Space*)fspace);
+      break;
+        
+    default:
+      printf("\nSpecified minor collection algorithm doesn't exist in the built module!\n");
+      exit(0);    
+      break;
   }
-  
+
   return; 
 }
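
fspace_reset_for_allocation above keeps the NOS_PARTIAL_FORWARD rotation: in a generational minor collection only half of the nursery is evacuated, and the allocation window flips to the just-emptied half each cycle. An illustrative sketch of that flip, with simplified fields rather than the real Fspace layout:

    #include <cstdio>

    struct Fspace {
      unsigned first_block_idx;
      unsigned num_managed_blocks;
      unsigned free_block_idx;      /* where mutators allocate next        */
      unsigned ceiling_block_idx;   /* last block available for allocation */
    };

    static bool forward_first_half = true;

    static void reset_for_allocation(Fspace* s, bool minor_gen_partial)
    {
      unsigned first = s->first_block_idx;
      unsigned half  = s->num_managed_blocks / 2;

      if (!minor_gen_partial) {     /* major GC, non-gen mode, or full forward: whole space */
        s->free_block_idx    = first;
        s->ceiling_block_idx = first + s->num_managed_blocks - 1;
        forward_first_half   = true;
        return;
      }
      if (forward_first_half) {     /* first half was just evacuated: allocate there */
        s->free_block_idx    = first;
        s->ceiling_block_idx = first + half - 1;
      } else {                      /* second half was evacuated: allocate there */
        s->free_block_idx    = first + half;
        s->ceiling_block_idx = first + s->num_managed_blocks - 1;
      }
      forward_first_half = !forward_first_half;
    }

    int main()
    {
      Fspace nos = { 0, 8, 0, 7 };
      for (int i = 0; i < 4; i++) {
        reset_for_allocation(&nos, true);
        std::printf("cycle %d: alloc blocks %u..%u\n", i, nos.free_block_idx, nos.ceiling_block_idx);
      }
      return 0;
    }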

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h Thu Jan 11 05:57:16 2007
@@ -22,6 +22,7 @@
 #define _FROM_SPACE_H_
 
 #include "../thread/gc_thread.h"
+#include "../thread/collector_alloc.h"
 
 /*
  * In our Gen GC, not all live objects are copied to tspace space, the newer baby will
@@ -39,9 +40,11 @@
   unsigned int reserved_heap_size;
   unsigned int committed_heap_size;
   unsigned int num_collections;
+  int64 time_collections;
+  float survive_ratio;
+  unsigned int collect_algorithm;
   GC* gc;
   Boolean move_object;
-  Boolean (*mark_object_func)(Fspace* space, Partial_Reveal_Object* p_obj);
   /* END of Space --> */
 
   Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */
@@ -58,26 +61,31 @@
       
 } Fspace;
 
-void fspace_initialize(GC* gc, void* start, unsigned int fspace_size);
+void fspace_initialize(GC* gc, void* start, unsigned int fspace_size, unsigned int commit_size);
 void fspace_destruct(Fspace *fspace);
 
-inline Boolean fspace_has_free_block(Fspace* fspace){ return fspace->free_block_idx <= fspace->ceiling_block_idx; }
-inline unsigned int fspace_free_memory_size(Fspace* fspace){ return GC_BLOCK_SIZE_BYTES * (fspace->ceiling_block_idx - fspace->free_block_idx + 1);  }
-inline Boolean fspace_used_memory_size(Fspace* fspace){ return GC_BLOCK_SIZE_BYTES * fspace->num_used_blocks; }
-
+inline Boolean obj_is_dead_in_minor_forward_gc(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+  return (!obj_is_marked_or_fw_in_oi(p_obj)) ;
+}
 
 void* fspace_alloc(unsigned size, Allocator *allocator);
 
-Boolean fspace_mark_object(Fspace* fspace, Partial_Reveal_Object *p_obj);
+void fspace_reset_for_allocation(Fspace* fspace);
+
+/* gen mode */
+void gen_forward_pool(Collector* collector); 
+void gen_forward_steal(Collector* collector);
+/* nongen mode */
+void nongen_slide_copy(Collector* collector); 
 
-void reset_fspace_for_allocation(Fspace* fspace);
+#ifdef MARK_BIT_FLIPPING
 
+void nongen_forward_steal(Collector* collector); 
+void nongen_forward_pool(Collector* collector); 
 
-Boolean fspace_compute_object_target(Collector* collector, Fspace* fspace);
-void fspace_copy_collect(Collector* collector, Fspace* fspace); 
+#endif /* MARK_BIT_FLIPPING */
 
-void trace_forward_fspace(Collector* collector); 
-void mark_copy_fspace(Collector* collector); 
 
 void fspace_collection(Fspace* fspace);
   


