harmony-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From wjwashb...@apache.org
Subject svn commit: r500803 [3/3] - in /harmony/enhanced/drlvm/trunk: build/make/components/vm/ vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/ vm/gc_gen/src/common/ vm/gc_gen/src/finalizer_weakref/ vm/gc_gen/src/gen/ vm/gc_gen/src/jni/ vm/gc_gen/src/mark_c...
Date Sun, 28 Jan 2007 14:41:11 GMT
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp Sun Jan 28 06:41:08 2007
@@ -47,24 +47,24 @@
 
 static unsigned int fspace_shrink(Fspace *fspace)
 {
-  void *committed_nos_end = (void *)((unsigned int)space_heap_start((Space *)fspace) + fspace->committed_heap_size);
+  void *committed_nos_end = (void *)((POINTER_SIZE_INT)space_heap_start((Space *)fspace) + fspace->committed_heap_size);
   
-  unsigned int nos_used_size = (unsigned int)nos_first_free_block - (unsigned int)fspace->heap_start;
-  unsigned int nos_free_size = (unsigned int)committed_nos_end - (unsigned int)nos_first_free_block;
-  unsigned int decommit_size = (nos_used_size <= nos_free_size) ? nos_used_size : nos_free_size;
+  POINTER_SIZE_INT nos_used_size = (POINTER_SIZE_INT)nos_first_free_block - (POINTER_SIZE_INT)fspace->heap_start;
+  POINTER_SIZE_INT nos_free_size = (POINTER_SIZE_INT)committed_nos_end - (POINTER_SIZE_INT)nos_first_free_block;
+  POINTER_SIZE_INT decommit_size = (nos_used_size <= nos_free_size) ? nos_used_size : nos_free_size;
   assert(decommit_size);
   
-  void *decommit_base = (void *)((unsigned int)committed_nos_end - decommit_size);
-  decommit_base = (void *)round_down_to_size((unsigned int)decommit_base, SPACE_ALLOC_UNIT);
+  void *decommit_base = (void *)((POINTER_SIZE_INT)committed_nos_end - decommit_size);
+  decommit_base = (void *)round_down_to_size((POINTER_SIZE_INT)decommit_base, SPACE_ALLOC_UNIT);
   if(decommit_base < (void *)nos_first_free_block)
-    decommit_base = (void *)((unsigned int)decommit_base + SPACE_ALLOC_UNIT);
-  decommit_size = (unsigned int)committed_nos_end - (unsigned int)decommit_base;
+    decommit_base = (void *)((POINTER_SIZE_INT)decommit_base + SPACE_ALLOC_UNIT);
+  decommit_size = (POINTER_SIZE_INT)committed_nos_end - (POINTER_SIZE_INT)decommit_base;
   assert(decommit_size && !(decommit_size % SPACE_ALLOC_UNIT));
   
   Boolean result = vm_decommit_mem(decommit_base, decommit_size);
   assert(result == TRUE);
   
-  fspace->committed_heap_size = (unsigned int)decommit_base - (unsigned int)fspace->heap_start;
+  fspace->committed_heap_size = (POINTER_SIZE_INT)decommit_base - (POINTER_SIZE_INT)fspace->heap_start;
   fspace->num_managed_blocks = fspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
   
   Block_Header *new_last_block = (Block_Header *)&fspace->blocks[fspace->num_managed_blocks - 1];
@@ -78,24 +78,24 @@
 {
   Block_Header *old_last_mos_block = (Block_Header *)(mos_first_new_block -1);
   old_last_mos_block->next = (Block_Header *)mos_first_new_block;
-  void *new_committed_mos_end = (void *)((unsigned int)space_heap_start((Space *)mspace) + mspace->committed_heap_size); 
+  void *new_committed_mos_end = (void *)((POINTER_SIZE_INT)space_heap_start((Space *)mspace) + mspace->committed_heap_size); 
   Block_Header *new_last_mos_block = (Block_Header *)((Block *)new_committed_mos_end -1);
   new_last_mos_block->next = (Block_Header *)space_heap_start((Space *)fspace);
 }
 
-static Block *mspace_extend_without_link(Mspace *mspace, unsigned int commit_size)
+static Block *mspace_extend_without_link(Mspace *mspace, Fspace *fspace, unsigned int commit_size)
 {
-  assert(commit_size && !(commit_size % SPACE_ALLOC_UNIT));
+  assert(commit_size && !(commit_size % GC_BLOCK_SIZE_BYTES));
   
-  void *committed_mos_end = (void *)((unsigned int)space_heap_start((Space *)mspace) + mspace->committed_heap_size);
+  void *committed_mos_end = (void *)((POINTER_SIZE_INT)space_heap_start((Space *)mspace) + mspace->committed_heap_size);
   void *commit_base = committed_mos_end;
-  assert(!((unsigned int)committed_mos_end % SPACE_ALLOC_UNIT));
+  assert(!((POINTER_SIZE_INT)committed_mos_end % SPACE_ALLOC_UNIT));
   
   void *result = vm_commit_mem(commit_base, commit_size);
   assert(result == commit_base);
   
-  void *new_end = (void *)((unsigned int)commit_base + commit_size);
-  mspace->committed_heap_size = (unsigned int)new_end - (unsigned int)mspace->heap_start;
+  void *new_end = (void *)((POINTER_SIZE_INT)commit_base + commit_size);
+  mspace->committed_heap_size = (POINTER_SIZE_INT)new_end - (POINTER_SIZE_INT)mspace->heap_start;
   
   /* init the grown blocks */
   Block_Header *block = (Block_Header *)commit_base;
@@ -147,19 +147,19 @@
   if (object_is_array(p_obj)) {
     Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
     assert(!obj_is_primitive_array(p_obj));
-  
-    int32 array_length = array->array_len;
-    Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array));
     
+    int32 array_length = array->array_len;
+    Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+
     for (int i = 0; i < array_length; i++) {
       Partial_Reveal_Object** p_ref = p_refs + i;
       Partial_Reveal_Object*  p_element = *p_ref;
       if((p_element > start_address) && (p_element < end_address))
-        *p_ref = (Partial_Reveal_Object*)((unsigned int)p_element - addr_diff);
+          *p_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_element - addr_diff);
     }
     return;
   }
-  
+
   /* scan non-array object */
   int *offset_scanner = init_object_scanner(p_obj);
   while (true) {
@@ -168,7 +168,7 @@
   
     Partial_Reveal_Object*  p_element = *p_ref;
     if((p_element > start_address) && (p_element < end_address))
-      *p_ref = (Partial_Reveal_Object*)((unsigned int)p_element - addr_diff);
+      *p_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_element - addr_diff);
     offset_scanner = offset_next_ref(offset_scanner);
   }
 
@@ -209,14 +209,14 @@
   pool_iterator_init(pool);
 
   while(Vector_Block *root_set = pool_iterator_next(pool)){
-    unsigned int *iter = vector_block_iterator_init(root_set);
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
     while(!vector_block_iterator_end(root_set,iter)){
       Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)*iter;
       iter = vector_block_iterator_advance(root_set,iter);
 
       Partial_Reveal_Object *p_obj = *p_ref;
       if((p_obj > start_address) && (p_obj < end_address))
-        *p_ref = (Partial_Reveal_Object*)((unsigned int)p_obj - addr_diff);
+          *p_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj - addr_diff);
     }
   }
 }
@@ -248,14 +248,14 @@
     Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)block->base;
     void *src_base = (void *)block->base;
     void *block_end = block->new_free;   // new_free or free depends on whether reset is done or not
-    unsigned int size = (unsigned int)block_end - (unsigned int)src_base;
-    Block_Header *dest_block = GC_BLOCK_HEADER((void *)((unsigned int)src_base - addr_diff));
+    POINTER_SIZE_INT size = (POINTER_SIZE_INT)block_end - (POINTER_SIZE_INT)src_base;
+    Block_Header *dest_block = GC_BLOCK_HEADER((void *)((POINTER_SIZE_INT)src_base - addr_diff));
     memmove(dest_block->base, src_base, size);
-    dest_block->new_free = (void *)((unsigned int)block_end - addr_diff);
+    dest_block->new_free = (void *)((POINTER_SIZE_INT)block_end - addr_diff);
     if(verify_live_heap)
       while (p_obj < block_end) {
-        event_collector_doublemove_obj(p_obj, (Partial_Reveal_Object *)((unsigned int)p_obj - addr_diff), collector);
-    	 p_obj = obj_end(p_obj);
+        event_collector_doublemove_obj(p_obj, (Partial_Reveal_Object *)((POINTER_SIZE_INT)p_obj - addr_diff), collector);
+         p_obj = obj_end(p_obj);
       }
   }
 }
@@ -269,6 +269,9 @@
   Mspace *mspace = gc_gen->mos;
   Fspace *fspace = gc_gen->nos;
   Lspace *lspace = gc_gen->los;
+
+  /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/
+  gc_gen->collect_kind = EXTEND_COLLECTION;
   
   unsigned int num_active_collectors = gc_gen->num_active_collectors;
   unsigned int old_num;
@@ -299,8 +302,10 @@
 }
 
 #else
+static volatile unsigned int num_recomputing_collectors = 0;
 static volatile unsigned int num_refixing_collectors = 0;
 static volatile unsigned int num_moving_collectors = 0;
+static volatile unsigned int num_restoring_collectors = 0;
 
 void mspace_extend_compact(Collector *collector)
 {
@@ -308,6 +313,9 @@
   Mspace *mspace = gc_gen->mos;
   Fspace *fspace = gc_gen->nos;
   Lspace *lspace = gc_gen->los;
+
+  /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/
+  gc_gen->collect_kind = EXTEND_COLLECTION;
   
   unsigned int num_active_collectors = gc_gen->num_active_collectors;
   unsigned int old_num;
@@ -322,16 +330,16 @@
     old_num = atomic_inc32(&num_space_changing_collectors);
     if( old_num == 0 ){
       unsigned int mem_changed_size = fspace_shrink(fspace);
-      mos_first_new_block = mspace_extend_without_link(mspace, mem_changed_size);
+      mos_first_new_block = mspace_extend_without_link(mspace, fspace, mem_changed_size);
       
       set_first_and_end_block_to_move(collector, mem_changed_size);
       //mspace_block_iter_init_for_extension(mspace, (Block_Header *)first_block_to_move);
       mspace_block_iter_init_for_extension(mspace, (Block_Header *)mspace->blocks);
-    
+      
       num_space_changing_collectors++;
     }
     while(num_space_changing_collectors != num_active_collectors + 1);
-    
+
     atomic_cas32( &num_refixing_collectors, 0, num_active_collectors+1);
     
     mspace_refix_repointed_refs(collector, mspace, (void *)first_block_to_move, (void *)nos_first_free_block, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp Sun Jan 28 06:41:08 2007
@@ -15,7 +15,7 @@
  */
 
 /**
- * @author Chunrong Lai, 2006/12/01
+ * @author Chunrong Lai, 2006/12/25
  */
 
 #include "mspace_collect_compact.h"
@@ -56,22 +56,22 @@
       /* we don't check if it's set, since only remaining objs from last NOS partial collection need it. */
       obj_unmark_in_oi(p_obj); 
       
-      unsigned int curr_sector_size = (unsigned int)start_pos - (unsigned int)src_sector_addr;
+      POINTER_SIZE_INT curr_sector_size = (POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)src_sector_addr;
 
       /* check if dest block is not enough to hold this sector. If yes, grab next one */      
-      unsigned int block_end = (unsigned int)GC_BLOCK_END(dest_block);
-      if( ((unsigned int)dest_sector_addr + curr_sector_size) > block_end ){
+      POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block);
+      if( ((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) > block_end ){
         dest_block->new_free = dest_sector_addr; 
         dest_block = mspace_get_next_target_block(collector, mspace);
         if(dest_block == NULL){ 
           collector->result = FALSE; 
           return; 
         }
-        block_end = (unsigned int)GC_BLOCK_END(dest_block);
+        block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block);
         dest_sector_addr = dest_block->base;
       }
         
-      assert(((unsigned int)dest_sector_addr + curr_sector_size) <= block_end );
+      assert(((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) <= block_end );
 
       /* check if current sector has no more sector. If not, loop back. FIXME:: we should add a condition for block check */      
       p_obj =  block_get_next_marked_object(curr_block, &start_pos);
@@ -79,26 +79,26 @@
         continue;
 
       /* current sector is done, let's move it. */
-      unsigned int sector_distance = (unsigned int)src_sector_addr - (unsigned int)dest_sector_addr;
+      POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr;
       curr_block->table[curr_sector] = sector_distance;
 
       if (verify_live_heap) {
-      	   Partial_Reveal_Object *rescan_obj = (Partial_Reveal_Object *)src_sector_addr;
-      	   void *rescan_pos = (Partial_Reveal_Object *)((unsigned int)rescan_obj + vm_object_size(rescan_obj));
-      	   while ((unsigned int)rescan_obj < (unsigned int)src_sector_addr + curr_sector_size) {
-    	      Partial_Reveal_Object* targ_obj = (Partial_Reveal_Object *)((unsigned int)rescan_obj- sector_distance);
+           Partial_Reveal_Object *rescan_obj = (Partial_Reveal_Object *)src_sector_addr;
+           void *rescan_pos = (Partial_Reveal_Object *)((POINTER_SIZE_INT)rescan_obj + vm_object_size(rescan_obj));
+           while ((POINTER_SIZE_INT)rescan_obj < (POINTER_SIZE_INT)src_sector_addr + curr_sector_size) {
+            Partial_Reveal_Object* targ_obj = (Partial_Reveal_Object *)((POINTER_SIZE_INT)rescan_obj- sector_distance);
              if(is_fallback)
                event_collector_doublemove_obj(rescan_obj, targ_obj, collector);
              else
                event_collector_move_obj(rescan_obj, targ_obj, collector);
-      	      rescan_obj = block_get_next_marked_object(curr_block, &rescan_pos);	
-      	      if(rescan_obj == NULL) break;
-      	   }
+              rescan_obj = block_get_next_marked_object(curr_block, &rescan_pos);  
+              if(rescan_obj == NULL) break;
+           }
       }
          
       memmove(dest_sector_addr, src_sector_addr, curr_sector_size);
 
-      dest_sector_addr = (void*)((unsigned int) dest_sector_addr + curr_sector_size);
+      dest_sector_addr = (void*)((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size);
       src_sector_addr = p_obj;
       curr_sector  = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
     }
@@ -147,7 +147,7 @@
   if(gc->collect_kind != FALLBACK_COLLECTION)    
        mark_scan_heap(collector);  
   else
-       fallback_mark_scan_heap(collector);  	
+       fallback_mark_scan_heap(collector);    
 
   old_num = atomic_inc32(&num_marking_collectors);
   if( ++old_num == num_active_collectors ){
@@ -160,10 +160,9 @@
 #ifndef BUILD_IN_REFERENT
     else {
       gc_set_weakref_sets(gc);
-      update_ref_ignore_finref(collector);
+      gc_update_weakref_ignore_finref(gc);
     }
 #endif
-
     
     /* let other collectors go */
     num_marking_collectors++; 
@@ -208,7 +207,7 @@
   }
   while(num_fixing_collectors != num_active_collectors + 1);
 
-   /* Dealing with out of space in mspace */  
+   /* Dealing with out of memory in mspace */  
   if(mspace->free_block_idx > fspace->first_block_idx){    
      atomic_cas32( &num_extending_collectors, 0, num_active_collectors);        
      mspace_extend_compact(collector);        
@@ -219,10 +218,6 @@
   /* Leftover: **************************************************
    */
   if( collector->thread_handle != 0 ) return;
-
-
-  if(!IGNORE_FINREF )
-    gc_put_finref_to_vm(gc);
 
   mspace_reset_after_compaction(mspace);
   fspace_reset_for_allocation(fspace);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp Sun Jan 28 06:41:08 2007
@@ -23,23 +23,12 @@
 #include "../mark_sweep/lspace.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
-//#define VERIFY_SLIDING_COMPACT
 
 struct GC_Gen;
 Space* gc_get_nos(GC_Gen* gc);
 Space* gc_get_mos(GC_Gen* gc);
 Space* gc_get_los(GC_Gen* gc);
 
-#ifdef VERIFY_SLIDING_COMPACT
-typedef struct {
-  unsigned int addr;
-  unsigned int dest_counter;
-  unsigned int collector;
-  Block_Header *src_list[1021];
-} Block_Verify_Info;
-static Block_Verify_Info block_info[32*1024][2];
-#endif
-
 static volatile Block_Header *last_block_for_dest;
 
 static void mspace_compute_object_target(Collector* collector, Mspace* mspace)
@@ -49,10 +38,6 @@
   void *dest_addr = dest_block->base;
   Block_Header *last_src;
   
-#ifdef VERIFY_SLIDING_COMPACT
-  block_info[(Block*)dest_block-mspace->blocks][0].collector = (unsigned int)collector->thread_handle + 1;
-#endif
-  
   assert(!collector->rem_set);
   collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
   
@@ -72,9 +57,9 @@
     while( p_obj ){
       assert( obj_is_marked_in_vt(p_obj));
 
-      unsigned int obj_size = (unsigned int)start_pos - (unsigned int)p_obj;
+      unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj);
       
-      if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){
+      if( ((POINTER_SIZE_INT)dest_addr + obj_size) > (POINTER_SIZE_INT)GC_BLOCK_END(dest_block)){
         dest_block->new_free = dest_addr;
         dest_block = mspace_get_next_target_block(collector, mspace);
         if(dest_block == NULL){ 
@@ -86,12 +71,8 @@
         last_src = curr_block;
         if(p_obj != first_obj)
           ++curr_block->dest_counter;
-
-#ifdef VERIFY_SLIDING_COMPACT
-        block_info[(Block*)dest_block-mspace->blocks][0].collector = (unsigned int)collector->thread_handle + 1;
-#endif
       }
-      assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block));
+      assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)GC_BLOCK_END(dest_block));
       
       Obj_Info_Type obj_info = get_obj_info(p_obj);
 
@@ -103,7 +84,7 @@
       obj_set_fw_in_oi(p_obj, dest_addr);
       
       /* FIXME: should use alloc to handle alignment requirement */
-      dest_addr = (void *)((unsigned int) dest_addr + obj_size);
+      dest_addr = (void *)((POINTER_SIZE_INT) dest_addr + obj_size);
       p_obj = block_get_next_marked_obj_prefetch_next(curr_block, &start_pos);
     }
     
@@ -177,37 +158,28 @@
   } else {
     cur_dest_block = set_next_block_for_dest(mspace);
   }
-
-//  printf("Getting next dest block:\n");
-//  printf("next_block_for_dest: %d\n\n", next_block_for_dest ? next_block_for_dest->block_idx : 0);
   
   unsigned int total_dest_counter = 0;
   Block_Header *last_dest_block = (Block_Header *)last_block_for_dest;
   for(; cur_dest_block <= last_dest_block; cur_dest_block = cur_dest_block->next){
     if(cur_dest_block->status == BLOCK_DEST){
-//      printf("idx: %d  DEST  ", cur_dest_block->block_idx);
       continue;
     }
     if(cur_dest_block->dest_counter == 0 && cur_dest_block->src){
-//      printf("idx: %d  DEST  FOUND!\n\n", cur_dest_block->block_idx);
       cur_dest_block->status = BLOCK_DEST;
       return cur_dest_block;
     } else if(cur_dest_block->dest_counter == 1 && GC_BLOCK_HEADER(cur_dest_block->src) == cur_dest_block){
-//      printf("idx: %d  NON_DEST  FOUND!\n\n", cur_dest_block->block_idx);
       return cur_dest_block;
     } else if(cur_dest_block->dest_counter == 0 && !cur_dest_block->src){
-//      printf("idx: %d  NO_SRC  ", cur_dest_block->block_idx);
       cur_dest_block->status = BLOCK_DEST;
     } else {
-//      printf("OTHER  ");
       total_dest_counter += cur_dest_block->dest_counter;
     }
   }
   
-  if(total_dest_counter){
-//    printf("\nNeed refind!\n\n");
+  if(total_dest_counter)
     return DEST_NOT_EMPTY;
-  }
+  
   return NULL;
 }
 
@@ -316,7 +288,7 @@
       assert(obj_is_marked_in_vt(p_obj));
       obj_unmark_in_vt(p_obj);
       
-      unsigned int obj_size = (unsigned int)start_pos - (unsigned int)p_obj;
+      unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj);
       if(p_obj != p_target_obj){
         memmove(p_target_obj, p_obj, obj_size);
 
@@ -336,101 +308,12 @@
       p_target_obj = obj_get_fw_in_oi(p_obj);
     
     } while(GC_BLOCK_HEADER(p_target_obj) == dest_block);
-
-#ifdef VERIFY_SLIDING_COMPACT
-    printf("dest_block: %x   src_block: %x   collector: %x\n", (unsigned int)dest_block, (unsigned int)src_block, (unsigned int)collector->thread_handle);
-#endif
-
+    
     atomic_dec32(&src_block->dest_counter);
   }
 
-#ifdef VERIFY_SLIDING_COMPACT
-  static unsigned int fax = 0;
-  fax++;
-  printf("\n\n\nCollector %d   Sliding compact ends!   %d  \n\n\n", (unsigned int)collector->thread_handle, fax);
-#endif
-
 }
 
-#ifdef VERIFY_SLIDING_COMPACT
-
-static void verify_sliding_compact(Mspace *mspace, Boolean before)
-{
-  unsigned int i, j, k;
-  Block_Header *header;
-  
-  if(before)
-    j = 0;
-  else
-    j = 1;
-  
-  for(i = 0, header = (Block_Header *)mspace->blocks;
-      header;
-      header=header->next, ++i)
-  {
-    block_info[i][j].addr = (unsigned int)header;
-    block_info[i][j].dest_counter = header->dest_counter;
-    if(header->src){
-      Partial_Reveal_Object *src_obj = header->src;
-      k = 0;
-      printf("\nHeader: %x %x Collector: %x  ", (unsigned int)header, block_info[i][j].dest_counter, block_info[i][j].collector);
-      Block_Header *dest_header = GC_BLOCK_HEADER(obj_get_fw_in_oi(src_obj));
-      while(dest_header == header){
-        block_info[i][j].src_list[k] = dest_header;
-        Block_Header *src_header = GC_BLOCK_HEADER(src_obj);
-        printf("%x %x ", (unsigned int)src_header, src_header->dest_counter);
-        src_obj = src_header->next_src;
-        if(!src_obj)
-          break;
-        dest_header = GC_BLOCK_HEADER(obj_get_fw_in_oi(src_obj));
-        if(++k >= 1021)
-          assert(0);
-      }
-    }
-  }
-  
-  if(!before){
-    for(i = 0, header = (Block_Header *)mspace->blocks;
-        header;
-        header=header->next, ++i)
-    {
-      Boolean correct = TRUE;
-      if(block_info[i][0].addr != block_info[i][1].addr)
-        correct = FALSE;
-      if(block_info[i][0].dest_counter != block_info[i][1].dest_counter)
-        correct = FALSE;
-      for(k = 0; k < 1021; k++){
-        if(block_info[i][0].src_list[k] != block_info[i][1].src_list[k]){
-          correct = FALSE;
-          break;
-        }
-      }
-      if(!correct)
-        printf("header: %x %x   dest_counter: %x %x   src: %x %x",
-                block_info[i][0].addr, block_info[i][1].addr,
-                block_info[i][0].dest_counter, block_info[i][1].dest_counter,
-                block_info[i][0].src_list[k], block_info[i][1].src_list[k]);
-    }
-    
-    unsigned int *array = (unsigned int *)block_info;
-    memset(array, 0, 1024*32*1024*2);
-  }
-}
-#endif
-
-/*
-#define OI_RESTORING_THRESHOLD 8
-static volatile Boolean parallel_oi_restoring;
-unsigned int mspace_saved_obj_info_size(GC*gc){ return pool_size(gc->metadata->collector_remset_pool);} 
-*/
-
-static volatile unsigned int num_marking_collectors = 0;
-static volatile unsigned int num_repointing_collectors = 0;
-static volatile unsigned int num_fixing_collectors = 0;
-static volatile unsigned int num_moving_collectors = 0;
-static volatile unsigned int num_restoring_collectors = 0;
-static volatile unsigned int num_extending_collectors = 0;
-
 //For_LOS_extend
 void mspace_restore_block_chain(Mspace* mspace)
 {
@@ -442,6 +325,13 @@
   }
 }
 
+static volatile unsigned int num_marking_collectors = 0;
+static volatile unsigned int num_repointing_collectors = 0;
+static volatile unsigned int num_fixing_collectors = 0;
+static volatile unsigned int num_moving_collectors = 0;
+static volatile unsigned int num_restoring_collectors = 0;
+static volatile unsigned int num_extending_collectors = 0;
+
 void slide_compact_mspace(Collector* collector) 
 {
   GC* gc = collector->gc;
@@ -472,7 +362,7 @@
 #ifndef BUILD_IN_REFERENT
     else {
       gc_set_weakref_sets(gc);
-      update_ref_ignore_finref(collector);
+      gc_update_weakref_ignore_finref(gc);
     }
 #endif
     
@@ -494,8 +384,7 @@
     /* single thread world */
     gc->collect_result = gc_collection_result(gc);
     if(!gc->collect_result){
-      num_repointing_collectors++; 
-      assert(0);    // Now we should not be out of mem here. mspace_extend_compact() is backing up for this case.
+      num_repointing_collectors++;
       return;
     }
     
@@ -517,15 +406,9 @@
     /* last collector's world here */
     lspace_fix_repointed_refs(collector, lspace);
     gc_fix_rootset(collector);
-
-    if(!IGNORE_FINREF )
-      gc_put_finref_to_vm(gc);
-      
-#ifdef VERIFY_SLIDING_COMPACT
-    verify_sliding_compact(mspace, TRUE);
-#endif
     
     gc_init_block_for_sliding_compact(gc, mspace);
+
     num_fixing_collectors++; 
   }
   while(num_fixing_collectors != num_active_collectors + 1);
@@ -547,6 +430,7 @@
   
   old_num = atomic_inc32(&num_restoring_collectors);
   if( ++old_num == num_active_collectors ){
+    
     update_mspace_info_for_los_extension(mspace);
     
     num_restoring_collectors++;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.h Sun Jan 28 06:41:08 2007
@@ -25,9 +25,9 @@
 #include "../utils/bit_ops.h"
 #include "../utils/bidir_list.h"
 
-#define ADDRESS_IS_KB_ALIGNED(addr) (!(((unsigned int)addr) & ((1 << BIT_SHIFT_TO_KILO)-1)))
-#define ALIGN_UP_TO_KILO(addr) (((unsigned int)(addr) + (KB - 1)) & (~(KB- 1)))
-#define ALIGN_DOWN_TO_KILO(addr) ((unsigned int)(addr) & (~(KB- 1)))
+#define ADDRESS_IS_KB_ALIGNED(addr) (!(((POINTER_SIZE_INT)addr) & ((1 << BIT_SHIFT_TO_KILO)-1)))
+#define ALIGN_UP_TO_KILO(addr) (((POINTER_SIZE_INT)(addr) + (KB - 1)) & (~(KB- 1)))
+#define ALIGN_DOWN_TO_KILO(addr) ((POINTER_SIZE_INT)(addr) & (~(KB- 1)))
 
 #define NUM_FREE_LIST 128
 
@@ -37,7 +37,7 @@
   Bidir_List* next;
   Bidir_List* prev;
   /* END of Bidir_List --> */
-  unsigned int lock;	
+  SpinLock lock;  
 }Lockable_Bidir_List;
 
 typedef struct Free_Area{
@@ -120,7 +120,7 @@
   /* set bit flag of the list */
   Bidir_List* list = (Bidir_List*)&(pool->sized_area_list[index]);
   if(list->next == list){
-  	pool_list_clear_flag(pool, index);		
+  	pool_list_clear_flag(pool, index);    
   }
 }
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp Sun Jan 28 06:41:08 2007
@@ -33,13 +33,14 @@
   /* commit mspace mem */    
   void* reserved_base = start;
   unsigned int committed_size = lspace_size;
-  vm_commit_mem(reserved_base, lspace_size);
+  if(!large_page_hint)
+    vm_commit_mem(reserved_base, lspace_size);
   memset(reserved_base, 0, lspace_size);
 
   lspace->committed_heap_size = committed_size;
   lspace->reserved_heap_size = committed_size;
   lspace->heap_start = reserved_base;
-  lspace->heap_end = (void *)((unsigned int)reserved_base + committed_size);
+  lspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + committed_size);
 
   lspace->move_object = FALSE;
   lspace->gc = gc;
@@ -101,4 +102,9 @@
   lspace_reset_after_collection(lspace);  
   lspace_sweep(lspace);
   return;
+}
+
+unsigned int lspace_get_failure_size(Lspace* lspace)
+{
+  return lspace->failure_size;
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h Sun Jan 28 06:41:08 2007
@@ -25,9 +25,6 @@
 #include "../thread/gc_thread.h"
 #include "free_area_pool.h"
 
-#define GC_MIN_LOS_SIZE ( 4 * 1024 * 1024)
-
-
 typedef struct Lspace{
   /* <-- first couple of fields are overloadded as Space */
   void* heap_start;
@@ -37,13 +34,18 @@
   unsigned int num_collections;
   int64 time_collections;
   float survive_ratio;
+  unsigned int collect_algorithm;  
   GC* gc;
   Boolean move_object;
+  /*For_statistic: size allocated since the last LOS collection, i.e. last major*/
+  unsigned int alloced_size;
+  /*For_statistic: size survived after lspace_sweep*/  
+  unsigned int surviving_size;
   /* END of Space --> */
 
-//  void* alloc_free;
   Free_Area_Pool* free_pool;
-  
+  /*Size of allocation which caused lspace alloc failure.*/
+  unsigned int failure_size;
 }Lspace;
 
 void lspace_initialize(GC* gc, void* reserved_base, unsigned int lspace_size);
@@ -58,7 +60,7 @@
 
 inline Partial_Reveal_Object* lspace_get_next_marked_object( Lspace* lspace, unsigned int* iterate_index)
 {
-    unsigned int next_area_start = (unsigned int)lspace->heap_start + (*iterate_index) * KB;
+    POINTER_SIZE_INT next_area_start = (POINTER_SIZE_INT)lspace->heap_start + (*iterate_index) * KB;
     BOOLEAN reach_heap_end = 0;
 
     while(!reach_heap_end){
@@ -66,11 +68,11 @@
         while(!*((unsigned int *)next_area_start)){
                 next_area_start += ((Free_Area*)next_area_start)->size;
         }
-        if(next_area_start < (unsigned int)lspace->heap_end){
+        if(next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
             //If there is a living object at this addr, return it, and update iterate_index
             if(obj_is_marked_in_vt((Partial_Reveal_Object*)next_area_start)){
                 unsigned int obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start));
-                *iterate_index = (next_area_start + obj_size - (unsigned int)lspace->heap_start) >> BIT_SHIFT_TO_KILO;
+                *iterate_index = (unsigned int)((next_area_start + obj_size - (POINTER_SIZE_INT)lspace->heap_start) >> BIT_SHIFT_TO_KILO);
                 return (Partial_Reveal_Object*)next_area_start;
             //If this is a dead object, go on to find  a living one.
             }else{
@@ -93,5 +95,7 @@
 void lspace_fix_after_copy_nursery(Collector* collector, Lspace* lspace);
 
 void lspace_fix_repointed_refs(Collector* collector, Lspace* lspace);
+
+unsigned int lspace_get_failure_size(Lspace* lspace);
 
 #endif /*_LSPACE_H_ */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp Sun Jan 28 06:41:08 2007
@@ -22,30 +22,28 @@
 #include "../gen/gen.h"
 #include "../common/space_tuner.h"
 
-inline void free_pool_lock_nr_list(Free_Area_Pool* pool, unsigned int list_index){
+static void free_pool_lock_nr_list(Free_Area_Pool* pool, unsigned int list_index)
+{
     Lockable_Bidir_List* list_head = &pool->sized_area_list[list_index];
-    while (apr_atomic_casptr( 
-                (volatile void **) &(list_head->lock),
-                (void *) 1, (void *) 0) 
-            != (void *) 0) {
-        while (list_head->lock ==  1) {
-            ;   
-        }
-    }
-
+    lock(list_head->lock);
 }
-inline void free_pool_unlock_nr_list(Free_Area_Pool* pool, unsigned int list_index){
-    ((Lockable_Bidir_List*)(&pool->sized_area_list[list_index]))->lock = 0;
+
+static void free_pool_unlock_nr_list(Free_Area_Pool* pool, unsigned int list_index)
+{
+    Lockable_Bidir_List* list_head = &pool->sized_area_list[list_index];
+    unlock(list_head->lock);
 }
-inline unsigned int free_pool_nr_list_is_empty(Free_Area_Pool* pool, unsigned int list_index){
+
+static unsigned int free_pool_nr_list_is_empty(Free_Area_Pool* pool, unsigned int list_index)
+{
     Bidir_List* head = (Bidir_List*)(&pool->sized_area_list[list_index]);
     return (head->next == head);
 }
-inline void* free_pool_former_lists_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int list_hint, unsigned int size)
+static void* free_pool_former_lists_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int list_hint, unsigned int size)
 {
     Free_Area* free_area;
     void* p_result;
-    unsigned int remain_size;
+    int remain_size;
     unsigned int alloc_size = ALIGN_UP_TO_KILO(size);
     unsigned int new_list_nr = 0;
     Lockable_Bidir_List* head = &pool->sized_area_list[list_hint];
@@ -60,10 +58,12 @@
     }
 
     free_area = (Free_Area*)(head->next);
+    /*if the list head is not NULL, it definitely satisfies the request. */   
     remain_size = free_area->size - alloc_size;
+    assert(remain_size >= 0);
     if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
         new_list_nr = pool_list_index_with_size(remain_size);
-        p_result = (void*)((unsigned int)free_area + remain_size);
+        p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
         if(new_list_nr == list_hint){
             free_area->size = remain_size;
             free_pool_unlock_nr_list(pool, list_hint);
@@ -78,23 +78,22 @@
             return p_result;            
         }
     }
-    else if(remain_size >= 0)
+    else
     {
         free_pool_remove_area(pool, free_area);
         free_pool_unlock_nr_list(pool, list_hint);
-        p_result = (void*)((unsigned int)free_area + remain_size);
+        p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
         if(remain_size > 0){
             assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
             free_area->size = remain_size;
         }
         return p_result;
     }
-    /*We never get here, because if the list head is not NULL, it definitely satisfy the request. */
     assert(0);
     return NULL;
 }
 
-inline void* free_pool_last_list_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int size)
+static void* free_pool_last_list_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int size)
 {
     void* p_result;
     int remain_size = 0;
@@ -116,7 +115,7 @@
         remain_size = free_area->size - alloc_size;
         if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
             new_list_nr = pool_list_index_with_size(remain_size);
-            p_result = (void*)((unsigned int)free_area + remain_size);
+            p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
             if(new_list_nr == MAX_LIST_INDEX){
                 free_area->size = remain_size;
                 free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
@@ -135,7 +134,7 @@
         {
             free_pool_remove_area(pool, free_area);
             free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
-            p_result = (void*)((unsigned int)free_area + remain_size);
+            p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
             if(remain_size > 0){
                 assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
                 free_area->size = remain_size;
@@ -167,6 +166,12 @@
                 p_result = free_pool_former_lists_atomic_take_area_piece(pool, list_hint, alloc_size);
                 if(p_result){
                     memset(p_result, 0, size);
+                    unsigned int vold = lspace->alloced_size;
+                    unsigned int vnew = vold + alloc_size;
+                    while( vold != atomic_cas32(&lspace->alloced_size, vnew, vold) ){
+                        vold = lspace->alloced_size;
+                        vnew = vold + alloc_size;
+                    }
                     return p_result;
                 }else{
                     list_hint ++;
@@ -180,6 +185,12 @@
                 p_result = free_pool_last_list_atomic_take_area_piece(pool, alloc_size);
                 if(p_result){
                     memset(p_result, 0, size);
+                    unsigned int vold = lspace->alloced_size;
+                    unsigned int vnew = vold + alloc_size;
+                    while( vold != atomic_cas32(&lspace->alloced_size, vnew, vold) ){
+                        vold = lspace->alloced_size;
+                        vnew = vold + alloc_size;
+                    }
                     return p_result;
                 }
                 else break;
@@ -188,6 +199,7 @@
        /*Failed, no adequate area found in all lists, so GC at first, then get another try.*/   
         if(try_count == 0){
             vm_gc_lock_enum();
+            lspace->failure_size = alloc_size;
             gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL);
             vm_gc_unlock_enum();
             try_count ++;
@@ -214,19 +226,25 @@
         lspace->committed_heap_size += trans_size;
         lspace->reserved_heap_size += trans_size;
     }
+    /*For_statistic los information.*/
+    lspace->alloced_size = 0;    
+
+    lspace->failure_size = 0;
+
     los_boundary = lspace->heap_end;
 }
 
 void lspace_sweep(Lspace* lspace)
 {
+
+  lspace->surviving_size = 0;
+  
   /* reset the pool first because its info is useless now. */
   free_area_pool_reset(lspace->free_pool);
 
   unsigned int mark_bit_idx = 0, cur_size = 0;
   void *cur_area_start, *cur_area_end;
 
-
-
   Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start;
   Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx);
   if(p_next_obj){
@@ -235,6 +253,8 @@
        in non_gen_mode, and not reset in gen_mode. When it switches back from gen_mode to non_gen_mode,
        the last time marked object is thought to be already marked and not scanned for this cycle. */
     obj_clear_dual_bits_in_oi(p_next_obj);
+    /*For_statistic: sum up the size of survived large objects, useful to decide los extension.*/
+    lspace->surviving_size += ALIGN_UP_TO_KILO(vm_object_size(p_next_obj));    
   }
 
   cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj);
@@ -242,7 +262,7 @@
 
 
   while(cur_area_end){
-    cur_size = (unsigned int)cur_area_end - (unsigned int)cur_area_start;
+    cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
       
     Free_Area* cur_area = free_area_new(cur_area_start, cur_size);
     /* successfully create an area */
@@ -254,16 +274,18 @@
     if(p_next_obj){
       obj_unmark_in_vt(p_next_obj);
       obj_clear_dual_bits_in_oi(p_next_obj);
+      /*For_statistic: sum up the size of survived large objects, useful to decide los extension.*/
+      lspace->surviving_size += ALIGN_UP_TO_KILO(vm_object_size(p_next_obj));
     }
 
-    cur_area_start = (void*)ALIGN_UP_TO_KILO((unsigned int)p_prev_obj + vm_object_size(p_prev_obj));
+    cur_area_start = (void*)ALIGN_UP_TO_KILO((POINTER_SIZE_INT)p_prev_obj + vm_object_size(p_prev_obj));
     cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
     
   }
 
    /* cur_area_end == NULL */
   cur_area_end = (void*)ALIGN_DOWN_TO_KILO(lspace->heap_end);
-  cur_size = (unsigned int)cur_area_end - (unsigned int)cur_area_start;
+  cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
   Free_Area* cur_area = free_area_new(cur_area_start, cur_size);
   /* successfully create an area */
   if( cur_area )
@@ -271,6 +293,11 @@
 
    mark_bit_idx = 0;
    assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx));
-   return;
+
+  /*Update survive ratio here. If we tune LOS this time, the ratio is computed by the new committed size.*/
+  /*Fixme: We should keep the surviving size of last time, and set denominator to last_survive + current_alloc*/
+  lspace->survive_ratio = (float)lspace->surviving_size / (float)lspace->committed_heap_size;
+
+  return;
 
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp Sun Jan 28 06:41:08 2007
@@ -34,7 +34,7 @@
   assert(!collector->rem_set);
   
   while(Vector_Block *oi_block = pool_get_entry(remset_pool)){
-    unsigned int *iter = vector_block_iterator_init(oi_block);
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(oi_block);
     while(!vector_block_iterator_end(oi_block, iter)){
       Partial_Reveal_Object *p_target_obj = (Partial_Reveal_Object *)*iter;
       iter = vector_block_iterator_advance(oi_block, iter);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h Sun Jan 28 06:41:08 2007
@@ -26,12 +26,12 @@
 void* mos_alloc(unsigned size, Allocator *allocator);
 
 /* NOS forward obj to MOS in MINOR_COLLECTION */
-inline Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj)
+FORCE_INLINE Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj)
 {
   Obj_Info_Type oi = get_obj_info_raw(p_obj);
 
   /* forwarded by somebody else */
-  if ((unsigned int)oi & FORWARD_BIT){
+  if ((POINTER_SIZE_INT)oi & FORWARD_BIT){
      return NULL;
   }
   
@@ -50,7 +50,7 @@
     
   /* else, take the obj by setting the forwarding flag atomically 
      we don't put a simple bit in vt because we need compute obj size later. */
-  if ((unsigned int)oi != atomic_cas32((unsigned int*)get_obj_info_addr(p_obj), ((unsigned int)p_targ_obj|FORWARD_BIT), (unsigned int)oi)) {
+  if ((void*)oi != atomic_casptr((volatile void**)get_obj_info_addr(p_obj), (void*)((POINTER_SIZE_INT)p_targ_obj|FORWARD_BIT), (void*)oi)) {
     /* forwarded by other, we need unalloc the allocated obj. We may waste some space if the allocation switched
       block. The remaining part of the switched block cannot be revived for next allocation of 
        object that has smaller size than this one. */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h Sun Jan 28 06:41:08 2007
@@ -32,13 +32,13 @@
 inline void* gc_get_tls()
 { 
   void* tls_base = vm_thread_local();
-  return (void*)*(unsigned int*)((char*)tls_base + tls_gc_offset);
+  return (void*)*(POINTER_SIZE_INT*)((char*)tls_base + tls_gc_offset);
 }
 
 inline void gc_set_tls(void* gc_tls_info)
 { 
   void* tls_base = vm_thread_local();
-  *(unsigned int*)((char*)tls_base + tls_gc_offset) = (unsigned int)gc_tls_info;
+  *(POINTER_SIZE_INT*)((char*)tls_base + tls_gc_offset) = (POINTER_SIZE_INT)gc_tls_info;
 }
 
 /* NOTE:: don't change the position of free/ceiling, because the offsets are constants for inlining */
@@ -55,7 +55,7 @@
 inline void thread_local_unalloc(unsigned int size, Allocator* allocator)
 {
   void* free = allocator->free;    
-  allocator->free = (void*)((unsigned int)free - size);
+  allocator->free = (void*)((POINTER_SIZE_INT)free - size);
   return;
 }
 
@@ -63,16 +63,16 @@
 
 inline Partial_Reveal_Object* thread_local_alloc_zeroing(unsigned int size, Allocator* allocator)
 {
-  unsigned int  free = (unsigned int)allocator->free;
-  unsigned int ceiling = (unsigned int)allocator->ceiling;
+  POINTER_SIZE_INT free = (POINTER_SIZE_INT)allocator->free;
+  POINTER_SIZE_INT ceiling = (POINTER_SIZE_INT)allocator->ceiling;
   
-  unsigned int new_free = free + size;
+  POINTER_SIZE_INT new_free = free + size;
   
-  unsigned int block_ceiling = (unsigned int)allocator->end;
+  POINTER_SIZE_INT block_ceiling = (POINTER_SIZE_INT)allocator->end;
   if( new_free > block_ceiling) 
     return NULL;
 
-  unsigned int new_ceiling;
+  POINTER_SIZE_INT new_ceiling;
   new_ceiling =  new_free + ZEROING_SIZE;
   if( new_ceiling > block_ceiling )
     new_ceiling = block_ceiling;
@@ -88,10 +88,10 @@
 
 inline Partial_Reveal_Object* thread_local_alloc(unsigned int size, Allocator* allocator)
 {
-  unsigned int  free = (unsigned int)allocator->free;
-  unsigned int ceiling = (unsigned int)allocator->ceiling;
+  POINTER_SIZE_INT free = (POINTER_SIZE_INT)allocator->free;
+  POINTER_SIZE_INT ceiling = (POINTER_SIZE_INT)allocator->ceiling;
   
-  unsigned int new_free = free + size;
+  POINTER_SIZE_INT new_free = free + size;
     
   if (new_free <= ceiling){
   	allocator->free= (void*)new_free;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp Sun Jan 28 06:41:08 2007
@@ -38,7 +38,7 @@
   }
   
   if(!IGNORE_FINREF )
-    mutator->obj_with_fin = finref_get_free_block();
+    mutator->obj_with_fin = finref_get_free_block(gc);
   else
     mutator->obj_with_fin = NULL;
        
@@ -110,8 +110,6 @@
   Mutator *mutator = gc->mutator_list;
   while (mutator) {
     mutator->rem_set = free_set_pool_get_entry(gc->metadata);
-    if(!IGNORE_FINREF )
-      mutator_reset_obj_with_fin(mutator);
     mutator = mutator->next;
   }  
   return;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp Sun Jan 28 06:41:08 2007
@@ -24,6 +24,33 @@
 
 #include "../finalizer_weakref/finalizer_weakref.h"
 
+//#define GC_OBJ_SIZE_STATISTIC
+
+#ifdef GC_OBJ_SIZE_STATISTIC
+#define GC_OBJ_SIZE_STA_MAX 256*KB
+unsigned int obj_size_distribution_map[GC_OBJ_SIZE_STA_MAX>>10];
+void gc_alloc_statistic_obj_distrubution(unsigned int size)
+{
+    unsigned int sta_precision = 16*KB;
+    unsigned int max_sta_size = 128*KB;
+    unsigned int sta_current = 0;    
+
+    assert(!(GC_OBJ_SIZE_STA_MAX % sta_precision));
+    assert(!(max_sta_size % sta_precision));    
+    while( sta_current < max_sta_size ){
+        if(size < sta_current){
+            unsigned int index = sta_current >> 10;
+            obj_size_distribution_map[index] ++;
+            return;
+        }
+        sta_current += sta_precision;
+    }
+    unsigned int index = sta_current >> 10;
+    obj_size_distribution_map[index]++;
+    return;
+}
+#endif
+
 Managed_Object_Handle gc_alloc(unsigned size, Allocation_Handle ah, void *unused_gc_tls) 
 {
   Managed_Object_Handle p_obj = NULL;
@@ -33,7 +60,11 @@
   assert(ah);
 
   Allocator* allocator = (Allocator*)gc_get_tls();
-   
+
+#ifdef GC_OBJ_SIZE_STATISTIC
+  gc_alloc_statistic_obj_distrubution(size);
+#endif
+
   if ( size > GC_OBJ_SIZE_THRESHOLD )
     p_obj = (Managed_Object_Handle)los_alloc(size, allocator);
   else{
@@ -59,6 +90,10 @@
   
   if(type_has_finalizer((Partial_Reveal_VTable *)ah))
     return NULL;
+
+#ifdef GC_OBJ_SIZE_STATISTIC
+  gc_alloc_statistic_obj_distrubution(size);
+#endif
   
   /* object should be handled specially */
   if ( size > GC_OBJ_SIZE_THRESHOLD ) return NULL;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp Sun Jan 28 06:41:08 2007
@@ -47,16 +47,17 @@
 
   void* reserved_base = start;
   /* commit fspace mem */    
-  vm_commit_mem(reserved_base, commit_size);
+  if(!large_page_hint)    
+    vm_commit_mem(reserved_base, commit_size);
   memset(reserved_base, 0, commit_size);
   
   fspace->committed_heap_size = commit_size;
   fspace->heap_start = reserved_base;
 
 #ifdef STATIC_NOS_MAPPING
-  fspace->heap_end = (void *)((unsigned int)reserved_base + fspace->reserved_heap_size);
+  fspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + fspace->reserved_heap_size);
 #else /* for dynamic mapping, nos->heap_end is gc->heap_end */
-  fspace->heap_end = (void *)((unsigned int)reserved_base + fspace->committed_heap_size);
+  fspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + fspace->committed_heap_size);
 #endif
 
   fspace->num_managed_blocks = commit_size >> GC_BLOCK_SHIFT_COUNT;
@@ -111,6 +112,7 @@
     fspace->free_block_idx = first_idx;
     fspace->ceiling_block_idx = first_idx + fspace->num_managed_blocks - 1;  
     forward_first_half = TRUE; /* only useful for not-FORWARD_ALL*/
+	fspace->num_used_blocks = 0;
   
   }else{    
     if(forward_first_half){
@@ -124,6 +126,7 @@
       marked_start_idx = 0;
       marked_last_idx = ((Block_Header*)object_forwarding_boundary)->block_idx - 1 - first_idx;
     }
+    fspace->num_used_blocks = marked_last_idx - marked_start_idx + 1;
     forward_first_half = forward_first_half^1;
   }
   
@@ -140,10 +143,8 @@
     block->status = BLOCK_FREE; 
     block->free = block->base;
 
-    num_freed ++;
   }
 
-  fspace->num_used_blocks = fspace->num_used_blocks - num_freed;
   return;
 }
 
@@ -159,7 +160,13 @@
   
   GC* gc = fspace->gc;
   mspace_free_block_idx = ((GC_Gen*)gc)->mos->free_block_idx;
-    
+
+  if(gc_is_gen_mode()){
+    fspace->collect_algorithm = MINOR_GEN_FORWARD_POOL;
+  }else{
+    fspace->collect_algorithm = MINOR_NONGEN_FORWARD_POOL;
+  }
+  
   /* we should not destruct rootset structure in case we need fall back */
   pool_iterator_init(gc->metadata->gc_rootset_pool);
 
@@ -178,7 +185,7 @@
       break;
         
     default:
-      printf("\nSpecified minor collection algorithm doesn't exist in built module!\n");
+      printf("\nSpecified minor collection algorithm doesn't exist!\n");
       exit(0);    
       break;
   }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h Sun Jan 28 06:41:08 2007
@@ -45,6 +45,10 @@
   unsigned int collect_algorithm;
   GC* gc;
   Boolean move_object;
+  /*Size allocated after last collection. Not available in fspace now.*/
+  unsigned int alloced_size;
+  /*For_statistic: not available now for fspace*/  
+  unsigned int surviving_size;
   /* END of Space --> */
 
   Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */
@@ -64,11 +68,6 @@
 void fspace_initialize(GC* gc, void* start, unsigned int fspace_size, unsigned int commit_size);
 void fspace_destruct(Fspace *fspace);
 
-inline Boolean obj_is_dead_in_minor_forward_gc(Collector *collector, Partial_Reveal_Object *p_obj)
-{
-  return (!obj_is_marked_or_fw_in_oi(p_obj)) ;
-}
-
 void* fspace_alloc(unsigned size, Allocator *allocator);
 
 void fspace_reset_for_allocation(Fspace* fspace);
@@ -88,5 +87,5 @@
 
 
 void fspace_collection(Fspace* fspace);
-  
+
 #endif // _FROM_SPACE_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp Sun Jan 28 06:41:08 2007
@@ -51,7 +51,7 @@
 #else
     /* the first-time zeroing area includes block header, to make subsequent allocs page aligned */
     unsigned int zeroing_size = ZEROING_SIZE - GC_BLOCK_HEADER_SIZE_BYTES;
-    allocator->ceiling = (void*)((unsigned int)new_free + zeroing_size);
+    allocator->ceiling = (void*)((POINTER_SIZE_INT)new_free + zeroing_size);
     memset(new_free, 0, zeroing_size);
 
 #endif /* #ifndef ALLOC_ZEROING */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp Sun Jan 28 06:41:08 2007
@@ -24,13 +24,13 @@
 #include "../common/gc_metadata.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
-static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
+static FORCE_INLINE Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
 {
   assert(obj_belongs_to_nos(p_obj));  
   return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
 }
 
-static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) 
+static FORCE_INLINE void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) 
 {
   Partial_Reveal_Object *p_obj = *p_ref;
   if (p_obj == NULL) return;  
@@ -43,7 +43,7 @@
   return;
 }
 
-static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) 
+static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) 
 {
   if (!object_has_ref_field(p_obj)) return;
     
@@ -90,7 +90,7 @@
 
 #include "../verify/verify_live_heap.h"
 
-static void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) 
+static FORCE_INLINE void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) 
 {
   Space* space = collector->collect_space; 
   GC* gc = collector->gc;
@@ -184,7 +184,7 @@
 
   /* first step: copy all root objects to trace tasks. */ 
   while(root_set){
-    unsigned int* iter = vector_block_iterator_init(root_set);
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
     while(!vector_block_iterator_end(root_set,iter)){
       Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
       iter = vector_block_iterator_advance(root_set,iter);
@@ -205,7 +205,7 @@
   Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
 
   while(trace_task){    
-    unsigned int* iter = vector_block_iterator_init(trace_task);
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
     while(!vector_block_iterator_end(trace_task,iter)){
       Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
       iter = vector_block_iterator_advance(trace_task,iter);
@@ -266,48 +266,19 @@
 #ifndef BUILD_IN_REFERENT
   else {
       gc_set_weakref_sets(gc);
-      update_ref_ignore_finref(collector);
+      gc_update_weakref_ignore_finref(gc);
     }
 #endif
   
   gc_fix_rootset(collector);
   
-  if(!IGNORE_FINREF )
-    gc_put_finref_to_vm(gc);
-  
   fspace_reset_for_allocation(space);  
 
   return;
   
 }
 
-void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref)
+void trace_obj_in_gen_fw(Collector *collector, void *p_ref)
 {
-  GC *gc = collector->gc;
-  GC_Metadata* metadata = gc->metadata;
-  
-  collector->trace_stack = free_task_pool_get_entry(metadata);
-  collector_tracestack_push(collector, p_ref);
-  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
-  
-//collector->rep_set = free_set_pool_get_entry(metadata); /* has got collector->rep_set in caller */
-  collector->trace_stack = free_task_pool_get_entry(metadata);
-  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
-  while(trace_task){    
-    unsigned int* iter = vector_block_iterator_init(trace_task);
-    while(!vector_block_iterator_end(trace_task,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
-      iter = vector_block_iterator_advance(trace_task,iter);
-      assert(*p_ref);
-      trace_object(collector, p_ref);
-    }
-    vector_stack_clear(trace_task);
-    pool_put_entry(metadata->free_task_pool, trace_task);
-    trace_task = pool_get_entry(metadata->mark_task_pool);
-  }
-  
-  trace_task = (Vector_Block*)collector->trace_stack;
-  vector_stack_clear(trace_task);
-  pool_put_entry(metadata->free_task_pool, trace_task);   
-  collector->trace_stack = NULL;
+  trace_object(collector, (Partial_Reveal_Object **)p_ref);
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp Sun Jan 28 06:41:08 2007
@@ -27,7 +27,7 @@
 
 #ifdef MARK_BIT_FLIPPING
 
-static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) 
+static FORCE_INLINE void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) 
 {
   Partial_Reveal_Object *p_obj = *p_ref;
   if(p_obj == NULL) return;  
@@ -36,7 +36,7 @@
   return;
 }
 
-static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) 
+static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) 
 {
   if (!object_has_ref_field_before_scan(p_obj)) return;
     
@@ -46,7 +46,7 @@
   
     Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
     unsigned int array_length = array->array_len; 
-    p_ref = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array));
+    p_ref = (Partial_Reveal_Object**)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
 
     for (unsigned int i = 0; i < array_length; i++) {
       scan_slot(collector, p_ref+i);
@@ -81,7 +81,7 @@
 */
 
 #include "../verify/verify_live_heap.h"
-static void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) 
+static FORCE_INLINE void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) 
 {
   GC* gc = collector->gc;
   Partial_Reveal_Object *p_obj = *p_ref;
@@ -157,7 +157,7 @@
 
   /* first step: copy all root objects to trace tasks. */ 
   while(root_set){
-    unsigned int* iter = vector_block_iterator_init(root_set);
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
     while(!vector_block_iterator_end(root_set,iter)){
       Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
       iter = vector_block_iterator_advance(root_set,iter);
@@ -179,7 +179,7 @@
   Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
 
   while(trace_task){    
-    unsigned int* iter = vector_block_iterator_init(trace_task);
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
     while(!vector_block_iterator_end(trace_task,iter)){
       Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
       iter = vector_block_iterator_advance(trace_task,iter);
@@ -237,19 +237,21 @@
 #ifndef BUILD_IN_REFERENT
   else {
       gc_set_weakref_sets(gc);
-      update_ref_ignore_finref(collector);
+      gc_update_weakref_ignore_finref(gc);
     }
 #endif
   
   gc_fix_rootset(collector);
   
-  if(!IGNORE_FINREF )
-    gc_put_finref_to_vm(gc);
-  
-  fspace_reset_for_allocation(space);  
+  fspace_reset_for_allocation(space);
 
   return;
   
+}
+
+void trace_obj_in_nongen_fw(Collector *collector, void *p_ref)
+{
+  trace_object(collector, (Partial_Reveal_Object **)p_ref);
 }
 
 #endif /* MARK_BIT_FLIPPING */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bit_ops.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bit_ops.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bit_ops.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bit_ops.h Sun Jan 28 06:41:08 2007
@@ -52,7 +52,7 @@
 {
   unsigned int bit_offset;
   
-  assert((start_idx >= 0) && (start_idx < 128));
+  assert(start_idx < 128);
   
   unsigned int start_word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD;
   unsigned int start_bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD;
@@ -79,9 +79,9 @@
 
 inline void words_set_bit(unsigned int* words, unsigned int count, unsigned int start_idx)
 {
-  assert((start_idx >= 0) && (start_idx < 128));
+  assert(start_idx < 128);
   
-  unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD;	
+  unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD;  
   unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD;
   
   if(word_index >= count) return;
@@ -98,7 +98,7 @@
 
 inline void words_clear_bit(unsigned int* words, unsigned int count, unsigned int start_idx)
 {
-  assert((start_idx >= 0) && (start_idx < 128));
+  assert(start_idx < 128);
   
   unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD;
   unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_queue.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_queue.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_queue.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_queue.h Sun Jan 28 06:41:08 2007
@@ -72,24 +72,24 @@
 	node->value = value;
 	node->next.ptr = NULL;
 	while(TRUE){
-		QLINK_VAL(tail) = QLINK_VAL(queue->tail);
-		QLINK_VAL(next) = QLINK_VAL(tail.ptr->next);
-		if( QLINK_VAL(tail) == QLINK_VAL(queue->tail)){
-			if( next.ptr==NULL ){
-				tmp1.ptr = node;
-				tmp1.count = next.count + 1;
-				node->next.count = tmp1.count; 
-				QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(tail.ptr->next), QLINK_VAL(next), QLINK_VAL(tmp1))
-				if( QLINK_VAL(tmp1) == QLINK_VAL(tmp2))
-				  break;
-				  
-			}else{
-				tmp1.ptr = next.ptr;
-				tmp1.count = tail.count + 1;
-				atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
-			}
-		}
-	}
+  	QLINK_VAL(tail) = QLINK_VAL(queue->tail);
+  	QLINK_VAL(next) = QLINK_VAL(tail.ptr->next);
+  	if( QLINK_VAL(tail) == QLINK_VAL(queue->tail)){
+    	if( next.ptr==NULL ){
+      	tmp1.ptr = node;
+      	tmp1.count = next.count + 1;
+      	node->next.count = tmp1.count; 
+      	QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(tail.ptr->next), QLINK_VAL(next), QLINK_VAL(tmp1))
+      	if( QLINK_VAL(tmp1) == QLINK_VAL(tmp2))
+          break;
+          
+      }else{
+      	tmp1.ptr = next.ptr;
+      	tmp1.count = tail.count + 1;
+      	atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
+      }
+    }
+  }
 	tmp1.ptr = node;
 	tmp1.count = tail.count + 1;
 	atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
@@ -100,31 +100,31 @@
 {
 	Queue_Link head, tail, next, tmp1, tmp2;
 	while(TRUE){
-		QLINK_VAL(head) = QLINK_VAL(queue->head);
-		QLINK_VAL(tail) = QLINK_VAL(queue->tail);
-		QLINK_VAL(next) = QLINK_VAL(head.ptr->next);
-		
-		if( QLINK_VAL(head) == QLINK_VAL(queue->head)){
-			if( head.ptr== tail.ptr )
-				if( next.ptr == NULL )
-					return FALSE;
-				else{
-					tmp1.ptr = next.ptr;
-					tmp1.count = tail.count+1;
+  	QLINK_VAL(head) = QLINK_VAL(queue->head);
+  	QLINK_VAL(tail) = QLINK_VAL(queue->tail);
+  	QLINK_VAL(next) = QLINK_VAL(head.ptr->next);
+    
+  	if( QLINK_VAL(head) == QLINK_VAL(queue->head)){
+    	if( head.ptr== tail.ptr )
+      	if( next.ptr == NULL )
+        	return FALSE;
+      	else{
+        	tmp1.ptr = next.ptr;
+        	tmp1.count = tail.count+1;
         	atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
-				}
-			else{
-				*pvalue = next.ptr->value;
-				tmp1.ptr = next.ptr;
-				tmp1.count = head.count+1;
-				QLINK_VAL(tmp2) =	atomic_cas64(QLINK_PTR(queue->head), QLINK_VAL(head), QLINK_VAL(tmp1));
-				if( QLINK_VAL(tmp2) == QLINK_VAL(tmp1))
-					break;
-			}
-		}
-	}
+        }
+    	else{
+        *pvalue = next.ptr->value;
+      	tmp1.ptr = next.ptr;
+      	tmp1.count = head.count+1;
+      	QLINK_VAL(tmp2) =	atomic_cas64(QLINK_PTR(queue->head), QLINK_VAL(head), QLINK_VAL(tmp1));
+      	if( QLINK_VAL(tmp2) == QLINK_VAL(tmp1))
+        	break;
+      }
+    }
+  }
 	free( head.ptr );
 	return TRUE;
 }
-	
+  
 #endif /* _SYNC_QUEUE_H_ */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h Sun Jan 28 06:41:08 2007
@@ -33,22 +33,32 @@
 /*
  * ATTENTION: only for reference
  * Perhaps in some platforms compilers compile this struct in a way different from what we expect
- */
+ * GCC requires to specify "packed" attribute
+#ifdef __linux__
+typedef struct Stack_Top{
+  POINTER_SIZE_INT version: SYNC_STACK_VERSION_MASK_SHIFT;
+  POINTER_SIZE_INT entry: (BITS_OF_POINTER_SIZE_INT-SYNC_STACK_VERSION_MASK_SHIFT);
+}Stack_Top __attribute__((packed));
+#else
 typedef struct Stack_Top{
-  unsigned int version: SYNC_STACK_VERSION_MASK_SHIFT;
-  unsigned int entry: (32-SYNC_STACK_VERSION_MASK_SHIFT);
+  POINTER_SIZE_INT version: SYNC_STACK_VERSION_MASK_SHIFT;
+  POINTER_SIZE_INT entry: (BITS_OF_POINTER_SIZE_INT-SYNC_STACK_VERSION_MASK_SHIFT);
 }Stack_Top;
+#endif
+ */
+
+typedef POINTER_SIZE_INT Stack_Top;
 
 typedef struct Sync_Stack{
   Stack_Top top; /* pointing to the first filled entry */
   Node* cur; /* pointing to the current accessed entry, only for iterator */
 }Sync_Stack;
 
-#define stack_top_get_entry(top) ((Node*)((*(unsigned int*)&(top)) & ~SYNC_STACK_VERSION_MASK))
+#define stack_top_get_entry(top) ((Node*)((*(POINTER_SIZE_INT*)&(top)) & ~SYNC_STACK_VERSION_MASK))
 /* The alternative way: (Node*)(top.entry<<SYNC_STACK_VERSION_MASK_SHIFT) */
-#define stack_top_get_version(top) ((*(unsigned int*)&(top)) & SYNC_STACK_VERSION_MASK)
+#define stack_top_get_version(top) ((*(POINTER_SIZE_INT*)&(top)) & SYNC_STACK_VERSION_MASK)
 /* The alternative way: (top.version) */
-#define stack_top_contruct(entry, version) ((unsigned int)(entry) | (version))
+#define stack_top_contruct(entry, version) ((POINTER_SIZE_INT)(entry) | (version))
 #define stack_top_get_next_version(top) ((stack_top_get_version(top) + 1) & SYNC_STACK_VERSION_MASK)
 
 inline Sync_Stack* sync_stack_init()
@@ -57,7 +67,7 @@
   Sync_Stack* stack = (Sync_Stack*)STD_MALLOC(size);
   memset(stack, 0, size);
   stack->cur = NULL;
-  unsigned int temp_top = 0;
+  POINTER_SIZE_INT temp_top = 0;
   stack->top = *(Stack_Top*)&temp_top;
   return stack;
 }
@@ -92,13 +102,13 @@
 {
   Stack_Top cur_top = stack->top;
   Node* top_entry = stack_top_get_entry(cur_top);
-  unsigned int version = stack_top_get_version(cur_top);
+  POINTER_SIZE_INT version = stack_top_get_version(cur_top);
   
   while( top_entry != NULL ){
-    unsigned int temp = stack_top_contruct(top_entry->next, version);
+    POINTER_SIZE_INT temp = stack_top_contruct(top_entry->next, version);
     Stack_Top new_top = *(Stack_Top*)&temp;
-    temp = (unsigned int)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top);
-    if(temp == *(unsigned int*)&cur_top){ /* got it */ 
+    temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top);
+    if(temp == *(POINTER_SIZE_INT*)&cur_top){ /* got it */ 
       top_entry->next = NULL;
       return top_entry;
     }
@@ -113,13 +123,13 @@
 {
   Stack_Top cur_top = stack->top;
   node->next = stack_top_get_entry(cur_top);
-  unsigned int new_version = stack_top_get_next_version(cur_top);
-  unsigned int temp = stack_top_contruct(node, new_version);
+  POINTER_SIZE_INT new_version = stack_top_get_next_version(cur_top);
+  POINTER_SIZE_INT temp = stack_top_contruct(node, new_version);
   Stack_Top new_top = *(Stack_Top*)&temp;
   
   while( TRUE ){
-    temp = (unsigned int)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top);
-    if(temp == *(unsigned int*)&cur_top){ /* got it */  
+    temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top);
+    if(temp == *(POINTER_SIZE_INT*)&cur_top){ /* got it */  
       return TRUE;
     }
     cur_top = stack->top;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/vector_block.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/vector_block.h?view=diff&rev=500803&r1=500802&r2=500803
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/vector_block.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/vector_block.h Sun Jan 28 06:41:08 2007
@@ -23,46 +23,47 @@
 
 typedef struct Vector_Block{
   void* next; /* point to next block */
-  unsigned int* head;  /* point to the first filled entry */
-  unsigned int* tail;  /* point to the entry after the last filled one */
-  unsigned int* heap_end;   /* point to heap_end of the block (right after the last entry) */
-  unsigned int entries[1];
+  POINTER_SIZE_INT* head;  /* point to the first filled entry */
+  POINTER_SIZE_INT* tail;  /* point to the entry after the last filled one */
+  POINTER_SIZE_INT* heap_end;   /* point to heap_end of the block (right after the last entry) */
+  POINTER_SIZE_INT entries[1];
 }Vector_Block;
 
 
 /* this size better be 2's power */
 #define VECTOR_BLOCK_DATA_SIZE_BYTES (2*KB)
 
-#define VECTOR_BLOCK_HEADER_SIZE_BYTES ((unsigned int)((Vector_Block*)0)->entries)
-#define VECTOR_BLOCK_ENTRY_NUM ((VECTOR_BLOCK_DATA_SIZE_BYTES - VECTOR_BLOCK_HEADER_SIZE_BYTES) >> BIT_SHIFT_TO_BYTES_PER_WORD)
+#define VECTOR_BLOCK_HEADER_SIZE_BYTES ((POINTER_SIZE_INT)((Vector_Block*)0)->entries)
+#define VECTOR_BLOCK_ENTRY_NUM ((VECTOR_BLOCK_DATA_SIZE_BYTES - VECTOR_BLOCK_HEADER_SIZE_BYTES) >> BIT_SHIFT_TO_BYTES_OF_POINTER_SIZE_INT )
 
 inline void vector_block_init(Vector_Block* block, unsigned int size)
 {
-  block->heap_end = (unsigned int*)((unsigned int)block + size);
-  block->head = (unsigned int*)block->entries;
-  block->tail = (unsigned int*)block->entries;
-  memset(block->entries, 0, (block->heap_end - (unsigned int*)block->entries)*BYTES_PER_WORD);
+  block->heap_end = (POINTER_SIZE_INT*)((POINTER_SIZE_INT)block + size);
+  block->head = (POINTER_SIZE_INT*)block->entries;
+  block->tail = (POINTER_SIZE_INT*)block->entries;
+  memset(block->entries, 0, (POINTER_SIZE_INT)block->heap_end - (POINTER_SIZE_INT)block->entries);
   return;  
 }
 
 inline unsigned int vector_block_entry_count(Vector_Block* block)
 { return (unsigned int)(block->tail - block->head); }
 
-/*
+
 inline Boolean vector_block_is_full(Vector_Block* block)
 { return block->tail == block->heap_end; }
 
+/*
 inline Boolean vector_block_is_empty(Vector_Block* block)
 { return block->tail == block->head; }
-*/
 
 inline Boolean vector_block_is_full(Vector_Block* block)
 { return (block->tail - block->entries) == VECTOR_BLOCK_ENTRY_NUM; }
+*/
 
 inline Boolean vector_block_is_empty(Vector_Block* block)
 { return block->tail == block->entries; }
 
-inline void vector_block_add_entry(Vector_Block* block, unsigned int value)
+inline void vector_block_add_entry(Vector_Block* block, POINTER_SIZE_INT value)
 {
 #ifdef _DEBUG 
   assert(value && !*(block->tail));
@@ -73,21 +74,21 @@
 
 inline void vector_block_clear(Vector_Block* block)
 {
-  block->head = (unsigned int*)block->entries;
-  block->tail = (unsigned int*)block->entries;
+  block->head = (POINTER_SIZE_INT*)block->entries;
+  block->tail = (POINTER_SIZE_INT*)block->entries;
 #ifdef _DEBUG
-  memset(block->entries, 0, (block->heap_end - (unsigned int*)block->entries)*BYTES_PER_WORD);
+ memset(block->entries, 0, (POINTER_SIZE_INT)block->heap_end - (POINTER_SIZE_INT)block->entries);
 #endif
 }
 
 /* Below is for sequential local access */
-inline unsigned int* vector_block_iterator_init(Vector_Block* block)
+inline POINTER_SIZE_INT* vector_block_iterator_init(Vector_Block* block)
 {  return block->head;  }
 
-inline unsigned int* vector_block_iterator_advance(Vector_Block* block, unsigned int* iter)
+inline POINTER_SIZE_INT* vector_block_iterator_advance(Vector_Block* block, POINTER_SIZE_INT* iter)
 {  return ++iter; }
 
-inline Boolean vector_block_iterator_end(Vector_Block* block, unsigned int* iter)
+inline Boolean vector_block_iterator_end(Vector_Block* block, POINTER_SIZE_INT* iter)
 {  return iter == block->tail; }
 
 
@@ -102,22 +103,22 @@
 {
   vector_stack_init(block);
 #ifdef _DEBUG
-  memset(block->entries, 0, (block->heap_end - (unsigned int*)block->entries)*BYTES_PER_WORD);
+  memset(block->entries, 0, (POINTER_SIZE_INT)block->heap_end - (POINTER_SIZE_INT)block->entries);
 #endif
 }
 
-/*
 inline Boolean vector_stack_is_empty(Vector_Block* block)
 {  return (block->head == block->tail); }
-*/
 
+/*
 inline Boolean vector_stack_is_empty(Vector_Block* block)
 { return (block->head - block->entries) == VECTOR_BLOCK_ENTRY_NUM; }
+*/
 
 inline Boolean vector_stack_is_full(Vector_Block* block)
 {  return (block->head == block->entries); }
 
-inline void vector_stack_push(Vector_Block* block, unsigned int value)
+inline void vector_stack_push(Vector_Block* block, POINTER_SIZE_INT value)
 { 
   block->head--;
 #ifdef _DEBUG
@@ -126,9 +127,9 @@
   *(block->head) = value;
 }
 
-inline unsigned int vector_stack_pop(Vector_Block* block)
+inline POINTER_SIZE_INT vector_stack_pop(Vector_Block* block)
 {   
-  unsigned int value = *block->head;
+  POINTER_SIZE_INT value = *block->head;
 #ifdef _DEBUG
   *block->head = 0;
 #endif
@@ -138,7 +139,7 @@
 
 inline void vector_block_integrity_check(Vector_Block* block)
 {
-  unsigned int* iter = vector_block_iterator_init(block);
+  POINTER_SIZE_INT* iter = vector_block_iterator_init(block);
   while(!vector_block_iterator_end(block, iter)){
     assert(*iter);
     iter = vector_block_iterator_advance(block, iter);



Mime
View raw message