harmony-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From x..@apache.org
Subject svn commit: r566913 [2/2] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ los/ mark_compact/ mark_sweep/ thread/ trace_forward/ utils/ verify/
Date Fri, 17 Aug 2007 04:33:10 GMT
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp Thu Aug 16 21:33:02 2007
@@ -17,162 +17,80 @@
 
 #include "sspace.h"
 #include "sspace_chunk.h"
-#include "sspace_mark_sweep.h"
+//#include "sspace_mark_sweep.h"
+#include "sspace_alloc.h"
 #include "gc_ms.h"
 #include "../gen/gen.h"
 
-static Boolean slot_is_alloc_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
-{
-  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
-  unsigned int word_index = color_bits_index / BITS_PER_WORD;
-  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
-  
-  return (Boolean)(table[word_index] & (cur_alloc_color << index_in_word));
-}
-
-static void alloc_slot_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
-{
-  assert(!slot_is_alloc_in_table(table, slot_index));
-  
-  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
-  unsigned int word_index = color_bits_index / BITS_PER_WORD;
-  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
-  
-  table[word_index] |= cur_alloc_color << index_in_word;
-}
 
-static unsigned int first_free_index_in_color_word(POINTER_SIZE_INT word)
+/* Only used in pfc_set_slot_index() */
+inline unsigned int first_free_index_in_color_word(POINTER_SIZE_INT word, POINTER_SIZE_INT alloc_color)
 {
-  unsigned int index = 0;
-  
-  while(index < BITS_PER_WORD){
-    if(!(word & (cur_mark_color << index)))
+  for(unsigned int index = 0; index < BITS_PER_WORD; index += COLOR_BITS_PER_OBJ)
+    if(!(word & (alloc_color << index)))
       return index;
-    index += COLOR_BITS_PER_OBJ;
-  }
   
   assert(0);  /* There must be a free obj in this table word */
   return MAX_SLOT_INDEX;
 }
 
-static Boolean next_free_index_in_color_word(POINTER_SIZE_INT word, unsigned int &index)
-{
-  while(index < BITS_PER_WORD){
-    if(!(word & (cur_alloc_color << index)))
-      return TRUE;
-    index += COLOR_BITS_PER_OBJ;
-  }
-  return FALSE;
-}
-
-static unsigned int composed_slot_index(unsigned int word_index, unsigned int index_in_word)
-{
-  unsigned int color_bits_index = word_index*BITS_PER_WORD + index_in_word;
-  return color_bits_index/COLOR_BITS_PER_OBJ;
-}
-
-static unsigned int next_free_slot_index_in_table(POINTER_SIZE_INT *table, unsigned int slot_index, unsigned int slot_num)
-{
-  assert(slot_is_alloc_in_table(table, slot_index));
-  
-  unsigned int max_word_index = ((slot_num-1) * COLOR_BITS_PER_OBJ) / BITS_PER_WORD;
-  Boolean found = FALSE;
-  
-  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
-  unsigned int word_index = color_bits_index / BITS_PER_WORD;
-  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
-  
-  while(word_index < max_word_index){
-    found = next_free_index_in_color_word(table[word_index], index_in_word);
-    if(found)
-      return composed_slot_index(word_index, index_in_word);
-    ++word_index;
-    index_in_word = 0;
-   }
-  
-  index_in_word = 0;
-  found = next_free_index_in_color_word(table[word_index], index_in_word);
-  if(found)
-    return composed_slot_index(word_index, index_in_word);
-  
-  return MAX_SLOT_INDEX;
-}
-
-/* Used for collecting pfc */
-void chunk_set_slot_index(Chunk_Header* chunk, unsigned int first_free_word_index)
+/* Given the index of a word in the table, set the pfc's slot_index.
+ * The argument alloc_color may be either cur_alloc_color or cur_mark_color,
+ * depending on the phase in which this function is called.
+ * In the sweeping phase, sspace has been marked but the alloc and mark colors have not yet been flipped,
+ * so we have to use cur_mark_color as alloc_color.
+ * In the compaction phase, the two colors have already been flipped, so we use cur_alloc_color.
+ */
+void pfc_set_slot_index(Chunk_Header *chunk, unsigned int first_free_word_index, POINTER_SIZE_INT alloc_color)
 {
-  unsigned int index_in_word = first_free_index_in_color_word(chunk->table[first_free_word_index]);
+  unsigned int index_in_word = first_free_index_in_color_word(chunk->table[first_free_word_index], alloc_color);
   assert(index_in_word != MAX_SLOT_INDEX);
   chunk->slot_index = composed_slot_index(first_free_word_index, index_in_word);
 }
 
-
-/* 1. No need of synchronization. This is a allocator local chunk no matter it is a small or medium obj chunk.
- * 2. If this chunk runs out of space, clear the chunk pointer.
- *    So it is important to give an argument which is a local chunk pointer of a allocator while invoking this func.
- */
-static void *alloc_in_chunk(Chunk_Header* &chunk)
+/* From the table's beginning search the first free slot, and set it to pfc's slot_index */
+void pfc_reset_slot_index(Chunk_Header *chunk)
 {
   POINTER_SIZE_INT *table = chunk->table;
-  unsigned int slot_index = chunk->slot_index;
   
-  void *p_obj = (void*)((POINTER_SIZE_INT)chunk->base + ((POINTER_SIZE_INT)chunk->slot_size * slot_index));
-  alloc_slot_in_table(table, slot_index);
-  if(chunk->status & CHUNK_NEED_ZEROING)
-    memset(p_obj, 0, chunk->slot_size);
-#ifdef SSPACE_VERIFY
-  sspace_verify_free_area((POINTER_SIZE_INT*)p_obj, chunk->slot_size);
-#endif
-  
-  chunk->slot_index = next_free_slot_index_in_table(table, slot_index, chunk->slot_num);
-  if(chunk->slot_index == MAX_SLOT_INDEX){
-    chunk->status = CHUNK_USED | CHUNK_NORMAL;
-    chunk = NULL;
+  unsigned int index_word_num = (chunk->slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  for(unsigned int i=0; i<index_word_num; ++i){
+    if(table[i] != cur_alloc_mask){
+      pfc_set_slot_index(chunk, i, cur_alloc_color);
+      return;
+    }
   }
-  
-  return p_obj;
 }
 
-/* alloc small without-fin object in sspace without getting new free chunk */
+/* Alloc small without-fin object in sspace without getting new free chunk */
 void *sspace_thread_local_alloc(unsigned size, Allocator *allocator)
 {
-  if(size > SUPER_OBJ_THRESHOLD) return NULL;
+  if(size > LARGE_OBJ_THRESHOLD) return NULL;
   
   Sspace *sspace = gc_get_sspace(allocator->gc);
-  void *p_obj = NULL;
   
-  unsigned int seg_index = 0;
-  Size_Segment *size_seg = sspace->size_segments[0];
+  /* Flexible alloc mechanism:
+  Size_Segment *size_seg = sspace_get_size_seg(sspace, size);
+  unsigned int seg_index = size_seg->seg_index;
+  */
+  unsigned int seg_index = (size-GC_OBJECT_ALIGNMENT) / MEDIUM_OBJ_THRESHOLD;
+  assert(seg_index <= 2);
+  Size_Segment *size_seg = sspace->size_segments[seg_index];
+  assert(size_seg->local_alloc);
   
-  for(; seg_index < SIZE_SEGMENT_NUM; ++seg_index, ++size_seg)
-    if(size <= size_seg->size_max) break;
-  assert(seg_index < SIZE_SEGMENT_NUM);
-  
-  size = NORMAL_SIZE_ROUNDUP(size, size_seg);
+  size = (unsigned int)NORMAL_SIZE_ROUNDUP(size, size_seg);
   unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_seg);
-  Boolean local_alloc = size_seg->local_alloc;
-  Chunk_Header *chunk = NULL;
   
-  if(local_alloc){
-    Chunk_Header **chunks = allocator->local_chunks[seg_index];
-    chunk = chunks[index];
-    if(!chunk){
-      chunk = sspace_get_pfc(sspace, seg_index, index);
-      //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index);
-      if(!chunk) return NULL;
-      chunk->status |= CHUNK_IN_USE;
-      chunks[index] = chunk;
-    }
-    p_obj = alloc_in_chunk(chunks[index]);
-  } else {
+  Chunk_Header **chunks = allocator->local_chunks[seg_index];
+  Chunk_Header *chunk = chunks[index];
+  if(!chunk){
     chunk = sspace_get_pfc(sspace, seg_index, index);
     //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index);
     if(!chunk) return NULL;
-    p_obj = alloc_in_chunk(chunk);
-    if(chunk)
-      sspace_put_pfc(sspace, chunk);
+    chunk->status |= CHUNK_IN_USE;
+    chunks[index] = chunk;
   }
-  
+  void *p_obj = alloc_in_chunk(chunks[index]);
   assert(p_obj);
 
 #ifdef SSPACE_ALLOC_INFO
@@ -184,23 +102,19 @@
 
   return p_obj;
 }
+
 static void *sspace_alloc_normal_obj(Sspace *sspace, unsigned size, Allocator *allocator)
 {
-  void *p_obj = NULL;
-  
-  unsigned int seg_index = 0;
-  Size_Segment *size_seg = sspace->size_segments[0];
+  Size_Segment *size_seg = sspace_get_size_seg(sspace, size);
+  unsigned int seg_index = size_seg->seg_index;
   
-  for(; seg_index < SIZE_SEGMENT_NUM; ++seg_index, ++size_seg)
-    if(size <= size_seg->size_max) break;
-  assert(seg_index < SIZE_SEGMENT_NUM);
-  
-  size = NORMAL_SIZE_ROUNDUP(size, size_seg);
+  size = (unsigned int)NORMAL_SIZE_ROUNDUP(size, size_seg);
   unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_seg);
-  Boolean local_alloc = size_seg->local_alloc;
+  
   Chunk_Header *chunk = NULL;
+  void *p_obj = NULL;
   
-  if(local_alloc){
+  if(size_seg->local_alloc){
     Chunk_Header **chunks = allocator->local_chunks[seg_index];
     chunk = chunks[index];
     if(!chunk){
@@ -223,6 +137,8 @@
     }
     //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index);
     if(!chunk) return NULL;
+    assert(chunk->alloc_num < chunk->slot_num);
+    ++chunk->alloc_num;
     p_obj = alloc_in_chunk(chunk);
     if(chunk)
       sspace_put_pfc(sspace, chunk);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp Thu Aug 16 21:33:02 2007
@@ -85,7 +85,7 @@
   
   /* Init the first free chunk: from heap start to heap end */
   Free_Chunk *free_chunk = (Free_Chunk*)sspace->heap_start;
-  free_chunk->adj_next = (Chunk_Heaer_Basic*)sspace->heap_end;
+  free_chunk->adj_next = (Chunk_Header_Basic*)sspace->heap_end;
   POINTER_SIZE_INT chunk_size = sspace->reserved_heap_size;
   assert(chunk_size > CHUNK_GRANULARITY && !(chunk_size % CHUNK_GRANULARITY));
   sspace_put_free_chunk(sspace, free_chunk);
@@ -103,12 +103,6 @@
   steal_flag = steal_threshold ? FALSE : TRUE;
 }
 
-static void empty_pool(Pool *pool)
-{
-  pool->top = (Stack_Top)NULL;
-  pool->cur = NULL;
-}
-
 void sspace_clear_chunk_list(GC *gc)
 {
   unsigned int i, j;
@@ -119,7 +113,7 @@
     for(j = size_segments[i]->chunk_num; j--;){
       Pool *pool = pfc_pools[i][j];
       pfc_pool_set_steal_flag(pool, steal_threshold, pfc_steal_flags[i][j]);
-      empty_pool(pool);
+      pool_empty(pool);
     }
   }
   
@@ -202,15 +196,15 @@
 {
   assert(CHUNK_SIZE(chunk) > NORMAL_CHUNK_SIZE_BYTES);
   
-  Chunk_Heaer_Basic *adj_next = chunk->adj_next;
+  Chunk_Header_Basic *adj_next = chunk->adj_next;
   Free_Chunk *normal_chunk = (Free_Chunk*)(((POINTER_SIZE_INT)chunk + NORMAL_CHUNK_SIZE_BYTES-1) & NORMAL_CHUNK_HIGH_MASK);
   
   if(chunk != normal_chunk){
     assert(chunk < normal_chunk);
-    chunk->adj_next = (Chunk_Heaer_Basic*)normal_chunk;
+    chunk->adj_next = (Chunk_Header_Basic*)normal_chunk;
     sspace_put_free_chunk(sspace, chunk);
   }
-  normal_chunk->adj_next = (Chunk_Heaer_Basic*)((POINTER_SIZE_INT)normal_chunk + NORMAL_CHUNK_SIZE_BYTES);
+  normal_chunk->adj_next = (Chunk_Header_Basic*)((POINTER_SIZE_INT)normal_chunk + NORMAL_CHUNK_SIZE_BYTES);
   if(normal_chunk->adj_next != adj_next){
     assert(normal_chunk->adj_next < adj_next);
     Free_Chunk *back_chunk = (Free_Chunk*)normal_chunk->adj_next;
@@ -232,7 +226,7 @@
   
   Free_Chunk *back_chunk = (Free_Chunk*)((POINTER_SIZE_INT)chunk + chunk_size);
   back_chunk->adj_next = chunk->adj_next;
-  chunk->adj_next = (Chunk_Heaer_Basic*)back_chunk;
+  chunk->adj_next = (Chunk_Header_Basic*)back_chunk;
   sspace_put_free_chunk(sspace, back_chunk);
 }
 
@@ -373,6 +367,40 @@
   return chunk;
 }
 
+typedef struct PFC_Pool_Iterator {
+  volatile unsigned int seg_index;
+  volatile unsigned int chunk_index;
+  SpinLock lock;
+} PFC_Pool_Iterator;
+
+static PFC_Pool_Iterator pfc_pool_iterator;
+
+void sspace_init_pfc_pool_iterator(Sspace *sspace)
+{
+  assert(pfc_pool_iterator.lock == FREE_LOCK);
+  pfc_pool_iterator.seg_index = 0;
+  pfc_pool_iterator.chunk_index = 0;
+}
+
+Pool *sspace_grab_next_pfc_pool(Sspace *sspace)
+{
+  Pool *pfc_pool = NULL;
+  
+  lock(pfc_pool_iterator.lock);
+  for(; pfc_pool_iterator.seg_index < SIZE_SEGMENT_NUM; ++pfc_pool_iterator.seg_index){
+    for(; pfc_pool_iterator.chunk_index < size_segments[pfc_pool_iterator.seg_index]->chunk_num; ++pfc_pool_iterator.chunk_index){
+      pfc_pool = pfc_pools[pfc_pool_iterator.seg_index][pfc_pool_iterator.chunk_index];
+      ++pfc_pool_iterator.chunk_index;
+      unlock(pfc_pool_iterator.lock);
+      return pfc_pool;
+    }
+    pfc_pool_iterator.chunk_index = 0;
+  }
+  unlock(pfc_pool_iterator.lock);
+  
+  return NULL;
+}
+
 #define min_value(x, y) (((x) < (y)) ? (x) : (y))
 
 Chunk_Header *sspace_steal_pfc(Sspace *sspace, unsigned int seg_index, unsigned int index)
@@ -389,154 +417,158 @@
   return NULL;
 }
 
-/* Because this computation doesn't use lock, its result is not accurate. And it is enough. */
-POINTER_SIZE_INT sspace_free_memory_size(Sspace *sspace)
+static POINTER_SIZE_INT free_mem_in_pfc_pools(Sspace *sspace, Boolean show_chunk_info)
 {
-  POINTER_SIZE_INT free_size = 0;
+  Size_Segment **size_segs = sspace->size_segments;
+  Pool ***pfc_pools = sspace->pfc_pools;
+  POINTER_SIZE_INT free_mem_size = 0;
   
-  vm_gc_lock_enum();
-  
-  for(unsigned int i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
-    free_size += NORMAL_CHUNK_SIZE_BYTES * (i+1) * sspace->aligned_free_chunk_lists[i].chunk_num;
-  
-  for(unsigned int i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
-    free_size += CHUNK_GRANULARITY * (i+1) * sspace->unaligned_free_chunk_lists[i].chunk_num;
-  
-  Free_Chunk *hyper_chunk = sspace->hyper_free_chunk_list->head;
-  while(hyper_chunk){
-    free_size += CHUNK_SIZE(hyper_chunk);
-    hyper_chunk = hyper_chunk->next;
+  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
+    for(unsigned int j = 0; j < size_segs[i]->chunk_num; ++j){
+      Pool *pfc_pool = pfc_pools[i][j];
+      if(pool_is_empty(pfc_pool))
+        continue;
+      pool_iterator_init(pfc_pool);
+      Chunk_Header *chunk = (Chunk_Header*)pool_iterator_next(pfc_pool);
+      assert(chunk);
+      unsigned int slot_num = chunk->slot_num;
+      unsigned int chunk_num = 0;
+      unsigned int alloc_num = 0;
+      while(chunk){
+        assert(chunk->slot_num == slot_num);
+        ++chunk_num;
+        alloc_num += chunk->alloc_num;
+        chunk = (Chunk_Header*)pool_iterator_next(pfc_pool);
+      }
+      unsigned int total_slot_num = slot_num * chunk_num;
+      assert(alloc_num < total_slot_num);
+#ifdef SSPACE_CHUNK_INFO
+      if(show_chunk_info)
+        printf("Size: %x\tchunk num: %d\tLive Ratio: %f\n", NORMAL_INDEX_TO_SIZE(j, size_segs[i]), chunk_num, (float)alloc_num/total_slot_num);
+#endif
+      free_mem_size += NORMAL_INDEX_TO_SIZE(j, size_segs[i]) * (total_slot_num-alloc_num);
+      assert(free_mem_size < sspace->committed_heap_size);
+    }
   }
   
-  vm_gc_unlock_enum();
-  
-  return free_size;
+  return free_mem_size;
 }
 
-
-#ifdef SSPACE_CHUNK_INFO
-
-extern POINTER_SIZE_INT alloc_mask_in_table;
-static POINTER_SIZE_INT free_mem_size;
-
-static unsigned int word_set_bit_num(POINTER_SIZE_INT word)
+static POINTER_SIZE_INT free_mem_in_free_lists(Sspace *sspace, Free_Chunk_List *lists, unsigned int list_num, Boolean show_chunk_info)
 {
-  unsigned int count = 0;
+  POINTER_SIZE_INT free_mem_size = 0;
   
-  while(word){
-    word &= word - 1;
-    ++count;
+  for(unsigned int index = 0; index < list_num; ++index){
+    Free_Chunk *chunk = lists[index].head;
+    if(!chunk) continue;
+    POINTER_SIZE_INT chunk_size = CHUNK_SIZE(chunk);
+    assert(chunk_size <= HYPER_OBJ_THRESHOLD);
+    unsigned int chunk_num = 0;
+    while(chunk){
+      assert(CHUNK_SIZE(chunk) == chunk_size);
+      ++chunk_num;
+      chunk = chunk->next;
+    }
+    free_mem_size += chunk_size * chunk_num;
+    assert(free_mem_size < sspace->committed_heap_size);
+#ifdef SSPACE_CHUNK_INFO
+    if(show_chunk_info)
+      printf("Free Size: %x\tnum: %d\n", chunk_size, chunk_num);
+#endif
   }
-  return count;
+  
+  return free_mem_size;
 }
 
-static unsigned int pfc_info(Chunk_Header *chunk, Boolean before_gc)
+static POINTER_SIZE_INT free_mem_in_hyper_free_list(Sspace *sspace, Boolean show_chunk_info)
 {
-  POINTER_SIZE_INT *table = ((Chunk_Header*)chunk)->table;
-  unsigned int slot_num = chunk->slot_num;
-  unsigned int live_num = 0;
+  POINTER_SIZE_INT free_mem_size = 0;
   
-  unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
-  for(unsigned int i=0; i<index_word_num; ++i){
-    table[i] &= alloc_mask_in_table;
-    unsigned int live_num_in_word = (table[i] == alloc_mask_in_table) ? SLOT_NUM_PER_WORD_IN_TABLE : word_set_bit_num(table[i]);
-    live_num += live_num_in_word;
-  }
-  if(before_gc){
-    unsigned int slot_num_in_last_word = slot_num % SLOT_NUM_PER_WORD_IN_TABLE;
-    if(slot_num_in_last_word){
-      unsigned int fake_live_num_in_last_word = SLOT_NUM_PER_WORD_IN_TABLE - slot_num_in_last_word;
-      assert(live_num >= fake_live_num_in_last_word);
-      live_num -= fake_live_num_in_last_word;
-    }
+  Free_Chunk_List *list = sspace->hyper_free_chunk_list;
+  Free_Chunk *chunk = list->head;
+  while(chunk){
+#ifdef SSPACE_CHUNK_INFO
+    if(show_chunk_info)
+      printf("Size: %x\n", CHUNK_SIZE(chunk));
+#endif
+    free_mem_size += CHUNK_SIZE(chunk);
+    assert(free_mem_size < sspace->committed_heap_size);
+    chunk = chunk->next;
   }
-  assert(live_num <= slot_num);
-  return live_num;
+  
+  return free_mem_size;
 }
 
-static void pfc_pools_info(Sspace *sspace, Boolean before_gc)
+POINTER_SIZE_INT free_mem_in_sspace(Sspace *sspace, Boolean show_chunk_info)
 {
-  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
-    for(unsigned int j = 0; j < size_segments[i]->chunk_num; ++j){
-      Pool *pool = pfc_pools[i][j];
-      Chunk_Header *chunk = NULL;
-      unsigned int chunk_counter = 0;
-      unsigned int slot_num = 0;
-      unsigned int live_num = 0;
-      pool_iterator_init(pool);
-      while(chunk = (Chunk_Header*)pool_iterator_next(pool)){
-        ++chunk_counter;
-        slot_num += chunk->slot_num;
-        live_num += pfc_info(chunk, before_gc);
-      }
-      if(slot_num){
-        printf("Size: %x\tchunk num: %d\tlive obj: %d\ttotal obj: %d\tLive Ratio: %f\n", NORMAL_INDEX_TO_SIZE(j, size_segments[i]), chunk_counter, live_num, slot_num, (float)live_num/slot_num);
-        assert(live_num < slot_num);
-        free_mem_size += NORMAL_INDEX_TO_SIZE(j, size_segments[i]) * (slot_num-live_num);
-        assert(free_mem_size < sspace->committed_heap_size);
-      }
-    }
-  }
-}
+  POINTER_SIZE_INT free_mem_size = 0;
 
-enum Chunk_Type {
-  ALIGNED_CHUNK,
-  UNALIGNED_CHUNK
-};
-static unsigned int chunk_index_to_size(unsigned int index, Chunk_Type type)
-{
-  if(type == ALIGNED_CHUNK)
-    return ALIGNED_CHUNK_INDEX_TO_SIZE(index);
-  assert(type == UNALIGNED_CHUNK);
-  return UNALIGNED_CHUNK_INDEX_TO_SIZE(index);
+#ifdef SSPACE_CHUNK_INFO
+  if(show_chunk_info)
+    printf("\n\nPFC INFO:\n\n");
+#endif
+  free_mem_size += free_mem_in_pfc_pools(sspace, show_chunk_info);
+
+#ifdef SSPACE_CHUNK_INFO
+  if(show_chunk_info)
+    printf("\n\nALIGNED FREE CHUNK INFO:\n\n");
+#endif
+  free_mem_size += free_mem_in_free_lists(sspace, aligned_free_chunk_lists, NUM_ALIGNED_FREE_CHUNK_BUCKET, show_chunk_info);
+
+#ifdef SSPACE_CHUNK_INFO
+  if(show_chunk_info)
+    printf("\n\nUNALIGNED FREE CHUNK INFO:\n\n");
+#endif
+  free_mem_size += free_mem_in_free_lists(sspace, unaligned_free_chunk_lists, NUM_UNALIGNED_FREE_CHUNK_BUCKET, show_chunk_info);
+
+#ifdef SSPACE_CHUNK_INFO
+  if(show_chunk_info)
+    printf("\n\nSUPER FREE CHUNK INFO:\n\n");
+#endif
+  free_mem_size += free_mem_in_hyper_free_list(sspace, show_chunk_info);
+  
+  return free_mem_size;
 }
 
-static void free_lists_info(Sspace *sspace, Free_Chunk_List *lists, unsigned int list_num, Chunk_Type type)
+
+#ifdef SSPACE_CHUNK_INFO
+void sspace_chunks_info(Sspace *sspace, Boolean show_info)
 {
-  unsigned int index;
+  if(!show_info) return;
   
-  for(index = 0; index < list_num; ++index){
-    Free_Chunk *chunk = lists[index].head;
-    unsigned int chunk_counter = 0;
-    while(chunk){
-      ++chunk_counter;
-      unsigned int chunk_size = CHUNK_SIZE(chunk);
-      assert(chunk_size <= HYPER_OBJ_THRESHOLD);
-      free_mem_size += chunk_size;
-      assert(free_mem_size < sspace->committed_heap_size);
-      chunk = chunk->next;
-    }
-    printf("Free Size: %x\tnum: %d\n", chunk_index_to_size(index, type), chunk_counter);
-  }
+  POINTER_SIZE_INT free_mem_size = free_mem_in_sspace(sspace, TRUE);
+  
+  float free_mem_ratio = (float)free_mem_size / sspace->committed_heap_size;
+  printf("\n\nFree mem ratio: %f\n\n", free_mem_ratio);
 }
+#endif
+
 
-void sspace_chunks_info(Sspace *sspace, Boolean before_gc)
+/* Because this computation doesn't take a lock, its result is not exact, but it is accurate enough. */
+POINTER_SIZE_INT sspace_free_memory_size(Sspace *sspace)
 {
-  if(!before_gc) return;
-  
-  printf("\n\nPFC INFO:\n\n");
-  pfc_pools_info(sspace, before_gc);
+  POINTER_SIZE_INT free_size = 0;
   
-  printf("\n\nALIGNED FREE CHUNK INFO:\n\n");
-  free_lists_info(sspace, aligned_free_chunk_lists, NUM_ALIGNED_FREE_CHUNK_BUCKET, ALIGNED_CHUNK);
+  vm_gc_lock_enum();
+  /*
+  for(unsigned int i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_size += NORMAL_CHUNK_SIZE_BYTES * (i+1) * sspace->aligned_free_chunk_lists[i].chunk_num;
   
-  printf("\n\nUNALIGNED FREE CHUNK INFO:\n\n");
-  free_lists_info(sspace, unaligned_free_chunk_lists, NUM_UNALIGNED_FREE_CHUNK_BUCKET, UNALIGNED_CHUNK);
+  for(unsigned int i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_size += CHUNK_GRANULARITY * (i+1) * sspace->unaligned_free_chunk_lists[i].chunk_num;
   
-  printf("\n\nSUPER FREE CHUNK INFO:\n\n");
-  Free_Chunk_List *list = &hyper_free_chunk_list;
-  Free_Chunk *chunk = list->head;
-  while(chunk){
-    printf("Size: %x\n", CHUNK_SIZE(chunk));
-    free_mem_size += CHUNK_SIZE(chunk);
-    assert(free_mem_size < sspace->committed_heap_size);
-    chunk = chunk->next;
+  Free_Chunk *hyper_chunk = sspace->hyper_free_chunk_list->head;
+  while(hyper_chunk){
+    free_size += CHUNK_SIZE(hyper_chunk);
+    hyper_chunk = hyper_chunk->next;
   }
-  printf("\n\nFree mem ratio: %f\n\n", (float)free_mem_size / sspace->committed_heap_size);
-  free_mem_size = 0;
+  */
+  free_size = free_mem_in_sspace(sspace, FALSE);
+  vm_gc_unlock_enum();
+  
+  return free_size;
 }
 
-#endif
 
 #ifdef SSPACE_ALLOC_INFO
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h Thu Aug 16 21:33:02 2007
@@ -23,33 +23,36 @@
 enum Chunk_Status {
   CHUNK_NIL = 0,
   CHUNK_FREE = 0x1,
-  CHUNK_IN_USE = 0x2,
-  CHUNK_USED = 0x4,
+  CHUNK_FRESH = 0x2,
   CHUNK_NORMAL = 0x10,
   CHUNK_ABNORMAL = 0x20,
-  CHUNK_NEED_ZEROING = 0x100
+  CHUNK_NEED_ZEROING = 0x100,
+  CHUNK_TO_MERGE = 0x200,
+  CHUNK_IN_USE = 0x400, /* just keep info for now, not used */
+  CHUNK_USED = 0x800 /* just keep info for now, not used */
 };
 
 typedef volatile POINTER_SIZE_INT Chunk_Status_t;
 
-typedef struct Chunk_Heaer_Basic {
-  Chunk_Heaer_Basic *next;
+typedef struct Chunk_Header_Basic {
+  Chunk_Header_Basic *next;
   Chunk_Status_t status;
-  Chunk_Heaer_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
-  Chunk_Heaer_Basic *adj_next;  // adjacent next chunk
-} Chunk_Heaer_Basic;
+  Chunk_Header_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
+  Chunk_Header_Basic *adj_next;  // adjacent next chunk
+} Chunk_Header_Basic;
 
 typedef struct Chunk_Header {
-  /* Beginning of Chunk_Heaer_Basic */
+  /* Beginning of Chunk_Header_Basic */
   Chunk_Header *next;           /* pointing to the next pfc in the pfc pool */
   Chunk_Status_t status;
-  Chunk_Heaer_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
-  Chunk_Heaer_Basic *adj_next;  // adjacent next chunk
-  /* End of Chunk_Heaer_Basic */
+  Chunk_Header_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
+  Chunk_Header_Basic *adj_next;  // adjacent next chunk
+  /* End of Chunk_Header_Basic */
   void *base;
   unsigned int slot_size;
   unsigned int slot_num;
   unsigned int slot_index;      /* the index of which is the first free slot in this chunk */
+  unsigned int alloc_num;       /* the number of slots currently allocated in this chunk */
   POINTER_SIZE_INT table[1];
 } Chunk_Header;
 
@@ -88,6 +91,7 @@
 #define CHUNK_END(chunk)  ((chunk)->adj_next)
 #define CHUNK_SIZE(chunk) ((POINTER_SIZE_INT)chunk->adj_next - (POINTER_SIZE_INT)chunk)
 
+
 inline void *slot_index_to_addr(Chunk_Header *chunk, unsigned int index)
 { return (void*)((POINTER_SIZE_INT)chunk->base + chunk->slot_size * index); }
 
@@ -95,12 +99,12 @@
 { return (unsigned int)(((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)chunk->base) / chunk->slot_size); }
 
 typedef struct Free_Chunk {
-  /* Beginning of Chunk_Heaer_Basic */
+  /* Beginning of Chunk_Header_Basic */
   Free_Chunk *next;             /* pointing to the next free Free_Chunk */
   Chunk_Status_t status;
-  Chunk_Heaer_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
-  Chunk_Heaer_Basic *adj_next;  // adjacent next chunk
-  /* End of Chunk_Heaer_Basic */
+  Chunk_Header_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
+  Chunk_Header_Basic *adj_next;  // adjacent next chunk
+  /* End of Chunk_Header_Basic */
   Free_Chunk *prev;             /* pointing to the prev free Free_Chunk */
 } Free_Chunk;
 
@@ -147,7 +151,18 @@
   chunk->table[last_word_index] |= padding_mask;
 }
 
-extern POINTER_SIZE_INT alloc_mask_in_table;
+/* Depadding the last index word in table to facilitate allocation */
+inline void chunk_depad_last_index_word(Chunk_Header *chunk)
+{
+  unsigned int ceiling_index_in_last_word = (chunk->slot_num * COLOR_BITS_PER_OBJ) % BITS_PER_WORD;
+  if(!ceiling_index_in_last_word)
+    return;
+  POINTER_SIZE_INT depadding_mask = (1 << ceiling_index_in_last_word) - 1;
+  unsigned int last_word_index = (chunk->slot_num-1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  chunk->table[last_word_index] &= depadding_mask;
+}
+
+extern POINTER_SIZE_INT cur_alloc_mask;
 /* Used for allocating a fixed-size chunk from free area lists */
 inline void normal_chunk_init(Chunk_Header *chunk, unsigned int slot_size)
 {
@@ -155,13 +170,14 @@
   assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + NORMAL_CHUNK_SIZE_BYTES);
   
   chunk->next = NULL;
-  chunk->status = CHUNK_NORMAL | CHUNK_NEED_ZEROING;
+  chunk->status = CHUNK_FRESH | CHUNK_NORMAL | CHUNK_NEED_ZEROING;
   chunk->slot_size = slot_size;
   chunk->slot_num = NORMAL_CHUNK_SLOT_NUM(chunk);
   chunk->slot_index = 0;
+  chunk->alloc_num = 0;
   chunk->base = NORMAL_CHUNK_BASE(chunk);
   memset(chunk->table, 0, NORMAL_CHUNK_TABLE_SIZE_BYTES(chunk));//memset table
-  chunk_pad_last_index_word(chunk, alloc_mask_in_table);
+  chunk_pad_last_index_word(chunk, cur_alloc_mask);
 }
 
 /* Used for allocating a chunk for large object from free area lists */
@@ -171,7 +187,7 @@
   assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + chunk_size);
   
   chunk->next = NULL;
-  chunk->status = CHUNK_IN_USE | CHUNK_ABNORMAL;
+  chunk->status = CHUNK_ABNORMAL;
   chunk->slot_size = obj_size;
   chunk->slot_num = 1;
   chunk->slot_index = 0;
@@ -215,11 +231,14 @@
 #define ALIGNED_CHUNK_INDEX_TO_SIZE(index)    (((index) + 1) << NORMAL_CHUNK_SHIFT_COUNT)
 #define UNALIGNED_CHUNK_INDEX_TO_SIZE(index)  (((index) + 1) << CHUNK_GRANULARITY_BITS)
 
+#define SUPER_OBJ_MASK ((Obj_Info_Type)0x1)  /* the lowest bit in obj info */
 
 #define PFC_STEAL_NUM   3
 #define PFC_STEAL_THRESHOLD   3
 
+
 #define SIZE_SEGMENT_NUM  3
+
 typedef struct Size_Segment {
   unsigned int size_min;
   unsigned int size_max;
@@ -232,6 +251,17 @@
   POINTER_SIZE_INT gran_high_mask;
 } Size_Segment;
 
+inline Size_Segment *sspace_get_size_seg(Sspace *sspace, unsigned int size)
+{
+  Size_Segment **size_segs = sspace->size_segments;
+  
+  unsigned int seg_index = 0;
+  for(; seg_index < SIZE_SEGMENT_NUM; ++seg_index)
+    if(size <= size_segs[seg_index]->size_max) break;
+  assert(seg_index < SIZE_SEGMENT_NUM);
+  assert(size_segs[seg_index]->seg_index == seg_index);
+  return size_segs[seg_index];
+}
 
 inline Chunk_Header *sspace_get_pfc(Sspace *sspace, unsigned int seg_index, unsigned int index)
 {
@@ -245,30 +275,37 @@
 {
   unsigned int size = chunk->slot_size;
   assert(chunk && (size <= SUPER_OBJ_THRESHOLD));
+  assert(chunk->slot_index < chunk->slot_num);
   
   Size_Segment **size_segs = sspace->size_segments;
   chunk->status = CHUNK_NORMAL | CHUNK_NEED_ZEROING;
   
   for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
-    if(size <= size_segs[i]->size_max){
-      assert(!(size & size_segs[i]->gran_low_mask));
-      assert(size > size_segs[i]->size_min);
-      unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_segs[i]);
-      Pool *pfc_pool = sspace->pfc_pools[i][index];
-      pool_put_entry(pfc_pool, chunk);
-      return;
-    }
+    if(size > size_segs[i]->size_max) continue;
+    assert(!(size & size_segs[i]->gran_low_mask));
+    assert(size > size_segs[i]->size_min);
+    unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_segs[i]);
+    Pool *pfc_pool = sspace->pfc_pools[i][index];
+    pool_put_entry(pfc_pool, chunk);
+    return;
   }
 }
 
 
 extern void sspace_init_chunks(Sspace *sspace);
 extern void sspace_clear_chunk_list(GC *gc);
+
 extern void sspace_put_free_chunk(Sspace *sspace, Free_Chunk *chunk);
 extern Free_Chunk *sspace_get_normal_free_chunk(Sspace *sspace);
 extern Free_Chunk *sspace_get_abnormal_free_chunk(Sspace *sspace, unsigned int chunk_size);
 extern Free_Chunk *sspace_get_hyper_free_chunk(Sspace *sspace, unsigned int chunk_size, Boolean is_normal_chunk);
+
+extern void sspace_init_pfc_pool_iterator(Sspace *sspace);
+extern Pool *sspace_grab_next_pfc_pool(Sspace *sspace);
+
 extern Chunk_Header *sspace_steal_pfc(Sspace *sspace, unsigned int index);
+
+extern POINTER_SIZE_INT free_mem_in_sspace(Sspace *sspace, Boolean show_chunk_info);
 
 extern void zeroing_free_chunk(Free_Chunk *chunk);
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp Thu Aug 16 21:33:02 2007
@@ -27,7 +27,7 @@
   POINTER_SIZE_INT color_word = *p_color_word;
   POINTER_SIZE_INT mark_color = cur_mark_color << index_in_word;
   
-  return color_word & mark_color;
+  return (Boolean)(color_word & mark_color);
 }
 
 static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
@@ -191,5 +191,6 @@
 
 void trace_obj_in_ms_marking(Collector *collector, void *p_obj)
 {
+  obj_mark_in_table((Partial_Reveal_Object*)p_obj);
   trace_object(collector, (Partial_Reveal_Object *)p_obj);
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp Thu Aug 16 21:33:02 2007
@@ -23,18 +23,18 @@
 #include "../finalizer_weakref/finalizer_weakref.h"
 
 
-POINTER_SIZE_INT alloc_mask_in_table = ~BLACK_MASK_IN_TABLE;
-POINTER_SIZE_INT mark_mask_in_table = BLACK_MASK_IN_TABLE;
 POINTER_SIZE_INT cur_alloc_color = OBJ_COLOR_WHITE;
 POINTER_SIZE_INT cur_mark_color = OBJ_COLOR_BLACK;
+POINTER_SIZE_INT cur_alloc_mask = ~BLACK_MASK_IN_TABLE;
+POINTER_SIZE_INT cur_mark_mask = BLACK_MASK_IN_TABLE;
 
 static void ops_color_flip(void)
 {
   POINTER_SIZE_INT temp = cur_alloc_color;
   cur_alloc_color = cur_mark_color;
   cur_mark_color = temp;
-  alloc_mask_in_table = ~alloc_mask_in_table;
-  mark_mask_in_table = ~mark_mask_in_table;
+  cur_alloc_mask = ~cur_alloc_mask;
+  cur_mark_mask = ~cur_mark_mask;
 }
 
 void collector_init_free_chunk_list(Collector *collector)
@@ -44,19 +44,25 @@
   collector->free_chunk_list = list;
 }
 
-extern Chunk_Heaer_Basic *volatile next_chunk_for_sweep;
-static void gc_init_chunk_for_sweep(GC *gc, Sspace *sspace)
+/* Argument need_construct stands for whether or not the dual-direction list needs constructing */
+Chunk_Header_Basic *sspace_grab_next_chunk(Sspace *sspace, Chunk_Header_Basic *volatile *shared_next_chunk, Boolean need_construct)
 {
-  next_chunk_for_sweep = (Chunk_Heaer_Basic*)space_heap_start((Space*)sspace);
-  next_chunk_for_sweep->adj_prev = NULL;
+  Chunk_Header_Basic *cur_chunk = *shared_next_chunk;
   
-  unsigned int i = gc->num_collectors;
-  while(i--){
-    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
-    assert(!list->head);
-    assert(!list->tail);
-    assert(list->lock == FREE_LOCK);
+  Chunk_Header_Basic *sspace_ceiling = (Chunk_Header_Basic*)space_heap_end((Space*)sspace);
+  while(cur_chunk < sspace_ceiling){
+    Chunk_Header_Basic *next_chunk = CHUNK_END(cur_chunk);
+    
+    Chunk_Header_Basic *temp = (Chunk_Header_Basic*)atomic_casptr((volatile void**)shared_next_chunk, next_chunk, cur_chunk);
+    if(temp == cur_chunk){
+      if(need_construct && next_chunk < sspace_ceiling)
+        next_chunk->adj_prev = cur_chunk;
+      return cur_chunk;
+    }
+    cur_chunk = *shared_next_chunk;
   }
+  
+  return NULL;
 }
 
 
@@ -72,11 +78,11 @@
   
   /* Pass 1: **************************************************
      mark all live objects in heap ****************************/
-  unsigned int old_num = atomic_cas32(&num_marking_collectors, 0, num_active_collectors+1);
+  atomic_cas32(&num_marking_collectors, 0, num_active_collectors+1);
   
   sspace_mark_scan(collector);
   
-  old_num = atomic_inc32(&num_marking_collectors);
+  unsigned int old_num = atomic_inc32(&num_marking_collectors);
   if( ++old_num == num_active_collectors ){
     /* last collector's world here */
 #ifdef SSPACE_TIME
@@ -98,28 +104,50 @@
   
   /* Pass 2: **************************************************
      sweep dead objects ***************************************/
-  atomic_cas32( &num_sweeping_collectors, 0, num_active_collectors);
+  atomic_cas32( &num_sweeping_collectors, 0, num_active_collectors+1);
   
   sspace_sweep(collector, sspace);
   
-  atomic_inc32(&num_sweeping_collectors);
-  while(num_sweeping_collectors != num_active_collectors);
+  old_num = atomic_inc32(&num_sweeping_collectors);
+  if( ++old_num == num_active_collectors ){
+#ifdef SSPACE_TIME
+    sspace_sweep_time(FALSE, sspace->need_compact);
+#endif
+    ops_color_flip();
+#ifdef SSPACE_CHUNK_INFO
+    sspace_chunks_info(sspace, TRUE);
+#endif
+#ifdef SSPACE_VERIFY
+    sspace_verify_after_sweep(gc);
+#endif
+    if(sspace->need_compact){
+      sspace_init_pfc_pool_iterator(sspace);
+    }
+    /* let other collectors go */
+    num_sweeping_collectors++;
+  }
+  while(num_sweeping_collectors != num_active_collectors + 1);
+  
+  if(sspace->need_compact)
+    compact_sspace(collector, sspace);
   
   if( collector->thread_handle != 0 )
     return;
   
-  /* Leftover: ************************************************ */
+  if(sspace->need_compact){
+    gc_fix_rootset(collector);
 #ifdef SSPACE_TIME
-  sspace_sweep_time(FALSE);
+    sspace_fix_time(FALSE);
 #endif
-
+  }
+  
   gc_collect_free_chunks(gc, sspace);
-
 #ifdef SSPACE_TIME
   sspace_merge_time(FALSE);
 #endif
+
+  /* Leftover: ************************************************ */
   
-  ops_color_flip();
   gc->root_set = NULL;  // FIXME:: should be placed to a more appropriate place
   gc_set_pool_clear(gc->metadata->gc_rootset_pool);
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h Thu Aug 16 21:33:02 2007
@@ -22,6 +22,10 @@
 #include "sspace_verify.h"
 
 #define PFC_REUSABLE_RATIO 0.1
+#define SSPACE_COMPACT_RATIO 0.15
+
+inline Boolean chunk_is_reusable(Chunk_Header *chunk)
+{ return (float)(chunk->slot_num-chunk->alloc_num)/chunk->slot_num > PFC_REUSABLE_RATIO; }
 
 enum Obj_Color {
   OBJ_COLOR_BLUE = 0x0,
@@ -37,15 +41,10 @@
   #define BLACK_MASK_IN_TABLE  ((POINTER_SIZE_INT)0xAAAAAAAA)
 #endif
 
-extern POINTER_SIZE_INT alloc_mask_in_table;
-extern POINTER_SIZE_INT mark_mask_in_table;
 extern POINTER_SIZE_INT cur_alloc_color;
 extern POINTER_SIZE_INT cur_mark_color;
-
-#define SUPER_OBJ_MASK ((Obj_Info_Type)0x1)  /* the lowest bit in obj info */
-
-inline void set_super_obj_mask(void *large_obj)
-{ ((Partial_Reveal_Object*)large_obj)->obj_info |= SUPER_OBJ_MASK; }
+extern POINTER_SIZE_INT cur_alloc_mask;
+extern POINTER_SIZE_INT cur_mark_mask;
 
 inline Boolean is_super_obj(Partial_Reveal_Object *obj)
 {
@@ -92,9 +91,11 @@
     POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
     if(temp == old_word){
 #ifdef SSPACE_VERIFY
+#ifndef SSPACE_VERIFY_FINREF
       assert(obj_is_marked_in_vt(obj));
+#endif
       obj_unmark_in_vt(obj);
-      sspace_verify_mark(obj, vm_object_size(obj));
+      sspace_record_mark(obj, vm_object_size(obj));
 #endif
       return TRUE;
     }
@@ -105,10 +106,29 @@
   return FALSE;
 }
 
+inline void collector_add_free_chunk(Collector *collector, Free_Chunk *chunk)
+{
+  Free_Chunk_List *list = collector->free_chunk_list;
+  
+  chunk->status = CHUNK_FREE | CHUNK_TO_MERGE;
+  chunk->next = list->head;
+  chunk->prev = NULL;
+  if(list->head)
+    list->head->prev = chunk;
+  else
+    list->tail = chunk;
+  list->head = chunk;
+}
+
+
 extern void sspace_mark_scan(Collector *collector);
+extern void gc_init_chunk_for_sweep(GC *gc, Sspace *sspace);
 extern void sspace_sweep(Collector *collector, Sspace *sspace);
+extern void compact_sspace(Collector *collector, Sspace *sspace);
 extern void gc_collect_free_chunks(GC *gc, Sspace *sspace);
+extern Chunk_Header_Basic *sspace_grab_next_chunk(Sspace *sspace, Chunk_Header_Basic *volatile *shared_next_chunk, Boolean need_construct);
 
-extern void chunk_set_slot_index(Chunk_Header* chunk, unsigned int first_free_word_index);
+extern void pfc_set_slot_index(Chunk_Header *chunk, unsigned int first_free_word_index, POINTER_SIZE_INT alloc_color);
+extern void pfc_reset_slot_index(Chunk_Header *chunk);
 
 #endif // _SSPACE_MARK_SWEEP_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp Thu Aug 16 21:33:02 2007
@@ -19,9 +19,23 @@
 #include "sspace_mark_sweep.h"
 
 
-Chunk_Heaer_Basic *volatile next_chunk_for_sweep;
+static Chunk_Header_Basic *volatile next_chunk_for_sweep;
 
 
+void gc_init_chunk_for_sweep(GC *gc, Sspace *sspace)
+{
+  next_chunk_for_sweep = (Chunk_Header_Basic*)space_heap_start((Space*)sspace);
+  next_chunk_for_sweep->adj_prev = NULL;
+  
+  unsigned int i = gc->num_collectors;
+  while(i--){
+    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+    assert(!list->head);
+    assert(!list->tail);
+    assert(list->lock == FREE_LOCK);
+  }
+}
+
 static unsigned int word_set_bit_num(POINTER_SIZE_INT word)
 {
   unsigned int count = 0;
@@ -33,40 +47,6 @@
   return count;
 }
 
-static Chunk_Heaer_Basic *sspace_get_next_sweep_chunk(Collector *collector, Sspace *sspace)
-{
-  Chunk_Heaer_Basic *cur_sweep_chunk = next_chunk_for_sweep;
-  
-  Chunk_Heaer_Basic *sspace_ceiling = (Chunk_Heaer_Basic*)space_heap_end((Space*)sspace);
-  while(cur_sweep_chunk < sspace_ceiling){
-    Chunk_Heaer_Basic *next_sweep_chunk = CHUNK_END(cur_sweep_chunk);
-    
-    Chunk_Heaer_Basic *temp = (Chunk_Heaer_Basic*)atomic_casptr((volatile void **)&next_chunk_for_sweep, next_sweep_chunk, cur_sweep_chunk);
-    if(temp == cur_sweep_chunk){
-      if(next_sweep_chunk < sspace_ceiling)
-        next_sweep_chunk->adj_prev = cur_sweep_chunk;
-      return cur_sweep_chunk;
-    }
-    cur_sweep_chunk = next_chunk_for_sweep;
-  }
-  
-  return NULL;
-}
-
-static void collector_add_free_chunk(Collector *collector, Free_Chunk *chunk)
-{
-  Free_Chunk_List *list = collector->free_chunk_list;
-  
-  chunk->status = CHUNK_FREE | CHUNK_IN_USE;
-  chunk->next = list->head;
-  chunk->prev = NULL;
-  if(list->head)
-    list->head->prev = chunk;
-  else
-    list->tail = chunk;
-  list->head = chunk;
-}
-
 void zeroing_free_chunk(Free_Chunk *chunk)
 {
   assert(chunk->status == CHUNK_FREE);
@@ -98,7 +78,7 @@
   POINTER_SIZE_INT index_word = table[word_index];
   POINTER_SIZE_INT mark_color = cur_mark_color << (COLOR_BITS_PER_OBJ * (slot_index % SLOT_NUM_PER_WORD_IN_TABLE));
   for(; slot_index < slot_num; ++slot_index){
-    assert(!(index_word & ~mark_mask_in_table));
+    assert(!(index_word & ~cur_mark_mask));
     if(index_word & mark_color){
       if(cur_free_slot_num){
         memset((void*)base, 0, slot_size*cur_free_slot_num);
@@ -123,7 +103,7 @@
       mark_color = cur_mark_color;
       ++word_index;
       index_word = table[word_index];
-      while(index_word == mark_mask_in_table && cur_free_slot_num == 0 && slot_index < slot_num){
+      while(index_word == cur_mark_mask && cur_free_slot_num == 0 && slot_index < slot_num){
         slot_index += SLOT_NUM_PER_WORD_IN_TABLE;
         ++word_index;
         index_word = table[word_index];
@@ -152,43 +132,52 @@
   
   unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
   for(unsigned int i=0; i<index_word_num; ++i){
-    table[i] &= mark_mask_in_table;
-    unsigned int live_num_in_word = (table[i] == mark_mask_in_table) ? SLOT_NUM_PER_WORD_IN_TABLE : word_set_bit_num(table[i]);
+    table[i] &= cur_mark_mask;
+    unsigned int live_num_in_word = (table[i] == cur_mark_mask) ? SLOT_NUM_PER_WORD_IN_TABLE : word_set_bit_num(table[i]);
     live_num += live_num_in_word;
     if((first_free_word_index == MAX_SLOT_INDEX) && (live_num_in_word < SLOT_NUM_PER_WORD_IN_TABLE)){
       first_free_word_index = i;
-      chunk_set_slot_index((Chunk_Header*)chunk, first_free_word_index);
+      pfc_set_slot_index((Chunk_Header*)chunk, first_free_word_index, cur_mark_color);
     }
   }
   assert(live_num <= slot_num);
+  chunk->alloc_num = live_num;
 #ifdef SSPACE_VERIFY
   collector->live_obj_num += live_num;
-  //printf("Chunk: %x  live obj: %d slot num: %d\n", (POINTER_SIZE_INT)chunk, live_num, slot_num);
 #endif
   if(!live_num){  /* all objects in this chunk are dead */
     collector_add_free_chunk(collector, (Free_Chunk*)chunk);
-  } else if((float)(slot_num-live_num)/slot_num > PFC_REUSABLE_RATIO){  /* most objects in this chunk are swept, add chunk to pfc list*/
-#ifdef SSPACE_VERIFY
-    //zeroing_free_areas_in_pfc((Chunk_Header*)chunk, live_num);
-#endif
-    chunk_pad_last_index_word((Chunk_Header*)chunk, mark_mask_in_table);
+  } else if(chunk_is_reusable(chunk)){  /* most objects in this chunk are swept, add chunk to pfc list*/
+    chunk->alloc_num = live_num;
+    chunk_pad_last_index_word((Chunk_Header*)chunk, cur_mark_mask);
     sspace_put_pfc(sspace, chunk);
   }
-  /* the rest: chunks with free rate < 0.1. we don't use them */
+  /* the rest: chunks with free rate < PFC_REUSABLE_RATIO. we don't use them */
+}
+
+static inline void collector_sweep_abnormal_chunk(Collector *collector, Sspace *sspace, Chunk_Header *chunk)
+{
+  assert(chunk->status == CHUNK_ABNORMAL);
+  POINTER_SIZE_INT *table = chunk->table;
+  table[0] &= cur_mark_mask;
+  if(!table[0]){
+    collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+  }
 #ifdef SSPACE_VERIFY
-  //else// if(live_num < slot_num)
-    //zeroing_free_areas_in_pfc((Chunk_Header*)chunk, live_num);
+  else {
+    collector->live_obj_num++;
+  }
 #endif
 }
 
 void sspace_sweep(Collector *collector, Sspace *sspace)
 {
-  Chunk_Heaer_Basic *chunk;
+  Chunk_Header_Basic *chunk;
 #ifdef SSPACE_VERIFY
   collector->live_obj_num = 0;
 #endif
 
-  chunk = sspace_get_next_sweep_chunk(collector, sspace);
+  chunk = sspace_grab_next_chunk(sspace, &next_chunk_for_sweep, TRUE);
   while(chunk){
     /* chunk is free before GC */
     if(chunk->status == CHUNK_FREE){
@@ -196,20 +185,10 @@
     } else if(chunk->status & CHUNK_NORMAL){   /* chunk is used as a normal sized obj chunk */
       collector_sweep_normal_chunk(collector, sspace, (Chunk_Header*)chunk);
     } else {  /* chunk is used as a super obj chunk */
-      assert(chunk->status & (CHUNK_IN_USE | CHUNK_ABNORMAL));
-      POINTER_SIZE_INT *table = ((Chunk_Header*)chunk)->table;
-      table[0] &= mark_mask_in_table;
-      if(!table[0]){
-        collector_add_free_chunk(collector, (Free_Chunk*)chunk);
-      }
-#ifdef SSPACE_VERIFY
-      else {
-        collector->live_obj_num++;
-      }
-#endif
+      collector_sweep_abnormal_chunk(collector, sspace, (Chunk_Header*)chunk);
     }
     
-    chunk = sspace_get_next_sweep_chunk(collector, sspace);
+    chunk = sspace_grab_next_chunk(sspace, &next_chunk_for_sweep, TRUE);
   }
 }
 
@@ -249,14 +228,14 @@
   
   Free_Chunk *chunk = free_chunk_list.head;
   while(chunk){
-    assert(chunk->status == (CHUNK_FREE | CHUNK_IN_USE));
+    assert(chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE));
     /* Remove current chunk from the chunk list */
     free_chunk_list.head = chunk->next;
     if(free_chunk_list.head)
       free_chunk_list.head->prev = NULL;
     /* Check if the back adjacent chunks are free */
     Free_Chunk *back_chunk = (Free_Chunk*)chunk->adj_next;
-    while(back_chunk < sspace_ceiling && back_chunk->status == (CHUNK_FREE | CHUNK_IN_USE)){
+    while(back_chunk < sspace_ceiling && back_chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE)){
       /* Remove back_chunk from list */
       free_list_detach_chunk(&free_chunk_list, back_chunk);
       chunk->adj_next = back_chunk->adj_next;
@@ -264,7 +243,7 @@
     }
     /* Check if the prev adjacent chunks are free */
     Free_Chunk *prev_chunk = (Free_Chunk*)chunk->adj_prev;
-    while(prev_chunk && prev_chunk->status == (CHUNK_FREE | CHUNK_IN_USE)){
+    while(prev_chunk && prev_chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE)){
       /* Remove prev_chunk from list */
       free_list_detach_chunk(&free_chunk_list, prev_chunk);
       prev_chunk->adj_next = chunk->adj_next;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp Thu Aug 16 21:33:02 2007
@@ -19,6 +19,7 @@
 #include "sspace_chunk.h"
 #include "sspace_mark_sweep.h"
 #include "../utils/vector_block.h"
+#include "gc_ms.h"
 #include "../gen/gen.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
@@ -42,19 +43,17 @@
 static Verify_Card *alloc_verify_cards = NULL;
 static Verify_Card *mark_verify_cards = NULL;
 static POINTER_SIZE_INT card_num = 0;
-static POINTER_SIZE_INT alloc_obj = 0;
-volatile POINTER_SIZE_INT live_obj_in_mark = 0;
+static volatile POINTER_SIZE_INT alloc_obj_num = 0;
+static volatile POINTER_SIZE_INT live_obj_in_mark = 0;
+static volatile POINTER_SIZE_INT live_obj_in_fix = 0;
 
 void sspace_verify_init(GC *gc)
 {
   gc_in_verify = gc;
-
-#ifndef ONLY_SSPACE_IN_HEAP
-  POINTER_SIZE_INT heap_size = gc_gen_total_memory_size((GC_Gen*)gc);
-#else
-  POINTER_SIZE_INT heap_size = gc_ms_total_memory_size((GC_MS*)gc);
-#endif
-  card_num = heap_size >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+  
+  Sspace *sspace = gc_get_sspace(gc);
+  POINTER_SIZE_INT space_size = space_committed_size((Space*)sspace);
+  card_num = space_size >> VERIFY_CARD_SIZE_BYTES_SHIFT;
   POINTER_SIZE_INT cards_size = sizeof(Verify_Card) * card_num;
   
   alloc_verify_cards = (Verify_Card*)STD_MALLOC(cards_size);
@@ -123,7 +122,7 @@
 void sspace_verify_alloc(void *addr, unsigned int size)
 {
   assert(address_belongs_to_gc_heap(addr, gc_in_verify));
-  ++alloc_obj;
+  atomic_inc32(&alloc_obj_num);
   
   unsigned int heap_offset = (unsigned int)((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
   unsigned int card_offset = heap_offset & VERIFY_CARD_LOW_MASK;
@@ -158,11 +157,28 @@
   return TRUE;
 }
 
-/* size is real size of obj */
-void sspace_verify_mark(void *addr, unsigned int size)
+static void sspace_verify_weakref(Partial_Reveal_Object *p_obj)
+{
+  WeakReferenceType type = special_reference_type(p_obj);
+  if(type == NOT_REFERENCE) return;
+  
+  REF *p_referent_field = obj_get_referent_field(p_obj);
+  Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
+  if (!p_referent) return;
+  
+  unsigned int size = vm_object_size(p_referent);
+  if(size <= SUPER_OBJ_THRESHOLD){
+    Sspace *sspace = gc_get_sspace(gc_in_verify);
+    Size_Segment *size_seg = sspace_get_size_seg(sspace, size);
+    size = NORMAL_SIZE_ROUNDUP(size, size_seg);
+  }
+  
+  assert(obj_position_is_correct(p_referent, size));
+}
+
+static void mark_card_add_entry(void *addr, unsigned int size)
 {
   assert(address_belongs_to_gc_heap(addr, gc_in_verify));
-  atomic_inc32(&live_obj_in_mark);
   
   unsigned int heap_offset = (unsigned int)((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
   unsigned int card_offset = heap_offset & VERIFY_CARD_LOW_MASK;
@@ -171,12 +187,12 @@
   verify_card_get_block(card);
   Vector_Block *block = card->block;
   
-  if(size <= MEDIUM_OBJ_THRESHOLD)
-    size = SMALL_SIZE_ROUNDUP(size);
-  else if(size <= LARGE_OBJ_THRESHOLD)
-    size = MEDIUM_SIZE_ROUNDUP(size);
-  else if(size <= SUPER_OBJ_THRESHOLD)
-    size = LARGE_SIZE_ROUNDUP(size);
+  if(size <= SUPER_OBJ_THRESHOLD){
+    Sspace *sspace = gc_get_sspace(gc_in_verify);
+    Size_Segment *size_seg = sspace_get_size_seg(sspace, size);
+    size = NORMAL_SIZE_ROUNDUP(size, size_seg);
+  }
+  
   assert(obj_position_is_correct(addr, size));
   Obj_Addr obj_addr = compose_obj_addr(card_offset, size);
   
@@ -188,9 +204,17 @@
   }
   vector_block_add_entry(block, obj_addr);
   unlock(card->lock);
+
+}
+
+/* size is real size of obj */
+void sspace_record_mark(void *addr, unsigned int size)
+{
+  atomic_inc32(&live_obj_in_mark);
+  mark_card_add_entry(addr, size);
 }
 
-static void reverify_mark(void *addr, unsigned int size)
+static void verify_mark(void *addr, unsigned int size, Boolean destructively)
 {
   assert(address_belongs_to_gc_heap(addr, gc_in_verify));
   
@@ -206,7 +230,8 @@
   Obj_Addr *p_addr = block->head;
   while(p_addr < block->tail){
     if(obj_addr == *p_addr){
-      *p_addr = 0;
+      if(destructively)
+        *p_addr = 0;
       break;
     }
     p_addr++;
@@ -214,7 +239,20 @@
   assert(p_addr < block->tail);
 }
 
-static void check_mark_cards(void)
+void sspace_modify_mark_in_compact(void *new_addr, void *old_addr, unsigned int size)
+{
+  /* Verify the old addr and remove it in the according mark card */
+  verify_mark(old_addr, size, TRUE);
+  /* Add new_addr into mark card */
+  mark_card_add_entry(new_addr, size);
+}
+
+void sspace_verify_fix_in_compact(void)
+{
+  atomic_inc32(&live_obj_in_fix);
+}
+
+static void check_and_clear_mark_cards(void)
 {
   for(POINTER_SIZE_INT i=0; i<card_num; i++){
     Vector_Block *block = mark_verify_cards[i].block;
@@ -234,7 +272,7 @@
   }
 }
 
-static void clear_verify_cards(void)
+static void clear_alloc_cards(void)
 {
   for(POINTER_SIZE_INT i=0; i<card_num; i++){
     Verify_Card *card = &alloc_verify_cards[i];
@@ -262,40 +300,30 @@
     assert(!*p_value++);
 }
 
-void sspace_verify_after_collection(GC *gc)
+static POINTER_SIZE_INT sspace_live_obj_num(Sspace *sspace, Boolean gc_finished)
 {
-  printf("Allocated obj: %d\n", alloc_obj);
-  alloc_obj = 0;
-  printf("Live obj in marking: %d\n", live_obj_in_mark);
-  live_obj_in_mark = 0;
-  
-  summarize_sweep_verify(gc);
-  
-  clear_verify_cards();
-  
-  Sspace *sspace = gc_get_sspace(gc);
   Chunk_Header *chunk = (Chunk_Header*)space_heap_start((Space*)sspace);
   Chunk_Header *sspace_ceiling = (Chunk_Header*)space_heap_end((Space*)sspace);
-  POINTER_SIZE_INT total_live_obj = 0;
+  POINTER_SIZE_INT live_num = 0;
   
   for(; chunk < sspace_ceiling; chunk = (Chunk_Header*)CHUNK_END(chunk)){
     /* chunk is free before GC */
-    if(chunk->status == CHUNK_FREE){
-      unsigned int header_size = sizeof(Free_Chunk);
-      //sspace_verify_free_area((POINTER_SIZE_INT*)((POINTER_SIZE_INT)chunk + header_size), (POINTER_SIZE_INT)chunk->adj_next - (POINTER_SIZE_INT)chunk - header_size);
+    if(chunk->status & CHUNK_FREE){
+      assert((gc_finished && chunk->status==CHUNK_FREE)
+              || (!gc_finished && chunk->status==(CHUNK_FREE|CHUNK_TO_MERGE)));
       continue;
     }
     if(chunk->status & CHUNK_ABNORMAL){
-      assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_IN_USE));
+      assert(chunk->status == CHUNK_ABNORMAL);
       assert(chunk->slot_size > SUPER_OBJ_THRESHOLD);
       Partial_Reveal_Object *obj = (Partial_Reveal_Object*)chunk->base;
       assert(chunk->slot_size == vm_object_size(obj));
       assert(get_obj_info_raw(obj) & SUPER_OBJ_MASK);
     }
-    /* chunk is used as a normal sized obj chunk */
+    /* chunk is used as a normal or abnormal one in which there are live objects */
     unsigned int slot_num = chunk->slot_num;
     POINTER_SIZE_INT *table = chunk->table;
-    POINTER_SIZE_INT live_obj_in_chunk = 0;
+    POINTER_SIZE_INT live_num_in_chunk = 0;
     
     unsigned int word_index = 0;
     for(unsigned int i=0; i<slot_num; ++i){
@@ -303,17 +331,49 @@
       word_index = color_index / BITS_PER_WORD;
       void *p_obj = slot_index_to_addr(chunk, i);
       if(table[word_index] & (cur_alloc_color << (color_index % BITS_PER_WORD))){
-        sspace_verify_alloc(p_obj, chunk->slot_size);
-        reverify_mark(p_obj, chunk->slot_size);
-        ++live_obj_in_chunk;
-      } else {
-        //sspace_verify_free_area((POINTER_SIZE_INT*)p_obj, chunk->slot_size);
+        ++live_num_in_chunk;
+        verify_mark(p_obj, chunk->slot_size, gc_finished);
+        if(gc_finished){
+          sspace_verify_alloc(p_obj, chunk->slot_size);
+          sspace_verify_weakref((Partial_Reveal_Object*)p_obj);
+        }
       }
     }
-    total_live_obj += live_obj_in_chunk;
+    live_num += live_num_in_chunk;
   }
+  
+  return live_num;
+}
+
+void sspace_verify_before_collection(GC *gc)
+{
+  printf("Allocated obj: %d\n", alloc_obj_num);
+  alloc_obj_num = 0;
+}
+
+void sspace_verify_after_sweep(GC *gc)
+{
+  printf("Live obj in marking: %d\n", live_obj_in_mark);
+  live_obj_in_mark = 0;
+  
+  summarize_sweep_verify(gc);
+  
+  Sspace *sspace = gc_get_sspace(gc);
+  POINTER_SIZE_INT total_live_obj = sspace_live_obj_num(sspace, FALSE);
+  printf("Live obj after sweep: %d\n", total_live_obj);
+}
+
+void sspace_verify_after_collection(GC *gc)
+{
+  printf("Live obj in fixing: %d\n", live_obj_in_fix);
+  live_obj_in_fix = 0;
+  
+  clear_alloc_cards();
+  
+  Sspace *sspace = gc_get_sspace(gc);
+  POINTER_SIZE_INT total_live_obj = sspace_live_obj_num(sspace, TRUE);
   printf("Live obj after collection: %d\n", total_live_obj);
-  check_mark_cards();
+  check_and_clear_mark_cards();
 }
 
 /*
@@ -325,7 +385,7 @@
   
   for(; chunk < sspace_ceiling; chunk = (Chunk_Header*)CHUNK_END(chunk)){
     if(chunk->status & CHUNK_ABNORMAL){
-      assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_IN_USE));
+      assert(chunk->status == CHUNK_ABNORMAL);
       assert(chunk->slot_size > SUPER_OBJ_THRESHOLD);
       Partial_Reveal_Object *obj = (Partial_Reveal_Object*)chunk->base;
       assert(chunk->slot_size == vm_object_size(obj));
@@ -492,6 +552,8 @@
 static uint64 gc_start_time;
 static uint64 mark_start_time;
 static uint64 sweep_start_time;
+static uint64 compact_start_time;
+static uint64 fix_start_time;
 static uint64 merge_start_time;
 
 void sspace_gc_time(GC *gc, Boolean before_gc)
@@ -519,7 +581,7 @@
   }
 }
 
-void sspace_sweep_time(Boolean before_sweep)
+void sspace_sweep_time(Boolean before_sweep, Boolean sspace_need_compact)
 {
   assert(before_sweep == FALSE);
   if(before_sweep){
@@ -528,6 +590,35 @@
     uint64 end_time = tsc();
     assert(end_time > sweep_start_time);
     printf("\nSweep time: %dms\n", (end_time-sweep_start_time) / CPU_HZ);
+    if(sspace_need_compact)
+      compact_start_time = end_time;
+    else
+      merge_start_time = end_time;
+  }
+}
+
+void sspace_compact_time(Boolean before_compact)
+{
+  assert(before_compact == FALSE);
+  if(before_compact){
+    compact_start_time = tsc();
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > compact_start_time);
+    printf("\nCompact time: %dms\n", (end_time-compact_start_time) / CPU_HZ);
+    fix_start_time = end_time;
+  }
+}
+
+void sspace_fix_time(Boolean before_fix)
+{
+  assert(before_fix == FALSE);
+  if(before_fix){
+    fix_start_time = tsc();
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > fix_start_time);
+    printf("\nFix time: %dms\n", (end_time-fix_start_time) / CPU_HZ);
     merge_start_time = end_time;
   }
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.h Thu Aug 16 21:33:02 2007
@@ -21,6 +21,7 @@
 #include "../common/gc_common.h"
 
 //#define SSPACE_VERIFY
+//#define SSPACE_VERIFY_FINREF
 //#define SSPACE_CHUNK_INFO
 //#define SSPACE_ALLOC_INFO
 //#define SSPACE_TIME
@@ -30,17 +31,23 @@
 void sspace_verify_init(GC *gc);
 void sspace_verify_alloc(void *addr, unsigned int size);
 void sspace_verify_vtable_mark(GC *gc);
-void sspace_verify_mark(void *addr, unsigned int size);
+void sspace_record_mark(void *addr, unsigned int size);
+void sspace_modify_mark_in_compact(void *new_addr, void *old_addr, unsigned int size);
+void sspace_verify_fix_in_compact(void);
 void sspace_verify_free_area(POINTER_SIZE_INT *start, POINTER_SIZE_INT size);
+void sspace_verify_before_collection(GC *gc);
+void sspace_verify_after_sweep(GC *gc);
 void sspace_verify_after_collection(GC *gc);
 
-void sspace_chunks_info(Sspace *sspace, Boolean beore_gc);
+void sspace_chunks_info(Sspace *sspace, Boolean show_info);
 void sspace_alloc_info(unsigned int size);
 void sspace_alloc_info_summary(void);
 
 void sspace_gc_time(GC *gc, Boolean before_gc);
 void sspace_mark_time(Boolean before_mark);
-void sspace_sweep_time(Boolean before_sweep);
+void sspace_sweep_time(Boolean before_sweep, Boolean sspace_need_compact);
+void sspace_compact_time(Boolean before_compact);
+void sspace_fix_time(Boolean before_fix);
 void sspace_merge_time(Boolean before_merge);
 
 #endif // _SSPACE_VERIFY_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp Thu Aug 16 21:33:02 2007
@@ -101,7 +101,7 @@
   collector_reset_weakref_sets(collector);
 #endif
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   /*For LOS_Shrink and LOS_Extend*/
   if(collector->gc->tuner->kind != TRANS_NOTHING){
     collector->non_los_live_obj_size = 0;
@@ -224,12 +224,37 @@
   return;
 }
 
+#include "../common/gc_common.h"
+#ifdef GC_GEN_STATS
+
+#include "../gen/gen_stats.h"
+
+void collector_init_stats(Collector* collector)
+{
+#ifndef USE_MARK_SWEEP_GC
+  gc_gen_collector_stats_initialize(collector);
+#endif
+}
+
+void collector_destruct_stats(Collector* collector)
+{
+#ifndef USE_MARK_SWEEP_GC
+  gc_gen_collector_stats_destruct(collector);
+#endif
+}
+
+#endif
+
 void collector_destruct(GC* gc) 
 {
+  TRACE2("gc.process", "GC: GC collectors destruct ...");
   for(unsigned int i=0; i<gc->num_collectors; i++)
   {
     Collector* collector = gc->collectors[i];
     collector_terminate_thread(collector);
+#ifdef GC_GEN_STATS
+    collector_destruct_stats(collector);
+#endif
     STD_FREE(collector);
    
   }
@@ -241,21 +266,12 @@
 
 unsigned int NUM_COLLECTORS = 0;
 
-struct GC_Gen;
-unsigned int gc_get_processor_num(GC_Gen*);
-#ifdef ONLY_SSPACE_IN_HEAP
-struct GC_MS;
-unsigned int gc_ms_get_processor_num(GC_MS *gc);
-#endif
-
 void collector_initialize(GC* gc)
 {
+  TRACE2("gc.process", "GC: GC collectors init ... \n");
+
   //FIXME::
-#ifndef ONLY_SSPACE_IN_HEAP
-  unsigned int num_processors = gc_get_processor_num((GC_Gen*)gc);
-#else
-  unsigned int num_processors = gc_ms_get_processor_num((GC_MS*)gc);
-#endif
+  unsigned int num_processors = gc_get_processor_num(gc);
   
   unsigned int nthreads = max( max( MAJOR_COLLECTORS, MINOR_COLLECTORS), max(NUM_COLLECTORS, num_processors)); 
 
@@ -273,10 +289,14 @@
     collector->gc = gc;
     collector_init_thread(collector);
 
-#ifdef ONLY_SSPACE_IN_HEAP
+#ifdef USE_MARK_SWEEP_GC
     collector_init_free_chunk_list(collector);
 #endif
-    
+
+#ifdef GC_GEN_STATS
+    collector_init_stats(collector);
+#endif
+
     gc->collectors[i] = collector;
   }
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h Thu Aug 16 21:33:02 2007
@@ -80,6 +80,12 @@
   POINTER_SIZE_INT los_live_obj_size;
   POINTER_SIZE_INT segment_live_size[NORMAL_SIZE_SEGMENT_NUM];
   unsigned int result;
+
+  /*for collect statistics info*/
+#ifdef GC_GEN_STATS
+  void* stats;
+#endif
+
  
 }Collector;
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h Thu Aug 16 21:33:02 2007
@@ -54,6 +54,7 @@
   if(p_targ_obj == NULL){
     /* failed to forward an obj */
     collector->result = FALSE;
+    TRACE2("gc.collect", "failed to forward an obj, minor collection failed.");
     return NULL;
   }
     

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp Thu Aug 16 21:33:02 2007
@@ -44,7 +44,7 @@
   else
     mutator->obj_with_fin = NULL;
 
-#ifdef ONLY_SSPACE_IN_HEAP
+#ifdef USE_MARK_SWEEP_GC
   allocator_init_local_chunks((Allocator*)mutator);
 #endif
   
@@ -68,7 +68,7 @@
 
   alloc_context_reset((Allocator*)mutator);
 
-#ifdef ONLY_SSPACE_IN_HEAP
+#ifdef USE_MARK_SWEEP_GC
   allocactor_destruct_local_chunks((Allocator*)mutator);
 #endif
 
@@ -105,6 +105,7 @@
 
 void gc_reset_mutator_context(GC* gc)
 {
+  TRACE2("gc.process", "GC: reset mutator context  ...\n");
   Mutator *mutator = gc->mutator_list;
   while (mutator) {
     alloc_context_reset((Allocator*)mutator);    
@@ -122,6 +123,5 @@
   }  
   return;
 }
-
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp Thu Aug 16 21:33:02 2007
@@ -24,6 +24,10 @@
 #include "../mark_sweep/gc_ms.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
+
 //#define GC_OBJ_SIZE_STATISTIC
 
 #ifdef GC_OBJ_SIZE_STATISTIC
@@ -73,11 +77,19 @@
   gc_alloc_statistic_obj_distrubution(size);
 #endif
 
-#ifndef ONLY_SSPACE_IN_HEAP
-  if ( size > GC_OBJ_SIZE_THRESHOLD )
+#ifndef USE_MARK_SWEEP_GC
+  if ( size > GC_OBJ_SIZE_THRESHOLD ){
     p_obj = (Managed_Object_Handle)los_alloc(size, allocator);
-  else
-    p_obj = (Managed_Object_Handle)nos_alloc(size, allocator);
+#ifdef GC_GEN_STATS
+    if (p_obj != NULL){
+      GC_Gen* gc = (GC_Gen*)allocator->gc;
+      gc->stats->obj_num_los_alloc++;
+      gc->stats->total_size_los_alloc += size;
+    }
+#endif
+  }else{
+      p_obj = (Managed_Object_Handle)nos_alloc(size, allocator);
+  }
 #else
   p_obj = (Managed_Object_Handle)gc_ms_alloc(size, allocator);
 #endif
@@ -116,7 +128,7 @@
  
   /* Try to allocate an object from the current Thread Local Block */
   Managed_Object_Handle p_obj;
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   p_obj = (Managed_Object_Handle)thread_local_alloc(size, allocator);
 #else
   p_obj = (Managed_Object_Handle)gc_ms_fast_alloc(size, allocator);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp Thu Aug 16 21:33:02 2007
@@ -185,6 +185,11 @@
 
 unsigned int mspace_free_block_idx;
 
+
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
+
 /* world is stopped when starting fspace_collection */      
 void fspace_collection(Fspace *fspace)
 {
@@ -206,20 +211,30 @@
 
 #ifdef MARK_BIT_FLIPPING
     
-    case MINOR_NONGEN_FORWARD_POOL:
-      collector_execute_task(gc, (TaskType)nongen_forward_pool, (Space*)fspace);    
-      break;
+case MINOR_NONGEN_FORWARD_POOL:
+  TRACE2("gc.process", "GC: nongen_forward_pool algo start ... \n");
+  collector_execute_task(gc, (TaskType)nongen_forward_pool, (Space*)fspace);   
+  TRACE2("gc.process", "\nGC: end of nongen forward algo ... \n");
+#ifdef GC_GEN_STATS
+  gc_gen_stats_set_nos_algo((GC_Gen*)gc, MINOR_NONGEN_FORWARD_POOL);
+#endif
+  break;
         
 #endif /*#ifdef MARK_BIT_FLIPPING */
 
-    case MINOR_GEN_FORWARD_POOL:
-      collector_execute_task(gc, (TaskType)gen_forward_pool, (Space*)fspace);
-      break;
+case MINOR_GEN_FORWARD_POOL:
+  TRACE2("gc.process", "GC: gen_forward_pool algo start ... \n");
+  collector_execute_task(gc, (TaskType)gen_forward_pool, (Space*)fspace);
+  TRACE2("gc.process", "\nGC: end of gen forward algo ... \n");
+#ifdef GC_GEN_STATS
+  gc_gen_stats_set_nos_algo((GC_Gen*)gc, MINOR_GEN_FORWARD_POOL);
+#endif
+  break;
         
-    default:
-      printf("\nSpecified minor collection algorithm doesn't exist!\n");
-      exit(0);    
-      break;
+default:
+  DIE2("gc.collection","Specified minor collection algorithm doesn't exist!");
+  exit(0);    
+  break;
   }
 
   return; 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp Thu Aug 16 21:33:02 2007
@@ -48,6 +48,9 @@
 }
 
 /* FIXME:: the collection should be separated from the allocation */
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
 void* fspace_alloc(unsigned size, Allocator *allocator) 
 {
   void*  p_return = NULL;
@@ -64,6 +67,11 @@
     /* after holding lock, try if other thread collected already */
     if ( !space_has_free_block((Blocked_Space*)fspace) ) {  
         if(attempts < 2) {
+#ifdef GC_GEN_STATS
+        GC_Gen* gc = (GC_Gen*)allocator->gc;
+        GC_Gen_Stats* stats = gc->stats;
+        gc_gen_update_nos_alloc_obj_stats(stats, fspace->committed_heap_size);
+#endif
           gc_reclaim_heap(allocator->gc, GC_CAUSE_NOS_IS_FULL); 
           if(allocator->alloc_block){
             vm_gc_unlock_enum();  
@@ -85,6 +93,4 @@
   return p_return;
   
 }
-
-
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp Thu Aug 16 21:33:02 2007
@@ -26,6 +26,9 @@
 #include "../finalizer_weakref/finalizer_weakref.h"
 #include "../common/compressed_ref.h"
 
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
 static FORCE_INLINE Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
 {
   assert(obj_belongs_to_nos(p_obj));  
@@ -114,9 +117,14 @@
     if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
       collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 
     
-    if(obj_mark_in_oi(p_obj)) 
+    if(obj_mark_in_oi(p_obj)){
       scan_object(collector, p_obj);
-    
+#ifdef GC_GEN_STATS
+      GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+      gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
+#endif
+
+    }
     return;
   }
     
@@ -142,6 +150,13 @@
     return;
   }  
   /* otherwise, we successfully forwarded */
+
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+  gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
+  gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
+#endif
+
   write_slot(p_ref, p_target_obj);
 
 
@@ -170,6 +185,9 @@
 {
   GC* gc = collector->gc;
   GC_Metadata* metadata = gc->metadata;
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+#endif
   
   unsigned int num_active_collectors = gc->num_active_collectors;
   atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
@@ -181,6 +199,8 @@
   Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
 
   /* first step: copy all root objects to trace tasks. */ 
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ......");
   while(root_set){
     POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
     while(!vector_block_iterator_end(root_set,iter)){
@@ -189,6 +209,11 @@
       
       if(!*p_ref) continue;  /* root ref cann't be NULL, but remset can be */
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
+
+#ifdef GC_GEN_STATS
+      gc_gen_collector_update_rootset_ref_num(stats);
+#endif
+
       if(obj_belongs_to_nos(p_obj)){
         collector_tracestack_push(collector, p_ref);
       }
@@ -201,6 +226,10 @@
   /* second step: iterate over the trace tasks and forward objects */
   collector->trace_stack = free_task_pool_get_entry(metadata);
 
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ......");
+
 retry:
   Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
 
@@ -238,6 +267,7 @@
     atomic_dec32(&num_finished_collectors);
     goto retry;      
   }
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");
 
   /* now we are done, but each collector has a private stack that is empty */  
   trace_task = (Vector_Block*)collector->trace_stack;
@@ -256,7 +286,10 @@
   collector_trace_rootsets(collector);
   
   /* the rest work is not enough for parallelization, so let only one thread go */
-  if( collector->thread_handle != 0 ) return;
+  if( (POINTER_SIZE_INT)collector->thread_handle != 0 ) {
+    TRACE2("gc.process", "GC: collector["<<(POINTER_SIZE_INT)collector->thread_handle<<"] finished");
+    return;
+  }
 
   gc->collect_result = gc_collection_result(gc);
   if(!gc->collect_result){
@@ -280,6 +313,8 @@
   gc_fix_rootset(collector);
   
   fspace_reset_for_allocation(space);  
+
+  TRACE2("gc.process", "GC: collector[0] finished");
 
   return;
   

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp Thu Aug 16 21:33:02 2007
@@ -26,6 +26,10 @@
 #include "../common/gc_metadata.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
+
 #ifdef MARK_BIT_FLIPPING
 
 static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
@@ -87,8 +91,13 @@
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
 
   if(!obj_belongs_to_nos(p_obj)){
-    if(obj_mark_in_oi(p_obj))
+    if(obj_mark_in_oi(p_obj)){
+#ifdef GC_GEN_STATS
+      GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+      gc_gen_collector_update_marked_nonnos_obj_stats_minor(stats);
+#endif
       scan_object(collector, p_obj);
+    }
     return;
   }
 
@@ -114,6 +123,12 @@
     return;
   }
   /* otherwise, we successfully forwarded */
+
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+  gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
+  gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
+#endif
   write_slot(p_ref, p_target_obj);
 
   scan_object(collector, p_target_obj); 
@@ -140,6 +155,9 @@
 {
   GC* gc = collector->gc;
   GC_Metadata* metadata = gc->metadata;
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+#endif
   
   unsigned int num_active_collectors = gc->num_active_collectors;
   atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
@@ -151,6 +169,8 @@
   Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
 
   /* first step: copy all root objects to trace tasks. */ 
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ...");
   while(root_set){
     POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
     while(!vector_block_iterator_end(root_set,iter)){
@@ -160,6 +180,10 @@
       assert(*p_ref);  /* root ref cann't be NULL, but remset can be */
 
       collector_tracestack_push(collector, p_ref);
+
+#ifdef GC_GEN_STATS    
+      gc_gen_collector_update_rootset_ref_num(stats);
+#endif
     } 
     root_set = pool_iterator_next(metadata->gc_rootset_pool);
   }
@@ -169,6 +193,10 @@
   /* second step: iterate over the trace tasks and forward objects */
   collector->trace_stack = free_task_pool_get_entry(metadata);
 
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ...");
+
 retry:
   Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
 
@@ -205,6 +233,8 @@
     goto retry; 
   }
 
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");
+
   /* now we are done, but each collector has a private stack that is empty */  
   trace_task = (Vector_Block*)collector->trace_stack;
   vector_stack_clear(trace_task);
@@ -221,8 +251,10 @@
   
   collector_trace_rootsets(collector);  
   /* the rest work is not enough for parallelization, so let only one thread go */
-  if( collector->thread_handle != 0 ) return;
-
+  if( (POINTER_SIZE_INT)collector->thread_handle != 0 ) {
+    TRACE2("gc.process", "GC: collector["<<(POINTER_SIZE_INT)collector->thread_handle<<"] finished");
+    return;
+  }
   gc->collect_result = gc_collection_result(gc);
   if(!gc->collect_result){
 #ifndef BUILD_IN_REFERENT
@@ -245,6 +277,8 @@
   gc_fix_rootset(collector);
   
   fspace_reset_for_allocation(space);
+
+  TRACE2("gc.process", "GC: collector[0] finished");
 
   return;
   

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_pool.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_pool.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_pool.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_pool.h Thu Aug 16 21:33:02 2007
@@ -30,6 +30,7 @@
 inline void sync_pool_destruct(Pool* pool){ sync_stack_destruct(pool); }
 
 inline Boolean pool_is_empty(Pool* pool){ return sync_stack_is_empty(pool);}
+inline void pool_empty(Pool* pool) { sync_stack_empty(pool); }
 
 inline unsigned int pool_size(Pool* pool){ return sync_stack_size(pool); }
 
@@ -50,5 +51,6 @@
 inline Vector_Block* pool_iterator_next(Pool* pool){ return (Vector_Block*)sync_stack_iterate_next(pool);}
 
 #endif /* #ifndef _SYNC_POOL_H_ */
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h Thu Aug 16 21:33:02 2007
@@ -147,6 +147,12 @@
   return (stack_top_get_entry(stack->top) == NULL);
 }
 
+inline void sync_stack_empty(Sync_Stack* stack)
+{
+  stack->top = (Stack_Top)NULL;
+  stack->cur = NULL;
+}
+
 inline unsigned int sync_stack_size(Sync_Stack* stack)
 {
   unsigned int entry_count = 0;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp Thu Aug 16 21:33:02 2007
@@ -422,8 +422,3 @@
   heap_verifier->all_obj_scanner   = verifier_scan_all_objects;
 }
 
-
-
-
-
-

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp?view=diff&rev=566913&r1=566912&r2=566913
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp Thu Aug 16 21:33:02 2007
@@ -533,5 +533,3 @@
 void verifier_reset_hash_distance()
 { hash_obj_distance = 0;}
 
-
-



Mime
View raw message