harmony-commits mailing list archives

From x..@apache.org
Subject svn commit: r553050 [2/2] - in /harmony/enhanced/drlvm/trunk: build/make/components/vm/ vm/gc_gen/src/common/ vm/gc_gen/src/finalizer_weakref/ vm/gc_gen/src/gen/ vm/gc_gen/src/los/ vm/gc_gen/src/mark_compact/ vm/gc_gen/src/mark_sweep/ vm/gc_gen/src/thr...
Date Wed, 04 Jul 2007 03:01:03 GMT
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp?view=auto&rev=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp Tue Jul  3 20:01:01 2007
@@ -0,0 +1,652 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "sspace_chunk.h"
+
+/* PFC stands for partially free chunk */
+#define SMALL_PFC_POOL_NUM    SMALL_LOCAL_CHUNK_NUM
+#define MEDIUM_PFC_POOL_NUM   MEDIUM_LOCAL_CHUNK_NUM
+#define LARGE_PFC_POOL_NUM    ((SUPER_OBJ_THRESHOLD - LARGE_OBJ_THRESHOLD) >> LARGE_GRANULARITY_BITS)
+#define NUM_ALIGNED_FREE_CHUNK_BUCKET   (HYPER_OBJ_THRESHOLD >> NORMAL_CHUNK_SHIFT_COUNT)
+#define NUM_UNALIGNED_FREE_CHUNK_BUCKET (HYPER_OBJ_THRESHOLD >> CHUNK_GRANULARITY_BITS)
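+
+/* For concreteness (a sketch using the constants defined in sspace_chunk.h in this
+ * commit, and assuming a 32-bit build where GC_OBJECT_ALIGNMENT_BITS == 2):
+ *   SMALL_PFC_POOL_NUM  = 128 >> 2          = 32 pools  (slot sizes 4, 8, ..., 128)
+ *   MEDIUM_PFC_POOL_NUM = (256 - 128) >> 3  = 16 pools  (slot sizes 136, 144, ..., 256)
+ *   LARGE_PFC_POOL_NUM  = (1024 - 256) >> 7 = 6 pools   (slot sizes 384, 512, ..., 1024)
+ *   NUM_ALIGNED_FREE_CHUNK_BUCKET   = 128KB >> 16 = 2 buckets   (64KB, 128KB)
+ *   NUM_UNALIGNED_FREE_CHUNK_BUCKET = 128KB >> 10 = 128 buckets (1KB granularity)
+ */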
+
+
+static Pool  *small_pfc_pools[SMALL_PFC_POOL_NUM];
+static Pool  *medium_pfc_pools[MEDIUM_PFC_POOL_NUM];
+static Pool  *large_pfc_pools[LARGE_PFC_POOL_NUM];
+static Free_Chunk_List  aligned_free_chunk_lists[NUM_ALIGNED_FREE_CHUNK_BUCKET];
+static Free_Chunk_List  unaligned_free_chunk_lists[NUM_UNALIGNED_FREE_CHUNK_BUCKET];
+static Free_Chunk_List  hyper_free_chunk_list;
+
+static Boolean  small_pfc_steal_flags[SMALL_PFC_POOL_NUM];
+static Boolean  medium_pfc_steal_flags[MEDIUM_PFC_POOL_NUM];
+static Boolean  large_pfc_steal_flags[LARGE_PFC_POOL_NUM];
+
+void sspace_init_chunks(Sspace *sspace)
+{
+  unsigned int i;
+  
+  /* Init small obj partially free chunk pools */
+  for(i=SMALL_PFC_POOL_NUM; i--;){
+    small_pfc_steal_flags[i] = FALSE;
+    small_pfc_pools[i] = sync_pool_create();
+  }
+  
+  /* Init medium obj partially free chunk pools */
+  for(i=MEDIUM_PFC_POOL_NUM; i--;){
+    medium_pfc_steal_flags[i] = FALSE;
+    medium_pfc_pools[i] = sync_pool_create();
+  }
+  
+  /* Init large obj partially free chunk pools */
+  for(i=LARGE_PFC_POOL_NUM; i--;){
+    large_pfc_steal_flags[i] = FALSE;
+    large_pfc_pools[i] = sync_pool_create();
+  }
+  
+  /* Init aligned free chunk lists */
+  for(i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_chunk_list_init(&aligned_free_chunk_lists[i]);
+  
+  /* Init unaligned free chunk lists */
+  for(i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_chunk_list_init(&unaligned_free_chunk_lists[i]);
+  
+  /* Init the hyper free chunk list */
+  free_chunk_list_init(&hyper_free_chunk_list);
+    
+  /* Init Sspace struct's chunk fields */
+  sspace->small_pfc_pools = small_pfc_pools;
+  sspace->medium_pfc_pools = medium_pfc_pools;
+  sspace->large_pfc_pools = large_pfc_pools;
+  sspace->aligned_free_chunk_lists = aligned_free_chunk_lists;
+  sspace->unaligned_free_chunk_lists = unaligned_free_chunk_lists;
+  sspace->hyper_free_chunk_list = &hyper_free_chunk_list;
+  
+  /* Init the first free chunk: from heap start to heap end */
+  Free_Chunk *free_chunk = (Free_Chunk*)sspace->heap_start;
+  free_chunk->adj_next = (Chunk_Heaer_Basic*)sspace->heap_end;
+  POINTER_SIZE_INT chunk_size = sspace->reserved_heap_size;
+  assert(chunk_size > CHUNK_GRANULARITY && !(chunk_size % CHUNK_GRANULARITY));
+  sspace_put_free_chunk(sspace, free_chunk);
+}
+
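+/* Walk at most steal_threshold chunks of the pool and set steal_flag to TRUE iff the
+   pool holds at least that many. Only flagged pools may later be stolen from
+   (see sspace_steal_small_pfc and friends). */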
+static void pfc_pool_set_steal_flag(Pool *pool, unsigned int steal_threshold, unsigned int &steal_flag)
+{
+  Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pool);
+  while(chunk){
+    steal_threshold--;
+    if(!steal_threshold)
+      break;
+    chunk = chunk->next;
+  }
+  steal_flag = steal_threshold ? FALSE : TRUE;
+}
+
+static void empty_pool(Pool *pool)
+{
+  pool->top = (Stack_Top)NULL;
+  pool->cur = NULL;
+}
+
+void sspace_clear_chunk_list(GC *gc)
+{
+  unsigned int i;
+  unsigned int collector_num = gc->num_collectors;
+  unsigned int steal_threshold;
+  
+  steal_threshold = collector_num << SMALL_PFC_STEAL_THRESHOLD;
+  for(i=SMALL_PFC_POOL_NUM; i--;){
+    Pool *pool = small_pfc_pools[i];
+    pfc_pool_set_steal_flag(pool, steal_threshold, small_pfc_steal_flags[i]);
+    empty_pool(pool);
+  }
+  
+  steal_threshold = collector_num << MEDIUM_PFC_STEAL_THRESHOLD;
+  for(i=MEDIUM_PFC_POOL_NUM; i--;){
+    Pool *pool = medium_pfc_pools[i];
+    pfc_pool_set_steal_flag(pool, steal_threshold, medium_pfc_steal_flags[i]);
+    empty_pool(pool);
+  }
+  
+  steal_threshold = collector_num << LARGE_PFC_STEAL_THRESHOLD;
+  for(i=LARGE_PFC_POOL_NUM; i--;){
+    Pool *pool = large_pfc_pools[i];
+    pfc_pool_set_steal_flag(pool, steal_threshold, large_pfc_steal_flags[i]);
+    empty_pool(pool);
+  }
+  
+  for(i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_chunk_list_clear(&aligned_free_chunk_lists[i]);
+  
+  for(i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_chunk_list_clear(&unaligned_free_chunk_lists[i]);
+  
+  free_chunk_list_clear(&hyper_free_chunk_list);
+  
+  /* release small obj chunks of each mutator */
+  Mutator *mutator = gc->mutator_list;
+  while(mutator){
+    Chunk_Header **chunks = mutator->small_chunks;
+    for(i=SMALL_LOCAL_CHUNK_NUM; i--;)
+      chunks[i] = NULL;
+    chunks = mutator->medium_chunks;
+    for(i=MEDIUM_LOCAL_CHUNK_NUM; i--;)
+      chunks[i] = NULL;
+    mutator = mutator->next;
+  }
+}
+
+/* Simply put the free chunk onto the appropriate list.
+ * Contiguous free chunks are not merged here;
+ * the merging job is done by the sweeping phase.
+ */
+static void list_put_free_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+  chunk->status = CHUNK_FREE;
+  chunk->adj_prev = NULL;
+  chunk->prev = NULL;
+
+  lock(list->lock);
+  chunk->next = list->head;
+  if(list->head)
+    list->head->prev = chunk;
+  list->head = chunk;
+  assert(list->chunk_num < ~((unsigned int)0));
+  ++list->chunk_num;
+  unlock(list->lock);
+}
+
+static Free_Chunk *free_list_get_head(Free_Chunk_List *list)
+{
+  lock(list->lock);
+  Free_Chunk *chunk = list->head;
+  if(chunk){
+    list->head = chunk->next;
+    if(list->head)
+      list->head->prev = NULL;
+    assert(list->chunk_num);
+    --list->chunk_num;
+    assert(chunk->status == CHUNK_FREE);
+  }
+  unlock(list->lock);
+  return chunk;
+}
+
+void sspace_put_free_chunk(Sspace *sspace, Free_Chunk *chunk)
+{
+  POINTER_SIZE_INT chunk_size = CHUNK_SIZE(chunk);
+  assert(!(chunk_size % CHUNK_GRANULARITY));
+  
+  if(chunk_size > HYPER_OBJ_THRESHOLD)
+    list_put_free_chunk(sspace->hyper_free_chunk_list, chunk);
+  else if(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK) && !(chunk_size & NORMAL_CHUNK_LOW_MASK))
+    list_put_free_chunk(&sspace->aligned_free_chunk_lists[ALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
+  else
+    list_put_free_chunk(&sspace->unaligned_free_chunk_lists[UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
+}
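+
+/* Routing examples (a sketch, using the constants from sspace_chunk.h): a 64KB chunk
+   starting on a 64KB boundary goes to aligned list 0 (ALIGNED_CHUNK_SIZE_TO_INDEX(64KB) == 0);
+   a 3KB chunk goes to unaligned list 2 (UNALIGNED_CHUNK_SIZE_TO_INDEX(3072) == 2);
+   anything larger than HYPER_OBJ_THRESHOLD (128KB) goes to the hyper list. */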
+
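+/* Carve a NORMAL_CHUNK_SIZE_BYTES-aligned normal chunk out of a larger free chunk.
+   Up to two remainders can result -- the unaligned space in front of the aligned
+   boundary and the space behind the carved chunk -- and both are returned to the
+   free chunk lists. */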
+static Free_Chunk *partition_normal_free_chunk(Sspace *sspace, Free_Chunk *chunk)
+{
+  assert(CHUNK_SIZE(chunk) > NORMAL_CHUNK_SIZE_BYTES);
+  
+  Chunk_Heaer_Basic *adj_next = chunk->adj_next;
+  Free_Chunk *normal_chunk = (Free_Chunk*)(((POINTER_SIZE_INT)chunk + NORMAL_CHUNK_SIZE_BYTES-1) & NORMAL_CHUNK_HIGH_MASK);
+  
+  if(chunk != normal_chunk){
+    assert(chunk < normal_chunk);
+    chunk->adj_next = (Chunk_Heaer_Basic*)normal_chunk;
+    sspace_put_free_chunk(sspace, chunk);
+  }
+  normal_chunk->adj_next = (Chunk_Heaer_Basic*)((POINTER_SIZE_INT)normal_chunk + NORMAL_CHUNK_SIZE_BYTES);
+  if(normal_chunk->adj_next != adj_next){
+    assert(normal_chunk->adj_next < adj_next);
+    Free_Chunk *back_chunk = (Free_Chunk*)normal_chunk->adj_next;
+    back_chunk->adj_next = adj_next;
+    sspace_put_free_chunk(sspace, back_chunk);
+  }
+  
+  normal_chunk->status = CHUNK_FREE;
+  return normal_chunk;
+}
+
+/* Partition the free chunk into two free chunks:
+ * the first one's size is chunk_size;
+ * the second is inserted into a free chunk list according to its size.
+ */
+static void partition_abnormal_free_chunk(Sspace *sspace,Free_Chunk *chunk, unsigned int chunk_size)
+{
+  assert(CHUNK_SIZE(chunk) > chunk_size);
+  
+  Free_Chunk *back_chunk = (Free_Chunk*)((POINTER_SIZE_INT)chunk + chunk_size);
+  back_chunk->adj_next = chunk->adj_next;
+  chunk->adj_next = (Chunk_Heaer_Basic*)back_chunk;
+  sspace_put_free_chunk(sspace, back_chunk);
+}
+
+Free_Chunk *sspace_get_normal_free_chunk(Sspace *sspace)
+{
+  Free_Chunk_List *aligned_lists = sspace->aligned_free_chunk_lists;
+  Free_Chunk_List *unaligned_lists = sspace->unaligned_free_chunk_lists;
+  Free_Chunk_List *list = NULL;
+  Free_Chunk *chunk = NULL;
+  
+  /* Search in aligned chunk lists first */
+  unsigned int index = 0;
+  while(index < NUM_ALIGNED_FREE_CHUNK_BUCKET){
+    list = &aligned_lists[index];
+    if(list->head)
+      chunk = free_list_get_head(list);
+    if(chunk){
+      if(CHUNK_SIZE(chunk) > NORMAL_CHUNK_SIZE_BYTES)
+        chunk = partition_normal_free_chunk(sspace, chunk);
+      //zeroing_free_chunk(chunk);
+      return chunk;
+    }
+    index++;
+  }
+  assert(!chunk);
+  
+  /* Search the unaligned chunk lists for a larger chunk.
+     (NORMAL_CHUNK_SIZE_BYTES + (NORMAL_CHUNK_SIZE_BYTES-CHUNK_GRANULARITY))
+     is the smallest size that guarantees the chunk contains an aligned normal chunk.
+  */
+  index = UNALIGNED_CHUNK_SIZE_TO_INDEX((NORMAL_CHUNK_SIZE_BYTES<<1) - CHUNK_GRANULARITY);
+  while(index < NUM_UNALIGNED_FREE_CHUNK_BUCKET){
+    list = &unaligned_lists[index];
+    if(list->head)
+      chunk = free_list_get_head(list);
+    if(chunk){
+      chunk = partition_normal_free_chunk(sspace, chunk);
+      assert(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK));
+      //zeroing_free_chunk(chunk);
+      return chunk;
+    }
+    index++;
+  }
+  assert(!chunk);
+  
+  /* search in the hyper free chunk list */
+  chunk = sspace_get_hyper_free_chunk(sspace, NORMAL_CHUNK_SIZE_BYTES, TRUE);
+  assert(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK));
+  
+  return chunk;
+}
+
+Free_Chunk *sspace_get_abnormal_free_chunk(Sspace *sspace, unsigned int chunk_size)
+{
+  assert(chunk_size > CHUNK_GRANULARITY);
+  assert(!(chunk_size % CHUNK_GRANULARITY));
+  assert(chunk_size <= HYPER_OBJ_THRESHOLD);
+  
+  Free_Chunk_List *unaligned_lists = sspace->unaligned_free_chunk_lists;
+  Free_Chunk_List *list = NULL;
+  Free_Chunk *chunk = NULL;
+  unsigned int index = 0;
+  
+  /* Search the lists whose chunk size is an exact multiple of chunk_size */
+  unsigned int search_size = chunk_size;
+  while(search_size <= HYPER_OBJ_THRESHOLD){
+    index = UNALIGNED_CHUNK_SIZE_TO_INDEX(search_size);
+    list = &unaligned_lists[index];
+    if(list->head)
+      chunk = free_list_get_head(list);
+    if(chunk){
+      if(search_size > chunk_size)
+        partition_abnormal_free_chunk(sspace, chunk, chunk_size);
+      zeroing_free_chunk(chunk);
+      return chunk;
+    }
+    search_size += chunk_size;
+  }
+  assert(!chunk);
+  
+  /* search in the hyper free chunk list */
+  chunk = sspace_get_hyper_free_chunk(sspace, chunk_size, FALSE);
+  if(chunk) return chunk;
+  
+  /* Search the unaligned chunk lists again, this time accepting any chunk no smaller than chunk_size */
+  index = UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size);
+  while(index < NUM_UNALIGNED_FREE_CHUNK_BUCKET){
+    list = &unaligned_lists[index];
+    if(list->head)
+      chunk = free_list_get_head(list);
+    if(chunk){
+      if(index > UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size))
+        partition_abnormal_free_chunk(sspace, chunk, chunk_size);
+      zeroing_free_chunk(chunk);
+      return chunk;
+    }
+    ++index;
+  }
+  
+  return chunk;
+}
+
+Free_Chunk *sspace_get_hyper_free_chunk(Sspace *sspace, unsigned int chunk_size, Boolean is_normal_chunk)
+{
+  assert(chunk_size >= CHUNK_GRANULARITY);
+  assert(!(chunk_size % CHUNK_GRANULARITY));
+  
+  Free_Chunk_List *list = sspace->hyper_free_chunk_list;
+  lock(list->lock);
+  Free_Chunk **p_next = &list->head;
+  Free_Chunk *chunk = list->head;
+  while(chunk){
+    if(CHUNK_SIZE(chunk) >= chunk_size){
+      Free_Chunk *next_chunk = chunk->next;
+      *p_next = next_chunk;
+      if(next_chunk){
+        if(chunk != list->head)
+          next_chunk->prev = (Free_Chunk *)p_next;  /* utilize an assumption: next is the first field of Free_Chunk */
+        else
+          next_chunk->prev = NULL;
+      }
+      break;
+    }
+    p_next = &chunk->next;
+    chunk = chunk->next;
+  }
+  unlock(list->lock);
+  
+  if(chunk){
+    if(is_normal_chunk)
+      chunk = partition_normal_free_chunk(sspace, chunk);
+    else if(CHUNK_SIZE(chunk) > chunk_size)
+      partition_abnormal_free_chunk(sspace, chunk, chunk_size);
+    if(!is_normal_chunk)
+      zeroing_free_chunk(chunk);
+  }
+  
+  return chunk;
+}
+
+#define min_value(x, y) (((x) < (y)) ? (x) : (y))
+
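+/* Steal a PFC from one of the next few larger size classes (up to SMALL_PFC_STEAL_NUM
+   classes above index), but only from pools whose steal flag was set in
+   sspace_clear_chunk_list. */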
+Chunk_Header *sspace_steal_small_pfc(Sspace *sspace, unsigned int index)
+{
+  Chunk_Header *pfc = NULL;
+  unsigned int max_index = min_value(index + SMALL_PFC_STEAL_NUM + 1, SMALL_PFC_POOL_NUM);
+  ++index;
+  for(; index < max_index; ++index){
+    if(!small_pfc_steal_flags[index]) continue;
+    pfc = sspace_get_small_pfc(sspace, index);
+    if(pfc) return pfc;
+  }
+  return NULL;
+}
+Chunk_Header *sspace_steal_medium_pfc(Sspace *sspace, unsigned int index)
+{
+  Chunk_Header *pfc = NULL;
+  unsigned int max_index = min_value(index + MEDIUM_PFC_STEAL_NUM + 1, MEDIUM_PFC_POOL_NUM);
+  ++index;
+  for(; index < max_index; ++index){
+    if(!medium_pfc_steal_flags[index]) continue;
+    pfc = sspace_get_medium_pfc(sspace, index);
+    if(pfc) return pfc;
+  }
+  return NULL;
+}
+Chunk_Header *sspace_steal_large_pfc(Sspace *sspace, unsigned int index)
+{
+  Chunk_Header *pfc = NULL;
+  unsigned int max_index = min_value(index + LARGE_PFC_STEAL_NUM + 1, LARGE_PFC_POOL_NUM);
+  ++index;
+  for(; index < max_index; ++index){
+    if(!large_pfc_steal_flags[index]) continue;
+    pfc = sspace_get_large_pfc(sspace, index);
+    if(pfc) return pfc;
+  }
+  return NULL;
+}
+
+/* Because this computation is done without a lock, its result is not accurate, but it is good enough for its purpose. */
+POINTER_SIZE_INT sspace_free_memory_size(Sspace *sspace)
+{
+  POINTER_SIZE_INT free_size = 0;
+  
+  vm_gc_lock_enum();
+  
+  for(unsigned int i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_size += NORMAL_CHUNK_SIZE_BYTES * (i+1) * sspace->aligned_free_chunk_lists[i].chunk_num;
+  
+  for(unsigned int i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_size += CHUNK_GRANULARITY * (i+1) * sspace->unaligned_free_chunk_lists[i].chunk_num;
+  
+  Free_Chunk *hyper_chunk = sspace->hyper_free_chunk_list->head;
+  while(hyper_chunk){
+    free_size += CHUNK_SIZE(hyper_chunk);
+    hyper_chunk = hyper_chunk->next;
+  }
+  
+  vm_gc_unlock_enum();
+  
+  return free_size;
+}
+
+
+#ifdef SSPACE_CHUNK_INFO
+
+extern POINTER_SIZE_INT alloc_mask_in_table;
+static POINTER_SIZE_INT free_mem_size;
+
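+/* Count the set bits in a word with Kernighan's method:
+   each iteration of "word &= word - 1" clears the lowest set bit. */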
+static unsigned int word_set_bit_num(POINTER_SIZE_INT word)
+{
+  unsigned int count = 0;
+  
+  while(word){
+    word &= word - 1;
+    ++count;
+  }
+  return count;
+}
+
+static unsigned int pfc_info(Chunk_Header *chunk, Boolean before_gc)
+{
+  POINTER_SIZE_INT *table = ((Chunk_Header*)chunk)->table;
+  unsigned int slot_num = chunk->slot_num;
+  unsigned int live_num = 0;
+  
+  unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  for(unsigned int i=0; i<index_word_num; ++i){
+    table[i] &= alloc_mask_in_table;
+    unsigned int live_num_in_word = (table[i] == alloc_mask_in_table) ? SLOT_NUM_PER_WORD_IN_TABLE : word_set_bit_num(table[i]);
+    live_num += live_num_in_word;
+  }
+  if(before_gc){
+    unsigned int slot_num_in_last_word = slot_num % SLOT_NUM_PER_WORD_IN_TABLE;
+    if(slot_num_in_last_word){
+      unsigned int fake_live_num_in_last_word = SLOT_NUM_PER_WORD_IN_TABLE - slot_num_in_last_word;
+      assert(live_num >= fake_live_num_in_last_word);
+      live_num -= fake_live_num_in_last_word;
+    }
+  }
+  assert(live_num <= slot_num);
+  return live_num;
+}
+
+enum Obj_Type {
+  SMALL_OBJ,
+  MEDIUM_OBJ,
+  LARGE_OBJ
+};
+static unsigned int index_to_size(unsigned int index, Obj_Type type)
+{
+  if(type == SMALL_OBJ)
+    return SMALL_INDEX_TO_SIZE(index);
+  if(type == MEDIUM_OBJ)
+    return MEDIUM_INDEX_TO_SIZE(index);
+  assert(type == LARGE_OBJ);
+  return LARGE_INDEX_TO_SIZE(index);
+}
+
+static void pfc_pools_info(Sspace *sspace, Pool **pools, unsigned int pool_num, Obj_Type type, Boolean before_gc)
+{
+  unsigned int index;
+  
+  for(index = 0; index < pool_num; ++index){
+    Pool *pool = pools[index];
+    Chunk_Header *chunk = NULL;
+    unsigned int chunk_counter = 0;
+    unsigned int slot_num = 0;
+    unsigned int live_num = 0;
+    pool_iterator_init(pool);
+    while((chunk = (Chunk_Header*)pool_iterator_next(pool)) != NULL){
+      ++chunk_counter;
+      slot_num += chunk->slot_num;
+      live_num += pfc_info(chunk, before_gc);
+    }
+    if(slot_num){
+      printf("Size: %x\tchunk num: %d\tlive obj: %d\ttotal obj: %d\tLive Ratio: %f\n", index_to_size(index, type), chunk_counter, live_num, slot_num, (float)live_num/slot_num);
+      assert(live_num < slot_num);
+      free_mem_size += index_to_size(index, type) * (slot_num-live_num);
+      assert(free_mem_size < sspace->committed_heap_size);
+    }
+  }
+}
+
+enum Chunk_Type {
+  ALIGNED_CHUNK,
+  UNALIGNED_CHUNK
+};
+static unsigned int chunk_index_to_size(unsigned int index, Chunk_Type type)
+{
+  if(type == ALIGNED_CHUNK)
+    return ALIGNED_CHUNK_INDEX_TO_SIZE(index);
+  assert(type == UNALIGNED_CHUNK);
+  return UNALIGNED_CHUNK_INDEX_TO_SIZE(index);
+}
+
+static void free_lists_info(Sspace *sspace, Free_Chunk_List *lists, unsigned int list_num, Chunk_Type type)
+{
+  unsigned int index;
+  
+  for(index = 0; index < list_num; ++index){
+    Free_Chunk *chunk = lists[index].head;
+    unsigned int chunk_counter = 0;
+    while(chunk){
+      ++chunk_counter;
+      unsigned int chunk_size = CHUNK_SIZE(chunk);
+      assert(chunk_size <= HYPER_OBJ_THRESHOLD);
+      free_mem_size += chunk_size;
+      assert(free_mem_size < sspace->committed_heap_size);
+      chunk = chunk->next;
+    }
+    printf("Free Size: %x\tnum: %d\n", chunk_index_to_size(index, type), chunk_counter);
+  }
+}
+
+void sspace_chunks_info(Sspace *sspace, Boolean before_gc)
+{
+  if(!before_gc) return;
+  
+  printf("\n\nSMALL PFC INFO:\n\n");
+  pfc_pools_info(sspace, small_pfc_pools, SMALL_PFC_POOL_NUM, SMALL_OBJ, before_gc);
+  
+  printf("\n\nMEDIUM PFC INFO:\n\n");
+  pfc_pools_info(sspace, medium_pfc_pools, MEDIUM_PFC_POOL_NUM, MEDIUM_OBJ, before_gc);
+  
+  printf("\n\nLARGE PFC INFO:\n\n");
+  pfc_pools_info(sspace, large_pfc_pools, LARGE_PFC_POOL_NUM, LARGE_OBJ, before_gc);
+  
+  printf("\n\nALIGNED FREE CHUNK INFO:\n\n");
+  free_lists_info(sspace, aligned_free_chunk_lists, NUM_ALIGNED_FREE_CHUNK_BUCKET, ALIGNED_CHUNK);
+  
+  printf("\n\nUNALIGNED FREE CHUNK INFO:\n\n");
+  free_lists_info(sspace, unaligned_free_chunk_lists, NUM_UNALIGNED_FREE_CHUNK_BUCKET, UNALIGNED_CHUNK);
+  
+  printf("\n\nSUPER FREE CHUNK INFO:\n\n");
+  Free_Chunk_List *list = &hyper_free_chunk_list;
+  Free_Chunk *chunk = list->head;
+  while(chunk){
+    printf("Size: %x\n", CHUNK_SIZE(chunk));
+    free_mem_size += CHUNK_SIZE(chunk);
+    assert(free_mem_size < sspace->committed_heap_size);
+    chunk = chunk->next;
+  }
+  printf("\n\nFree mem ratio: %f\n\n", (float)free_mem_size / sspace->committed_heap_size);
+  free_mem_size = 0;
+}
+
+#endif
+
+#ifdef SSPACE_ALLOC_INFO
+
+#define MEDIUM_THRESHOLD 256
+#define LARGE_THRESHOLD (1024)
+#define SUPER_THRESHOLD (6*KB)
+#define HYPER_THRESHOLD (64*KB)
+
+#define SMALL_OBJ_ARRAY_NUM  (MEDIUM_THRESHOLD >> 2)
+#define MEDIUM_OBJ_ARRAY_NUM (LARGE_THRESHOLD >> 4)
+#define LARGE_OBJ_ARRAY_NUM  (SUPER_THRESHOLD >> 6)
+#define SUPER_OBJ_ARRAY_NUM  (HYPER_THRESHOLD >> 10)
+
+volatile unsigned int small_obj_num[SMALL_OBJ_ARRAY_NUM];
+volatile unsigned int medium_obj_num[MEDIUM_OBJ_ARRAY_NUM];
+volatile unsigned int large_obj_num[LARGE_OBJ_ARRAY_NUM];
+volatile unsigned int super_obj_num[SUPER_OBJ_ARRAY_NUM];
+volatile unsigned int hyper_obj_num;
+
+void sspace_alloc_info(unsigned int size)
+{
+  if(size <= MEDIUM_THRESHOLD)
+    atomic_inc32(&small_obj_num[(size>>2)-1]);
+  else if(size <= LARGE_THRESHOLD)
+    atomic_inc32(&medium_obj_num[(size>>4)-1]);
+  else if(size <= SUPER_THRESHOLD)
+    atomic_inc32(&large_obj_num[(size>>6)-1]);
+  else if(size <= HYPER_THRESHOLD)
+    atomic_inc32(&super_obj_num[(size>>10)-1]);
+  else
+    atomic_inc32(&hyper_obj_num);
+}
+
+void sspace_alloc_info_summary(void)
+{
+  unsigned int i;
+  
+  printf("\n\nNORMAL OBJ\n\n");
+  for(i = 0; i < SMALL_OBJ_ARRAY_NUM; i++){
+    printf("Size: %x\tnum: %d\n", (i+1)<<2, small_obj_num[i]);
+    small_obj_num[i] = 0;
+  }
+  
+  i = ((MEDIUM_THRESHOLD + (1<<4))>>4) - 1;
+  for(; i < MEDIUM_OBJ_ARRAY_NUM; i++){
+    printf("Size: %x\tnum: %d\n", (i+1)<<4, medium_obj_num[i]);
+    medium_obj_num[i] = 0;
+  }
+  
+  i = ((LARGE_THRESHOLD + (1<<6))>>6) - 1;
+  for(; i < LARGE_OBJ_ARRAY_NUM; i++){
+    printf("Size: %x\tnum: %d\n", (i+1)<<6, large_obj_num[i]);
+    large_obj_num[i] = 0;
+  }
+  
+  i = ((SUPER_THRESHOLD + (1<<10))>>10) - 1;
+  for(; i < SUPER_OBJ_ARRAY_NUM; i++){
+    printf("Size: %x\tnum: %d\n", (i+1)<<10, super_obj_num[i]);
+    super_obj_num[i] = 0;
+  }
+  
+  printf("\n\nHYPER OBJ\n\n");
+  printf("num: %d\n", hyper_obj_num);
+  hyper_obj_num = 0;
+}
+
+#endif

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h?view=auto&rev=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h Tue Jul  3 20:01:01 2007
@@ -0,0 +1,338 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#ifndef _SSPACE_CHUNK_H_
+#define _SSPACE_CHUNK_H_
+
+#include "sspace.h"
+
+enum Chunk_Status {
+  CHUNK_NIL = 0,
+  CHUNK_FREE = 0x1,
+  CHUNK_IN_USE = 0x2,
+  CHUNK_USED = 0x4,
+  CHUNK_NORMAL = 0x10,
+  CHUNK_ABNORMAL = 0x20,
+  CHUNK_NEED_ZEROING = 0x100
+};
+
+typedef volatile POINTER_SIZE_INT Chunk_Status_t;
+
+typedef struct Chunk_Heaer_Basic {
+  Chunk_Heaer_Basic *next;
+  Chunk_Status_t status;
+  Chunk_Heaer_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
+  Chunk_Heaer_Basic *adj_next;  // adjacent next chunk
+} Chunk_Heaer_Basic;
+
+typedef struct Chunk_Header {
+  /* Beginning of Chunk_Heaer_Basic */
+  Chunk_Header *next;           /* pointing to the next pfc in the pfc pool */
+  Chunk_Status_t status;
+  Chunk_Heaer_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
+  Chunk_Heaer_Basic *adj_next;  // adjacent next chunk
+  /* End of Chunk_Heaer_Basic */
+  void *base;
+  unsigned int slot_size;
+  unsigned int slot_num;
+  unsigned int slot_index;      /* index of the first free slot in this chunk */
+  POINTER_SIZE_INT table[1];
+} Chunk_Header;
+
+
+#define NORMAL_CHUNK_SHIFT_COUNT    16
+#define NORMAL_CHUNK_SIZE_BYTES     (1 << NORMAL_CHUNK_SHIFT_COUNT)
+#define NORMAL_CHUNK_LOW_MASK       ((POINTER_SIZE_INT)(NORMAL_CHUNK_SIZE_BYTES - 1))
+#define NORMAL_CHUNK_HIGH_MASK      (~NORMAL_CHUNK_LOW_MASK)
+#define NORMAL_CHUNK_HEADER(addr)   ((Chunk_Header*)((POINTER_SIZE_INT)(addr) & NORMAL_CHUNK_HIGH_MASK))
+#define ABNORMAL_CHUNK_HEADER(addr) ((Chunk_Header*)((POINTER_SIZE_INT)addr & CHUNK_GRANULARITY_HIGH_MASK))
+
+#define MAX_SLOT_INDEX 0xFFffFFff
+#define COLOR_BITS_PER_OBJ 2   // must be a power of 2
+#define SLOT_NUM_PER_WORD_IN_TABLE  (BITS_PER_WORD /COLOR_BITS_PER_OBJ)
+
+/* Two equations:
+ * 1. CHUNK_HEADER_VARS_SIZE_BYTES + NORMAL_CHUNK_TABLE_SIZE_BYTES + slot_size*NORMAL_CHUNK_SLOT_NUM = NORMAL_CHUNK_SIZE_BYTES
+ * 2. (BITS_PER_BYTE * NORMAL_CHUNK_TABLE_SIZE_BYTES)/COLOR_BITS_PER_OBJ >= NORMAL_CHUNK_SLOT_NUM
+ * ===>
+ * NORMAL_CHUNK_SLOT_NUM <= BITS_PER_BYTE*(NORMAL_CHUNK_SIZE_BYTES - CHUNK_HEADER_VARS_SIZE_BYTES) / (BITS_PER_BYTE*slot_size + COLOR_BITS_PER_OBJ)
+ * ===>
+ * NORMAL_CHUNK_SLOT_NUM = BITS_PER_BYTE*(NORMAL_CHUNK_SIZE_BYTES - CHUNK_HEADER_VARS_SIZE_BYTES) / (BITS_PER_BYTE*slot_size + COLOR_BITS_PER_OBJ)
+ */
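+/* A worked example (a sketch, assuming a 32-bit build where CHUNK_HEADER_VARS_SIZE_BYTES
+ * is 32 and SLOT_NUM_PER_WORD_IN_TABLE is 16): with slot_size = 12,
+ *   NORMAL_CHUNK_SLOT_NUM = 8*(65536-32) / (8*12 + 2) = 524032/98 = 5347 slots,
+ *   table size = ceil(5347/16)*4 = 1340 bytes, header = 32 + 1340 = 1372 bytes,
+ *   and 1372 + 12*5347 = 65536 = NORMAL_CHUNK_SIZE_BYTES exactly.
+ */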
+
+#define CHUNK_HEADER_VARS_SIZE_BYTES      ((POINTER_SIZE_INT)&(((Chunk_Header*)0)->table))
+#define NORMAL_CHUNK_SLOT_AREA_SIZE_BITS  (BITS_PER_BYTE * (NORMAL_CHUNK_SIZE_BYTES - CHUNK_HEADER_VARS_SIZE_BYTES))
+#define SIZE_BITS_PER_SLOT(chunk)         (BITS_PER_BYTE * chunk->slot_size + COLOR_BITS_PER_OBJ)
+
+#define NORMAL_CHUNK_SLOT_NUM(chunk)          (NORMAL_CHUNK_SLOT_AREA_SIZE_BITS / SIZE_BITS_PER_SLOT(chunk))
+#define NORMAL_CHUNK_TABLE_SIZE_BYTES(chunk)  (((NORMAL_CHUNK_SLOT_NUM(chunk) + SLOT_NUM_PER_WORD_IN_TABLE-1) / SLOT_NUM_PER_WORD_IN_TABLE) * BYTES_PER_WORD)
+#define NORMAL_CHUNK_HEADER_SIZE_BYTES(chunk) (CHUNK_HEADER_VARS_SIZE_BYTES + NORMAL_CHUNK_TABLE_SIZE_BYTES(chunk))
+
+#define NORMAL_CHUNK_BASE(chunk)    ((void*)((POINTER_SIZE_INT)(chunk) + NORMAL_CHUNK_HEADER_SIZE_BYTES(chunk)))
+#define ABNORMAL_CHUNK_BASE(chunk)  ((void*)((POINTER_SIZE_INT)(chunk) + sizeof(Chunk_Header)))
+
+#define CHUNK_END(chunk)  ((chunk)->adj_next)
+#define CHUNK_SIZE(chunk) ((POINTER_SIZE_INT)chunk->adj_next - (POINTER_SIZE_INT)chunk)
+
+inline void *slot_index_to_addr(Chunk_Header *chunk, unsigned int index)
+{ return (void*)((POINTER_SIZE_INT)chunk->base + chunk->slot_size * index); }
+
+inline unsigned int slot_addr_to_index(Chunk_Header *chunk, void *addr)
+{ return (unsigned int)(((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)chunk->base) / chunk->slot_size); }
+
+typedef struct Free_Chunk {
+  /* Beginning of Chunk_Heaer_Basic */
+  Free_Chunk *next;             /* pointing to the next free Free_Chunk */
+  Chunk_Status_t status;
+  Chunk_Heaer_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
+  Chunk_Heaer_Basic *adj_next;  // adjacent next chunk
+  /* End of Chunk_Heaer_Basic */
+  Free_Chunk *prev;             /* pointing to the prev free Free_Chunk */
+} Free_Chunk;
+
+typedef struct Free_Chunk_List {
+  Free_Chunk *head;  /* get new free chunk from head */
+  Free_Chunk *tail;  /* put free chunk to tail */
+  unsigned int chunk_num;
+  SpinLock lock;
+} Free_Chunk_List;
+
+/*
+typedef union Chunk{
+  Chunk_Header   header;
+  Free_Chunk     free_chunk;
+  unsigned char  raw_bytes[NORMAL_CHUNK_SIZE_BYTES];
+} Chunk;
+*/
+
+inline void free_chunk_list_init(Free_Chunk_List *list)
+{
+  list->head = NULL;
+  list->tail = NULL;
+  list->chunk_num = 0;
+  list->lock = FREE_LOCK;
+}
+
+inline void free_chunk_list_clear(Free_Chunk_List *list)
+{
+  list->head = NULL;
+  list->tail = NULL;
+  list->chunk_num = 0;
+  assert(list->lock == FREE_LOCK);
+}
+
+/* Pad the last index word in the table to facilitate allocation: the color fields of the
+   nonexistent slots beyond slot_num are pre-marked as allocated so the allocator skips them. */
+inline void chunk_pad_last_index_word(Chunk_Header *chunk, POINTER_SIZE_INT alloc_mask)
+{
+  unsigned int ceiling_index_in_last_word = (chunk->slot_num * COLOR_BITS_PER_OBJ) % BITS_PER_WORD;
+  if(!ceiling_index_in_last_word)
+    return;
+  POINTER_SIZE_INT padding_mask = ~(((POINTER_SIZE_INT)1 << ceiling_index_in_last_word) - 1);
+  padding_mask &= alloc_mask;
+  unsigned int last_word_index = (chunk->slot_num-1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  chunk->table[last_word_index] |= padding_mask;
+}
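+
+/* Example (a sketch): with slot_num = 100 and 2 color bits per object, the last index
+   word covers slots 96..111; ceiling_index_in_last_word = (100*2) % 32 = 8, so bits 8..31
+   -- the color fields of the 12 nonexistent slots -- are padded with the alloc mask. */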
+
+extern POINTER_SIZE_INT alloc_mask_in_table;
+/* Used for allocating a fixed-size chunk from free area lists */
+inline void normal_chunk_init(Chunk_Header *chunk, unsigned int slot_size)
+{
+  assert(chunk->status == CHUNK_FREE);
+  assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + NORMAL_CHUNK_SIZE_BYTES);
+  
+  chunk->next = NULL;
+  chunk->status = CHUNK_NEED_ZEROING;
+  chunk->slot_size = slot_size;
+  chunk->slot_num = NORMAL_CHUNK_SLOT_NUM(chunk);
+  chunk->slot_index = 0;
+  chunk->base = NORMAL_CHUNK_BASE(chunk);
+  memset(chunk->table, 0, NORMAL_CHUNK_TABLE_SIZE_BYTES(chunk));  /* clear the color table */
+  chunk_pad_last_index_word(chunk, alloc_mask_in_table);
+}
+
+/* Used for allocating a chunk for large object from free area lists */
+inline void abnormal_chunk_init(Chunk_Header *chunk, unsigned int chunk_size, unsigned int obj_size)
+{
+  assert(chunk->status == CHUNK_FREE);
+  assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + chunk_size);
+  
+  chunk->next = NULL;
+  chunk->status = CHUNK_NIL;
+  chunk->slot_size = obj_size;
+  chunk->slot_num = 1;
+  chunk->slot_index = 0;
+  chunk->base = ABNORMAL_CHUNK_BASE(chunk);
+}
+
+
+#ifdef POINTER64
+  #define GC_OBJECT_ALIGNMENT_BITS    3
+#else
+  #define GC_OBJECT_ALIGNMENT_BITS    2
+#endif
+
+#define MEDIUM_OBJ_THRESHOLD  (128)
+#define LARGE_OBJ_THRESHOLD   (256)
+#define SUPER_OBJ_THRESHOLD   (1024)
+#define HYPER_OBJ_THRESHOLD   (128*KB)
+
+#define SMALL_GRANULARITY_BITS  (GC_OBJECT_ALIGNMENT_BITS)
+#define MEDIUM_GRANULARITY_BITS (SMALL_GRANULARITY_BITS + 1)
+#define LARGE_GRANULARITY_BITS  7
+#define CHUNK_GRANULARITY_BITS  10
+
+#define SMALL_GRANULARITY       (1 << SMALL_GRANULARITY_BITS)
+#define MEDIUM_GRANULARITY      (1 << MEDIUM_GRANULARITY_BITS)
+#define LARGE_GRANULARITY       (1 << LARGE_GRANULARITY_BITS)
+#define CHUNK_GRANULARITY       (1 << CHUNK_GRANULARITY_BITS)
+
+#define SMALL_GRANULARITY_LOW_MASK    ((POINTER_SIZE_INT)(SMALL_GRANULARITY-1))
+#define SMALL_GRANULARITY_HIGH_MASK   (~SMALL_GRANULARITY_LOW_MASK)
+#define MEDIUM_GRANULARITY_LOW_MASK   ((POINTER_SIZE_INT)(MEDIUM_GRANULARITY-1))
+#define MEDIUM_GRANULARITY_HIGH_MASK  (~MEDIUM_GRANULARITY_LOW_MASK)
+#define LARGE_GRANULARITY_LOW_MASK    ((POINTER_SIZE_INT)(LARGE_GRANULARITY-1))
+#define LARGE_GRANULARITY_HIGH_MASK   (~LARGE_GRANULARITY_LOW_MASK)
+#define CHUNK_GRANULARITY_LOW_MASK    ((POINTER_SIZE_INT)(CHUNK_GRANULARITY-1))
+#define CHUNK_GRANULARITY_HIGH_MASK   (~CHUNK_GRANULARITY_LOW_MASK)
+
+#define SMALL_LOCAL_CHUNK_NUM   (MEDIUM_OBJ_THRESHOLD >> SMALL_GRANULARITY_BITS)
+#define MEDIUM_LOCAL_CHUNK_NUM  ((LARGE_OBJ_THRESHOLD - MEDIUM_OBJ_THRESHOLD) >> MEDIUM_GRANULARITY_BITS)
+
+#define SMALL_SIZE_ROUNDUP(size)    (size)
+#define MEDIUM_SIZE_ROUNDUP(size)   (((size) + MEDIUM_GRANULARITY-1) & MEDIUM_GRANULARITY_HIGH_MASK)
+#define LARGE_SIZE_ROUNDUP(size)    (((size) + LARGE_GRANULARITY-1) & LARGE_GRANULARITY_HIGH_MASK)
+#define SUPER_OBJ_TOTAL_SIZE(size)  (sizeof(Chunk_Header) + (size))
+#define SUPER_SIZE_ROUNDUP(size)    ((SUPER_OBJ_TOTAL_SIZE(size) + CHUNK_GRANULARITY-1) & CHUNK_GRANULARITY_HIGH_MASK)
+
+#define SMALL_SIZE_TO_INDEX(size)   (((size) >> SMALL_GRANULARITY_BITS) - 1)
+#define MEDIUM_SIZE_TO_INDEX(size)  ((((size)-MEDIUM_OBJ_THRESHOLD) >> MEDIUM_GRANULARITY_BITS) - 1)
+#define LARGE_SIZE_TO_INDEX(size)   ((((size)-LARGE_OBJ_THRESHOLD) >> LARGE_GRANULARITY_BITS) - 1)
+#define ALIGNED_CHUNK_SIZE_TO_INDEX(size)     (((size) >> NORMAL_CHUNK_SHIFT_COUNT) - 1)
+#define UNALIGNED_CHUNK_SIZE_TO_INDEX(size)   (((size) >> CHUNK_GRANULARITY_BITS) - 1)
+
+#define SMALL_INDEX_TO_SIZE(index)  (((index) + 1) << SMALL_GRANULARITY_BITS)
+#define MEDIUM_INDEX_TO_SIZE(index) ((((index) + 1) << MEDIUM_GRANULARITY_BITS) + MEDIUM_OBJ_THRESHOLD)
+#define LARGE_INDEX_TO_SIZE(index)  ((((index) + 1) << LARGE_GRANULARITY_BITS) + LARGE_OBJ_THRESHOLD)
+#define ALIGNED_CHUNK_INDEX_TO_SIZE(index)    (((index) + 1) << NORMAL_CHUNK_SHIFT_COUNT)
+#define UNALIGNED_CHUNK_INDEX_TO_SIZE(index)  (((index) + 1) << CHUNK_GRANULARITY_BITS)
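+
+/* Round-trip example (a sketch, 32-bit build): a 48-byte object is small
+   (48 <= MEDIUM_OBJ_THRESHOLD), SMALL_SIZE_TO_INDEX(48) = (48 >> 2) - 1 = 11,
+   and SMALL_INDEX_TO_SIZE(11) = 12 << 2 = 48; a 300-byte object is large,
+   LARGE_SIZE_ROUNDUP(300) = 384 and LARGE_SIZE_TO_INDEX(384) = 0. */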
+
+#define SMALL_PFC_STEAL_NUM   3
+#define MEDIUM_PFC_STEAL_NUM  3
+#define LARGE_PFC_STEAL_NUM   3
+
+#define SMALL_PFC_STEAL_THRESHOLD   3
+#define MEDIUM_PFC_STEAL_THRESHOLD  3
+#define LARGE_PFC_STEAL_THRESHOLD   3
+
+
+inline Chunk_Header *sspace_get_small_pfc(Sspace *sspace, unsigned int index)
+{
+  Pool *pfc_pool = sspace->small_pfc_pools[index];
+  Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
+  assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
+  return chunk;
+}
+inline void sspace_put_small_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index)
+{
+  assert(chunk);
+  
+  Pool *pfc_pool = sspace->small_pfc_pools[index];
+  pool_put_entry(pfc_pool, chunk);
+}
+
+inline Chunk_Header *sspace_get_medium_pfc(Sspace *sspace, unsigned int index)
+{
+  Pool *pfc_pool = sspace->medium_pfc_pools[index];
+  Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
+  assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
+  return chunk;
+}
+inline void sspace_put_medium_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index)
+{
+  assert(chunk);
+  
+  Pool *pfc_pool = sspace->medium_pfc_pools[index];
+  pool_put_entry(pfc_pool, chunk);
+}
+
+inline Chunk_Header *sspace_get_large_pfc(Sspace *sspace, unsigned int index)
+{
+  Pool *pfc_pool = sspace->large_pfc_pools[index];
+  Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
+  assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
+  return chunk;
+}
+inline void sspace_put_large_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index)
+{
+  assert(chunk);
+  
+  Pool *pfc_pool = sspace->large_pfc_pools[index];
+  pool_put_entry(pfc_pool, chunk);
+}
+
+/*
+inline Chunk_Header *sspace_get_pfc(Sspace *sspace, unsigned int size)
+{
+  assert(size <= SUPER_OBJ_THRESHOLD);
+  
+  if(size > LARGE_OBJ_THRESHOLD)
+    return sspace_get_large_pfc(sspace, size);
+  else if(size > MEDIUM_OBJ_THRESHOLD)
+    return sspace_get_medium_pfc(sspace, size);
+  return sspace_get_small_pfc(sspace, size);
+}
+*/
+
+inline void sspace_put_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int size)
+{
+  assert(size <= SUPER_OBJ_THRESHOLD);
+  
+  chunk->status = CHUNK_NORMAL | CHUNK_NEED_ZEROING;
+  unsigned int index;
+  
+  if(size > LARGE_OBJ_THRESHOLD){
+    assert(!(size & LARGE_GRANULARITY_LOW_MASK));
+    assert((size > LARGE_OBJ_THRESHOLD) && (size <= SUPER_OBJ_THRESHOLD));
+    index = LARGE_SIZE_TO_INDEX(size);
+    sspace_put_large_pfc(sspace, chunk, index);
+  } else if(size > MEDIUM_OBJ_THRESHOLD){
+    assert(!(size & MEDIUM_GRANULARITY_LOW_MASK));
+    assert((size > MEDIUM_OBJ_THRESHOLD) && (size <= LARGE_OBJ_THRESHOLD));
+    index = MEDIUM_SIZE_TO_INDEX(size);
+    sspace_put_medium_pfc(sspace, chunk, index);
+  } else {
+    assert(!(size & SMALL_GRANULARITY_LOW_MASK));
+    assert(size <= MEDIUM_OBJ_THRESHOLD);
+    index = SMALL_SIZE_TO_INDEX(size);
+    sspace_put_small_pfc(sspace, chunk, index);
+  }
+}
+
+
+extern void sspace_init_chunks(Sspace *sspace);
+extern void sspace_clear_chunk_list(GC *gc);
+extern void sspace_put_free_chunk(Sspace *sspace, Free_Chunk *chunk);
+extern Free_Chunk *sspace_get_normal_free_chunk(Sspace *sspace);
+extern Free_Chunk *sspace_get_abnormal_free_chunk(Sspace *sspace, unsigned int chunk_size);
+extern Free_Chunk *sspace_get_hyper_free_chunk(Sspace *sspace, unsigned int chunk_size, Boolean is_normal_chunk);
+extern Chunk_Header *sspace_steal_small_pfc(Sspace *sspace, unsigned int index);
+extern Chunk_Header *sspace_steal_medium_pfc(Sspace *sspace, unsigned int index);
+extern Chunk_Header *sspace_steal_large_pfc(Sspace *sspace, unsigned int index);
+
+extern void zeroing_free_chunk(Free_Chunk *chunk);
+
+
+#endif //#ifndef _SSPACE_CHUNK_H_

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp?view=auto&rev=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp Tue Jul  3 20:01:01 2007
@@ -0,0 +1,176 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "sspace_mark_sweep.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if( p_obj == NULL) return;
+  
+  assert(address_belongs_to_gc_heap(p_obj, collector->gc));
+  if(obj_mark_in_table(p_obj)){
+    assert(p_obj);
+    collector_tracestack_push(collector, p_obj);
+  }
+}
+
+static FORCE_INLINE void scan_object(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+  if(!object_has_ref_field(p_obj)) return;
+  
+  REF *p_ref;
+  
+  if(object_is_array(p_obj)){   /* scan array object */
+    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len;
+    
+    p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    for (unsigned int i = 0; i < array_length; i++)
+      scan_slot(collector, p_ref+i);
+    
+    return;
+  }
+  
+  /* scan non-array object */
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+  
+  for(unsigned int i=0; i<num_refs; i++){
+    p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+    scan_slot(collector, p_ref);
+  }
+
+#ifndef BUILD_IN_REFERENT
+  scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+
+}
+
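+/* Scan one object, then drain this collector's trace stack. collector_tracestack_push
+   may install a fresh stack block when the current one fills up, which is why
+   collector->trace_stack is re-read after each pop. */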
+static void trace_object(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+  scan_object(collector, p_obj);
+  
+  Vector_Block *trace_stack = collector->trace_stack;
+  while(!vector_stack_is_empty(trace_stack)){
+    p_obj = (Partial_Reveal_Object*)vector_stack_pop(trace_stack);
+    scan_object(collector, p_obj);
+    trace_stack = collector->trace_stack;
+  }
+}
+
+/* NOTE:: This is another marking version: marking in the color bitmap table.
+   Originally, we had to mark an object before putting it into the mark stack, to
+   guarantee there was only one occurrence of the object in the mark stack. This in turn
+   guaranteed there was only one occurrence of a repointed ref slot in the repset (slots
+   are put into the set when the object is scanned). If the same object were put into the
+   mark stack twice, it would be scanned twice and its ref slots would be recorded twice.
+   A problem occurs when a ref slot has already been updated with the new position:
+   the second time around, the value in the ref slot is no longer the old position as expected.
+   The code needs to read the original object header for the forwarding pointer, and with
+   the new value it would read something nonsensical, since the object has not been moved yet.
+   This could be worked around if we wanted to, but it would require atomic instructions
+   for marking, which is undesirable. So we abandoned that design. We no longer use the
+   repset to remember repointed slots.
+*/
+
+/* for marking phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+void sspace_mark_scan(Collector *collector)
+{
+  GC *gc = collector->gc;
+  GC_Metadata *metadata = gc->metadata;
+  
+  /* One collector resets num_finished_collectors to 0. This is necessary for the barrier later. */
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  atomic_cas32(&num_finished_collectors, 0, num_active_collectors);
+  
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+  
+  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  
+  /* first step: copy all root objects to mark tasks.
+     FIXME:: can be done sequentially before coming here to eliminate atomic ops */
+  while(root_set){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      REF *p_ref = (REF *)*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+      
+      Partial_Reveal_Object *p_obj = read_slot(p_ref);
+      /* A root ref can't be NULL (a remset may have NULL ref entries, but this function is only for MAJOR_COLLECTION) */
+      assert(p_obj!=NULL);
+      /* We have to mark the object before putting it into the mark task, because
+         two slots may contain the same object. It would then be scanned twice and
+         its ref slots would be recorded twice. A problem occurs after a ref slot has
+         been updated the first time with the new position: the second time around, the
+         value in the ref slot is no longer the old position as expected.
+         This could be worked around if we wanted to.
+      */
+      assert(address_belongs_to_gc_heap(p_obj, gc));
+      if(obj_mark_in_table(p_obj))
+        collector_tracestack_push(collector, p_obj);
+    }
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+  /* second step: iterate over the mark tasks and scan objects */
+  /* get a task buf for the mark stack */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
+  
+  while(mark_task){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task,iter)){
+      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
+      iter = vector_block_iterator_advance(mark_task,iter);
+      
+      /* FIXME:: we should not let mark_task become empty while working; others may want
+         to steal from it. Degenerate my stack into mark_task, and grab another mark_task */
+      trace_object(collector, p_obj);
+    }
+   /* finished one task: put the block back into the free pool and grab another task */
+   vector_stack_clear(mark_task);
+   pool_put_entry(metadata->free_task_pool, mark_task);
+   mark_task = pool_get_entry(metadata->mark_task_pool);
+  }
+  
+  /* Termination detection. This is also a barrier.
+     NOTE:: We can simply spin waiting on num_finished_collectors, because each
+     newly generated task is sure to be processed eventually by the collector that
+     generated it. So the code below is only a load-balancing optimization. */
+  atomic_inc32(&num_finished_collectors);
+  while(num_finished_collectors != num_active_collectors){
+    if(!pool_is_empty(metadata->mark_task_pool)){
+      atomic_dec32(&num_finished_collectors);
+      goto retry;
+    }
+  }
+  
+  /* put back the last mark stack to the free pool */
+  mark_task = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(mark_task);
+  pool_put_entry(metadata->free_task_pool, mark_task);
+  collector->trace_stack = NULL;
+  
+  return;
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp?view=auto&rev=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp Tue Jul  3 20:01:01 2007
@@ -0,0 +1,127 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "sspace_mark_sweep.h"
+#include "sspace_verify.h"
+#include "../gen/gen.h"
+#include "../thread/collector.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+
+POINTER_SIZE_INT alloc_mask_in_table = ~BLACK_MASK_IN_TABLE;
+POINTER_SIZE_INT mark_mask_in_table = BLACK_MASK_IN_TABLE;
+POINTER_SIZE_INT cur_alloc_color = OBJ_COLOR_WHITE;
+POINTER_SIZE_INT cur_mark_color = OBJ_COLOR_BLACK;
+
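+/* Swap the meanings of the alloc color and the mark color after a collection.
+   Live (marked) objects then count as allocated in the next mutation phase, and dead
+   objects' color fields become free for reuse, without rewriting the color tables. */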
+static void ops_color_flip(void)
+{
+  POINTER_SIZE_INT temp = cur_alloc_color;
+  cur_alloc_color = cur_mark_color;
+  cur_mark_color = temp;
+  alloc_mask_in_table = ~alloc_mask_in_table;
+  mark_mask_in_table = ~mark_mask_in_table;
+}
+
+void collector_init_free_chunk_list(Collector *collector)
+{
+  Free_Chunk_List *list = (Free_Chunk_List*)STD_MALLOC(sizeof(Free_Chunk_List));
+  free_chunk_list_init(list);
+  collector->free_chunk_list = list;
+}
+
+extern Chunk_Heaer_Basic *volatile next_chunk_for_sweep;
+static void gc_init_chunk_for_sweep(GC *gc, Sspace *sspace)
+{
+  next_chunk_for_sweep = (Chunk_Heaer_Basic*)space_heap_start((Space*)sspace);
+  next_chunk_for_sweep->adj_prev = NULL;
+  
+  unsigned int i = gc->num_collectors;
+  while(i--){
+    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+    assert(!list->head);
+    assert(!list->tail);
+    assert(list->lock == FREE_LOCK);
+  }
+}
+
+
+static volatile unsigned int num_marking_collectors = 0;
+static volatile unsigned int num_sweeping_collectors = 0;
+
+void mark_sweep_sspace(Collector *collector)
+{
+  GC *gc = collector->gc;
+  Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc);
+  
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  
+  /* Pass 1: **************************************************
+     mark all live objects in heap ****************************/
+  unsigned int old_num = atomic_cas32(&num_marking_collectors, 0, num_active_collectors+1);
+  
+  sspace_mark_scan(collector);
+  
+  old_num = atomic_inc32(&num_marking_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* last collector's world here */
+#ifdef SSPACE_TIME
+    sspace_mark_time(FALSE);
+#endif
+    if(!IGNORE_FINREF )
+      collector_identify_finref(collector);
+#ifndef BUILD_IN_REFERENT
+    else {
+      gc_set_weakref_sets(gc);
+      gc_update_weakref_ignore_finref(gc);
+    }
+#endif
+    gc_init_chunk_for_sweep(gc, sspace);
+    /* let other collectors go */
+    num_marking_collectors++;
+  }
+  while(num_marking_collectors != num_active_collectors + 1);
+  
+  /* Pass 2: **************************************************
+     sweep dead objects ***************************************/
+  atomic_cas32( &num_sweeping_collectors, 0, num_active_collectors);
+  
+  sspace_sweep(collector, sspace);
+  
+  atomic_inc32(&num_sweeping_collectors);
+  while(num_sweeping_collectors != num_active_collectors);
+  
+  if( collector->thread_handle != 0 )
+    return;
+  
+  /* Leftover: ************************************************ */
+#ifdef SSPACE_TIME
+  sspace_sweep_time(FALSE);
+#endif
+
+  gc_collect_free_chunks(gc, sspace);
+
+#ifdef SSPACE_TIME
+  sspace_merge_time(FALSE);
+#endif
+  
+  ops_color_flip();
+  gc->root_set = NULL;  // FIXME:: should be moved to a more appropriate place
+  gc_set_pool_clear(gc->metadata->gc_rootset_pool);
+
+#ifdef SSPACE_VERIFY
+  sspace_verify_after_collection(gc);
+#endif
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h?view=auto&rev=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h Tue Jul  3 20:01:01 2007
@@ -0,0 +1,113 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#ifndef _SSPACE_MARK_SWEEP_H_
+#define _SSPACE_MARK_SWEEP_H_
+
+#include "sspace_chunk.h"
+#include "sspace_verify.h"
+
+#define PFC_REUSABLE_RATIO 0.1
+
+enum Obj_Color {
+  OBJ_COLOR_BLUE = 0x0,
+  OBJ_COLOR_WHITE = 0x1,
+  OBJ_COLOR_BLACK = 0x2,
+  OBJ_COLOR_GRAY = 0x3,
+  OBJ_COLOR_MASK = 0x3
+};
+
+#ifdef POINTER64
+  #define BLACK_MASK_IN_TABLE  ((POINTER_SIZE_INT)0xAAAAAAAAAAAAAAAA)
+#else
+  #define BLACK_MASK_IN_TABLE  ((POINTER_SIZE_INT)0xAAAAAAAA)
+#endif
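+
+/* OBJ_COLOR_BLACK (0x2) replicated into every 2-bit color field:
+   binary ...10101010, i.e. the high bit of each object's color pair. */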
+
+extern POINTER_SIZE_INT alloc_mask_in_table;
+extern POINTER_SIZE_INT mark_mask_in_table;
+extern POINTER_SIZE_INT cur_alloc_color;
+extern POINTER_SIZE_INT cur_mark_color;
+
+#define SUPER_OBJ_MASK ((Obj_Info_Type)0x1)  /* the lowest bit in obj info */
+
+inline void set_super_obj_mask(void *large_obj)
+{ ((Partial_Reveal_Object*)large_obj)->obj_info |= SUPER_OBJ_MASK; }
+
+inline Boolean is_super_obj(Partial_Reveal_Object *obj)
+{
+  //return get_obj_info_raw(obj) & SUPER_OBJ_MASK;
+  if(vm_object_size(obj) > SUPER_OBJ_THRESHOLD){
+    return TRUE;
+  } else {
+    return FALSE;
+  }
+}
+
+inline POINTER_SIZE_INT *get_color_word_in_table(Partial_Reveal_Object *obj, unsigned int &index_in_word)
+{
+  Chunk_Header *chunk;
+  unsigned int index;
+  
+  if(is_super_obj(obj)){
+    chunk = ABNORMAL_CHUNK_HEADER(obj);
+    index = 0;
+  } else {
+    chunk = NORMAL_CHUNK_HEADER(obj);
+    index = slot_addr_to_index(chunk, obj);
+  }
+  unsigned int word_index = index / SLOT_NUM_PER_WORD_IN_TABLE;
+  index_in_word = COLOR_BITS_PER_OBJ * (index % SLOT_NUM_PER_WORD_IN_TABLE);
+  
+  return &chunk->table[word_index];
+}
+
+/* Accurate marking: returns TRUE if the object is marked by this collector, and FALSE if it was already marked by another collector */
+inline Boolean obj_mark_in_table(Partial_Reveal_Object *obj)
+{
+  volatile POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word);
+  assert(p_color_word);
+  
+  POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
+  POINTER_SIZE_INT mark_color = cur_mark_color << index_in_word;
+  
+  POINTER_SIZE_INT old_word = *p_color_word;
+  POINTER_SIZE_INT new_word = (old_word & color_bits_mask) | mark_color;
+  while(new_word != old_word) {
+    POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+    if(temp == old_word){
+#ifdef SSPACE_VERIFY
+      assert(obj_is_marked_in_vt(obj));
+      obj_unmark_in_vt(obj);
+      sspace_verify_mark(obj, vm_object_size(obj));
+#endif
+      return TRUE;
+    }
+    old_word = *p_color_word;
+    new_word = (old_word & color_bits_mask) | mark_color;
+  }
+  
+  return FALSE;
+}
+
+extern void sspace_mark_scan(Collector *collector);
+extern void sspace_sweep(Collector *collector, Sspace *sspace);
+extern void gc_collect_free_chunks(GC *gc, Sspace *sspace);
+
+extern void chunk_set_slot_index(Chunk_Header* chunk, unsigned int first_free_word_index);
+
+#endif // _SSPACE_MARK_SWEEP_H_

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp?view=auto&rev=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp Tue Jul  3 20:01:01 2007
@@ -0,0 +1,281 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "sspace_chunk.h"
+#include "sspace_mark_sweep.h"
+
+
+Chunk_Heaer_Basic *volatile next_chunk_for_sweep;
+
+
+static unsigned int word_set_bit_num(POINTER_SIZE_INT word)
+{
+  unsigned int count = 0;
+  
+  while(word){
+    word &= word - 1;
+    ++count;
+  }
+  return count;
+}
+
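+/* Atomically claim the next chunk to be swept. Collectors race on next_chunk_for_sweep
+   with a CAS; the winner also fixes up the adj_prev link of the successor chunk. */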
+static Chunk_Heaer_Basic *sspace_get_next_sweep_chunk(Collector *collector, Sspace *sspace)
+{
+  Chunk_Heaer_Basic *cur_sweep_chunk = next_chunk_for_sweep;
+  
+  Chunk_Heaer_Basic *sspace_ceiling = (Chunk_Heaer_Basic*)space_heap_end((Space*)sspace);
+  while(cur_sweep_chunk < sspace_ceiling){
+    Chunk_Heaer_Basic *next_sweep_chunk = CHUNK_END(cur_sweep_chunk);
+    
+    Chunk_Heaer_Basic *temp = (Chunk_Heaer_Basic*)atomic_casptr((volatile void **)&next_chunk_for_sweep, next_sweep_chunk, cur_sweep_chunk);
+    if(temp == cur_sweep_chunk){
+      if(next_sweep_chunk < sspace_ceiling)
+        next_sweep_chunk->adj_prev = cur_sweep_chunk;
+      return cur_sweep_chunk;
+    }
+    cur_sweep_chunk = next_chunk_for_sweep;
+  }
+  
+  return NULL;
+}
+
+static void collector_add_free_chunk(Collector *collector, Free_Chunk *chunk)
+{
+  Free_Chunk_List *list = collector->free_chunk_list;
+  
+  chunk->status = CHUNK_FREE | CHUNK_IN_USE;
+  chunk->next = list->head;
+  chunk->prev = NULL;
+  if(list->head)
+    list->head->prev = chunk;
+  else
+    list->tail = chunk;
+  list->head = chunk;
+}
+
+void zeroing_free_chunk(Free_Chunk *chunk)
+{
+  assert(chunk->status == CHUNK_FREE);
+  
+  void *start = (void*)((POINTER_SIZE_INT)chunk + sizeof(Free_Chunk));
+  POINTER_SIZE_INT size = CHUNK_SIZE(chunk) - sizeof(Free_Chunk);
+  memset(start, 0, size);
+}
+
+/* TODO: fold this zeroing into the index-word sweep so the mark table is traversed only once */
+static void zeroing_free_areas_in_pfc(Chunk_Header *chunk, unsigned int live_num)
+{
+  assert(live_num);
+  
+  assert(chunk->status & CHUNK_NORMAL);
+  unsigned int slot_num = chunk->slot_num;
+  unsigned int slot_size = chunk->slot_size;
+  POINTER_SIZE_INT chunk_base = (POINTER_SIZE_INT)chunk->base;
+  POINTER_SIZE_INT *table = chunk->table;
+  
+  POINTER_SIZE_INT base = (POINTER_SIZE_INT)NULL;
+  assert(slot_num >= live_num);
+  unsigned int free_slot_num = slot_num - live_num;
+  unsigned int cur_free_slot_num = 0;
+  unsigned int slot_index = chunk->slot_index;
+  unsigned int word_index = slot_index / SLOT_NUM_PER_WORD_IN_TABLE;
+  assert(live_num >= slot_index);
+  live_num -= slot_index;
+  POINTER_SIZE_INT index_word = table[word_index];
+  POINTER_SIZE_INT mark_color = cur_mark_color << (COLOR_BITS_PER_OBJ * (slot_index % SLOT_NUM_PER_WORD_IN_TABLE));
+  for(; slot_index < slot_num; ++slot_index){
+    assert(!(index_word & ~mark_mask_in_table));
+    if(index_word & mark_color){
+      if(cur_free_slot_num){
+        memset((void*)base, 0, slot_size*cur_free_slot_num);
+        assert(free_slot_num >= cur_free_slot_num);
+        free_slot_num -= cur_free_slot_num;
+        cur_free_slot_num = 0;
+        if(!free_slot_num) break;
+      }
+      assert(live_num);
+      --live_num;
+    } else {
+      if(cur_free_slot_num){
+        ++cur_free_slot_num;
+      } else {
+        base = chunk_base + slot_size * slot_index;
+        cur_free_slot_num = 1;
+        if(!live_num) break;
+      }
+    }
+    mark_color <<= COLOR_BITS_PER_OBJ;
+    if(!mark_color){
+      mark_color = cur_mark_color;
+      ++word_index;
+      index_word = table[word_index];
+      while(index_word == mark_mask_in_table && cur_free_slot_num == 0 && slot_index < slot_num){
+        slot_index += SLOT_NUM_PER_WORD_IN_TABLE;
+        ++word_index;
+        index_word = table[word_index];
+        assert(live_num >= SLOT_NUM_PER_WORD_IN_TABLE);
+        live_num -= SLOT_NUM_PER_WORD_IN_TABLE;
+      }
+      while(index_word == 0 && cur_free_slot_num > 0 && slot_index < slot_num){
+        slot_index += SLOT_NUM_PER_WORD_IN_TABLE;
+        ++word_index;
+        index_word = table[word_index];
+        cur_free_slot_num += SLOT_NUM_PER_WORD_IN_TABLE;
+      }
+    }
+  }
+  assert((cur_free_slot_num>0 && live_num==0) || (cur_free_slot_num==0 && live_num>0));
+  if(cur_free_slot_num)
+    memset((void*)base, 0, slot_size*free_slot_num);
+}
+
+static void collector_sweep_normal_chunk(Collector *collector, Sspace *sspace, Chunk_Header *chunk)
+{
+  unsigned int slot_num = chunk->slot_num;
+  unsigned int live_num = 0;
+  unsigned int first_free_word_index = MAX_SLOT_INDEX;
+  POINTER_SIZE_INT *table = chunk->table;
+  
+  unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  for(unsigned int i=0; i<index_word_num; ++i){
+    table[i] &= mark_mask_in_table;
+    unsigned int live_num_in_word = (table[i] == mark_mask_in_table) ? SLOT_NUM_PER_WORD_IN_TABLE : word_set_bit_num(table[i]);
+    live_num += live_num_in_word;
+    if((first_free_word_index == MAX_SLOT_INDEX) && (live_num_in_word < SLOT_NUM_PER_WORD_IN_TABLE)){
+      first_free_word_index = i;
+      chunk_set_slot_index((Chunk_Header*)chunk, first_free_word_index);
+    }
+  }
+  assert(live_num <= slot_num);
+#ifdef SSPACE_VERIFY
+  collector->live_obj_num += live_num;
+  //printf("Chunk: %x  live obj: %d slot num: %d\n", (POINTER_SIZE_INT)chunk, live_num, slot_num);
+#endif
+  if(!live_num){  /* all objects in this chunk are dead */
+    collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+  } else if((float)(slot_num-live_num)/slot_num > PFC_REUSABLE_RATIO){  /* enough slots were freed to make reuse worthwhile: register the chunk as a PFC */
+#ifdef SSPACE_VERIFY
+    //zeroing_free_areas_in_pfc((Chunk_Header*)chunk, live_num);
+#endif
+    chunk_pad_last_index_word((Chunk_Header*)chunk, mark_mask_in_table);
+    sspace_put_pfc(sspace, chunk, chunk->slot_size);
+  }
+  /* the rest: chunks whose free ratio is below PFC_REUSABLE_RATIO; they are not reused for allocation */
+#ifdef SSPACE_VERIFY
+  //else// if(live_num < slot_num)
+    //zeroing_free_areas_in_pfc((Chunk_Header*)chunk, live_num);
+#endif
+}
+
+void sspace_sweep(Collector *collector, Sspace *sspace)
+{
+  Chunk_Heaer_Basic *chunk;
+#ifdef SSPACE_VERIFY
+  collector->live_obj_num = 0;
+#endif
+
+  chunk = sspace_get_next_sweep_chunk(collector, sspace);
+  while(chunk){
+    /* chunk is free before GC */
+    if(chunk->status == CHUNK_FREE){
+      collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+    } else if(chunk->status & CHUNK_NORMAL){   /* chunk is used as a normal sized obj chunk */
+      collector_sweep_normal_chunk(collector, sspace, (Chunk_Header*)chunk);
+    } else {  /* chunk is used as a super obj chunk */
+      assert(chunk->status & (CHUNK_IN_USE | CHUNK_ABNORMAL));
+      POINTER_SIZE_INT *table = ((Chunk_Header*)chunk)->table;
+      table[0] &= mark_mask_in_table;
+      if(!table[0]){
+        collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+      }
+#ifdef SSPACE_VERIFY
+      else {
+        collector->live_obj_num++;
+      }
+#endif
+    }
+    
+    chunk = sspace_get_next_sweep_chunk(collector, sspace);
+  }
+}
+
+static void free_list_detach_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+  if(chunk->prev)
+    chunk->prev->next = chunk->next;
+  else  // chunk is the head
+    list->head = chunk->next;
+  if(chunk->next)
+    chunk->next->prev = chunk->prev;
+  else  // chunk is the tail
+    list->tail = chunk->prev;
+}
+
+void gc_collect_free_chunks(GC *gc, Sspace *sspace)
+{
+  Free_Chunk *sspace_ceiling = (Free_Chunk*)space_heap_end((Space*)sspace);
+  
+  Free_Chunk_List free_chunk_list;
+  free_chunk_list.head = NULL;
+  free_chunk_list.tail = NULL;
+  
+  /* Collect free chunks from all collectors into one list, prepending each collector's list */
+  for(unsigned int i=0; i<gc->num_collectors; ++i){
+    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+    if(free_chunk_list.tail){
+      free_chunk_list.head->prev = list->tail;
+    } else {
+      free_chunk_list.tail = list->tail;
+    }
+    if(list->head){
+      list->tail->next = free_chunk_list.head;
+      free_chunk_list.head = list->head;
+    }
+    list->head = NULL;
+    list->tail = NULL;
+  }
+  
+  Free_Chunk *chunk = free_chunk_list.head;
+  while(chunk){
+    assert(chunk->status == (CHUNK_FREE | CHUNK_IN_USE));
+    /* Remove current chunk from the chunk list */
+    free_chunk_list.head = chunk->next;
+    if(free_chunk_list.head)
+      free_chunk_list.head->prev = NULL;
+    /* Merge with any free chunks physically following this one */
+    Free_Chunk *back_chunk = (Free_Chunk*)chunk->adj_next;
+    while(back_chunk < sspace_ceiling && back_chunk->status == (CHUNK_FREE | CHUNK_IN_USE)){
+      /* Remove back_chunk from list */
+      free_list_detach_chunk(&free_chunk_list, back_chunk);
+      chunk->adj_next = back_chunk->adj_next;
+      back_chunk = (Free_Chunk*)chunk->adj_next;
+    }
+    /* Merge with any free chunks physically preceding this one */
+    Free_Chunk *prev_chunk = (Free_Chunk*)chunk->adj_prev;
+    while(prev_chunk && prev_chunk->status == (CHUNK_FREE | CHUNK_IN_USE)){
+      /* Remove prev_chunk from list */
+      free_list_detach_chunk(&free_chunk_list, prev_chunk);
+      prev_chunk->adj_next = chunk->adj_next;
+      chunk = prev_chunk;
+      prev_chunk = (Free_Chunk*)chunk->adj_prev;
+    }
+    
+    //zeroing_free_chunk(chunk);
+    
+    /* Put the coalesced chunk on the appropriate free chunk list */
+    sspace_put_free_chunk(sspace, chunk);
+    
+    chunk = free_chunk_list.head;
+  }
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp
------------------------------------------------------------------------------
    svn:eol-style = native
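
[Editor's note] gc_collect_free_chunks above coalesces each free chunk with its physical neighbors through adj_prev/adj_next before handing the merged chunk to sspace_put_free_chunk. A simplified sketch of the same merging on an invented layout — the Node struct below is hypothetical, not the VM's Free_Chunk, and it carries an explicit size rather than deriving one from adjacent chunk addresses:

    #include <cstddef>

    // Hypothetical node: chunks laid out contiguously in address order.
    struct Node {
      size_t size;       // bytes covered by this chunk
      bool   free;
      Node  *adj_prev;   // physically previous chunk
      Node  *adj_next;   // physically next chunk
    };

    // Merge n with any free neighbors; returns the merged chunk.
    Node *coalesce(Node *n)
    {
      // Absorb free successors into n.
      while (n->adj_next && n->adj_next->free) {
        Node *next = n->adj_next;
        n->size += next->size;
        n->adj_next = next->adj_next;
        if (n->adj_next) n->adj_next->adj_prev = n;
      }
      // Extend backwards over free predecessors.
      while (n->adj_prev && n->adj_prev->free) {
        Node *prev = n->adj_prev;
        prev->size += n->size;
        prev->adj_next = n->adj_next;
        if (n->adj_next) n->adj_next->adj_prev = prev;
        n = prev;
      }
      return n;
    }

As in the code above, merged neighbors must also be unlinked from whatever free list still references them, which is what free_list_detach_chunk does for the collector lists.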

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp?view=auto&rev=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp Tue Jul  3 20:01:01 2007
@@ -0,0 +1,542 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "sspace_verify.h"
+#include "sspace_chunk.h"
+#include "sspace_mark_sweep.h"
+#include "../utils/vector_block.h"
+#include "../gen/gen.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+#ifdef SSPACE_VERIFY
+
+#define VERIFY_CARD_SIZE_BYTES_SHIFT 12
+#define VERIFY_CARD_SIZE_BYTES (1 << VERIFY_CARD_SIZE_BYTES_SHIFT)
+#define VERIFY_CARD_LOW_MASK (VERIFY_CARD_SIZE_BYTES - 1)
+#define VERIFY_CARD_HIGH_MASK (~VERIFY_CARD_LOW_MASK)
+
+#define VERIFY_MAX_OBJ_SIZE_BYTES (1 << (32-VERIFY_CARD_SIZE_BYTES_SHIFT))
+
+typedef struct Verify_Card {
+  SpinLock lock;
+  Vector_Block *block;
+} Verify_Card;
+
+typedef unsigned int Obj_Addr;
+
+static GC *gc_in_verify = NULL;
+static Verify_Card *alloc_verify_cards = NULL;
+static Verify_Card *mark_verify_cards = NULL;
+static POINTER_SIZE_INT card_num = 0;
+static POINTER_SIZE_INT alloc_obj = 0;
+volatile POINTER_SIZE_INT live_obj_in_mark = 0;
+
+void sspace_verify_init(GC *gc)
+{
+  gc_in_verify = gc;
+  
+  POINTER_SIZE_INT heap_size = gc_gen_total_memory_size((GC_Gen*)gc);
+  card_num = heap_size >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+  POINTER_SIZE_INT cards_size = sizeof(Verify_Card) * card_num;
+  
+  alloc_verify_cards = (Verify_Card*)STD_MALLOC(cards_size);
+  memset(alloc_verify_cards, 0, cards_size);
+  
+  mark_verify_cards = (Verify_Card*)STD_MALLOC(cards_size);
+  memset(mark_verify_cards, 0, cards_size);
+}
+
+static Obj_Addr compose_obj_addr(unsigned int offset, unsigned int size)
+{
+  assert(size < VERIFY_MAX_OBJ_SIZE_BYTES);
+  return offset | (size << VERIFY_CARD_SIZE_BYTES_SHIFT);
+}
+
+static void *decompose_obj_addr(Obj_Addr obj_addr, POINTER_SIZE_INT card_index, unsigned int & size)
+{
+  assert(card_index < card_num);
+  POINTER_SIZE_INT card_offset = obj_addr & VERIFY_CARD_LOW_MASK;
+  POINTER_SIZE_INT heap_offset = VERIFY_CARD_SIZE_BYTES * card_index + card_offset;
+  size = (obj_addr & VERIFY_CARD_HIGH_MASK) >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+  assert(size < VERIFY_MAX_OBJ_SIZE_BYTES);
+  return (void*)(heap_offset + (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+}
+
+static Boolean obj_addr_overlapped(Obj_Addr addr1, Obj_Addr addr2)
+{
+  unsigned int offset1 = addr1 & VERIFY_CARD_LOW_MASK;
+  unsigned int size1 = (addr1 & VERIFY_CARD_HIGH_MASK) >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+  unsigned int ceiling1 = offset1 + size1;
+  unsigned int offset2 = addr2 & VERIFY_CARD_LOW_MASK;
+  unsigned int size2 = (addr2 & VERIFY_CARD_HIGH_MASK) >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+  unsigned int ceiling2 = offset2 + size2;
+  
+  unsigned int reason = 0;
+  if(offset1 == offset2)
+    reason = 1;
+  if((offset1 < offset2) && (ceiling1 > offset2))
+    reason = 2;
+  if((offset2 < offset1) && (ceiling2 > offset1))
+    reason = 3;
+  if(!reason)
+    return FALSE;
+  printf("\nreason: %d\nold offset: %x  size: %d\nnew offset: %x size: %d", reason, (POINTER_SIZE_INT)offset1, size1, (POINTER_SIZE_INT)offset2, size2);
+  return TRUE;
+}
+
+static Vector_Block *create_vector_block(unsigned int size)
+{
+  Vector_Block *block = (Vector_Block*)STD_MALLOC(size);
+  vector_block_init(block, size);
+  return block;
+}
+
+static void verify_card_get_block(Verify_Card *card)
+{
+  lock(card->lock);
+  if(card->block){
+    unlock(card->lock);
+    return;
+  }
+  card->block = create_vector_block(VECTOR_BLOCK_DATA_SIZE_BYTES);
+  unlock(card->lock);
+}
+
+void sspace_verify_alloc(void *addr, unsigned int size)
+{
+  assert(address_belongs_to_gc_heap(addr, gc_in_verify));
+  ++alloc_obj;
+  
+  unsigned int heap_offset = (unsigned int)((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+  unsigned int card_offset = heap_offset & VERIFY_CARD_LOW_MASK;
+  Verify_Card *card = &alloc_verify_cards[heap_offset >> VERIFY_CARD_SIZE_BYTES_SHIFT];
+  
+  verify_card_get_block(card);
+  Vector_Block *block = card->block;
+  
+  Obj_Addr obj_addr = compose_obj_addr(card_offset, size);
+  
+  lock(card->lock);
+  Obj_Addr *p_addr = block->head;
+  while(p_addr < block->tail){
+    assert(!obj_addr_overlapped(obj_addr, *p_addr));
+    p_addr++;
+  }
+  vector_block_add_entry(block, obj_addr);
+  unlock(card->lock);
+}
+
+/* size is the slot size the object was rounded up to */
+static Boolean obj_position_is_correct(void *addr, unsigned int size)
+{
+  Chunk_Header *chunk = NULL;
+  
+  if(size <= SUPER_OBJ_THRESHOLD)
+    chunk = NORMAL_CHUNK_HEADER(addr);
+  else
+    chunk = ABNORMAL_CHUNK_HEADER(addr);
+  if(chunk->slot_size != size) return FALSE;
+  if(((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)chunk->base) % size != 0) return FALSE;
+  return TRUE;
+}
+
+/* size is real size of obj */
+void sspace_verify_mark(void *addr, unsigned int size)
+{
+  assert(address_belongs_to_gc_heap(addr, gc_in_verify));
+  atomic_inc32(&live_obj_in_mark);  /* note: assumes POINTER_SIZE_INT is 32 bits wide here */
+  
+  unsigned int heap_offset = (unsigned int)((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+  unsigned int card_offset = heap_offset & VERIFY_CARD_LOW_MASK;
+  Verify_Card *card = &mark_verify_cards[heap_offset >> VERIFY_CARD_SIZE_BYTES_SHIFT];
+  
+  verify_card_get_block(card);
+  Vector_Block *block = card->block;
+  
+  if(size <= MEDIUM_OBJ_THRESHOLD)
+    size = SMALL_SIZE_ROUNDUP(size);
+  else if(size <= LARGE_OBJ_THRESHOLD)
+    size = MEDIUM_SIZE_ROUNDUP(size);
+  else if(size <= SUPER_OBJ_THRESHOLD)
+    size = LARGE_SIZE_ROUNDUP(size);
+  assert(obj_position_is_correct(addr, size));
+  Obj_Addr obj_addr = compose_obj_addr(card_offset, size);
+  
+  lock(card->lock);
+  Obj_Addr *p_addr = block->head;
+  while(p_addr < block->tail){
+    assert(!obj_addr_overlapped(obj_addr, *p_addr));
+    p_addr++;
+  }
+  vector_block_add_entry(block, obj_addr);
+  unlock(card->lock);
+}
+
+static void reverify_mark(void *addr, unsigned int size)
+{
+  assert(address_belongs_to_gc_heap(addr, gc_in_verify));
+  
+  unsigned int heap_offset = (unsigned int)((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+  unsigned int card_offset = heap_offset & VERIFY_CARD_LOW_MASK;
+  Verify_Card *card = &mark_verify_cards[heap_offset >> VERIFY_CARD_SIZE_BYTES_SHIFT];
+  
+  Vector_Block *block = card->block;
+  assert(block);
+  
+  Obj_Addr obj_addr = compose_obj_addr(card_offset, size);
+  
+  Obj_Addr *p_addr = block->head;
+  while(p_addr < block->tail){
+    if(obj_addr == *p_addr){
+      *p_addr = 0;
+      break;
+    }
+    p_addr++;
+  }
+  assert(p_addr < block->tail);
+}
+
+static void check_mark_cards(void)
+{
+  for(POINTER_SIZE_INT i=0; i<card_num; i++){
+    Vector_Block *block = mark_verify_cards[i].block;
+    if(!block)
+      continue;
+    Obj_Addr *p_addr = block->head;
+    while(p_addr < block->tail){
+      if(*p_addr){
+        unsigned int size = 0;
+        void *addr = NULL;
+        addr = decompose_obj_addr(*p_addr, i, size);
+        printf("Extra mark obj: %x  size: %d\n", (POINTER_SIZE_INT)addr, size);
+      }
+      p_addr++;
+    }
+    vector_block_clear(block);
+  }
+}
+
+static void clear_verify_cards(void)
+{
+  for(POINTER_SIZE_INT i=0; i<card_num; i++){
+    Verify_Card *card = &alloc_verify_cards[i];
+    if(card->block)
+      vector_block_clear(card->block);
+  }
+}
+
+static void summarize_sweep_verify(GC *gc)
+{
+  POINTER_SIZE_INT live_obj_num = 0;
+  for(unsigned int i=0; i<gc->num_collectors; ++i){
+    live_obj_num += gc->collectors[i]->live_obj_num;
+  }
+  printf("Live obj in sweeping: %d\n", live_obj_num);
+}
+
+void sspace_verify_free_area(POINTER_SIZE_INT *start, POINTER_SIZE_INT size)
+{
+  POINTER_SIZE_INT *p_value = start;
+  
+  assert(!(size % BYTES_PER_WORD));
+  size /= BYTES_PER_WORD;
+  while(size--)
+    assert(!*p_value++);
+}
+
+void sspace_verify_after_collection(GC *gc)
+{
+  printf("Allocated obj: %d\n", alloc_obj);
+  alloc_obj = 0;
+  printf("Live obj in marking: %d\n", live_obj_in_mark);
+  live_obj_in_mark = 0;
+  
+  summarize_sweep_verify(gc);
+  
+  clear_verify_cards();
+  
+  Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc);
+  Chunk_Header *chunk = (Chunk_Header*)space_heap_start((Space*)sspace);
+  Chunk_Header *sspace_ceiling = (Chunk_Header*)space_heap_end((Space*)sspace);
+  POINTER_SIZE_INT total_live_obj = 0;
+  
+  for(; chunk < sspace_ceiling; chunk = (Chunk_Header*)CHUNK_END(chunk)){
+    /* chunk is free before GC */
+    if(chunk->status == CHUNK_FREE){
+      unsigned int header_size = sizeof(Free_Chunk);
+      //sspace_verify_free_area((POINTER_SIZE_INT*)((POINTER_SIZE_INT)chunk + header_size), (POINTER_SIZE_INT)chunk->adj_next - (POINTER_SIZE_INT)chunk - header_size);
+      continue;
+    }
+    if(chunk->status & CHUNK_ABNORMAL){
+      assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_IN_USE));
+      assert(chunk->slot_size > SUPER_OBJ_THRESHOLD);
+      Partial_Reveal_Object *obj = (Partial_Reveal_Object*)chunk->base;
+      assert(chunk->slot_size == vm_object_size(obj));
+      assert(get_obj_info_raw(obj) & SUPER_OBJ_MASK);
+    }
+    /* chunk is used as a normal sized obj chunk */
+    unsigned int slot_num = chunk->slot_num;
+    POINTER_SIZE_INT *table = chunk->table;
+    POINTER_SIZE_INT live_obj_in_chunk = 0;
+    
+    unsigned int word_index = 0;
+    for(unsigned int i=0; i<slot_num; ++i){
+      unsigned int color_index = COLOR_BITS_PER_OBJ * i;
+      word_index = color_index / BITS_PER_WORD;
+      void *p_obj = slot_index_to_addr(chunk, i);
+      if(table[word_index] & (cur_alloc_color << (color_index % BITS_PER_WORD))){
+        sspace_verify_alloc(p_obj, chunk->slot_size);
+        reverify_mark(p_obj, chunk->slot_size);
+        ++live_obj_in_chunk;
+      } else {
+        //sspace_verify_free_area((POINTER_SIZE_INT*)p_obj, chunk->slot_size);
+      }
+    }
+    total_live_obj += live_obj_in_chunk;
+  }
+  printf("Live obj after collection: %d\n", total_live_obj);
+  check_mark_cards();
+}
+
+/*
+void sspace_verify_super_obj(GC *gc)
+{
+  Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc);
+  Chunk_Header *chunk = (Chunk_Header*)space_heap_start((Space*)sspace);
+  Chunk_Header *sspace_ceiling = (Chunk_Header*)space_heap_end((Space*)sspace);
+  
+  for(; chunk < sspace_ceiling; chunk = (Chunk_Header*)CHUNK_END(chunk)){
+    if(chunk->status & CHUNK_ABNORMAL){
+      assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_IN_USE));
+      assert(chunk->slot_size > SUPER_OBJ_THRESHOLD);
+      Partial_Reveal_Object *obj = (Partial_Reveal_Object*)chunk->base;
+      assert(chunk->slot_size == vm_object_size(obj));
+      assert(get_obj_info_raw(obj) & SUPER_OBJ_MASK);
+    }
+  }
+}
+*/
+
+
+/* sspace verify marking with vtable marking in advance */
+
+static Pool *trace_pool = NULL;
+static Vector_Block *trace_stack = NULL;
+POINTER_SIZE_INT live_obj_in_verify_marking = 0;
+
+static Boolean obj_mark_in_vtable(GC *gc, Partial_Reveal_Object *obj)
+{
+  assert(address_belongs_to_gc_heap(obj, gc));
+  assert((vm_object_size(obj) <= SUPER_OBJ_THRESHOLD) || (get_obj_info_raw(obj) & SUPER_OBJ_MASK));
+  Boolean marked = obj_mark_in_vt(obj);
+#ifdef SSPACE_VERIFY
+  if(marked) live_obj_in_verify_marking++;
+#endif
+  return marked;
+}
+
+static void tracestack_push(void *p_obj)
+{
+  vector_stack_push(trace_stack, (POINTER_SIZE_INT)p_obj);
+  
+  if( !vector_stack_is_full(trace_stack)) return;
+  
+  pool_put_entry(trace_pool, trace_stack);
+  trace_stack = free_task_pool_get_entry(&gc_metadata);
+  assert(trace_stack);
+}
+
+static FORCE_INLINE void scan_slot(GC *gc, REF *p_ref)
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if( p_obj == NULL) return;
+  
+  if(obj_mark_in_vtable(gc, p_obj))
+    tracestack_push(p_obj);
+  
+  return;
+}
+
+static FORCE_INLINE void scan_object(GC *gc, Partial_Reveal_Object *p_obj)
+{
+  if(!object_has_ref_field(p_obj) ) return;
+  
+  REF *p_ref;
+  
+  if (object_is_array(p_obj)) {   /* scan array object */
+    
+    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len;
+    
+    p_ref = (REF*)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    
+    for (unsigned int i = 0; i < array_length; i++) {
+      scan_slot(gc, p_ref+i);
+    }
+  } else { /* scan non-array object */
+    
+    unsigned int num_refs = object_ref_field_num(p_obj);
+    
+    int *ref_iterator = object_ref_iterator_init(p_obj);
+    
+    for(unsigned int i=0; i<num_refs; i++){
+      p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+      scan_slot(gc, p_ref);
+    }
+#ifndef BUILD_IN_REFERENT
+    //scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+  }
+}
+
+static void trace_object(GC *gc, Partial_Reveal_Object *p_obj)
+{
+  scan_object(gc, p_obj);
+  
+  while( !vector_stack_is_empty(trace_stack)){
+    p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack);
+    scan_object(gc, p_obj);
+  }
+}
+
+void sspace_verify_vtable_mark(GC *gc)
+{
+  GC_Metadata *metadata = gc->metadata;
+  Pool *rootset_pool = metadata->gc_rootset_pool;
+  
+  trace_stack = free_task_pool_get_entry(metadata);
+  trace_pool = sync_pool_create();
+  
+  pool_iterator_init(rootset_pool);
+  Vector_Block *root_set = pool_iterator_next(rootset_pool);
+  
+  while(root_set){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set, iter)){
+      REF *p_ref = (REF*)*iter;
+      iter = vector_block_iterator_advance(root_set, iter);
+      
+      Partial_Reveal_Object *p_obj = read_slot(p_ref);
+      assert(p_obj!=NULL);
+      if(obj_mark_in_vtable(gc, p_obj))
+        tracestack_push(p_obj);
+    }
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */
+  pool_put_entry(trace_pool, trace_stack);
+  
+  /* second step: iterate over the mark tasks and scan objects */
+  /* get a task buf for the mark stack */
+  trace_stack = free_task_pool_get_entry(metadata);
+  
+  Vector_Block *mark_task = pool_get_entry(trace_pool);
+  
+  while(mark_task){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task, iter)){
+      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
+      iter = vector_block_iterator_advance(mark_task, iter);
+      
+      trace_object(gc, p_obj);
+    }
+    /* run out one task, put back to the pool and grab another task */
+    vector_stack_clear(mark_task);
+    pool_put_entry(metadata->free_task_pool, mark_task);
+    mark_task = pool_get_entry(trace_pool);
+  }
+  
+  /* put back the last mark stack to the free pool */
+  vector_stack_clear(trace_stack);
+  pool_put_entry(metadata->free_task_pool, trace_stack);
+  trace_stack = NULL;
+  sync_pool_destruct(trace_pool);
+  trace_pool = NULL;
+  printf("Live obj in vtable marking: %d\n", live_obj_in_verify_marking);
+  live_obj_in_verify_marking = 0;
+}
+
+
+#endif
+
+
+
+#ifdef SSPACE_TIME
+
+/* Read the CPU time-stamp counter: 0x0F 0x31 is the RDTSC opcode;
+   MSVC returns the resulting EDX:EAX pair as the uint64 value */
+inline uint64 tsc()
+{
+  __asm _emit 0x0F;
+  __asm _emit 0x31;
+}
+
+#define CPU_HZ 3000000  // time-stamp cycles per millisecond, assuming a 3GHz CPU
+
+static uint64 gc_start_time;
+static uint64 mark_start_time;
+static uint64 sweep_start_time;
+static uint64 merge_start_time;
+
+void sspace_gc_time(GC *gc, Boolean before_gc)
+{
+  if(before_gc){
+    gc_start_time = tsc();
+    mark_start_time = gc_start_time;
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > gc_start_time);
+    printf("\n\nGC %d time: %dms\n\n", gc->num_collections, (end_time-gc_start_time) / CPU_HZ);
+  }
+}
+
+void sspace_mark_time(Boolean before_mark)
+{
+  /* each phase's start time is recorded when the previous phase ends, so only the after-phase calls are expected here and in the functions below */
+  assert(before_mark == FALSE);
+  if(before_mark){
+    mark_start_time = tsc();
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > mark_start_time);
+    printf("\nMark time: %dms\n", (end_time-mark_start_time) / CPU_HZ);
+    sweep_start_time = end_time;
+  }
+}
+
+void sspace_sweep_time(Boolean before_sweep)
+{
+  assert(before_sweep == FALSE);
+  if(before_sweep){
+    sweep_start_time = tsc();
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > sweep_start_time);
+    printf("\nSweep time: %dms\n", (end_time-sweep_start_time) / CPU_HZ);
+    merge_start_time = end_time;
+  }
+}
+
+void sspace_merge_time(Boolean before_merge)
+{
+  assert(before_merge == FALSE);
+  if(before_merge){
+    merge_start_time = tsc();
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > merge_start_time);
+    printf("\nMerge time: %dms\n\n", (end_time-merge_start_time) / CPU_HZ);
+  }
+}
+
+#endif

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp
------------------------------------------------------------------------------
    svn:eol-style = native
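
[Editor's note] The verify cards above pack each object into a single 32-bit word per 4KB card — the low 12 bits hold the object's offset within the card, the high bits its size — and each new entry is checked against every existing entry for byte-range overlap. A self-contained sketch of that packing and the interval test, reusing the 12-bit card shift; the function names are invented for the example:

    #include <cassert>
    #include <cstdint>

    static const unsigned CARD_SHIFT = 12;                // 4KB cards
    static const uint32_t LOW_MASK   = (1u << CARD_SHIFT) - 1;

    static uint32_t pack(uint32_t offset, uint32_t size)
    {
      assert(offset <= LOW_MASK && size < (1u << (32 - CARD_SHIFT)));
      return offset | (size << CARD_SHIFT);
    }

    // Two [offset, offset+size) ranges overlap iff each starts
    // before the other ends.
    static bool overlapped(uint32_t a, uint32_t b)
    {
      uint32_t off_a = a & LOW_MASK, end_a = off_a + (a >> CARD_SHIFT);
      uint32_t off_b = b & LOW_MASK, end_b = off_b + (b >> CARD_SHIFT);
      return off_a < end_b && off_b < end_a;
    }

    int main()
    {
      assert(!overlapped(pack(0, 16), pack(16, 16)));  // adjacent: no overlap
      assert(overlapped(pack(0, 32), pack(16, 16)));   // second starts inside first
      return 0;
    }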

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.h?view=auto&rev=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.h Tue Jul  3 20:01:01 2007
@@ -0,0 +1,45 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#ifndef _SSPACE_VERIFY_H_
+#define _SSPACE_VERIFY_H_
+
+#include "../common/gc_common.h"
+
+#define SSPACE_VERIFY
+#define SSPACE_CHUNK_INFO
+#define SSPACE_ALLOC_INFO
+//#define SSPACE_TIME
+
+struct Sspace;
+
+void sspace_verify_init(GC *gc);
+void sspace_verify_alloc(void *addr, unsigned int size);
+void sspace_verify_vtable_mark(GC *gc);
+void sspace_verify_mark(void *addr, unsigned int size);
+void sspace_verify_free_area(POINTER_SIZE_INT *start, POINTER_SIZE_INT size);
+void sspace_verify_after_collection(GC *gc);
+
+void sspace_chunks_info(Sspace *sspace, Boolean before_gc);
+void sspace_alloc_info(unsigned int size);
+void sspace_alloc_info_summary(void);
+
+void sspace_gc_time(GC *gc, Boolean before_gc);
+void sspace_mark_time(Boolean before_mark);
+void sspace_sweep_time(Boolean before_sweep);
+void sspace_merge_time(Boolean before_merge);
+
+#endif // _SSPACE_VERIFY_H_

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.h
------------------------------------------------------------------------------
    svn:eol-style = native
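
[Editor's note] The SSPACE_TIME helpers declared above time GC phases with raw RDTSC reads and a hard-coded cycles-per-millisecond constant. Where available, the __rdtsc() intrinsic expresses the same counter read portably across MSVC and GCC/Clang on x86; a sketch under that assumption:

    #include <cstdint>
    #include <cstdio>
    #if defined(_MSC_VER)
    #include <intrin.h>      // __rdtsc on MSVC
    #else
    #include <x86intrin.h>   // __rdtsc on GCC/Clang
    #endif

    int main()
    {
      uint64_t start = __rdtsc();
      volatile unsigned sink = 0;            // volatile keeps the loop alive
      for (unsigned i = 0; i < 1000000; ++i)
        sink = sink + i;                     // some work to time
      uint64_t cycles = __rdtsc() - start;
      // Converting cycles to wall time needs the real TSC frequency;
      // the CPU_HZ constant above hard-codes a 3GHz assumption.
      std::printf("elapsed cycles: %llu\n", (unsigned long long)cycles);
      return 0;
    }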

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h?view=diff&rev=553050&r1=553049&r2=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h Tue Jul  3 20:01:01 2007
@@ -22,8 +22,11 @@
 #define _COLLECTOR_H_
 
 #include "../common/gc_space.h"
+#include "../mark_sweep/sspace_verify.h"
+
 struct Block_Header;
 struct Stealable_Stack;
+struct Free_Chunk_List;
 
 #define NORMAL_SIZE_SEGMENT_GRANULARITY_BITS  8
 #define NORMAL_SIZE_SEGMENT_GRANULARITY (1 << NORMAL_SIZE_SEGMENT_GRANULARITY_BITS)
@@ -62,6 +65,11 @@
   
   Block_Header* cur_compact_block;
   Block_Header* cur_target_block;
+  
+  Free_Chunk_List *free_chunk_list;
+#ifdef SSPACE_VERIFY
+  POINTER_SIZE_INT live_obj_num;
+#endif
   
   void(*task_func)(void*) ;   /* current task */
   

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h?view=diff&rev=553050&r1=553049&r2=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h Tue Jul  3 20:01:01 2007
@@ -23,6 +23,8 @@
 
 #include "../common/gc_space.h"
 
+struct Chunk_Header;
+
 /* Mutator thread local information for GC */
 typedef struct Mutator {
   /* <-- first couple of fields are overloaded as Allocator */
@@ -37,6 +39,8 @@
   
   Vector_Block* rem_set;
   Vector_Block* obj_with_fin;
+  Chunk_Header **small_chunks;
+  Chunk_Header **medium_chunks;
   Mutator* next;  /* The gc info area associated with the next active thread. */
 } Mutator;
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp?view=diff&rev=553050&r1=553049&r2=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp Tue Jul  3 20:01:01 2007
@@ -253,4 +253,3 @@
     printf(" %-14s:    %-7s |   Before %10d   |   After %10d   |\n", "hashcode", "NUM", gc_verifier->num_hash_before_gc, gc_verifier->num_hash_after_gc);
 }
 
-

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp?view=diff&rev=553050&r1=553049&r2=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp Tue Jul  3 20:01:01 2007
@@ -418,4 +418,3 @@
 
 
 
-

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp?view=diff&rev=553050&r1=553049&r2=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp Tue Jul  3 20:01:01 2007
@@ -528,4 +528,3 @@
 
 
 
-

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.h?view=diff&rev=553050&r1=553049&r2=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.h Tue Jul  3 20:01:01 2007
@@ -97,4 +97,3 @@
 
 #endif
 
-

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp?view=diff&rev=553050&r1=553049&r2=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp Tue Jul  3 20:01:01 2007
@@ -147,4 +147,3 @@
 
 
 
-

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_mutator_effect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_mutator_effect.cpp?view=diff&rev=553050&r1=553049&r2=553050
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_mutator_effect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_mutator_effect.cpp Tue Jul  3 20:01:01 2007
@@ -433,4 +433,3 @@
  
 
 
-


