harmony-commits mailing list archives

From: x..@apache.org
Subject: svn commit: r606876 [4/6] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ jni/ los/ mark_compact/ mark_sweep/ semi_space/ thread/ trace_forward/ utils/ verify/
Date: Wed, 26 Dec 2007 10:17:15 GMT
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h Wed Dec 26 02:17:10 2007
@@ -0,0 +1,548 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#ifndef _SSPACE_CHUNK_H_
+#define _SSPACE_CHUNK_H_
+
+#include "wspace.h"
+
+//#define SSPACE_USE_FASTDIV
+#ifdef SSPACE_USE_FASTDIV
+#if 0
+#define massert(x) do { if(!(x)) { printf("%s:%d: Assertion %s failed\n", __FILE__, __LINE__, #x); exit(1); }} while(0)
+#else
+#define massert(x)
+#endif
+#define MAX_SLOT_SIZE 1024
+#define MAX_SLOT_SIZE_AFTER_SHIFTING 31
+#define MAX_ADDR_OFFSET (16 * 1024)
+#define INIT_ALLOC_SIZE (128 * 1024)
+
+extern unsigned int *shift_table;
+extern unsigned short *compact_table[MAX_SLOT_SIZE_AFTER_SHIFTING];
+extern unsigned int mask[MAX_SLOT_SIZE_AFTER_SHIFTING];
+extern void fastdiv_init(void);
+
+inline int fastdiv_div(int x, int y)
+{
+	massert(x % y == 0);
+	massert(0 <= y && y <= 1024);
+	massert(y % 4 == 0);
+	massert(y <= 128 || y % 8 == 0);
+	massert(y <= 256 || y % 128 == 0);
+	massert(x <= (1 << 16));
+
+	int s = shift_table[y];
+	massert(s >= 2);
+	x >>= s;
+	y >>= s;
+
+	massert(x >= 0 && x <= 16 * (1 << 10));
+	massert(y <= 32 && y % 2);
+
+	return (int)compact_table[y][x & mask[y]];
+}
+#else
+#define fastdiv_div(x,y) ((x) / (y))
+#define fastdiv_init() ((void)0)
+#endif
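+
+/* Illustrative note: with SSPACE_USE_FASTDIV enabled, fastdiv_div(x, y) is intended to
+ * match the plain (x)/(y) of the fallback branch for the exact divisions used here
+ * (the masserts require x % y == 0); e.g. fastdiv_div(96, 32) should yield 3, provided
+ * fastdiv_init() has been called first to build shift_table and compact_table. */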
+
+enum Chunk_Status {
+  CHUNK_NIL = 0,
+  CHUNK_FREE = 0x1,
+  CHUNK_FRESH = 0x2,
+  CHUNK_NORMAL = 0x10,
+  CHUNK_ABNORMAL = 0x20,
+  CHUNK_NEED_ZEROING = 0x100,
+  CHUNK_TO_MERGE = 0x200,
+  CHUNK_IN_USE = 0x400, /* just keep info for now, not used */
+  CHUNK_USED = 0x800, /* just keep info for now, not used */
+  CHUNK_MERGED = 0x1000
+};
+
+typedef volatile POINTER_SIZE_INT Chunk_Status_t;
+
+typedef struct Chunk_Header_Basic {
+  Chunk_Header_Basic *next;
+  Chunk_Header_Basic *prev;
+  Chunk_Status_t status;
+  Chunk_Header_Basic *adj_next;  // adjacent next chunk
+  Chunk_Header_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
+} Chunk_Header_Basic;
+
+typedef struct Chunk_Header {
+  /* Beginning of Chunk_Header_Basic */
+  Chunk_Header *next;           /* pointing to the next pfc in the pfc pool */
+  Chunk_Header *prev;           /* pointing to the prev pfc in the pfc pool */
+  Chunk_Status_t status;
+  Chunk_Header_Basic *adj_next;  // adjacent next chunk
+  Chunk_Header_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
+  /* End of Chunk_Header_Basic */
+  void *base;
+  unsigned int slot_size;
+  unsigned int slot_num;
+  unsigned int slot_index;      /* index of the first free slot in this chunk */
+  unsigned int alloc_num;       /* number of allocated slots in this chunk */
+  POINTER_SIZE_INT table[1];
+} Chunk_Header;
+
+
+#define NORMAL_CHUNK_SHIFT_COUNT    16
+#define NORMAL_CHUNK_SIZE_BYTES     (1 << NORMAL_CHUNK_SHIFT_COUNT)
+#define NORMAL_CHUNK_LOW_MASK       ((POINTER_SIZE_INT)(NORMAL_CHUNK_SIZE_BYTES - 1))
+#define NORMAL_CHUNK_HIGH_MASK      (~NORMAL_CHUNK_LOW_MASK)
+#define NORMAL_CHUNK_HEADER(addr)   ((Chunk_Header*)((POINTER_SIZE_INT)(addr) & NORMAL_CHUNK_HIGH_MASK))
+#define ABNORMAL_CHUNK_HEADER(addr) ((Chunk_Header*)((POINTER_SIZE_INT)(addr) & CHUNK_GRANULARITY_HIGH_MASK))
+
+#define MAX_SLOT_INDEX 0xFFffFFff
+#define COLOR_BITS_PER_OBJ 4   // should be a power of 2
+#define SLOT_NUM_PER_WORD_IN_TABLE  (BITS_PER_WORD /COLOR_BITS_PER_OBJ)
+
+/* Two equations:
+ * 1. CHUNK_HEADER_VARS_SIZE_BYTES + NORMAL_CHUNK_TABLE_SIZE_BYTES + slot_size*NORMAL_CHUNK_SLOT_NUM = NORMAL_CHUNK_SIZE_BYTES
+ * 2. (BITS_PER_BYTE * NORMAL_CHUNK_TABLE_SIZE_BYTES)/COLOR_BITS_PER_OBJ >= NORMAL_CHUNK_SLOT_NUM
+ * ===>
+ * NORMAL_CHUNK_SLOT_NUM <= BITS_PER_BYTE*(NORMAL_CHUNK_SIZE_BYTES - CHUNK_HEADER_VARS_SIZE_BYTES) / (BITS_PER_BYTE*slot_size + COLOR_BITS_PER_OBJ)
+ * ===>
+ * NORMAL_CHUNK_SLOT_NUM = BITS_PER_BYTE*(NORMAL_CHUNK_SIZE_BYTES - CHUNK_HEADER_VARS_SIZE_BYTES) / (BITS_PER_BYTE*slot_size + COLOR_BITS_PER_OBJ)
+ */
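+
+/* Worked example (illustrative; assumes a 32-bit build with BITS_PER_BYTE = 8,
+ * BITS_PER_WORD = 32, BYTES_PER_WORD = 4, and 40 bytes of header fields before `table`):
+ * for slot_size = 8,
+ *   NORMAL_CHUNK_SLOT_NUM         = 8*(65536 - 40) / (8*8 + 4) = 523968 / 68 = 7705
+ *   NORMAL_CHUNK_TABLE_SIZE_BYTES = ((7705 + 7) / 8) * 4       = 3856
+ *   header = 40 + 3856 = 3896 bytes, slot area = 61640 bytes   = 7705 slots of 8 bytes,
+ * which satisfies both equations above.
+ */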
+
+#define CHUNK_HEADER_VARS_SIZE_BYTES      ((POINTER_SIZE_INT)&(((Chunk_Header*)0)->table))
+#define NORMAL_CHUNK_SLOT_AREA_SIZE_BITS  (BITS_PER_BYTE * (NORMAL_CHUNK_SIZE_BYTES - CHUNK_HEADER_VARS_SIZE_BYTES))
+#define SIZE_BITS_PER_SLOT(chunk)         (BITS_PER_BYTE * (chunk)->slot_size + COLOR_BITS_PER_OBJ)
+
+#define NORMAL_CHUNK_SLOT_NUM(chunk)          (NORMAL_CHUNK_SLOT_AREA_SIZE_BITS / SIZE_BITS_PER_SLOT(chunk))
+#define NORMAL_CHUNK_TABLE_SIZE_BYTES(chunk)  (((NORMAL_CHUNK_SLOT_NUM(chunk) + SLOT_NUM_PER_WORD_IN_TABLE-1) / SLOT_NUM_PER_WORD_IN_TABLE) * BYTES_PER_WORD)
+#define NORMAL_CHUNK_HEADER_SIZE_BYTES(chunk) (CHUNK_HEADER_VARS_SIZE_BYTES + NORMAL_CHUNK_TABLE_SIZE_BYTES(chunk))
+
+#define NORMAL_CHUNK_BASE(chunk)    ((void*)((POINTER_SIZE_INT)(chunk) + NORMAL_CHUNK_HEADER_SIZE_BYTES(chunk)))
+#define ABNORMAL_CHUNK_BASE(chunk)  ((void*)((POINTER_SIZE_INT)(chunk) + sizeof(Chunk_Header)))
+
+#define CHUNK_END(chunk)  ((chunk)->adj_next)
+#define CHUNK_SIZE(chunk) ((POINTER_SIZE_INT)(chunk)->adj_next - (POINTER_SIZE_INT)(chunk))
+
+#define NUM_ALIGNED_FREE_CHUNK_BUCKET   (HYPER_OBJ_THRESHOLD >> NORMAL_CHUNK_SHIFT_COUNT)
+#define NUM_UNALIGNED_FREE_CHUNK_BUCKET (HYPER_OBJ_THRESHOLD >> CHUNK_GRANULARITY_BITS)
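+/* With the defaults defined below (HYPER_OBJ_THRESHOLD = 128*KB, NORMAL_CHUNK_SHIFT_COUNT = 16,
+   CHUNK_GRANULARITY_BITS = 10) and assuming KB = 1024, these evaluate to 2 aligned and
+   128 unaligned free-chunk buckets. */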
+
+inline void *slot_index_to_addr(Chunk_Header *chunk, unsigned int index)
+{ return (void*)((POINTER_SIZE_INT)chunk->base + chunk->slot_size * index); }
+
+inline unsigned int slot_addr_to_index(Chunk_Header *chunk, void *addr)
+{ return (unsigned int)fastdiv_div(((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)chunk->base) , chunk->slot_size); }
+
+typedef struct Free_Chunk {
+  /* Beginning of Chunk_Header_Basic */
+  Free_Chunk *next;             /* pointing to the next free Free_Chunk */
+  Free_Chunk *prev;             /* pointing to the prev free Free_Chunk */
+  Chunk_Status_t status;
+  Chunk_Header_Basic *adj_next;  // adjacent next chunk
+  Chunk_Header_Basic *adj_prev;  // adjacent previous chunk, for merging continuous free chunks
+  /* End of Chunk_Header_Basic */
+} Free_Chunk;
+
+typedef struct Free_Chunk_List {
+  Free_Chunk *head;  /* get new free chunk from head */
+  Free_Chunk *tail;  /* put free chunk to tail */
+  unsigned int chunk_num;
+  SpinLock lock;
+} Free_Chunk_List;
+
+/*
+typedef union Chunk{
+  Chunk_Header   header;
+  Free_Chunk     free_chunk;
+  unsigned char  raw_bytes[NORMAL_CHUNK_SIZE_BYTES];
+} Chunk;
+*/
+
+inline Boolean is_free_chunk_merged(Free_Chunk* free_chunk)
+{
+  assert(free_chunk->status & CHUNK_FREE);
+  return (Boolean)(free_chunk->status & CHUNK_MERGED);
+}
+
+inline void free_chunk_list_init(Free_Chunk_List *list)
+{
+  list->head = NULL;
+  list->tail = NULL;
+  list->chunk_num = 0;
+  list->lock = FREE_LOCK;
+}
+
+inline void free_chunk_list_clear(Free_Chunk_List *list)
+{
+  list->head = NULL;
+  list->tail = NULL;
+  list->chunk_num = 0;
+  assert(list->lock == FREE_LOCK);
+}
+
+inline void free_list_detach_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+  if(chunk->prev)
+    chunk->prev->next = chunk->next;
+  else  // chunk is the head
+    list->head = chunk->next;
+  
+  if(chunk->next)
+    chunk->next->prev = chunk->prev;
+  else
+    list->tail = chunk->prev;
+}
+
+inline void move_free_chunks_between_lists(Free_Chunk_List *to_list, Free_Chunk_List *from_list)
+{
+  if(to_list->tail){
+    to_list->head->prev = from_list->tail;
+  } else {
+    to_list->tail = from_list->tail;
+  }
+  if(from_list->head){
+    from_list->tail->next = to_list->head;
+    to_list->head = from_list->head;
+  }
+  from_list->head = NULL;
+  from_list->tail = NULL;
+}
+
+/* Padding the last index word in the table to facilitate allocation */
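+/* (Presumably the padding marks the table bits beyond slot_num as already allocated,
+    so the slot-scanning allocator never hands out slots past the end of the chunk.) */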
+inline void chunk_pad_last_index_word(Chunk_Header *chunk, POINTER_SIZE_INT alloc_mask)
+{
+  unsigned int ceiling_index_in_last_word = (chunk->slot_num * COLOR_BITS_PER_OBJ) % BITS_PER_WORD;
+  if(!ceiling_index_in_last_word)
+    return;
+  POINTER_SIZE_INT padding_mask = ~(((POINTER_SIZE_INT)1 << ceiling_index_in_last_word) - 1);
+  padding_mask &= alloc_mask;
+  unsigned int last_word_index = (chunk->slot_num-1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  chunk->table[last_word_index] |= padding_mask;
+}
+
+inline void chunk_pad_last_index_word_concurrent(Chunk_Header *chunk, POINTER_SIZE_INT alloc_mask)
+{
+  unsigned int ceiling_index_in_last_word = (chunk->slot_num * COLOR_BITS_PER_OBJ) % BITS_PER_WORD;
+  if(!ceiling_index_in_last_word)
+    return;
+  POINTER_SIZE_INT padding_mask = ~(((POINTER_SIZE_INT)1 << ceiling_index_in_last_word) - 1);
+  padding_mask &= alloc_mask;
+  unsigned int last_word_index = (chunk->slot_num-1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  POINTER_SIZE_INT old_word = chunk->table[last_word_index];
+  POINTER_SIZE_INT new_word = old_word | padding_mask;
+  while (old_word != new_word){
+    POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void **) &chunk->table[last_word_index],(void*)new_word ,(void*)old_word );
+    if(temp == old_word)
+      return;
+    old_word = chunk->table[last_word_index];
+    new_word = old_word | padding_mask;
+  }
+}
+
+
+
+/* Depadding the last index word in the table to facilitate allocation */
+inline void chunk_depad_last_index_word(Chunk_Header *chunk)
+{
+  unsigned int ceiling_index_in_last_word = (chunk->slot_num * COLOR_BITS_PER_OBJ) % BITS_PER_WORD;
+  if(!ceiling_index_in_last_word)
+    return;
+  POINTER_SIZE_INT depadding_mask = ((POINTER_SIZE_INT)1 << ceiling_index_in_last_word) - 1;
+  unsigned int last_word_index = (chunk->slot_num-1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  chunk->table[last_word_index] &= depadding_mask;
+}
+
+extern POINTER_SIZE_INT cur_alloc_mask;
+/* Used for allocating a fixed-size chunk from free area lists */
+inline void normal_chunk_init(Chunk_Header *chunk, unsigned int slot_size)
+{
+  //Modified this assertion for concurrent sweep
+  //assert(chunk->status == CHUNK_FREE);
+  assert(chunk->status & CHUNK_FREE);
+  assert(CHUNK_SIZE(chunk) == NORMAL_CHUNK_SIZE_BYTES);
+  
+  chunk->next = NULL;
+  chunk->status = CHUNK_FRESH | CHUNK_NORMAL | CHUNK_NEED_ZEROING;
+  chunk->slot_size = slot_size;
+  chunk->slot_num = NORMAL_CHUNK_SLOT_NUM(chunk);
+  chunk->slot_index = 0;
+  chunk->alloc_num = 0;
+  chunk->base = NORMAL_CHUNK_BASE(chunk);
+  memset(chunk->table, 0, NORMAL_CHUNK_TABLE_SIZE_BYTES(chunk));//memset table
+  //chunk_pad_last_index_word(chunk, cur_alloc_mask);
+  fastdiv_init();
+}
+
+/* Used for allocating a chunk for large object from free area lists */
+inline void abnormal_chunk_init(Chunk_Header *chunk, unsigned int chunk_size, unsigned int obj_size)
+{
+  //Modified this assertion for concurrent sweep
+  //assert(chunk->status == CHUNK_FREE);
+  assert(chunk->status & CHUNK_FREE);
+  assert(CHUNK_SIZE(chunk) == chunk_size);
+  
+  chunk->next = NULL;
+  chunk->status = CHUNK_ABNORMAL;
+  chunk->slot_size = obj_size;
+  chunk->slot_num = 1;
+  chunk->slot_index = 0;
+  chunk->base = ABNORMAL_CHUNK_BASE(chunk);
+}
+
+
+#ifdef POINTER64
+  #define GC_OBJECT_ALIGNMENT_BITS    3
+#else
+  #define GC_OBJECT_ALIGNMENT_BITS    2
+#endif
+
+#define MEDIUM_OBJ_THRESHOLD  (128)
+#define LARGE_OBJ_THRESHOLD   (256)
+#define SUPER_OBJ_THRESHOLD   (1024)
+#define HYPER_OBJ_THRESHOLD   (128*KB)
+
+#define SMALL_GRANULARITY_BITS  (GC_OBJECT_ALIGNMENT_BITS)
+#define MEDIUM_GRANULARITY_BITS (SMALL_GRANULARITY_BITS + 1)
+#define LARGE_GRANULARITY_BITS  7
+#define CHUNK_GRANULARITY_BITS  10
+
+#define CHUNK_GRANULARITY       (1 << CHUNK_GRANULARITY_BITS)
+#define CHUNK_GRANULARITY_LOW_MASK    ((POINTER_SIZE_INT)(CHUNK_GRANULARITY-1))
+#define CHUNK_GRANULARITY_HIGH_MASK   (~CHUNK_GRANULARITY_LOW_MASK)
+
+#define SMALL_IS_LOCAL_ALLOC   TRUE
+#define MEDIUM_IS_LOCAL_ALLOC  TRUE
+#define LARGE_IS_LOCAL_ALLOC  FALSE
+
+#define NORMAL_SIZE_ROUNDUP(size, seg)  (((size) + (seg)->granularity-1) & (seg)->gran_high_mask)
+#define SUPER_OBJ_TOTAL_SIZE(size)  (sizeof(Chunk_Header) + (size))
+#define SUPER_SIZE_ROUNDUP(size)    ((SUPER_OBJ_TOTAL_SIZE(size) + CHUNK_GRANULARITY-1) & CHUNK_GRANULARITY_HIGH_MASK)
+
+#define NORMAL_SIZE_TO_INDEX(size, seg) ((((size)-(seg)->size_min) >> (seg)->gran_shift_bits) - 1)
+#define ALIGNED_CHUNK_SIZE_TO_INDEX(size)     (((size) >> NORMAL_CHUNK_SHIFT_COUNT) - 1)
+#define UNALIGNED_CHUNK_SIZE_TO_INDEX(size)   (((size) >> CHUNK_GRANULARITY_BITS) - 1)
+
+#define NORMAL_INDEX_TO_SIZE(index, seg)  ((((index) + 1) << (seg)->gran_shift_bits) + (seg)->size_min)
+#define ALIGNED_CHUNK_INDEX_TO_SIZE(index)    (((index) + 1) << NORMAL_CHUNK_SHIFT_COUNT)
+#define UNALIGNED_CHUNK_INDEX_TO_SIZE(index)  (((index) + 1) << CHUNK_GRANULARITY_BITS)
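+
+/* Example with hypothetical segment parameters (for illustration only): a segment with
+ * size_min = 128 and gran_shift_bits = 4 (granularity 16) maps size 160 to index
+ * ((160-128)>>4)-1 = 1, and NORMAL_INDEX_TO_SIZE(1, seg) = ((1+1)<<4)+128 = 160,
+ * so the two macros are inverses for granularity-aligned sizes. */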
+
+#define SUPER_OBJ_MASK ((Obj_Info_Type)0x20)  /* the 4th bit in obj info */
+
+//#define WSPACE_CONCURRENT_GC_STATS
+
+#ifdef WSPACE_CONCURRENT_GC_STATS
+#define NEW_OBJ_MASK ((Obj_Info_Type)0x40)
+#endif
+
+#define PFC_STEAL_NUM   3
+#define PFC_STEAL_THRESHOLD   3
+
+
+#define SIZE_SEGMENT_NUM  3
+
+typedef struct Size_Segment {
+  unsigned int size_min;
+  unsigned int size_max;
+  unsigned int seg_index;
+  Boolean local_alloc;
+  unsigned int chunk_num;
+  unsigned int gran_shift_bits;
+  POINTER_SIZE_INT granularity;
+  POINTER_SIZE_INT gran_low_mask;
+  POINTER_SIZE_INT gran_high_mask;
+} Size_Segment;
+
+inline Size_Segment *wspace_get_size_seg(Wspace *wspace, unsigned int size)
+{
+  Size_Segment **size_segs = wspace->size_segments;
+  
+  unsigned int seg_index = 0;
+  for(; seg_index < SIZE_SEGMENT_NUM; ++seg_index)
+    if(size <= size_segs[seg_index]->size_max) break;
+  assert(seg_index < SIZE_SEGMENT_NUM);
+  assert(size_segs[seg_index]->seg_index == seg_index);
+  return size_segs[seg_index];
+}
+
+inline Chunk_Header *wspace_get_pfc(Wspace *wspace, unsigned int seg_index, unsigned int index)
+{
+  /*1. Search PFC pool*/
+  Pool *pfc_pool = wspace->pfc_pools[seg_index][index];
+  Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
+
+  /*2. If in concurrent sweeping phase, search PFC backup pool*/
+  if(!chunk && gc_is_concurrent_sweep_phase()){
+    pfc_pool = wspace->pfc_pools_backup[seg_index][index];
+    chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
+  }
+  assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
+  return chunk;
+}
+
+inline void wspace_put_pfc(Wspace *wspace, Chunk_Header *chunk)
+{
+  unsigned int size = chunk->slot_size;
+  assert(chunk && (size <= SUPER_OBJ_THRESHOLD));
+  assert(chunk->base && chunk->alloc_num);
+  assert(chunk->alloc_num < chunk->slot_num);
+  assert(chunk->slot_index < chunk->slot_num);
+  
+  Size_Segment **size_segs = wspace->size_segments;
+  chunk->status = CHUNK_NORMAL | CHUNK_NEED_ZEROING;
+  
+  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
+    if(size > size_segs[i]->size_max) continue;
+    assert(!(size & size_segs[i]->gran_low_mask));
+    assert(size > size_segs[i]->size_min);
+    unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_segs[i]);
+    Pool *pfc_pool;
+    pfc_pool = wspace->pfc_pools[i][index];
+    pool_put_entry(pfc_pool, chunk);
+    return;
+  }
+}
+
+inline void wspace_put_pfc_backup(Wspace *wspace, Chunk_Header *chunk)
+{
+  unsigned int size = chunk->slot_size;
+  assert(chunk && (size <= SUPER_OBJ_THRESHOLD));
+  assert(chunk->base && chunk->alloc_num);
+  assert(chunk->alloc_num < chunk->slot_num);
+  assert(chunk->slot_index < chunk->slot_num);
+  
+  Size_Segment **size_segs = wspace->size_segments;
+  chunk->status = CHUNK_NORMAL | CHUNK_NEED_ZEROING;
+  
+  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
+    if(size > size_segs[i]->size_max) continue;
+    assert(!(size & size_segs[i]->gran_low_mask));
+    assert(size > size_segs[i]->size_min);
+    unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_segs[i]);
+    //FIXME: concurrent sweep
+    Pool *pfc_pool;
+    pfc_pool = wspace->pfc_pools_backup[i][index];
+    pool_put_entry(pfc_pool, chunk);
+    return;
+  }
+}
+
+
+/* Function for concurrent sweeping. In concurrent sweeping, PFC chunks are put back to
+   pfc_pools_backup instead of pfc_pools. */
+inline void wspace_backup_pfc(Wspace *wspace, Chunk_Header *chunk)
+{
+  unsigned int size = chunk->slot_size;
+  assert(chunk->base && chunk->alloc_num);
+  assert(chunk && (size <= SUPER_OBJ_THRESHOLD));
+  assert(chunk->slot_index < chunk->slot_num);
+  
+  Size_Segment **size_segs = wspace->size_segments;
+  chunk->status = CHUNK_NORMAL | CHUNK_NEED_ZEROING;
+  
+  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
+    if(size > size_segs[i]->size_max) continue;
+    assert(!(size & size_segs[i]->gran_low_mask));
+    assert(size > size_segs[i]->size_min);
+    unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_segs[i]);
+    Pool *pfc_pool = wspace->pfc_pools_backup[i][index];
+    pool_put_entry(pfc_pool, chunk);
+    return;
+  }
+}
+
+inline void wspace_rebuild_chunk_chain(Wspace *wspace)
+{
+  Chunk_Header_Basic *wspace_ceiling = (Chunk_Header_Basic*)space_heap_end((Space*)wspace);
+  Chunk_Header_Basic *prev_chunk = (Chunk_Header_Basic*)space_heap_start((Space*)wspace);
+  Chunk_Header_Basic *chunk = prev_chunk->adj_next;
+  prev_chunk->adj_prev = NULL;
+  
+  while(chunk < wspace_ceiling){
+    chunk->adj_prev = prev_chunk;
+    prev_chunk = chunk;
+    chunk = chunk->adj_next;
+  }
+}
+
+inline Chunk_Header_Basic* chunk_pool_get_chunk(Pool* chunk_pool)
+{
+  return (Chunk_Header_Basic*)pool_get_entry(chunk_pool);
+}
+
+inline void chunk_pool_put_chunk(Pool* chunk_pool, Chunk_Header* chunk)
+{
+  pool_put_entry(chunk_pool,chunk);
+  return;
+}
+
+inline void wspace_register_used_chunk(Wspace* wspace, Chunk_Header* chunk)
+{
+  pool_put_entry(wspace->used_chunk_pool, chunk);
+  return;
+}
+
+inline void wspace_clear_used_chunk_pool(Wspace* wspace)
+{
+  pool_empty(wspace->used_chunk_pool);
+  return;
+}
+
+inline void wspace_register_unreusable_normal_chunk(Wspace* wspace, Chunk_Header* chunk)
+{
+  pool_put_entry(wspace->unreusable_normal_chunk_pool, chunk);
+  return;
+}
+
+inline Chunk_Header*  wspace_get_unreusable_normal_chunk(Wspace* wspace)
+{
+  return (Chunk_Header*)pool_get_entry(wspace->unreusable_normal_chunk_pool);
+}
+
+inline void wspace_register_live_abnormal_chunk(Wspace* wspace, Chunk_Header* chunk)
+{
+  pool_put_entry(wspace->live_abnormal_chunk_pool, chunk);
+  return;
+}
+
+inline Chunk_Header* wspace_get_live_abnormal_chunk(Wspace* wspace)
+{
+  return (Chunk_Header*)pool_get_entry(wspace->live_abnormal_chunk_pool);
+}
+
+extern void wspace_init_chunks(Wspace *wspace);
+extern void wspace_clear_chunk_list(Wspace* wspace);
+
+extern void wspace_put_free_chunk(Wspace *wspace, Free_Chunk *chunk);
+void wspace_put_free_chunk_to_head(Wspace *wspace, Free_Chunk *chunk);
+void wspace_put_free_chunk_to_tail(Wspace *wspace, Free_Chunk *chunk);
+
+extern Free_Chunk *wspace_get_normal_free_chunk(Wspace *wspace);
+extern Free_Chunk *wspace_get_abnormal_free_chunk(Wspace *wspace, unsigned int chunk_size);
+extern Free_Chunk *wspace_get_hyper_free_chunk(Wspace *wspace, unsigned int chunk_size, Boolean is_normal_chunk);
+
+extern void wspace_init_pfc_pool_iterator(Wspace *wspace);
+extern Pool *wspace_grab_next_pfc_pool(Wspace *wspace);
+void wspace_exchange_pfc_pool(Wspace *wspace);
+
+extern Chunk_Header *wspace_steal_pfc(Wspace *wspace, unsigned int index);
+
+extern POINTER_SIZE_INT free_mem_in_wspace(Wspace *wspace, Boolean show_chunk_info);
+
+extern void zeroing_free_chunk(Free_Chunk *chunk);
+
+extern void gc_clear_collector_local_chunks(GC *gc);
+
+extern void wspace_collect_free_chunks_to_list(Wspace *wspace, Free_Chunk_List *list);
+
+#endif //#ifndef _SSPACE_CHUNK_H_

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,237 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "wspace_chunk.h"
+#include "wspace_alloc.h"
+#include "wspace_mark_sweep.h"
+#include "wspace_verify.h"
+
+
+#define PFC_SORT_NUM  8
+
+void wspace_decide_compaction_need(Wspace *wspace)
+{
+  POINTER_SIZE_INT free_mem_size = free_mem_in_wspace(wspace, FALSE);
+  float free_mem_ratio = (float)free_mem_size / wspace->committed_heap_size;
+
+#ifdef USE_MARK_SWEEP_GC
+  if(!gc_mark_is_concurrent() && (free_mem_ratio > SSPACE_COMPACT_RATIO) && (wspace->gc->cause != GC_CAUSE_RUNTIME_FORCE_GC)){
+#else
+  if(gc_match_kind(wspace->gc, MAJOR_COLLECTION)){
+#endif
+    wspace->need_compact = wspace->move_object = TRUE;
+  } else {
+    wspace->need_compact = wspace->move_object = FALSE;
+  }
+}
+
+static inline void sorted_chunk_bucket_add_entry(Chunk_Header **head, Chunk_Header **tail, Chunk_Header *chunk)
+{
+  chunk->prev = NULL; /* the pfc-pool prev/next links are reused to build the sorted bucket list */
+  
+  if(!*head){
+    assert(!*tail);
+    chunk->next = NULL;
+    *head = *tail = chunk;
+    return;
+  }
+  
+  assert(*tail);
+  chunk->next = *head;
+  (*head)->prev = chunk;
+  *head = chunk;
+}
+
+/* One assumption: pfc_pool is not empty */
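+/* Chunks are binned by how full they are: with PFC_SORT_NUM = 8, a chunk with, say,
+   alloc_num = 30 out of slot_num = 100 lands in bucket (30*8-1)/100 = 2 (illustrative
+   figures; the code assumes every chunk in one pfc_pool has the same slot_num). */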
+static Boolean pfc_pool_roughly_sort(Pool *pfc_pool, Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
+{
+  Chunk_Header *bucket_head[PFC_SORT_NUM];  /* Sorted chunk buckets' heads */
+  Chunk_Header *bucket_tail[PFC_SORT_NUM];  /* Sorted chunk buckets' tails */
+  unsigned int slot_num;
+  unsigned int chunk_num = 0;
+  unsigned int slot_alloc_num = 0;
+  
+  /* Init buckets' heads and tails */
+  memset(bucket_head, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);
+  memset(bucket_tail, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);
+  
+  /* Roughly sort chunks in pfc_pool */
+  pool_iterator_init(pfc_pool);
+  Chunk_Header *chunk = (Chunk_Header*)pool_iterator_next(pfc_pool);
+  if(chunk) slot_num = chunk->slot_num;
+  while(chunk){
+    ++chunk_num;
+    assert(chunk->alloc_num);
+    slot_alloc_num += chunk->alloc_num;
+    Chunk_Header *next_chunk = chunk->next;
+    unsigned int bucket_index = (chunk->alloc_num*PFC_SORT_NUM-1) / slot_num;
+    assert(bucket_index < PFC_SORT_NUM);
+    sorted_chunk_bucket_add_entry(&bucket_head[bucket_index], &bucket_tail[bucket_index], chunk);
+    chunk = next_chunk;
+  }
+  
+  /* Empty the pfc pool because some chunks in this pool will be free after compaction */
+  pool_empty(pfc_pool);
+  
+  /* If we can't get a free chunk back after compaction, there is no need to compact.
+   * This condition also covers the case where the pfc pool holds only one chunk, in which case compaction is likewise pointless.
+   */
+  if(slot_num*(chunk_num-1) <= slot_alloc_num){
+    for(unsigned int i = 0; i < PFC_SORT_NUM; i++){
+      Chunk_Header *chunk = bucket_head[i];
+      while(chunk){
+        Chunk_Header *next_chunk = chunk->next;
+        pool_put_entry(pfc_pool, chunk);
+        chunk = next_chunk;
+      }
+    }
+    return FALSE;
+  }
+  
+  /* Link the sorted chunk buckets into one single ordered bidirectional list */
+  Chunk_Header *head = NULL;
+  Chunk_Header *tail = NULL;
+  for(unsigned int i = PFC_SORT_NUM; i--;){
+    assert((head && tail) || (!head && !tail));
+    assert((bucket_head[i] && bucket_tail[i]) || (!bucket_head[i] && !bucket_tail[i]));
+    if(!bucket_head[i]) continue;
+    if(!tail){
+      head = bucket_head[i];
+      tail = bucket_tail[i];
+    } else {
+      tail->next = bucket_head[i];
+      bucket_head[i]->prev = tail;
+      tail = bucket_tail[i];
+    }
+  }
+  
+  assert(head && tail);
+  *least_free_chunk = head;
+  *most_free_chunk = tail;
+  
+  return TRUE;
+}
+
+static inline Chunk_Header *get_least_free_chunk(Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
+{
+  if(!*least_free_chunk){
+    assert(!*most_free_chunk);
+    return NULL;
+  }
+  Chunk_Header *result = *least_free_chunk;
+  *least_free_chunk = (*least_free_chunk)->next;
+  if(*least_free_chunk)
+    (*least_free_chunk)->prev = NULL;
+  else
+    *most_free_chunk = NULL;
+  return result;
+}
+static inline Chunk_Header *get_most_free_chunk(Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
+{
+  if(!*most_free_chunk){
+    assert(!*least_free_chunk);
+    return NULL;
+  }
+  Chunk_Header *result = *most_free_chunk;
+  *most_free_chunk = (*most_free_chunk)->prev;
+  if(*most_free_chunk)
+    (*most_free_chunk)->next = NULL;
+  else
+    *least_free_chunk = NULL;
+  assert(!result->next);
+  return result;
+}
+
+static inline void move_obj_between_chunks(Chunk_Header **dest_ptr, Chunk_Header *src)
+{
+  Chunk_Header *dest = *dest_ptr;
+  assert(dest->slot_size == src->slot_size);
+  
+  unsigned int slot_size = dest->slot_size;
+  unsigned int alloc_num = src->alloc_num;
+  assert(alloc_num);
+  
+  while(alloc_num && dest){
+    Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(src);
+    void *target = alloc_in_chunk(dest);
+
+    if(dest->slot_index == MAX_SLOT_INDEX){
+      dest->status = CHUNK_USED | CHUNK_NORMAL;
+      dest = NULL;
+    }
+    
+    assert(p_obj && target);
+    memcpy(target, p_obj, slot_size);
+#ifdef SSPACE_VERIFY
+    wspace_modify_mark_in_compact(target, p_obj, slot_size);
+#endif
+    obj_set_fw_in_oi(p_obj, target);
+    --alloc_num;
+  }
+  
+  /* dest might be set to NULL, so we use *dest_ptr here */
+  assert((*dest_ptr)->alloc_num <= (*dest_ptr)->slot_num);
+  src->alloc_num = alloc_num;
+  if(!dest){
+    assert((*dest_ptr)->alloc_num == (*dest_ptr)->slot_num);
+    *dest_ptr = NULL;
+    clear_free_slot_in_table(src->table, src->slot_index);
+  }
+}
+
+void wspace_compact(Collector *collector, Wspace *wspace)
+{
+  Chunk_Header *least_free_chunk, *most_free_chunk;
+  Pool *pfc_pool = wspace_grab_next_pfc_pool(wspace);
+  
+  for(; pfc_pool; pfc_pool = wspace_grab_next_pfc_pool(wspace)){
+    if(pool_is_empty(pfc_pool)) continue;
+    Boolean pfc_pool_need_compact = pfc_pool_roughly_sort(pfc_pool, &least_free_chunk, &most_free_chunk);
+    if(!pfc_pool_need_compact) continue;
+    
+    Chunk_Header *dest = get_least_free_chunk(&least_free_chunk, &most_free_chunk);
+    Chunk_Header *src = get_most_free_chunk(&least_free_chunk, &most_free_chunk);
+    Boolean src_is_new = TRUE;
+    while(dest && src){
+      if(src_is_new)
+        src->slot_index = 0;
+      chunk_depad_last_index_word(src);
+      move_obj_between_chunks(&dest, src);
+      if(!dest)
+        dest = get_least_free_chunk(&least_free_chunk, &most_free_chunk);
+      if(!src->alloc_num){
+        collector_add_free_chunk(collector, (Free_Chunk*)src);
+        src = get_most_free_chunk(&least_free_chunk, &most_free_chunk);
+        src_is_new = TRUE;
+      } else {
+        src_is_new = FALSE;
+      }
+    }
+    
+    /* Rebuild the pfc_pool */
+    if(dest)
+      wspace_put_pfc(wspace, dest);
+    if(src){
+      //chunk_pad_last_index_word(src, cur_alloc_mask);
+      pfc_reset_slot_index(src);
+      wspace_put_pfc(wspace, src);
+    }
+  }
+}
+
+
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_concurrent_gc_stats.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_concurrent_gc_stats.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_concurrent_gc_stats.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_concurrent_gc_stats.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,201 @@
+#ifdef WSPACE_CONCURRENT_GC_STATS
+
+#include "wspace_alloc.h"
+#include "wspace_mark_sweep.h"
+#include "wspace_verify.h"
+#include "gc_ms.h"
+#include "../gen/gen.h"
+#include "../thread/collector.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+#include "../common/fix_repointed_refs.h"
+#include "../common/gc_concurrent.h"
+
+
+
+static unsigned int total_object_num = 0;
+static unsigned int total_live_obj_num = 0;
+static unsigned int total_float_obj_num = 0;
+static unsigned int total_live_new_obj_num = 0;
+static unsigned int total_float_new_obj_num = 0;
+
+static Chunk_Header_Basic *volatile next_chunk_for_scanning;
+
+
+static void wspace_init_chunk_for_scanning(Wspace *wspace)
+{
+  next_chunk_for_scanning = (Chunk_Header_Basic*)space_heap_start((Space*)wspace);
+}
+
+
+static void normal_chunk_scanning(Chunk_Header *chunk)
+{
+  chunk->slot_index = 0;
+  chunk_depad_last_index_word(chunk);
+  
+  unsigned int alloc_num = chunk->alloc_num;
+  assert(alloc_num);
+  
+  if(alloc_num == chunk->slot_num){  /* Filled with objects */
+    unsigned int slot_size = chunk->slot_size;
+    Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)slot_index_to_addr(chunk, 0);
+    for(unsigned int i = alloc_num; i--;){
+      total_object_num ++;
+      if(obj_is_marked_in_vt(p_obj)){
+        total_live_obj_num ++;
+        if(p_obj->obj_info & NEW_OBJ_MASK){
+          p_obj->obj_info &= ~NEW_OBJ_MASK;
+          total_live_new_obj_num ++;
+        }
+      }else{
+        total_float_obj_num ++;        
+        if(p_obj->obj_info & NEW_OBJ_MASK){
+          p_obj->obj_info &= ~NEW_OBJ_MASK;
+          total_float_new_obj_num ++;
+        }
+      }
+      p_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj + slot_size);
+    }
+  } else {  /* Chunk is not full */
+    while(alloc_num){
+      Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(chunk);
+
+      total_object_num ++;
+      if(obj_is_marked_in_vt(p_obj)){
+        total_live_obj_num ++;
+        if(p_obj->obj_info & NEW_OBJ_MASK){
+          p_obj->obj_info &= ~NEW_OBJ_MASK;
+          total_live_new_obj_num ++;
+        }
+      }else{
+        total_float_obj_num ++;        
+        if(p_obj->obj_info & NEW_OBJ_MASK){
+          p_obj->obj_info &= ~NEW_OBJ_MASK;
+          total_float_new_obj_num ++;
+        }
+      }
+      
+      assert(p_obj);
+      --alloc_num;
+    }
+  }
+  
+  if(chunk->alloc_num != chunk->slot_num){
+    //chunk_pad_last_index_word(chunk, cur_alloc_mask);
+    pfc_reset_slot_index(chunk);
+  }
+
+}
+
+static void abnormal_chunk_scanning(Chunk_Header *chunk)
+{
+  Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)chunk->base;
+  total_object_num ++;
+  if(obj_is_marked_in_vt(p_obj)){
+    total_live_obj_num ++;
+    if(p_obj->obj_info & NEW_OBJ_MASK){
+      p_obj->obj_info &= ~NEW_OBJ_MASK;
+      total_live_new_obj_num ++;
+    }
+  }else{
+    total_float_obj_num ++;        
+    if(p_obj->obj_info & NEW_OBJ_MASK){
+      p_obj->obj_info &= ~NEW_OBJ_MASK;
+      total_float_new_obj_num ++;
+    }
+  }
+}
+
+void wspace_scan_heap(GC* gc)
+{
+  Wspace *wspace = gc_get_wspace(gc);
+  total_object_num = 0;
+  total_live_obj_num = 0;
+  total_float_obj_num = 0;
+  total_live_new_obj_num = 0;
+  total_float_new_obj_num = 0;
+
+  wspace_init_chunk_for_scanning(wspace);
+
+  Chunk_Header_Basic *chunk = wspace_grab_next_chunk(wspace, &next_chunk_for_scanning, FALSE);
+  
+  while(chunk){
+    if(chunk->status & CHUNK_NORMAL)
+      normal_chunk_scanning((Chunk_Header*)chunk);
+    else if(chunk->status & CHUNK_ABNORMAL)
+      abnormal_chunk_scanning((Chunk_Header*)chunk);
+    
+    chunk = wspace_grab_next_chunk(wspace, &next_chunk_for_scanning, FALSE);
+  }
+
+  printf("total_object_num: %d \n", total_object_num);
+  printf("total_live_obj_num: %d \n", total_live_obj_num);
+  printf("total_float_obj_num: %d \n", total_float_obj_num);
+  printf("total_live_new_obj_num: %d \n", total_live_new_obj_num);
+  printf("total_float_new_obj_num: %d \n", total_float_new_obj_num);
+}
+
+
+
+static void normal_chunk_clear(Chunk_Header *chunk)
+{
+  chunk->slot_index = 0;
+  chunk_depad_last_index_word(chunk);
+  
+  unsigned int alloc_num = chunk->alloc_num;
+  assert(alloc_num);
+  
+  if(alloc_num == chunk->slot_num){  /* Filled with objects */
+    unsigned int slot_size = chunk->slot_size;
+    Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)slot_index_to_addr(chunk, 0);
+    for(unsigned int i = alloc_num; i--;){
+      if(p_obj->obj_info & NEW_OBJ_MASK){
+        p_obj->obj_info &= ~NEW_OBJ_MASK;
+      }
+      /* advance to the next slot */
+      p_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj + slot_size);
+    }
+  } else {  /* Chunk is not full */
+    while(alloc_num){
+      Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(chunk);
+      if(p_obj->obj_info & NEW_OBJ_MASK){
+        p_obj->obj_info &= ~NEW_OBJ_MASK;
+        total_live_new_obj_num ++;
+      }      
+      assert(p_obj);
+      --alloc_num;
+    }
+  }
+  
+  if(chunk->alloc_num != chunk->slot_num){
+    //chunk_pad_last_index_word(chunk, cur_alloc_mask);
+    pfc_reset_slot_index(chunk);
+  }
+
+}
+
+static void abnormal_chunk_clear(Chunk_Header *chunk)
+{
+  Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)chunk->base;
+  if(p_obj->obj_info & NEW_OBJ_MASK){
+    p_obj->obj_info &= ~NEW_OBJ_MASK;
+  }
+}
+
+void wspace_clear_heap(GC* gc)
+{
+  Wspace *wspace = gc_get_wspace(gc);
+
+  wspace_init_chunk_for_scanning(wspace);
+
+  Chunk_Header_Basic *chunk = wspace_grab_next_chunk(wspace, &next_chunk_for_scanning, FALSE);
+  
+  while(chunk){
+    if(chunk->status & CHUNK_NORMAL)
+      normal_chunk_clear((Chunk_Header*)chunk);
+    else if(chunk->status & CHUNK_ABNORMAL)
+      abnormal_chunk_clear((Chunk_Header*)chunk);
+    
+    chunk = wspace_grab_next_chunk(wspace, &next_chunk_for_scanning, FALSE);
+  }
+
+}
+#endif
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_concurrent_gc_stats.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_fallback_mark.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_fallback_mark.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_fallback_mark.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_fallback_mark.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,216 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "wspace_mark_sweep.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+static Wspace *wspace_in_fallback_marking;
+
+
+static FORCE_INLINE Boolean obj_mark_black(Partial_Reveal_Object *obj)
+{
+  if(obj_belongs_to_space(obj, (Space*)wspace_in_fallback_marking)){
+    Boolean marked_by_self = obj_mark_black_in_table(obj);
+
+#ifndef USE_MARK_SWEEP_GC
+    /* When fallback happens, some objects in MOS have their fw bit set, which is actually their mark bit from the last minor gc.
+     * If we don't clear it, objects that were not moved would be mistaken for moved objects in the coming fixing phase.
+     */
+    if(marked_by_self){
+      Obj_Info_Type oi = obj->obj_info;
+      Obj_Info_Type new_oi = oi & DUAL_MARKBITS_MASK;
+      while(new_oi != oi){
+        Obj_Info_Type temp = atomic_casptrsz((volatile Obj_Info_Type*)get_obj_info_addr(obj), new_oi, oi);
+        if(temp == oi) break;
+        oi = obj->obj_info;
+        new_oi = oi & DUAL_MARKBITS_MASK;
+      }
+    }
+#endif
+    return marked_by_self;
+  } else {
+    return obj_mark_in_vt(obj);
+  }
+}
+
+static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
+{
+  if( read_slot(p_ref) == NULL) return;
+  
+  collector_tracestack_push(collector, p_ref);
+}
+
+static FORCE_INLINE void scan_object(Collector *collector, REF *p_ref)
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  assert(p_obj);
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
+  
+  if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
+    assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
+    p_obj = obj_get_fw_in_oi(p_obj);
+    assert(p_obj);
+    write_slot(p_ref, p_obj);
+  }
+  
+  if(!obj_mark_black(p_obj))
+    return;
+  
+  if(!object_has_ref_field(p_obj)) return;
+  
+  if(object_is_array(p_obj)){   /* scan array object */
+    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len;
+    
+    REF *p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    for (unsigned int i = 0; i < array_length; i++)
+      scan_slot(collector, p_ref+i);
+    
+    return;
+  }
+  
+  /* scan non-array object */
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+  
+  for(unsigned int i=0; i<num_refs; i++){
+    REF *p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+    scan_slot(collector, p_ref);
+  }
+
+#ifndef BUILD_IN_REFERENT
+  scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+
+}
+
+static void trace_object(Collector *collector, REF *p_ref)
+{
+  scan_object(collector, p_ref);
+  
+  Vector_Block *trace_stack = collector->trace_stack;
+  while(!vector_stack_is_empty(trace_stack)){
+    p_ref = (REF*)vector_stack_pop(trace_stack);
+    scan_object(collector, p_ref);
+    trace_stack = collector->trace_stack;
+  }
+}
+
+/* NOTE:: This is another marking version: marking in the color bitmap table.
+   Originally, we had to mark an object before pushing it onto the markstack, to
+   guarantee that an object appears only once in the markstack. This in turn
+   guarantees that a repointed ref slot appears only once in the repset (slots
+   are put into the set when the object is scanned). If the same object were put
+   onto the markstack twice, it would be scanned twice and its ref slots recorded twice.
+   The problem occurs when a ref slot has already been updated with the new position:
+   the second time around, the value in the ref slot is no longer the old position as
+   expected, yet it is used to read the original object header for the forwarding
+   pointer. With the new value, it reads something nonsensical, since the object has
+   not been moved yet. This could be worked around if we wanted, but doing so would
+   require atomic instructions for marking, which is undesirable. So we abandoned
+   that design. We no longer use the repset to remember repointed slots.
+*/
+
+/* for marking phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+void wspace_fallback_mark_scan(Collector *collector, Wspace *wspace)
+{
+  GC *gc = collector->gc;
+  GC_Metadata *metadata = gc->metadata;
+  wspace_in_fallback_marking = wspace;
+  
+  /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  atomic_cas32(&num_finished_collectors, 0, num_active_collectors);
+  
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+  
+  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  
+  /* first step: copy all root objects to mark tasks.
+     FIXME:: can be done sequentially before coming here to eliminate atomic ops */
+  while(root_set){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      REF *p_ref = (REF*)*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+      
+      /* A root ref can't be NULL (a remset may have NULL ref entries, but this function is only for MAJOR_COLLECTION). */
+      assert(read_slot(p_ref) != NULL);
+      /* We have to mark the object before putting it into a mark task, because
+         two slots may contain the same object. It would then be scanned twice
+         and its ref slots recorded twice. The problem occurs after a ref slot
+         has been updated the first time with the new position: the second time,
+         the value in the ref slot is no longer the old position as expected.
+         This could be worked around if we wanted.
+      */
+      collector_tracestack_push(collector, p_ref);
+    }
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+  /* second step: iterate over the mark tasks and scan objects */
+  /* get a task buf for the mark stack */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
+  
+  while(mark_task){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task, iter)){
+      REF *p_ref = (REF*)*iter;
+      iter = vector_block_iterator_advance(mark_task, iter);
+      
+      /* FIXME:: we should not let mark_task become empty while working; others may want to steal from it.
+         Degenerate my stack into mark_task, and grab another mark_task. */
+      trace_object(collector, p_ref);
+    }
+   /* run out one task, put back to the pool and grab another task */
+   vector_stack_clear(mark_task);
+   pool_put_entry(metadata->free_task_pool, mark_task);
+   mark_task = pool_get_entry(metadata->mark_task_pool);
+  }
+  
+  /* termination detection. This is also a barrier.
+     NOTE:: We can simply spin waiting for num_finished_collectors, because each
+     generated new task would surely be processed by its generating collector eventually.
+     So code below is only for load balance optimization. */
+  atomic_inc32(&num_finished_collectors);
+  while(num_finished_collectors != num_active_collectors){
+    if(!pool_is_empty(metadata->mark_task_pool)){
+      atomic_dec32(&num_finished_collectors);
+      goto retry;
+    }
+  }
+  
+  /* put back the last mark stack to the free pool */
+  mark_task = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(mark_task);
+  pool_put_entry(metadata->free_task_pool, mark_task);
+  collector->trace_stack = NULL;
+  
+  return;
+}
+
+void trace_obj_in_ms_fallback_marking(Collector *collector, void *p_ref)
+{
+  trace_object(collector, (REF*)p_ref);
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_fallback_mark.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,245 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "wspace_mark_sweep.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+static Wspace *wspace_in_marking;
+static FORCE_INLINE Boolean obj_mark_gray(Partial_Reveal_Object *obj)
+{
+  if(obj_belongs_to_space(obj, (Space*)wspace_in_marking))
+    return obj_mark_gray_in_table(obj);
+  else
+    return obj_mark_in_vt(obj);
+}
+
+static FORCE_INLINE Boolean obj_mark_black(Partial_Reveal_Object *obj)
+{
+  if(obj_belongs_to_space(obj, (Space*)wspace_in_marking)){
+    Boolean marked_by_self = obj_mark_black_in_table(obj);
+
+#ifndef USE_MARK_SWEEP_GC
+    /* When fallback happens, some objects in MOS have their fw bit set, which is actually their mark bit from the last minor gc.
+     * If we don't clear it, objects that were not moved would be mistaken for moved objects in the coming fixing phase.
+     */
+    if(marked_by_self){
+      Obj_Info_Type oi = obj->obj_info;
+      Obj_Info_Type new_oi = oi & DUAL_MARKBITS_MASK;
+      while(new_oi != oi){
+        Obj_Info_Type temp = atomic_casptrsz((volatile Obj_Info_Type*)get_obj_info_addr(obj), new_oi, oi);
+        if(temp == oi) break;
+        oi = obj->obj_info;
+        new_oi = oi & DUAL_MARKBITS_MASK;
+      }
+    }
+#endif
+    return marked_by_self;
+  } else {
+    return obj_mark_in_vt(obj);
+  }
+}
+
+
+/* The caller must be in places where alloc color and mark color haven't been flipped */
+Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj)
+{
+  unsigned int index_in_word;
+  volatile POINTER_SIZE_INT *p_color_word = get_color_word_in_table(obj, index_in_word);
+  assert(p_color_word);
+  
+  POINTER_SIZE_INT color_word = *p_color_word;
+  POINTER_SIZE_INT mark_color = cur_mark_gray_color << index_in_word;
+  
+  return (Boolean)(color_word & mark_color);
+}
+
+static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if( p_obj == NULL) return;
+  
+  assert(address_belongs_to_gc_heap(p_obj, collector->gc));
+  if(obj_mark_gray(p_obj)){
+    assert(p_obj);
+    collector_tracestack_push(collector, p_obj);
+  }
+}
+
+static FORCE_INLINE void scan_object(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
+  
+  Partial_Reveal_VTable *vtable = decode_vt(obj_get_vt(p_obj));
+  if(TRACE_JLC_VIA_VTABLE)
+    if(vtable->vtmark == VT_UNMARKED) {
+      vtable->vtmark = VT_MARKED;
+      if(obj_mark_black(vtable->jlC))
+        collector_tracestack_push(collector, vtable->jlC);
+    }
+  
+  if(!object_has_ref_field(p_obj)) return;
+  
+  REF *p_ref;
+  
+  if(object_is_array(p_obj)){   /* scan array object */
+    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len;
+    
+    p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    for (unsigned int i = 0; i < array_length; i++)
+      scan_slot(collector, p_ref+i);
+    
+    return;
+  }
+  
+  /* scan non-array object */
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+  
+  for(unsigned int i=0; i<num_refs; i++){
+    p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+    scan_slot(collector, p_ref);
+  }
+
+#ifndef BUILD_IN_REFERENT
+  scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+
+}
+
+static void trace_object(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+  scan_object(collector, p_obj);
+  obj_mark_black(p_obj);
+  
+  Vector_Block *trace_stack = collector->trace_stack;
+  while(!vector_stack_is_empty(trace_stack)){
+    p_obj = (Partial_Reveal_Object*)vector_stack_pop(trace_stack);
+    scan_object(collector, p_obj);
+    obj_mark_black(p_obj);
+    trace_stack = collector->trace_stack;
+  }
+}
+
+/* NOTE:: This is another marking version: marking in the color bitmap table.
+   Originally, we had to mark an object before pushing it onto the markstack, to
+   guarantee that an object appears only once in the markstack. This in turn
+   guarantees that a repointed ref slot appears only once in the repset (slots
+   are put into the set when the object is scanned). If the same object were put
+   onto the markstack twice, it would be scanned twice and its ref slots recorded twice.
+   The problem occurs when a ref slot has already been updated with the new position:
+   the second time around, the value in the ref slot is no longer the old position as
+   expected, yet it is used to read the original object header for the forwarding
+   pointer. With the new value, it reads something nonsensical, since the object has
+   not been moved yet. This could be worked around if we wanted, but doing so would
+   require atomic instructions for marking, which is undesirable. So we abandoned
+   that design. We no longer use the repset to remember repointed slots.
+*/
+
+/* for marking phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+void wspace_mark_scan(Collector *collector, Wspace *wspace)
+{
+  GC *gc = collector->gc;
+  GC_Metadata *metadata = gc->metadata;
+  wspace_in_marking = wspace;
+  
+  /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  atomic_cas32(&num_finished_collectors, 0, num_active_collectors);
+  
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+  
+  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  
+  /* first step: copy all root objects to mark tasks.
+     FIXME:: can be done sequentially before coming here to eliminate atomic ops */
+  while(root_set){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      REF *p_ref = (REF*)*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+      
+      Partial_Reveal_Object *p_obj = read_slot(p_ref);
+      /* A root ref can't be NULL (a remset may have NULL ref entries, but this function is only for MAJOR_COLLECTION). */
+      assert(p_obj != NULL);
+      /* We have to mark the object before putting it into a mark task, because
+         two slots may contain the same object. It would then be scanned twice
+         and its ref slots recorded twice. The problem occurs after a ref slot
+         has been updated the first time with the new position: the second time,
+         the value in the ref slot is no longer the old position as expected.
+         This could be worked around if we wanted.
+      */
+      assert(address_belongs_to_gc_heap(p_obj, gc));
+      if(obj_mark_gray(p_obj))
+        collector_tracestack_push(collector, p_obj);
+    }
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+  /* second step: iterate over the mark tasks and scan objects */
+  /* get a task buf for the mark stack */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
+  
+  while(mark_task){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task, iter)){
+      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
+      iter = vector_block_iterator_advance(mark_task, iter);
+      
+      /* FIXME:: we should not let mark_task become empty while working; others may want to steal from it.
+         Degenerate my stack into mark_task, and grab another mark_task. */
+      trace_object(collector, p_obj);
+    }
+   /* run out one task, put back to the pool and grab another task */
+   vector_stack_clear(mark_task);
+   pool_put_entry(metadata->free_task_pool, mark_task);
+   mark_task = pool_get_entry(metadata->mark_task_pool);
+  }
+  
+  /* termination detection. This is also a barrier.
+     NOTE:: We can simply spin waiting for num_finished_collectors, because each
+     generated new task would surely be processed by its generating collector eventually.
+     So code below is only for load balance optimization. */
+  atomic_inc32(&num_finished_collectors);
+  while(num_finished_collectors != num_active_collectors){
+    if(!pool_is_empty(metadata->mark_task_pool)){
+      atomic_dec32(&num_finished_collectors);
+      goto retry;
+    }
+  }
+  
+  /* put back the last mark stack to the free pool */
+  mark_task = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(mark_task);
+  pool_put_entry(metadata->free_task_pool, mark_task);
+  collector->trace_stack = NULL;
+  
+  return;
+}
+
+void trace_obj_in_ms_marking(Collector *collector, void *p_obj)
+{
+  obj_mark_gray((Partial_Reveal_Object*)p_obj);
+  trace_object(collector, (Partial_Reveal_Object*)p_obj);
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,226 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "wspace_mark_sweep.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+#include "../thread/marker.h"
+
+volatile Boolean need_terminate_concurrent_mark;
+
+Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj);
+
+static FORCE_INLINE void scan_slot(Collector* marker, REF *p_ref)
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if( p_obj == NULL) return;
+  
+  assert(address_belongs_to_gc_heap(p_obj, marker->gc));
+  if(obj_mark_gray_in_table(p_obj)){
+    assert(p_obj);
+    collector_tracestack_push((Collector*)marker, p_obj);
+  }  
+}
+
+static FORCE_INLINE void scan_object(Marker* marker, Partial_Reveal_Object *p_obj)
+{
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
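+  /* a dirty object is recorded in the dirty set and rescanned from there in a later pass, so presumably it can be skipped here */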
+  if(obj_is_dirty_in_table(p_obj)){ 
+    return;
+  }
+
+  if(!object_has_ref_field(p_obj)) return;
+
+  REF *p_ref;
+  
+  if(object_is_array(p_obj)){   /* scan array object */
+    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len;
+    
+    p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    for (unsigned int i = 0; i < array_length; i++)
+      scan_slot((Collector*)marker, p_ref+i);
+    
+    return;
+  }
+  
+  /* scan non-array object */
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+  
+  for(unsigned int i=0; i<num_refs; i++){
+    p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+    scan_slot((Collector*)marker, p_ref);
+  }
+
+#ifndef BUILD_IN_REFERENT
+  scan_weak_reference((Collector*)marker, p_obj, scan_slot);
+#endif
+
+}
+
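+/* Depth-first tracing: scan the given object, mark it black in the table, then drain the marker's trace stack the same way. */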
+static void trace_object(Marker* marker, Partial_Reveal_Object *p_obj)
+{
+  scan_object(marker, p_obj);
+  obj_mark_black_in_table(p_obj);
+  
+  Vector_Block *trace_stack = marker->trace_stack;
+  while(!vector_stack_is_empty(trace_stack)){
+    p_obj = (Partial_Reveal_Object*)vector_stack_pop(trace_stack);
+    scan_object(marker, p_obj);
+    obj_mark_black_in_table(p_obj);    
+    trace_stack = marker->trace_stack;
+  }
+}
+
+/* for marking phase termination detection */
+void wspace_mark_scan_mostly_concurrent_reset()
+{ need_terminate_concurrent_mark = FALSE; }
+
+void wspace_mark_scan_mostly_concurrent_terminate()
+{ need_terminate_concurrent_mark = TRUE; }
+
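+/* Marking may terminate once a terminate request has been posted or the global dirty set pool has drained. */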
+static Boolean concurrent_mark_need_terminating(GC* gc)
+{
+  if(need_terminate_concurrent_mark) return TRUE;
+    
+  GC_Metadata *metadata = gc->metadata;
+  return pool_is_empty(metadata->gc_dirty_set_pool);
+}
+
+static volatile unsigned int num_active_markers = 0;
+
+void wspace_mark_scan_mostly_concurrent(Marker* marker)
+{
+  int64 time_mark_start = time_now();
+  GC *gc = marker->gc;
+  GC_Metadata *metadata = gc->metadata;
+  
+  /* register this marker as active; the counter feeds the termination barrier below, and the first marker to arrive gets current_thread_id == 0 */
+  unsigned int current_thread_id = atomic_inc32(&num_active_markers);
+
+  unsigned int num_dirtyset_slot = 0;  
+  
+  marker->trace_stack = free_task_pool_get_entry(metadata);
+  
+  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  
+  /* first step: copy all root objects to mark tasks.*/
+  while(root_set){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      REF *p_ref = (REF *)*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+      
+      Partial_Reveal_Object *p_obj = read_slot(p_ref);
+      /* root refs can't be NULL (a remset may have NULL ref entries, but this function is only used for MAJOR_COLLECTION) */
+      assert(p_obj!=NULL);
+      assert(address_belongs_to_gc_heap(p_obj, gc));
+      if(obj_mark_gray_in_table(p_obj))
+        collector_tracestack_push((Collector*)marker, p_obj);
+    }
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */
+  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
+  
+  marker_notify_mark_root_done(marker);
+
+  marker->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+
+  /* second step: mark dirty pool */
+
+  Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
+
+  while(dirty_set){
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
+    while(!vector_block_iterator_end(dirty_set,iter)){
+      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+      iter = vector_block_iterator_advance(dirty_set,iter);
+
+      assert(p_obj!=NULL); //FIXME: restrict condition?
+      
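+      /* clear the dirty and mark bits so the object is re-grayed and re-traced, presumably to pick up reference fields written after its previous scan */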
+      obj_clear_dirty_in_table(p_obj);
+      obj_clear_mark_in_table(p_obj);
+
+      if(obj_mark_gray_in_table(p_obj))
+        collector_tracestack_push((Collector*)marker, p_obj);
+
+      num_dirtyset_slot ++;
+    } 
+    vector_block_clear(dirty_set);
+    pool_put_entry(metadata->free_set_pool, dirty_set);
+    dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
+  }
+
+  /* put back the last trace_stack task */
+  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
+
+  /* third step: iterate over the mark tasks and scan objects */
+  marker->trace_stack = free_task_pool_get_entry(metadata);
+
+  
+  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
+  
+  while(mark_task){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task,iter)){
+      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
+      iter = vector_block_iterator_advance(mark_task,iter);
+      trace_object(marker, p_obj);      
+    }
+    /* drained this task block: put it back to the free pool and grab another */
+    vector_stack_clear(mark_task);
+    pool_put_entry(metadata->free_task_pool, mark_task);
+    mark_task = pool_get_entry(metadata->mark_task_pool);
+  }
+
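+  /* the first marker to register (current_thread_id == 0) prepares the dirty set, presumably flushing the remaining mutator-local dirty blocks into the global pool before the termination check below */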
+  if(current_thread_id == 0){
+    gc_prepare_dirty_set(marker->gc);
+  }
+
+  /* conditions to terminate marking:
+     1. all threads have finished their current jobs;
+     2. the flag to terminate concurrent marking is set.
+  */
+  atomic_dec32(&num_active_markers);
+  while(num_active_markers != 0 || !concurrent_mark_need_terminating(gc)){
+    if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->gc_dirty_set_pool)){
+      atomic_inc32(&num_active_markers);
+      goto retry;
+    }
+  }
+  
+  /* put back the last mark stack to the free pool */
+  mark_task = (Vector_Block*)marker->trace_stack;
+  vector_stack_clear(mark_task);
+  pool_put_entry(metadata->free_task_pool, mark_task);
+  marker->trace_stack = NULL;
+  
+  int64 time_mark = time_now() - time_mark_start;
+  marker->time_mark += time_mark;
+  marker->num_dirty_slots_traced += num_dirtyset_slot;
+  return;
+}
+
+void trace_obj_in_ms_mostly_concurrent_mark(Collector *collector, void *p_obj)
+{
+  obj_mark_gray_in_table((Partial_Reveal_Object*)p_obj);
+  trace_object((Marker*)collector, (Partial_Reveal_Object *)p_obj);
+}
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,226 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+#include "wspace_mark_sweep.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+#include "../thread/marker.h"
+
+Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj);
+
+static FORCE_INLINE void scan_slot(Collector* marker, REF *p_ref)
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if( p_obj == NULL) return;
+  
+  assert(address_belongs_to_gc_heap(p_obj, marker->gc));
+  if(obj_mark_gray_in_table(p_obj)){
+    assert(p_obj);
+    collector_tracestack_push((Collector*)marker, p_obj);
+  }  
+}
+
+static FORCE_INLINE void scan_object(Marker* marker, Partial_Reveal_Object *p_obj)
+{
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
+
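+  /* a dirty object is expected to be black already (see the assert below); it is handled through the dirty object snapshot sets, so presumably it needn't be rescanned here */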
+  if(obj_is_dirty_in_table(p_obj)){ 
+    assert(obj_is_mark_black_in_table(p_obj));
+    return;
+  }
+
+  if(!object_has_ref_field(p_obj)) return;
+
+  REF *p_ref;
+  
+  if(object_is_array(p_obj)){   /* scan array object */
+    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len;
+    
+    p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    for (unsigned int i = 0; i < array_length; i++)
+      scan_slot((Collector*)marker, p_ref+i);
+    
+    return;
+  }
+  
+  /* scan non-array object */
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+  
+  for(unsigned int i=0; i<num_refs; i++){
+    p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+    scan_slot((Collector*)marker, p_ref);
+  }
+
+#ifndef BUILD_IN_REFERENT
+  scan_weak_reference_direct((Collector*)marker, p_obj, scan_slot);
+#endif
+
+}
+
+static void trace_object(Marker* marker, Partial_Reveal_Object *p_obj)
+{
+  scan_object(marker, p_obj);
+  obj_mark_black_in_table(p_obj);
+  
+  Vector_Block *trace_stack = marker->trace_stack;
+  while(!vector_stack_is_empty(trace_stack)){
+    p_obj = (Partial_Reveal_Object*)vector_stack_pop(trace_stack);
+    scan_object(marker, p_obj);    
+    obj_mark_black_in_table(p_obj);
+    trace_stack = marker->trace_stack;
+  }
+}
+
+static Boolean concurrent_mark_need_terminating(GC* gc)
+{
+  GC_Metadata *metadata = gc->metadata;
+  return gc_local_dirtyset_is_empty(gc) && pool_is_empty(metadata->gc_dirty_set_pool);
+}
+
+/* for marking phase termination detection */
+static volatile unsigned int num_active_markers = 0;
+
+void wspace_mark_scan_concurrent(Marker* marker)
+{
+  int64 time_mark_start = time_now();
+  GC *gc = marker->gc;
+  GC_Metadata *metadata = gc->metadata;
+  
+  /* register this marker as active; the counter feeds the termination barrier below */
+  atomic_inc32(&num_active_markers);
+  
+  marker->trace_stack = free_task_pool_get_entry(metadata);
+  
+  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  
+  /* first step: copy all root objects to mark tasks.*/
+  while(root_set){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      REF *p_ref = (REF *)*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+      
+      Partial_Reveal_Object *p_obj = read_slot(p_ref);
+      /* root refs can't be NULL (a remset may have NULL ref entries, but this function is only used for MAJOR_COLLECTION) */
+      assert(p_obj!=NULL);
+      assert(address_belongs_to_gc_heap(p_obj, gc));
+      if(obj_mark_gray_in_table(p_obj))
+        collector_tracestack_push((Collector*)marker, p_obj);
+    }
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */
+  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
+  
+  marker_notify_mark_root_done(marker);
+
+  marker->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+  
+  /* second step: mark dirty object snapshot pool */
+  Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
+
+  while(dirty_set){
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
+    while(!vector_block_iterator_end(dirty_set,iter)){
+      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+      iter = vector_block_iterator_advance(dirty_set,iter);
+
+      assert(p_obj!=NULL); //FIXME: restrict?
+      if(obj_mark_gray_in_table(p_obj))
+        collector_tracestack_push((Collector*)marker, p_obj);
+    } 
+    vector_block_clear(dirty_set);
+    pool_put_entry(metadata->free_set_pool, dirty_set);
+    dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
+  }
+
+  /* put back the last trace_stack task */
+  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
+
+  /* third step: iterate over the mark tasks and scan objects */
+  /* get a task buf for the mark stack */
+  marker->trace_stack = free_task_pool_get_entry(metadata);
+
+  
+  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
+  
+  while(mark_task){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task,iter)){
+      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
+      iter = vector_block_iterator_advance(mark_task,iter);
+      trace_object(marker, p_obj);      
+    }
+    /* drained this task block: put it back to the free pool and grab another */
+    vector_stack_clear(mark_task);
+    pool_put_entry(metadata->free_task_pool, mark_task);
+    mark_task = pool_get_entry(metadata->mark_task_pool);
+  }
+  
+  /* termination conditions:
+     1. all threads have finished their current jobs;
+     2. the local snapshot vectors are empty;
+     3. the global snapshot pool is empty.
+  */
+  atomic_dec32(&num_active_markers);
+  while(num_active_markers != 0 || !concurrent_mark_need_terminating(gc)){
+    if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->gc_dirty_set_pool)){
+      atomic_inc32(&num_active_markers);
+      goto retry; 
+    }else{
+      /*grab a block from mutator and begin tracing*/
+      POINTER_SIZE_INT thread_num = (POINTER_SIZE_INT)marker->thread_handle;
+      Vector_Block* local_dirty_set = gc_get_local_dirty_set(gc, (unsigned int)(thread_num + 1));      
+      /* 1. If the full bit of local_dirty_set is set, the block is full and will never be put into the
+            global snapshot pool, so it must be checked again for entries that are still unscanned. In this
+            case the share bit of local_dirty_set is not cleared, because the marker rescans it exclusively.
+         2. If the full bit is not set, the block is still in use by a mutator and may yet be put into the
+            global snapshot pool. In this case we simply clear the share bit of local_dirty_set.
+      */
+      if(local_dirty_set != NULL){
+        atomic_inc32(&num_active_markers);
+        while(!vector_block_is_empty(local_dirty_set) || !vector_block_not_full_set_unshared(local_dirty_set)){
+          Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) vector_block_get_entry(local_dirty_set);        
+          if(obj_mark_gray_in_table(p_obj)) 
+            collector_tracestack_push((Collector*)marker, p_obj);
+        }
+        goto retry;
+      }
+    }
+  }
+  
+  /* put back the last mark stack to the free pool */
+  mark_task = (Vector_Block*)marker->trace_stack;
+  vector_stack_clear(mark_task);
+  pool_put_entry(metadata->free_task_pool, mark_task);
+  marker->trace_stack = NULL;
+  assert(pool_is_empty(metadata->gc_dirty_set_pool));
+
+  int64 time_mark = time_now() - time_mark_start;
+  marker->time_mark = time_mark;
+  
+  return;
+}
+
+void trace_obj_in_ms_concurrent_mark(Collector *collector, void *p_obj)
+{
+  obj_mark_gray_in_table((Partial_Reveal_Object*)p_obj);
+  trace_object((Marker*)collector, (Partial_Reveal_Object *)p_obj);
+}
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,380 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "wspace_alloc.h"
+#include "wspace_mark_sweep.h"
+#include "wspace_verify.h"
+#include "gc_ms.h"
+#include "../gen/gen.h"
+#include "../thread/collector.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+#include "../common/fix_repointed_refs.h"
+#include "../common/gc_concurrent.h"
+
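+/* Current allocation and marking colors/masks for the mark tables; ops_color_flip() is called after sweeping,
+   presumably to swap the meaning of the alloc and mark colors for the next collection cycle. */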
+POINTER_SIZE_INT cur_alloc_mask = (~MARK_MASK_IN_TABLE) & FLIP_COLOR_MASK_IN_TABLE;
+POINTER_SIZE_INT cur_mark_mask = MARK_MASK_IN_TABLE;
+POINTER_SIZE_INT cur_alloc_color = OBJ_COLOR_WHITE;
+POINTER_SIZE_INT cur_mark_gray_color = OBJ_COLOR_GRAY;
+POINTER_SIZE_INT cur_mark_black_color = OBJ_COLOR_BLACK;
+
+static Chunk_Header_Basic *volatile next_chunk_for_fixing;
+
+
+/******************** General interfaces for Mark-Sweep-Compact ***********************/
+void gc_init_collector_free_chunk_list(Collector *collector)
+{
+  Free_Chunk_List *list = (Free_Chunk_List*)STD_MALLOC(sizeof(Free_Chunk_List));
+  free_chunk_list_init(list);
+  collector->free_chunk_list = list;
+}
+
+/* Argument need_construct indicates whether the dual-direction (adj_prev/adj_next) chunk list needs constructing */
+Chunk_Header_Basic *wspace_grab_next_chunk(Wspace *wspace, Chunk_Header_Basic *volatile *shared_next_chunk, Boolean need_construct)
+{
+  Chunk_Header_Basic *cur_chunk = *shared_next_chunk;
+  
+  Chunk_Header_Basic *wspace_ceiling = (Chunk_Header_Basic*)space_heap_end((Space*)wspace);
+  while(cur_chunk < wspace_ceiling){
+    Chunk_Header_Basic *next_chunk = CHUNK_END(cur_chunk);
+    
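+    /* atomically claim cur_chunk by advancing the shared cursor to next_chunk; if another collector claimed it first, reload and retry */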
+    Chunk_Header_Basic *temp = (Chunk_Header_Basic*)atomic_casptr((volatile void**)shared_next_chunk, next_chunk, cur_chunk);
+    if(temp == cur_chunk){
+      if(need_construct && next_chunk < wspace_ceiling)
+        next_chunk->adj_prev = cur_chunk;
+      return cur_chunk;
+    }
+    cur_chunk = *shared_next_chunk;
+  }
+  
+  return NULL;
+}
+
+
+/******************** Interfaces for Forwarding ***********************/
+
+static void nos_init_block_for_forwarding(GC_Gen *gc_gen)
+{ blocked_space_block_iterator_init((Blocked_Space*)gc_get_nos(gc_gen)); }
+
+static inline void block_forward_live_objects(Collector *collector, Wspace *wspace, Block_Header *cur_block)
+{
+  Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)cur_block->base;
+  Partial_Reveal_Object *block_end = (Partial_Reveal_Object*)cur_block->free;
+  
+  for(; p_obj < block_end; p_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj + vm_object_size(p_obj))){
+    if(!obj_is_marked_in_vt(p_obj)) continue;
+    
+    obj_clear_dual_bits_in_vt(p_obj);
+    Partial_Reveal_Object *p_target_obj = collector_forward_object(collector, p_obj); /* Could be implemented with an optimized function */
+    if(!p_target_obj){
+      assert(collector->result == FALSE);
+      printf("Out of mem in forwarding nos!\n");
+      exit(0);
+    }
+  }
+}
+
+static void collector_forward_nos_to_wspace(Collector *collector, Wspace *wspace)
+{
+  Blocked_Space *nos = (Blocked_Space*)gc_get_nos((GC_Gen*)collector->gc);
+  Block_Header *cur_block = blocked_space_block_iterator_next(nos);
+  
+  /* We must iterate over all nos blocks to forward live objects in them */
+  while(cur_block){
+    block_forward_live_objects(collector, wspace, cur_block);
+    cur_block = blocked_space_block_iterator_next(nos);
+  }
+}
+
+
+/******************** Interfaces for Ref Fixing ***********************/
+
+static void wspace_init_chunk_for_ref_fixing(Wspace *wspace)
+{
+  next_chunk_for_fixing = (Chunk_Header_Basic*)space_heap_start((Space*)wspace);
+  next_chunk_for_fixing->adj_prev = NULL;
+}
+
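+/* Follows the forwarding pointer up to twice: once for nos-to-wspace forwarding, and once more in case the
+   forwarded object was compacted into another chunk afterwards (see the double_fix note in mark_sweep_wspace). */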
+static inline void slot_double_fix(REF *p_ref)
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if(!p_obj) return;
+  
+  if(obj_is_fw_in_oi(p_obj)){
+    p_obj = obj_get_fw_in_oi(p_obj);
+    assert(p_obj);
+    if(obj_is_fw_in_oi(p_obj)){
+      p_obj = obj_get_fw_in_oi(p_obj);
+      assert(p_obj);
+    }
+    write_slot(p_ref, p_obj);
+  }
+}
+
+static inline void object_double_fix_ref_slots(Partial_Reveal_Object *p_obj)
+{
+  if(!object_has_ref_field(p_obj)) return;
+  
+  /* scan array object */
+  if(object_is_array(p_obj)){
+    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+    assert(!obj_is_primitive_array(p_obj));
+    
+    int32 array_length = array->array_len;
+    REF *p_refs = (REF*)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    for(int i = 0; i < array_length; i++){
+      slot_double_fix(p_refs + i);
+    }
+    return;
+  }
+  
+  /* scan non-array object */
+  unsigned int num_refs = object_ref_field_num(p_obj);    
+  int* ref_iterator = object_ref_iterator_init(p_obj);
+    
+  for(unsigned int i=0; i<num_refs; i++){  
+    REF * p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);  
+    slot_double_fix(p_ref);
+  }    
+}
+
+static void normal_chunk_fix_repointed_refs(Chunk_Header *chunk, Boolean double_fix)
+{
+  /* Init field slot_index and depad the last index word in table for fixing */
+  chunk->slot_index = 0;
+  chunk_depad_last_index_word(chunk);
+  
+  unsigned int alloc_num = chunk->alloc_num;
+  assert(alloc_num);
+  
+  /* After compaction, many chunks are completely filled with objects.
+   * For these chunks we needn't locate the allocated slots one by one with next_alloc_slot_in_chunk,
+   * which is a little time consuming.
+   * Instead we fix the objects by advancing the address by slot_size to reach the next one.
+   */
+  if(alloc_num == chunk->slot_num){  /* Filled with objects */
+    unsigned int slot_size = chunk->slot_size;
+    Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)slot_index_to_addr(chunk, 0);
+    for(unsigned int i = alloc_num; i--;){
+      if(double_fix)
+        object_double_fix_ref_slots(p_obj);
+      else
+        object_fix_ref_slots(p_obj);
+#ifdef SSPACE_VERIFY
+      wspace_verify_fix_in_compact();
+#endif
+      p_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj + slot_size);
+    }
+  } else {  /* Chunk is not full */
+    while(alloc_num){
+      Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(chunk);
+      assert(p_obj);
+      if(double_fix)
+        object_double_fix_ref_slots(p_obj);
+      else
+        object_fix_ref_slots(p_obj);
+#ifdef SSPACE_VERIFY
+      wspace_verify_fix_in_compact();
+#endif
+      --alloc_num;
+    }
+  }
+  
+  if(chunk->alloc_num != chunk->slot_num){
+    //chunk_pad_last_index_word(chunk, cur_alloc_mask);
+    pfc_reset_slot_index(chunk);
+  }
+}
+
+static void abnormal_chunk_fix_repointed_refs(Chunk_Header *chunk, Boolean double_fix)
+{
+  if(double_fix)
+    object_double_fix_ref_slots((Partial_Reveal_Object*)chunk->base);
+  else
+    object_fix_ref_slots((Partial_Reveal_Object*)chunk->base);
+#ifdef SSPACE_VERIFY
+  wspace_verify_fix_in_compact();
+#endif
+}
+
+static void wspace_fix_repointed_refs(Collector *collector, Wspace *wspace, Boolean double_fix)
+{
+  Chunk_Header_Basic *chunk = wspace_grab_next_chunk(wspace, &next_chunk_for_fixing, TRUE);
+  
+  while(chunk){
+    if(chunk->status & CHUNK_NORMAL)
+      normal_chunk_fix_repointed_refs((Chunk_Header*)chunk, double_fix);
+    else if(chunk->status & CHUNK_ABNORMAL)
+      abnormal_chunk_fix_repointed_refs((Chunk_Header*)chunk, double_fix);
+    
+    chunk = wspace_grab_next_chunk(wspace, &next_chunk_for_fixing, TRUE);
+  }
+}
+
+
+/******************** Main body of Mark-Sweep-Compact ***********************/
+
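+/* Per-pass arrival counters used as barriers: each is reset with a CAS, every collector increments it on arrival,
+   and the last collector to arrive bumps it once more to release the waiting collectors. */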
+static volatile unsigned int num_marking_collectors = 0;
+static volatile unsigned int num_sweeping_collectors = 0;
+static volatile unsigned int num_compacting_collectors = 0;
+static volatile unsigned int num_forwarding_collectors = 0;
+static volatile unsigned int num_fixing_collectors = 0;
+
+void mark_sweep_wspace(Collector *collector)
+{
+  GC *gc = collector->gc;
+  Wspace *wspace = gc_get_wspace(gc);
+  Space *nos = NULL;
+  if(gc_match_kind(gc, MAJOR_COLLECTION))
+    nos = gc_get_nos((GC_Gen*)gc);
+  
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  
+  /* Pass 1: **************************************************
+     Mark all live objects in heap ****************************/
+  atomic_cas32(&num_marking_collectors, 0, num_active_collectors+1);
+
+  if(!gc_mark_is_concurrent()){  
+    if(gc_match_kind(gc, FALLBACK_COLLECTION))
+      wspace_fallback_mark_scan(collector, wspace);
+    else
+      wspace_mark_scan(collector, wspace);
+  }
+  
+  unsigned int old_num = atomic_inc32(&num_marking_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* the last collector to arrive does the single-threaded work here */
+#ifdef SSPACE_TIME
+    wspace_mark_time(FALSE);
+#endif
+    if(!IGNORE_FINREF )
+      collector_identify_finref(collector);
+#ifndef BUILD_IN_REFERENT
+    else {
+      gc_set_weakref_sets(gc);
+      gc_update_weakref_ignore_finref(gc);
+    }
+#endif
+    gc_identify_dead_weak_roots(gc);
+    gc_init_chunk_for_sweep(gc, wspace);
+    /* let other collectors go */
+    num_marking_collectors++;
+  }
+  while(num_marking_collectors != num_active_collectors + 1);
+  
+  /* Pass 2: **************************************************
+     Sweep dead objects ***************************************/
+  atomic_cas32( &num_sweeping_collectors, 0, num_active_collectors+1);
+  
+  wspace_sweep(collector, wspace);
+  
+  old_num = atomic_inc32(&num_sweeping_collectors);
+  if( ++old_num == num_active_collectors ){
+#ifdef SSPACE_TIME
+    wspace_sweep_time(FALSE, wspace->need_compact);
+#endif
+    ops_color_flip();
+#ifdef SSPACE_VERIFY
+    wspace_verify_after_sweep(gc);
+#endif
+
+    if(gc_match_kind(gc, MAJOR_COLLECTION)){
+      wspace_merge_free_chunks(gc, wspace);
+      nos_init_block_for_forwarding((GC_Gen*)gc);
+    }
+    if(wspace->need_compact)
+      wspace_init_pfc_pool_iterator(wspace);
+    if(wspace->need_fix)
+      wspace_init_chunk_for_ref_fixing(wspace);
+    /* let other collectors go */
+    num_sweeping_collectors++;
+  }
+  while(num_sweeping_collectors != num_active_collectors + 1);
+  
+  /* Optional Pass: *******************************************
+     Forward live obj in nos to mos (wspace) ******************/
+  if(gc_match_kind(gc, MAJOR_COLLECTION)){
+    atomic_cas32( &num_forwarding_collectors, 0, num_active_collectors+1);
+    
+    collector_forward_nos_to_wspace(collector, wspace);
+    
+    old_num = atomic_inc32(&num_forwarding_collectors);
+    if( ++old_num == num_active_collectors ){
+      gc_clear_collector_local_chunks(gc);
+      num_forwarding_collectors++;
+    }
+    
+    while(num_forwarding_collectors != num_active_collectors + 1);
+  }
+  
+  /* Optional Pass: *******************************************
+     Compact pfcs with the same size **************************/
+  if(wspace->need_compact){
+    atomic_cas32(&num_compacting_collectors, 0, num_active_collectors+1);
+    
+    wspace_compact(collector, wspace);
+    
+    /* If we need to forward nos to mos, i.e. in a major collection, an extra fixing phase is needed after compaction. */
+    old_num = atomic_inc32(&num_compacting_collectors);
+    if( ++old_num == num_active_collectors ){
+      if(gc_match_kind(gc, MAJOR_COLLECTION))
+        wspace_remerge_free_chunks(gc, wspace);
+      /* let other collectors go */
+      num_compacting_collectors++;
+    }
+    while(num_compacting_collectors != num_active_collectors + 1);
+  }
+  
+  /* Optional Pass: *******************************************
+     Fix repointed refs ***************************************/
+  if(wspace->need_fix){
+    atomic_cas32( &num_fixing_collectors, 0, num_active_collectors);
+    
+    /* When we have both forwarded nos AND compacted wspace,
+     * we need to double-fix object slots,
+     * because some objects are first forwarded from nos to mos and then compacted into another chunk.
+     */
+    Boolean double_fix = gc_match_kind(gc, MAJOR_COLLECTION) && wspace->need_compact;
+    wspace_fix_repointed_refs(collector, wspace, double_fix);
+    
+    atomic_inc32(&num_fixing_collectors);
+    while(num_fixing_collectors != num_active_collectors);
+  }
+  
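+  /* only the collector with thread_handle 0 performs the remaining single-threaded work below */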
+  if( collector->thread_handle != 0 )
+    return;
+  
+  /* Leftover: *************************************************/
+  
+  if(wspace->need_fix){
+    Boolean double_fix = gc_match_kind(gc, MAJOR_COLLECTION) && wspace->need_compact;
+    gc_fix_rootset(collector, double_fix);
+#ifdef SSPACE_TIME
+    wspace_fix_time(FALSE);
+#endif
+  }
+  
+  if(!gc_match_kind(gc, MAJOR_COLLECTION))
+    wspace_merge_free_chunks(gc, wspace);
+
+#ifdef USE_MARK_SWEEP_GC
+  wspace_set_space_statistic(wspace);
+#endif 
+
+#ifdef SSPACE_VERIFY
+  wspace_verify_after_collection(gc);
+#endif
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
------------------------------------------------------------------------------
    svn:eol-style = native


