harmony-commits mailing list archives

From x..@apache.org
Subject svn commit: r606876 [5/6] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ jni/ los/ mark_compact/ mark_sweep/ semi_space/ thread/ trace_forward/ utils/ verify/
Date Wed, 26 Dec 2007 10:17:15 GMT
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h Wed Dec 26 02:17:10 2007
@@ -0,0 +1,453 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#ifndef _SSPACE_MARK_SWEEP_H_
+#define _SSPACE_MARK_SWEEP_H_
+
+#include "wspace_chunk.h"
+#include "wspace_verify.h"
+
+#define PFC_REUSABLE_RATIO 0.1
+#define SSPACE_COMPACT_RATIO 0.06
+
+inline Boolean chunk_is_reusable(Chunk_Header *chunk)
+{ return (float)(chunk->slot_num-chunk->alloc_num)/chunk->slot_num > PFC_REUSABLE_RATIO; }
+
+#define OBJ_ALLOC_BIT_IN_TABLE  0x01
+#define OBJ_BLACK_BIT_IN_TABLE  0x02
+#define OBJ_GRAY_BIT_IN_TABLE   0x04
+#define OBJ_COLOR_BIT_IN_TABLE  0x06
+#define OBJ_DIRTY_BIT_IN_TABLE  0x08
+
+enum Obj_Color {
+  OBJ_COLOR_BLUE =  0x0,
+  OBJ_COLOR_WHITE = OBJ_ALLOC_BIT_IN_TABLE,
+  OBJ_COLOR_GRAY =  OBJ_GRAY_BIT_IN_TABLE,
+  OBJ_COLOR_BLACK = OBJ_BLACK_BIT_IN_TABLE,
+  OBJ_COLOR_MASK =  OBJ_COLOR_BIT_IN_TABLE
+};
+
+#ifdef POINTER64
+  //#define BLACK_MASK_IN_TABLE  ((POINTER_SIZE_INT)0xAAAAAAAAAAAAAAAA)
+  #define MARK_MASK_IN_TABLE  ((POINTER_SIZE_INT)0x2222222222222222)
+  #define FLIP_COLOR_MASK_IN_TABLE  ((POINTER_SIZE_INT)0x3333333333333333)
+  //#define DIRTY_MASK_IN_TABLE  ((POINTER_SIZE_INT)0x4444444444444444)
+#else
+  #define MARK_MASK_IN_TABLE  ((POINTER_SIZE_INT)0x22222222)
+  #define FLIP_COLOR_MASK_IN_TABLE  ((POINTER_SIZE_INT)0x33333333)
+  //#define DIRTY_MASK_IN_TABLE  ((POINTER_SIZE_INT)0x44444444)
+#endif
+
+extern POINTER_SIZE_INT cur_alloc_color;
+extern POINTER_SIZE_INT cur_mark_gray_color;
+extern POINTER_SIZE_INT cur_mark_black_color;
+extern POINTER_SIZE_INT cur_alloc_mask;
+extern POINTER_SIZE_INT cur_mark_mask;
+
+inline Boolean is_super_obj(Partial_Reveal_Object *obj)
+{
+  //return get_obj_info_raw(obj) & SUPER_OBJ_MASK;
+  if(vm_object_size(obj) > SUPER_OBJ_THRESHOLD){
+    return TRUE;
+  } else {
+    return FALSE;
+  }
+}
+
+FORCE_INLINE POINTER_SIZE_INT *get_color_word_in_table(Partial_Reveal_Object *obj, unsigned int &index_in_word)
+{
+  Chunk_Header *chunk;
+  unsigned int index;
+  
+  if(is_super_obj(obj)){
+    chunk = ABNORMAL_CHUNK_HEADER(obj);
+    index = 0;
+  } else {
+    chunk = NORMAL_CHUNK_HEADER(obj);
+    index = slot_addr_to_index(chunk, obj);
+  }
+  unsigned int word_index = index / SLOT_NUM_PER_WORD_IN_TABLE;
+  index_in_word = COLOR_BITS_PER_OBJ * (index % SLOT_NUM_PER_WORD_IN_TABLE);
+  //unsigned int word_index = index >> 3;
+  //index_in_word = COLOR_BITS_PER_OBJ * (index & (((unsigned int)(SLOT_NUM_PER_WORD_IN_TABLE-1))));
+  
+  return &chunk->table[word_index];
+}
+FORCE_INLINE POINTER_SIZE_INT *get_color_word_in_table(Partial_Reveal_Object *obj, unsigned int &index_in_word, unsigned int size)
+{
+  Chunk_Header *chunk;
+  unsigned int index;
+  
+  if(size > SUPER_OBJ_THRESHOLD){
+    chunk = ABNORMAL_CHUNK_HEADER(obj);
+    index = 0;
+  } else {
+    chunk = NORMAL_CHUNK_HEADER(obj);
+    index = slot_addr_to_index(chunk, obj);
+  }
+  unsigned int word_index = index / SLOT_NUM_PER_WORD_IN_TABLE;
+  index_in_word = COLOR_BITS_PER_OBJ * (index % SLOT_NUM_PER_WORD_IN_TABLE);
+  
+  return &chunk->table[word_index];
+}
+
+#if 0
+/* Accurate marking: TRUE stands for being marked by this collector, and FALSE for another collector */
+inline Boolean obj_mark_in_table(Partial_Reveal_Object *obj)
+{
+  volatile POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word);
+  assert(p_color_word);
+  
+  POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
+  POINTER_SIZE_INT mark_color = cur_mark_color << index_in_word;
+  
+  POINTER_SIZE_INT old_word = *p_color_word;
+  POINTER_SIZE_INT new_word = (old_word & color_bits_mask) | mark_color;
+  while(new_word != old_word) {
+    POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+    if(temp == old_word){
+#ifdef SSPACE_VERIFY
+#ifndef SSPACE_VERIFY_FINREF
+      assert(obj_is_marked_in_vt(obj));
+#endif
+      obj_unmark_in_vt(obj);
+      wspace_record_mark(obj, vm_object_size(obj));
+#endif
+      return TRUE;
+    }
+    old_word = *p_color_word;
+    new_word = (old_word & color_bits_mask) | mark_color;
+  }
+  
+  return FALSE;
+}
+
+#endif
+
+FORCE_INLINE Boolean obj_is_mark_gray_in_table(Partial_Reveal_Object *obj)
+{
+  POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word);
+  POINTER_SIZE_INT current_word = *p_color_word;
+  POINTER_SIZE_INT mark_gray_color = cur_mark_gray_color << index_in_word;
+
+  if(current_word & mark_gray_color)
+    return TRUE;
+  else
+    return FALSE;
+}
+
+FORCE_INLINE Boolean obj_is_mark_black_in_table(Partial_Reveal_Object *obj)
+{
+  POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word);
+  POINTER_SIZE_INT current_word = *p_color_word;
+  POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word;
+  
+  if(current_word & mark_black_color)
+    return TRUE;
+  else
+    return FALSE;
+  
+}
+
+FORCE_INLINE Boolean obj_is_mark_black_in_table(Partial_Reveal_Object *obj, unsigned int size)
+{
+  POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word, size);
+  POINTER_SIZE_INT current_word = *p_color_word;
+  POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word;
+  
+  if(current_word & mark_black_color)
+    return TRUE;
+  else
+    return FALSE;
+  
+}
+
+
+FORCE_INLINE Boolean obj_mark_gray_in_table(Partial_Reveal_Object *obj)
+{
+  volatile POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word);
+  assert(p_color_word);
+  
+  //POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
+  POINTER_SIZE_INT mark_color = cur_mark_gray_color << index_in_word;
+  
+  POINTER_SIZE_INT old_word = *p_color_word;  
+  if(old_word & mark_color) return FALSE; /*already marked gray or black.*/
+  
+  //POINTER_SIZE_INT new_word = (old_word & color_bits_mask) | mark_color;
+  POINTER_SIZE_INT new_word = old_word | mark_color;
+  while(new_word != old_word) {
+    POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+    if(temp == old_word){
+      return TRUE; /*returning true does not mean it's marked by this thread. */
+    }
+    old_word = *p_color_word;
+    if(old_word & mark_color) return FALSE; /*already marked gray or black.*/
+    
+    //new_word = (old_word & color_bits_mask) | mark_color;
+    new_word = old_word | mark_color;
+  }
+  
+  return FALSE;
+}
+
+FORCE_INLINE Boolean obj_mark_black_in_table(Partial_Reveal_Object *obj, unsigned int size)
+{
+  //assert(obj_is_mark_in_table(obj));
+  volatile POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word, size);
+  assert(p_color_word);
+  
+  //POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
+  POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word;
+  
+  POINTER_SIZE_INT old_word = *p_color_word;  
+  if(old_word & mark_black_color) return FALSE; /*already marked black*/
+  
+  POINTER_SIZE_INT new_word = old_word | mark_black_color;
+  while(new_word != old_word) {
+    POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+    if(temp == old_word){
+      return TRUE; /*returning true does not mean it's marked by this thread. */
+    }
+    old_word = *p_color_word;
+    if(old_word & mark_black_color) return FALSE; /*already marked black*/
+    
+    new_word = old_word | mark_black_color;
+  }
+  
+  return FALSE;
+
+}
+
+FORCE_INLINE Boolean obj_mark_black_in_table(Partial_Reveal_Object *obj)
+{
+ // assert(obj_is_mark_in_table(obj));
+  volatile POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word);
+  assert(p_color_word);
+  
+  //POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
+  POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word;
+  
+  POINTER_SIZE_INT old_word = *p_color_word;
+  if(old_word & mark_black_color) return FALSE; /*already marked black*/
+  
+  POINTER_SIZE_INT new_word = old_word | mark_black_color;
+  while(new_word != old_word) {
+    POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+    if(temp == old_word){
+      return TRUE; /*returning true does not mean it's marked by this thread. */
+    }
+    old_word = *p_color_word;
+    if(old_word & mark_black_color) return FALSE; /*already marked black*/
+    
+    new_word = old_word | mark_black_color;
+  }
+  
+  return FALSE;
+}
+
+FORCE_INLINE Boolean obj_dirty_in_table(Partial_Reveal_Object *obj)
+{
+  volatile POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word);
+  assert(p_color_word);
+  
+  POINTER_SIZE_INT obj_dirty_bit_in_word = OBJ_DIRTY_BIT_IN_TABLE << index_in_word;
+  
+  POINTER_SIZE_INT old_word = *p_color_word;
+  if(old_word & obj_dirty_bit_in_word) return FALSE; 
+  
+  POINTER_SIZE_INT new_word = old_word | obj_dirty_bit_in_word;
+  while(new_word != old_word) {
+    POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+    if(temp == old_word){
+      return TRUE; /*returning true does not mean it's marked by this thread. */
+    }
+    old_word = *p_color_word;
+    if(old_word & obj_dirty_bit_in_word) return FALSE; 
+    
+    new_word = old_word | obj_dirty_bit_in_word;
+  }
+  
+  return FALSE;
+}
+
+FORCE_INLINE Boolean obj_is_dirty_in_table(Partial_Reveal_Object *obj)
+{
+  POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word);
+  POINTER_SIZE_INT current_word = *p_color_word;
+  POINTER_SIZE_INT obj_dirty_bit_in_word = OBJ_DIRTY_BIT_IN_TABLE << index_in_word;
+  
+  if(current_word & obj_dirty_bit_in_word)
+    return TRUE;
+  else
+    return FALSE;
+}
+
+FORCE_INLINE Boolean obj_clear_mark_in_table(Partial_Reveal_Object *obj)
+{
+  volatile POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word);
+  assert(p_color_word);
+  
+  //POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
+  POINTER_SIZE_INT mark_color = (cur_mark_black_color|cur_mark_gray_color) << index_in_word;
+  POINTER_SIZE_INT clear_mask = ~mark_color;
+
+  POINTER_SIZE_INT old_word = *p_color_word;  
+  
+  POINTER_SIZE_INT new_word = old_word & clear_mask;
+  while(new_word != old_word) {
+    POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+    if(temp == old_word){
+      return TRUE; /*this thread cleared the mark bits*/
+    }
+    old_word = *p_color_word;
+    //if(old_word & clear_mask) return FALSE; /*already marked black*/
+    
+    new_word = old_word & clear_mask;
+  }
+  
+  return FALSE;
+
+}
+
+FORCE_INLINE Boolean obj_clear_dirty_in_table(Partial_Reveal_Object *obj)
+{
+  volatile POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word);
+  assert(p_color_word);
+  
+  //POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
+  POINTER_SIZE_INT mark_color = OBJ_DIRTY_BIT_IN_TABLE << index_in_word;
+  POINTER_SIZE_INT clear_mask = ~mark_color;
+
+  POINTER_SIZE_INT old_word = *p_color_word;  
+  
+  POINTER_SIZE_INT new_word = old_word & clear_mask;
+  while(new_word != old_word) {
+    POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+    if(temp == old_word){
+      return TRUE; /*this thread cleared the dirty bit*/
+    }
+    old_word = *p_color_word;
+    //if(old_word & clear_mask) return FALSE; /*already marked black*/
+    
+    new_word = old_word & clear_mask;
+  }
+  
+  return FALSE;
+
+}
+
+FORCE_INLINE Boolean obj_is_alloc_in_color_table(Partial_Reveal_Object *obj)
+{
+  POINTER_SIZE_INT *p_color_word;
+  unsigned int index_in_word;
+  p_color_word = get_color_word_in_table(obj, index_in_word);
+  POINTER_SIZE_INT current_word = *p_color_word;
+  POINTER_SIZE_INT obj_alloc_color_bit_in_word = cur_alloc_color << index_in_word;
+  
+  return (Boolean)(current_word & obj_alloc_color_bit_in_word);
+}
+
+FORCE_INLINE Boolean obj_need_take_snapshot(Partial_Reveal_Object *obj)
+{
+  return !obj_is_mark_black_in_table(obj) && !obj_is_dirty_in_table(obj); 
+}
+
+FORCE_INLINE Boolean obj_need_remember(Partial_Reveal_Object *obj)
+{
+  return (obj_is_mark_gray_in_table(obj) || obj_is_mark_black_in_table(obj)) && !obj_is_dirty_in_table(obj); 
+}
+
+FORCE_INLINE Boolean obj_need_remember_oldvar(Partial_Reveal_Object *obj)
+{
+  return !obj_is_mark_gray_in_table(obj) && !obj_is_mark_black_in_table(obj); 
+}
+
+inline void collector_add_free_chunk(Collector *collector, Free_Chunk *chunk)
+{
+  Free_Chunk_List *list = collector->free_chunk_list;
+  
+  chunk->status = CHUNK_FREE | CHUNK_TO_MERGE;
+  chunk->next = list->head;
+  chunk->prev = NULL;
+  if(list->head)
+    list->head->prev = chunk;
+  else
+    list->tail = chunk;
+  list->head = chunk;
+}
+
+
+inline unsigned int word_set_bit_num(POINTER_SIZE_INT word)
+{
+  unsigned int count = 0;
+  
+  while(word){
+    word &= word - 1;
+    ++count;
+  }
+  return count;
+}
+
+inline void ops_color_flip(void)
+{
+  POINTER_SIZE_INT temp = cur_alloc_color;
+  cur_alloc_color = cur_mark_black_color;
+  //FIXME: Need barrier here.
+  //apr_memory_rw_barrier();
+  cur_mark_black_color = temp;
+  cur_alloc_mask = (~cur_alloc_mask) & FLIP_COLOR_MASK_IN_TABLE;
+  cur_mark_mask = (~cur_mark_mask) & FLIP_COLOR_MASK_IN_TABLE;
+  //printf("color flip\n");
+}
+
+extern void wspace_mark_scan(Collector *collector, Wspace *wspace);
+extern void wspace_fallback_mark_scan(Collector *collector, Wspace *wspace);
+extern void gc_init_chunk_for_sweep(GC *gc, Wspace *wspace);
+extern void wspace_sweep(Collector *collector, Wspace *wspace);
+extern void wspace_compact(Collector *collector, Wspace *wspace);
+extern void wspace_merge_free_chunks(GC *gc, Wspace *wspace);
+extern void wspace_remerge_free_chunks(GC *gc, Wspace *wspace);
+extern Chunk_Header_Basic *wspace_grab_next_chunk(Wspace *wspace, Chunk_Header_Basic *volatile *shared_next_chunk, Boolean need_construct);
+
+extern void pfc_set_slot_index(Chunk_Header *chunk, unsigned int first_free_word_index, POINTER_SIZE_INT alloc_color);
+extern void pfc_reset_slot_index(Chunk_Header *chunk);
+
+#endif // _SSPACE_MARK_SWEEP_H_

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
------------------------------------------------------------------------------
    svn:eol-style = native
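
A note on the color table encoded by the header above: each normal chunk keeps a side table with COLOR_BITS_PER_OBJ bits per slot (alloc, black, gray and dirty, per the OBJ_*_BIT_IN_TABLE defines). MARK_MASK_IN_TABLE (0x2222...) selects the black bit of every slot in a word, and FLIP_COLOR_MASK_IN_TABLE (0x3333...) selects the alloc|black pair, which is what lets ops_color_flip() swap the roles of the two bits with plain mask arithmetic. Below is a minimal standalone sketch of the indexing arithmetic, assuming a 32-bit table word (so 8 slots per word) and 4 color bits per slot; the names are illustrative, not from the commit:

#include <assert.h>
#include <stdio.h>

#define COLOR_BITS 4u                      /* alloc, black, gray, dirty */
#define SLOTS_PER_WORD (32u / COLOR_BITS)  /* 8 slots per 32-bit word */
#define BLACK_BIT 0x2u

/* Map a slot index to its table word and the bit shift of its color bits. */
static unsigned black_shift_for_slot(unsigned index, unsigned *word_index)
{
  *word_index = index / SLOTS_PER_WORD;
  return COLOR_BITS * (index % SLOTS_PER_WORD);
}

int main(void)
{
  unsigned table[2] = {0, 0};
  unsigned word;
  unsigned shift = black_shift_for_slot(11, &word);
  table[word] |= BLACK_BIT << shift;       /* mark slot 11 black */
  assert(word == 1 && shift == 12);
  printf("table[1] = 0x%x\n", table[1]);   /* prints 0x2000 */
  return 0;
}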

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,270 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "wspace_chunk.h"
+#include "wspace_mark_sweep.h"
+
+
+static Chunk_Header_Basic *volatile next_chunk_for_sweep;
+
+
+void gc_init_chunk_for_sweep(GC *gc, Wspace *wspace)
+{
+  next_chunk_for_sweep = (Chunk_Header_Basic*)space_heap_start((Space*)wspace);
+  next_chunk_for_sweep->adj_prev = NULL;
+  
+  unsigned int i = gc->num_collectors;
+  while(i--){
+    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+    assert(!list->head);
+    assert(!list->tail);
+    assert(list->lock == FREE_LOCK);
+  }
+}
+
+void zeroing_free_chunk(Free_Chunk *chunk)
+{
+  //Modified this assertion for concurrent sweep
+  //assert(chunk->status == CHUNK_FREE);
+  assert(chunk->status & CHUNK_FREE);
+  
+  void *start = (void*)((POINTER_SIZE_INT)chunk + sizeof(Free_Chunk));
+  POINTER_SIZE_INT size = CHUNK_SIZE(chunk) - sizeof(Free_Chunk);
+  memset(start, 0, size);
+}
+
+/* Zeroing should be optimized so that it happens while sweeping the index words */
+static void zeroing_free_areas_in_pfc(Chunk_Header *chunk, unsigned int live_num)
+{
+  assert(live_num);
+  
+  assert(chunk->status & CHUNK_NORMAL);
+  unsigned int slot_num = chunk->slot_num;
+  unsigned int slot_size = chunk->slot_size;
+  POINTER_SIZE_INT chunk_base = (POINTER_SIZE_INT)chunk->base;
+  POINTER_SIZE_INT *table = chunk->table;
+  
+  POINTER_SIZE_INT base = (POINTER_SIZE_INT)NULL;
+  assert(slot_num >= live_num);
+  unsigned int free_slot_num = slot_num - live_num;
+  unsigned int cur_free_slot_num = 0;
+  unsigned int slot_index = chunk->slot_index;
+  unsigned int word_index = slot_index / SLOT_NUM_PER_WORD_IN_TABLE;
+  assert(live_num >= slot_index);
+  live_num -= slot_index;
+  POINTER_SIZE_INT index_word = table[word_index];
+  POINTER_SIZE_INT mark_color = cur_mark_black_color << (COLOR_BITS_PER_OBJ * (slot_index % SLOT_NUM_PER_WORD_IN_TABLE));
+  for(; slot_index < slot_num; ++slot_index){
+    assert(!(index_word & ~cur_mark_mask));
+    if(index_word & mark_color){
+      if(cur_free_slot_num){
+        memset((void*)base, 0, slot_size*cur_free_slot_num);
+        assert(free_slot_num >= cur_free_slot_num);
+        free_slot_num -= cur_free_slot_num;
+        cur_free_slot_num = 0;
+        if(!free_slot_num) break;
+      }
+      assert(live_num);
+      --live_num;
+    } else {
+      if(cur_free_slot_num){
+        ++cur_free_slot_num;
+      } else {
+        base = chunk_base + slot_size * slot_index;
+        cur_free_slot_num = 1;
+        if(!live_num) break;
+      }
+    }
+    mark_color <<= COLOR_BITS_PER_OBJ;
+    if(!mark_color){
+      mark_color = cur_mark_black_color;
+      ++word_index;
+      index_word = table[word_index];
+      while(index_word == cur_mark_mask && cur_free_slot_num == 0 && slot_index < slot_num){
+        slot_index += SLOT_NUM_PER_WORD_IN_TABLE;
+        ++word_index;
+        index_word = table[word_index];
+        assert(live_num >= SLOT_NUM_PER_WORD_IN_TABLE);
+        live_num -= SLOT_NUM_PER_WORD_IN_TABLE;
+      }
+      while(index_word == 0 && cur_free_slot_num > 0 && slot_index < slot_num){
+        slot_index += SLOT_NUM_PER_WORD_IN_TABLE;
+        ++word_index;
+        index_word = table[word_index];
+        cur_free_slot_num += SLOT_NUM_PER_WORD_IN_TABLE;
+      }
+    }
+  }
+  assert((cur_free_slot_num>0 && live_num==0) || (cur_free_slot_num==0 && live_num>0));
+  if(cur_free_slot_num)
+    memset((void*)base, 0, slot_size*free_slot_num);
+}
+
+static void collector_sweep_normal_chunk(Collector *collector, Wspace *wspace, Chunk_Header *chunk)
+{
+  unsigned int slot_num = chunk->slot_num;
+  unsigned int live_num = 0;
+  unsigned int first_free_word_index = MAX_SLOT_INDEX;
+  POINTER_SIZE_INT *table = chunk->table;
+  
+  unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  for(unsigned int i=0; i<index_word_num; ++i){
+    table[i] &= cur_mark_mask;
+    unsigned int live_num_in_word = (table[i] == cur_mark_mask) ? SLOT_NUM_PER_WORD_IN_TABLE : word_set_bit_num(table[i]);
+    live_num += live_num_in_word;
+    if((first_free_word_index == MAX_SLOT_INDEX) && (live_num_in_word < SLOT_NUM_PER_WORD_IN_TABLE)){
+      first_free_word_index = i;
+      pfc_set_slot_index((Chunk_Header*)chunk, first_free_word_index, cur_mark_black_color);
+    }
+  }
+  assert(live_num <= slot_num);
+  //chunk->alloc_num = live_num;
+  collector->live_obj_size += live_num * chunk->slot_size;
+  collector->live_obj_num += live_num;
+
+  if(!live_num){  /* all objects in this chunk are dead */
+    collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+  } else if(chunk_is_reusable(chunk)){  /* enough slots are free: add the chunk to a PFC (partially free chunk) list */
+    chunk->alloc_num = live_num;
+    //chunk_pad_last_index_word((Chunk_Header*)chunk, cur_mark_mask);
+    wspace_put_pfc(wspace, chunk);
+    assert(chunk->next != chunk);
+  } else {  /* the rest: chunks whose free ratio is below PFC_REUSABLE_RATIO; we don't reuse them for allocation */
+    chunk->alloc_num = live_num;    
+    chunk->status = CHUNK_USED | CHUNK_NORMAL;
+    wspace_register_used_chunk(wspace,chunk);
+  }
+}
+
+static inline void collector_sweep_abnormal_chunk(Collector *collector, Wspace *wspace, Chunk_Header *chunk)
+{
+  assert(chunk->status & CHUNK_ABNORMAL);
+  POINTER_SIZE_INT *table = chunk->table;
+  table[0] &= cur_mark_mask;
+  if(!table[0]){
+    collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+  }
+  else {
+    chunk->status = CHUNK_ABNORMAL| CHUNK_USED;
+    wspace_register_used_chunk(wspace,chunk);
+    collector->live_obj_size += CHUNK_SIZE(chunk);
+    collector->live_obj_num++;
+  }
+}
+
+void wspace_sweep(Collector *collector, Wspace *wspace)
+{
+  Chunk_Header_Basic *chunk;
+  collector->live_obj_size = 0;
+  collector->live_obj_num = 0;
+
+  chunk = wspace_grab_next_chunk(wspace, &next_chunk_for_sweep, TRUE);
+  while(chunk){
+    /* chunk is free before GC */
+    if(chunk->status == CHUNK_FREE){
+      collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+    } else if(chunk->status & CHUNK_NORMAL){   /* chunk is used as a normal sized obj chunk */
+      collector_sweep_normal_chunk(collector, wspace, (Chunk_Header*)chunk);
+    } else {  /* chunk is used as a super obj chunk */
+      collector_sweep_abnormal_chunk(collector, wspace, (Chunk_Header*)chunk);
+    }
+    
+    chunk = wspace_grab_next_chunk(wspace, &next_chunk_for_sweep, TRUE);
+  }
+}
+
+/************ For merging free chunks in wspace ************/
+
+static void merge_free_chunks_in_list(Wspace *wspace, Free_Chunk_List *list)
+{
+  Free_Chunk *wspace_ceiling = (Free_Chunk*)space_heap_end((Space*)wspace);
+  Free_Chunk *chunk = list->head;
+  
+  while(chunk){
+    assert(chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE));
+    /* Remove current chunk from the chunk list */
+    list->head = chunk->next;
+    if(list->head)
+      list->head->prev = NULL;
+    /* Check if the prev adjacent chunks are free */
+    Free_Chunk *prev_chunk = (Free_Chunk*)chunk->adj_prev;
+    while(prev_chunk && prev_chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE)){
+      assert(prev_chunk < chunk);
+      /* Remove prev_chunk from list */
+      free_list_detach_chunk(list, prev_chunk);
+      prev_chunk->adj_next = chunk->adj_next;
+      chunk = prev_chunk;
+      prev_chunk = (Free_Chunk*)chunk->adj_prev;
+    }
+    /* Check if the back adjacent chunks are free */
+    Free_Chunk *back_chunk = (Free_Chunk*)chunk->adj_next;
+    while(back_chunk < wspace_ceiling && back_chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE)){
+      assert(chunk < back_chunk);
+      /* Remove back_chunk from list */
+      free_list_detach_chunk(list, back_chunk);
+      back_chunk = (Free_Chunk*)back_chunk->adj_next;
+      chunk->adj_next = (Chunk_Header_Basic*)back_chunk;
+    }
+    if(back_chunk < wspace_ceiling)
+      back_chunk->adj_prev = (Chunk_Header_Basic*)chunk;
+    
+    /* put the free chunk on the appropriate free chunk list */
+    wspace_put_free_chunk(wspace, chunk);
+    
+    chunk = list->head;
+  }
+}
+
+void wspace_merge_free_chunks(GC *gc, Wspace *wspace)
+{
+  Free_Chunk_List free_chunk_list;
+  free_chunk_list.head = NULL;
+  free_chunk_list.tail = NULL;
+  
+  /* Collect free chunks from collectors to one list */
+  for(unsigned int i=0; i<gc->num_collectors; ++i){
+    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+    move_free_chunks_between_lists(&free_chunk_list, list);
+  }
+  
+  merge_free_chunks_in_list(wspace, &free_chunk_list);
+}
+
+void wspace_remerge_free_chunks(GC *gc, Wspace *wspace)
+{
+  Free_Chunk_List free_chunk_list;
+  free_chunk_list.head = NULL;
+  free_chunk_list.tail = NULL;
+  
+  /* If a new chunk is partitioned from a bigger one in the forwarding phase,
+   * its adj_prev has not been set yet,
+   * and the adj_prev field of the chunk next to it will be wrong as well.
+   * So the chunk chain has to be rebuilt here.
+   */
+  wspace_rebuild_chunk_chain(wspace);
+  
+  /* Collect free chunks from wspace free chunk lists to one list */
+  wspace_collect_free_chunks_to_list(wspace, &free_chunk_list);
+  
+  /* Collect free chunks from collectors to one list */
+  for(unsigned int i=0; i<gc->num_collectors; ++i){
+    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+    move_free_chunks_between_lists(&free_chunk_list, list);
+  }
+  
+  merge_free_chunks_in_list(wspace, &free_chunk_list);
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp
------------------------------------------------------------------------------
    svn:eol-style = native
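
The sweep loop above derives each chunk's live count by AND-ing every table word with cur_mark_mask and counting the surviving bits with word_set_bit_num(), which is Kernighan's trick: word &= word - 1 clears the lowest set bit on each iteration. A small self-contained check of that counting step, assuming the 32-bit table layout (8 slots of 4 color bits per word); the table value here is made up for illustration:

#include <assert.h>

static unsigned word_set_bit_num(unsigned word)
{
  unsigned count = 0;
  while(word){
    word &= word - 1;  /* clear the lowest set bit */
    ++count;
  }
  return count;
}

int main(void)
{
  unsigned mark_mask = 0x22222222u;   /* black bit of all 8 slots */
  unsigned table_word = 0x13300210u;  /* mixed alloc/gray/black bits */
  unsigned live = word_set_bit_num(table_word & mark_mask);
  assert(live == 3);  /* three slots have the black (mark) bit set */
  return 0;
}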

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,382 @@
+#include "wspace.h"
+#include "wspace_chunk.h"
+#include "wspace_mark_sweep.h"
+#include "gc_ms.h"
+#include "../gen/gen.h"
+
+static void collector_sweep_normal_chunk_concurrent(Collector *collector, Wspace *wspace, Chunk_Header *chunk)
+{
+  unsigned int slot_num = chunk->slot_num;
+  unsigned int live_num = 0;
+  unsigned int first_free_word_index = MAX_SLOT_INDEX;
+  POINTER_SIZE_INT *table = chunk->table;
+  
+  unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  for(unsigned int i=0; i<index_word_num; ++i){
+    table[i] &= cur_alloc_mask;
+    unsigned int live_num_in_word = (table[i] == cur_alloc_mask) ? SLOT_NUM_PER_WORD_IN_TABLE : word_set_bit_num(table[i]);
+    live_num += live_num_in_word;
+    if((first_free_word_index == MAX_SLOT_INDEX) && (live_num_in_word < SLOT_NUM_PER_WORD_IN_TABLE)){
+      first_free_word_index = i;
+      pfc_set_slot_index((Chunk_Header*)chunk, first_free_word_index, cur_alloc_color);
+    }
+  }
+  assert(live_num <= slot_num);
+  collector->live_obj_size += live_num * chunk->slot_size;
+  collector->live_obj_num += live_num;
+
+  if(!live_num){  /* all objects in this chunk are dead */
+    collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+  } else if(!chunk_is_reusable(chunk)){  /* too few slots are free: register the chunk as unreusable */
+    chunk->alloc_num = live_num;
+    wspace_register_unreusable_normal_chunk(wspace, chunk);
+  } else {  /* enough slots are free: add the chunk to the PFC backup list */
+    chunk->alloc_num = live_num;
+    wspace_put_pfc_backup(wspace, chunk);
+  }
+}
+
+static inline void collector_sweep_abnormal_chunk_concurrent(Collector *collector, Wspace *wspace, Chunk_Header *chunk)
+{
+  assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_USED));
+  POINTER_SIZE_INT *table = chunk->table;
+  table[0] &= cur_alloc_mask;
+  if(!table[0]){    
+    collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+  }
+  else {
+    wspace_register_live_abnormal_chunk(wspace, chunk);
+    collector->live_obj_size += CHUNK_SIZE(chunk);
+    collector->live_obj_num++;
+  }
+}
+
+static void wspace_sweep_chunk_concurrent(Wspace* wspace, Collector* collector, Chunk_Header_Basic* chunk)
+{  
+  if(chunk->status & CHUNK_NORMAL){   /* chunk is used as a normal sized obj chunk */
+    assert(chunk->status == (CHUNK_NORMAL | CHUNK_USED));
+    collector_sweep_normal_chunk_concurrent(collector, wspace, (Chunk_Header*)chunk);
+  } else {  /* chunk is used as a super obj chunk */
+    assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_USED));
+    collector_sweep_abnormal_chunk_concurrent(collector, wspace, (Chunk_Header*)chunk);
+  }
+}
+
+static Free_Chunk_List* wspace_get_free_chunk_list(Wspace* wspace)
+{
+  GC* gc = wspace->gc;
+  Free_Chunk_List* free_chunk_list = (Free_Chunk_List*) STD_MALLOC(sizeof(Free_Chunk_List));
+  assert(free_chunk_list);
+  memset(free_chunk_list, 0, sizeof(Free_Chunk_List));
+  
+  /* Collect free chunks from collectors to one list */
+  for(unsigned int i=0; i<gc->num_collectors; ++i){
+    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+    move_free_chunks_between_lists(free_chunk_list, list);
+  }
+  
+  return free_chunk_list;
+}
+
+Boolean wspace_get_free_chunk_concurrent(Wspace *wspace, Free_Chunk* chunk)
+{
+  POINTER_SIZE_INT chunk_size = CHUNK_SIZE(chunk);
+  assert(!(chunk_size % CHUNK_GRANULARITY));
+
+  Free_Chunk_List* free_list = NULL;
+
+  /*Find list*/
+  if(chunk_size > HYPER_OBJ_THRESHOLD)
+    free_list = wspace->hyper_free_chunk_list;
+  else if(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK) && !(chunk_size & NORMAL_CHUNK_LOW_MASK))
+    free_list = &wspace->aligned_free_chunk_lists[ALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)];
+  else
+    free_list = &wspace->unaligned_free_chunk_lists[UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)];
+
+  /*Lock this free list*/
+  lock(free_list->lock);
+
+  /*Search free list for chunk*/
+  Free_Chunk* chunk_iter = free_list->head;
+  while((POINTER_SIZE_INT)chunk_iter){
+    if((POINTER_SIZE_INT)chunk_iter == (POINTER_SIZE_INT)chunk){
+      /*Find chunk and delete from list.*/     
+      free_list_detach_chunk(free_list, chunk);
+      unlock(free_list->lock);
+      return TRUE;
+    }
+    chunk_iter = chunk_iter->next;
+  }
+  
+  unlock(free_list->lock);
+  
+  return FALSE;
+}
+
+void wspace_merge_adj_free_chunks(Wspace* wspace,Free_Chunk* chunk)
+{
+  Free_Chunk *wspace_ceiling = (Free_Chunk*)space_heap_end((Space*)wspace);
+
+  /* Check if the back adjacent chunks are free */
+  Free_Chunk *back_chunk = (Free_Chunk*)chunk->adj_next;
+  while(back_chunk < wspace_ceiling && (back_chunk->status & CHUNK_FREE)){
+    assert(chunk < back_chunk);
+    /* Remove back_chunk from list */
+    if(wspace_get_free_chunk_concurrent(wspace,back_chunk)){
+      back_chunk = (Free_Chunk*)back_chunk->adj_next;
+      chunk->adj_next = (Chunk_Header_Basic*)back_chunk;
+    }else{
+      break;
+    }
+  }
+
+  chunk->status = CHUNK_FREE | CHUNK_MERGED;
+  /* put the free chunk on the appropriate free chunk list */
+  wspace_put_free_chunk_to_tail(wspace, chunk);
+
+}
+
+static void wspace_merge_list_concurrent(Wspace* wspace, Free_Chunk_List* free_list)
+{
+  lock(free_list->lock);
+  Free_Chunk* chunk = free_list->head;
+  
+  while(chunk && !is_free_chunk_merged(chunk)){
+    free_list_detach_chunk(free_list, chunk);
+    unlock(free_list->lock);
+    
+    wspace_merge_adj_free_chunks(wspace, chunk);
+    
+    lock(free_list->lock);
+    chunk = free_list->head;
+  }
+  
+  unlock(free_list->lock);
+}
+
+static void wspace_merge_free_chunks_concurrent(Wspace* wspace, Free_Chunk_List* free_list)
+{
+  Free_Chunk *chunk = free_list->head;
+
+  /*merge free list*/
+  wspace_merge_list_concurrent(wspace, free_list);
+  
+  /*check free pool*/
+  unsigned int i;
+  
+  for(i = NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+    wspace_merge_list_concurrent(wspace, &wspace->aligned_free_chunk_lists[i]);
+
+  for(i = NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+    wspace_merge_list_concurrent(wspace, &wspace->unaligned_free_chunk_lists[i]);
+
+  wspace_merge_list_concurrent(wspace, wspace->hyper_free_chunk_list);
+}
+
+static void wspace_reset_free_list_chunks(Wspace* wspace, Free_Chunk_List* free_list)
+{
+  lock(free_list->lock);
+  Free_Chunk* chunk = free_list->head;
+  
+  while(chunk){
+    assert(chunk->status & CHUNK_FREE);
+    chunk->status = CHUNK_FREE;
+    chunk = chunk->next;
+  }
+  
+  unlock(free_list->lock);
+}
+
+
+static void wspace_reset_free_chunks_status(Wspace* wspace)
+{
+  unsigned int i;
+  
+  for(i = NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+    wspace_reset_free_list_chunks(wspace, &wspace->aligned_free_chunk_lists[i]);
+
+  for(i = NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+    wspace_reset_free_list_chunks(wspace, &wspace->unaligned_free_chunk_lists[i]);
+
+  wspace_reset_free_list_chunks(wspace, wspace->hyper_free_chunk_list);
+
+}
+
+static void allocator_sweep_local_chunks(Allocator *allocator)
+{
+  Wspace *wspace = gc_get_wspace(allocator->gc);
+  Size_Segment **size_segs = wspace->size_segments;
+  Chunk_Header ***local_chunks = allocator->local_chunks;
+  
+  for(unsigned int i = SIZE_SEGMENT_NUM; i--;){
+    if(!size_segs[i]->local_alloc){
+      assert(!local_chunks[i]);
+      continue;
+    }
+    Chunk_Header **chunks = local_chunks[i];
+    assert(chunks);
+    for(unsigned int j = size_segs[i]->chunk_num; j--;){
+      if(chunks[j]){
+        unsigned int slot_num = chunks[j]->slot_num;
+        POINTER_SIZE_INT *table = chunks[j]->table;
+        
+        unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
+        for(unsigned int i=0; i<index_word_num; ++i){
+          //atomic sweep.
+          POINTER_SIZE_INT old_word = table[i];
+          POINTER_SIZE_INT new_word = old_word & cur_alloc_mask;
+          while(old_word != new_word){
+            POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**) &table[i],(void*) new_word,(void*) old_word);
+            if(temp == old_word){
+              break;
+            }
+            old_word = table[i];
+            new_word = old_word & cur_alloc_mask;
+          }
+        }
+      }
+    }
+  }
+}
+
+
+static void gc_sweep_mutator_local_chunks(GC *gc)
+{
+#ifdef USE_MARK_SWEEP_GC
+  lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
+
+  /* sweep the local chunks of each mutator in the unique mark-sweep GC */
+  Mutator *mutator = gc->mutator_list;
+  while(mutator){
+    wait_mutator_signal(mutator, DISABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS);
+    allocator_sweep_local_chunks((Allocator*)mutator);
+    mutator = mutator->next;
+  }
+
+  unlock(gc->mutator_list_lock);
+#endif
+}
+
+static void gc_check_mutator_local_chunks(GC *gc, unsigned int handshake_signal)
+{
+  lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
+
+  /* wait until every mutator has acknowledged the handshake signal */
+  Mutator *mutator = gc->mutator_list;
+  while(mutator){
+    wait_mutator_signal(mutator, handshake_signal);
+    mutator = mutator->next;
+  }
+
+  unlock(gc->mutator_list_lock);
+}
+
+
+static volatile unsigned int num_sweeping_collectors = 0;
+
+/* Concurrent sweep:
+   The mark bits and alloc bits are exchanged before entering this function.
+   This function clears the mark bits and merges the free chunks concurrently.
+ */
+void wspace_sweep_concurrent(Collector* collector)
+{
+  GC *gc = collector->gc;
+  Wspace *wspace = gc_get_wspace(gc);
+
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  
+  atomic_cas32(&num_sweeping_collectors, 0, num_active_collectors+1);
+
+  Pool* used_chunk_pool = wspace->used_chunk_pool;
+
+  Chunk_Header_Basic* chunk_to_sweep;
+  
+  /*1. Grab chunks from used list, sweep the chunk and push back to PFC backup list & free list.*/
+  chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
+  while(chunk_to_sweep != NULL){
+    wspace_sweep_chunk_concurrent(wspace, collector, chunk_to_sweep);
+    chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
+  }
+
+  /*2. Grab chunks from PFC list, sweep the chunk and push back to PFC backup list & free list.*/
+  Pool* pfc_pool = wspace_grab_next_pfc_pool(wspace);
+  while(pfc_pool != NULL){
+    if(!pool_is_empty(pfc_pool)){
+      /*sweep the chunks in pfc_pool. push back to pfc backup list*/
+      chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
+      while(chunk_to_sweep != NULL){
+        assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
+        chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED;
+        wspace_sweep_chunk_concurrent(wspace, collector, chunk_to_sweep);
+        chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
+      }
+    }
+    /*grab more pfc pools*/
+    pfc_pool = wspace_grab_next_pfc_pool(wspace);
+  }
+
+  unsigned int old_num = atomic_inc32(&num_sweeping_collectors);
+  if( ++old_num == num_active_collectors ){    
+    
+    /*3. Check the local chunk of mutator*/
+    gc_sweep_mutator_local_chunks(wspace->gc);
+
+    /*4. Sweep global alloc normal chunks again*/
+    gc_set_sweeping_global_normal_chunk();
+    gc_check_mutator_local_chunks(wspace->gc, DISABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS);
+    wspace_init_pfc_pool_iterator(wspace);
+    Pool* pfc_pool = wspace_grab_next_pfc_pool(wspace);
+    while(pfc_pool != NULL){
+      if(!pool_is_empty(pfc_pool)){
+        chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
+        while(chunk_to_sweep != NULL){
+          assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
+          chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED;
+          wspace_sweep_chunk_concurrent(wspace, collector, chunk_to_sweep);
+          chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
+        }
+      }
+      /*grab more pfc pools*/
+      pfc_pool = wspace_grab_next_pfc_pool(wspace);
+    }
+    gc_unset_sweeping_global_normal_chunk();
+    
+    /*5. Check the used list again.*/
+    chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
+    while(chunk_to_sweep != NULL){
+      wspace_sweep_chunk_concurrent(wspace, collector, chunk_to_sweep);
+      chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
+    }
+
+    /*6. Switch the PFC backup list to the PFC list.*/
+    wspace_exchange_pfc_pool(wspace);
+
+    /*7. Put back live abnormal chunks and unreusable normal chunks*/
+    Chunk_Header* used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
+    while(used_abnormal_chunk){      
+      used_abnormal_chunk->status = CHUNK_USED | CHUNK_ABNORMAL;
+      wspace_register_used_chunk(wspace,used_abnormal_chunk);
+      used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
+    }
+    pool_empty(wspace->live_abnormal_chunk_pool);
+
+    Chunk_Header* unreusable_normal_chunk = wspace_get_unreusable_normal_chunk(wspace);
+    while(unreusable_normal_chunk){  
+      unreusable_normal_chunk->status = CHUNK_USED | CHUNK_NORMAL;
+      wspace_register_used_chunk(wspace,unreusable_normal_chunk);
+      unreusable_normal_chunk = wspace_get_unreusable_normal_chunk(wspace);
+    }
+    pool_empty(wspace->unreusable_normal_chunk_pool);
+    
+    
+    /*8. Merge free chunks*/
+    Free_Chunk_List* free_chunk_list = wspace_get_free_chunk_list(wspace);
+    wspace_merge_free_chunks_concurrent(wspace, free_chunk_list);
+    wspace_reset_free_chunks_status(wspace);
+    
+    /* let other collectors go */
+    num_sweeping_collectors++;
+  }
+  while(num_sweeping_collectors != num_active_collectors + 1);
+}
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
------------------------------------------------------------------------------
    svn:eol-style = native
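
wspace_sweep_concurrent() above uses a simple counter rendezvous: every collector atomically bumps num_sweeping_collectors when its parallel share is done, the last one to arrive runs the serial finish phases (steps 3-8), and then bumps the counter once more to release the other collectors from their spin loop. The commit relies on the VM's own atomic_inc32/atomic_cas32 primitives; the sketch below reproduces the same shape with C++11 std::atomic, so the names and setup are illustrative only, not the commit's code:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<unsigned> num_done{0};

static void collector(unsigned id, unsigned num_collectors)
{
  /* ...each collector's parallel sweep work would go here... */
  unsigned old_num = num_done.fetch_add(1) + 1;
  if(old_num == num_collectors){
    /* last collector in: run the single-threaded finish phases */
    std::printf("collector %u runs the serial phase\n", id);
    num_done.fetch_add(1);  /* let the other collectors go */
  }
  while(num_done.load() != num_collectors + 1){}  /* spin until released */
}

int main()
{
  const unsigned n = 4;
  std::vector<std::thread> threads;
  for(unsigned i = 0; i < n; ++i)
    threads.emplace_back(collector, i, n);
  for(auto &t : threads)
    t.join();
  return 0;
}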

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_verify.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_verify.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_verify.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_verify.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,676 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "wspace_verify.h"
+#include "wspace_chunk.h"
+#include "wspace_mark_sweep.h"
+#include "../utils/vector_block.h"
+#include "gc_ms.h"
+#include "../gen/gen.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+#ifdef SSPACE_VERIFY
+
+#define VERIFY_CARD_SIZE_BYTES_SHIFT 12
+#define VERIFY_CARD_SIZE_BYTES (1 << VERIFY_CARD_SIZE_BYTES_SHIFT)
+#define VERIFY_CARD_LOW_MASK (VERIFY_CARD_SIZE_BYTES - 1)
+#define VERIFY_CARD_HIGH_MASK (~VERIFY_CARD_LOW_MASK)
+
+#define VERIFY_MAX_OBJ_SIZE_BYTES (1 << (32-VERIFY_CARD_SIZE_BYTES_SHIFT))
+
+typedef struct Verify_Card {
+  SpinLock lock;
+  Vector_Block *block;
+} Verify_Card;
+
+typedef unsigned int Obj_Addr;
+
+static GC *gc_in_verify = NULL;
+static Verify_Card *alloc_verify_cards = NULL;
+static Verify_Card *mark_verify_cards = NULL;
+static POINTER_SIZE_INT card_num = 0;
+static volatile POINTER_SIZE_INT alloc_obj_num = 0;
+static volatile POINTER_SIZE_INT live_obj_in_mark = 0;
+static volatile POINTER_SIZE_INT live_obj_in_fix = 0;
+
+void wspace_verify_init(GC *gc)
+{
+  gc_in_verify = gc;
+  
+  Wspace *wspace = gc_get_wspace(gc);
+  POINTER_SIZE_INT space_size = space_committed_size((Space*)wspace);
+  card_num = space_size >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+  POINTER_SIZE_INT cards_size = sizeof(Verify_Card) * card_num;
+  
+  alloc_verify_cards = (Verify_Card*)STD_MALLOC(cards_size);
+  memset(alloc_verify_cards, 0, cards_size);
+  
+  mark_verify_cards = (Verify_Card*)STD_MALLOC(cards_size);
+  memset(mark_verify_cards, 0, cards_size);
+}
+
+static Obj_Addr compose_obj_addr(unsigned int offset, unsigned int size)
+{
+  assert(size < VERIFY_MAX_OBJ_SIZE_BYTES);
+  return offset | (size << VERIFY_CARD_SIZE_BYTES_SHIFT);
+}
+
+static void *decompose_obj_addr(Obj_Addr obj_addr, POINTER_SIZE_INT card_index, unsigned int & size)
+{
+  assert(card_index < card_num);
+  POINTER_SIZE_INT card_offset = obj_addr & VERIFY_CARD_LOW_MASK;
+  POINTER_SIZE_INT heap_offset = VERIFY_CARD_SIZE_BYTES * card_index + card_offset;
+  size = (obj_addr & VERIFY_CARD_HIGH_MASK) >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+  assert(size < VERIFY_MAX_OBJ_SIZE_BYTES);
+  return (void*)(heap_offset + (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+}
+
+static Boolean obj_addr_overlapped(Obj_Addr addr1, Obj_Addr addr2)
+{
+  unsigned int offset1 = addr1 & VERIFY_CARD_LOW_MASK;
+  unsigned int size1 = (addr1 & VERIFY_CARD_HIGH_MASK) >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+  unsigned int ceiling1 = offset1 + size1;
+  unsigned int offset2 = addr2 & VERIFY_CARD_LOW_MASK;
+  unsigned int size2 = (addr2 & VERIFY_CARD_HIGH_MASK) >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+  unsigned int ceiling2 = offset2 + size2;
+  
+  unsigned int reason = 0;
+  if(offset1 == offset2)
+    reason = 1;
+  if((offset1 < offset2) && (ceiling1 > offset2))
+    reason = 2;
+  if((offset2 < offset1) && (ceiling2 > offset1))
+    reason = 3;
+  if(!reason)
+    return FALSE;
+  printf("\nreason: %d\nold offset: %x  size: %d\nnew offset: %x size: %d", reason, (POINTER_SIZE_INT)offset1, size1, (POINTER_SIZE_INT)offset2, size2);
+  return TRUE;
+}
+
+static Vector_Block *create_vector_block(unsigned int size)
+{
+  Vector_Block *block = (Vector_Block*)STD_MALLOC(size);
+  vector_block_init(block, size);
+  return block;
+}
+
+static void verify_card_get_block(Verify_Card *card)
+{
+  lock(card->lock);
+  if(card->block){
+    unlock(card->lock);
+    return;
+  }
+  card->block = create_vector_block(VECTOR_BLOCK_DATA_SIZE_BYTES);
+  unlock(card->lock);
+}
+
+void wspace_verify_alloc(void *addr, unsigned int size)
+{
+  assert(address_belongs_to_gc_heap(addr, gc_in_verify));
+  atomic_inc32(&alloc_obj_num);
+  
+  unsigned int heap_offset = (unsigned int)((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+  unsigned int card_offset = heap_offset & VERIFY_CARD_LOW_MASK;
+  Verify_Card *card = &alloc_verify_cards[heap_offset >> VERIFY_CARD_SIZE_BYTES_SHIFT];
+  
+  verify_card_get_block(card);
+  Vector_Block *block = card->block;
+  
+  Obj_Addr obj_addr = compose_obj_addr(card_offset, size);
+  
+  lock(card->lock);
+  Obj_Addr *p_addr = block->head;
+  while(p_addr < block->tail){
+    assert(!obj_addr_overlapped(obj_addr, *p_addr));
+    p_addr++;
+  }
+  vector_block_add_entry(block, obj_addr);
+  unlock(card->lock);
+}
+
+/* size is rounded up size */
+static Boolean obj_position_is_correct(void *addr, unsigned int size)
+{
+  Chunk_Header *chunk = NULL;
+  
+  if(size <= SUPER_OBJ_THRESHOLD)
+    chunk = NORMAL_CHUNK_HEADER(addr);
+  else
+    chunk = ABNORMAL_CHUNK_HEADER(addr);
+  if(chunk->slot_size != size) return FALSE;
+  if(((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)chunk->base) % size != 0) return FALSE;
+  return TRUE;
+}
+
+static void wspace_verify_weakref(Partial_Reveal_Object *p_obj)
+{
+  WeakReferenceType type = special_reference_type(p_obj);
+  if(type == NOT_REFERENCE) return;
+  
+  REF *p_referent_field = obj_get_referent_field(p_obj);
+  Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
+  if (!p_referent) return;
+  
+  unsigned int size = vm_object_size(p_referent);
+  if(size <= SUPER_OBJ_THRESHOLD){
+    Wspace *wspace = gc_get_wspace(gc_in_verify);
+    Size_Segment *size_seg = wspace_get_size_seg(wspace, size);
+    size = NORMAL_SIZE_ROUNDUP(size, size_seg);
+  }
+  
+  assert(obj_position_is_correct(p_referent, size));
+}
+
+static void mark_card_add_entry(void *addr, unsigned int size)
+{
+  assert(address_belongs_to_gc_heap(addr, gc_in_verify));
+  
+  unsigned int heap_offset = (unsigned int)((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+  unsigned int card_offset = heap_offset & VERIFY_CARD_LOW_MASK;
+  Verify_Card *card = &mark_verify_cards[heap_offset >> VERIFY_CARD_SIZE_BYTES_SHIFT];
+  
+  verify_card_get_block(card);
+  Vector_Block *block = card->block;
+  
+  if(size <= SUPER_OBJ_THRESHOLD){
+    Wspace *wspace = gc_get_wspace(gc_in_verify);
+    Size_Segment *size_seg = wspace_get_size_seg(wspace, size);
+    size = NORMAL_SIZE_ROUNDUP(size, size_seg);
+  }
+  
+  assert(obj_position_is_correct(addr, size));
+  Obj_Addr obj_addr = compose_obj_addr(card_offset, size);
+  
+  lock(card->lock);
+  Obj_Addr *p_addr = block->head;
+  while(p_addr < block->tail){
+    assert(!obj_addr_overlapped(obj_addr, *p_addr));
+    p_addr++;
+  }
+  vector_block_add_entry(block, obj_addr);
+  unlock(card->lock);
+
+}
+
+/* size is real size of obj */
+void wspace_record_mark(void *addr, unsigned int size)
+{
+  atomic_inc32(&live_obj_in_mark);
+  mark_card_add_entry(addr, size);
+}
+
+static void verify_mark(void *addr, unsigned int size, Boolean destructively)
+{
+  assert(address_belongs_to_gc_heap(addr, gc_in_verify));
+  
+  unsigned int heap_offset = (unsigned int)((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+  unsigned int card_offset = heap_offset & VERIFY_CARD_LOW_MASK;
+  Verify_Card *card = &mark_verify_cards[heap_offset >> VERIFY_CARD_SIZE_BYTES_SHIFT];
+  
+  Vector_Block *block = card->block;
+  assert(block);
+  
+  Obj_Addr obj_addr = compose_obj_addr(card_offset, size);
+  
+  Obj_Addr *p_addr = block->head;
+  while(p_addr < block->tail){
+    if(obj_addr == *p_addr){
+      if(destructively)
+        *p_addr = 0;
+      break;
+    }
+    p_addr++;
+  }
+  assert(p_addr < block->tail);
+}
+
+void wspace_modify_mark_in_compact(void *new_addr, void *old_addr, unsigned int size)
+{
+  /* Verify the old addr and remove it in the according mark card */
+  verify_mark(old_addr, size, TRUE);
+  /* Add new_addr into mark card */
+  mark_card_add_entry(new_addr, size);
+}
+
+void wspace_verify_fix_in_compact(void)
+{
+  atomic_inc32(&live_obj_in_fix);
+}
+
+static void check_and_clear_mark_cards(void)
+{
+  for(POINTER_SIZE_INT i=0; i<card_num; i++){
+    Vector_Block *block = mark_verify_cards[i].block;
+    if(!block)
+      continue;
+    Obj_Addr *p_addr = block->head;
+    while(p_addr < block->tail){
+      if(*p_addr){
+        unsigned int size = 0;
+        void *addr = NULL;
+        addr = decompose_obj_addr(*p_addr, i, size);
+        printf("Extra mark obj: %x  size: %d\n", (POINTER_SIZE_INT)addr, size);
+      }
+      p_addr++;
+    }
+    vector_block_clear(block);
+  }
+}
+
+static void clear_alloc_cards(void)
+{
+  for(POINTER_SIZE_INT i=0; i<card_num; i++){
+    Verify_Card *card = &alloc_verify_cards[i];
+    if(card->block)
+      vector_block_clear(card->block);
+  }
+}
+
+static void summarize_sweep_verify(GC *gc)
+{
+  POINTER_SIZE_INT live_obj_num = 0;
+  for(unsigned int i=0; i<gc->num_collectors; ++i){
+    live_obj_num += gc->collectors[i]->live_obj_num;
+  }
+  printf("Live obj in sweeping: %d\n", live_obj_num);
+}
+
+void wspace_verify_free_area(POINTER_SIZE_INT *start, POINTER_SIZE_INT size)
+{
+  POINTER_SIZE_INT *p_value = start;
+  
+  assert(!(size % BYTES_PER_WORD));
+  size /= BYTES_PER_WORD;
+  while(size--)
+    assert(!*p_value++);
+}
+
+static POINTER_SIZE_INT wspace_live_obj_num(Wspace *wspace, Boolean gc_finished)
+{
+  Chunk_Header *chunk = (Chunk_Header*)space_heap_start((Space*)wspace);
+  Chunk_Header *wspace_ceiling = (Chunk_Header*)space_heap_end((Space*)wspace);
+  POINTER_SIZE_INT live_num = 0;
+  
+  for(; chunk < wspace_ceiling; chunk = (Chunk_Header*)CHUNK_END(chunk)){
+    /* chunk is free before GC */
+    if(chunk->status & CHUNK_FREE){
+      assert((gc_finished && chunk->status==CHUNK_FREE)
+              || (!gc_finished && chunk->status==(CHUNK_FREE|CHUNK_TO_MERGE)));
+      continue;
+    }
+    if(chunk->status & CHUNK_ABNORMAL){
+      assert(chunk->status == CHUNK_ABNORMAL);
+      assert(chunk->slot_size > SUPER_OBJ_THRESHOLD);
+      Partial_Reveal_Object *obj = (Partial_Reveal_Object*)chunk->base;
+      assert(chunk->slot_size == vm_object_size(obj));
+      assert(get_obj_info_raw(obj) & SUPER_OBJ_MASK);
+    }
+    /* chunk is used as a normal or abnormal one in which there are live objects */
+    unsigned int slot_num = chunk->slot_num;
+    POINTER_SIZE_INT *table = chunk->table;
+    POINTER_SIZE_INT live_num_in_chunk = 0;
+    
+    unsigned int word_index = 0;
+    for(unsigned int i=0; i<slot_num; ++i){
+      unsigned int color_index = COLOR_BITS_PER_OBJ * i;
+      word_index = color_index / BITS_PER_WORD;
+      void *p_obj = slot_index_to_addr(chunk, i);
+      if(table[word_index] & (cur_alloc_color << (color_index % BITS_PER_WORD))){
+        ++live_num_in_chunk;
+        verify_mark(p_obj, chunk->slot_size, gc_finished);
+        if(gc_finished){
+          wspace_verify_alloc(p_obj, chunk->slot_size);
+          wspace_verify_weakref((Partial_Reveal_Object*)p_obj);
+        }
+      }
+    }
+    live_num += live_num_in_chunk;
+  }
+  
+  return live_num;
+}
+
+static void allocator_verify_local_chunks(Allocator *allocator)
+{
+  Wspace *wspace = gc_get_wspace(allocator->gc);
+  Size_Segment **size_segs = wspace->size_segments;
+  Chunk_Header ***local_chunks = allocator->local_chunks;
+  
+  for(unsigned int i = SIZE_SEGMENT_NUM; i--;){
+    if(!size_segs[i]->local_alloc){
+      assert(!local_chunks[i]);
+      continue;
+    }
+    Chunk_Header **chunks = local_chunks[i];
+    assert(chunks);
+    for(unsigned int j = size_segs[i]->chunk_num; j--;){
+      assert(!chunks[j]);
+    }
+  }
+}
+
+static void gc_verify_allocator_local_chunks(GC *gc)
+{
+  if(gc_match_kind(gc, MARK_SWEEP_GC)){
+    Mutator *mutator = gc->mutator_list;
+    while(mutator){
+      allocator_verify_local_chunks((Allocator*)mutator);
+      mutator = mutator->next;
+    }
+  }
+  
+  if(gc_match_kind(gc, MAJOR_COLLECTION))
+    for(unsigned int i = gc->num_collectors; i--;){
+      allocator_verify_local_chunks((Allocator*)gc->collectors[i]);
+    }
+}
+
+void wspace_verify_before_collection(GC *gc)
+{
+  printf("Allocated obj: %d\n", alloc_obj_num);
+  alloc_obj_num = 0;
+}
+
+void wspace_verify_after_sweep(GC *gc)
+{
+  printf("Live obj in marking: %d\n", live_obj_in_mark);
+  live_obj_in_mark = 0;
+  
+  summarize_sweep_verify(gc);
+  
+  Wspace *wspace = gc_get_wspace(gc);
+  POINTER_SIZE_INT total_live_obj = wspace_live_obj_num(wspace, FALSE);
+  printf("Live obj after sweep: %d\n", total_live_obj);
+}
+
+void wspace_verify_after_collection(GC *gc)
+{
+  printf("Live obj in fixing: %d\n", live_obj_in_fix);
+  live_obj_in_fix = 0;
+  
+  clear_alloc_cards();
+  
+  Wspace *wspace = gc_get_wspace(gc);
+  POINTER_SIZE_INT total_live_obj = wspace_live_obj_num(wspace, TRUE);
+  printf("Live obj after collection: %d\n", total_live_obj);
+  check_and_clear_mark_cards();
+  gc_verify_allocator_local_chunks(gc);
+}
+
+/*
+void wspace_verify_super_obj(GC *gc)
+{
+  Wspace *wspace = gc_get_wspace(gc);
+  Chunk_Header *chunk = (Chunk_Header*)space_heap_start((Space*)wspace);
+  Chunk_Header *wspace_ceiling = (Chunk_Header*)space_heap_end((Space*)wspace);
+  
+  for(; chunk < wspace_ceiling; chunk = (Chunk_Header*)CHUNK_END(chunk)){
+    if(chunk->status & CHUNK_ABNORMAL){
+      assert(chunk->status == CHUNK_ABNORMAL);
+      assert(chunk->slot_size > SUPER_OBJ_THRESHOLD);
+      Partial_Reveal_Object *obj = (Partial_Reveal_Object*)chunk->base;
+      assert(chunk->slot_size == vm_object_size(obj));
+      assert(get_obj_info_raw(obj) & SUPER_OBJ_MASK);
+    }
+  }
+}
+*/
+
+
+/* wspace verify marking with vtable marking in advance */
+
+Wspace *wspace_in_verifier;
+static Pool *trace_pool = NULL;
+static Vector_Block *trace_stack = NULL;
+POINTER_SIZE_INT live_obj_in_verify_marking = 0;
+
+static Boolean obj_mark_in_vtable(GC *gc, Partial_Reveal_Object *obj)
+{
+  assert(address_belongs_to_gc_heap(obj, gc));
+  assert((vm_object_size(obj) <= SUPER_OBJ_THRESHOLD) || (get_obj_info_raw(obj) & SUPER_OBJ_MASK));
+  Boolean marked = obj_mark_in_vt(obj);
+#ifdef SSPACE_VERIFY
+  if(marked) live_obj_in_verify_marking++;
+#endif
+  return marked;
+}
+
+static void tracestack_push(void *p_obj)
+{
+  vector_stack_push(trace_stack, (POINTER_SIZE_INT)p_obj);
+  
+  if( !vector_stack_is_full(trace_stack)) return;
+  
+  pool_put_entry(trace_pool, trace_stack);
+  trace_stack = free_task_pool_get_entry(&gc_metadata);
+  assert(trace_stack);
+}
+
+static FORCE_INLINE void scan_slot(GC *gc, REF *p_ref)
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if( p_obj == NULL) return;
+  
+  if(obj_belongs_to_space(p_obj, (Space*)wspace_in_verifier) && obj_mark_in_vtable(gc, p_obj))
+    tracestack_push(p_obj);
+  
+  return;
+}
+
+static FORCE_INLINE void scan_object(GC *gc, Partial_Reveal_Object *p_obj)
+{
+  if(!object_has_ref_field(p_obj) ) return;
+  
+  REF *p_ref;
+  
+  if (object_is_array(p_obj)) {   /* scan array object */
+    
+    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len;
+    
+    p_ref = (REF*)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    
+    for (unsigned int i = 0; i < array_length; i++) {
+      scan_slot(gc, p_ref+i);
+    }
+  } else { /* scan non-array object */
+    
+    unsigned int num_refs = object_ref_field_num(p_obj);
+    
+    int *ref_iterator = object_ref_iterator_init(p_obj);
+    
+    for(unsigned int i=0; i<num_refs; i++){
+      p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+      scan_slot(gc, p_ref);
+    }
+#ifndef BUILD_IN_REFERENT
+    //scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+  }
+}
+
+static void trace_object(GC *gc, Partial_Reveal_Object *p_obj)
+{
+  scan_object(gc, p_obj);
+  
+  while( !vector_stack_is_empty(trace_stack)){
+    p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack);
+    scan_object(gc, p_obj);
+  }
+}
+
+void wspace_verify_vtable_mark(GC *gc)
+{
+  wspace_in_verifier = gc_get_wspace(gc);
+  GC_Metadata *metadata = gc->metadata;
+  Pool *rootset_pool = metadata->gc_rootset_pool;
+  
+  trace_stack = free_task_pool_get_entry(metadata);
+  trace_pool = sync_pool_create();
+  
+  pool_iterator_init(rootset_pool);
+  Vector_Block *root_set = pool_iterator_next(rootset_pool);
+  
+  while(root_set){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set, iter)){
+      REF *p_ref = (REF*)*iter;
+      iter = vector_block_iterator_advance(root_set, iter);
+      
+      Partial_Reveal_Object *p_obj = read_slot(p_ref);
+      assert(p_obj!=NULL);
+      if(obj_belongs_to_space(p_obj, (Space*)wspace_in_verifier) && obj_mark_in_vtable(gc, p_obj))
+        tracestack_push(p_obj);
+    }
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */
+  pool_put_entry(trace_pool, trace_stack);
+  
+  /* second step: iterate over the mark tasks and scan objects */
+  /* get a task buf for the mark stack */
+  trace_stack = free_task_pool_get_entry(metadata);
+  
+  Vector_Block *mark_task = pool_get_entry(trace_pool);
+  
+  while(mark_task){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task, iter)){
+      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
+      iter = vector_block_iterator_advance(mark_task, iter);
+      
+      trace_object(gc, p_obj);
+    }
+    /* run out one task, put back to the pool and grab another task */
+    vector_stack_clear(mark_task);
+    pool_put_entry(metadata->free_task_pool, mark_task);
+    mark_task = pool_get_entry(trace_pool);
+  }
+  
+  /* put back the last mark stack to the free pool */
+  vector_stack_clear(trace_stack);
+  pool_put_entry(metadata->free_task_pool, trace_stack);
+  trace_stack = NULL;
+  sync_pool_destruct(trace_pool);
+  trace_pool = NULL;
+  printf("Live obj in vtable marking: %d\n", live_obj_in_verify_marking);
+  live_obj_in_verify_marking = 0;
+}
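The pass above exists for cross-checking: re-marking the heap through vtable bits should visit exactly the objects the GC's own marking visited. A hedged sketch of the comparison the printed counters support (counter and function names are from this file; the check itself is illustrative):

  wspace_verify_after_sweep(gc);   /* prints live_obj_in_mark, the GC's own count */
  wspace_verify_vtable_mark(gc);   /* prints live_obj_in_verify_marking */
  /* the two printed counts should agree; a mismatch points at a lost or spurious mark */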
+
+
+#endif
+
+
+
+#ifdef SSPACE_TIME
+
+/* read the CPU time-stamp counter: 0x0F 0x31 is the RDTSC opcode, which leaves
+   the 64-bit counter in EDX:EAX, the IA-32 return convention for uint64 */
+inline uint64 tsc()
+{
+  __asm _emit 0x0F;
+  __asm _emit 0x31
+}
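The same instruction is also available as a compiler intrinsic. A minimal sketch of an equivalent reader, assuming MSVC's <intrin.h> or GCC/Clang's <x86intrin.h> (illustrative, not part of this patch; tsc_intrinsic is a hypothetical name):

  #ifdef _MSC_VER
  #include <intrin.h>      /* declares __rdtsc() on MSVC */
  #else
  #include <x86intrin.h>   /* declares __rdtsc() on GCC/Clang for x86 */
  #endif

  inline unsigned long long tsc_intrinsic()
  {
    return __rdtsc();      /* executes RDTSC and returns the full 64-bit counter */
  }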
+
+#define CPU_HZ 3000000  // cycles per millisecond, i.e. a 3GHz clock is assumed
+
+static uint64 gc_start_time;
+static uint64 mark_start_time;
+static uint64 sweep_start_time;
+static uint64 compact_start_time;
+static uint64 fix_start_time;
+static uint64 merge_start_time;
+
+void wspace_gc_time(GC *gc, Boolean before_gc)
+{
+  if(before_gc){
+    gc_start_time = tsc();
+    mark_start_time = gc_start_time;
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > gc_start_time);
+    printf("\n\nGC %d time: %dms\n\n", gc->num_collections, (end_time-gc_start_time) / CPU_HZ);
+  }
+}
+
+void wspace_mark_time(Boolean before_mark)
+{
+  /* mark_start_time is seeded by wspace_gc_time(gc, TRUE); only the end-of-phase
+     path is exercised, and each end-of-phase hook seeds the next phase's start */
+  assert(before_mark == FALSE);
+  if(before_mark){
+    mark_start_time = tsc();
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > mark_start_time);
+    printf("\nMark time: %ums\n", (unsigned int)((end_time-mark_start_time) / CPU_HZ));
+    sweep_start_time = end_time;
+  }
+}
+
+void wspace_sweep_time(Boolean before_sweep, Boolean wspace_need_compact)
+{
+  assert(before_sweep == FALSE);
+  if(before_sweep){
+    sweep_start_time = tsc();
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > sweep_start_time);
+    printf("\nSweep time: %dms\n", (end_time-sweep_start_time) / CPU_HZ);
+    if(wspace_need_compact)
+      compact_start_time = end_time;
+    else
+      merge_start_time = end_time;
+  }
+}
+
+void wspace_compact_time(Boolean before_compact)
+{
+  assert(before_compact == FALSE);
+  if(before_compact){
+    compact_start_time = tsc();
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > compact_start_time);
+    printf("\nCompact time: %dms\n", (end_time-compact_start_time) / CPU_HZ);
+    fix_start_time = end_time;
+  }
+}
+
+void wspace_fix_time(Boolean before_fix)
+{
+  assert(before_fix == FALSE);
+  if(before_fix){
+    fix_start_time = tsc();
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > fix_start_time);
+    printf("\nFix time: %dms\n", (end_time-fix_start_time) / CPU_HZ);
+    merge_start_time = end_time;
+  }
+}
+
+void wspace_merge_time(Boolean before_merge)
+{
+  assert(before_merge == FALSE);
+  if(before_merge){
+    merge_start_time = tsc();
+  } else {
+    uint64 end_time = tsc();
+    assert(end_time > merge_start_time);
+    printf("\nMerge time: %dms\n\n", (end_time-merge_start_time) / CPU_HZ);
+  }
+}
+
+#endif

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_verify.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_verify.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_verify.h?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_verify.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_verify.h Wed Dec 26 02:17:10 2007
@@ -0,0 +1,53 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#ifndef _SSPACE_VERIFY_H_
+#define _SSPACE_VERIFY_H_
+
+#include "../common/gc_common.h"
+
+//#define SSPACE_VERIFY
+//#define SSPACE_VERIFY_FINREF
+//#define SSPACE_CHUNK_INFO
+//#define SSPACE_ALLOC_INFO
+//#define SSPACE_TIME
+
+struct Wspace;
+
+void wspace_verify_init(GC *gc);
+void wspace_verify_alloc(void *addr, unsigned int size);
+void wspace_verify_vtable_mark(GC *gc);
+void wspace_record_mark(void *addr, unsigned int size);
+void wspace_modify_mark_in_compact(void *new_addr, void *old_addr, unsigned int size);
+void wspace_verify_fix_in_compact(void);
+void wspace_verify_free_area(POINTER_SIZE_INT *start, POINTER_SIZE_INT size);
+void wspace_verify_before_collection(GC *gc);
+void wspace_verify_after_sweep(GC *gc);
+void wspace_verify_after_collection(GC *gc);
+
+void wspace_chunks_info(Wspace *wspace, Boolean show_info);
+void wspace_alloc_info(unsigned int size);
+void wspace_alloc_info_summary(void);
+
+void wspace_gc_time(GC *gc, Boolean before_gc);
+void wspace_mark_time(Boolean before_mark);
+void wspace_sweep_time(Boolean before_sweep, Boolean wspace_need_compact);
+void wspace_compact_time(Boolean before_compact);
+void wspace_fix_time(Boolean before_fix);
+void wspace_merge_time(Boolean before_merge);
+
+#endif // _SSPACE_VERIFY_H_
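These timing hooks are chained: each end-of-phase call prints the elapsed time and seeds the next phase's start, so only the before == FALSE paths are used in practice. A hedged sketch of the intended call order, inferred from the implementations in wspace_verify.cpp above (illustrative driver code; need_compact stands for the real wspace_need_compact flag):

  wspace_gc_time(gc, TRUE);                /* seeds gc_start_time and mark_start_time */
  /* ... marking ... */
  wspace_mark_time(FALSE);                 /* prints mark time, seeds sweep_start_time */
  /* ... sweeping ... */
  wspace_sweep_time(FALSE, need_compact);  /* seeds compact or merge start time */
  if(need_compact){
    /* ... compacting, then fixing references ... */
    wspace_compact_time(FALSE);            /* seeds fix_start_time */
    wspace_fix_time(FALSE);                /* seeds merge_start_time */
  }
  /* ... merging free chunks ... */
  wspace_merge_time(FALSE);
  wspace_gc_time(gc, FALSE);               /* prints total GC time */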

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_verify.h
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.h?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.h Wed Dec 26 02:17:10 2007
@@ -0,0 +1,111 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#ifndef _SEMI_SPACE_H_
+#define _SEMI_SPACE_H_
+
+#include "../thread/gc_thread.h"
+
+typedef struct Sspace{
+  /* <-- first couple of fields are overloaded as Space */
+  void* heap_start;
+  void* heap_end;
+  POINTER_SIZE_INT reserved_heap_size;
+  POINTER_SIZE_INT committed_heap_size;
+  unsigned int num_collections;
+  int64 time_collections;
+  float survive_ratio;
+  unsigned int collect_algorithm;
+  GC* gc;
+  Boolean move_object;
+
+  Space_Statistics* space_statistic;
+
+  /* Size allocated since the last minor collection. */
+  volatile uint64 last_alloced_size;
+  /* Size allocated since the last major collection. */
+  uint64 accumu_alloced_size;
+  /* Total size allocated since the VM started. */
+  uint64 total_alloced_size;
+
+  /* Size survived from last collection. */
+  uint64 last_surviving_size;
+  /* Size survived after a certain period. */
+  uint64 period_surviving_size;  
+
+  /* END of Space --> */
+
+  Block* blocks; /* short-cut for sspace block-header access, not mandatory */
+  
+  /* FIXME:: the block indices should be replaced with block header addresses */
+  unsigned int first_block_idx; /* always pointing to sspace bottom */
+  unsigned int ceiling_block_idx; /* tospace ceiling */
+  volatile unsigned int free_block_idx; /* tospace cur free block */
+  
+  unsigned int num_used_blocks;
+  unsigned int num_managed_blocks;
+  unsigned int num_total_blocks;
+  
+  volatile Block_Header* block_iterator;
+  /* END of Blocked_Space --> */
+  
+  Block_Header* cur_free_block;
+  unsigned int tospace_first_idx;
+  void* survivor_area_top;
+  void* survivor_area_bottom;
+
+}Sspace;
+
+Sspace *sspace_initialize(GC* gc, void* start, POINTER_SIZE_INT sspace_size, POINTER_SIZE_INT commit_size);
+void sspace_destruct(Sspace *sspace);
+
+void* sspace_alloc(unsigned int size, Allocator *allocator);
+Boolean sspace_alloc_block(Sspace* sspace, Allocator* allocator);
+
+void sspace_collection(Sspace* sspace);
+void sspace_prepare_for_collection(Sspace* sspace);
+void sspace_reset_after_collection(Sspace* sspace);
+
+void* semispace_alloc(unsigned int size, Allocator* allocator);
+
+void nongen_ss_pool(Collector* collector);
+void gen_ss_pool(Collector* collector);
+
+FORCE_INLINE Boolean sspace_has_free_block(Sspace* sspace)
+{
+  return (sspace->cur_free_block != NULL);
+}
+
+FORCE_INLINE Boolean obj_belongs_to_survivor_area(Sspace* sspace, Partial_Reveal_Object* p_obj)
+{
+  return (p_obj >= sspace->survivor_area_bottom &&
+          p_obj < sspace->survivor_area_top);
+}
+
+/* treat semispace allocation as thread-local allocation; if it fails, or if p_obj is already old (in the survivor area), forward the object to MOS */
+FORCE_INLINE void* semispace_forward_obj(Partial_Reveal_Object* p_obj, unsigned int size, Allocator* allocator)
+{
+  void* p_targ_obj = NULL;
+  Sspace* sspace = (Sspace*)allocator->alloc_space;
+  
+  if( !obj_belongs_to_survivor_area(sspace, p_obj) )
+    p_targ_obj = semispace_alloc(size, allocator);           
+  
+  return p_targ_obj;
+}
+
+#endif // _SEMI_SPACE_H_
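To make the forwarding contract concrete: semispace_forward_obj() returns NULL both when p_obj already sits in the survivor area (old enough to promote) and when the semispace is full, and the caller is then expected to fall back to MOS allocation. A hedged caller sketch, mirroring the shape of collector_forward_object() in thread/collector_alloc.h below (forward_young_obj is a hypothetical name; mos_alloc and backup_allocator come from that file):

  void* forward_young_obj(Partial_Reveal_Object* p_obj, unsigned int size, Collector* collector)
  {
    void* p_targ = semispace_forward_obj(p_obj, size, (Allocator*)collector);
    if(!p_targ)   /* survivor-area (old) object, or semispace exhausted: promote to MOS */
      p_targ = mos_alloc(size, collector->backup_allocator);
    return p_targ;
  }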

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.h
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp Wed Dec 26 02:17:10 2007
@@ -25,7 +25,7 @@
 #include "../mark_compact/mspace.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 #include "../common/space_tuner.h"
-#include "../mark_sweep/sspace.h"
+#include "../mark_sweep/wspace.h"
 
 unsigned int MINOR_COLLECTORS = 0;
 unsigned int MAJOR_COLLECTORS = 0;
@@ -84,14 +84,6 @@
   */
     
   GC_Metadata* metadata = collector->gc->metadata;
-
-/* TO_REMOVE
-
-  assert(collector->rep_set==NULL);
-  if( !gc_is_gen_mode() || !gc_match_kind(collector->gc, MINOR_COLLECTION)){
-    collector->rep_set = free_set_pool_get_entry(metadata);
-  }
-*/
   
   if(gc_is_gen_mode() && gc_match_kind(collector->gc, MINOR_COLLECTION) && NOS_PARTIAL_FORWARD){
     assert(collector->rem_set==NULL);
@@ -102,7 +94,7 @@
   collector_reset_weakref_sets(collector);
 #endif
 
-#ifndef USE_MARK_SWEEP_GC
+#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
   /*For LOS_Shrink and LOS_Extend*/
   if(gc_has_space_tuner(collector->gc) && collector->gc->tuner->kind != TRANS_NOTHING){
     collector->non_los_live_obj_size = 0;
@@ -157,7 +149,7 @@
   return;
 }
 
-static void wait_collection_finish(GC* gc)
+void wait_collection_finish(GC* gc)
 {
   unsigned int num_active_collectors = gc->num_active_collectors;
   for(unsigned int i=0; i<num_active_collectors; i++)
@@ -176,6 +168,7 @@
   while(true){
     /* Waiting for newly assigned task */
     collector_wait_for_task(collector); 
+    collector->collector_is_active = TRUE;
     
     /* waken up and check for new task */
     TaskType task_func = collector->task_func;
@@ -186,9 +179,13 @@
       
     task_func(collector);
 
-    alloc_context_reset((Allocator*)collector);
-
+    //done after collection so the collector returns the last thread-local block (TLB) it holds
+    #if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+    gc_reset_collector_alloc(collector->gc, collector);
+    #endif
     collector_notify_work_done(collector);
+    
+    collector->collector_is_active = FALSE;
   }
 
   return 0;
@@ -221,7 +218,6 @@
   while(old_live_collector_num == live_collector_num)
     vm_thread_yield(); /* give collector time to die */
   
-  delete collector->trace_stack;  
   return;
 }
 
@@ -232,14 +228,14 @@
 
 void collector_init_stats(Collector* collector)
 {
-#ifndef USE_MARK_SWEEP_GC
+#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
   gc_gen_collector_stats_initialize(collector);
 #endif
 }
 
 void collector_destruct_stats(Collector* collector)
 {
-#ifndef USE_MARK_SWEEP_GC
+#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
   gc_gen_collector_stats_destruct(collector);
 #endif
 }
@@ -256,8 +252,7 @@
 #ifdef GC_GEN_STATS
     collector_destruct_stats(collector);
 #endif
-    STD_FREE(collector);
-   
+    gc_destruct_collector_alloc(gc, collector);   
   }
   assert(live_collector_num == 0);
   
@@ -288,13 +283,10 @@
     /* FIXME:: thread_handle is for temporary control */
     collector->thread_handle = (VmThreadHandle)(POINTER_SIZE_INT)i;
     collector->gc = gc;
-    collector_init_thread(collector);
-
-#ifdef USE_MARK_SWEEP_GC
-    collector_init_free_chunk_list(collector);
-#else
-    gc_gen_hook_for_collector_init(collector);
-#endif
+    //init collector allocator (mainly for semi-space which has two target spaces)
+    gc_init_collector_alloc(gc, collector);
+    //init thread scheduling related stuff, creating collector thread
+    collector_init_thread(collector); 
 
 #ifdef GC_GEN_STATS
     collector_init_stats(collector);
@@ -316,3 +308,41 @@
     
   return;
 }
+
+void collector_execute_task_concurrent(GC* gc, TaskType task_func, Space* space, unsigned int num_collectors)
+{
+  assign_collector_with_task(gc, task_func, space);
+
+  return;
+}
+
+void collector_release_weakref_sets(GC* gc, unsigned int num_collectors)
+{
+  Finref_Metadata *metadata = gc->finref_metadata;
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  unsigned int i = 0;
+  for(; i<num_active_collectors; i++){
+    Collector* collector = gc->collectors[i];
+    pool_put_entry(metadata->free_pool, collector->softref_set);
+    pool_put_entry(metadata->free_pool, collector->weakref_set);
+    pool_put_entry(metadata->free_pool, collector->phanref_set);
+    collector->softref_set = NULL;
+    collector->weakref_set = NULL;
+    collector->phanref_set = NULL;
+  }
+}
+
+Boolean is_collector_finished(GC* gc)
+{
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  unsigned int i = 0;
+  for(; i<num_active_collectors; i++){
+    Collector* collector = gc->collectors[i];
+    if(collector->collector_is_active){
+      return FALSE;
+    }
+  }
+  return TRUE;
+}
+
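is_collector_finished() gives a concurrent-GC driver a non-blocking way to poll for completion. A hedged usage sketch, patterned on the vm_thread_yield() wait loop earlier in this file (not part of the patch):

  while(!is_collector_finished(gc))
    vm_thread_yield();   /* yield until every collector has cleared collector_is_active */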

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h Wed Dec 26 02:17:10 2007
@@ -43,14 +43,18 @@
   void *ceiling;
   void *end;
   void *alloc_block;
-  Chunk_Header ***local_chunks;
+  Chunk_Header ***local_chunks; /* this is for MARK-SWEEP GC */
   Space* alloc_space;
   GC* gc;
   VmThreadHandle thread_handle;   /* This thread; */
+  unsigned int handshake_signal; /*Handshake is used in concurrent GC.*/
   /* End of Allocator --> */
 
   /* FIXME:: for testing */
   Space* collect_space;
+  
+  /* backup allocator in case there are two target copy spaces, such as semispace GC */
+  Allocator* backup_allocator;
 
   Vector_Block *trace_stack;
   
@@ -77,11 +81,14 @@
   
   void(*task_func)(void*) ;   /* current task */
   
+  /* following three fields are to support LOS extension: to estimate MOS size */
   POINTER_SIZE_INT non_los_live_obj_size;
   POINTER_SIZE_INT los_live_obj_size;
   POINTER_SIZE_INT segment_live_size[NORMAL_SIZE_SEGMENT_NUM];
   unsigned int result;
 
+  Boolean collector_is_active;
+
   /*for collect statistics info*/
 #ifdef GC_GEN_STATS
   void* stats;
@@ -95,6 +102,8 @@
 void collector_reset(GC* gc);
 
 void collector_execute_task(GC* gc, TaskType task_func, Space* space);
+void collector_execute_task_concurrent(GC* gc, TaskType task_func, Space* space, unsigned int num_collectors);
+void collector_release_weakref_sets(GC* gc, unsigned int num_collectors);
 
 void collector_restore_obj_info(Collector* collector);
 #ifdef USE_32BITS_HASHCODE
@@ -104,6 +113,9 @@
 #ifndef USE_MARK_SWEEP_GC
 void gc_gen_hook_for_collector_init(Collector *collector);
 #endif
+
+Boolean is_collector_finished(GC* gc);
+void wait_collection_finish(GC* gc);
 
 inline Boolean gc_collection_result(GC* gc)
 {

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h Wed Dec 26 02:17:10 2007
@@ -23,13 +23,19 @@
 #define _COLLECTOR_ALLOC_H_
 
 #include "gc_thread.h"
+
 #ifdef USE_32BITS_HASHCODE
 #include "../common/hashcode.h"
 #endif
 
+#include "../semi_space/sspace.h"
+
 extern Space_Alloc_Func mos_alloc;
 
-/* NOS forward obj to MOS in MINOR_COLLECTION */
+//FIXME: MINOR_ALGO is static
+extern unsigned int MINOR_ALGO;
+
+/* NOS forwards objects to another space in MINOR_COLLECTION */
 FORCE_INLINE Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj)
 {
   Obj_Info_Type oi = get_obj_info_raw(p_obj);
@@ -44,16 +50,34 @@
   
 #ifdef USE_32BITS_HASHCODE
   Boolean obj_is_set_hashcode = hashcode_is_set(p_obj);
-  if(obj_is_set_hashcode) size += GC_OBJECT_ALIGNMENT;
+  Boolean obj_hashcode_attached = FALSE;
+  if(obj_is_set_hashcode){
+    size += GC_OBJECT_ALIGNMENT;
+    /* the tospace of semispace GC may have objects with their hashcode attached */
+    obj_hashcode_attached = hashcode_is_attached(p_obj);
+  }
 #endif
 
   Partial_Reveal_Object* p_targ_obj = NULL;
-  if(is_collector_local_alloc){
-    p_targ_obj = thread_local_alloc(size, (Allocator*)collector);
-    if(!p_targ_obj)
-      p_targ_obj = (Partial_Reveal_Object*)mos_alloc(size, (Allocator*)collector);
-  } else {
-    p_targ_obj = (Partial_Reveal_Object*)mos_alloc(size, (Allocator*)collector);
+
+  Allocator* allocator = (Allocator*)collector;
+  
+  /* can also use collector->collect_space->collect_algorithm */
+  if( MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL){
+
+    p_targ_obj = (Partial_Reveal_Object*)semispace_forward_obj(p_obj, size, allocator);
+    if( !p_targ_obj )
+      allocator = collector->backup_allocator;
+
+  }else{ /* other non-ss algorithms. FIXME:: I am going to remove this branch if it has no perf impact. */
+    
+    if(is_collector_local_alloc){  /* try local alloc first if collector supports it. Marksweep doesn't. */
+      p_targ_obj = thread_local_alloc(size, allocator);
+    }
+  }
+    
+  if(!p_targ_obj){
+    p_targ_obj = (Partial_Reveal_Object*)mos_alloc(size, allocator);
   }
     
   if(p_targ_obj == NULL){
@@ -71,37 +95,44 @@
       block. The remaining part of the switched block cannot be revived for the next allocation of 
        object that has smaller size than this one. */
     assert( obj_is_fw_in_oi(p_obj));
-    thread_local_unalloc(size, (Allocator*)collector);
+    thread_local_unalloc(size, allocator);
     return NULL;
   }
 
-assert((((POINTER_SIZE_INT)p_targ_obj) % GC_OBJECT_ALIGNMENT) == 0);
+  assert((((POINTER_SIZE_INT)p_targ_obj) % GC_OBJECT_ALIGNMENT) == 0);
+
 #ifdef USE_32BITS_HASHCODE
-  if(obj_is_set_hashcode){
-    memcpy(p_targ_obj, p_obj, size-GC_OBJECT_ALIGNMENT);
-    oi = trace_forward_process_hashcode(p_targ_obj, p_obj ,oi, size);
-  }else{
-    memcpy(p_targ_obj, p_obj, size);    
+  if(obj_is_set_hashcode && !obj_hashcode_attached){ 
+    size -= GC_OBJECT_ALIGNMENT;  //restore object size for memcpy from original object
+    oi = forward_obj_attach_hashcode(p_targ_obj, p_obj ,oi, size);  //get oi for following set_obj_info
   }
-#else
-  memcpy(p_targ_obj, p_obj, size);
 #endif //USE_32BITS_HASHCODE
 
-  /* we need clear the bit to give major collection a clean status. */
-  if(gc_is_gen_mode())
-    set_obj_info(p_targ_obj, oi&DUAL_MARKBITS_MASK);
+  memcpy(p_targ_obj, p_obj, size);  //copy once. 
 
+  /* restore oi, which currently is the forwarding pointer. 
+     for semispace GC, p_targ_obj is still in NOS, we should clear its oi mark_bits. */
+  if( obj_belongs_to_nos(p_targ_obj) || gc_is_gen_mode() )
+    /* we need clear the bit to give a clean status (it's possibly unclean due to partial forwarding) */
+    set_obj_info(p_targ_obj, oi&DUAL_MARKBITS_MASK);
+  else{
 #ifdef MARK_BIT_FLIPPING 
-  /* we need set MARK_BIT to indicate this object is processed for nongen forwarding */
-  else
+  /* we mark it to make the object look like other original live objects in MOS */
     set_obj_info(p_targ_obj, oi|FLIP_MARK_BIT);
-
-#else
+#else 
+    set_obj_info(p_targ_obj, oi);  
+#endif // MARK_BIT_FLIPPING 
+  }
+  
 #ifdef USE_32BITS_HASHCODE
-  else if(obj_is_set_hashcode) 
-    set_obj_info(p_targ_obj, oi);
-#endif
-#endif
+  if(obj_hashcode_attached){
+    /* This is tricky. In fallback compaction we need to iterate the heap for live objects,
+       so we need to know the exact object size. The hash bit of the original copy is
+       overwritten by the forwarding pointer, so we use a bit in the VT to indicate that
+       the original copy has an attached hashcode. We can't set that bit before the memcpy. */
+    obj_sethash_in_vt(p_obj);  
+  }
+#endif //USE_32BITS_HASHCODE
 
   return p_targ_obj;  
  

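The hashcode bookkeeping in collector_forward_object() is easy to misread: an object whose hashcode is set but not yet attached is allocated one alignment slot larger in the target space, but only its original payload is copied, after which the hashcode is attached; an already-attached hashcode simply travels with the memcpy. A hedged restatement as two helpers (hypothetical names; vm_object_size, hashcode_is_set, hashcode_is_attached and GC_OBJECT_ALIGNMENT are from this diff):

  /* bytes to allocate for the forwarded copy */
  unsigned int forward_alloc_size(Partial_Reveal_Object* p_obj)
  {
    unsigned int size = vm_object_size(p_obj);
    if(hashcode_is_set(p_obj))
      size += GC_OBJECT_ALIGNMENT;   /* reserve a slot for the hashcode word */
    return size;
  }

  /* bytes to memcpy from the original object */
  unsigned int forward_copy_size(Partial_Reveal_Object* p_obj)
  {
    unsigned int size = forward_alloc_size(p_obj);
    if(hashcode_is_set(p_obj) && !hashcode_is_attached(p_obj))
      size -= GC_OBJECT_ALIGNMENT;   /* not attached yet: copy the payload only, attach afterwards */
    return size;                     /* an attached hashcode is moved by the copy itself */
  }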
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h Wed Dec 26 02:17:10 2007
@@ -31,8 +31,8 @@
 #define ALLOC_PREFETCH
 #endif
 
-#ifdef ALLOC_ZEROING
-#ifdef ALLOC_PREFETCH
+#ifdef ALLOC_ZEROING  /* ----------------- */
+#ifdef ALLOC_PREFETCH /* vvvvvvvvvvvvvvvv  */
 
 #ifdef _WINDOWS_
 #include <xmmintrin.h>
@@ -45,10 +45,10 @@
 extern POINTER_SIZE_INT ZEROING_SIZE;
 extern POINTER_SIZE_INT PREFETCH_STRIDE;
 extern Boolean  PREFETCH_ENABLED;
-#else /* ALLOC_PREFETCH */
+#else /* ALLOC_PREFETCH  ^^^^^^^^^^^^^^^^ */
 #define ZEROING_SIZE	256
 #endif /* !ALLOC_PREFETCH */
-#endif /* ALLOC_ZEROING */
+#endif /* ALLOC_ZEROING  ----------------- */
 
 extern POINTER_SIZE_INT tls_gc_offset;
 
@@ -74,6 +74,7 @@
   Space* alloc_space;
   GC   *gc;
   VmThreadHandle thread_handle;   /* This thread; */
+  unsigned int handshake_signal; /*Handshake is used in concurrent GC.*/
 }Allocator;
 
 inline void thread_local_unalloc(unsigned int size, Allocator* allocator)
@@ -150,7 +151,9 @@
 {
     assert(alloc_block->status == BLOCK_FREE);
     alloc_block->status = BLOCK_IN_USE;
-
+#ifdef USE_UNIQUE_MOVE_COMPACT_GC
+    alloc_block->num_multi_block = 0;
+#endif
     /* set allocation context */
     void* new_free = alloc_block->free;
     allocator->free = new_free;
@@ -186,7 +189,8 @@
 inline void alloc_context_reset(Allocator* allocator)
 {
   Block_Header* block = (Block_Header*)allocator->alloc_block;
-  /* it can be NULL if GC happens before the mutator resumes, or called by collector */
+  /* it can be NULL when GC happens before the mutator resumes (other mutators have used up the memory),
+     or when a collector calls this after finishing collection, before it sleeps waiting for a new task */
   if( block != NULL ){
     assert(block->status == BLOCK_IN_USE);
     block->free = allocator->free;
@@ -194,9 +198,9 @@
     allocator->alloc_block = NULL;
   }
 
-   allocator->free = NULL;
-   allocator->ceiling = NULL;
-   allocator->end = NULL;
+  allocator->free = NULL;
+  allocator->ceiling = NULL;
+  allocator->end = NULL;
 
   return;
 }

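For context on the free/ceiling fields reset above: they implement the bump-pointer fast path over a thread-local block. A minimal sketch of that fast path, assuming the Allocator layout shown in this diff (tlb_bump_alloc is a hypothetical name; the real thread_local_alloc lives elsewhere in gc_thread.h):

  inline void* tlb_bump_alloc(unsigned int size, Allocator* allocator)
  {
    void* p = allocator->free;
    void* new_free = (void*)((POINTER_SIZE_INT)p + size);
    if(new_free > allocator->ceiling)
      return NULL;                /* block exhausted: caller grabs a fresh block or falls back */
    allocator->free = new_free;   /* bump the cursor; no locking needed, the block is thread-local */
    return p;
  }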
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp Wed Dec 26 02:17:10 2007
@@ -198,6 +198,7 @@
 
 void assign_marker_with_task(GC* gc, TaskType task_func, Space* space)
 {
+  gc->num_active_markers = gc->num_markers;
   for(unsigned int i=0; i<gc->num_markers; i++)
   {
     Marker* marker = gc->markers[i];
@@ -216,7 +217,8 @@
   gc->num_active_markers += num_markers;
   for(; i < gc->num_active_markers; i++)
   {
-    printf("start mark thread %d \n", i);
+    //printf("start mark thread %d \n", i);
+    
     Marker* marker = gc->markers[i];
     
     marker_reset_thread(marker);
@@ -227,16 +229,9 @@
   return;
 }
 
-void marker_execute_task(GC* gc, TaskType task_func, Space* space)
-{
-  assign_marker_with_task(gc, task_func, space);
-  wait_mark_finish(gc);    
-  return;
-}
-
 void wait_mark_root_finish(GC* gc)
 {
-  unsigned int num_marker = gc->num_markers;
+  unsigned int num_marker = gc->num_active_markers;
   for(unsigned int i=0; i<num_marker; i++)
   {
     Marker* marker = gc->markers[i];
@@ -257,6 +252,14 @@
   return;
 }
 
+void marker_execute_task(GC* gc, TaskType task_func, Space* space)
+{
+  assign_marker_with_task(gc, task_func, space);  
+  wait_mark_root_finish(gc);
+  wait_mark_finish(gc);    
+  return;
+}
+
 void marker_execute_task_concurrent(GC* gc, TaskType task_func, Space* space)
 {
   assign_marker_with_task(gc, task_func, space);
@@ -273,4 +276,6 @@
   wait_mark_root_finish(gc, num_markers);
   return;
 }
+
+
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h Wed Dec 26 02:17:10 2007
@@ -19,7 +19,7 @@
 #define _MARKER_H_
 
 #include "../common/gc_space.h"
-#include "../mark_sweep/sspace_chunk.h"
+#include "../mark_sweep/wspace_chunk.h"
 
 typedef struct Marker{
   /* <-- first couple of fields are overloaded as Allocator */
@@ -31,6 +31,7 @@
   Space* alloc_space;
   GC* gc;
   VmThreadHandle thread_handle;   /* This thread; */
+  unsigned int handshake_signal; /*Handshake is used in concurrent GC.*/
   /* End of Allocator --> */
 
   /* FIXME:: for testing */
@@ -66,11 +67,13 @@
   POINTER_SIZE_INT segment_live_size[NORMAL_SIZE_SEGMENT_NUM];
   unsigned int result;
 
+  Boolean marker_is_active;
+
   VmEventHandle markroot_finished_event;
 
-  Boolean marker_is_active;
   int64 time_mark;
-  Marker* next; 
+  Marker* next;
+  unsigned int num_dirty_slots_traced;
 } Marker;
 
 typedef Marker* Marker_List;
@@ -92,4 +95,6 @@
 
 
 #endif //_MARKER_H_
+
+
 


